diff --git a/py311/lib/python3.11/site-packages/PIL/AvifImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/AvifImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..366e0c864bf6ece4c401fe827430e07fd7fc4a09 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/AvifImagePlugin.py @@ -0,0 +1,291 @@ +from __future__ import annotations + +import os +from io import BytesIO +from typing import IO + +from . import ExifTags, Image, ImageFile + +try: + from . import _avif + + SUPPORTED = True +except ImportError: + SUPPORTED = False + +# Decoder options as module globals, until there is a way to pass parameters +# to Image.open (see https://github.com/python-pillow/Pillow/issues/569) +DECODE_CODEC_CHOICE = "auto" +DEFAULT_MAX_THREADS = 0 + + +def get_codec_version(codec_name: str) -> str | None: + versions = _avif.codec_versions() + for version in versions.split(", "): + if version.split(" [")[0] == codec_name: + return version.split(":")[-1].split(" ")[0] + return None + + +def _accept(prefix: bytes) -> bool | str: + if prefix[4:8] != b"ftyp": + return False + major_brand = prefix[8:12] + if major_brand in ( + # coding brands + b"avif", + b"avis", + # We accept files with AVIF container brands; we can't yet know if + # the ftyp box has the correct compatible brands, but if it doesn't + # then the plugin will raise a SyntaxError which Pillow will catch + # before moving on to the next plugin that accepts the file. + # + # Also, because this file might not actually be an AVIF file, we + # don't raise an error if AVIF support isn't properly compiled. + b"mif1", + b"msf1", + ): + if not SUPPORTED: + return ( + "image file could not be identified because AVIF support not installed" + ) + return True + return False + + +def _get_default_max_threads() -> int: + if DEFAULT_MAX_THREADS: + return DEFAULT_MAX_THREADS + if hasattr(os, "sched_getaffinity"): + return len(os.sched_getaffinity(0)) + else: + return os.cpu_count() or 1 + + +class AvifImageFile(ImageFile.ImageFile): + format = "AVIF" + format_description = "AVIF image" + __frame = -1 + + def _open(self) -> None: + if not SUPPORTED: + msg = "image file could not be opened because AVIF support not installed" + raise SyntaxError(msg) + + if DECODE_CODEC_CHOICE != "auto" and not _avif.decoder_codec_available( + DECODE_CODEC_CHOICE + ): + msg = "Invalid opening codec" + raise ValueError(msg) + self._decoder = _avif.AvifDecoder( + self.fp.read(), + DECODE_CODEC_CHOICE, + _get_default_max_threads(), + ) + + # Get info from decoder + self._size, self.n_frames, self._mode, icc, exif, exif_orientation, xmp = ( + self._decoder.get_info() + ) + self.is_animated = self.n_frames > 1 + + if icc: + self.info["icc_profile"] = icc + if xmp: + self.info["xmp"] = xmp + + if exif_orientation != 1 or exif: + exif_data = Image.Exif() + if exif: + exif_data.load(exif) + original_orientation = exif_data.get(ExifTags.Base.Orientation, 1) + else: + original_orientation = 1 + if exif_orientation != original_orientation: + exif_data[ExifTags.Base.Orientation] = exif_orientation + exif = exif_data.tobytes() + if exif: + self.info["exif"] = exif + self.seek(0) + + def seek(self, frame: int) -> None: + if not self._seek_check(frame): + return + + # Set tile + self.__frame = frame + self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 0, self.mode)] + + def load(self) -> Image.core.PixelAccess | None: + if self.tile: + # We need to load the image data for this frame + data, timescale, pts_in_timescales, duration_in_timescales = ( + 
self._decoder.get_frame(self.__frame) + ) + self.info["timestamp"] = round(1000 * (pts_in_timescales / timescale)) + self.info["duration"] = round(1000 * (duration_in_timescales / timescale)) + + if self.fp and self._exclusive_fp: + self.fp.close() + self.fp = BytesIO(data) + + return super().load() + + def load_seek(self, pos: int) -> None: + pass + + def tell(self) -> int: + return self.__frame + + +def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + _save(im, fp, filename, save_all=True) + + +def _save( + im: Image.Image, fp: IO[bytes], filename: str | bytes, save_all: bool = False +) -> None: + info = im.encoderinfo.copy() + if save_all: + append_images = list(info.get("append_images", [])) + else: + append_images = [] + + total = 0 + for ims in [im] + append_images: + total += getattr(ims, "n_frames", 1) + + quality = info.get("quality", 75) + if not isinstance(quality, int) or quality < 0 or quality > 100: + msg = "Invalid quality setting" + raise ValueError(msg) + + duration = info.get("duration", 0) + subsampling = info.get("subsampling", "4:2:0") + speed = info.get("speed", 6) + max_threads = info.get("max_threads", _get_default_max_threads()) + codec = info.get("codec", "auto") + if codec != "auto" and not _avif.encoder_codec_available(codec): + msg = "Invalid saving codec" + raise ValueError(msg) + range_ = info.get("range", "full") + tile_rows_log2 = info.get("tile_rows", 0) + tile_cols_log2 = info.get("tile_cols", 0) + alpha_premultiplied = bool(info.get("alpha_premultiplied", False)) + autotiling = bool(info.get("autotiling", tile_rows_log2 == tile_cols_log2 == 0)) + + icc_profile = info.get("icc_profile", im.info.get("icc_profile")) + exif_orientation = 1 + if exif := info.get("exif"): + if isinstance(exif, Image.Exif): + exif_data = exif + else: + exif_data = Image.Exif() + exif_data.load(exif) + if ExifTags.Base.Orientation in exif_data: + exif_orientation = exif_data.pop(ExifTags.Base.Orientation) + exif = exif_data.tobytes() if exif_data else b"" + elif isinstance(exif, Image.Exif): + exif = exif_data.tobytes() + + xmp = info.get("xmp") + + if isinstance(xmp, str): + xmp = xmp.encode("utf-8") + + advanced = info.get("advanced") + if advanced is not None: + if isinstance(advanced, dict): + advanced = advanced.items() + try: + advanced = tuple(advanced) + except TypeError: + invalid = True + else: + invalid = any(not isinstance(v, tuple) or len(v) != 2 for v in advanced) + if invalid: + msg = ( + "advanced codec options must be a dict of key-value string " + "pairs or a series of key-value two-tuples" + ) + raise ValueError(msg) + + # Setup the AVIF encoder + enc = _avif.AvifEncoder( + im.size, + subsampling, + quality, + speed, + max_threads, + codec, + range_, + tile_rows_log2, + tile_cols_log2, + alpha_premultiplied, + autotiling, + icc_profile or b"", + exif or b"", + exif_orientation, + xmp or b"", + advanced, + ) + + # Add each frame + frame_idx = 0 + frame_duration = 0 + cur_idx = im.tell() + is_single_frame = total == 1 + try: + for ims in [im] + append_images: + # Get number of frames in this image + nfr = getattr(ims, "n_frames", 1) + + for idx in range(nfr): + ims.seek(idx) + + # Make sure image mode is supported + frame = ims + rawmode = ims.mode + if ims.mode not in {"RGB", "RGBA"}: + rawmode = "RGBA" if ims.has_transparency_data else "RGB" + frame = ims.convert(rawmode) + + # Update frame duration + if isinstance(duration, (list, tuple)): + frame_duration = duration[frame_idx] + else: + frame_duration = duration + + # Append the 
frame to the animation encoder + enc.add( + frame.tobytes("raw", rawmode), + frame_duration, + frame.size, + rawmode, + is_single_frame, + ) + + # Update frame index + frame_idx += 1 + + if not save_all: + break + + finally: + im.seek(cur_idx) + + # Get the final output from the encoder + data = enc.finish() + if data is None: + msg = "cannot write file as AVIF (encoder returned None)" + raise OSError(msg) + + fp.write(data) + + +Image.register_open(AvifImageFile.format, AvifImageFile, _accept) +if SUPPORTED: + Image.register_save(AvifImageFile.format, _save) + Image.register_save_all(AvifImageFile.format, _save_all) + Image.register_extensions(AvifImageFile.format, [".avif", ".avifs"]) + Image.register_mime(AvifImageFile.format, "image/avif") diff --git a/py311/lib/python3.11/site-packages/PIL/BdfFontFile.py b/py311/lib/python3.11/site-packages/PIL/BdfFontFile.py new file mode 100644 index 0000000000000000000000000000000000000000..f175e2f4f80b1b232d79f15a6db0667296917c97 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/BdfFontFile.py @@ -0,0 +1,122 @@ +# +# The Python Imaging Library +# $Id$ +# +# bitmap distribution font (bdf) file parser +# +# history: +# 1996-05-16 fl created (as bdf2pil) +# 1997-08-25 fl converted to FontFile driver +# 2001-05-25 fl removed bogus __init__ call +# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev) +# 2003-04-22 fl more robustification (from Graham Dumpleton) +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1997-2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +""" +Parse X Bitmap Distribution Format (BDF) +""" +from __future__ import annotations + +from typing import BinaryIO + +from . import FontFile, Image + + +def bdf_char( + f: BinaryIO, +) -> ( + tuple[ + str, + int, + tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]], + Image.Image, + ] + | None +): + # skip to STARTCHAR + while True: + s = f.readline() + if not s: + return None + if s.startswith(b"STARTCHAR"): + break + id = s[9:].strip().decode("ascii") + + # load symbol properties + props = {} + while True: + s = f.readline() + if not s or s.startswith(b"BITMAP"): + break + i = s.find(b" ") + props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii") + + # load bitmap + bitmap = bytearray() + while True: + s = f.readline() + if not s or s.startswith(b"ENDCHAR"): + break + bitmap += s[:-1] + + # The word BBX + # followed by the width in x (BBw), height in y (BBh), + # and x and y displacement (BBxoff0, BByoff0) + # of the lower left corner from the origin of the character. + width, height, x_disp, y_disp = (int(p) for p in props["BBX"].split()) + + # The word DWIDTH + # followed by the width in x and y of the character in device pixels. 
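+    # (dwx/dwy become the glyph's advance; the dst bbox built below negates
+    # the vertical offsets because BDF's y axis points up while PIL's
+    # points down.)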
+ dwx, dwy = (int(p) for p in props["DWIDTH"].split()) + + bbox = ( + (dwx, dwy), + (x_disp, -y_disp - height, width + x_disp, -y_disp), + (0, 0, width, height), + ) + + try: + im = Image.frombytes("1", (width, height), bitmap, "hex", "1") + except ValueError: + # deal with zero-width characters + im = Image.new("1", (width, height)) + + return id, int(props["ENCODING"]), bbox, im + + +class BdfFontFile(FontFile.FontFile): + """Font file plugin for the X11 BDF format.""" + + def __init__(self, fp: BinaryIO) -> None: + super().__init__() + + s = fp.readline() + if not s.startswith(b"STARTFONT 2.1"): + msg = "not a valid BDF file" + raise SyntaxError(msg) + + props = {} + comments = [] + + while True: + s = fp.readline() + if not s or s.startswith(b"ENDPROPERTIES"): + break + i = s.find(b" ") + props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii") + if s[:i] in [b"COMMENT", b"COPYRIGHT"]: + if s.find(b"LogicalFontDescription") < 0: + comments.append(s[i + 1 : -1].decode("ascii")) + + while True: + c = bdf_char(fp) + if not c: + break + id, ch, (xy, dst, src), im = c + if 0 <= ch < len(self.glyph): + self.glyph[ch] = xy, dst, src, im diff --git a/py311/lib/python3.11/site-packages/PIL/BlpImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/BlpImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..f7be7746d84bbc076e0a41124a903a8b2b05ae61 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/BlpImagePlugin.py @@ -0,0 +1,497 @@ +""" +Blizzard Mipmap Format (.blp) +Jerome Leclanche + +The contents of this file are hereby released in the public domain (CC0) +Full text of the CC0 license: + https://creativecommons.org/publicdomain/zero/1.0/ + +BLP1 files, used mostly in Warcraft III, are not fully supported. +All types of BLP2 files used in World of Warcraft are supported. + +The BLP file structure consists of a header, up to 16 mipmaps of the +texture + +Texture sizes must be powers of two, though the two dimensions do +not have to be equal; 512x256 is valid, but 512x200 is not. +The first mipmap (mipmap #0) is the full size image; each subsequent +mipmap halves both dimensions. The final mipmap should be 1x1. + +BLP files come in many different flavours: +* JPEG-compressed (type == 0) - only supported for BLP1. +* RAW images (type == 1, encoding == 1). Each mipmap is stored as an + array of 8-bit values, one per pixel, left to right, top to bottom. + Each value is an index to the palette. +* DXT-compressed (type == 1, encoding == 2): +- DXT1 compression is used if alpha_encoding == 0. + - An additional alpha bit is used if alpha_depth == 1. + - DXT3 compression is used if alpha_encoding == 1. + - DXT5 compression is used if alpha_encoding == 7. +""" + +from __future__ import annotations + +import abc +import os +import struct +from enum import IntEnum +from io import BytesIO +from typing import IO + +from . import Image, ImageFile + + +class Format(IntEnum): + JPEG = 0 + + +class Encoding(IntEnum): + UNCOMPRESSED = 1 + DXT = 2 + UNCOMPRESSED_RAW_BGRA = 3 + + +class AlphaEncoding(IntEnum): + DXT1 = 0 + DXT3 = 1 + DXT5 = 7 + + +def unpack_565(i: int) -> tuple[int, int, int]: + return ((i >> 11) & 0x1F) << 3, ((i >> 5) & 0x3F) << 2, (i & 0x1F) << 3 + + +def decode_dxt1( + data: bytes, alpha: bool = False +) -> tuple[bytearray, bytearray, bytearray, bytearray]: + """ + input: one "row" of data (i.e. 
will produce 4*width pixels)
+    """
+
+    blocks = len(data) // 8  # number of blocks in row
+    ret = (bytearray(), bytearray(), bytearray(), bytearray())
+
+    for block_index in range(blocks):
+        # Decode next 8-byte block.
+        idx = block_index * 8
+        color0, color1, bits = struct.unpack_from("<HHI", data, idx)
+
+        r0, g0, b0 = unpack_565(color0)
+        r1, g1, b1 = unpack_565(color1)
+
+        # Decode this block into 4x4 pixels
+        # Accumulate the results onto our 4 row accumulators
+        for j in range(4):
+            for i in range(4):
+                # get next control op and generate a pixel
+
+                control = bits & 3
+                bits = bits >> 2
+
+                a = 0xFF
+                if control == 0:
+                    r, g, b = r0, g0, b0
+                elif control == 1:
+                    r, g, b = r1, g1, b1
+                elif control == 2:
+                    if color0 > color1:
+                        r = (2 * r0 + r1) // 3
+                        g = (2 * g0 + g1) // 3
+                        b = (2 * b0 + b1) // 3
+                    else:
+                        r = (r0 + r1) // 2
+                        g = (g0 + g1) // 2
+                        b = (b0 + b1) // 2
+                elif control == 3:
+                    if color0 > color1:
+                        r = (2 * r1 + r0) // 3
+                        g = (2 * g1 + g0) // 3
+                        b = (2 * b1 + b0) // 3
+                    else:
+                        r, g, b, a = 0, 0, 0, 0
+
+                if alpha:
+                    ret[j].extend([r, g, b, a])
+                else:
+                    ret[j].extend([r, g, b])
+
+    return ret
+
+
+def decode_dxt3(data: bytes) -> tuple[bytearray, bytearray, bytearray, bytearray]:
+    """
+    input: one "row" of data (i.e. will produce 4*width pixels)
+    """
+
+    blocks = len(data) // 16  # number of blocks in row
+    ret = (bytearray(), bytearray(), bytearray(), bytearray())
+
+    for block_index in range(blocks):
+        idx = block_index * 16
+        block = data[idx : idx + 16]
+        # Decode next 16-byte block.
+        bits = struct.unpack_from("<8B", block)
+        color0, color1 = struct.unpack_from("<HH", block, 8)
+        (code,) = struct.unpack_from("<I", block, 12)
+
+        r0, g0, b0 = unpack_565(color0)
+        r1, g1, b1 = unpack_565(color1)
+
+        for j in range(4):
+            high = False  # Do we want the higher bits?
+            for i in range(4):
+                alphacode_index = (4 * j + i) // 2
+
+                a = bits[alphacode_index]
+                if high:
+                    high = False
+                    a >>= 4
+                else:
+                    high = True
+                a &= 0xF
+                a *= 17  # We get a value between 0 and 15
+
+                color_code = (code >> 2 * (4 * j + i)) & 0x03
+
+                if color_code == 0:
+                    r, g, b = r0, g0, b0
+                elif color_code == 1:
+                    r, g, b = r1, g1, b1
+                elif color_code == 2:
+                    r = (2 * r0 + r1) // 3
+                    g = (2 * g0 + g1) // 3
+                    b = (2 * b0 + b1) // 3
+                elif color_code == 3:
+                    r = (2 * r1 + r0) // 3
+                    g = (2 * g1 + g0) // 3
+                    b = (2 * b1 + b0) // 3
+
+                ret[j].extend([r, g, b, a])
+
+    return ret
+
+
+def decode_dxt5(data: bytes) -> tuple[bytearray, bytearray, bytearray, bytearray]:
+    """
+    input: one "row" of data (i.e. will produce 4 * width pixels)
+    """
+
+    blocks = len(data) // 16  # number of blocks in row
+    ret = (bytearray(), bytearray(), bytearray(), bytearray())
+
+    for block_index in range(blocks):
+        idx = block_index * 16
+        block = data[idx : idx + 16]
+        # Decode next 16-byte block.
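+        # (DXT5 block layout: bytes 0-1 are the two alpha endpoints, bytes
+        # 2-7 pack sixteen 3-bit alpha indices, bytes 8-11 the two RGB565
+        # color endpoints, and bytes 12-15 sixteen 2-bit color indices.)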
+        a0, a1 = struct.unpack_from("<BB", block)
+        bits = struct.unpack_from("<6B", block, 2)
+        alphacode1 = bits[2] | (bits[3] << 8) | (bits[4] << 16) | (bits[5] << 24)
+        alphacode2 = bits[0] | (bits[1] << 8)
+        color0, color1 = struct.unpack_from("<HH", block, 8)
+        (code,) = struct.unpack_from("<I", block, 12)
+        r0, g0, b0 = unpack_565(color0)
+        r1, g1, b1 = unpack_565(color1)
+        for j in range(4):
+            for i in range(4):
+                # get next control op and generate a pixel
+                alphacode_index = 3 * (4 * j + i)
+
+                if alphacode_index <= 12:
+                    alphacode = (alphacode2 >> alphacode_index) & 0x07
+                elif alphacode_index == 15:
+                    alphacode = (alphacode2 >> 15) | ((alphacode1 << 1) & 0x06)
+                else:  # alphacode_index >= 18 and alphacode_index <= 45
+                    alphacode = (alphacode1 >> (alphacode_index - 16)) & 0x07
+
+                if alphacode == 0:
+                    a = a0
+                elif alphacode == 1:
+                    a = a1
+                elif a0 > a1:
+                    a = ((8 - alphacode) * a0 + (alphacode - 1) * a1) // 7
+                elif alphacode == 6:
+                    a = 0
+                elif alphacode == 7:
+                    a = 255
+                else:
+                    a = ((6 - alphacode) * a0 + (alphacode - 1) * a1) // 5
+
+                color_code = (code >> 2 * (4 * j + i)) & 0x03
+
+                if color_code == 0:
+                    r, g, b = r0, g0, b0
+                elif color_code == 1:
+                    r, g, b = r1, g1, b1
+                elif color_code == 2:
+                    r = (2 * r0 + r1) // 3
+                    g = (2 * g0 + g1) // 3
+                    b = (2 * b0 + b1) // 3
+                elif color_code == 3:
+                    r = (2 * r1 + r0) // 3
+                    g = (2 * g1 + g0) // 3
+                    b = (2 * b1 + b0) // 3
+
+                ret[j].extend([r, g, b, a])
+
+    return ret
+
+
+class BLPFormatError(NotImplementedError):
+    pass
+
+
+def _accept(prefix: bytes) -> bool:
+    return prefix.startswith((b"BLP1", b"BLP2"))
+
+
+class BlpImageFile(ImageFile.ImageFile):
+    """
+    Blizzard Mipmap Format
+    """
+
+    format = "BLP"
+    format_description = "Blizzard Mipmap Format"
+
+    def _open(self) -> None:
+        self.magic = self.fp.read(4)
+        if not _accept(self.magic):
+            msg = f"Bad BLP magic {repr(self.magic)}"
+            raise BLPFormatError(msg)
+
+        compression = struct.unpack("<i", self.fp.read(4))[0]
+        if self.magic == b"BLP1":
+            alpha = struct.unpack("<L", self.fp.read(4))[0] != 0
+        else:
+            (encoding,) = struct.unpack("<b", self.fp.read(1))
+            alpha = struct.unpack("<b", self.fp.read(1))[0] != 0
+            (alpha_encoding,) = struct.unpack("<b", self.fp.read(1))
+            self.fp.seek(1, os.SEEK_CUR)  # mips
+
+        self._size = struct.unpack("<II", self.fp.read(8))
+
+        args: tuple[int, int, bool] | tuple[int, int, bool, int]
+        if self.magic == b"BLP1":
+            encoding = struct.unpack("<i", self.fp.read(4))[0]
+            self.fp.seek(4, os.SEEK_CUR)  # subtype
+            args = (compression, encoding, alpha)
+            offset = 28
+        else:
+            args = (compression, encoding, alpha, alpha_encoding)
+            offset = 20
+
+        decoder = self.magic.decode()
+
+        self._mode = "RGBA" if alpha else "RGB"
+        self.tile = [ImageFile._Tile(decoder, (0, 0) + self.size, offset, args)]
+
+
+class _BLPBaseDecoder(ImageFile.PyDecoder):
+    _pulls_fd = True
+
+    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
+        try:
+            self._read_header()
+            self._load()
+        except struct.error as e:
+            msg = "Truncated BLP file"
+            raise OSError(msg) from e
+        return -1, 0
+
+    @abc.abstractmethod
+    def _load(self) -> None:
+        pass
+
+    def _read_header(self) -> None:
+        self._offsets = struct.unpack("<16I", self._safe_read(16 * 4))
+        self._lengths = struct.unpack("<16I", self._safe_read(16 * 4))
+
+    def _safe_read(self, length: int) -> bytes:
+        assert self.fd is not None
+        return ImageFile._safe_read(self.fd, length)
+
+    def _read_palette(self) -> list[tuple[int, int, int, int]]:
+        ret = []
+        for i in range(256):
+            try:
+                b, g, r, a = struct.unpack("<4B", self._safe_read(4))
+            except struct.error:
+                break
+            ret.append((b, g, r, a))
+        return ret
+
+    def _read_bgra(
+        self, palette: list[tuple[int, int, int, int]], alpha: bool
+    ) -> bytearray:
+        data = bytearray()
+        _data = BytesIO(self._safe_read(self._lengths[0]))
+        while True:
+            try:
+                (offset,) = struct.unpack("<B", _data.read(1))
+            except struct.error:
+                break
+            b, g, r, a = palette[offset]
+            d: tuple[int, ...] = (r, g, b)
+            if alpha:
+                d += (a,)
+            data.extend(d)
+        return data
+
+
+class BLP1Decoder(_BLPBaseDecoder):
+    def _load(self) -> None:
+        self._compression, self._encoding, alpha = self.args
+
+        if self._compression == Format.JPEG:
+            self._decode_jpeg_stream()
+
+        elif self._compression == 1:
+            if self._encoding in (4, 5):
+                palette = self._read_palette()
+                data = self._read_bgra(palette, alpha)
+                self.set_as_raw(data)
+            else:
+                msg = f"Unsupported BLP encoding {repr(self._encoding)}"
+                raise BLPFormatError(msg)
+        else:
+            msg = f"Unsupported BLP compression {repr(self._encoding)}"
+            raise BLPFormatError(msg)
+
+    def _decode_jpeg_stream(self) -> None:
+        from .JpegImagePlugin import JpegImageFile
+
+        (jpeg_header_size,) = struct.unpack("<I", self._safe_read(4))
+        jpeg_header = self._safe_read(jpeg_header_size)
+        assert self.fd is not None
+        self._safe_read(self._offsets[0] - self.fd.tell())  # What IS this?
+        data = self._safe_read(self._lengths[0])
+        data = jpeg_header + data
+        image = JpegImageFile(BytesIO(data))
+        Image._decompression_bomb_check(image.size)
+        if image.mode == "CMYK":
+            args = image.tile[0].args
+            assert isinstance(args, tuple)
+            image.tile = [
+                ImageFile._Tile("jpeg", image.tile[0].extent, 0, (args[0], "CMYK"))
+            ]
+        r, g, b = image.convert("RGB").split()
+        reversed_image = Image.merge("RGB", (b, g, r))
+        self.set_as_raw(reversed_image.tobytes())
+
+
+class BLP2Decoder(_BLPBaseDecoder):
+    def _load(self) -> None:
+        self._compression, self._encoding, alpha, self._alpha_encoding = self.args
+
+        palette = self._read_palette()
+
+        assert self.fd is not None
+        self.fd.seek(self._offsets[0])
+
+        if self._compression == 1:
+            # Uncompressed or DirectX compression
+
+            if self._encoding == Encoding.UNCOMPRESSED:
+                data = self._read_bgra(palette, alpha)
+
+            elif self._encoding == Encoding.DXT:
+                data = bytearray()
+                if self._alpha_encoding == AlphaEncoding.DXT1:
+                    linesize = (self.state.xsize + 3) // 4 * 8
+                    for yb in range((self.state.ysize + 3) // 4):
+                        for d in decode_dxt1(self._safe_read(linesize), alpha):
+                            data += d
+
+                elif self._alpha_encoding == AlphaEncoding.DXT3:
+                    linesize = (self.state.xsize + 3) // 4 * 16
+                    for yb in range((self.state.ysize + 3) // 4):
+                        for d in decode_dxt3(self._safe_read(linesize)):
+                            data += d
+
+                elif self._alpha_encoding == AlphaEncoding.DXT5:
+                    linesize = (self.state.xsize + 3) // 4 * 16
+                    for yb in range((self.state.ysize + 3) // 4):
+                        for d in decode_dxt5(self._safe_read(linesize)):
+                            data += d
+                else:
+                    msg = f"Unsupported alpha encoding {repr(self._alpha_encoding)}"
+                    raise BLPFormatError(msg)
+            else:
+                msg = f"Unknown BLP encoding {repr(self._encoding)}"
+                raise BLPFormatError(msg)
+
+        else:
+            msg = f"Unknown BLP compression {repr(self._compression)}"
+            raise BLPFormatError(msg)
+
+        self.set_as_raw(data)
+
+
+class BLPEncoder(ImageFile.PyEncoder):
+    _pushes_fd = True
+
+    def _write_palette(self) -> bytes:
+        data = b""
+        assert self.im is not None
+        palette = self.im.getpalette("RGBA", "RGBA")
+        for i in range(len(palette) // 4):
+            r, g, b, a = palette[i * 4 : (i + 1) * 4]
+            data += struct.pack("<4B", b, g, r, a)
+        while len(data) < 256 * 4:
+            data += b"\x00" * 4
+        return data
+
+    def encode(self, bufsize: int) -> tuple[int, int, bytes]:
+        palette_data = self._write_palette()
+
+        offset = 20 + 16 * 4 * 2 + len(palette_data)
+        data = struct.pack("<16I", offset, *((0,) * 15))
+
+        assert self.im is not None
+        w, h = self.im.size
+        data += struct.pack("<16I", w * h, *((0,) * 15))
+
+        data += palette_data
+
+        for y in range(h):
+            for x in range(w):
+                data += struct.pack("<B", self.im.getpixel((x, y)))
+
+        return len(data), 0, data
+
+
+def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
+    if im.mode != "P":
+        msg = "Unsupported BLP image mode"
+        raise ValueError(msg)
+
+    magic = b"BLP1" if im.encoderinfo.get("blp_version") == "BLP1" else b"BLP2"
+    fp.write(magic)
+
+    assert im.palette is not None
+    fp.write(struct.pack("<i", 1))  # Uncompressed or DirectX compression
+    alpha_depth = 1 if im.palette.mode == "RGBA" else 0
+    if magic == b"BLP1":
+        fp.write(struct.pack("<L", alpha_depth))
+    else:
+        fp.write(struct.pack("<b", Encoding.UNCOMPRESSED))
+        fp.write(struct.pack("<b", alpha_depth))
+        fp.write(struct.pack("<b", 0))  # alpha encoding
+        fp.write(struct.pack("<b", 0))  # mips
+    fp.write(struct.pack("<II", *im.size))
+    if magic == b"BLP1":
+        fp.write(struct.pack("<i", 5))  # encoding
+        fp.write(struct.pack("<i", 0))  # subtype
+
+    ImageFile._save(im, fp, [ImageFile._Tile("BLP", (0, 0) + im.size, 0, im.mode)])
+
+
+Image.register_open(BlpImageFile.format, BlpImageFile, _accept)
+Image.register_extension(BlpImageFile.format, ".blp")
+Image.register_decoder("BLP1", BLP1Decoder)
+Image.register_decoder("BLP2", BLP2Decoder)
+
+Image.register_save(BlpImageFile.format, _save)
+Image.register_encoder("BLP", BLPEncoder)
diff --git a/py311/lib/python3.11/site-packages/PIL/BmpImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/BmpImagePlugin.py
new file mode 100644
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/PIL/BmpImagePlugin.py
+#
+# The Python Imaging Library.
+# $Id$
+#
+# BMP file handler
+#
+# Windows (and OS/2) native bitmap storage format.
+#
+# Copyright (c) 1997-2003 by Secret Labs AB
+# Copyright (c) 1995-2003 by Fredrik Lundh
+#
+# See the README file for information on usage and redistribution.
+#
+from __future__ import annotations
+
+import os
+from typing import IO, Any
+
+from . import Image, ImageFile, ImagePalette
+from ._binary import i16le as i16
+from ._binary import i32le as i32
+from ._binary import o8
+from ._binary import o16le as o16
+from ._binary import o32le as o32
+
+#
+# --------------------------------------------------------------------
+# Read BMP file
+
+BIT2MODE = {
+    # bits => mode, rawmode
+    1: ("P", "P;1"),
+    4: ("P", "P;4"),
+    8: ("P", "P"),
+    16: ("RGB", "BGR;15"),
+    24: ("RGB", "BGR"),
+    32: ("RGB", "BGRX"),
+}
+
+USE_RAW_ALPHA = False
+
+
+def _accept(prefix: bytes) -> bool:
+    return prefix.startswith(b"BM")
+
+
+def _dib_accept(prefix: bytes) -> bool:
+    return i32(prefix) in [12, 40, 52, 56, 64, 108, 124]
+
+
+# =============================================================================
+# Image plugin for the Windows BMP format.
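+# Handles Windows v2-v5 and OS/2 headers; palette images may be RLE4/RLE8
+# compressed, and 16/24/32-bit images may carry BITFIELDS channel masks.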
+# ============================================================================= +class BmpImageFile(ImageFile.ImageFile): + """Image plugin for the Windows Bitmap format (BMP)""" + + # ------------------------------------------------------------- Description + format_description = "Windows Bitmap" + format = "BMP" + + # -------------------------------------------------- BMP Compression values + COMPRESSIONS = {"RAW": 0, "RLE8": 1, "RLE4": 2, "BITFIELDS": 3, "JPEG": 4, "PNG": 5} + for k, v in COMPRESSIONS.items(): + vars()[k] = v + + def _bitmap(self, header: int = 0, offset: int = 0) -> None: + """Read relevant info about the BMP""" + read, seek = self.fp.read, self.fp.seek + if header: + seek(header) + # read bmp header size @offset 14 (this is part of the header size) + file_info: dict[str, bool | int | tuple[int, ...]] = { + "header_size": i32(read(4)), + "direction": -1, + } + + # -------------------- If requested, read header at a specific position + # read the rest of the bmp header, without its size + assert isinstance(file_info["header_size"], int) + header_data = ImageFile._safe_read(self.fp, file_info["header_size"] - 4) + + # ------------------------------- Windows Bitmap v2, IBM OS/2 Bitmap v1 + # ----- This format has different offsets because of width/height types + # 12: BITMAPCOREHEADER/OS21XBITMAPHEADER + if file_info["header_size"] == 12: + file_info["width"] = i16(header_data, 0) + file_info["height"] = i16(header_data, 2) + file_info["planes"] = i16(header_data, 4) + file_info["bits"] = i16(header_data, 6) + file_info["compression"] = self.COMPRESSIONS["RAW"] + file_info["palette_padding"] = 3 + + # --------------------------------------------- Windows Bitmap v3 to v5 + # 40: BITMAPINFOHEADER + # 52: BITMAPV2HEADER + # 56: BITMAPV3HEADER + # 64: BITMAPCOREHEADER2/OS22XBITMAPHEADER + # 108: BITMAPV4HEADER + # 124: BITMAPV5HEADER + elif file_info["header_size"] in (40, 52, 56, 64, 108, 124): + file_info["y_flip"] = header_data[7] == 0xFF + file_info["direction"] = 1 if file_info["y_flip"] else -1 + file_info["width"] = i32(header_data, 0) + file_info["height"] = ( + i32(header_data, 4) + if not file_info["y_flip"] + else 2**32 - i32(header_data, 4) + ) + file_info["planes"] = i16(header_data, 8) + file_info["bits"] = i16(header_data, 10) + file_info["compression"] = i32(header_data, 12) + # byte size of pixel data + file_info["data_size"] = i32(header_data, 16) + file_info["pixels_per_meter"] = ( + i32(header_data, 20), + i32(header_data, 24), + ) + file_info["colors"] = i32(header_data, 28) + file_info["palette_padding"] = 4 + assert isinstance(file_info["pixels_per_meter"], tuple) + self.info["dpi"] = tuple(x / 39.3701 for x in file_info["pixels_per_meter"]) + if file_info["compression"] == self.COMPRESSIONS["BITFIELDS"]: + masks = ["r_mask", "g_mask", "b_mask"] + if len(header_data) >= 48: + if len(header_data) >= 52: + masks.append("a_mask") + else: + file_info["a_mask"] = 0x0 + for idx, mask in enumerate(masks): + file_info[mask] = i32(header_data, 36 + idx * 4) + else: + # 40 byte headers only have the three components in the + # bitfields masks, ref: + # https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx + # See also + # https://github.com/python-pillow/Pillow/issues/1293 + # There is a 4th component in the RGBQuad, in the alpha + # location, but it is listed as a reserved component, + # and it is not generally an alpha channel + file_info["a_mask"] = 0x0 + for mask in masks: + file_info[mask] = i32(read(4)) + assert 
isinstance(file_info["r_mask"], int) + assert isinstance(file_info["g_mask"], int) + assert isinstance(file_info["b_mask"], int) + assert isinstance(file_info["a_mask"], int) + file_info["rgb_mask"] = ( + file_info["r_mask"], + file_info["g_mask"], + file_info["b_mask"], + ) + file_info["rgba_mask"] = ( + file_info["r_mask"], + file_info["g_mask"], + file_info["b_mask"], + file_info["a_mask"], + ) + else: + msg = f"Unsupported BMP header type ({file_info['header_size']})" + raise OSError(msg) + + # ------------------ Special case : header is reported 40, which + # ---------------------- is shorter than real size for bpp >= 16 + assert isinstance(file_info["width"], int) + assert isinstance(file_info["height"], int) + self._size = file_info["width"], file_info["height"] + + # ------- If color count was not found in the header, compute from bits + assert isinstance(file_info["bits"], int) + file_info["colors"] = ( + file_info["colors"] + if file_info.get("colors", 0) + else (1 << file_info["bits"]) + ) + assert isinstance(file_info["colors"], int) + if offset == 14 + file_info["header_size"] and file_info["bits"] <= 8: + offset += 4 * file_info["colors"] + + # ---------------------- Check bit depth for unusual unsupported values + self._mode, raw_mode = BIT2MODE.get(file_info["bits"], ("", "")) + if not self.mode: + msg = f"Unsupported BMP pixel depth ({file_info['bits']})" + raise OSError(msg) + + # ---------------- Process BMP with Bitfields compression (not palette) + decoder_name = "raw" + if file_info["compression"] == self.COMPRESSIONS["BITFIELDS"]: + SUPPORTED: dict[int, list[tuple[int, ...]]] = { + 32: [ + (0xFF0000, 0xFF00, 0xFF, 0x0), + (0xFF000000, 0xFF0000, 0xFF00, 0x0), + (0xFF000000, 0xFF00, 0xFF, 0x0), + (0xFF000000, 0xFF0000, 0xFF00, 0xFF), + (0xFF, 0xFF00, 0xFF0000, 0xFF000000), + (0xFF0000, 0xFF00, 0xFF, 0xFF000000), + (0xFF000000, 0xFF00, 0xFF, 0xFF0000), + (0x0, 0x0, 0x0, 0x0), + ], + 24: [(0xFF0000, 0xFF00, 0xFF)], + 16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)], + } + MASK_MODES = { + (32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX", + (32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): "XBGR", + (32, (0xFF000000, 0xFF00, 0xFF, 0x0)): "BGXR", + (32, (0xFF000000, 0xFF0000, 0xFF00, 0xFF)): "ABGR", + (32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): "RGBA", + (32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA", + (32, (0xFF000000, 0xFF00, 0xFF, 0xFF0000)): "BGAR", + (32, (0x0, 0x0, 0x0, 0x0)): "BGRA", + (24, (0xFF0000, 0xFF00, 0xFF)): "BGR", + (16, (0xF800, 0x7E0, 0x1F)): "BGR;16", + (16, (0x7C00, 0x3E0, 0x1F)): "BGR;15", + } + if file_info["bits"] in SUPPORTED: + if ( + file_info["bits"] == 32 + and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]] + ): + assert isinstance(file_info["rgba_mask"], tuple) + raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])] + self._mode = "RGBA" if "A" in raw_mode else self.mode + elif ( + file_info["bits"] in (24, 16) + and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]] + ): + assert isinstance(file_info["rgb_mask"], tuple) + raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])] + else: + msg = "Unsupported BMP bitfields layout" + raise OSError(msg) + else: + msg = "Unsupported BMP bitfields layout" + raise OSError(msg) + elif file_info["compression"] == self.COMPRESSIONS["RAW"]: + if file_info["bits"] == 32 and ( + header == 22 or USE_RAW_ALPHA # 32-bit .cur offset + ): + raw_mode, self._mode = "BGRA", "RGBA" + elif file_info["compression"] in ( + self.COMPRESSIONS["RLE8"], + self.COMPRESSIONS["RLE4"], + ): + 
decoder_name = "bmp_rle" + else: + msg = f"Unsupported BMP compression ({file_info['compression']})" + raise OSError(msg) + + # --------------- Once the header is processed, process the palette/LUT + if self.mode == "P": # Paletted for 1, 4 and 8 bit images + # ---------------------------------------------------- 1-bit images + if not (0 < file_info["colors"] <= 65536): + msg = f"Unsupported BMP Palette size ({file_info['colors']})" + raise OSError(msg) + else: + assert isinstance(file_info["palette_padding"], int) + padding = file_info["palette_padding"] + palette = read(padding * file_info["colors"]) + grayscale = True + indices = ( + (0, 255) + if file_info["colors"] == 2 + else list(range(file_info["colors"])) + ) + + # ----------------- Check if grayscale and ignore palette if so + for ind, val in enumerate(indices): + rgb = palette[ind * padding : ind * padding + 3] + if rgb != o8(val) * 3: + grayscale = False + + # ------- If all colors are gray, white or black, ditch palette + if grayscale: + self._mode = "1" if file_info["colors"] == 2 else "L" + raw_mode = self.mode + else: + self._mode = "P" + self.palette = ImagePalette.raw( + "BGRX" if padding == 4 else "BGR", palette + ) + + # ---------------------------- Finally set the tile data for the plugin + self.info["compression"] = file_info["compression"] + args: list[Any] = [raw_mode] + if decoder_name == "bmp_rle": + args.append(file_info["compression"] == self.COMPRESSIONS["RLE4"]) + else: + assert isinstance(file_info["width"], int) + args.append(((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3)) + args.append(file_info["direction"]) + self.tile = [ + ImageFile._Tile( + decoder_name, + (0, 0, file_info["width"], file_info["height"]), + offset or self.fp.tell(), + tuple(args), + ) + ] + + def _open(self) -> None: + """Open file, check magic number and read header""" + # read 14 bytes: magic number, filesize, reserved, header final offset + head_data = self.fp.read(14) + # choke if the file does not have the required magic bytes + if not _accept(head_data): + msg = "Not a BMP file" + raise SyntaxError(msg) + # read the start position of the BMP image data (u32) + offset = i32(head_data, 10) + # load bitmap information (offset=raster info) + self._bitmap(offset=offset) + + +class BmpRleDecoder(ImageFile.PyDecoder): + _pulls_fd = True + + def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]: + assert self.fd is not None + rle4 = self.args[1] + data = bytearray() + x = 0 + dest_length = self.state.xsize * self.state.ysize + while len(data) < dest_length: + pixels = self.fd.read(1) + byte = self.fd.read(1) + if not pixels or not byte: + break + num_pixels = pixels[0] + if num_pixels: + # encoded mode + if x + num_pixels > self.state.xsize: + # Too much data for row + num_pixels = max(0, self.state.xsize - x) + if rle4: + first_pixel = o8(byte[0] >> 4) + second_pixel = o8(byte[0] & 0x0F) + for index in range(num_pixels): + if index % 2 == 0: + data += first_pixel + else: + data += second_pixel + else: + data += byte * num_pixels + x += num_pixels + else: + if byte[0] == 0: + # end of line + while len(data) % self.state.xsize != 0: + data += b"\x00" + x = 0 + elif byte[0] == 1: + # end of bitmap + break + elif byte[0] == 2: + # delta + bytes_read = self.fd.read(2) + if len(bytes_read) < 2: + break + right, up = self.fd.read(2) + data += b"\x00" * (right + up * self.state.xsize) + x = len(data) % self.state.xsize + else: + # absolute mode + if rle4: + # 2 pixels per byte + byte_count = byte[0] // 2 
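+                        # (each absolute-mode byte holds two 4-bit palette
+                        # indices; the loop below emits the high nibble first)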
+ bytes_read = self.fd.read(byte_count) + for byte_read in bytes_read: + data += o8(byte_read >> 4) + data += o8(byte_read & 0x0F) + else: + byte_count = byte[0] + bytes_read = self.fd.read(byte_count) + data += bytes_read + if len(bytes_read) < byte_count: + break + x += byte[0] + + # align to 16-bit word boundary + if self.fd.tell() % 2 != 0: + self.fd.seek(1, os.SEEK_CUR) + rawmode = "L" if self.mode == "L" else "P" + self.set_as_raw(bytes(data), rawmode, (0, self.args[-1])) + return -1, 0 + + +# ============================================================================= +# Image plugin for the DIB format (BMP alias) +# ============================================================================= +class DibImageFile(BmpImageFile): + format = "DIB" + format_description = "Windows Bitmap" + + def _open(self) -> None: + self._bitmap() + + +# +# -------------------------------------------------------------------- +# Write BMP file + + +SAVE = { + "1": ("1", 1, 2), + "L": ("L", 8, 256), + "P": ("P", 8, 256), + "RGB": ("BGR", 24, 0), + "RGBA": ("BGRA", 32, 0), +} + + +def _dib_save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + _save(im, fp, filename, False) + + +def _save( + im: Image.Image, fp: IO[bytes], filename: str | bytes, bitmap_header: bool = True +) -> None: + try: + rawmode, bits, colors = SAVE[im.mode] + except KeyError as e: + msg = f"cannot write mode {im.mode} as BMP" + raise OSError(msg) from e + + info = im.encoderinfo + + dpi = info.get("dpi", (96, 96)) + + # 1 meter == 39.3701 inches + ppm = tuple(int(x * 39.3701 + 0.5) for x in dpi) + + stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3) + header = 40 # or 64 for OS/2 version 2 + image = stride * im.size[1] + + if im.mode == "1": + palette = b"".join(o8(i) * 3 + b"\x00" for i in (0, 255)) + elif im.mode == "L": + palette = b"".join(o8(i) * 3 + b"\x00" for i in range(256)) + elif im.mode == "P": + palette = im.im.getpalette("RGB", "BGRX") + colors = len(palette) // 4 + else: + palette = None + + # bitmap header + if bitmap_header: + offset = 14 + header + colors * 4 + file_size = offset + image + if file_size > 2**32 - 1: + msg = "File size is too large for the BMP format" + raise ValueError(msg) + fp.write( + b"BM" # file type (magic) + + o32(file_size) # file size + + o32(0) # reserved + + o32(offset) # image data offset + ) + + # bitmap info header + fp.write( + o32(header) # info header size + + o32(im.size[0]) # width + + o32(im.size[1]) # height + + o16(1) # planes + + o16(bits) # depth + + o32(0) # compression (0=uncompressed) + + o32(image) # size of bitmap + + o32(ppm[0]) # resolution + + o32(ppm[1]) # resolution + + o32(colors) # colors used + + o32(colors) # colors important + ) + + fp.write(b"\0" * (header - 40)) # padding (for OS/2 format) + + if palette: + fp.write(palette) + + ImageFile._save( + im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))] + ) + + +# +# -------------------------------------------------------------------- +# Registry + + +Image.register_open(BmpImageFile.format, BmpImageFile, _accept) +Image.register_save(BmpImageFile.format, _save) + +Image.register_extension(BmpImageFile.format, ".bmp") + +Image.register_mime(BmpImageFile.format, "image/bmp") + +Image.register_decoder("bmp_rle", BmpRleDecoder) + +Image.register_open(DibImageFile.format, DibImageFile, _dib_accept) +Image.register_save(DibImageFile.format, _dib_save) + +Image.register_extension(DibImageFile.format, ".dib") + +Image.register_mime(DibImageFile.format, "image/bmp") diff --git 
a/py311/lib/python3.11/site-packages/PIL/BufrStubImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/BufrStubImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..8c5da14f5f6769127e0ed23d36fa8f3dd0b086c2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/BufrStubImagePlugin.py @@ -0,0 +1,75 @@ +# +# The Python Imaging Library +# $Id$ +# +# BUFR stub adapter +# +# Copyright (c) 1996-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import os +from typing import IO + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler: ImageFile.StubHandler | None) -> None: + """ + Install application-specific BUFR image handler. + + :param handler: Handler object. + """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith((b"BUFR", b"ZCZC")) + + +class BufrStubImageFile(ImageFile.StubImageFile): + format = "BUFR" + format_description = "BUFR" + + def _open(self) -> None: + if not _accept(self.fp.read(4)): + msg = "Not a BUFR file" + raise SyntaxError(msg) + + self.fp.seek(-4, os.SEEK_CUR) + + # make something up + self._mode = "F" + self._size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self) -> ImageFile.StubHandler | None: + return _handler + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + if _handler is None or not hasattr(_handler, "save"): + msg = "BUFR save handler not installed" + raise OSError(msg) + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept) +Image.register_save(BufrStubImageFile.format, _save) + +Image.register_extension(BufrStubImageFile.format, ".bufr") diff --git a/py311/lib/python3.11/site-packages/PIL/ContainerIO.py b/py311/lib/python3.11/site-packages/PIL/ContainerIO.py new file mode 100644 index 0000000000000000000000000000000000000000..ec9e66c714fbbfec8c597f6127e5d932b0da521f --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ContainerIO.py @@ -0,0 +1,173 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a class to read from a container file +# +# History: +# 1995-06-18 fl Created +# 1995-09-07 fl Added readline(), readlines() +# +# Copyright (c) 1997-2001 by Secret Labs AB +# Copyright (c) 1995 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import io +from collections.abc import Iterable +from typing import IO, AnyStr, NoReturn + + +class ContainerIO(IO[AnyStr]): + """ + A file object that provides read access to a part of an existing + file (for example a TAR file). + """ + + def __init__(self, file: IO[AnyStr], offset: int, length: int) -> None: + """ + Create file object. + + :param file: Existing file. + :param offset: Start of region, in bytes. + :param length: Size of region, in bytes. + """ + self.fh: IO[AnyStr] = file + self.pos = 0 + self.offset = offset + self.length = length + self.fh.seek(offset) + + ## + # Always false. + + def isatty(self) -> bool: + return False + + def seekable(self) -> bool: + return True + + def seek(self, offset: int, mode: int = io.SEEK_SET) -> int: + """ + Move file pointer. + + :param offset: Offset in bytes. + :param mode: Starting position. 
Use 0 for beginning of region, 1 + for current offset, and 2 for end of region. You cannot move + the pointer outside the defined region. + :returns: Offset from start of region, in bytes. + """ + if mode == 1: + self.pos = self.pos + offset + elif mode == 2: + self.pos = self.length + offset + else: + self.pos = offset + # clamp + self.pos = max(0, min(self.pos, self.length)) + self.fh.seek(self.offset + self.pos) + return self.pos + + def tell(self) -> int: + """ + Get current file pointer. + + :returns: Offset from start of region, in bytes. + """ + return self.pos + + def readable(self) -> bool: + return True + + def read(self, n: int = -1) -> AnyStr: + """ + Read data. + + :param n: Number of bytes to read. If omitted, zero or negative, + read until end of region. + :returns: An 8-bit string. + """ + if n > 0: + n = min(n, self.length - self.pos) + else: + n = self.length - self.pos + if n <= 0: # EOF + return b"" if "b" in self.fh.mode else "" # type: ignore[return-value] + self.pos = self.pos + n + return self.fh.read(n) + + def readline(self, n: int = -1) -> AnyStr: + """ + Read a line of text. + + :param n: Number of bytes to read. If omitted, zero or negative, + read until end of line. + :returns: An 8-bit string. + """ + s: AnyStr = b"" if "b" in self.fh.mode else "" # type: ignore[assignment] + newline_character = b"\n" if "b" in self.fh.mode else "\n" + while True: + c = self.read(1) + if not c: + break + s = s + c + if c == newline_character or len(s) == n: + break + return s + + def readlines(self, n: int | None = -1) -> list[AnyStr]: + """ + Read multiple lines of text. + + :param n: Number of lines to read. If omitted, zero, negative or None, + read until end of region. + :returns: A list of 8-bit strings. + """ + lines = [] + while True: + s = self.readline() + if not s: + break + lines.append(s) + if len(lines) == n: + break + return lines + + def writable(self) -> bool: + return False + + def write(self, b: AnyStr) -> NoReturn: + raise NotImplementedError() + + def writelines(self, lines: Iterable[AnyStr]) -> NoReturn: + raise NotImplementedError() + + def truncate(self, size: int | None = None) -> int: + raise NotImplementedError() + + def __enter__(self) -> ContainerIO[AnyStr]: + return self + + def __exit__(self, *args: object) -> None: + self.close() + + def __iter__(self) -> ContainerIO[AnyStr]: + return self + + def __next__(self) -> AnyStr: + line = self.readline() + if not line: + msg = "end of region" + raise StopIteration(msg) + return line + + def fileno(self) -> int: + return self.fh.fileno() + + def flush(self) -> None: + self.fh.flush() + + def close(self) -> None: + self.fh.close() diff --git a/py311/lib/python3.11/site-packages/PIL/CurImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/CurImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..b817dbc87b8c291d4bffd5f0b83d5e63a5e6ecbb --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/CurImagePlugin.py @@ -0,0 +1,75 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Windows Cursor support for PIL +# +# notes: +# uses BmpImagePlugin.py to read the bitmap data. +# +# history: +# 96-05-27 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +from . 
import BmpImagePlugin, Image, ImageFile +from ._binary import i16le as i16 +from ._binary import i32le as i32 + +# +# -------------------------------------------------------------------- + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(b"\0\0\2\0") + + +## +# Image plugin for Windows Cursor files. + + +class CurImageFile(BmpImagePlugin.BmpImageFile): + format = "CUR" + format_description = "Windows Cursor" + + def _open(self) -> None: + offset = self.fp.tell() + + # check magic + s = self.fp.read(6) + if not _accept(s): + msg = "not a CUR file" + raise SyntaxError(msg) + + # pick the largest cursor in the file + m = b"" + for i in range(i16(s, 4)): + s = self.fp.read(16) + if not m: + m = s + elif s[0] > m[0] and s[1] > m[1]: + m = s + if not m: + msg = "No cursors were found" + raise TypeError(msg) + + # load as bitmap + self._bitmap(i32(m, 12) + offset) + + # patch up the bitmap height + self._size = self.size[0], self.size[1] // 2 + d, e, o, a = self.tile[0] + self.tile[0] = ImageFile._Tile(d, (0, 0) + self.size, o, a) + + +# +# -------------------------------------------------------------------- + +Image.register_open(CurImageFile.format, CurImageFile, _accept) + +Image.register_extension(CurImageFile.format, ".cur") diff --git a/py311/lib/python3.11/site-packages/PIL/DcxImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/DcxImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..aea661b9cb6eef184696e377678ee69f66c5f772 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/DcxImagePlugin.py @@ -0,0 +1,83 @@ +# +# The Python Imaging Library. +# $Id$ +# +# DCX file handling +# +# DCX is a container file format defined by Intel, commonly used +# for fax applications. Each DCX file consists of a directory +# (a list of file offsets) followed by a set of (usually 1-bit) +# PCX files. +# +# History: +# 1995-09-09 fl Created +# 1996-03-20 fl Properly derived from PcxImageFile. +# 1998-07-15 fl Renamed offset attribute to avoid name clash +# 2002-07-30 fl Fixed file handling +# +# Copyright (c) 1997-98 by Secret Labs AB. +# Copyright (c) 1995-96 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +from . import Image +from ._binary import i32le as i32 +from ._util import DeferredError +from .PcxImagePlugin import PcxImageFile + +MAGIC = 0x3ADE68B1 # QUIZ: what's this value, then? + + +def _accept(prefix: bytes) -> bool: + return len(prefix) >= 4 and i32(prefix) == MAGIC + + +## +# Image plugin for the Intel DCX format. 
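+# Each frame is a complete PCX picture; seeking simply repositions the file
+# to the frame's recorded offset and re-runs the PCX header parser.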
+ + +class DcxImageFile(PcxImageFile): + format = "DCX" + format_description = "Intel DCX" + _close_exclusive_fp_after_loading = False + + def _open(self) -> None: + # Header + s = self.fp.read(4) + if not _accept(s): + msg = "not a DCX file" + raise SyntaxError(msg) + + # Component directory + self._offset = [] + for i in range(1024): + offset = i32(self.fp.read(4)) + if not offset: + break + self._offset.append(offset) + + self._fp = self.fp + self.frame = -1 + self.n_frames = len(self._offset) + self.is_animated = self.n_frames > 1 + self.seek(0) + + def seek(self, frame: int) -> None: + if not self._seek_check(frame): + return + if isinstance(self._fp, DeferredError): + raise self._fp.ex + self.frame = frame + self.fp = self._fp + self.fp.seek(self._offset[frame]) + PcxImageFile._open(self) + + def tell(self) -> int: + return self.frame + + +Image.register_open(DcxImageFile.format, DcxImageFile, _accept) + +Image.register_extension(DcxImageFile.format, ".dcx") diff --git a/py311/lib/python3.11/site-packages/PIL/DdsImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/DdsImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..f9ade18f9a1edf431524dd86a238f6b0445e6ab0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/DdsImagePlugin.py @@ -0,0 +1,624 @@ +""" +A Pillow plugin for .dds files (S3TC-compressed aka DXTC) +Jerome Leclanche + +Documentation: +https://web.archive.org/web/20170802060935/http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_compression_s3tc.txt + +The contents of this file are hereby released in the public domain (CC0) +Full text of the CC0 license: +https://creativecommons.org/publicdomain/zero/1.0/ +""" + +from __future__ import annotations + +import io +import struct +import sys +from enum import IntEnum, IntFlag +from typing import IO + +from . 
import Image, ImageFile, ImagePalette +from ._binary import i32le as i32 +from ._binary import o8 +from ._binary import o32le as o32 + +# Magic ("DDS ") +DDS_MAGIC = 0x20534444 + + +# DDS flags +class DDSD(IntFlag): + CAPS = 0x1 + HEIGHT = 0x2 + WIDTH = 0x4 + PITCH = 0x8 + PIXELFORMAT = 0x1000 + MIPMAPCOUNT = 0x20000 + LINEARSIZE = 0x80000 + DEPTH = 0x800000 + + +# DDS caps +class DDSCAPS(IntFlag): + COMPLEX = 0x8 + TEXTURE = 0x1000 + MIPMAP = 0x400000 + + +class DDSCAPS2(IntFlag): + CUBEMAP = 0x200 + CUBEMAP_POSITIVEX = 0x400 + CUBEMAP_NEGATIVEX = 0x800 + CUBEMAP_POSITIVEY = 0x1000 + CUBEMAP_NEGATIVEY = 0x2000 + CUBEMAP_POSITIVEZ = 0x4000 + CUBEMAP_NEGATIVEZ = 0x8000 + VOLUME = 0x200000 + + +# Pixel Format +class DDPF(IntFlag): + ALPHAPIXELS = 0x1 + ALPHA = 0x2 + FOURCC = 0x4 + PALETTEINDEXED8 = 0x20 + RGB = 0x40 + LUMINANCE = 0x20000 + + +# dxgiformat.h +class DXGI_FORMAT(IntEnum): + UNKNOWN = 0 + R32G32B32A32_TYPELESS = 1 + R32G32B32A32_FLOAT = 2 + R32G32B32A32_UINT = 3 + R32G32B32A32_SINT = 4 + R32G32B32_TYPELESS = 5 + R32G32B32_FLOAT = 6 + R32G32B32_UINT = 7 + R32G32B32_SINT = 8 + R16G16B16A16_TYPELESS = 9 + R16G16B16A16_FLOAT = 10 + R16G16B16A16_UNORM = 11 + R16G16B16A16_UINT = 12 + R16G16B16A16_SNORM = 13 + R16G16B16A16_SINT = 14 + R32G32_TYPELESS = 15 + R32G32_FLOAT = 16 + R32G32_UINT = 17 + R32G32_SINT = 18 + R32G8X24_TYPELESS = 19 + D32_FLOAT_S8X24_UINT = 20 + R32_FLOAT_X8X24_TYPELESS = 21 + X32_TYPELESS_G8X24_UINT = 22 + R10G10B10A2_TYPELESS = 23 + R10G10B10A2_UNORM = 24 + R10G10B10A2_UINT = 25 + R11G11B10_FLOAT = 26 + R8G8B8A8_TYPELESS = 27 + R8G8B8A8_UNORM = 28 + R8G8B8A8_UNORM_SRGB = 29 + R8G8B8A8_UINT = 30 + R8G8B8A8_SNORM = 31 + R8G8B8A8_SINT = 32 + R16G16_TYPELESS = 33 + R16G16_FLOAT = 34 + R16G16_UNORM = 35 + R16G16_UINT = 36 + R16G16_SNORM = 37 + R16G16_SINT = 38 + R32_TYPELESS = 39 + D32_FLOAT = 40 + R32_FLOAT = 41 + R32_UINT = 42 + R32_SINT = 43 + R24G8_TYPELESS = 44 + D24_UNORM_S8_UINT = 45 + R24_UNORM_X8_TYPELESS = 46 + X24_TYPELESS_G8_UINT = 47 + R8G8_TYPELESS = 48 + R8G8_UNORM = 49 + R8G8_UINT = 50 + R8G8_SNORM = 51 + R8G8_SINT = 52 + R16_TYPELESS = 53 + R16_FLOAT = 54 + D16_UNORM = 55 + R16_UNORM = 56 + R16_UINT = 57 + R16_SNORM = 58 + R16_SINT = 59 + R8_TYPELESS = 60 + R8_UNORM = 61 + R8_UINT = 62 + R8_SNORM = 63 + R8_SINT = 64 + A8_UNORM = 65 + R1_UNORM = 66 + R9G9B9E5_SHAREDEXP = 67 + R8G8_B8G8_UNORM = 68 + G8R8_G8B8_UNORM = 69 + BC1_TYPELESS = 70 + BC1_UNORM = 71 + BC1_UNORM_SRGB = 72 + BC2_TYPELESS = 73 + BC2_UNORM = 74 + BC2_UNORM_SRGB = 75 + BC3_TYPELESS = 76 + BC3_UNORM = 77 + BC3_UNORM_SRGB = 78 + BC4_TYPELESS = 79 + BC4_UNORM = 80 + BC4_SNORM = 81 + BC5_TYPELESS = 82 + BC5_UNORM = 83 + BC5_SNORM = 84 + B5G6R5_UNORM = 85 + B5G5R5A1_UNORM = 86 + B8G8R8A8_UNORM = 87 + B8G8R8X8_UNORM = 88 + R10G10B10_XR_BIAS_A2_UNORM = 89 + B8G8R8A8_TYPELESS = 90 + B8G8R8A8_UNORM_SRGB = 91 + B8G8R8X8_TYPELESS = 92 + B8G8R8X8_UNORM_SRGB = 93 + BC6H_TYPELESS = 94 + BC6H_UF16 = 95 + BC6H_SF16 = 96 + BC7_TYPELESS = 97 + BC7_UNORM = 98 + BC7_UNORM_SRGB = 99 + AYUV = 100 + Y410 = 101 + Y416 = 102 + NV12 = 103 + P010 = 104 + P016 = 105 + OPAQUE_420 = 106 + YUY2 = 107 + Y210 = 108 + Y216 = 109 + NV11 = 110 + AI44 = 111 + IA44 = 112 + P8 = 113 + A8P8 = 114 + B4G4R4A4_UNORM = 115 + P208 = 130 + V208 = 131 + V408 = 132 + SAMPLER_FEEDBACK_MIN_MIP_OPAQUE = 189 + SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE = 190 + + +class D3DFMT(IntEnum): + UNKNOWN = 0 + R8G8B8 = 20 + A8R8G8B8 = 21 + X8R8G8B8 = 22 + R5G6B5 = 23 + X1R5G5B5 = 24 + A1R5G5B5 = 25 + A4R4G4B4 = 26 + R3G3B2 = 27 + A8 = 28 + 
A8R3G3B2 = 29 + X4R4G4B4 = 30 + A2B10G10R10 = 31 + A8B8G8R8 = 32 + X8B8G8R8 = 33 + G16R16 = 34 + A2R10G10B10 = 35 + A16B16G16R16 = 36 + A8P8 = 40 + P8 = 41 + L8 = 50 + A8L8 = 51 + A4L4 = 52 + V8U8 = 60 + L6V5U5 = 61 + X8L8V8U8 = 62 + Q8W8V8U8 = 63 + V16U16 = 64 + A2W10V10U10 = 67 + D16_LOCKABLE = 70 + D32 = 71 + D15S1 = 73 + D24S8 = 75 + D24X8 = 77 + D24X4S4 = 79 + D16 = 80 + D32F_LOCKABLE = 82 + D24FS8 = 83 + D32_LOCKABLE = 84 + S8_LOCKABLE = 85 + L16 = 81 + VERTEXDATA = 100 + INDEX16 = 101 + INDEX32 = 102 + Q16W16V16U16 = 110 + R16F = 111 + G16R16F = 112 + A16B16G16R16F = 113 + R32F = 114 + G32R32F = 115 + A32B32G32R32F = 116 + CxV8U8 = 117 + A1 = 118 + A2B10G10R10_XR_BIAS = 119 + BINARYBUFFER = 199 + + UYVY = i32(b"UYVY") + R8G8_B8G8 = i32(b"RGBG") + YUY2 = i32(b"YUY2") + G8R8_G8B8 = i32(b"GRGB") + DXT1 = i32(b"DXT1") + DXT2 = i32(b"DXT2") + DXT3 = i32(b"DXT3") + DXT4 = i32(b"DXT4") + DXT5 = i32(b"DXT5") + DX10 = i32(b"DX10") + BC4S = i32(b"BC4S") + BC4U = i32(b"BC4U") + BC5S = i32(b"BC5S") + BC5U = i32(b"BC5U") + ATI1 = i32(b"ATI1") + ATI2 = i32(b"ATI2") + MULTI2_ARGB8 = i32(b"MET1") + + +# Backward compatibility layer +module = sys.modules[__name__] +for item in DDSD: + assert item.name is not None + setattr(module, f"DDSD_{item.name}", item.value) +for item1 in DDSCAPS: + assert item1.name is not None + setattr(module, f"DDSCAPS_{item1.name}", item1.value) +for item2 in DDSCAPS2: + assert item2.name is not None + setattr(module, f"DDSCAPS2_{item2.name}", item2.value) +for item3 in DDPF: + assert item3.name is not None + setattr(module, f"DDPF_{item3.name}", item3.value) + +DDS_FOURCC = DDPF.FOURCC +DDS_RGB = DDPF.RGB +DDS_RGBA = DDPF.RGB | DDPF.ALPHAPIXELS +DDS_LUMINANCE = DDPF.LUMINANCE +DDS_LUMINANCEA = DDPF.LUMINANCE | DDPF.ALPHAPIXELS +DDS_ALPHA = DDPF.ALPHA +DDS_PAL8 = DDPF.PALETTEINDEXED8 + +DDS_HEADER_FLAGS_TEXTURE = DDSD.CAPS | DDSD.HEIGHT | DDSD.WIDTH | DDSD.PIXELFORMAT +DDS_HEADER_FLAGS_MIPMAP = DDSD.MIPMAPCOUNT +DDS_HEADER_FLAGS_VOLUME = DDSD.DEPTH +DDS_HEADER_FLAGS_PITCH = DDSD.PITCH +DDS_HEADER_FLAGS_LINEARSIZE = DDSD.LINEARSIZE + +DDS_HEIGHT = DDSD.HEIGHT +DDS_WIDTH = DDSD.WIDTH + +DDS_SURFACE_FLAGS_TEXTURE = DDSCAPS.TEXTURE +DDS_SURFACE_FLAGS_MIPMAP = DDSCAPS.COMPLEX | DDSCAPS.MIPMAP +DDS_SURFACE_FLAGS_CUBEMAP = DDSCAPS.COMPLEX + +DDS_CUBEMAP_POSITIVEX = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEX +DDS_CUBEMAP_NEGATIVEX = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEX +DDS_CUBEMAP_POSITIVEY = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEY +DDS_CUBEMAP_NEGATIVEY = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEY +DDS_CUBEMAP_POSITIVEZ = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEZ +DDS_CUBEMAP_NEGATIVEZ = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEZ + +DXT1_FOURCC = D3DFMT.DXT1 +DXT3_FOURCC = D3DFMT.DXT3 +DXT5_FOURCC = D3DFMT.DXT5 + +DXGI_FORMAT_R8G8B8A8_TYPELESS = DXGI_FORMAT.R8G8B8A8_TYPELESS +DXGI_FORMAT_R8G8B8A8_UNORM = DXGI_FORMAT.R8G8B8A8_UNORM +DXGI_FORMAT_R8G8B8A8_UNORM_SRGB = DXGI_FORMAT.R8G8B8A8_UNORM_SRGB +DXGI_FORMAT_BC5_TYPELESS = DXGI_FORMAT.BC5_TYPELESS +DXGI_FORMAT_BC5_UNORM = DXGI_FORMAT.BC5_UNORM +DXGI_FORMAT_BC5_SNORM = DXGI_FORMAT.BC5_SNORM +DXGI_FORMAT_BC6H_UF16 = DXGI_FORMAT.BC6H_UF16 +DXGI_FORMAT_BC6H_SF16 = DXGI_FORMAT.BC6H_SF16 +DXGI_FORMAT_BC7_TYPELESS = DXGI_FORMAT.BC7_TYPELESS +DXGI_FORMAT_BC7_UNORM = DXGI_FORMAT.BC7_UNORM +DXGI_FORMAT_BC7_UNORM_SRGB = DXGI_FORMAT.BC7_UNORM_SRGB + + +class DdsImageFile(ImageFile.ImageFile): + format = "DDS" + format_description = "DirectDraw Surface" + + def _open(self) -> None: + if not 
_accept(self.fp.read(4)):
+            msg = "not a DDS file"
+            raise SyntaxError(msg)
+        (header_size,) = struct.unpack("<I", self.fp.read(4))
+        if header_size != 124:
+            msg = f"Unsupported header size {repr(header_size)}"
+            raise OSError(msg)
+        header_bytes = self.fp.read(header_size - 4)
+        if len(header_bytes) != 120:
+            msg = f"Incomplete header: {len(header_bytes)} bytes"
+            raise OSError(msg)
+        header = io.BytesIO(header_bytes)
+
+        flags, height, width = struct.unpack("<3I", header.read(12))
+        self._size = (width, height)
+        extents = (0, 0) + self.size
+
+        pitch, depth, mipmaps = struct.unpack("<3I", header.read(12))
+        struct.unpack("<11I", header.read(44))  # reserved
+
+        # pixel format
+        pfsize, pfflags, fourcc, bitcount = struct.unpack("<4I", header.read(16))
+        n = 0
+        if pfflags & DDPF.RGB:
+            # Texture contains uncompressed RGB data
+            if pfflags & DDPF.ALPHAPIXELS:
+                self._mode = "RGBA"
+                mask_count = 4
+            else:
+                self._mode = "RGB"
+                mask_count = 3
+
+            masks = struct.unpack(f"<{mask_count}I", header.read(mask_count * 4))
+            self.tile = [ImageFile._Tile("dds_rgb", extents, 0, (bitcount, masks))]
+            return
+        elif pfflags & DDPF.LUMINANCE:
+            if bitcount == 8:
+                self._mode = "L"
+            elif bitcount == 16 and pfflags & DDPF.ALPHAPIXELS:
+                self._mode = "LA"
+            else:
+                msg = f"Unsupported bitcount {bitcount} for {pfflags}"
+                raise OSError(msg)
+        elif pfflags & DDPF.PALETTEINDEXED8:
+            self._mode = "P"
+            self.palette = ImagePalette.raw("RGBA", self.fp.read(1024))
+            self.palette.mode = "RGBA"
+        elif pfflags & DDPF.FOURCC:
+            offset = header_size + 4
+            if fourcc == D3DFMT.DXT1:
+                self._mode = "RGBA"
+                self.pixel_format = "DXT1"
+                n = 1
+            elif fourcc == D3DFMT.DXT3:
+                self._mode = "RGBA"
+                self.pixel_format = "DXT3"
+                n = 2
+            elif fourcc == D3DFMT.DXT5:
+                self._mode = "RGBA"
+                self.pixel_format = "DXT5"
+                n = 3
+            elif fourcc in (D3DFMT.BC4U, D3DFMT.ATI1):
+                self._mode = "L"
+                self.pixel_format = "BC4"
+                n = 4
+            elif fourcc == D3DFMT.BC5S:
+                self._mode = "RGB"
+                self.pixel_format = "BC5S"
+                n = 5
+            elif fourcc in (D3DFMT.BC5U, D3DFMT.ATI2):
+                self._mode = "RGB"
+                self.pixel_format = "BC5"
+                n = 5
+            elif fourcc == D3DFMT.DX10:
+                offset += 20
+                # ignoring flags which pertain to volume textures and cubemaps
+                (dxgi_format,) = struct.unpack("<I", self.fp.read(4))
+                self.fp.read(16)
+                if dxgi_format in (DXGI_FORMAT.BC1_TYPELESS, DXGI_FORMAT.BC1_UNORM):
+                    self._mode = "RGBA"
+                    self.pixel_format = "BC1"
+                    n = 1
+                elif dxgi_format in (DXGI_FORMAT.BC2_TYPELESS, DXGI_FORMAT.BC2_UNORM):
+                    self._mode = "RGBA"
+                    self.pixel_format = "BC2"
+                    n = 2
+                elif dxgi_format in (DXGI_FORMAT.BC3_TYPELESS, DXGI_FORMAT.BC3_UNORM):
+                    self._mode = "RGBA"
+                    self.pixel_format = "BC3"
+                    n = 3
+                elif dxgi_format in (DXGI_FORMAT.BC4_TYPELESS, DXGI_FORMAT.BC4_UNORM):
+                    self._mode = "L"
+                    self.pixel_format = "BC4"
+                    n = 4
+                elif dxgi_format in (DXGI_FORMAT.BC5_TYPELESS, DXGI_FORMAT.BC5_UNORM):
+                    self._mode = "RGB"
+                    self.pixel_format = "BC5"
+                    n = 5
+                elif dxgi_format == DXGI_FORMAT.BC5_SNORM:
+                    self._mode = "RGB"
+                    self.pixel_format = "BC5S"
+                    n = 5
+                elif dxgi_format == DXGI_FORMAT.BC6H_UF16:
+                    self._mode = "RGB"
+                    self.pixel_format = "BC6H"
+                    n = 6
+                elif dxgi_format == DXGI_FORMAT.BC6H_SF16:
+                    self._mode = "RGB"
+                    self.pixel_format = "BC6HS"
+                    n = 6
+                elif dxgi_format in (
+                    DXGI_FORMAT.BC7_TYPELESS,
+                    DXGI_FORMAT.BC7_UNORM,
+                    DXGI_FORMAT.BC7_UNORM_SRGB,
+                ):
+                    self._mode = "RGBA"
+                    self.pixel_format = "BC7"
+                    n = 7
+                    if dxgi_format == DXGI_FORMAT.BC7_UNORM_SRGB:
+                        self.info["gamma"] = 1 / 2.2
+                elif dxgi_format in (
+                    DXGI_FORMAT.R8G8B8A8_TYPELESS,
+                    DXGI_FORMAT.R8G8B8A8_UNORM,
+                    DXGI_FORMAT.R8G8B8A8_UNORM_SRGB,
+                ):
+                    self._mode = "RGBA"
+                    if dxgi_format == DXGI_FORMAT.R8G8B8A8_UNORM_SRGB:
+                        self.info["gamma"] = 1 / 2.2
+                else:
+                    msg = f"Unimplemented DXGI format {dxgi_format}"
+                    raise NotImplementedError(msg)
+            else:
+                msg = f"Unimplemented pixel format {repr(fourcc)}"
+                raise NotImplementedError(msg)
+        else:
+            msg = f"Unknown pixel format flags {pfflags}"
+            raise NotImplementedError(msg)
+
+        if n:
+            self.tile = [
+                ImageFile._Tile("bcn", extents, offset, (n, self.pixel_format))
+            ]
+        else:
+            self.tile = [ImageFile._Tile("raw", extents, 0, self.mode)]
+
+    def load_seek(self, pos: int) -> None:
+        pass
+
+
+class DdsRgbDecoder(ImageFile.PyDecoder):
+    _pulls_fd = True
+
+    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
+        assert self.fd is not None
+        bitcount, masks = self.args
+
+        # Some masks will be padded with zeros, e.g. R 0b11 G 0b1100
+        # Calculate how many zeros each mask is padded with
+        mask_offsets = []
+        # And the maximum value of each channel without the padding
+        mask_totals = []
+        for mask in masks:
+            offset = 0
+            if mask != 0:
+                while mask >> (offset + 1) << (offset + 1) == mask:
+                    offset += 1
+            mask_offsets.append(offset)
+            mask_totals.append(mask >> offset)
+
+        data = bytearray()
+        bytecount = bitcount // 8
+        dest_length = self.state.xsize * self.state.ysize * len(masks)
+        while len(data) < dest_length:
+            value = int.from_bytes(self.fd.read(bytecount), "little")
+            for i, mask in enumerate(masks):
+                masked_value = value & mask
+                # Remove the zero padding, and scale it to 8 bits
+                data += o8(
+                    int(((masked_value >> mask_offsets[i]) / mask_totals[i]) * 255)
+                )
+        self.set_as_raw(data)
+        return -1, 0
+
+
+def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
+    if im.mode not in ("RGB", "RGBA", "L", "LA"):
+        msg = f"cannot write mode {im.mode} as DDS"
+        raise OSError(msg)
+
+    flags = DDSD.CAPS | DDSD.HEIGHT | DDSD.WIDTH | DDSD.PIXELFORMAT
+    bitcount = len(im.getbands()) * 8
+    pixel_format = im.encoderinfo.get("pixel_format")
+    args: tuple[int] | str
+    if pixel_format:
+        codec_name = "bcn"
+        flags |= DDSD.LINEARSIZE
+        pitch = (im.width + 3) * 4
+        rgba_mask = [0, 0, 0, 0]
+        pixel_flags = DDPF.FOURCC
+        if pixel_format == "DXT1":
+            fourcc = D3DFMT.DXT1
+            args = (1,)
+        elif pixel_format == "DXT3":
+            fourcc = D3DFMT.DXT3
+            args = (2,)
+        elif pixel_format == "DXT5":
+            fourcc = D3DFMT.DXT5
+            args = (3,)
+        else:
+            fourcc = D3DFMT.DX10
+            if pixel_format == "BC2":
+                args = (2,)
+                dxgi_format = DXGI_FORMAT.BC2_TYPELESS
+            elif pixel_format == "BC3":
+                args = (3,)
+                dxgi_format = DXGI_FORMAT.BC3_TYPELESS
+            elif pixel_format == "BC5":
+                args = (5,)
+                dxgi_format = DXGI_FORMAT.BC5_TYPELESS
+                if im.mode != "RGB":
+                    msg = "only RGB mode can be written as BC5"
+                    raise OSError(msg)
+            else:
+                msg = f"cannot write pixel format {pixel_format}"
+                raise OSError(msg)
+    else:
+        codec_name = "raw"
+        flags |= DDSD.PITCH
+        pitch = (im.width * bitcount + 7) // 8
+
+        alpha = im.mode[-1] == "A"
+        if im.mode[0] == "L":
+            pixel_flags = DDPF.LUMINANCE
+            args = im.mode
+            if alpha:
+                rgba_mask = [0x000000FF, 0x000000FF, 0x000000FF]
+            else:
+                rgba_mask = [0xFF000000, 0xFF000000, 0xFF000000]
+        else:
+            pixel_flags = DDPF.RGB
+            args = im.mode[::-1]
+            rgba_mask = [0x00FF0000, 0x0000FF00, 0x000000FF]
+
+            if alpha:
+                r, g, b, a = im.split()
+                im = Image.merge("RGBA", (a, r, g, b))
+        if alpha:
+            pixel_flags |= DDPF.ALPHAPIXELS
+        rgba_mask.append(0xFF000000 if alpha else 0)
+
+        fourcc = D3DFMT.UNKNOWN
+    fp.write(
+        o32(DDS_MAGIC)
+        + struct.pack(
+            "<7I",
+            124,  # header size
+            flags,  # flags
+            im.height,
+            im.width,
+            pitch,
+            0,  # depth
+            0,  # mipmaps
+        )
+        + struct.pack("11I", *((0,) * 11))  # reserved
+        # pfsize, pfflags, fourcc, bitcount
+        + struct.pack("<4I", 32, pixel_flags, fourcc, bitcount)
+        + struct.pack("<4I", *rgba_mask)  # dwRGBABitMask
+        + struct.pack("<5I", DDSCAPS.TEXTURE, 0, 0, 0, 0)
+    )
+    if fourcc == D3DFMT.DX10:
+        fp.write(
+            # dxgi_format, 2D resource, misc, array size, straight alpha
+            struct.pack("<5I", dxgi_format, 3, 0, 0, 
1) + ) + ImageFile._save(im, fp, [ImageFile._Tile(codec_name, (0, 0) + im.size, 0, args)]) + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(b"DDS ") + + +Image.register_open(DdsImageFile.format, DdsImageFile, _accept) +Image.register_decoder("dds_rgb", DdsRgbDecoder) +Image.register_save(DdsImageFile.format, _save) +Image.register_extension(DdsImageFile.format, ".dds") diff --git a/py311/lib/python3.11/site-packages/PIL/EpsImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/EpsImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..5e2ddad99e99565fee795d49c9563f5c9ba5dac1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/EpsImagePlugin.py @@ -0,0 +1,476 @@ +# +# The Python Imaging Library. +# $Id$ +# +# EPS file handling +# +# History: +# 1995-09-01 fl Created (0.1) +# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2) +# 1996-08-22 fl Don't choke on floating point BoundingBox values +# 1996-08-23 fl Handle files from Macintosh (0.3) +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4) +# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5) +# 2014-05-07 e Handling of EPS with binary preview and fixed resolution +# resizing +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import io +import os +import re +import subprocess +import sys +import tempfile +from typing import IO + +from . import Image, ImageFile +from ._binary import i32le as i32 + +# -------------------------------------------------------------------- + + +split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$") +field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$") + +gs_binary: str | bool | None = None +gs_windows_binary = None + + +def has_ghostscript() -> bool: + global gs_binary, gs_windows_binary + if gs_binary is None: + if sys.platform.startswith("win"): + if gs_windows_binary is None: + import shutil + + for binary in ("gswin32c", "gswin64c", "gs"): + if shutil.which(binary) is not None: + gs_windows_binary = binary + break + else: + gs_windows_binary = False + gs_binary = gs_windows_binary + else: + try: + subprocess.check_call(["gs", "--version"], stdout=subprocess.DEVNULL) + gs_binary = "gs" + except OSError: + gs_binary = False + return gs_binary is not False + + +def Ghostscript( + tile: list[ImageFile._Tile], + size: tuple[int, int], + fp: IO[bytes], + scale: int = 1, + transparency: bool = False, +) -> Image.core.ImagingCore: + """Render an image using Ghostscript""" + global gs_binary + if not has_ghostscript(): + msg = "Unable to locate Ghostscript on paths" + raise OSError(msg) + assert isinstance(gs_binary, str) + + # Unpack decoder tile + args = tile[0].args + assert isinstance(args, tuple) + length, bbox = args + + # Hack to support hi-res rendering + scale = int(scale) or 1 + width = size[0] * scale + height = size[1] * scale + # resolution is dependent on bbox and size + res_x = 72.0 * width / (bbox[2] - bbox[0]) + res_y = 72.0 * height / (bbox[3] - bbox[1]) + + out_fd, outfile = tempfile.mkstemp() + os.close(out_fd) + + infile_temp = None + if hasattr(fp, "name") and os.path.exists(fp.name): + infile = fp.name + else: + in_fd, infile_temp = tempfile.mkstemp() + os.close(in_fd) + infile = infile_temp + + # Ignore length and offset! 
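+    # A worked example of the geometry above (an illustrative sketch,
+    # assuming a 612x792-point BoundingBox, i.e. US Letter, and scale=2):
+    #
+    #   width = 612 * 2 = 1224 px    height = 792 * 2 = 1584 px
+    #   res_x = 72.0 * 1224 / (612 - 0) = 144.0 dpi
+    #   res_y = 72.0 * 1584 / (792 - 0) = 144.0 dpi
+    #
+    # so `scale` multiplies the DPI handed to Ghostscript while the page
+    # size in points stays fixed.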
+ # Ghostscript can read it + # Copy whole file to read in Ghostscript + with open(infile_temp, "wb") as f: + # fetch length of fp + fp.seek(0, io.SEEK_END) + fsize = fp.tell() + # ensure start position + # go back + fp.seek(0) + lengthfile = fsize + while lengthfile > 0: + s = fp.read(min(lengthfile, 100 * 1024)) + if not s: + break + lengthfile -= len(s) + f.write(s) + + if transparency: + # "RGBA" + device = "pngalpha" + else: + # "pnmraw" automatically chooses between + # PBM ("1"), PGM ("L"), and PPM ("RGB"). + device = "pnmraw" + + # Build Ghostscript command + command = [ + gs_binary, + "-q", # quiet mode + f"-g{width:d}x{height:d}", # set output geometry (pixels) + f"-r{res_x:f}x{res_y:f}", # set input DPI (dots per inch) + "-dBATCH", # exit after processing + "-dNOPAUSE", # don't pause between pages + "-dSAFER", # safe mode + f"-sDEVICE={device}", + f"-sOutputFile={outfile}", # output file + # adjust for image origin + "-c", + f"{-bbox[0]} {-bbox[1]} translate", + "-f", + infile, # input file + # showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272) + "-c", + "showpage", + ] + + # push data through Ghostscript + try: + startupinfo = None + if sys.platform.startswith("win"): + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + subprocess.check_call(command, startupinfo=startupinfo) + with Image.open(outfile) as out_im: + out_im.load() + return out_im.im.copy() + finally: + try: + os.unlink(outfile) + if infile_temp: + os.unlink(infile_temp) + except OSError: + pass + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(b"%!PS") or ( + len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5 + ) + + +## +# Image plugin for Encapsulated PostScript. This plugin supports only +# a few variants of this format. + + +class EpsImageFile(ImageFile.ImageFile): + """EPS File Parser for the Python Imaging Library""" + + format = "EPS" + format_description = "Encapsulated Postscript" + + mode_map = {1: "L", 2: "LAB", 3: "RGB", 4: "CMYK"} + + def _open(self) -> None: + (length, offset) = self._find_offset(self.fp) + + # go to offset - start of "%!PS" + self.fp.seek(offset) + + self._mode = "RGB" + + # When reading header comments, the first comment is used. + # When reading trailer comments, the last comment is used. + bounding_box: list[int] | None = None + imagedata_size: tuple[int, int] | None = None + + byte_arr = bytearray(255) + bytes_mv = memoryview(byte_arr) + bytes_read = 0 + reading_header_comments = True + reading_trailer_comments = False + trailer_reached = False + + def check_required_header_comments() -> None: + """ + The EPS specification requires that some headers exist. + This should be checked when the header comments formally end, + when image data starts, or when the file ends, whichever comes first. 
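+
+        For example, a conforming header begins (an illustrative sketch):
+
+            %!PS-Adobe-3.0 EPSF-3.0
+            %%BoundingBox: 0 0 612 792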
+ """ + if "PS-Adobe" not in self.info: + msg = 'EPS header missing "%!PS-Adobe" comment' + raise SyntaxError(msg) + if "BoundingBox" not in self.info: + msg = 'EPS header missing "%%BoundingBox" comment' + raise SyntaxError(msg) + + def read_comment(s: str) -> bool: + nonlocal bounding_box, reading_trailer_comments + try: + m = split.match(s) + except re.error as e: + msg = "not an EPS file" + raise SyntaxError(msg) from e + + if not m: + return False + + k, v = m.group(1, 2) + self.info[k] = v + if k == "BoundingBox": + if v == "(atend)": + reading_trailer_comments = True + elif not bounding_box or (trailer_reached and reading_trailer_comments): + try: + # Note: The DSC spec says that BoundingBox + # fields should be integers, but some drivers + # put floating point values there anyway. + bounding_box = [int(float(i)) for i in v.split()] + except Exception: + pass + return True + + while True: + byte = self.fp.read(1) + if byte == b"": + # if we didn't read a byte we must be at the end of the file + if bytes_read == 0: + if reading_header_comments: + check_required_header_comments() + break + elif byte in b"\r\n": + # if we read a line ending character, ignore it and parse what + # we have already read. if we haven't read any other characters, + # continue reading + if bytes_read == 0: + continue + else: + # ASCII/hexadecimal lines in an EPS file must not exceed + # 255 characters, not including line ending characters + if bytes_read >= 255: + # only enforce this for lines starting with a "%", + # otherwise assume it's binary data + if byte_arr[0] == ord("%"): + msg = "not an EPS file" + raise SyntaxError(msg) + else: + if reading_header_comments: + check_required_header_comments() + reading_header_comments = False + # reset bytes_read so we can keep reading + # data until the end of the line + bytes_read = 0 + byte_arr[bytes_read] = byte[0] + bytes_read += 1 + continue + + if reading_header_comments: + # Load EPS header + + # if this line doesn't start with a "%", + # or does start with "%%EndComments", + # then we've reached the end of the header/comments + if byte_arr[0] != ord("%") or bytes_mv[:13] == b"%%EndComments": + check_required_header_comments() + reading_header_comments = False + continue + + s = str(bytes_mv[:bytes_read], "latin-1") + if not read_comment(s): + m = field.match(s) + if m: + k = m.group(1) + if k.startswith("PS-Adobe"): + self.info["PS-Adobe"] = k[9:] + else: + self.info[k] = "" + elif s[0] == "%": + # handle non-DSC PostScript comments that some + # tools mistakenly put in the Comments section + pass + else: + msg = "bad EPS header" + raise OSError(msg) + elif bytes_mv[:11] == b"%ImageData:": + # Check for an "ImageData" descriptor + # https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#50577413_pgfId-1035096 + + # If we've already read an "ImageData" descriptor, + # don't read another one. 
+ if imagedata_size: + bytes_read = 0 + continue + + # Values: + # columns + # rows + # bit depth (1 or 8) + # mode (1: L, 2: LAB, 3: RGB, 4: CMYK) + # number of padding channels + # block size (number of bytes per row per channel) + # binary/ascii (1: binary, 2: ascii) + # data start identifier (the image data follows after a single line + # consisting only of this quoted value) + image_data_values = byte_arr[11:bytes_read].split(None, 7) + columns, rows, bit_depth, mode_id = ( + int(value) for value in image_data_values[:4] + ) + + if bit_depth == 1: + self._mode = "1" + elif bit_depth == 8: + try: + self._mode = self.mode_map[mode_id] + except ValueError: + break + else: + break + + # Parse the columns and rows after checking the bit depth and mode + # in case the bit depth and/or mode are invalid. + imagedata_size = columns, rows + elif bytes_mv[:5] == b"%%EOF": + break + elif trailer_reached and reading_trailer_comments: + # Load EPS trailer + s = str(bytes_mv[:bytes_read], "latin-1") + read_comment(s) + elif bytes_mv[:9] == b"%%Trailer": + trailer_reached = True + bytes_read = 0 + + # A "BoundingBox" is always required, + # even if an "ImageData" descriptor size exists. + if not bounding_box: + msg = "cannot determine EPS bounding box" + raise OSError(msg) + + # An "ImageData" size takes precedence over the "BoundingBox". + self._size = imagedata_size or ( + bounding_box[2] - bounding_box[0], + bounding_box[3] - bounding_box[1], + ) + + self.tile = [ + ImageFile._Tile("eps", (0, 0) + self.size, offset, (length, bounding_box)) + ] + + def _find_offset(self, fp: IO[bytes]) -> tuple[int, int]: + s = fp.read(4) + + if s == b"%!PS": + # for HEAD without binary preview + fp.seek(0, io.SEEK_END) + length = fp.tell() + offset = 0 + elif i32(s) == 0xC6D3D0C5: + # FIX for: Some EPS file not handled correctly / issue #302 + # EPS can contain binary data + # or start directly with latin coding + # more info see: + # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf + s = fp.read(8) + offset = i32(s) + length = i32(s, 4) + else: + msg = "not an EPS file" + raise SyntaxError(msg) + + return length, offset + + def load( + self, scale: int = 1, transparency: bool = False + ) -> Image.core.PixelAccess | None: + # Load EPS via Ghostscript + if self.tile: + self.im = Ghostscript(self.tile, self.size, self.fp, scale, transparency) + self._mode = self.im.mode + self._size = self.im.size + self.tile = [] + return Image.Image.load(self) + + def load_seek(self, pos: int) -> None: + # we can't incrementally load, so force ImageFile.parser to + # use our custom load method by defining this method. + pass + + +# -------------------------------------------------------------------- + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes, eps: int = 1) -> None: + """EPS Writer for the Python Imaging Library.""" + + # make sure image data is available + im.load() + + # determine PostScript image mode + if im.mode == "L": + operator = (8, 1, b"image") + elif im.mode == "RGB": + operator = (8, 3, b"false 3 colorimage") + elif im.mode == "CMYK": + operator = (8, 4, b"false 4 colorimage") + else: + msg = "image mode is not supported" + raise ValueError(msg) + + if eps: + # write EPS header + fp.write(b"%!PS-Adobe-3.0 EPSF-3.0\n") + fp.write(b"%%Creator: PIL 0.1 EpsEncode\n") + # fp.write("%%CreationDate: %s"...) 
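+        # Together, this block emits a DSC header of the form
+        # (an illustrative sketch for a 640x480 RGB image):
+        #
+        #   %!PS-Adobe-3.0 EPSF-3.0
+        #   %%Creator: PIL 0.1 EpsEncode
+        #   %%BoundingBox: 0 0 640 480
+        #   %%Pages: 1
+        #   %%EndComments
+        #   %%Page: 1 1
+        #   %ImageData: 640 480 8 3 0 1 1 "false 3 colorimage"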
+ fp.write(b"%%%%BoundingBox: 0 0 %d %d\n" % im.size) + fp.write(b"%%Pages: 1\n") + fp.write(b"%%EndComments\n") + fp.write(b"%%Page: 1 1\n") + fp.write(b"%%ImageData: %d %d " % im.size) + fp.write(b'%d %d 0 1 1 "%s"\n' % operator) + + # image header + fp.write(b"gsave\n") + fp.write(b"10 dict begin\n") + fp.write(b"/buf %d string def\n" % (im.size[0] * operator[1])) + fp.write(b"%d %d scale\n" % im.size) + fp.write(b"%d %d 8\n" % im.size) # <= bits + fp.write(b"[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1])) + fp.write(b"{ currentfile buf readhexstring pop } bind\n") + fp.write(operator[2] + b"\n") + if hasattr(fp, "flush"): + fp.flush() + + ImageFile._save(im, fp, [ImageFile._Tile("eps", (0, 0) + im.size)]) + + fp.write(b"\n%%%%EndBinary\n") + fp.write(b"grestore end\n") + if hasattr(fp, "flush"): + fp.flush() + + +# -------------------------------------------------------------------- + + +Image.register_open(EpsImageFile.format, EpsImageFile, _accept) + +Image.register_save(EpsImageFile.format, _save) + +Image.register_extensions(EpsImageFile.format, [".ps", ".eps"]) + +Image.register_mime(EpsImageFile.format, "application/postscript") diff --git a/py311/lib/python3.11/site-packages/PIL/ExifTags.py b/py311/lib/python3.11/site-packages/PIL/ExifTags.py new file mode 100644 index 0000000000000000000000000000000000000000..2280d5ce84b9badabe16cfb0db37f739d50d51c6 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ExifTags.py @@ -0,0 +1,382 @@ +# +# The Python Imaging Library. +# $Id$ +# +# EXIF tags +# +# Copyright (c) 2003 by Secret Labs AB +# +# See the README file for information on usage and redistribution. +# + +""" +This module provides constants and clear-text names for various +well-known EXIF tags. +""" +from __future__ import annotations + +from enum import IntEnum + + +class Base(IntEnum): + # possibly incomplete + InteropIndex = 0x0001 + ProcessingSoftware = 0x000B + NewSubfileType = 0x00FE + SubfileType = 0x00FF + ImageWidth = 0x0100 + ImageLength = 0x0101 + BitsPerSample = 0x0102 + Compression = 0x0103 + PhotometricInterpretation = 0x0106 + Thresholding = 0x0107 + CellWidth = 0x0108 + CellLength = 0x0109 + FillOrder = 0x010A + DocumentName = 0x010D + ImageDescription = 0x010E + Make = 0x010F + Model = 0x0110 + StripOffsets = 0x0111 + Orientation = 0x0112 + SamplesPerPixel = 0x0115 + RowsPerStrip = 0x0116 + StripByteCounts = 0x0117 + MinSampleValue = 0x0118 + MaxSampleValue = 0x0119 + XResolution = 0x011A + YResolution = 0x011B + PlanarConfiguration = 0x011C + PageName = 0x011D + FreeOffsets = 0x0120 + FreeByteCounts = 0x0121 + GrayResponseUnit = 0x0122 + GrayResponseCurve = 0x0123 + T4Options = 0x0124 + T6Options = 0x0125 + ResolutionUnit = 0x0128 + PageNumber = 0x0129 + TransferFunction = 0x012D + Software = 0x0131 + DateTime = 0x0132 + Artist = 0x013B + HostComputer = 0x013C + Predictor = 0x013D + WhitePoint = 0x013E + PrimaryChromaticities = 0x013F + ColorMap = 0x0140 + HalftoneHints = 0x0141 + TileWidth = 0x0142 + TileLength = 0x0143 + TileOffsets = 0x0144 + TileByteCounts = 0x0145 + SubIFDs = 0x014A + InkSet = 0x014C + InkNames = 0x014D + NumberOfInks = 0x014E + DotRange = 0x0150 + TargetPrinter = 0x0151 + ExtraSamples = 0x0152 + SampleFormat = 0x0153 + SMinSampleValue = 0x0154 + SMaxSampleValue = 0x0155 + TransferRange = 0x0156 + ClipPath = 0x0157 + XClipPathUnits = 0x0158 + YClipPathUnits = 0x0159 + Indexed = 0x015A + JPEGTables = 0x015B + OPIProxy = 0x015F + JPEGProc = 0x0200 + JpegIFOffset = 0x0201 + JpegIFByteCount = 0x0202 + JpegRestartInterval = 
0x0203 + JpegLosslessPredictors = 0x0205 + JpegPointTransforms = 0x0206 + JpegQTables = 0x0207 + JpegDCTables = 0x0208 + JpegACTables = 0x0209 + YCbCrCoefficients = 0x0211 + YCbCrSubSampling = 0x0212 + YCbCrPositioning = 0x0213 + ReferenceBlackWhite = 0x0214 + XMLPacket = 0x02BC + RelatedImageFileFormat = 0x1000 + RelatedImageWidth = 0x1001 + RelatedImageLength = 0x1002 + Rating = 0x4746 + RatingPercent = 0x4749 + ImageID = 0x800D + CFARepeatPatternDim = 0x828D + BatteryLevel = 0x828F + Copyright = 0x8298 + ExposureTime = 0x829A + FNumber = 0x829D + IPTCNAA = 0x83BB + ImageResources = 0x8649 + ExifOffset = 0x8769 + InterColorProfile = 0x8773 + ExposureProgram = 0x8822 + SpectralSensitivity = 0x8824 + GPSInfo = 0x8825 + ISOSpeedRatings = 0x8827 + OECF = 0x8828 + Interlace = 0x8829 + TimeZoneOffset = 0x882A + SelfTimerMode = 0x882B + SensitivityType = 0x8830 + StandardOutputSensitivity = 0x8831 + RecommendedExposureIndex = 0x8832 + ISOSpeed = 0x8833 + ISOSpeedLatitudeyyy = 0x8834 + ISOSpeedLatitudezzz = 0x8835 + ExifVersion = 0x9000 + DateTimeOriginal = 0x9003 + DateTimeDigitized = 0x9004 + OffsetTime = 0x9010 + OffsetTimeOriginal = 0x9011 + OffsetTimeDigitized = 0x9012 + ComponentsConfiguration = 0x9101 + CompressedBitsPerPixel = 0x9102 + ShutterSpeedValue = 0x9201 + ApertureValue = 0x9202 + BrightnessValue = 0x9203 + ExposureBiasValue = 0x9204 + MaxApertureValue = 0x9205 + SubjectDistance = 0x9206 + MeteringMode = 0x9207 + LightSource = 0x9208 + Flash = 0x9209 + FocalLength = 0x920A + Noise = 0x920D + ImageNumber = 0x9211 + SecurityClassification = 0x9212 + ImageHistory = 0x9213 + TIFFEPStandardID = 0x9216 + MakerNote = 0x927C + UserComment = 0x9286 + SubsecTime = 0x9290 + SubsecTimeOriginal = 0x9291 + SubsecTimeDigitized = 0x9292 + AmbientTemperature = 0x9400 + Humidity = 0x9401 + Pressure = 0x9402 + WaterDepth = 0x9403 + Acceleration = 0x9404 + CameraElevationAngle = 0x9405 + XPTitle = 0x9C9B + XPComment = 0x9C9C + XPAuthor = 0x9C9D + XPKeywords = 0x9C9E + XPSubject = 0x9C9F + FlashPixVersion = 0xA000 + ColorSpace = 0xA001 + ExifImageWidth = 0xA002 + ExifImageHeight = 0xA003 + RelatedSoundFile = 0xA004 + ExifInteroperabilityOffset = 0xA005 + FlashEnergy = 0xA20B + SpatialFrequencyResponse = 0xA20C + FocalPlaneXResolution = 0xA20E + FocalPlaneYResolution = 0xA20F + FocalPlaneResolutionUnit = 0xA210 + SubjectLocation = 0xA214 + ExposureIndex = 0xA215 + SensingMethod = 0xA217 + FileSource = 0xA300 + SceneType = 0xA301 + CFAPattern = 0xA302 + CustomRendered = 0xA401 + ExposureMode = 0xA402 + WhiteBalance = 0xA403 + DigitalZoomRatio = 0xA404 + FocalLengthIn35mmFilm = 0xA405 + SceneCaptureType = 0xA406 + GainControl = 0xA407 + Contrast = 0xA408 + Saturation = 0xA409 + Sharpness = 0xA40A + DeviceSettingDescription = 0xA40B + SubjectDistanceRange = 0xA40C + ImageUniqueID = 0xA420 + CameraOwnerName = 0xA430 + BodySerialNumber = 0xA431 + LensSpecification = 0xA432 + LensMake = 0xA433 + LensModel = 0xA434 + LensSerialNumber = 0xA435 + CompositeImage = 0xA460 + CompositeImageCount = 0xA461 + CompositeImageExposureTimes = 0xA462 + Gamma = 0xA500 + PrintImageMatching = 0xC4A5 + DNGVersion = 0xC612 + DNGBackwardVersion = 0xC613 + UniqueCameraModel = 0xC614 + LocalizedCameraModel = 0xC615 + CFAPlaneColor = 0xC616 + CFALayout = 0xC617 + LinearizationTable = 0xC618 + BlackLevelRepeatDim = 0xC619 + BlackLevel = 0xC61A + BlackLevelDeltaH = 0xC61B + BlackLevelDeltaV = 0xC61C + WhiteLevel = 0xC61D + DefaultScale = 0xC61E + DefaultCropOrigin = 0xC61F + DefaultCropSize = 0xC620 + ColorMatrix1 = 0xC621 + 
ColorMatrix2 = 0xC622 + CameraCalibration1 = 0xC623 + CameraCalibration2 = 0xC624 + ReductionMatrix1 = 0xC625 + ReductionMatrix2 = 0xC626 + AnalogBalance = 0xC627 + AsShotNeutral = 0xC628 + AsShotWhiteXY = 0xC629 + BaselineExposure = 0xC62A + BaselineNoise = 0xC62B + BaselineSharpness = 0xC62C + BayerGreenSplit = 0xC62D + LinearResponseLimit = 0xC62E + CameraSerialNumber = 0xC62F + LensInfo = 0xC630 + ChromaBlurRadius = 0xC631 + AntiAliasStrength = 0xC632 + ShadowScale = 0xC633 + DNGPrivateData = 0xC634 + MakerNoteSafety = 0xC635 + CalibrationIlluminant1 = 0xC65A + CalibrationIlluminant2 = 0xC65B + BestQualityScale = 0xC65C + RawDataUniqueID = 0xC65D + OriginalRawFileName = 0xC68B + OriginalRawFileData = 0xC68C + ActiveArea = 0xC68D + MaskedAreas = 0xC68E + AsShotICCProfile = 0xC68F + AsShotPreProfileMatrix = 0xC690 + CurrentICCProfile = 0xC691 + CurrentPreProfileMatrix = 0xC692 + ColorimetricReference = 0xC6BF + CameraCalibrationSignature = 0xC6F3 + ProfileCalibrationSignature = 0xC6F4 + AsShotProfileName = 0xC6F6 + NoiseReductionApplied = 0xC6F7 + ProfileName = 0xC6F8 + ProfileHueSatMapDims = 0xC6F9 + ProfileHueSatMapData1 = 0xC6FA + ProfileHueSatMapData2 = 0xC6FB + ProfileToneCurve = 0xC6FC + ProfileEmbedPolicy = 0xC6FD + ProfileCopyright = 0xC6FE + ForwardMatrix1 = 0xC714 + ForwardMatrix2 = 0xC715 + PreviewApplicationName = 0xC716 + PreviewApplicationVersion = 0xC717 + PreviewSettingsName = 0xC718 + PreviewSettingsDigest = 0xC719 + PreviewColorSpace = 0xC71A + PreviewDateTime = 0xC71B + RawImageDigest = 0xC71C + OriginalRawFileDigest = 0xC71D + SubTileBlockSize = 0xC71E + RowInterleaveFactor = 0xC71F + ProfileLookTableDims = 0xC725 + ProfileLookTableData = 0xC726 + OpcodeList1 = 0xC740 + OpcodeList2 = 0xC741 + OpcodeList3 = 0xC74E + NoiseProfile = 0xC761 + + +"""Maps EXIF tags to tag names.""" +TAGS = { + **{i.value: i.name for i in Base}, + 0x920C: "SpatialFrequencyResponse", + 0x9214: "SubjectLocation", + 0x9215: "ExposureIndex", + 0x828E: "CFAPattern", + 0x920B: "FlashEnergy", + 0x9216: "TIFF/EPStandardID", +} + + +class GPS(IntEnum): + GPSVersionID = 0x00 + GPSLatitudeRef = 0x01 + GPSLatitude = 0x02 + GPSLongitudeRef = 0x03 + GPSLongitude = 0x04 + GPSAltitudeRef = 0x05 + GPSAltitude = 0x06 + GPSTimeStamp = 0x07 + GPSSatellites = 0x08 + GPSStatus = 0x09 + GPSMeasureMode = 0x0A + GPSDOP = 0x0B + GPSSpeedRef = 0x0C + GPSSpeed = 0x0D + GPSTrackRef = 0x0E + GPSTrack = 0x0F + GPSImgDirectionRef = 0x10 + GPSImgDirection = 0x11 + GPSMapDatum = 0x12 + GPSDestLatitudeRef = 0x13 + GPSDestLatitude = 0x14 + GPSDestLongitudeRef = 0x15 + GPSDestLongitude = 0x16 + GPSDestBearingRef = 0x17 + GPSDestBearing = 0x18 + GPSDestDistanceRef = 0x19 + GPSDestDistance = 0x1A + GPSProcessingMethod = 0x1B + GPSAreaInformation = 0x1C + GPSDateStamp = 0x1D + GPSDifferential = 0x1E + GPSHPositioningError = 0x1F + + +"""Maps EXIF GPS tags to tag names.""" +GPSTAGS = {i.value: i.name for i in GPS} + + +class Interop(IntEnum): + InteropIndex = 0x0001 + InteropVersion = 0x0002 + RelatedImageFileFormat = 0x1000 + RelatedImageWidth = 0x1001 + RelatedImageHeight = 0x1002 + + +class IFD(IntEnum): + Exif = 0x8769 + GPSInfo = 0x8825 + MakerNote = 0x927C + Makernote = 0x927C # Deprecated + Interop = 0xA005 + IFD1 = -1 + + +class LightSource(IntEnum): + Unknown = 0x00 + Daylight = 0x01 + Fluorescent = 0x02 + Tungsten = 0x03 + Flash = 0x04 + Fine = 0x09 + Cloudy = 0x0A + Shade = 0x0B + DaylightFluorescent = 0x0C + DayWhiteFluorescent = 0x0D + CoolWhiteFluorescent = 0x0E + WhiteFluorescent = 0x0F + StandardLightA = 0x11 + 
StandardLightB = 0x12 + StandardLightC = 0x13 + D55 = 0x14 + D65 = 0x15 + D75 = 0x16 + D50 = 0x17 + ISO = 0x18 + Other = 0xFF diff --git a/py311/lib/python3.11/site-packages/PIL/FitsImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/FitsImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..a3fdc0efeec6f7ec195112ded41d8ff1e248a6a0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/FitsImagePlugin.py @@ -0,0 +1,152 @@ +# +# The Python Imaging Library +# $Id$ +# +# FITS file handling +# +# Copyright (c) 1998-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import gzip +import math + +from . import Image, ImageFile + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(b"SIMPLE") + + +class FitsImageFile(ImageFile.ImageFile): + format = "FITS" + format_description = "FITS" + + def _open(self) -> None: + assert self.fp is not None + + headers: dict[bytes, bytes] = {} + header_in_progress = False + decoder_name = "" + while True: + header = self.fp.read(80) + if not header: + msg = "Truncated FITS file" + raise OSError(msg) + keyword = header[:8].strip() + if keyword in (b"SIMPLE", b"XTENSION"): + header_in_progress = True + elif headers and not header_in_progress: + # This is now a data unit + break + elif keyword == b"END": + # Seek to the end of the header unit + self.fp.seek(math.ceil(self.fp.tell() / 2880) * 2880) + if not decoder_name: + decoder_name, offset, args = self._parse_headers(headers) + + header_in_progress = False + continue + + if decoder_name: + # Keep going to read past the headers + continue + + value = header[8:].split(b"/")[0].strip() + if value.startswith(b"="): + value = value[1:].strip() + if not headers and (not _accept(keyword) or value != b"T"): + msg = "Not a FITS file" + raise SyntaxError(msg) + headers[keyword] = value + + if not decoder_name: + msg = "No image data" + raise ValueError(msg) + + offset += self.fp.tell() - 80 + self.tile = [ImageFile._Tile(decoder_name, (0, 0) + self.size, offset, args)] + + def _get_size( + self, headers: dict[bytes, bytes], prefix: bytes + ) -> tuple[int, int] | None: + naxis = int(headers[prefix + b"NAXIS"]) + if naxis == 0: + return None + + if naxis == 1: + return 1, int(headers[prefix + b"NAXIS1"]) + else: + return int(headers[prefix + b"NAXIS1"]), int(headers[prefix + b"NAXIS2"]) + + def _parse_headers( + self, headers: dict[bytes, bytes] + ) -> tuple[str, int, tuple[str | int, ...]]: + prefix = b"" + decoder_name = "raw" + offset = 0 + if ( + headers.get(b"XTENSION") == b"'BINTABLE'" + and headers.get(b"ZIMAGE") == b"T" + and headers[b"ZCMPTYPE"] == b"'GZIP_1 '" + ): + no_prefix_size = self._get_size(headers, prefix) or (0, 0) + number_of_bits = int(headers[b"BITPIX"]) + offset = no_prefix_size[0] * no_prefix_size[1] * (number_of_bits // 8) + + prefix = b"Z" + decoder_name = "fits_gzip" + + size = self._get_size(headers, prefix) + if not size: + return "", 0, () + + self._size = size + + number_of_bits = int(headers[prefix + b"BITPIX"]) + if number_of_bits == 8: + self._mode = "L" + elif number_of_bits == 16: + self._mode = "I;16" + elif number_of_bits == 32: + self._mode = "I" + elif number_of_bits in (-32, -64): + self._mode = "F" + + args: tuple[str | int, ...] 
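+        # For the built-in "raw" decoder, the args tuple below is
+        # (rawmode, stride, orientation): stride 0 means tightly packed,
+        # and orientation -1 decodes bottom-up, matching FITS row order
+        # (compare the rows[::-1] flip in FitsGzipDecoder below).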
+ if decoder_name == "raw": + args = (self.mode, 0, -1) + else: + args = (number_of_bits,) + return decoder_name, offset, args + + +class FitsGzipDecoder(ImageFile.PyDecoder): + _pulls_fd = True + + def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]: + assert self.fd is not None + value = gzip.decompress(self.fd.read()) + + rows = [] + offset = 0 + number_of_bits = min(self.args[0] // 8, 4) + for y in range(self.state.ysize): + row = bytearray() + for x in range(self.state.xsize): + row += value[offset + (4 - number_of_bits) : offset + 4] + offset += 4 + rows.append(row) + self.set_as_raw(bytes([pixel for row in rows[::-1] for pixel in row])) + return -1, 0 + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(FitsImageFile.format, FitsImageFile, _accept) +Image.register_decoder("fits_gzip", FitsGzipDecoder) + +Image.register_extensions(FitsImageFile.format, [".fit", ".fits"]) diff --git a/py311/lib/python3.11/site-packages/PIL/FliImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/FliImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..7c5bfeefa1bf24ee536e13e26ce6ca8aa4ba319d --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/FliImagePlugin.py @@ -0,0 +1,178 @@ +# +# The Python Imaging Library. +# $Id$ +# +# FLI/FLC file handling. +# +# History: +# 95-09-01 fl Created +# 97-01-03 fl Fixed parser, setup decoder tile +# 98-07-15 fl Renamed offset attribute to avoid name clash +# +# Copyright (c) Secret Labs AB 1997-98. +# Copyright (c) Fredrik Lundh 1995-97. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import os + +from . import Image, ImageFile, ImagePalette +from ._binary import i16le as i16 +from ._binary import i32le as i32 +from ._binary import o8 +from ._util import DeferredError + +# +# decoder + + +def _accept(prefix: bytes) -> bool: + return ( + len(prefix) >= 6 + and i16(prefix, 4) in [0xAF11, 0xAF12] + and i16(prefix, 14) in [0, 3] # flags + ) + + +## +# Image plugin for the FLI/FLC animation format. Use the seek +# method to load individual frames. 
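+# For example (an illustrative sketch; "anim.flc" names a hypothetical
+# file):
+#
+#     from PIL import Image
+#
+#     with Image.open("anim.flc") as im:
+#         for i in range(im.n_frames):
+#             im.seek(i)
+#             im.convert("RGB").save(f"frame_{i:03d}.png")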
+ + +class FliImageFile(ImageFile.ImageFile): + format = "FLI" + format_description = "Autodesk FLI/FLC Animation" + _close_exclusive_fp_after_loading = False + + def _open(self) -> None: + # HEAD + s = self.fp.read(128) + if not (_accept(s) and s[20:22] == b"\x00\x00"): + msg = "not an FLI/FLC file" + raise SyntaxError(msg) + + # frames + self.n_frames = i16(s, 6) + self.is_animated = self.n_frames > 1 + + # image characteristics + self._mode = "P" + self._size = i16(s, 8), i16(s, 10) + + # animation speed + duration = i32(s, 16) + magic = i16(s, 4) + if magic == 0xAF11: + duration = (duration * 1000) // 70 + self.info["duration"] = duration + + # look for palette + palette = [(a, a, a) for a in range(256)] + + s = self.fp.read(16) + + self.__offset = 128 + + if i16(s, 4) == 0xF100: + # prefix chunk; ignore it + self.__offset = self.__offset + i32(s) + self.fp.seek(self.__offset) + s = self.fp.read(16) + + if i16(s, 4) == 0xF1FA: + # look for palette chunk + number_of_subchunks = i16(s, 6) + chunk_size: int | None = None + for _ in range(number_of_subchunks): + if chunk_size is not None: + self.fp.seek(chunk_size - 6, os.SEEK_CUR) + s = self.fp.read(6) + chunk_type = i16(s, 4) + if chunk_type in (4, 11): + self._palette(palette, 2 if chunk_type == 11 else 0) + break + chunk_size = i32(s) + if not chunk_size: + break + + self.palette = ImagePalette.raw( + "RGB", b"".join(o8(r) + o8(g) + o8(b) for (r, g, b) in palette) + ) + + # set things up to decode first frame + self.__frame = -1 + self._fp = self.fp + self.__rewind = self.fp.tell() + self.seek(0) + + def _palette(self, palette: list[tuple[int, int, int]], shift: int) -> None: + # load palette + + i = 0 + for e in range(i16(self.fp.read(2))): + s = self.fp.read(2) + i = i + s[0] + n = s[1] + if n == 0: + n = 256 + s = self.fp.read(n * 3) + for n in range(0, len(s), 3): + r = s[n] << shift + g = s[n + 1] << shift + b = s[n + 2] << shift + palette[i] = (r, g, b) + i += 1 + + def seek(self, frame: int) -> None: + if not self._seek_check(frame): + return + if frame < self.__frame: + self._seek(0) + + for f in range(self.__frame + 1, frame + 1): + self._seek(f) + + def _seek(self, frame: int) -> None: + if isinstance(self._fp, DeferredError): + raise self._fp.ex + if frame == 0: + self.__frame = -1 + self._fp.seek(self.__rewind) + self.__offset = 128 + else: + # ensure that the previous frame was loaded + self.load() + + if frame != self.__frame + 1: + msg = f"cannot seek to frame {frame}" + raise ValueError(msg) + self.__frame = frame + + # move to next frame + self.fp = self._fp + self.fp.seek(self.__offset) + + s = self.fp.read(4) + if not s: + msg = "missing frame size" + raise EOFError(msg) + + framesize = i32(s) + + self.decodermaxblock = framesize + self.tile = [ImageFile._Tile("fli", (0, 0) + self.size, self.__offset)] + + self.__offset += framesize + + def tell(self) -> int: + return self.__frame + + +# +# registry + +Image.register_open(FliImageFile.format, FliImageFile, _accept) + +Image.register_extensions(FliImageFile.format, [".fli", ".flc"]) diff --git a/py311/lib/python3.11/site-packages/PIL/FontFile.py b/py311/lib/python3.11/site-packages/PIL/FontFile.py new file mode 100644 index 0000000000000000000000000000000000000000..1e0c1c166b5932a7621e510eba047586465e03d8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/FontFile.py @@ -0,0 +1,134 @@ +# +# The Python Imaging Library +# $Id$ +# +# base class for raster font file parsers +# +# history: +# 1997-06-05 fl created +# 1997-08-19 fl restrict image width +# +# 
Copyright (c) 1997-1998 by Secret Labs AB +# Copyright (c) 1997-1998 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import os +from typing import BinaryIO + +from . import Image, _binary + +WIDTH = 800 + + +def puti16( + fp: BinaryIO, values: tuple[int, int, int, int, int, int, int, int, int, int] +) -> None: + """Write network order (big-endian) 16-bit sequence""" + for v in values: + if v < 0: + v += 65536 + fp.write(_binary.o16be(v)) + + +class FontFile: + """Base class for raster font file handlers.""" + + bitmap: Image.Image | None = None + + def __init__(self) -> None: + self.info: dict[bytes, bytes | int] = {} + self.glyph: list[ + tuple[ + tuple[int, int], + tuple[int, int, int, int], + tuple[int, int, int, int], + Image.Image, + ] + | None + ] = [None] * 256 + + def __getitem__(self, ix: int) -> ( + tuple[ + tuple[int, int], + tuple[int, int, int, int], + tuple[int, int, int, int], + Image.Image, + ] + | None + ): + return self.glyph[ix] + + def compile(self) -> None: + """Create metrics and bitmap""" + + if self.bitmap: + return + + # create bitmap large enough to hold all data + h = w = maxwidth = 0 + lines = 1 + for glyph in self.glyph: + if glyph: + d, dst, src, im = glyph + h = max(h, src[3] - src[1]) + w = w + (src[2] - src[0]) + if w > WIDTH: + lines += 1 + w = src[2] - src[0] + maxwidth = max(maxwidth, w) + + xsize = maxwidth + ysize = lines * h + + if xsize == 0 and ysize == 0: + return + + self.ysize = h + + # paste glyphs into bitmap + self.bitmap = Image.new("1", (xsize, ysize)) + self.metrics: list[ + tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]] + | None + ] = [None] * 256 + x = y = 0 + for i in range(256): + glyph = self[i] + if glyph: + d, dst, src, im = glyph + xx = src[2] - src[0] + x0, y0 = x, y + x = x + xx + if x > WIDTH: + x, y = 0, y + h + x0, y0 = x, y + x = xx + s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0 + self.bitmap.paste(im.crop(src), s) + self.metrics[i] = d, dst, s + + def save(self, filename: str) -> None: + """Save font""" + + self.compile() + + # font data + if not self.bitmap: + msg = "No bitmap created" + raise ValueError(msg) + self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG") + + # font metrics + with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp: + fp.write(b"PILfont\n") + fp.write(f";;;;;;{self.ysize};\n".encode("ascii")) # HACK!!! + fp.write(b"DATA\n") + for id in range(256): + m = self.metrics[id] + if not m: + puti16(fp, (0,) * 10) + else: + puti16(fp, m[0] + m[1] + m[2]) diff --git a/py311/lib/python3.11/site-packages/PIL/FpxImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/FpxImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..fd992cd9e20eb7cf0c6de347ac0a76f928ffc238 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/FpxImagePlugin.py @@ -0,0 +1,257 @@ +# +# THIS IS WORK IN PROGRESS +# +# The Python Imaging Library. +# $Id$ +# +# FlashPix support for PIL +# +# History: +# 97-01-25 fl Created (reads uncompressed RGB images only) +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import olefile + +from . 
import Image, ImageFile +from ._binary import i32le as i32 + +# we map from colour field tuples to (mode, rawmode) descriptors +MODES = { + # opacity + (0x00007FFE,): ("A", "L"), + # monochrome + (0x00010000,): ("L", "L"), + (0x00018000, 0x00017FFE): ("RGBA", "LA"), + # photo YCC + (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"), + (0x00028000, 0x00028001, 0x00028002, 0x00027FFE): ("RGBA", "YCCA;P"), + # standard RGB (NIFRGB) + (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"), + (0x00038000, 0x00038001, 0x00038002, 0x00037FFE): ("RGBA", "RGBA"), +} + + +# +# -------------------------------------------------------------------- + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(olefile.MAGIC) + + +## +# Image plugin for the FlashPix images. + + +class FpxImageFile(ImageFile.ImageFile): + format = "FPX" + format_description = "FlashPix" + + def _open(self) -> None: + # + # read the OLE directory and see if this is a likely + # to be a FlashPix file + + try: + self.ole = olefile.OleFileIO(self.fp) + except OSError as e: + msg = "not an FPX file; invalid OLE file" + raise SyntaxError(msg) from e + + root = self.ole.root + if not root or root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B": + msg = "not an FPX file; bad root CLSID" + raise SyntaxError(msg) + + self._open_index(1) + + def _open_index(self, index: int = 1) -> None: + # + # get the Image Contents Property Set + + prop = self.ole.getproperties( + [f"Data Object Store {index:06d}", "\005Image Contents"] + ) + + # size (highest resolution) + + assert isinstance(prop[0x1000002], int) + assert isinstance(prop[0x1000003], int) + self._size = prop[0x1000002], prop[0x1000003] + + size = max(self.size) + i = 1 + while size > 64: + size = size // 2 + i += 1 + self.maxid = i - 1 + + # mode. instead of using a single field for this, flashpix + # requires you to specify the mode for each channel in each + # resolution subimage, and leaves it to the decoder to make + # sure that they all match. for now, we'll cheat and assume + # that this is always the case. 
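+        # Property ids in the Image Contents set encode the resolution
+        # index in their high 16 bits, e.g. with maxid == 3 (an
+        # illustrative value):
+        #
+        #   id = 3 << 16           # 0x00030000
+        #   prop[0x2000002 | id]   # 0x02030002, the colour-field record
+        #                          # for resolution subimage 3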
+ + id = self.maxid << 16 + + s = prop[0x2000002 | id] + + if not isinstance(s, bytes) or (bands := i32(s, 4)) > 4: + msg = "Invalid number of bands" + raise OSError(msg) + + # note: for now, we ignore the "uncalibrated" flag + colors = tuple(i32(s, 8 + i * 4) & 0x7FFFFFFF for i in range(bands)) + + self._mode, self.rawmode = MODES[colors] + + # load JPEG tables, if any + self.jpeg = {} + for i in range(256): + id = 0x3000001 | (i << 16) + if id in prop: + self.jpeg[i] = prop[id] + + self._open_subimage(1, self.maxid) + + def _open_subimage(self, index: int = 1, subimage: int = 0) -> None: + # + # setup tile descriptors for a given subimage + + stream = [ + f"Data Object Store {index:06d}", + f"Resolution {subimage:04d}", + "Subimage 0000 Header", + ] + + fp = self.ole.openstream(stream) + + # skip prefix + fp.read(28) + + # header stream + s = fp.read(36) + + size = i32(s, 4), i32(s, 8) + # tilecount = i32(s, 12) + tilesize = i32(s, 16), i32(s, 20) + # channels = i32(s, 24) + offset = i32(s, 28) + length = i32(s, 32) + + if size != self.size: + msg = "subimage mismatch" + raise OSError(msg) + + # get tile descriptors + fp.seek(28 + offset) + s = fp.read(i32(s, 12) * length) + + x = y = 0 + xsize, ysize = size + xtile, ytile = tilesize + self.tile = [] + + for i in range(0, len(s), length): + x1 = min(xsize, x + xtile) + y1 = min(ysize, y + ytile) + + compression = i32(s, i + 8) + + if compression == 0: + self.tile.append( + ImageFile._Tile( + "raw", + (x, y, x1, y1), + i32(s, i) + 28, + self.rawmode, + ) + ) + + elif compression == 1: + # FIXME: the fill decoder is not implemented + self.tile.append( + ImageFile._Tile( + "fill", + (x, y, x1, y1), + i32(s, i) + 28, + (self.rawmode, s[12:16]), + ) + ) + + elif compression == 2: + internal_color_conversion = s[14] + jpeg_tables = s[15] + rawmode = self.rawmode + + if internal_color_conversion: + # The image is stored as usual (usually YCbCr). + if rawmode == "RGBA": + # For "RGBA", data is stored as YCbCrA based on + # negative RGB. The following trick works around + # this problem : + jpegmode, rawmode = "YCbCrK", "CMYK" + else: + jpegmode = None # let the decoder decide + + else: + # The image is stored as defined by rawmode + jpegmode = rawmode + + self.tile.append( + ImageFile._Tile( + "jpeg", + (x, y, x1, y1), + i32(s, i) + 28, + (rawmode, jpegmode), + ) + ) + + # FIXME: jpeg tables are tile dependent; the prefix + # data must be placed in the tile descriptor itself! 
+ + if jpeg_tables: + self.tile_prefix = self.jpeg[jpeg_tables] + + else: + msg = "unknown/invalid compression" + raise OSError(msg) + + x = x + xtile + if x >= xsize: + x, y = 0, y + ytile + if y >= ysize: + break # isn't really required + + self.stream = stream + self._fp = self.fp + self.fp = None + + def load(self) -> Image.core.PixelAccess | None: + if not self.fp: + self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"]) + + return ImageFile.ImageFile.load(self) + + def close(self) -> None: + self.ole.close() + super().close() + + def __exit__(self, *args: object) -> None: + self.ole.close() + super().__exit__() + + +# +# -------------------------------------------------------------------- + + +Image.register_open(FpxImageFile.format, FpxImageFile, _accept) + +Image.register_extension(FpxImageFile.format, ".fpx") diff --git a/py311/lib/python3.11/site-packages/PIL/FtexImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/FtexImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..d60e75bb60bdb5113c7cb3c48840918207ced694 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/FtexImagePlugin.py @@ -0,0 +1,114 @@ +""" +A Pillow loader for .ftc and .ftu files (FTEX) +Jerome Leclanche + +The contents of this file are hereby released in the public domain (CC0) +Full text of the CC0 license: + https://creativecommons.org/publicdomain/zero/1.0/ + +Independence War 2: Edge Of Chaos - Texture File Format - 16 October 2001 + +The textures used for 3D objects in Independence War 2: Edge Of Chaos are in a +packed custom format called FTEX. This file format uses file extensions FTC +and FTU. +* FTC files are compressed textures (using standard texture compression). +* FTU files are not compressed. +Texture File Format +The FTC and FTU texture files both use the same format. This +has the following structure: +{header} +{format_directory} +{data} +Where: +{header} = { + u32:magic, + u32:version, + u32:width, + u32:height, + u32:mipmap_count, + u32:format_count +} + +* The "magic" number is "FTEX". +* "width" and "height" are the dimensions of the texture. +* "mipmap_count" is the number of mipmaps in the texture. +* "format_count" is the number of texture formats (different versions of the +same texture) in this file. + +{format_directory} = format_count * { u32:format, u32:where } + +The format value is 0 for DXT1 compressed textures and 1 for 24-bit RGB +uncompressed textures. +The texture data for a format starts at the position "where" in the file. + +Each set of texture data in the file has the following structure: +{data} = format_count * { u32:mipmap_size, mipmap_size * { u8 } } +* "mipmap_size" is the number of bytes in that mip level. For compressed +textures this is the size of the texture data compressed with DXT1. For 24 bit +uncompressed textures, this is 3 * width * height. Following this are the image +bytes for that mipmap level. + +Note: All data is stored in little-Endian (Intel) byte order. +""" + +from __future__ import annotations + +import struct +from enum import IntEnum +from io import BytesIO + +from . 
import Image, ImageFile
+MAGIC = b"FTEX"
+
+
+class Format(IntEnum):
+    DXT1 = 0
+    UNCOMPRESSED = 1
+
+
+class FtexImageFile(ImageFile.ImageFile):
+    format = "FTEX"
+    format_description = "Texture File Format (IW2:EOC)"
+
+    def _open(self) -> None:
+        if not _accept(self.fp.read(4)):
+            msg = "not an FTEX file"
+            raise SyntaxError(msg)
+        struct.unpack("<i", self.fp.read(4))  # version
+        self._size = struct.unpack("<2i", self.fp.read(8))
+        mipmap_count, format_count = struct.unpack("<2i", self.fp.read(8))
+
+        self._mode = "RGB"
+
+        # Only support single-format files.
+        # I don't know of any multi-format file.
+        assert format_count == 1
+
+        format, where = struct.unpack("<2i", self.fp.read(8))
+        self.fp.seek(where)
+        (mipmap_size,) = struct.unpack("<i", self.fp.read(4))
+
+        data = self.fp.read(mipmap_size)
+
+        if format == Format.DXT1:
+            self._mode = "RGBA"
+            self.tile = [ImageFile._Tile("bcn", (0, 0) + self.size, 0, (1,))]
+        elif format == Format.UNCOMPRESSED:
+            self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 0, "RGB")]
+        else:
+            msg = f"Invalid texture compression format: {repr(format)}"
+            raise SyntaxError(msg)
+
+        self.fp = BytesIO(data)
+
+    def load_seek(self, pos: int) -> None:
+        pass
+
+
+def _accept(prefix: bytes) -> bool:
+    return prefix.startswith(MAGIC)
+
+
+Image.register_open(FtexImageFile.format, FtexImageFile, _accept)
+Image.register_extensions(FtexImageFile.format, [".ftc", ".ftu"])
diff --git a/py311/lib/python3.11/site-packages/PIL/GbrImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/GbrImagePlugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..f319d7e846e4c7ecb32a43751204bd1fbee168c0
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/PIL/GbrImagePlugin.py
@@ -0,0 +1,103 @@
+#
+# The Python Imaging Library
+#
+# load a GIMP brush file
+#
+# History:
+#       96-03-14 fl     Created
+#       16-01-08 es     Version 2
+#
+# Copyright (c) Secret Labs AB 1997.
+# Copyright (c) Fredrik Lundh 1996.
+# Copyright (c) Eric Soroos 2016.
+#
+# See the README file for information on usage and redistribution.
+#
+#
+# See https://github.com/GNOME/gimp/blob/mainline/devel-docs/gbr.txt for
+# format documentation.
+#
+# This code Interprets version 1 and 2 .gbr files.
+# Version 1 files are obsolete, and should not be used for new
+# brushes.
+# Version 2 files are saved by GIMP v2.8 (at least)
+# Version 3 files have a format specifier of 18 for 16bit floats in
+# the color depth field. This is currently unsupported by Pillow.
+from __future__ import annotations
+
+from . import Image, ImageFile
+from ._binary import i32be as i32
+
+
+def _accept(prefix: bytes) -> bool:
+    return len(prefix) >= 8 and i32(prefix, 0) >= 20 and i32(prefix, 4) in (1, 2)
+
+
+##
+# Image plugin for the GIMP brush format.
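+# A version 2 file, as parsed below, begins with seven big-endian 4-byte
+# fields (layout summary):
+#
+#   header_size  version  width  height  color_depth  b"GIMP"  spacing
+#
+# followed by (header_size - 28) bytes of NUL-terminated comment text and
+# width * height * color_depth bytes of raw image data.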
+ + +class GbrImageFile(ImageFile.ImageFile): + format = "GBR" + format_description = "GIMP brush file" + + def _open(self) -> None: + header_size = i32(self.fp.read(4)) + if header_size < 20: + msg = "not a GIMP brush" + raise SyntaxError(msg) + version = i32(self.fp.read(4)) + if version not in (1, 2): + msg = f"Unsupported GIMP brush version: {version}" + raise SyntaxError(msg) + + width = i32(self.fp.read(4)) + height = i32(self.fp.read(4)) + color_depth = i32(self.fp.read(4)) + if width <= 0 or height <= 0: + msg = "not a GIMP brush" + raise SyntaxError(msg) + if color_depth not in (1, 4): + msg = f"Unsupported GIMP brush color depth: {color_depth}" + raise SyntaxError(msg) + + if version == 1: + comment_length = header_size - 20 + else: + comment_length = header_size - 28 + magic_number = self.fp.read(4) + if magic_number != b"GIMP": + msg = "not a GIMP brush, bad magic number" + raise SyntaxError(msg) + self.info["spacing"] = i32(self.fp.read(4)) + + comment = self.fp.read(comment_length)[:-1] + + if color_depth == 1: + self._mode = "L" + else: + self._mode = "RGBA" + + self._size = width, height + + self.info["comment"] = comment + + # Image might not be small + Image._decompression_bomb_check(self.size) + + # Data is an uncompressed block of w * h * bytes/pixel + self._data_size = width * height * color_depth + + def load(self) -> Image.core.PixelAccess | None: + if self._im is None: + self.im = Image.core.new(self.mode, self.size) + self.frombytes(self.fp.read(self._data_size)) + return Image.Image.load(self) + + +# +# registry + + +Image.register_open(GbrImageFile.format, GbrImageFile, _accept) +Image.register_extension(GbrImageFile.format, ".gbr") diff --git a/py311/lib/python3.11/site-packages/PIL/GdImageFile.py b/py311/lib/python3.11/site-packages/PIL/GdImageFile.py new file mode 100644 index 0000000000000000000000000000000000000000..891225ce2fd034a11963bb64212cfa7311190441 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/GdImageFile.py @@ -0,0 +1,102 @@ +# +# The Python Imaging Library. +# $Id$ +# +# GD file handling +# +# History: +# 1996-04-12 fl Created +# +# Copyright (c) 1997 by Secret Labs AB. +# Copyright (c) 1996 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + + +""" +.. note:: + This format cannot be automatically recognized, so the + class is not registered for use with :py:func:`PIL.Image.open()`. To open a + gd file, use the :py:func:`PIL.GdImageFile.open()` function instead. + +.. warning:: + THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This + implementation is provided for convenience and demonstrational + purposes only. +""" +from __future__ import annotations + +from typing import IO + +from . import ImageFile, ImagePalette, UnidentifiedImageError +from ._binary import i16be as i16 +from ._binary import i32be as i32 +from ._typing import StrOrBytesPath + + +class GdImageFile(ImageFile.ImageFile): + """ + Image plugin for the GD uncompressed format. Note that this format + is not supported by the standard :py:func:`PIL.Image.open()` function. To use + this plugin, you have to import the :py:mod:`PIL.GdImageFile` module and + use the :py:func:`PIL.GdImageFile.open()` function. 
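+
+    For example (an illustrative sketch; "uncompressed.gd" is a
+    hypothetical file)::
+
+        from PIL import GdImageFile
+
+        with GdImageFile.open("uncompressed.gd") as im:
+            im.convert("RGB").save("out.png")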
+ """ + + format = "GD" + format_description = "GD uncompressed images" + + def _open(self) -> None: + # Header + assert self.fp is not None + + s = self.fp.read(1037) + + if i16(s) not in [65534, 65535]: + msg = "Not a valid GD 2.x .gd file" + raise SyntaxError(msg) + + self._mode = "P" + self._size = i16(s, 2), i16(s, 4) + + true_color = s[6] + true_color_offset = 2 if true_color else 0 + + # transparency index + tindex = i32(s, 7 + true_color_offset) + if tindex < 256: + self.info["transparency"] = tindex + + self.palette = ImagePalette.raw( + "RGBX", s[7 + true_color_offset + 6 : 7 + true_color_offset + 6 + 256 * 4] + ) + + self.tile = [ + ImageFile._Tile( + "raw", + (0, 0) + self.size, + 7 + true_color_offset + 6 + 256 * 4, + "L", + ) + ] + + +def open(fp: StrOrBytesPath | IO[bytes], mode: str = "r") -> GdImageFile: + """ + Load texture from a GD image file. + + :param fp: GD file name, or an opened file handle. + :param mode: Optional mode. In this version, if the mode argument + is given, it must be "r". + :returns: An image instance. + :raises OSError: If the image could not be read. + """ + if mode != "r": + msg = "bad mode" + raise ValueError(msg) + + try: + return GdImageFile(fp) + except SyntaxError as e: + msg = "cannot identify this image file" + raise UnidentifiedImageError(msg) from e diff --git a/py311/lib/python3.11/site-packages/PIL/GifImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/GifImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..b03aa7f1505e8624b1a50551adbc4488ac3bd1fa --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/GifImagePlugin.py @@ -0,0 +1,1213 @@ +# +# The Python Imaging Library. +# $Id$ +# +# GIF file handling +# +# History: +# 1995-09-01 fl Created +# 1996-12-14 fl Added interlace support +# 1996-12-30 fl Added animation support +# 1997-01-05 fl Added write support, fixed local colour map bug +# 1997-02-23 fl Make sure to load raster data in getdata() +# 1997-07-05 fl Support external decoder (0.4) +# 1998-07-09 fl Handle all modes when saving (0.5) +# 1998-07-15 fl Renamed offset attribute to avoid name clash +# 2001-04-16 fl Added rewind support (seek to frame 0) (0.6) +# 2001-04-17 fl Added palette optimization (0.7) +# 2002-06-06 fl Added transparency support for save (0.8) +# 2004-02-24 fl Disable interlacing for small images +# +# Copyright (c) 1997-2004 by Secret Labs AB +# Copyright (c) 1995-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import itertools +import math +import os +import subprocess +from enum import IntEnum +from functools import cached_property +from typing import IO, Any, Literal, NamedTuple, Union, cast + +from . import ( + Image, + ImageChops, + ImageFile, + ImageMath, + ImageOps, + ImagePalette, + ImageSequence, +) +from ._binary import i16le as i16 +from ._binary import o8 +from ._binary import o16le as o16 +from ._util import DeferredError + +TYPE_CHECKING = False +if TYPE_CHECKING: + from . import _imaging + from ._typing import Buffer + + +class LoadingStrategy(IntEnum): + """.. versionadded:: 9.1.0""" + + RGB_AFTER_FIRST = 0 + RGB_AFTER_DIFFERENT_PALETTE_ONLY = 1 + RGB_ALWAYS = 2 + + +#: .. 
versionadded:: 9.1.0 +LOADING_STRATEGY = LoadingStrategy.RGB_AFTER_FIRST + +# -------------------------------------------------------------------- +# Identify/read GIF files + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith((b"GIF87a", b"GIF89a")) + + +## +# Image plugin for GIF images. This plugin supports both GIF87 and +# GIF89 images. + + +class GifImageFile(ImageFile.ImageFile): + format = "GIF" + format_description = "Compuserve GIF" + _close_exclusive_fp_after_loading = False + + global_palette = None + + def data(self) -> bytes | None: + s = self.fp.read(1) + if s and s[0]: + return self.fp.read(s[0]) + return None + + def _is_palette_needed(self, p: bytes) -> bool: + for i in range(0, len(p), 3): + if not (i // 3 == p[i] == p[i + 1] == p[i + 2]): + return True + return False + + def _open(self) -> None: + # Screen + s = self.fp.read(13) + if not _accept(s): + msg = "not a GIF file" + raise SyntaxError(msg) + + self.info["version"] = s[:6] + self._size = i16(s, 6), i16(s, 8) + flags = s[10] + bits = (flags & 7) + 1 + + if flags & 128: + # get global palette + self.info["background"] = s[11] + # check if palette contains colour indices + p = self.fp.read(3 << bits) + if self._is_palette_needed(p): + p = ImagePalette.raw("RGB", p) + self.global_palette = self.palette = p + + self._fp = self.fp # FIXME: hack + self.__rewind = self.fp.tell() + self._n_frames: int | None = None + self._seek(0) # get ready to read first frame + + @property + def n_frames(self) -> int: + if self._n_frames is None: + current = self.tell() + try: + while True: + self._seek(self.tell() + 1, False) + except EOFError: + self._n_frames = self.tell() + 1 + self.seek(current) + return self._n_frames + + @cached_property + def is_animated(self) -> bool: + if self._n_frames is not None: + return self._n_frames != 1 + + current = self.tell() + if current: + return True + + try: + self._seek(1, False) + is_animated = True + except EOFError: + is_animated = False + + self.seek(current) + return is_animated + + def seek(self, frame: int) -> None: + if not self._seek_check(frame): + return + if frame < self.__frame: + self._im = None + self._seek(0) + + last_frame = self.__frame + for f in range(self.__frame + 1, frame + 1): + try: + self._seek(f) + except EOFError as e: + self.seek(last_frame) + msg = "no more images in GIF file" + raise EOFError(msg) from e + + def _seek(self, frame: int, update_image: bool = True) -> None: + if isinstance(self._fp, DeferredError): + raise self._fp.ex + if frame == 0: + # rewind + self.__offset = 0 + self.dispose: _imaging.ImagingCore | None = None + self.__frame = -1 + self._fp.seek(self.__rewind) + self.disposal_method = 0 + if "comment" in self.info: + del self.info["comment"] + else: + # ensure that the previous frame was loaded + if self.tile and update_image: + self.load() + + if frame != self.__frame + 1: + msg = f"cannot seek to frame {frame}" + raise ValueError(msg) + + self.fp = self._fp + if self.__offset: + # backup to last frame + self.fp.seek(self.__offset) + while self.data(): + pass + self.__offset = 0 + + s = self.fp.read(1) + if not s or s == b";": + msg = "no more images in GIF file" + raise EOFError(msg) + + palette: ImagePalette.ImagePalette | Literal[False] | None = None + + info: dict[str, Any] = {} + frame_transparency = None + interlace = None + frame_dispose_extent = None + while True: + if not s: + s = self.fp.read(1) + if not s or s == b";": + break + + elif s == b"!": + # + # extensions + # + s = self.fp.read(1) + block = self.data() + if 
s[0] == 249 and block is not None: + # + # graphic control extension + # + flags = block[0] + if flags & 1: + frame_transparency = block[3] + info["duration"] = i16(block, 1) * 10 + + # disposal method - find the value of bits 4 - 6 + dispose_bits = 0b00011100 & flags + dispose_bits = dispose_bits >> 2 + if dispose_bits: + # only set the dispose if it is not + # unspecified. I'm not sure if this is + # correct, but it seems to prevent the last + # frame from looking odd for some animations + self.disposal_method = dispose_bits + elif s[0] == 254: + # + # comment extension + # + comment = b"" + + # Read this comment block + while block: + comment += block + block = self.data() + + if "comment" in info: + # If multiple comment blocks in frame, separate with \n + info["comment"] += b"\n" + comment + else: + info["comment"] = comment + s = None + continue + elif s[0] == 255 and frame == 0 and block is not None: + # + # application extension + # + info["extension"] = block, self.fp.tell() + if block.startswith(b"NETSCAPE2.0"): + block = self.data() + if block and len(block) >= 3 and block[0] == 1: + self.info["loop"] = i16(block, 1) + while self.data(): + pass + + elif s == b",": + # + # local image + # + s = self.fp.read(9) + + # extent + x0, y0 = i16(s, 0), i16(s, 2) + x1, y1 = x0 + i16(s, 4), y0 + i16(s, 6) + if (x1 > self.size[0] or y1 > self.size[1]) and update_image: + self._size = max(x1, self.size[0]), max(y1, self.size[1]) + Image._decompression_bomb_check(self._size) + frame_dispose_extent = x0, y0, x1, y1 + flags = s[8] + + interlace = (flags & 64) != 0 + + if flags & 128: + bits = (flags & 7) + 1 + p = self.fp.read(3 << bits) + if self._is_palette_needed(p): + palette = ImagePalette.raw("RGB", p) + else: + palette = False + + # image data + bits = self.fp.read(1)[0] + self.__offset = self.fp.tell() + break + s = None + + if interlace is None: + msg = "image not found in GIF frame" + raise EOFError(msg) + + self.__frame = frame + if not update_image: + return + + self.tile = [] + + if self.dispose: + self.im.paste(self.dispose, self.dispose_extent) + + self._frame_palette = palette if palette is not None else self.global_palette + self._frame_transparency = frame_transparency + if frame == 0: + if self._frame_palette: + if LOADING_STRATEGY == LoadingStrategy.RGB_ALWAYS: + self._mode = "RGBA" if frame_transparency is not None else "RGB" + else: + self._mode = "P" + else: + self._mode = "L" + + if palette: + self.palette = palette + elif self.global_palette: + from copy import copy + + self.palette = copy(self.global_palette) + else: + self.palette = None + else: + if self.mode == "P": + if ( + LOADING_STRATEGY != LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY + or palette + ): + if "transparency" in self.info: + self.im.putpalettealpha(self.info["transparency"], 0) + self.im = self.im.convert("RGBA", Image.Dither.FLOYDSTEINBERG) + self._mode = "RGBA" + del self.info["transparency"] + else: + self._mode = "RGB" + self.im = self.im.convert("RGB", Image.Dither.FLOYDSTEINBERG) + + def _rgb(color: int) -> tuple[int, int, int]: + if self._frame_palette: + if color * 3 + 3 > len(self._frame_palette.palette): + color = 0 + return cast( + tuple[int, int, int], + tuple(self._frame_palette.palette[color * 3 : color * 3 + 3]), + ) + else: + return (color, color, color) + + self.dispose = None + self.dispose_extent: tuple[int, int, int, int] | None = frame_dispose_extent + if self.dispose_extent and self.disposal_method >= 2: + try: + if self.disposal_method == 2: + # replace with background 
colour + + # only dispose the extent in this frame + x0, y0, x1, y1 = self.dispose_extent + dispose_size = (x1 - x0, y1 - y0) + + Image._decompression_bomb_check(dispose_size) + + # by convention, attempt to use transparency first + dispose_mode = "P" + color = self.info.get("transparency", frame_transparency) + if color is not None: + if self.mode in ("RGB", "RGBA"): + dispose_mode = "RGBA" + color = _rgb(color) + (0,) + else: + color = self.info.get("background", 0) + if self.mode in ("RGB", "RGBA"): + dispose_mode = "RGB" + color = _rgb(color) + self.dispose = Image.core.fill(dispose_mode, dispose_size, color) + else: + # replace with previous contents + if self._im is not None: + # only dispose the extent in this frame + self.dispose = self._crop(self.im, self.dispose_extent) + elif frame_transparency is not None: + x0, y0, x1, y1 = self.dispose_extent + dispose_size = (x1 - x0, y1 - y0) + + Image._decompression_bomb_check(dispose_size) + dispose_mode = "P" + color = frame_transparency + if self.mode in ("RGB", "RGBA"): + dispose_mode = "RGBA" + color = _rgb(frame_transparency) + (0,) + self.dispose = Image.core.fill( + dispose_mode, dispose_size, color + ) + except AttributeError: + pass + + if interlace is not None: + transparency = -1 + if frame_transparency is not None: + if frame == 0: + if LOADING_STRATEGY != LoadingStrategy.RGB_ALWAYS: + self.info["transparency"] = frame_transparency + elif self.mode not in ("RGB", "RGBA"): + transparency = frame_transparency + self.tile = [ + ImageFile._Tile( + "gif", + (x0, y0, x1, y1), + self.__offset, + (bits, interlace, transparency), + ) + ] + + if info.get("comment"): + self.info["comment"] = info["comment"] + for k in ["duration", "extension"]: + if k in info: + self.info[k] = info[k] + elif k in self.info: + del self.info[k] + + def load_prepare(self) -> None: + temp_mode = "P" if self._frame_palette else "L" + self._prev_im = None + if self.__frame == 0: + if self._frame_transparency is not None: + self.im = Image.core.fill( + temp_mode, self.size, self._frame_transparency + ) + elif self.mode in ("RGB", "RGBA"): + self._prev_im = self.im + if self._frame_palette: + self.im = Image.core.fill("P", self.size, self._frame_transparency or 0) + self.im.putpalette("RGB", *self._frame_palette.getdata()) + else: + self._im = None + if not self._prev_im and self._im is not None and self.size != self.im.size: + expanded_im = Image.core.fill(self.im.mode, self.size) + if self._frame_palette: + expanded_im.putpalette("RGB", *self._frame_palette.getdata()) + expanded_im.paste(self.im, (0, 0) + self.im.size) + + self.im = expanded_im + self._mode = temp_mode + self._frame_palette = None + + super().load_prepare() + + def load_end(self) -> None: + if self.__frame == 0: + if self.mode == "P" and LOADING_STRATEGY == LoadingStrategy.RGB_ALWAYS: + if self._frame_transparency is not None: + self.im.putpalettealpha(self._frame_transparency, 0) + self._mode = "RGBA" + else: + self._mode = "RGB" + self.im = self.im.convert(self.mode, Image.Dither.FLOYDSTEINBERG) + return + if not self._prev_im: + return + if self.size != self._prev_im.size: + if self._frame_transparency is not None: + expanded_im = Image.core.fill("RGBA", self.size) + else: + expanded_im = Image.core.fill("P", self.size) + expanded_im.putpalette("RGB", "RGB", self.im.getpalette()) + expanded_im = expanded_im.convert("RGB") + expanded_im.paste(self._prev_im, (0, 0) + self._prev_im.size) + + self._prev_im = expanded_im + assert self._prev_im is not None + if self._frame_transparency is not 
None: + if self.mode == "L": + frame_im = self.im.convert_transparent("LA", self._frame_transparency) + else: + self.im.putpalettealpha(self._frame_transparency, 0) + frame_im = self.im.convert("RGBA") + else: + frame_im = self.im.convert("RGB") + + assert self.dispose_extent is not None + frame_im = self._crop(frame_im, self.dispose_extent) + + self.im = self._prev_im + self._mode = self.im.mode + if frame_im.mode in ("LA", "RGBA"): + self.im.paste(frame_im, self.dispose_extent, frame_im) + else: + self.im.paste(frame_im, self.dispose_extent) + + def tell(self) -> int: + return self.__frame + + +# -------------------------------------------------------------------- +# Write GIF files + + +RAWMODE = {"1": "L", "L": "L", "P": "P"} + + +def _normalize_mode(im: Image.Image) -> Image.Image: + """ + Takes an image (or frame), returns an image in a mode that is appropriate + for saving in a Gif. + + It may return the original image, or it may return an image converted to + palette or 'L' mode. + + :param im: Image object + :returns: Image object + """ + if im.mode in RAWMODE: + im.load() + return im + if Image.getmodebase(im.mode) == "RGB": + im = im.convert("P", palette=Image.Palette.ADAPTIVE) + assert im.palette is not None + if im.palette.mode == "RGBA": + for rgba in im.palette.colors: + if rgba[3] == 0: + im.info["transparency"] = im.palette.colors[rgba] + break + return im + return im.convert("L") + + +_Palette = Union[bytes, bytearray, list[int], ImagePalette.ImagePalette] + + +def _normalize_palette( + im: Image.Image, palette: _Palette | None, info: dict[str, Any] +) -> Image.Image: + """ + Normalizes the palette for image. + - Sets the palette to the incoming palette, if provided. + - Ensures that there's a palette for L mode images + - Optimizes the palette if necessary/desired. + + :param im: Image object + :param palette: bytes object containing the source palette, or .... 
+ :param info: encoderinfo + :returns: Image object + """ + source_palette = None + if palette: + # a bytes palette + if isinstance(palette, (bytes, bytearray, list)): + source_palette = bytearray(palette[:768]) + if isinstance(palette, ImagePalette.ImagePalette): + source_palette = bytearray(palette.palette) + + if im.mode == "P": + if not source_palette: + im_palette = im.getpalette(None) + assert im_palette is not None + source_palette = bytearray(im_palette) + else: # L-mode + if not source_palette: + source_palette = bytearray(i // 3 for i in range(768)) + im.palette = ImagePalette.ImagePalette("RGB", palette=source_palette) + assert source_palette is not None + + if palette: + used_palette_colors: list[int | None] = [] + assert im.palette is not None + for i in range(0, len(source_palette), 3): + source_color = tuple(source_palette[i : i + 3]) + index = im.palette.colors.get(source_color) + if index in used_palette_colors: + index = None + used_palette_colors.append(index) + for i, index in enumerate(used_palette_colors): + if index is None: + for j in range(len(used_palette_colors)): + if j not in used_palette_colors: + used_palette_colors[i] = j + break + dest_map: list[int] = [] + for index in used_palette_colors: + assert index is not None + dest_map.append(index) + im = im.remap_palette(dest_map) + else: + optimized_palette_colors = _get_optimize(im, info) + if optimized_palette_colors is not None: + im = im.remap_palette(optimized_palette_colors, source_palette) + if "transparency" in info: + try: + info["transparency"] = optimized_palette_colors.index( + info["transparency"] + ) + except ValueError: + del info["transparency"] + return im + + assert im.palette is not None + im.palette.palette = source_palette + return im + + +def _write_single_frame( + im: Image.Image, + fp: IO[bytes], + palette: _Palette | None, +) -> None: + im_out = _normalize_mode(im) + for k, v in im_out.info.items(): + if isinstance(k, str): + im.encoderinfo.setdefault(k, v) + im_out = _normalize_palette(im_out, palette, im.encoderinfo) + + for s in _get_global_header(im_out, im.encoderinfo): + fp.write(s) + + # local image header + flags = 0 + if get_interlace(im): + flags = flags | 64 + _write_local_header(fp, im, (0, 0), flags) + + im_out.encoderconfig = (8, get_interlace(im)) + ImageFile._save( + im_out, fp, [ImageFile._Tile("gif", (0, 0) + im.size, 0, RAWMODE[im_out.mode])] + ) + + fp.write(b"\0") # end of image data + + +def _getbbox( + base_im: Image.Image, im_frame: Image.Image +) -> tuple[Image.Image, tuple[int, int, int, int] | None]: + palette_bytes = [ + bytes(im.palette.palette) if im.palette else b"" for im in (base_im, im_frame) + ] + if palette_bytes[0] != palette_bytes[1]: + im_frame = im_frame.convert("RGBA") + base_im = base_im.convert("RGBA") + delta = ImageChops.subtract_modulo(im_frame, base_im) + return delta, delta.getbbox(alpha_only=False) + + +class _Frame(NamedTuple): + im: Image.Image + bbox: tuple[int, int, int, int] | None + encoderinfo: dict[str, Any] + + +def _write_multiple_frames( + im: Image.Image, fp: IO[bytes], palette: _Palette | None +) -> bool: + duration = im.encoderinfo.get("duration") + disposal = im.encoderinfo.get("disposal", im.info.get("disposal")) + + im_frames: list[_Frame] = [] + previous_im: Image.Image | None = None + frame_count = 0 + background_im = None + for imSequence in itertools.chain([im], im.encoderinfo.get("append_images", [])): + for im_frame in ImageSequence.Iterator(imSequence): + # a copy is required here since seek can still mutate the 
image + im_frame = _normalize_mode(im_frame.copy()) + if frame_count == 0: + for k, v in im_frame.info.items(): + if k == "transparency": + continue + if isinstance(k, str): + im.encoderinfo.setdefault(k, v) + + encoderinfo = im.encoderinfo.copy() + if "transparency" in im_frame.info: + encoderinfo.setdefault("transparency", im_frame.info["transparency"]) + im_frame = _normalize_palette(im_frame, palette, encoderinfo) + if isinstance(duration, (list, tuple)): + encoderinfo["duration"] = duration[frame_count] + elif duration is None and "duration" in im_frame.info: + encoderinfo["duration"] = im_frame.info["duration"] + if isinstance(disposal, (list, tuple)): + encoderinfo["disposal"] = disposal[frame_count] + frame_count += 1 + + diff_frame = None + if im_frames and previous_im: + # delta frame + delta, bbox = _getbbox(previous_im, im_frame) + if not bbox: + # This frame is identical to the previous frame + if encoderinfo.get("duration"): + im_frames[-1].encoderinfo["duration"] += encoderinfo["duration"] + continue + if im_frames[-1].encoderinfo.get("disposal") == 2: + # To appear correctly in viewers using a convention, + # only consider transparency, and not background color + color = im.encoderinfo.get( + "transparency", im.info.get("transparency") + ) + if color is not None: + if background_im is None: + background = _get_background(im_frame, color) + background_im = Image.new("P", im_frame.size, background) + first_palette = im_frames[0].im.palette + assert first_palette is not None + background_im.putpalette(first_palette, first_palette.mode) + bbox = _getbbox(background_im, im_frame)[1] + else: + bbox = (0, 0) + im_frame.size + elif encoderinfo.get("optimize") and im_frame.mode != "1": + if "transparency" not in encoderinfo: + assert im_frame.palette is not None + try: + encoderinfo["transparency"] = ( + im_frame.palette._new_color_index(im_frame) + ) + except ValueError: + pass + if "transparency" in encoderinfo: + # When the delta is zero, fill the image with transparency + diff_frame = im_frame.copy() + fill = Image.new("P", delta.size, encoderinfo["transparency"]) + if delta.mode == "RGBA": + r, g, b, a = delta.split() + mask = ImageMath.lambda_eval( + lambda args: args["convert"]( + args["max"]( + args["max"]( + args["max"](args["r"], args["g"]), args["b"] + ), + args["a"], + ) + * 255, + "1", + ), + r=r, + g=g, + b=b, + a=a, + ) + else: + if delta.mode == "P": + # Convert to L without considering palette + delta_l = Image.new("L", delta.size) + delta_l.putdata(delta.getdata()) + delta = delta_l + mask = ImageMath.lambda_eval( + lambda args: args["convert"](args["im"] * 255, "1"), + im=delta, + ) + diff_frame.paste(fill, mask=ImageOps.invert(mask)) + else: + bbox = None + previous_im = im_frame + im_frames.append(_Frame(diff_frame or im_frame, bbox, encoderinfo)) + + if len(im_frames) == 1: + if "duration" in im.encoderinfo: + # Since multiple frames will not be written, use the combined duration + im.encoderinfo["duration"] = im_frames[0].encoderinfo["duration"] + return False + + for frame_data in im_frames: + im_frame = frame_data.im + if not frame_data.bbox: + # global header + for s in _get_global_header(im_frame, frame_data.encoderinfo): + fp.write(s) + offset = (0, 0) + else: + # compress difference + if not palette: + frame_data.encoderinfo["include_color_table"] = True + + if frame_data.bbox != (0, 0) + im_frame.size: + im_frame = im_frame.crop(frame_data.bbox) + offset = frame_data.bbox[:2] + _write_frame_data(fp, im_frame, offset, frame_data.encoderinfo) + return 
True + + +def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + _save(im, fp, filename, save_all=True) + + +def _save( + im: Image.Image, fp: IO[bytes], filename: str | bytes, save_all: bool = False +) -> None: + # header + if "palette" in im.encoderinfo or "palette" in im.info: + palette = im.encoderinfo.get("palette", im.info.get("palette")) + else: + palette = None + im.encoderinfo.setdefault("optimize", True) + + if not save_all or not _write_multiple_frames(im, fp, palette): + _write_single_frame(im, fp, palette) + + fp.write(b";") # end of file + + if hasattr(fp, "flush"): + fp.flush() + + +def get_interlace(im: Image.Image) -> int: + interlace = im.encoderinfo.get("interlace", 1) + + # workaround for @PIL153 + if min(im.size) < 16: + interlace = 0 + + return interlace + + +def _write_local_header( + fp: IO[bytes], im: Image.Image, offset: tuple[int, int], flags: int +) -> None: + try: + transparency = im.encoderinfo["transparency"] + except KeyError: + transparency = None + + if "duration" in im.encoderinfo: + duration = int(im.encoderinfo["duration"] / 10) + else: + duration = 0 + + disposal = int(im.encoderinfo.get("disposal", 0)) + + if transparency is not None or duration != 0 or disposal: + packed_flag = 1 if transparency is not None else 0 + packed_flag |= disposal << 2 + + fp.write( + b"!" + + o8(249) # extension intro + + o8(4) # length + + o8(packed_flag) # packed fields + + o16(duration) # duration + + o8(transparency or 0) # transparency index + + o8(0) + ) + + include_color_table = im.encoderinfo.get("include_color_table") + if include_color_table: + palette_bytes = _get_palette_bytes(im) + color_table_size = _get_color_table_size(palette_bytes) + if color_table_size: + flags = flags | 128 # local color table flag + flags = flags | color_table_size + + fp.write( + b"," + + o16(offset[0]) # offset + + o16(offset[1]) + + o16(im.size[0]) # size + + o16(im.size[1]) + + o8(flags) # flags + ) + if include_color_table and color_table_size: + fp.write(_get_header_palette(palette_bytes)) + fp.write(o8(8)) # bits + + +def _save_netpbm(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + # Unused by default. + # To use, uncomment the register_save call at the end of the file. + # + # If you need real GIF compression and/or RGB quantization, you + # can use the external NETPBM/PBMPLUS utilities. See comments + # below for information on how to enable this. + tempfile = im._dump() + + try: + with open(filename, "wb") as f: + if im.mode != "RGB": + subprocess.check_call( + ["ppmtogif", tempfile], stdout=f, stderr=subprocess.DEVNULL + ) + else: + # Pipe ppmquant output into ppmtogif + # "ppmquant 256 %s | ppmtogif > %s" % (tempfile, filename) + quant_cmd = ["ppmquant", "256", tempfile] + togif_cmd = ["ppmtogif"] + quant_proc = subprocess.Popen( + quant_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL + ) + togif_proc = subprocess.Popen( + togif_cmd, + stdin=quant_proc.stdout, + stdout=f, + stderr=subprocess.DEVNULL, + ) + + # Allow ppmquant to receive SIGPIPE if ppmtogif exits + assert quant_proc.stdout is not None + quant_proc.stdout.close() + + retcode = quant_proc.wait() + if retcode: + raise subprocess.CalledProcessError(retcode, quant_cmd) + + retcode = togif_proc.wait() + if retcode: + raise subprocess.CalledProcessError(retcode, togif_cmd) + finally: + try: + os.unlink(tempfile) + except OSError: + pass + + +# Force optimization so that we can test performance against +# cases where it took lots of memory and time previously. 
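+# _FORCE_OPTIMIZE is read by _get_optimize() below: when True, the used-color
+# scan always runs and the list of in-use palette entries is always returned,
+# regardless of the image-size and palette-size heuristics.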
+_FORCE_OPTIMIZE = False
+
+
+def _get_optimize(im: Image.Image, info: dict[str, Any]) -> list[int] | None:
+    """
+    Palette optimization is a potentially expensive operation.
+
+    This function determines if the palette should be optimized using
+    some heuristics, then returns the list of palette entries in use.
+
+    :param im: Image object
+    :param info: encoderinfo
+    :returns: list of indexes of palette entries in use, or None
+    """
+    if im.mode in ("P", "L") and info and info.get("optimize"):
+        # Potentially expensive operation.
+
+        # The palette saves 3 bytes per color not used, but palette
+        # lengths are restricted to 3*(2**N) bytes. Max saving would
+        # be 768 -> 6 bytes if we went all the way down to 2 colors.
+        # * If we're over 128 colors, we can't save any space.
+        # * If there aren't any holes, it's not worth collapsing.
+        # * If we have a 'large' image, the palette is in the noise.
+
+        # create the new palette if not every color is used
+        optimise = _FORCE_OPTIMIZE or im.mode == "L"
+        if optimise or im.width * im.height < 512 * 512:
+            # check which colors are used
+            used_palette_colors = []
+            for i, count in enumerate(im.histogram()):
+                if count:
+                    used_palette_colors.append(i)
+
+            if optimise or max(used_palette_colors) >= len(used_palette_colors):
+                return used_palette_colors
+
+            assert im.palette is not None
+            num_palette_colors = len(im.palette.palette) // Image.getmodebands(
+                im.palette.mode
+            )
+            current_palette_size = 1 << (num_palette_colors - 1).bit_length()
+            if (
+                # check that the palette would become smaller when saved
+                len(used_palette_colors) <= current_palette_size // 2
+                # check that the palette is not already the smallest possible size
+                and current_palette_size > 2
+            ):
+                return used_palette_colors
+    return None
+
+
+def _get_color_table_size(palette_bytes: bytes) -> int:
+    # calculate the palette size for the header: the logical screen
+    # descriptor stores N, where the color table holds 2**(N+1) entries
+    if not palette_bytes:
+        return 0
+    elif len(palette_bytes) < 9:
+        return 1
+    else:
+        return math.ceil(math.log(len(palette_bytes) // 3, 2)) - 1
+
+
+def _get_header_palette(palette_bytes: bytes) -> bytes:
+    """
+    Returns the palette, null padded to the next power of 2 (*3) bytes
+    suitable for direct inclusion in the GIF header
+
+    :param palette_bytes: Unpadded palette bytes, in RGBRGB form
+    :returns: Null padded palette
+    """
+    color_table_size = _get_color_table_size(palette_bytes)
+
+    # add the missing amount of bytes
+    # the palette has to be 2<<n to be valid
+    actual_target_size_diff = (2 << color_table_size) - len(palette_bytes) // 3
+    if actual_target_size_diff > 0:
+        palette_bytes += o8(0) * 3 * actual_target_size_diff
+    return palette_bytes
+
+
+def _get_palette_bytes(im: Image.Image) -> bytes:
+    """
+    Gets the palette for inclusion in the gif header
+
+    :param im: Image object
+    :returns: Bytes, len<=768 suitable for inclusion in gif header
+    """
+    if not im.palette:
+        return b""
+
+    palette = bytes(im.palette.palette)
+    if im.palette.mode == "RGBA":
+        palette = b"".join(palette[i * 4 : i * 4 + 3] for i in range(len(palette) // 3))
+    return palette
+
+
+def _get_background(
+    im: Image.Image,
+    info_background: int | tuple[int, int, int] | tuple[int, int, int, int] | None,
+) -> int:
+    background = 0
+    if info_background:
+        if isinstance(info_background, tuple):
+            # WebPImagePlugin stores an RGBA value in info["background"]
+            # So it must be converted to the same format as GifImagePlugin's
+            # info["background"] - a global color table index
+            assert im.palette is not None
+            try:
+                background = im.palette.getcolor(info_background, im)
+            except ValueError as e:
+                if str(e) not in (
+                    # If all 256 colors are in use,
+                    # then there is no need for the
background color + "cannot allocate more than 256 colors", + # Ignore non-opaque WebP background + "cannot add non-opaque RGBA color to RGB palette", + ): + raise + else: + background = info_background + return background + + +def _get_global_header(im: Image.Image, info: dict[str, Any]) -> list[bytes]: + """Return a list of strings representing a GIF header""" + + # Header Block + # https://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp + + version = b"87a" + if im.info.get("version") == b"89a" or ( + info + and ( + "transparency" in info + or info.get("loop") is not None + or info.get("duration") + or info.get("comment") + ) + ): + version = b"89a" + + background = _get_background(im, info.get("background")) + + palette_bytes = _get_palette_bytes(im) + color_table_size = _get_color_table_size(palette_bytes) + + header = [ + b"GIF" # signature + + version # version + + o16(im.size[0]) # canvas width + + o16(im.size[1]), # canvas height + # Logical Screen Descriptor + # size of global color table + global color table flag + o8(color_table_size + 128), # packed fields + # background + reserved/aspect + o8(background) + o8(0), + # Global Color Table + _get_header_palette(palette_bytes), + ] + if info.get("loop") is not None: + header.append( + b"!" + + o8(255) # extension intro + + o8(11) + + b"NETSCAPE2.0" + + o8(3) + + o8(1) + + o16(info["loop"]) # number of loops + + o8(0) + ) + if info.get("comment"): + comment_block = b"!" + o8(254) # extension intro + + comment = info["comment"] + if isinstance(comment, str): + comment = comment.encode() + for i in range(0, len(comment), 255): + subblock = comment[i : i + 255] + comment_block += o8(len(subblock)) + subblock + + comment_block += o8(0) + header.append(comment_block) + return header + + +def _write_frame_data( + fp: IO[bytes], + im_frame: Image.Image, + offset: tuple[int, int], + params: dict[str, Any], +) -> None: + try: + im_frame.encoderinfo = params + + # local image header + _write_local_header(fp, im_frame, offset, 0) + + ImageFile._save( + im_frame, + fp, + [ImageFile._Tile("gif", (0, 0) + im_frame.size, 0, RAWMODE[im_frame.mode])], + ) + + fp.write(b"\0") # end of image data + finally: + del im_frame.encoderinfo + + +# -------------------------------------------------------------------- +# Legacy GIF utilities + + +def getheader( + im: Image.Image, palette: _Palette | None = None, info: dict[str, Any] | None = None +) -> tuple[list[bytes], list[int] | None]: + """ + Legacy Method to get Gif data from image. + + Warning:: May modify image data. + + :param im: Image object + :param palette: bytes object containing the source palette, or .... + :param info: encoderinfo + :returns: tuple of(list of header items, optimized palette) + + """ + if info is None: + info = {} + + used_palette_colors = _get_optimize(im, info) + + if "background" not in info and "background" in im.info: + info["background"] = im.info["background"] + + im_mod = _normalize_palette(im, palette, info) + im.palette = im_mod.palette + im.im = im_mod.im + header = _get_global_header(im, info) + + return header, used_palette_colors + + +def getdata( + im: Image.Image, offset: tuple[int, int] = (0, 0), **params: Any +) -> list[bytes]: + """ + Legacy Method + + Return a list of strings representing this image. + The first string is a local image header, the rest contains + encoded image data. + + To specify duration, add the time in milliseconds, + e.g. 
``getdata(im_frame, duration=1000)`` + + :param im: Image object + :param offset: Tuple of (x, y) pixels. Defaults to (0, 0) + :param \\**params: e.g. duration or other encoder info parameters + :returns: List of bytes containing GIF encoded frame data + + """ + from io import BytesIO + + class Collector(BytesIO): + data = [] + + def write(self, data: Buffer) -> int: + self.data.append(data) + return len(data) + + im.load() # make sure raster data is available + + fp = Collector() + + _write_frame_data(fp, im, offset, params) + + return fp.data + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(GifImageFile.format, GifImageFile, _accept) +Image.register_save(GifImageFile.format, _save) +Image.register_save_all(GifImageFile.format, _save_all) +Image.register_extension(GifImageFile.format, ".gif") +Image.register_mime(GifImageFile.format, "image/gif") + +# +# Uncomment the following line if you wish to use NETPBM/PBMPLUS +# instead of the built-in "uncompressed" GIF encoder + +# Image.register_save(GifImageFile.format, _save_netpbm) diff --git a/py311/lib/python3.11/site-packages/PIL/GimpGradientFile.py b/py311/lib/python3.11/site-packages/PIL/GimpGradientFile.py new file mode 100644 index 0000000000000000000000000000000000000000..ec62f8e4ebc37d3aef9b171a0d03b7deeab702c4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/GimpGradientFile.py @@ -0,0 +1,149 @@ +# +# Python Imaging Library +# $Id$ +# +# stuff to read (and render) GIMP gradient files +# +# History: +# 97-08-23 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + +""" +Stuff to translate curve segments to palette values (derived from +the corresponding code in GIMP, written by Federico Mena Quintero. +See the GIMP distribution for more information.) 
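+
+A gradient is a list of segments; each segment blends two RGBA endpoints
+across an interval [x0, x1], with the blend factor shaped by one of the
+curve functions below (linear, curved, sine, spherical).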
+""" +from __future__ import annotations + +from math import log, pi, sin, sqrt +from typing import IO, Callable + +from ._binary import o8 + +EPSILON = 1e-10 +"""""" # Enable auto-doc for data member + + +def linear(middle: float, pos: float) -> float: + if pos <= middle: + if middle < EPSILON: + return 0.0 + else: + return 0.5 * pos / middle + else: + pos = pos - middle + middle = 1.0 - middle + if middle < EPSILON: + return 1.0 + else: + return 0.5 + 0.5 * pos / middle + + +def curved(middle: float, pos: float) -> float: + return pos ** (log(0.5) / log(max(middle, EPSILON))) + + +def sine(middle: float, pos: float) -> float: + return (sin((-pi / 2.0) + pi * linear(middle, pos)) + 1.0) / 2.0 + + +def sphere_increasing(middle: float, pos: float) -> float: + return sqrt(1.0 - (linear(middle, pos) - 1.0) ** 2) + + +def sphere_decreasing(middle: float, pos: float) -> float: + return 1.0 - sqrt(1.0 - linear(middle, pos) ** 2) + + +SEGMENTS = [linear, curved, sine, sphere_increasing, sphere_decreasing] +"""""" # Enable auto-doc for data member + + +class GradientFile: + gradient: ( + list[ + tuple[ + float, + float, + float, + list[float], + list[float], + Callable[[float, float], float], + ] + ] + | None + ) = None + + def getpalette(self, entries: int = 256) -> tuple[bytes, str]: + assert self.gradient is not None + palette = [] + + ix = 0 + x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix] + + for i in range(entries): + x = i / (entries - 1) + + while x1 < x: + ix += 1 + x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix] + + w = x1 - x0 + + if w < EPSILON: + scale = segment(0.5, 0.5) + else: + scale = segment((xm - x0) / w, (x - x0) / w) + + # expand to RGBA + r = o8(int(255 * ((rgb1[0] - rgb0[0]) * scale + rgb0[0]) + 0.5)) + g = o8(int(255 * ((rgb1[1] - rgb0[1]) * scale + rgb0[1]) + 0.5)) + b = o8(int(255 * ((rgb1[2] - rgb0[2]) * scale + rgb0[2]) + 0.5)) + a = o8(int(255 * ((rgb1[3] - rgb0[3]) * scale + rgb0[3]) + 0.5)) + + # add to palette + palette.append(r + g + b + a) + + return b"".join(palette), "RGBA" + + +class GimpGradientFile(GradientFile): + """File handler for GIMP's gradient format.""" + + def __init__(self, fp: IO[bytes]) -> None: + if not fp.readline().startswith(b"GIMP Gradient"): + msg = "not a GIMP gradient file" + raise SyntaxError(msg) + + line = fp.readline() + + # GIMP 1.2 gradient files don't contain a name, but GIMP 1.3 files do + if line.startswith(b"Name: "): + line = fp.readline().strip() + + count = int(line) + + self.gradient = [] + + for i in range(count): + s = fp.readline().split() + w = [float(x) for x in s[:11]] + + x0, x1 = w[0], w[2] + xm = w[1] + rgb0 = w[3:7] + rgb1 = w[7:11] + + segment = SEGMENTS[int(s[11])] + cspace = int(s[12]) + + if cspace != 0: + msg = "cannot handle HSV colour space" + raise OSError(msg) + + self.gradient.append((x0, x1, xm, rgb0, rgb1, segment)) diff --git a/py311/lib/python3.11/site-packages/PIL/GimpPaletteFile.py b/py311/lib/python3.11/site-packages/PIL/GimpPaletteFile.py new file mode 100644 index 0000000000000000000000000000000000000000..379ffd739182c4caaad3bce92e0e8344ced2eef4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/GimpPaletteFile.py @@ -0,0 +1,72 @@ +# +# Python Imaging Library +# $Id$ +# +# stuff to read GIMP palette files +# +# History: +# 1997-08-23 fl Created +# 2004-09-07 fl Support GIMP 2.0 palette files. +# +# Copyright (c) Secret Labs AB 1997-2004. All rights reserved. +# Copyright (c) Fredrik Lundh 1997-2004. +# +# See the README file for information on usage and redistribution. 
+# +from __future__ import annotations + +import re +from io import BytesIO +from typing import IO + + +class GimpPaletteFile: + """File handler for GIMP's palette format.""" + + rawmode = "RGB" + + def _read(self, fp: IO[bytes], limit: bool = True) -> None: + if not fp.readline().startswith(b"GIMP Palette"): + msg = "not a GIMP palette file" + raise SyntaxError(msg) + + palette: list[int] = [] + i = 0 + while True: + if limit and i == 256 + 3: + break + + i += 1 + s = fp.readline() + if not s: + break + + # skip fields and comment lines + if re.match(rb"\w+:|#", s): + continue + if limit and len(s) > 100: + msg = "bad palette file" + raise SyntaxError(msg) + + v = s.split(maxsplit=3) + if len(v) < 3: + msg = "bad palette entry" + raise ValueError(msg) + + palette += (int(v[i]) for i in range(3)) + if limit and len(palette) == 768: + break + + self.palette = bytes(palette) + + def __init__(self, fp: IO[bytes]) -> None: + self._read(fp) + + @classmethod + def frombytes(cls, data: bytes) -> GimpPaletteFile: + self = cls.__new__(cls) + self._read(BytesIO(data), False) + return self + + def getpalette(self) -> tuple[bytes, str]: + return self.palette, self.rawmode diff --git a/py311/lib/python3.11/site-packages/PIL/GribStubImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/GribStubImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..439fc5a3eda8414add95d53660eca8d11bf6ab8f --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/GribStubImagePlugin.py @@ -0,0 +1,75 @@ +# +# The Python Imaging Library +# $Id$ +# +# GRIB stub adapter +# +# Copyright (c) 1996-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import os +from typing import IO + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler: ImageFile.StubHandler | None) -> None: + """ + Install application-specific GRIB image handler. + + :param handler: Handler object. 
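+
+    The handler should implement the ImageFile.StubHandler interface: its
+    open() method is called once a GRIB file has been identified, and a
+    save() method is needed only when saving through this plugin.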
+ """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(b"GRIB") and prefix[7] == 1 + + +class GribStubImageFile(ImageFile.StubImageFile): + format = "GRIB" + format_description = "GRIB" + + def _open(self) -> None: + if not _accept(self.fp.read(8)): + msg = "Not a GRIB file" + raise SyntaxError(msg) + + self.fp.seek(-8, os.SEEK_CUR) + + # make something up + self._mode = "F" + self._size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self) -> ImageFile.StubHandler | None: + return _handler + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + if _handler is None or not hasattr(_handler, "save"): + msg = "GRIB save handler not installed" + raise OSError(msg) + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(GribStubImageFile.format, GribStubImageFile, _accept) +Image.register_save(GribStubImageFile.format, _save) + +Image.register_extension(GribStubImageFile.format, ".grib") diff --git a/py311/lib/python3.11/site-packages/PIL/Hdf5StubImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/Hdf5StubImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..76e640f15abfe60a56a571380133a0463c104035 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/Hdf5StubImagePlugin.py @@ -0,0 +1,75 @@ +# +# The Python Imaging Library +# $Id$ +# +# HDF5 stub adapter +# +# Copyright (c) 2000-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import os +from typing import IO + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler: ImageFile.StubHandler | None) -> None: + """ + Install application-specific HDF5 image handler. + + :param handler: Handler object. 
+ """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(b"\x89HDF\r\n\x1a\n") + + +class HDF5StubImageFile(ImageFile.StubImageFile): + format = "HDF5" + format_description = "HDF5" + + def _open(self) -> None: + if not _accept(self.fp.read(8)): + msg = "Not an HDF file" + raise SyntaxError(msg) + + self.fp.seek(-8, os.SEEK_CUR) + + # make something up + self._mode = "F" + self._size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self) -> ImageFile.StubHandler | None: + return _handler + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + if _handler is None or not hasattr(_handler, "save"): + msg = "HDF5 save handler not installed" + raise OSError(msg) + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(HDF5StubImageFile.format, HDF5StubImageFile, _accept) +Image.register_save(HDF5StubImageFile.format, _save) + +Image.register_extensions(HDF5StubImageFile.format, [".h5", ".hdf"]) diff --git a/py311/lib/python3.11/site-packages/PIL/IcnsImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/IcnsImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..5a88429e5e4b3be4e57ce85b70fdaa4c7927fe09 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/IcnsImagePlugin.py @@ -0,0 +1,411 @@ +# +# The Python Imaging Library. +# $Id$ +# +# macOS icns file decoder, based on icns.py by Bob Ippolito. +# +# history: +# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies. +# 2020-04-04 Allow saving on all operating systems. +# +# Copyright (c) 2004 by Bob Ippolito. +# Copyright (c) 2004 by Secret Labs. +# Copyright (c) 2004 by Fredrik Lundh. +# Copyright (c) 2014 by Alastair Houghton. +# Copyright (c) 2020 by Pan Jing. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import io +import os +import struct +import sys +from typing import IO + +from . import Image, ImageFile, PngImagePlugin, features +from ._deprecate import deprecate + +enable_jpeg2k = features.check_codec("jpg_2000") +if enable_jpeg2k: + from . import Jpeg2KImagePlugin + +MAGIC = b"icns" +HEADERSIZE = 8 + + +def nextheader(fobj: IO[bytes]) -> tuple[bytes, int]: + return struct.unpack(">4sI", fobj.read(HEADERSIZE)) + + +def read_32t( + fobj: IO[bytes], start_length: tuple[int, int], size: tuple[int, int, int] +) -> dict[str, Image.Image]: + # The 128x128 icon seems to have an extra header for some reason. + (start, length) = start_length + fobj.seek(start) + sig = fobj.read(4) + if sig != b"\x00\x00\x00\x00": + msg = "Unknown signature, expecting 0x00000000" + raise SyntaxError(msg) + return read_32(fobj, (start + 4, length - 4), size) + + +def read_32( + fobj: IO[bytes], start_length: tuple[int, int], size: tuple[int, int, int] +) -> dict[str, Image.Image]: + """ + Read a 32bit RGB icon resource. Seems to be either uncompressed or + an RLE packbits-like scheme. 
+ """ + (start, length) = start_length + fobj.seek(start) + pixel_size = (size[0] * size[2], size[1] * size[2]) + sizesq = pixel_size[0] * pixel_size[1] + if length == sizesq * 3: + # uncompressed ("RGBRGBGB") + indata = fobj.read(length) + im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1) + else: + # decode image + im = Image.new("RGB", pixel_size, None) + for band_ix in range(3): + data = [] + bytesleft = sizesq + while bytesleft > 0: + byte = fobj.read(1) + if not byte: + break + byte_int = byte[0] + if byte_int & 0x80: + blocksize = byte_int - 125 + byte = fobj.read(1) + for i in range(blocksize): + data.append(byte) + else: + blocksize = byte_int + 1 + data.append(fobj.read(blocksize)) + bytesleft -= blocksize + if bytesleft <= 0: + break + if bytesleft != 0: + msg = f"Error reading channel [{repr(bytesleft)} left]" + raise SyntaxError(msg) + band = Image.frombuffer("L", pixel_size, b"".join(data), "raw", "L", 0, 1) + im.im.putband(band.im, band_ix) + return {"RGB": im} + + +def read_mk( + fobj: IO[bytes], start_length: tuple[int, int], size: tuple[int, int, int] +) -> dict[str, Image.Image]: + # Alpha masks seem to be uncompressed + start = start_length[0] + fobj.seek(start) + pixel_size = (size[0] * size[2], size[1] * size[2]) + sizesq = pixel_size[0] * pixel_size[1] + band = Image.frombuffer("L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1) + return {"A": band} + + +def read_png_or_jpeg2000( + fobj: IO[bytes], start_length: tuple[int, int], size: tuple[int, int, int] +) -> dict[str, Image.Image]: + (start, length) = start_length + fobj.seek(start) + sig = fobj.read(12) + + im: Image.Image + if sig.startswith(b"\x89PNG\x0d\x0a\x1a\x0a"): + fobj.seek(start) + im = PngImagePlugin.PngImageFile(fobj) + Image._decompression_bomb_check(im.size) + return {"RGBA": im} + elif ( + sig.startswith((b"\xff\x4f\xff\x51", b"\x0d\x0a\x87\x0a")) + or sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a" + ): + if not enable_jpeg2k: + msg = ( + "Unsupported icon subimage format (rebuild PIL " + "with JPEG 2000 support to fix this)" + ) + raise ValueError(msg) + # j2k, jpc or j2c + fobj.seek(start) + jp2kstream = fobj.read(length) + f = io.BytesIO(jp2kstream) + im = Jpeg2KImagePlugin.Jpeg2KImageFile(f) + Image._decompression_bomb_check(im.size) + if im.mode != "RGBA": + im = im.convert("RGBA") + return {"RGBA": im} + else: + msg = "Unsupported icon subimage format" + raise ValueError(msg) + + +class IcnsFile: + SIZES = { + (512, 512, 2): [(b"ic10", read_png_or_jpeg2000)], + (512, 512, 1): [(b"ic09", read_png_or_jpeg2000)], + (256, 256, 2): [(b"ic14", read_png_or_jpeg2000)], + (256, 256, 1): [(b"ic08", read_png_or_jpeg2000)], + (128, 128, 2): [(b"ic13", read_png_or_jpeg2000)], + (128, 128, 1): [ + (b"ic07", read_png_or_jpeg2000), + (b"it32", read_32t), + (b"t8mk", read_mk), + ], + (64, 64, 1): [(b"icp6", read_png_or_jpeg2000)], + (32, 32, 2): [(b"ic12", read_png_or_jpeg2000)], + (48, 48, 1): [(b"ih32", read_32), (b"h8mk", read_mk)], + (32, 32, 1): [ + (b"icp5", read_png_or_jpeg2000), + (b"il32", read_32), + (b"l8mk", read_mk), + ], + (16, 16, 2): [(b"ic11", read_png_or_jpeg2000)], + (16, 16, 1): [ + (b"icp4", read_png_or_jpeg2000), + (b"is32", read_32), + (b"s8mk", read_mk), + ], + } + + def __init__(self, fobj: IO[bytes]) -> None: + """ + fobj is a file-like object as an icns resource + """ + # signature : (start, length) + self.dct = {} + self.fobj = fobj + sig, filesize = nextheader(fobj) + if not _accept(sig): + msg = "not an icns file" + raise SyntaxError(msg) + i = HEADERSIZE + 
while i < filesize: + sig, blocksize = nextheader(fobj) + if blocksize <= 0: + msg = "invalid block header" + raise SyntaxError(msg) + i += HEADERSIZE + blocksize -= HEADERSIZE + self.dct[sig] = (i, blocksize) + fobj.seek(blocksize, io.SEEK_CUR) + i += blocksize + + def itersizes(self) -> list[tuple[int, int, int]]: + sizes = [] + for size, fmts in self.SIZES.items(): + for fmt, reader in fmts: + if fmt in self.dct: + sizes.append(size) + break + return sizes + + def bestsize(self) -> tuple[int, int, int]: + sizes = self.itersizes() + if not sizes: + msg = "No 32bit icon resources found" + raise SyntaxError(msg) + return max(sizes) + + def dataforsize(self, size: tuple[int, int, int]) -> dict[str, Image.Image]: + """ + Get an icon resource as {channel: array}. Note that + the arrays are bottom-up like windows bitmaps and will likely + need to be flipped or transposed in some way. + """ + dct = {} + for code, reader in self.SIZES[size]: + desc = self.dct.get(code) + if desc is not None: + dct.update(reader(self.fobj, desc, size)) + return dct + + def getimage( + self, size: tuple[int, int] | tuple[int, int, int] | None = None + ) -> Image.Image: + if size is None: + size = self.bestsize() + elif len(size) == 2: + size = (size[0], size[1], 1) + channels = self.dataforsize(size) + + im = channels.get("RGBA") + if im: + return im + + im = channels["RGB"].copy() + try: + im.putalpha(channels["A"]) + except KeyError: + pass + return im + + +## +# Image plugin for Mac OS icons. + + +class IcnsImageFile(ImageFile.ImageFile): + """ + PIL image support for Mac OS .icns files. + Chooses the best resolution, but will possibly load + a different size image if you mutate the size attribute + before calling 'load'. + + The info dictionary has a key 'sizes' that is a list + of sizes that the icns file has. + """ + + format = "ICNS" + format_description = "Mac OS icns resource" + + def _open(self) -> None: + self.icns = IcnsFile(self.fp) + self._mode = "RGBA" + self.info["sizes"] = self.icns.itersizes() + self.best_size = self.icns.bestsize() + self.size = ( + self.best_size[0] * self.best_size[2], + self.best_size[1] * self.best_size[2], + ) + + @property # type: ignore[override] + def size(self) -> tuple[int, int] | tuple[int, int, int]: + return self._size + + @size.setter + def size(self, value: tuple[int, int] | tuple[int, int, int]) -> None: + if len(value) == 3: + deprecate("Setting size to (width, height, scale)", 12, "load(scale)") + if value in self.info["sizes"]: + self._size = value # type: ignore[assignment] + return + else: + # Check that a matching size exists, + # or that there is a scale that would create a size that matches + for size in self.info["sizes"]: + simple_size = size[0] * size[2], size[1] * size[2] + scale = simple_size[0] // value[0] + if simple_size[1] / value[1] == scale: + self._size = value + return + msg = "This is not one of the allowed sizes of this image" + raise ValueError(msg) + + def load(self, scale: int | None = None) -> Image.core.PixelAccess | None: + if scale is not None or len(self.size) == 3: + if scale is None and len(self.size) == 3: + scale = self.size[2] + assert scale is not None + width, height = self.size[:2] + self.size = width * scale, height * scale + self.best_size = width, height, scale + + px = Image.Image.load(self) + if self._im is not None and self.im.size == self.size: + # Already loaded + return px + self.load_prepare() + # This is likely NOT the best way to do it, but whatever. 
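+        # Decode the chosen subimage into a scratch Image, then adopt its
+        # pixel data, mode and size as this file's own below.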
+ im = self.icns.getimage(self.best_size) + + # If this is a PNG or JPEG 2000, it won't be loaded yet + px = im.load() + + self.im = im.im + self._mode = im.mode + self.size = im.size + + return px + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + """ + Saves the image as a series of PNG files, + that are then combined into a .icns file. + """ + if hasattr(fp, "flush"): + fp.flush() + + sizes = { + b"ic07": 128, + b"ic08": 256, + b"ic09": 512, + b"ic10": 1024, + b"ic11": 32, + b"ic12": 64, + b"ic13": 256, + b"ic14": 512, + } + provided_images = {im.width: im for im in im.encoderinfo.get("append_images", [])} + size_streams = {} + for size in set(sizes.values()): + image = ( + provided_images[size] + if size in provided_images + else im.resize((size, size)) + ) + + temp = io.BytesIO() + image.save(temp, "png") + size_streams[size] = temp.getvalue() + + entries = [] + for type, size in sizes.items(): + stream = size_streams[size] + entries.append((type, HEADERSIZE + len(stream), stream)) + + # Header + fp.write(MAGIC) + file_length = HEADERSIZE # Header + file_length += HEADERSIZE + 8 * len(entries) # TOC + file_length += sum(entry[1] for entry in entries) + fp.write(struct.pack(">i", file_length)) + + # TOC + fp.write(b"TOC ") + fp.write(struct.pack(">i", HEADERSIZE + len(entries) * HEADERSIZE)) + for entry in entries: + fp.write(entry[0]) + fp.write(struct.pack(">i", entry[1])) + + # Data + for entry in entries: + fp.write(entry[0]) + fp.write(struct.pack(">i", entry[1])) + fp.write(entry[2]) + + if hasattr(fp, "flush"): + fp.flush() + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(MAGIC) + + +Image.register_open(IcnsImageFile.format, IcnsImageFile, _accept) +Image.register_extension(IcnsImageFile.format, ".icns") + +Image.register_save(IcnsImageFile.format, _save) +Image.register_mime(IcnsImageFile.format, "image/icns") + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("Syntax: python3 IcnsImagePlugin.py [file]") + sys.exit() + + with open(sys.argv[1], "rb") as fp: + imf = IcnsImageFile(fp) + for size in imf.info["sizes"]: + width, height, scale = imf.size = size + imf.save(f"out-{width}-{height}-{scale}.png") + with Image.open(sys.argv[1]) as im: + im.save("out.png") + if sys.platform == "windows": + os.startfile("out.png") diff --git a/py311/lib/python3.11/site-packages/PIL/IcoImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/IcoImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..bd35ac890e6cf824e9c890404416d871e5b94f7c --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/IcoImagePlugin.py @@ -0,0 +1,381 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Windows Icon support for PIL +# +# History: +# 96-05-27 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + +# This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis +# . +# https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki +# +# Icon format references: +# * https://en.wikipedia.org/wiki/ICO_(file_format) +# * https://msdn.microsoft.com/en-us/library/ms997538.aspx +from __future__ import annotations + +import warnings +from io import BytesIO +from math import ceil, log +from typing import IO, NamedTuple + +from . 
import BmpImagePlugin, Image, ImageFile, PngImagePlugin +from ._binary import i16le as i16 +from ._binary import i32le as i32 +from ._binary import o8 +from ._binary import o16le as o16 +from ._binary import o32le as o32 + +# +# -------------------------------------------------------------------- + +_MAGIC = b"\0\0\1\0" + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + fp.write(_MAGIC) # (2+2) + bmp = im.encoderinfo.get("bitmap_format") == "bmp" + sizes = im.encoderinfo.get( + "sizes", + [(16, 16), (24, 24), (32, 32), (48, 48), (64, 64), (128, 128), (256, 256)], + ) + frames = [] + provided_ims = [im] + im.encoderinfo.get("append_images", []) + width, height = im.size + for size in sorted(set(sizes)): + if size[0] > width or size[1] > height or size[0] > 256 or size[1] > 256: + continue + + for provided_im in provided_ims: + if provided_im.size != size: + continue + frames.append(provided_im) + if bmp: + bits = BmpImagePlugin.SAVE[provided_im.mode][1] + bits_used = [bits] + for other_im in provided_ims: + if other_im.size != size: + continue + bits = BmpImagePlugin.SAVE[other_im.mode][1] + if bits not in bits_used: + # Another image has been supplied for this size + # with a different bit depth + frames.append(other_im) + bits_used.append(bits) + break + else: + # TODO: invent a more convenient method for proportional scalings + frame = provided_im.copy() + frame.thumbnail(size, Image.Resampling.LANCZOS, reducing_gap=None) + frames.append(frame) + fp.write(o16(len(frames))) # idCount(2) + offset = fp.tell() + len(frames) * 16 + for frame in frames: + width, height = frame.size + # 0 means 256 + fp.write(o8(width if width < 256 else 0)) # bWidth(1) + fp.write(o8(height if height < 256 else 0)) # bHeight(1) + + bits, colors = BmpImagePlugin.SAVE[frame.mode][1:] if bmp else (32, 0) + fp.write(o8(colors)) # bColorCount(1) + fp.write(b"\0") # bReserved(1) + fp.write(b"\0\0") # wPlanes(2) + fp.write(o16(bits)) # wBitCount(2) + + image_io = BytesIO() + if bmp: + frame.save(image_io, "dib") + + if bits != 32: + and_mask = Image.new("1", size) + ImageFile._save( + and_mask, + image_io, + [ImageFile._Tile("raw", (0, 0) + size, 0, ("1", 0, -1))], + ) + else: + frame.save(image_io, "png") + image_io.seek(0) + image_bytes = image_io.read() + if bmp: + image_bytes = image_bytes[:8] + o32(height * 2) + image_bytes[12:] + bytes_len = len(image_bytes) + fp.write(o32(bytes_len)) # dwBytesInRes(4) + fp.write(o32(offset)) # dwImageOffset(4) + current = fp.tell() + fp.seek(offset) + fp.write(image_bytes) + offset = offset + bytes_len + fp.seek(current) + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(_MAGIC) + + +class IconHeader(NamedTuple): + width: int + height: int + nb_color: int + reserved: int + planes: int + bpp: int + size: int + offset: int + dim: tuple[int, int] + square: int + color_depth: int + + +class IcoFile: + def __init__(self, buf: IO[bytes]) -> None: + """ + Parse image from file-like object containing ico file data + """ + + # check magic + s = buf.read(6) + if not _accept(s): + msg = "not an ICO file" + raise SyntaxError(msg) + + self.buf = buf + self.entry = [] + + # Number of items in file + self.nb_items = i16(s, 4) + + # Get headers for each item + for i in range(self.nb_items): + s = buf.read(16) + + # See Wikipedia + width = s[0] or 256 + height = s[1] or 256 + + # No. 
of colors in image (0 if >=8bpp) + nb_color = s[2] + bpp = i16(s, 6) + icon_header = IconHeader( + width=width, + height=height, + nb_color=nb_color, + reserved=s[3], + planes=i16(s, 4), + bpp=i16(s, 6), + size=i32(s, 8), + offset=i32(s, 12), + dim=(width, height), + square=width * height, + # See Wikipedia notes about color depth. + # We need this just to differ images with equal sizes + color_depth=bpp or (nb_color != 0 and ceil(log(nb_color, 2))) or 256, + ) + + self.entry.append(icon_header) + + self.entry = sorted(self.entry, key=lambda x: x.color_depth) + # ICO images are usually squares + self.entry = sorted(self.entry, key=lambda x: x.square, reverse=True) + + def sizes(self) -> set[tuple[int, int]]: + """ + Get a set of all available icon sizes and color depths. + """ + return {(h.width, h.height) for h in self.entry} + + def getentryindex(self, size: tuple[int, int], bpp: int | bool = False) -> int: + for i, h in enumerate(self.entry): + if size == h.dim and (bpp is False or bpp == h.color_depth): + return i + return 0 + + def getimage(self, size: tuple[int, int], bpp: int | bool = False) -> Image.Image: + """ + Get an image from the icon + """ + return self.frame(self.getentryindex(size, bpp)) + + def frame(self, idx: int) -> Image.Image: + """ + Get an image from frame idx + """ + + header = self.entry[idx] + + self.buf.seek(header.offset) + data = self.buf.read(8) + self.buf.seek(header.offset) + + im: Image.Image + if data[:8] == PngImagePlugin._MAGIC: + # png frame + im = PngImagePlugin.PngImageFile(self.buf) + Image._decompression_bomb_check(im.size) + else: + # XOR + AND mask bmp frame + im = BmpImagePlugin.DibImageFile(self.buf) + Image._decompression_bomb_check(im.size) + + # change tile dimension to only encompass XOR image + im._size = (im.size[0], int(im.size[1] / 2)) + d, e, o, a = im.tile[0] + im.tile[0] = ImageFile._Tile(d, (0, 0) + im.size, o, a) + + # figure out where AND mask image starts + if header.bpp == 32: + # 32-bit color depth icon image allows semitransparent areas + # PIL's DIB format ignores transparency bits, recover them. + # The DIB is packed in BGRX byte order where X is the alpha + # channel. + + # Back up to start of bmp data + self.buf.seek(o) + # extract every 4th byte (eg. 3,7,11,15,...) 
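+                # (the DIB pixel block is width * height * 4 bytes of BGRX,
+                # so the [3::4] slice keeps exactly the alpha bytes)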
+ alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4] + + # convert to an 8bpp grayscale image + try: + mask = Image.frombuffer( + "L", # 8bpp + im.size, # (w, h) + alpha_bytes, # source chars + "raw", # raw decoder + ("L", 0, -1), # 8bpp inverted, unpadded, reversed + ) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + mask = None + else: + raise + else: + # get AND image from end of bitmap + w = im.size[0] + if (w % 32) > 0: + # bitmap row data is aligned to word boundaries + w += 32 - (im.size[0] % 32) + + # the total mask data is + # padded row size * height / bits per char + + total_bytes = int((w * im.size[1]) / 8) + and_mask_offset = header.offset + header.size - total_bytes + + self.buf.seek(and_mask_offset) + mask_data = self.buf.read(total_bytes) + + # convert raw data to image + try: + mask = Image.frombuffer( + "1", # 1 bpp + im.size, # (w, h) + mask_data, # source chars + "raw", # raw decoder + ("1;I", int(w / 8), -1), # 1bpp inverted, padded, reversed + ) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + mask = None + else: + raise + + # now we have two images, im is XOR image and mask is AND image + + # apply mask image as alpha channel + if mask: + im = im.convert("RGBA") + im.putalpha(mask) + + return im + + +## +# Image plugin for Windows Icon files. + + +class IcoImageFile(ImageFile.ImageFile): + """ + PIL read-only image support for Microsoft Windows .ico files. + + By default the largest resolution image in the file will be loaded. This + can be changed by altering the 'size' attribute before calling 'load'. + + The info dictionary has a key 'sizes' that is a list of the sizes available + in the icon file. + + Handles classic, XP and Vista icon formats. + + When saving, PNG compression is used. Support for this was only added in + Windows Vista. If you are unable to view the icon in Windows, convert the + image to "RGBA" mode before saving. + + This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis + . + https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki + """ + + format = "ICO" + format_description = "Windows Icon" + + def _open(self) -> None: + self.ico = IcoFile(self.fp) + self.info["sizes"] = self.ico.sizes() + self.size = self.ico.entry[0].dim + self.load() + + @property + def size(self) -> tuple[int, int]: + return self._size + + @size.setter + def size(self, value: tuple[int, int]) -> None: + if value not in self.info["sizes"]: + msg = "This is not one of the allowed sizes of this image" + raise ValueError(msg) + self._size = value + + def load(self) -> Image.core.PixelAccess | None: + if self._im is not None and self.im.size == self.size: + # Already loaded + return Image.Image.load(self) + im = self.ico.getimage(self.size) + # if tile is PNG, it won't really be loaded yet + im.load() + self.im = im.im + self._mode = im.mode + if im.palette: + self.palette = im.palette + if im.size != self.size: + warnings.warn("Image was not the expected size") + + index = self.ico.getentryindex(self.size) + sizes = list(self.info["sizes"]) + sizes[index] = im.size + self.info["sizes"] = set(sizes) + + self.size = im.size + return Image.Image.load(self) + + def load_seek(self, pos: int) -> None: + # Flag the ImageFile.Parser so that it + # just does all the decode at the end. 
+ pass + + +# +# -------------------------------------------------------------------- + + +Image.register_open(IcoImageFile.format, IcoImageFile, _accept) +Image.register_save(IcoImageFile.format, _save) +Image.register_extension(IcoImageFile.format, ".ico") + +Image.register_mime(IcoImageFile.format, "image/x-icon") diff --git a/py311/lib/python3.11/site-packages/PIL/ImImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/ImImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..71b9996780ce8dfc420670b5732216f934a1f677 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImImagePlugin.py @@ -0,0 +1,389 @@ +# +# The Python Imaging Library. +# $Id$ +# +# IFUNC IM file handling for PIL +# +# history: +# 1995-09-01 fl Created. +# 1997-01-03 fl Save palette images +# 1997-01-08 fl Added sequence support +# 1997-01-23 fl Added P and RGB save support +# 1997-05-31 fl Read floating point images +# 1997-06-22 fl Save floating point images +# 1997-08-27 fl Read and save 1-bit images +# 1998-06-25 fl Added support for RGB+LUT images +# 1998-07-02 fl Added support for YCC images +# 1998-07-15 fl Renamed offset attribute to avoid name clash +# 1998-12-29 fl Added I;16 support +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7) +# 2003-09-26 fl Added LA/PA support +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-2001 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import os +import re +from typing import IO, Any + +from . import Image, ImageFile, ImagePalette +from ._util import DeferredError + +# -------------------------------------------------------------------- +# Standard tags + +COMMENT = "Comment" +DATE = "Date" +EQUIPMENT = "Digitalization equipment" +FRAMES = "File size (no of images)" +LUT = "Lut" +NAME = "Name" +SCALE = "Scale (x,y)" +SIZE = "Image size (x*y)" +MODE = "Image type" + +TAGS = { + COMMENT: 0, + DATE: 0, + EQUIPMENT: 0, + FRAMES: 0, + LUT: 0, + NAME: 0, + SCALE: 0, + SIZE: 0, + MODE: 0, +} + +OPEN = { + # ifunc93/p3cfunc formats + "0 1 image": ("1", "1"), + "L 1 image": ("1", "1"), + "Greyscale image": ("L", "L"), + "Grayscale image": ("L", "L"), + "RGB image": ("RGB", "RGB;L"), + "RLB image": ("RGB", "RLB"), + "RYB image": ("RGB", "RLB"), + "B1 image": ("1", "1"), + "B2 image": ("P", "P;2"), + "B4 image": ("P", "P;4"), + "X 24 image": ("RGB", "RGB"), + "L 32 S image": ("I", "I;32"), + "L 32 F image": ("F", "F;32"), + # old p3cfunc formats + "RGB3 image": ("RGB", "RGB;T"), + "RYB3 image": ("RGB", "RYB;T"), + # extensions + "LA image": ("LA", "LA;L"), + "PA image": ("LA", "PA;L"), + "RGBA image": ("RGBA", "RGBA;L"), + "RGBX image": ("RGB", "RGBX;L"), + "CMYK image": ("CMYK", "CMYK;L"), + "YCC image": ("YCbCr", "YCbCr;L"), +} + +# ifunc95 extensions +for i in ["8", "8S", "16", "16S", "32", "32F"]: + OPEN[f"L {i} image"] = ("F", f"F;{i}") + OPEN[f"L*{i} image"] = ("F", f"F;{i}") +for i in ["16", "16L", "16B"]: + OPEN[f"L {i} image"] = (f"I;{i}", f"I;{i}") + OPEN[f"L*{i} image"] = (f"I;{i}", f"I;{i}") +for i in ["32S"]: + OPEN[f"L {i} image"] = ("I", f"I;{i}") + OPEN[f"L*{i} image"] = ("I", f"I;{i}") +for j in range(2, 33): + OPEN[f"L*{j} image"] = ("F", f"F;{j}") + + +# -------------------------------------------------------------------- +# Read IM directory + +split = re.compile(rb"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$") + + +def number(s: Any) -> float: + try: + return int(s) + except ValueError: + return float(s) + + +## +# Image 
plugin for the IFUNC IM file format.
+
+
+class ImImageFile(ImageFile.ImageFile):
+    format = "IM"
+    format_description = "IFUNC Image Memory"
+    _close_exclusive_fp_after_loading = False
+
+    def _open(self) -> None:
+        # Quick rejection: if there's not an LF among the first
+        # 100 bytes, this is (probably) not a text header.
+
+        if b"\n" not in self.fp.read(100):
+            msg = "not an IM file"
+            raise SyntaxError(msg)
+        self.fp.seek(0)
+
+        n = 0
+
+        # Default values
+        self.info[MODE] = "L"
+        self.info[SIZE] = (512, 512)
+        self.info[FRAMES] = 1
+
+        self.rawmode = "L"
+
+        while True:
+            s = self.fp.read(1)
+
+            # Some versions of IFUNC use \n\r instead of \r\n...
+            if s == b"\r":
+                continue
+
+            if not s or s == b"\0" or s == b"\x1a":
+                break
+
+            # FIXME: this may read whole file if not a text file
+            s = s + self.fp.readline()
+
+            if len(s) > 100:
+                msg = "not an IM file"
+                raise SyntaxError(msg)
+
+            if s.endswith(b"\r\n"):
+                s = s[:-2]
+            elif s.endswith(b"\n"):
+                s = s[:-1]
+
+            try:
+                m = split.match(s)
+            except re.error as e:
+                msg = "not an IM file"
+                raise SyntaxError(msg) from e
+
+            if m:
+                k, v = m.group(1, 2)
+
+                # Don't know if this is the correct encoding,
+                # but a decent guess (I guess)
+                k = k.decode("latin-1", "replace")
+                v = v.decode("latin-1", "replace")
+
+                # Convert value as appropriate
+                if k in [FRAMES, SCALE, SIZE]:
+                    v = v.replace("*", ",")
+                    v = tuple(map(number, v.split(",")))
+                    if len(v) == 1:
+                        v = v[0]
+                elif k == MODE and v in OPEN:
+                    v, self.rawmode = OPEN[v]
+
+                # Add to dictionary. Note that COMMENT tags are
+                # combined into a list of strings.
+                if k == COMMENT:
+                    if k in self.info:
+                        self.info[k].append(v)
+                    else:
+                        self.info[k] = [v]
+                else:
+                    self.info[k] = v
+
+                if k in TAGS:
+                    n += 1
+
+            else:
+                msg = f"Syntax error in IM header: {s.decode('ascii', 'replace')}"
+                raise SyntaxError(msg)
+
+        if not n:
+            msg = "Not an IM file"
+            raise SyntaxError(msg)
+
+        # Basic attributes
+        self._size = self.info[SIZE]
+        self._mode = self.info[MODE]
+
+        # Skip forward to start of image data
+        while s and not s.startswith(b"\x1a"):
+            s = self.fp.read(1)
+            if not s:
+                msg = "File truncated"
+                raise SyntaxError(msg)
+
+        if LUT in self.info:
+            # convert lookup table to palette or lut attribute
+            palette = self.fp.read(768)
+            greyscale = 1  # greyscale palette
+            linear = 1  # linear greyscale palette
+            for i in range(256):
+                if palette[i] == palette[i + 256] == palette[i + 512]:
+                    if palette[i] != i:
+                        linear = 0
+                else:
+                    greyscale = 0
+            if self.mode in ["L", "LA", "P", "PA"]:
+                if greyscale:
+                    if not linear:
+                        self.lut = list(palette[:256])
+                else:
+                    if self.mode in ["L", "P"]:
+                        self._mode = self.rawmode = "P"
+                    elif self.mode in ["LA", "PA"]:
+                        self._mode = "PA"
+                        self.rawmode = "PA;L"
+                    self.palette = ImagePalette.raw("RGB;L", palette)
+            elif self.mode == "RGB":
+                if not greyscale or not linear:
+                    self.lut = list(palette)
+
+        self.frame = 0
+
+        self.__offset = offs = self.fp.tell()
+
+        self._fp = self.fp  # FIXME: hack
+
+        if self.rawmode.startswith("F;"):
+            # ifunc95 formats
+            try:
+                # use bit decoder (if necessary)
+                bits = int(self.rawmode[2:])
+                if bits not in [8, 16, 32]:
+                    self.tile = [
+                        ImageFile._Tile(
+                            "bit", (0, 0) + self.size, offs, (bits, 8, 3, 0, -1)
+                        )
+                    ]
+                    return
+            except ValueError:
+                pass
+
+        if self.rawmode in ["RGB;T", "RYB;T"]:
+            # Old LabEye/3PC files.
Would be very surprised if anyone + # ever stumbled upon such a file ;-) + size = self.size[0] * self.size[1] + self.tile = [ + ImageFile._Tile("raw", (0, 0) + self.size, offs, ("G", 0, -1)), + ImageFile._Tile("raw", (0, 0) + self.size, offs + size, ("R", 0, -1)), + ImageFile._Tile( + "raw", (0, 0) + self.size, offs + 2 * size, ("B", 0, -1) + ), + ] + else: + # LabEye/IFUNC files + self.tile = [ + ImageFile._Tile("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1)) + ] + + @property + def n_frames(self) -> int: + return self.info[FRAMES] + + @property + def is_animated(self) -> bool: + return self.info[FRAMES] > 1 + + def seek(self, frame: int) -> None: + if not self._seek_check(frame): + return + if isinstance(self._fp, DeferredError): + raise self._fp.ex + + self.frame = frame + + if self.mode == "1": + bits = 1 + else: + bits = 8 * len(self.mode) + + size = ((self.size[0] * bits + 7) // 8) * self.size[1] + offs = self.__offset + frame * size + + self.fp = self._fp + + self.tile = [ + ImageFile._Tile("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1)) + ] + + def tell(self) -> int: + return self.frame + + +# +# -------------------------------------------------------------------- +# Save IM files + + +SAVE = { + # mode: (im type, raw mode) + "1": ("0 1", "1"), + "L": ("Greyscale", "L"), + "LA": ("LA", "LA;L"), + "P": ("Greyscale", "P"), + "PA": ("LA", "PA;L"), + "I": ("L 32S", "I;32S"), + "I;16": ("L 16", "I;16"), + "I;16L": ("L 16L", "I;16L"), + "I;16B": ("L 16B", "I;16B"), + "F": ("L 32F", "F;32F"), + "RGB": ("RGB", "RGB;L"), + "RGBA": ("RGBA", "RGBA;L"), + "RGBX": ("RGBX", "RGBX;L"), + "CMYK": ("CMYK", "CMYK;L"), + "YCbCr": ("YCC", "YCbCr;L"), +} + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + try: + image_type, rawmode = SAVE[im.mode] + except KeyError as e: + msg = f"Cannot save {im.mode} images as IM" + raise ValueError(msg) from e + + frames = im.encoderinfo.get("frames", 1) + + fp.write(f"Image type: {image_type} image\r\n".encode("ascii")) + if filename: + # Each line must be 100 characters or less, + # or: SyntaxError("not an IM file") + # 8 characters are used for "Name: " and "\r\n" + # Keep just the filename, ditch the potentially overlong path + if isinstance(filename, bytes): + filename = filename.decode("ascii") + name, ext = os.path.splitext(os.path.basename(filename)) + name = "".join([name[: 92 - len(ext)], ext]) + + fp.write(f"Name: {name}\r\n".encode("ascii")) + fp.write(f"Image size (x*y): {im.size[0]}*{im.size[1]}\r\n".encode("ascii")) + fp.write(f"File size (no of images): {frames}\r\n".encode("ascii")) + if im.mode in ["P", "PA"]: + fp.write(b"Lut: 1\r\n") + fp.write(b"\000" * (511 - fp.tell()) + b"\032") + if im.mode in ["P", "PA"]: + im_palette = im.im.getpalette("RGB", "RGB;L") + colors = len(im_palette) // 3 + palette = b"" + for i in range(3): + palette += im_palette[colors * i : colors * (i + 1)] + palette += b"\x00" * (256 - colors) + fp.write(palette) # 768 bytes + ImageFile._save( + im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, -1))] + ) + + +# +# -------------------------------------------------------------------- +# Registry + + +Image.register_open(ImImageFile.format, ImImageFile) +Image.register_save(ImImageFile.format, _save) + +Image.register_extension(ImImageFile.format, ".im") diff --git a/py311/lib/python3.11/site-packages/PIL/Image.py b/py311/lib/python3.11/site-packages/PIL/Image.py new file mode 100644 index 
0000000000000000000000000000000000000000..d209405c4c5e3c00179aa82c706730bca7bc7b28 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/Image.py @@ -0,0 +1,4245 @@ +# +# The Python Imaging Library. +# $Id$ +# +# the Image class wrapper +# +# partial release history: +# 1995-09-09 fl Created +# 1996-03-11 fl PIL release 0.0 (proof of concept) +# 1996-04-30 fl PIL release 0.1b1 +# 1999-07-28 fl PIL release 1.0 final +# 2000-06-07 fl PIL release 1.1 +# 2000-10-20 fl PIL release 1.1.1 +# 2001-05-07 fl PIL release 1.1.2 +# 2002-03-15 fl PIL release 1.1.3 +# 2003-05-10 fl PIL release 1.1.4 +# 2005-03-28 fl PIL release 1.1.5 +# 2006-12-02 fl PIL release 1.1.6 +# 2009-11-15 fl PIL release 1.1.7 +# +# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved. +# Copyright (c) 1995-2009 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +from __future__ import annotations + +import abc +import atexit +import builtins +import io +import logging +import math +import os +import re +import struct +import sys +import tempfile +import warnings +from collections.abc import Callable, Iterator, MutableMapping, Sequence +from enum import IntEnum +from types import ModuleType +from typing import IO, Any, Literal, Protocol, cast + +# VERSION was removed in Pillow 6.0.0. +# PILLOW_VERSION was removed in Pillow 9.0.0. +# Use __version__ instead. +from . import ( + ExifTags, + ImageMode, + TiffTags, + UnidentifiedImageError, + __version__, + _plugins, +) +from ._binary import i32le, o32be, o32le +from ._deprecate import deprecate +from ._util import DeferredError, is_path + +ElementTree: ModuleType | None +try: + from defusedxml import ElementTree +except ImportError: + ElementTree = None + +logger = logging.getLogger(__name__) + + +class DecompressionBombWarning(RuntimeWarning): + pass + + +class DecompressionBombError(Exception): + pass + + +WARN_POSSIBLE_FORMATS: bool = False + +# Limit to around a quarter gigabyte for a 24-bit (3 bpp) image +MAX_IMAGE_PIXELS: int | None = int(1024 * 1024 * 1024 // 4 // 3) + + +try: + # If the _imaging C module is not present, Pillow will not load. + # Note that other modules should not refer to _imaging directly; + # import Image and use the Image.core variable instead. + # Also note that Image.core is not a publicly documented interface, + # and should be considered private and subject to change. + from . import _imaging as core + + if __version__ != getattr(core, "PILLOW_VERSION", None): + msg = ( + "The _imaging extension was built for another version of Pillow or PIL:\n" + f"Core version: {getattr(core, 'PILLOW_VERSION', None)}\n" + f"Pillow version: {__version__}" + ) + raise ImportError(msg) + +except ImportError as v: + core = DeferredError.new(ImportError("The _imaging C module is not installed.")) + # Explanations for ways that we know we might have an import error + if str(v).startswith("Module use of python"): + # The _imaging C module is present, but not compiled for + # the right version (windows only). Print a warning, if + # possible. + warnings.warn( + "The _imaging extension was built for another version of Python.", + RuntimeWarning, + ) + elif str(v).startswith("The _imaging extension"): + warnings.warn(str(v), RuntimeWarning) + # Fail here anyway. Don't let people run with a mostly broken Pillow. + # see docs/porting.rst + raise + + +def isImageType(t: Any) -> TypeGuard[Image]: + """ + Checks if an object is an image object. + + .. warning:: + + This function is for internal use only. 
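+
+    A minimal sketch of the documented replacement (as named in the
+    deprecation note below)::
+
+        from PIL import Image
+
+        im = Image.new("RGB", (1, 1))
+        assert isinstance(im, Image.Image)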
+ + :param t: object to check if it's an image + :returns: True if the object is an image + """ + deprecate("Image.isImageType(im)", 12, "isinstance(im, Image.Image)") + return hasattr(t, "im") + + +# +# Constants + + +# transpose +class Transpose(IntEnum): + FLIP_LEFT_RIGHT = 0 + FLIP_TOP_BOTTOM = 1 + ROTATE_90 = 2 + ROTATE_180 = 3 + ROTATE_270 = 4 + TRANSPOSE = 5 + TRANSVERSE = 6 + + +# transforms (also defined in Imaging.h) +class Transform(IntEnum): + AFFINE = 0 + EXTENT = 1 + PERSPECTIVE = 2 + QUAD = 3 + MESH = 4 + + +# resampling filters (also defined in Imaging.h) +class Resampling(IntEnum): + NEAREST = 0 + BOX = 4 + BILINEAR = 2 + HAMMING = 5 + BICUBIC = 3 + LANCZOS = 1 + + +_filters_support = { + Resampling.BOX: 0.5, + Resampling.BILINEAR: 1.0, + Resampling.HAMMING: 1.0, + Resampling.BICUBIC: 2.0, + Resampling.LANCZOS: 3.0, +} + + +# dithers +class Dither(IntEnum): + NONE = 0 + ORDERED = 1 # Not yet implemented + RASTERIZE = 2 # Not yet implemented + FLOYDSTEINBERG = 3 # default + + +# palettes/quantizers +class Palette(IntEnum): + WEB = 0 + ADAPTIVE = 1 + + +class Quantize(IntEnum): + MEDIANCUT = 0 + MAXCOVERAGE = 1 + FASTOCTREE = 2 + LIBIMAGEQUANT = 3 + + +module = sys.modules[__name__] +for enum in (Transpose, Transform, Resampling, Dither, Palette, Quantize): + for item in enum: + setattr(module, item.name, item.value) + + +if hasattr(core, "DEFAULT_STRATEGY"): + DEFAULT_STRATEGY = core.DEFAULT_STRATEGY + FILTERED = core.FILTERED + HUFFMAN_ONLY = core.HUFFMAN_ONLY + RLE = core.RLE + FIXED = core.FIXED + + +# -------------------------------------------------------------------- +# Registries + +TYPE_CHECKING = False +if TYPE_CHECKING: + import mmap + from xml.etree.ElementTree import Element + + from IPython.lib.pretty import PrettyPrinter + + from . import ImageFile, ImageFilter, ImagePalette, ImageQt, TiffImagePlugin + from ._typing import CapsuleType, NumpyArray, StrOrBytesPath, TypeGuard +ID: list[str] = [] +OPEN: dict[ + str, + tuple[ + Callable[[IO[bytes], str | bytes], ImageFile.ImageFile], + Callable[[bytes], bool | str] | None, + ], +] = {} +MIME: dict[str, str] = {} +SAVE: dict[str, Callable[[Image, IO[bytes], str | bytes], None]] = {} +SAVE_ALL: dict[str, Callable[[Image, IO[bytes], str | bytes], None]] = {} +EXTENSION: dict[str, str] = {} +DECODERS: dict[str, type[ImageFile.PyDecoder]] = {} +ENCODERS: dict[str, type[ImageFile.PyEncoder]] = {} + +# -------------------------------------------------------------------- +# Modes + +_ENDIAN = "<" if sys.byteorder == "little" else ">" + + +def _conv_type_shape(im: Image) -> tuple[tuple[int, ...], str]: + m = ImageMode.getmode(im.mode) + shape: tuple[int, ...] = (im.height, im.width) + extra = len(m.bands) + if extra != 1: + shape += (extra,) + return shape, m.typestr + + +MODES = [ + "1", + "CMYK", + "F", + "HSV", + "I", + "I;16", + "I;16B", + "I;16L", + "I;16N", + "L", + "LA", + "La", + "LAB", + "P", + "PA", + "RGB", + "RGBA", + "RGBa", + "RGBX", + "YCbCr", +] + +# raw modes that may be memory mapped. NOTE: if you change this, you +# may have to modify the stride calculation in map.c too! +_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16L", "I;16B") + + +def getmodebase(mode: str) -> str: + """ + Gets the "base" mode for given mode. This function returns "L" for + images that contain grayscale data, and "RGB" for images that + contain color data. + + :param mode: Input mode. + :returns: "L" or "RGB". + :exception KeyError: If the input mode was not a standard mode. 
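+
+    For example, following the mode definitions above::
+
+        getmodebase("RGBA")  # "RGB"
+        getmodebase("LA")  # "L"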
+ """ + return ImageMode.getmode(mode).basemode + + +def getmodetype(mode: str) -> str: + """ + Gets the storage type mode. Given a mode, this function returns a + single-layer mode suitable for storing individual bands. + + :param mode: Input mode. + :returns: "L", "I", or "F". + :exception KeyError: If the input mode was not a standard mode. + """ + return ImageMode.getmode(mode).basetype + + +def getmodebandnames(mode: str) -> tuple[str, ...]: + """ + Gets a list of individual band names. Given a mode, this function returns + a tuple containing the names of individual bands (use + :py:method:`~PIL.Image.getmodetype` to get the mode used to store each + individual band. + + :param mode: Input mode. + :returns: A tuple containing band names. The length of the tuple + gives the number of bands in an image of the given mode. + :exception KeyError: If the input mode was not a standard mode. + """ + return ImageMode.getmode(mode).bands + + +def getmodebands(mode: str) -> int: + """ + Gets the number of individual bands for this mode. + + :param mode: Input mode. + :returns: The number of bands in this mode. + :exception KeyError: If the input mode was not a standard mode. + """ + return len(ImageMode.getmode(mode).bands) + + +# -------------------------------------------------------------------- +# Helpers + +_initialized = 0 + + +def preinit() -> None: + """ + Explicitly loads BMP, GIF, JPEG, PPM and PPM file format drivers. + + It is called when opening or saving images. + """ + + global _initialized + if _initialized >= 1: + return + + try: + from . import BmpImagePlugin + + assert BmpImagePlugin + except ImportError: + pass + try: + from . import GifImagePlugin + + assert GifImagePlugin + except ImportError: + pass + try: + from . import JpegImagePlugin + + assert JpegImagePlugin + except ImportError: + pass + try: + from . import PpmImagePlugin + + assert PpmImagePlugin + except ImportError: + pass + try: + from . import PngImagePlugin + + assert PngImagePlugin + except ImportError: + pass + + _initialized = 1 + + +def init() -> bool: + """ + Explicitly initializes the Python Imaging Library. This function + loads all available file format drivers. + + It is called when opening or saving images if :py:meth:`~preinit()` is + insufficient, and by :py:meth:`~PIL.features.pilinfo`. + """ + + global _initialized + if _initialized >= 2: + return False + + parent_name = __name__.rpartition(".")[0] + for plugin in _plugins: + try: + logger.debug("Importing %s", plugin) + __import__(f"{parent_name}.{plugin}", globals(), locals(), []) + except ImportError as e: + logger.debug("Image: failed to import %s: %s", plugin, e) + + if OPEN or SAVE: + _initialized = 2 + return True + return False + + +# -------------------------------------------------------------------- +# Codec factories (used by tobytes/frombytes and ImageFile.load) + + +def _getdecoder( + mode: str, decoder_name: str, args: Any, extra: tuple[Any, ...] = () +) -> core.ImagingDecoder | ImageFile.PyDecoder: + # tweak arguments + if args is None: + args = () + elif not isinstance(args, tuple): + args = (args,) + + try: + decoder = DECODERS[decoder_name] + except KeyError: + pass + else: + return decoder(mode, *args + extra) + + try: + # get decoder + decoder = getattr(core, f"{decoder_name}_decoder") + except AttributeError as e: + msg = f"decoder {decoder_name} not available" + raise OSError(msg) from e + return decoder(mode, *args + extra) + + +def _getencoder( + mode: str, encoder_name: str, args: Any, extra: tuple[Any, ...] 
= () +) -> core.ImagingEncoder | ImageFile.PyEncoder: + # tweak arguments + if args is None: + args = () + elif not isinstance(args, tuple): + args = (args,) + + try: + encoder = ENCODERS[encoder_name] + except KeyError: + pass + else: + return encoder(mode, *args + extra) + + try: + # get encoder + encoder = getattr(core, f"{encoder_name}_encoder") + except AttributeError as e: + msg = f"encoder {encoder_name} not available" + raise OSError(msg) from e + return encoder(mode, *args + extra) + + +# -------------------------------------------------------------------- +# Simple expression analyzer + + +class ImagePointTransform: + """ + Used with :py:meth:`~PIL.Image.Image.point` for single band images with more than + 8 bits, this represents an affine transformation, where the value is multiplied by + ``scale`` and ``offset`` is added. + """ + + def __init__(self, scale: float, offset: float) -> None: + self.scale = scale + self.offset = offset + + def __neg__(self) -> ImagePointTransform: + return ImagePointTransform(-self.scale, -self.offset) + + def __add__(self, other: ImagePointTransform | float) -> ImagePointTransform: + if isinstance(other, ImagePointTransform): + return ImagePointTransform( + self.scale + other.scale, self.offset + other.offset + ) + return ImagePointTransform(self.scale, self.offset + other) + + __radd__ = __add__ + + def __sub__(self, other: ImagePointTransform | float) -> ImagePointTransform: + return self + -other + + def __rsub__(self, other: ImagePointTransform | float) -> ImagePointTransform: + return other + -self + + def __mul__(self, other: ImagePointTransform | float) -> ImagePointTransform: + if isinstance(other, ImagePointTransform): + return NotImplemented + return ImagePointTransform(self.scale * other, self.offset * other) + + __rmul__ = __mul__ + + def __truediv__(self, other: ImagePointTransform | float) -> ImagePointTransform: + if isinstance(other, ImagePointTransform): + return NotImplemented + return ImagePointTransform(self.scale / other, self.offset / other) + + +def _getscaleoffset( + expr: Callable[[ImagePointTransform], ImagePointTransform | float], +) -> tuple[float, float]: + a = expr(ImagePointTransform(1, 0)) + return (a.scale, a.offset) if isinstance(a, ImagePointTransform) else (0, a) + + +# -------------------------------------------------------------------- +# Implementation wrapper + + +class SupportsGetData(Protocol): + def getdata( + self, + ) -> tuple[Transform, Sequence[int]]: ... + + +class Image: + """ + This class represents an image object. To create + :py:class:`~PIL.Image.Image` objects, use the appropriate factory + functions. There's hardly ever any reason to call the Image constructor + directly. + + * :py:func:`~PIL.Image.open` + * :py:func:`~PIL.Image.new` + * :py:func:`~PIL.Image.frombytes` + """ + + format: str | None = None + format_description: str | None = None + _close_exclusive_fp_after_loading = True + + def __init__(self) -> None: + # FIXME: take "new" parameters / other image? 
+ self._im: core.ImagingCore | DeferredError | None = None + self._mode = "" + self._size = (0, 0) + self.palette: ImagePalette.ImagePalette | None = None + self.info: dict[str | tuple[int, int], Any] = {} + self.readonly = 0 + self._exif: Exif | None = None + + @property + def im(self) -> core.ImagingCore: + if isinstance(self._im, DeferredError): + raise self._im.ex + assert self._im is not None + return self._im + + @im.setter + def im(self, im: core.ImagingCore) -> None: + self._im = im + + @property + def width(self) -> int: + return self.size[0] + + @property + def height(self) -> int: + return self.size[1] + + @property + def size(self) -> tuple[int, int]: + return self._size + + @property + def mode(self) -> str: + return self._mode + + @property + def readonly(self) -> int: + return (self._im and self._im.readonly) or self._readonly + + @readonly.setter + def readonly(self, readonly: int) -> None: + self._readonly = readonly + + def _new(self, im: core.ImagingCore) -> Image: + new = Image() + new.im = im + new._mode = im.mode + new._size = im.size + if im.mode in ("P", "PA"): + if self.palette: + new.palette = self.palette.copy() + else: + from . import ImagePalette + + new.palette = ImagePalette.ImagePalette() + new.info = self.info.copy() + return new + + # Context manager support + def __enter__(self): + return self + + def __exit__(self, *args): + from . import ImageFile + + if isinstance(self, ImageFile.ImageFile): + if getattr(self, "_exclusive_fp", False): + self._close_fp() + self.fp = None + + def close(self) -> None: + """ + This operation will destroy the image core and release its memory. + The image data will be unusable afterward. + + This function is required to close images that have multiple frames or + have not had their file read and closed by the + :py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for + more information. + """ + if getattr(self, "map", None): + if sys.platform == "win32" and hasattr(sys, "pypy_version_info"): + self.map.close() + self.map: mmap.mmap | None = None + + # Instead of simply setting to None, we're setting up a + # deferred error that will better explain that the core image + # object is gone. 
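+        # e.g. a later im.load() then raises
+        # ValueError("Operation on closed image") rather than an opaque
+        # AttributeError on a missing core object.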
+ self._im = DeferredError(ValueError("Operation on closed image")) + + def _copy(self) -> None: + self.load() + self.im = self.im.copy() + self.readonly = 0 + + def _ensure_mutable(self) -> None: + if self.readonly: + self._copy() + else: + self.load() + + def _dump( + self, file: str | None = None, format: str | None = None, **options: Any + ) -> str: + suffix = "" + if format: + suffix = f".{format}" + + if not file: + f, filename = tempfile.mkstemp(suffix) + os.close(f) + else: + filename = file + if not filename.endswith(suffix): + filename = filename + suffix + + self.load() + + if not format or format == "PPM": + self.im.save_ppm(filename) + else: + self.save(filename, format, **options) + + return filename + + def __eq__(self, other: object) -> bool: + if self.__class__ is not other.__class__: + return False + assert isinstance(other, Image) + return ( + self.mode == other.mode + and self.size == other.size + and self.info == other.info + and self.getpalette() == other.getpalette() + and self.tobytes() == other.tobytes() + ) + + def __repr__(self) -> str: + return ( + f"<{self.__class__.__module__}.{self.__class__.__name__} " + f"image mode={self.mode} size={self.size[0]}x{self.size[1]} " + f"at 0x{id(self):X}>" + ) + + def _repr_pretty_(self, p: PrettyPrinter, cycle: bool) -> None: + """IPython plain text display support""" + + # Same as __repr__ but without unpredictable id(self), + # to keep Jupyter notebook `text/plain` output stable. + p.text( + f"<{self.__class__.__module__}.{self.__class__.__name__} " + f"image mode={self.mode} size={self.size[0]}x{self.size[1]}>" + ) + + def _repr_image(self, image_format: str, **kwargs: Any) -> bytes | None: + """Helper function for iPython display hook. + + :param image_format: Image format. + :returns: image as bytes, saved into the given format. + """ + b = io.BytesIO() + try: + self.save(b, image_format, **kwargs) + except Exception: + return None + return b.getvalue() + + def _repr_png_(self) -> bytes | None: + """iPython display hook support for PNG format. + + :returns: PNG version of the image as bytes + """ + return self._repr_image("PNG", compress_level=1) + + def _repr_jpeg_(self) -> bytes | None: + """iPython display hook support for JPEG format. 
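+
+        Note that ``_repr_image`` above swallows save errors, so a mode that
+        JPEG cannot store (e.g. "RGBA") simply yields ``None`` here.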
+ + :returns: JPEG version of the image as bytes + """ + return self._repr_image("JPEG") + + @property + def __array_interface__(self) -> dict[str, str | bytes | int | tuple[int, ...]]: + # numpy array interface support + new: dict[str, str | bytes | int | tuple[int, ...]] = {"version": 3} + if self.mode == "1": + # Binary images need to be extended from bits to bytes + # See: https://github.com/python-pillow/Pillow/issues/350 + new["data"] = self.tobytes("raw", "L") + else: + new["data"] = self.tobytes() + new["shape"], new["typestr"] = _conv_type_shape(self) + return new + + def __arrow_c_schema__(self) -> object: + self.load() + return self.im.__arrow_c_schema__() + + def __arrow_c_array__( + self, requested_schema: object | None = None + ) -> tuple[object, object]: + self.load() + return (self.im.__arrow_c_schema__(), self.im.__arrow_c_array__()) + + def __getstate__(self) -> list[Any]: + im_data = self.tobytes() # load image first + return [self.info, self.mode, self.size, self.getpalette(), im_data] + + def __setstate__(self, state: list[Any]) -> None: + Image.__init__(self) + info, mode, size, palette, data = state[:5] + self.info = info + self._mode = mode + self._size = size + self.im = core.new(mode, size) + if mode in ("L", "LA", "P", "PA") and palette: + self.putpalette(palette) + self.frombytes(data) + + def tobytes(self, encoder_name: str = "raw", *args: Any) -> bytes: + """ + Return image as a bytes object. + + .. warning:: + + This method returns raw image data derived from Pillow's internal + storage. For compressed image data (e.g. PNG, JPEG) use + :meth:`~.save`, with a BytesIO parameter for in-memory data. + + :param encoder_name: What encoder to use. + + The default is to use the standard "raw" encoder. + To see how this packs pixel data into the returned + bytes, see :file:`libImaging/Pack.c`. + + A list of C encoders can be seen under codecs + section of the function array in + :file:`_imaging.c`. Python encoders are registered + within the relevant plugins. + :param args: Extra arguments to the encoder. + :returns: A :py:class:`bytes` object. + """ + + encoder_args: Any = args + if len(encoder_args) == 1 and isinstance(encoder_args[0], tuple): + # may pass tuple instead of argument list + encoder_args = encoder_args[0] + + if encoder_name == "raw" and encoder_args == (): + encoder_args = self.mode + + self.load() + + if self.width == 0 or self.height == 0: + return b"" + + # unpack data + e = _getencoder(self.mode, encoder_name, encoder_args) + e.setimage(self.im) + + from . import ImageFile + + bufsize = max(ImageFile.MAXBLOCK, self.size[0] * 4) # see RawEncode.c + + output = [] + while True: + bytes_consumed, errcode, data = e.encode(bufsize) + output.append(data) + if errcode: + break + if errcode < 0: + msg = f"encoder error {errcode} in tobytes" + raise RuntimeError(msg) + + return b"".join(output) + + def tobitmap(self, name: str = "image") -> bytes: + """ + Returns the image converted to an X11 bitmap. + + .. note:: This method only works for mode "1" images. + + :param name: The name prefix to use for the bitmap variables. + :returns: A string containing an X11 bitmap. 
+ :raises ValueError: If the mode is not "1" + """ + + self.load() + if self.mode != "1": + msg = "not a bitmap" + raise ValueError(msg) + data = self.tobytes("xbm") + return b"".join( + [ + f"#define {name}_width {self.size[0]}\n".encode("ascii"), + f"#define {name}_height {self.size[1]}\n".encode("ascii"), + f"static char {name}_bits[] = {{\n".encode("ascii"), + data, + b"};", + ] + ) + + def frombytes( + self, + data: bytes | bytearray | SupportsArrayInterface, + decoder_name: str = "raw", + *args: Any, + ) -> None: + """ + Loads this image with pixel data from a bytes object. + + This method is similar to the :py:func:`~PIL.Image.frombytes` function, + but loads data into this image instead of creating a new image object. + """ + + if self.width == 0 or self.height == 0: + return + + decoder_args: Any = args + if len(decoder_args) == 1 and isinstance(decoder_args[0], tuple): + # may pass tuple instead of argument list + decoder_args = decoder_args[0] + + # default format + if decoder_name == "raw" and decoder_args == (): + decoder_args = self.mode + + # unpack data + d = _getdecoder(self.mode, decoder_name, decoder_args) + d.setimage(self.im) + s = d.decode(data) + + if s[0] >= 0: + msg = "not enough image data" + raise ValueError(msg) + if s[1] != 0: + msg = "cannot decode image data" + raise ValueError(msg) + + def load(self) -> core.PixelAccess | None: + """ + Allocates storage for the image and loads the pixel data. In + normal cases, you don't need to call this method, since the + Image class automatically loads an opened image when it is + accessed for the first time. + + If the file associated with the image was opened by Pillow, then this + method will close it. The exception to this is if the image has + multiple frames, in which case the file will be left open for seek + operations. See :ref:`file-handling` for more information. + + :returns: An image access object. + :rtype: :py:class:`.PixelAccess` + """ + if self._im is not None and self.palette and self.palette.dirty: + # realize palette + mode, arr = self.palette.getdata() + self.im.putpalette(self.palette.mode, mode, arr) + self.palette.dirty = 0 + self.palette.rawmode = None + if "transparency" in self.info and mode in ("LA", "PA"): + if isinstance(self.info["transparency"], int): + self.im.putpalettealpha(self.info["transparency"], 0) + else: + self.im.putpalettealphas(self.info["transparency"]) + self.palette.mode = "RGBA" + else: + self.palette.palette = self.im.getpalette( + self.palette.mode, self.palette.mode + ) + + if self._im is not None: + return self.im.pixel_access(self.readonly) + return None + + def verify(self) -> None: + """ + Verifies the contents of a file. For data read from a file, this + method attempts to determine if the file is broken, without + actually decoding the image data. If this method finds any + problems, it raises suitable exceptions. If you need to load + the image after using this method, you must reopen the image + file. + """ + pass + + def convert( + self, + mode: str | None = None, + matrix: tuple[float, ...] | None = None, + dither: Dither | None = None, + palette: Palette = Palette.WEB, + colors: int = 256, + ) -> Image: + """ + Returns a converted copy of this image. For the "P" mode, this + method translates pixels through the palette. If mode is + omitted, a mode is chosen so that all information in the image + and the palette can be represented without a palette. + + This supports all possible conversions between "L", "RGB" and "CMYK". 
The + ``matrix`` argument only supports "L" and "RGB". + + When translating a color image to grayscale (mode "L"), + the library uses the ITU-R 601-2 luma transform:: + + L = R * 299/1000 + G * 587/1000 + B * 114/1000 + + The default method of converting a grayscale ("L") or "RGB" + image into a bilevel (mode "1") image uses Floyd-Steinberg + dither to approximate the original image luminosity levels. If + dither is ``None``, all values larger than 127 are set to 255 (white), + all other values to 0 (black). To use other thresholds, use the + :py:meth:`~PIL.Image.Image.point` method. + + When converting from "RGBA" to "P" without a ``matrix`` argument, + this passes the operation to :py:meth:`~PIL.Image.Image.quantize`, + and ``dither`` and ``palette`` are ignored. + + When converting from "PA", if an "RGBA" palette is present, the alpha + channel from the image will be used instead of the values from the palette. + + :param mode: The requested mode. See: :ref:`concept-modes`. + :param matrix: An optional conversion matrix. If given, this + should be 4- or 12-tuple containing floating point values. + :param dither: Dithering method, used when converting from + mode "RGB" to "P" or from "RGB" or "L" to "1". + Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG` + (default). Note that this is not used when ``matrix`` is supplied. + :param palette: Palette to use when converting from mode "RGB" + to "P". Available palettes are :data:`Palette.WEB` or + :data:`Palette.ADAPTIVE`. + :param colors: Number of colors to use for the :data:`Palette.ADAPTIVE` + palette. Defaults to 256. + :rtype: :py:class:`~PIL.Image.Image` + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + if mode in ("BGR;15", "BGR;16", "BGR;24"): + deprecate(mode, 12) + + self.load() + + has_transparency = "transparency" in self.info + if not mode and self.mode == "P": + # determine default mode + if self.palette: + mode = self.palette.mode + else: + mode = "RGB" + if mode == "RGB" and has_transparency: + mode = "RGBA" + if not mode or (mode == self.mode and not matrix): + return self.copy() + + if matrix: + # matrix conversion + if mode not in ("L", "RGB"): + msg = "illegal conversion" + raise ValueError(msg) + im = self.im.convert_matrix(mode, matrix) + new_im = self._new(im) + if has_transparency and self.im.bands == 3: + transparency = new_im.info["transparency"] + + def convert_transparency( + m: tuple[float, ...], v: tuple[int, int, int] + ) -> int: + value = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3] * 0.5 + return max(0, min(255, int(value))) + + if mode == "L": + transparency = convert_transparency(matrix, transparency) + elif len(mode) == 3: + transparency = tuple( + convert_transparency(matrix[i * 4 : i * 4 + 4], transparency) + for i in range(len(transparency)) + ) + new_im.info["transparency"] = transparency + return new_im + + if mode == "P" and self.mode == "RGBA": + return self.quantize(colors) + + trns = None + delete_trns = False + # transparency handling + if has_transparency: + if (self.mode in ("1", "L", "I", "I;16") and mode in ("LA", "RGBA")) or ( + self.mode == "RGB" and mode in ("La", "LA", "RGBa", "RGBA") + ): + # Use transparent conversion to promote from transparent + # color to an alpha channel. 
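+                # e.g. an "L" image with info["transparency"] == 255 becomes
+                # an "LA" image whose alpha is 0 exactly where the source
+                # pixels equal 255, and 255 everywhere else.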
+ new_im = self._new( + self.im.convert_transparent(mode, self.info["transparency"]) + ) + del new_im.info["transparency"] + return new_im + elif self.mode in ("L", "RGB", "P") and mode in ("L", "RGB", "P"): + t = self.info["transparency"] + if isinstance(t, bytes): + # Dragons. This can't be represented by a single color + warnings.warn( + "Palette images with Transparency expressed in bytes should be " + "converted to RGBA images" + ) + delete_trns = True + else: + # get the new transparency color. + # use existing conversions + trns_im = new(self.mode, (1, 1)) + if self.mode == "P": + assert self.palette is not None + trns_im.putpalette(self.palette, self.palette.mode) + if isinstance(t, tuple): + err = "Couldn't allocate a palette color for transparency" + assert trns_im.palette is not None + try: + t = trns_im.palette.getcolor(t, self) + except ValueError as e: + if str(e) == "cannot allocate more than 256 colors": + # If all 256 colors are in use, + # then there is no need for transparency + t = None + else: + raise ValueError(err) from e + if t is None: + trns = None + else: + trns_im.putpixel((0, 0), t) + + if mode in ("L", "RGB"): + trns_im = trns_im.convert(mode) + else: + # can't just retrieve the palette number, got to do it + # after quantization. + trns_im = trns_im.convert("RGB") + trns = trns_im.getpixel((0, 0)) + + elif self.mode == "P" and mode in ("LA", "PA", "RGBA"): + t = self.info["transparency"] + delete_trns = True + + if isinstance(t, bytes): + self.im.putpalettealphas(t) + elif isinstance(t, int): + self.im.putpalettealpha(t, 0) + else: + msg = "Transparency for P mode should be bytes or int" + raise ValueError(msg) + + if mode == "P" and palette == Palette.ADAPTIVE: + im = self.im.quantize(colors) + new_im = self._new(im) + from . import ImagePalette + + new_im.palette = ImagePalette.ImagePalette( + "RGB", new_im.im.getpalette("RGB") + ) + if delete_trns: + # This could possibly happen if we requantize to fewer colors. + # The transparency would be totally off in that case. + del new_im.info["transparency"] + if trns is not None: + try: + new_im.info["transparency"] = new_im.palette.getcolor( + cast(tuple[int, ...], trns), # trns was converted to RGB + new_im, + ) + except Exception: + # if we can't make a transparent color, don't leave the old + # transparency hanging around to mess us up. + del new_im.info["transparency"] + warnings.warn("Couldn't allocate palette entry for transparency") + return new_im + + if "LAB" in (self.mode, mode): + im = self + if mode == "LAB": + if im.mode not in ("RGB", "RGBA", "RGBX"): + im = im.convert("RGBA") + other_mode = im.mode + else: + other_mode = mode + if other_mode in ("RGB", "RGBA", "RGBX"): + from . import ImageCms + + srgb = ImageCms.createProfile("sRGB") + lab = ImageCms.createProfile("LAB") + profiles = [lab, srgb] if im.mode == "LAB" else [srgb, lab] + transform = ImageCms.buildTransform( + profiles[0], profiles[1], im.mode, mode + ) + return transform.apply(im) + + # colorspace conversion + if dither is None: + dither = Dither.FLOYDSTEINBERG + + try: + im = self.im.convert(mode, dither) + except ValueError: + try: + # normalize source image and try again + modebase = getmodebase(self.mode) + if modebase == self.mode: + raise + im = self.im.convert(modebase) + im = im.convert(mode, dither) + except KeyError as e: + msg = "illegal conversion" + raise ValueError(msg) from e + + new_im = self._new(im) + if mode == "P" and palette != Palette.ADAPTIVE: + from . 
import ImagePalette + + new_im.palette = ImagePalette.ImagePalette("RGB", im.getpalette("RGB")) + if delete_trns: + # crash fail if we leave a bytes transparency in an rgb/l mode. + del new_im.info["transparency"] + if trns is not None: + if new_im.mode == "P" and new_im.palette: + try: + new_im.info["transparency"] = new_im.palette.getcolor( + cast(tuple[int, ...], trns), new_im # trns was converted to RGB + ) + except ValueError as e: + del new_im.info["transparency"] + if str(e) != "cannot allocate more than 256 colors": + # If all 256 colors are in use, + # then there is no need for transparency + warnings.warn( + "Couldn't allocate palette entry for transparency" + ) + else: + new_im.info["transparency"] = trns + return new_im + + def quantize( + self, + colors: int = 256, + method: int | None = None, + kmeans: int = 0, + palette: Image | None = None, + dither: Dither = Dither.FLOYDSTEINBERG, + ) -> Image: + """ + Convert the image to 'P' mode with the specified number + of colors. + + :param colors: The desired number of colors, <= 256 + :param method: :data:`Quantize.MEDIANCUT` (median cut), + :data:`Quantize.MAXCOVERAGE` (maximum coverage), + :data:`Quantize.FASTOCTREE` (fast octree), + :data:`Quantize.LIBIMAGEQUANT` (libimagequant; check support + using :py:func:`PIL.features.check_feature` with + ``feature="libimagequant"``). + + By default, :data:`Quantize.MEDIANCUT` will be used. + + The exception to this is RGBA images. :data:`Quantize.MEDIANCUT` + and :data:`Quantize.MAXCOVERAGE` do not support RGBA images, so + :data:`Quantize.FASTOCTREE` is used by default instead. + :param kmeans: Integer greater than or equal to zero. + :param palette: Quantize to the palette of given + :py:class:`PIL.Image.Image`. + :param dither: Dithering method, used when converting from + mode "RGB" to "P" or from "RGB" or "L" to "1". + Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG` + (default). + :returns: A new image + """ + + self.load() + + if method is None: + # defaults: + method = Quantize.MEDIANCUT + if self.mode == "RGBA": + method = Quantize.FASTOCTREE + + if self.mode == "RGBA" and method not in ( + Quantize.FASTOCTREE, + Quantize.LIBIMAGEQUANT, + ): + # Caller specified an invalid mode. + msg = ( + "Fast Octree (method == 2) and libimagequant (method == 3) " + "are the only valid methods for quantizing RGBA images" + ) + raise ValueError(msg) + + if palette: + # use palette from reference image + palette.load() + if palette.mode != "P": + msg = "bad mode for palette image" + raise ValueError(msg) + if self.mode not in {"RGB", "L"}: + msg = "only RGB or L mode images can be quantized to a palette" + raise ValueError(msg) + im = self.im.convert("P", dither, palette.im) + new_im = self._new(im) + assert palette.palette is not None + new_im.palette = palette.palette.copy() + return new_im + + if kmeans < 0: + msg = "kmeans must not be negative" + raise ValueError(msg) + + im = self._new(self.im.quantize(colors, method, kmeans)) + + from . import ImagePalette + + mode = im.im.getpalettemode() + palette_data = im.im.getpalette(mode, mode)[: colors * len(mode)] + im.palette = ImagePalette.ImagePalette(mode, palette_data) + + return im + + def copy(self) -> Image: + """ + Copies this image. Use this method if you wish to paste things + into an image, but still retain the original. + + :rtype: :py:class:`~PIL.Image.Image` + :returns: An :py:class:`~PIL.Image.Image` object. 
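+
+        A minimal sketch of the intended pattern (``new`` is this module's
+        factory function)::
+
+            base = new("RGB", (64, 64))
+            scratch = base.copy()
+            scratch.paste((255, 0, 0), (0, 0, 32, 32))  # base is unchanged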
+ """ + self.load() + return self._new(self.im.copy()) + + __copy__ = copy + + def crop(self, box: tuple[float, float, float, float] | None = None) -> Image: + """ + Returns a rectangular region from this image. The box is a + 4-tuple defining the left, upper, right, and lower pixel + coordinate. See :ref:`coordinate-system`. + + Note: Prior to Pillow 3.4.0, this was a lazy operation. + + :param box: The crop rectangle, as a (left, upper, right, lower)-tuple. + :rtype: :py:class:`~PIL.Image.Image` + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + if box is None: + return self.copy() + + if box[2] < box[0]: + msg = "Coordinate 'right' is less than 'left'" + raise ValueError(msg) + elif box[3] < box[1]: + msg = "Coordinate 'lower' is less than 'upper'" + raise ValueError(msg) + + self.load() + return self._new(self._crop(self.im, box)) + + def _crop( + self, im: core.ImagingCore, box: tuple[float, float, float, float] + ) -> core.ImagingCore: + """ + Returns a rectangular region from the core image object im. + + This is equivalent to calling im.crop((x0, y0, x1, y1)), but + includes additional sanity checks. + + :param im: a core image object + :param box: The crop rectangle, as a (left, upper, right, lower)-tuple. + :returns: A core image object. + """ + + x0, y0, x1, y1 = map(int, map(round, box)) + + absolute_values = (abs(x1 - x0), abs(y1 - y0)) + + _decompression_bomb_check(absolute_values) + + return im.crop((x0, y0, x1, y1)) + + def draft( + self, mode: str | None, size: tuple[int, int] | None + ) -> tuple[str, tuple[int, int, float, float]] | None: + """ + Configures the image file loader so it returns a version of the + image that as closely as possible matches the given mode and + size. For example, you can use this method to convert a color + JPEG to grayscale while loading it. + + If any changes are made, returns a tuple with the chosen ``mode`` and + ``box`` with coordinates of the original image within the altered one. + + Note that this method modifies the :py:class:`~PIL.Image.Image` object + in place. If the image has already been loaded, this method has no + effect. + + Note: This method is not implemented for most images. It is + currently implemented only for JPEG and MPO images. + + :param mode: The requested mode. + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + """ + pass + + def _expand(self, xmargin: int, ymargin: int | None = None) -> Image: + if ymargin is None: + ymargin = xmargin + self.load() + return self._new(self.im.expand(xmargin, ymargin)) + + def filter(self, filter: ImageFilter.Filter | type[ImageFilter.Filter]) -> Image: + """ + Filters this image using the given filter. For a list of + available filters, see the :py:mod:`~PIL.ImageFilter` module. + + :param filter: Filter kernel. + :returns: An :py:class:`~PIL.Image.Image` object.""" + + from . import ImageFilter + + self.load() + + if callable(filter): + filter = filter() + if not hasattr(filter, "filter"): + msg = "filter argument should be ImageFilter.Filter instance or class" + raise TypeError(msg) + + multiband = isinstance(filter, ImageFilter.MultibandFilter) + if self.im.bands == 1 or multiband: + return self._new(filter.filter(self.im)) + + ims = [ + self._new(filter.filter(self.im.getband(c))) for c in range(self.im.bands) + ] + return merge(self.mode, ims) + + def getbands(self) -> tuple[str, ...]: + """ + Returns a tuple containing the name of each band in this image. + For example, ``getbands`` on an RGB image returns ("R", "G", "B"). 
+ + :returns: A tuple containing band names. + :rtype: tuple + """ + return ImageMode.getmode(self.mode).bands + + def getbbox(self, *, alpha_only: bool = True) -> tuple[int, int, int, int] | None: + """ + Calculates the bounding box of the non-zero regions in the + image. + + :param alpha_only: Optional flag, defaulting to ``True``. + If ``True`` and the image has an alpha channel, trim transparent pixels. + Otherwise, trim pixels when all channels are zero. + Keyword-only argument. + :returns: The bounding box is returned as a 4-tuple defining the + left, upper, right, and lower pixel coordinate. See + :ref:`coordinate-system`. If the image is completely empty, this + method returns None. + + """ + + self.load() + return self.im.getbbox(alpha_only) + + def getcolors( + self, maxcolors: int = 256 + ) -> list[tuple[int, tuple[int, ...]]] | list[tuple[int, float]] | None: + """ + Returns a list of colors used in this image. + + The colors will be in the image's mode. For example, an RGB image will + return a tuple of (red, green, blue) color values, and a P image will + return the index of the color in the palette. + + :param maxcolors: Maximum number of colors. If this number is + exceeded, this method returns None. The default limit is + 256 colors. + :returns: An unsorted list of (count, pixel) values. + """ + + self.load() + if self.mode in ("1", "L", "P"): + h = self.im.histogram() + out: list[tuple[int, float]] = [(h[i], i) for i in range(256) if h[i]] + if len(out) > maxcolors: + return None + return out + return self.im.getcolors(maxcolors) + + def getdata(self, band: int | None = None) -> core.ImagingCore: + """ + Returns the contents of this image as a sequence object + containing pixel values. The sequence object is flattened, so + that values for line one follow directly after the values of + line zero, and so on. + + Note that the sequence object returned by this method is an + internal PIL data type, which only supports certain sequence + operations. To convert it to an ordinary sequence (e.g. for + printing), use ``list(im.getdata())``. + + :param band: What band to return. The default is to return + all bands. To return a single band, pass in the index + value (e.g. 0 to get the "R" band from an "RGB" image). + :returns: A sequence-like object. + """ + + self.load() + if band is not None: + return self.im.getband(band) + return self.im # could be abused + + def getextrema(self) -> tuple[float, float] | tuple[tuple[int, int], ...]: + """ + Gets the minimum and maximum pixel values for each band in + the image. + + :returns: For a single-band image, a 2-tuple containing the + minimum and maximum pixel value. For a multi-band image, + a tuple containing one 2-tuple for each band. + """ + + self.load() + if self.im.bands > 1: + return tuple(self.im.getband(i).getextrema() for i in range(self.im.bands)) + return self.im.getextrema() + + def getxmp(self) -> dict[str, Any]: + """ + Returns a dictionary containing the XMP tags. + Requires defusedxml to be installed. + + :returns: XMP tags in a dictionary. 
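+
+        A minimal usage sketch ("pic.jpg" is a hypothetical file carrying
+        XMP metadata)::
+
+            from PIL import Image
+
+            with Image.open("pic.jpg") as im:
+                xmp = im.getxmp()  # {} if defusedxml or XMP data is absent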
+ """ + + def get_name(tag: str) -> str: + return re.sub("^{[^}]+}", "", tag) + + def get_value(element: Element) -> str | dict[str, Any] | None: + value: dict[str, Any] = {get_name(k): v for k, v in element.attrib.items()} + children = list(element) + if children: + for child in children: + name = get_name(child.tag) + child_value = get_value(child) + if name in value: + if not isinstance(value[name], list): + value[name] = [value[name]] + value[name].append(child_value) + else: + value[name] = child_value + elif value: + if element.text: + value["text"] = element.text + else: + return element.text + return value + + if ElementTree is None: + warnings.warn("XMP data cannot be read without defusedxml dependency") + return {} + if "xmp" not in self.info: + return {} + root = ElementTree.fromstring(self.info["xmp"].rstrip(b"\x00 ")) + return {get_name(root.tag): get_value(root)} + + def getexif(self) -> Exif: + """ + Gets EXIF data from the image. + + :returns: an :py:class:`~PIL.Image.Exif` object. + """ + if self._exif is None: + self._exif = Exif() + elif self._exif._loaded: + return self._exif + self._exif._loaded = True + + exif_info = self.info.get("exif") + if exif_info is None: + if "Raw profile type exif" in self.info: + exif_info = bytes.fromhex( + "".join(self.info["Raw profile type exif"].split("\n")[3:]) + ) + elif hasattr(self, "tag_v2"): + self._exif.bigtiff = self.tag_v2._bigtiff + self._exif.endian = self.tag_v2._endian + self._exif.load_from_fp(self.fp, self.tag_v2._offset) + if exif_info is not None: + self._exif.load(exif_info) + + # XMP tags + if ExifTags.Base.Orientation not in self._exif: + xmp_tags = self.info.get("XML:com.adobe.xmp") + pattern: str | bytes = r'tiff:Orientation(="|>)([0-9])' + if not xmp_tags and (xmp_tags := self.info.get("xmp")): + pattern = rb'tiff:Orientation(="|>)([0-9])' + if xmp_tags: + match = re.search(pattern, xmp_tags) + if match: + self._exif[ExifTags.Base.Orientation] = int(match[2]) + + return self._exif + + def _reload_exif(self) -> None: + if self._exif is None or not self._exif._loaded: + return + self._exif._loaded = False + self.getexif() + + def get_child_images(self) -> list[ImageFile.ImageFile]: + from . import ImageFile + + deprecate("Image.Image.get_child_images", 13) + return ImageFile.ImageFile.get_child_images(self) # type: ignore[arg-type] + + def getim(self) -> CapsuleType: + """ + Returns a capsule that points to the internal image memory. + + :returns: A capsule object. + """ + + self.load() + return self.im.ptr + + def getpalette(self, rawmode: str | None = "RGB") -> list[int] | None: + """ + Returns the image palette as a list. + + :param rawmode: The mode in which to return the palette. ``None`` will + return the palette in its current mode. + + .. versionadded:: 9.1.0 + + :returns: A list of color values [r, g, b, ...], or None if the + image has no palette. + """ + + self.load() + try: + mode = self.im.getpalettemode() + except ValueError: + return None # no palette + if rawmode is None: + rawmode = mode + return list(self.im.getpalette(mode, rawmode)) + + @property + def has_transparency_data(self) -> bool: + """ + Determine if an image has transparency data, whether in the form of an + alpha channel, a palette with an alpha channel, or a "transparency" key + in the info dictionary. + + Note the image might still appear solid, if all of the values shown + within are opaque. + + :returns: A boolean. 
+ """ + if ( + self.mode in ("LA", "La", "PA", "RGBA", "RGBa") + or "transparency" in self.info + ): + return True + if self.mode == "P": + assert self.palette is not None + return self.palette.mode.endswith("A") + return False + + def apply_transparency(self) -> None: + """ + If a P mode image has a "transparency" key in the info dictionary, + remove the key and instead apply the transparency to the palette. + Otherwise, the image is unchanged. + """ + if self.mode != "P" or "transparency" not in self.info: + return + + from . import ImagePalette + + palette = self.getpalette("RGBA") + assert palette is not None + transparency = self.info["transparency"] + if isinstance(transparency, bytes): + for i, alpha in enumerate(transparency): + palette[i * 4 + 3] = alpha + else: + palette[transparency * 4 + 3] = 0 + self.palette = ImagePalette.ImagePalette("RGBA", bytes(palette)) + self.palette.dirty = 1 + + del self.info["transparency"] + + def getpixel( + self, xy: tuple[int, int] | list[int] + ) -> float | tuple[int, ...] | None: + """ + Returns the pixel value at a given position. + + :param xy: The coordinate, given as (x, y). See + :ref:`coordinate-system`. + :returns: The pixel value. If the image is a multi-layer image, + this method returns a tuple. + """ + + self.load() + return self.im.getpixel(tuple(xy)) + + def getprojection(self) -> tuple[list[int], list[int]]: + """ + Get projection to x and y axes + + :returns: Two sequences, indicating where there are non-zero + pixels along the X-axis and the Y-axis, respectively. + """ + + self.load() + x, y = self.im.getprojection() + return list(x), list(y) + + def histogram( + self, mask: Image | None = None, extrema: tuple[float, float] | None = None + ) -> list[int]: + """ + Returns a histogram for the image. The histogram is returned as a + list of pixel counts, one for each pixel value in the source + image. Counts are grouped into 256 bins for each band, even if + the image has more than 8 bits per band. If the image has more + than one band, the histograms for all bands are concatenated (for + example, the histogram for an "RGB" image contains 768 values). + + A bilevel image (mode "1") is treated as a grayscale ("L") image + by this method. + + If a mask is provided, the method returns a histogram for those + parts of the image where the mask image is non-zero. The mask + image must have the same size as the image, and be either a + bi-level image (mode "1") or a grayscale image ("L"). + + :param mask: An optional mask. + :param extrema: An optional tuple of manually-specified extrema. + :returns: A list containing pixel counts. + """ + self.load() + if mask: + mask.load() + return self.im.histogram((0, 0), mask.im) + if self.mode in ("I", "F"): + return self.im.histogram( + extrema if extrema is not None else self.getextrema() + ) + return self.im.histogram() + + def entropy( + self, mask: Image | None = None, extrema: tuple[float, float] | None = None + ) -> float: + """ + Calculates and returns the entropy for the image. + + A bilevel image (mode "1") is treated as a grayscale ("L") + image by this method. + + If a mask is provided, the method employs the histogram for + those parts of the image where the mask image is non-zero. + The mask image must have the same size as the image, and be + either a bi-level image (mode "1") or a grayscale image ("L"). + + :param mask: An optional mask. + :param extrema: An optional tuple of manually-specified extrema. 
+        :returns: A float value representing the image entropy
+        """
+        self.load()
+        if mask:
+            mask.load()
+            return self.im.entropy((0, 0), mask.im)
+        if self.mode in ("I", "F"):
+            return self.im.entropy(
+                extrema if extrema is not None else self.getextrema()
+            )
+        return self.im.entropy()
+
+    def paste(
+        self,
+        im: Image | str | float | tuple[float, ...],
+        box: Image | tuple[int, int, int, int] | tuple[int, int] | None = None,
+        mask: Image | None = None,
+    ) -> None:
+        """
+        Pastes another image into this image. The box argument is either
+        a 2-tuple giving the upper left corner, a 4-tuple defining the
+        left, upper, right, and lower pixel coordinate, or None (same as
+        (0, 0)). See :ref:`coordinate-system`. If a 4-tuple is given, the size
+        of the pasted image must match the size of the region.
+
+        If the modes don't match, the pasted image is converted to the mode of
+        this image (see the :py:meth:`~PIL.Image.Image.convert` method for
+        details).
+
+        Instead of an image, the source can be an integer or tuple
+        containing pixel values. The method then fills the region
+        with the given color. When creating RGB images, you can
+        also use color strings as supported by the ImageColor module.
+
+        If a mask is given, this method updates only the regions
+        indicated by the mask. You can use either "1", "L", "LA", "RGBA"
+        or "RGBa" images (if present, the alpha band is used as mask).
+        Where the mask is 255, the given image is copied as is. Where
+        the mask is 0, the current value is preserved. Intermediate
+        values will mix the two images together, including their alpha
+        channels if they have them.
+
+        See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to
+        combine images with respect to their alpha channels.
+
+        :param im: Source image or pixel value (integer, float or tuple).
+        :param box: An optional 4-tuple giving the region to paste into.
+           If a 2-tuple is used instead, it's treated as the upper left
+           corner. If omitted or None, the source is pasted into the
+           upper left corner.
+
+           If an image is given as the second argument and there is no
+           third, the box defaults to (0, 0), and the second argument
+           is interpreted as a mask image.
+        :param mask: An optional mask image.
+        """
+
+        if isinstance(box, Image):
+            if mask is not None:
+                msg = "If using second argument as mask, third argument must be None"
+                raise ValueError(msg)
+            # abbreviated paste(im, mask) syntax
+            mask = box
+            box = None
+
+        if box is None:
+            box = (0, 0)
+
+        if len(box) == 2:
+            # upper left corner given; get size from image or mask
+            if isinstance(im, Image):
+                size = im.size
+            elif isinstance(mask, Image):
+                size = mask.size
+            else:
+                # FIXME: use self.size here?
+                msg = "cannot determine region size; use 4-item box"
+                raise ValueError(msg)
+            box += (box[0] + size[0], box[1] + size[1])
+
+        source: core.ImagingCore | str | float | tuple[float, ...]
+        if isinstance(im, str):
+            from . import ImageColor
+
+            source = ImageColor.getcolor(im, self.mode)
+        elif isinstance(im, Image):
+            im.load()
+            if self.mode != im.mode:
+                if self.mode != "RGB" or im.mode not in ("LA", "RGBA", "RGBa"):
+                    # should use an adapter for this!
+                    im = im.convert(self.mode)
+            source = im.im
+        else:
+            source = im
+
+        self._ensure_mutable()
+
+        if mask:
+            mask.load()
+            self.im.paste(source, box, mask.im)
+        else:
+            self.im.paste(source, box)
+
+    def alpha_composite(
+        self, im: Image, dest: Sequence[int] = (0, 0), source: Sequence[int] = (0, 0)
+    ) -> None:
+        """'In-place' analog of Image.alpha_composite.
Composites an image + onto this image. + + :param im: image to composite over this one + :param dest: Optional 2 tuple (left, top) specifying the upper + left corner in this (destination) image. + :param source: Optional 2 (left, top) tuple for the upper left + corner in the overlay source image, or 4 tuple (left, top, right, + bottom) for the bounds of the source rectangle + + Performance Note: Not currently implemented in-place in the core layer. + """ + + if not isinstance(source, (list, tuple)): + msg = "Source must be a list or tuple" + raise ValueError(msg) + if not isinstance(dest, (list, tuple)): + msg = "Destination must be a list or tuple" + raise ValueError(msg) + + if len(source) == 4: + overlay_crop_box = tuple(source) + elif len(source) == 2: + overlay_crop_box = tuple(source) + im.size + else: + msg = "Source must be a sequence of length 2 or 4" + raise ValueError(msg) + + if not len(dest) == 2: + msg = "Destination must be a sequence of length 2" + raise ValueError(msg) + if min(source) < 0: + msg = "Source must be non-negative" + raise ValueError(msg) + + # over image, crop if it's not the whole image. + if overlay_crop_box == (0, 0) + im.size: + overlay = im + else: + overlay = im.crop(overlay_crop_box) + + # target for the paste + box = tuple(dest) + (dest[0] + overlay.width, dest[1] + overlay.height) + + # destination image. don't copy if we're using the whole image. + if box == (0, 0) + self.size: + background = self + else: + background = self.crop(box) + + result = alpha_composite(background, overlay) + self.paste(result, box) + + def point( + self, + lut: ( + Sequence[float] + | NumpyArray + | Callable[[int], float] + | Callable[[ImagePointTransform], ImagePointTransform | float] + | ImagePointHandler + ), + mode: str | None = None, + ) -> Image: + """ + Maps this image through a lookup table or function. + + :param lut: A lookup table, containing 256 (or 65536 if + self.mode=="I" and mode == "L") values per band in the + image. A function can be used instead, it should take a + single argument. The function is called once for each + possible pixel value, and the resulting table is applied to + all bands of the image. + + It may also be an :py:class:`~PIL.Image.ImagePointHandler` + object:: + + class Example(Image.ImagePointHandler): + def point(self, im: Image) -> Image: + # Return result + :param mode: Output mode (default is same as input). This can only be used if + the source image has mode "L" or "P", and the output has mode "1" or the + source image mode is "I" and the output mode is "L". + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + self.load() + + if isinstance(lut, ImagePointHandler): + return lut.point(self) + + if callable(lut): + # if it isn't a list, it should be a function + if self.mode in ("I", "I;16", "F"): + # check if the function can be used with point_transform + # UNDONE wiredfool -- I think this prevents us from ever doing + # a gamma function point transform on > 8bit images. 
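+                # (Roughly: only affine callables can be honored for these
+                # modes, e.g. im.point(lambda i: i * 1.2 + 10), from which a
+                # (scale, offset) pair of (1.2, 10) is recovered below; a
+                # non-linear callable will raise instead.)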
+ scale, offset = _getscaleoffset(lut) # type: ignore[arg-type] + return self._new(self.im.point_transform(scale, offset)) + # for other modes, convert the function to a table + flatLut = [lut(i) for i in range(256)] * self.im.bands # type: ignore[arg-type] + else: + flatLut = lut + + if self.mode == "F": + # FIXME: _imaging returns a confusing error message for this case + msg = "point operation not supported for this mode" + raise ValueError(msg) + + if mode != "F": + flatLut = [round(i) for i in flatLut] + return self._new(self.im.point(flatLut, mode)) + + def putalpha(self, alpha: Image | int) -> None: + """ + Adds or replaces the alpha layer in this image. If the image + does not have an alpha layer, it's converted to "LA" or "RGBA". + The new layer must be either "L" or "1". + + :param alpha: The new alpha layer. This can either be an "L" or "1" + image having the same size as this image, or an integer. + """ + + self._ensure_mutable() + + if self.mode not in ("LA", "PA", "RGBA"): + # attempt to promote self to a matching alpha mode + try: + mode = getmodebase(self.mode) + "A" + try: + self.im.setmode(mode) + except (AttributeError, ValueError) as e: + # do things the hard way + im = self.im.convert(mode) + if im.mode not in ("LA", "PA", "RGBA"): + msg = "alpha channel could not be added" + raise ValueError(msg) from e # sanity check + self.im = im + self._mode = self.im.mode + except KeyError as e: + msg = "illegal image mode" + raise ValueError(msg) from e + + if self.mode in ("LA", "PA"): + band = 1 + else: + band = 3 + + if isinstance(alpha, Image): + # alpha layer + if alpha.mode not in ("1", "L"): + msg = "illegal image mode" + raise ValueError(msg) + alpha.load() + if alpha.mode == "1": + alpha = alpha.convert("L") + else: + # constant alpha + try: + self.im.fillband(band, alpha) + except (AttributeError, ValueError): + # do things the hard way + alpha = new("L", self.size, alpha) + else: + return + + self.im.putband(alpha.im, band) + + def putdata( + self, + data: Sequence[float] | Sequence[Sequence[int]] | core.ImagingCore | NumpyArray, + scale: float = 1.0, + offset: float = 0.0, + ) -> None: + """ + Copies pixel data from a flattened sequence object into the image. The + values should start at the upper left corner (0, 0), continue to the + end of the line, followed directly by the first value of the second + line, and so on. Data will be read until either the image or the + sequence ends. The scale and offset values are used to adjust the + sequence values: **pixel = value*scale + offset**. + + :param data: A flattened sequence object. + :param scale: An optional scale value. The default is 1.0. + :param offset: An optional offset value. The default is 0.0. + """ + + self._ensure_mutable() + + self.im.putdata(data, scale, offset) + + def putpalette( + self, + data: ImagePalette.ImagePalette | bytes | Sequence[int], + rawmode: str = "RGB", + ) -> None: + """ + Attaches a palette to this image. The image must be a "P", "PA", "L" + or "LA" image. + + The palette sequence must contain at most 256 colors, made up of one + integer value for each channel in the raw mode. + For example, if the raw mode is "RGB", then it can contain at most 768 + values, made up of red, green and blue values for the corresponding pixel + index in the 256 colors. + If the raw mode is "RGBA", then it can contain at most 1024 values, + containing red, green, blue and alpha values. + + Alternatively, an 8-bit string may be used instead of an integer sequence. 
+
+        :param data: A palette sequence (either a list or a string).
+        :param rawmode: The raw mode of the palette. Either "RGB", "RGBA", or a mode
+           that can be transformed to "RGB" or "RGBA" (e.g. "R", "BGR;15", "RGBA;L").
+        """
+        from . import ImagePalette
+
+        if self.mode not in ("L", "LA", "P", "PA"):
+            msg = "illegal image mode"
+            raise ValueError(msg)
+        if isinstance(data, ImagePalette.ImagePalette):
+            if data.rawmode is not None:
+                palette = ImagePalette.raw(data.rawmode, data.palette)
+            else:
+                palette = ImagePalette.ImagePalette(palette=data.palette)
+                palette.dirty = 1
+        else:
+            if not isinstance(data, bytes):
+                data = bytes(data)
+            palette = ImagePalette.raw(rawmode, data)
+        self._mode = "PA" if "A" in self.mode else "P"
+        self.palette = palette
+        self.palette.mode = "RGBA" if "A" in rawmode else "RGB"
+        self.load()  # install new palette
+
+    def putpixel(
+        self, xy: tuple[int, int], value: float | tuple[int, ...] | list[int]
+    ) -> None:
+        """
+        Modifies the pixel at the given position. The color is given as
+        a single numerical value for single-band images, and a tuple for
+        multi-band images. In addition to this, RGB and RGBA tuples are
+        accepted for P and PA images.
+
+        Note that this method is relatively slow. For more extensive changes,
+        use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`
+        module instead.
+
+        See:
+
+        * :py:meth:`~PIL.Image.Image.paste`
+        * :py:meth:`~PIL.Image.Image.putdata`
+        * :py:mod:`~PIL.ImageDraw`
+
+        :param xy: The pixel coordinate, given as (x, y). See
+           :ref:`coordinate-system`.
+        :param value: The pixel value.
+        """
+
+        if self.readonly:
+            self._copy()
+        self.load()
+
+        if (
+            self.mode in ("P", "PA")
+            and isinstance(value, (list, tuple))
+            and len(value) in [3, 4]
+        ):
+            # RGB or RGBA value for a P or PA image
+            if self.mode == "PA":
+                alpha = value[3] if len(value) == 4 else 255
+                value = value[:3]
+            assert self.palette is not None
+            palette_index = self.palette.getcolor(tuple(value), self)
+            value = (palette_index, alpha) if self.mode == "PA" else palette_index
+        return self.im.putpixel(xy, value)
+
+    def remap_palette(
+        self, dest_map: list[int], source_palette: bytes | bytearray | None = None
+    ) -> Image:
+        """
+        Rewrites the image to reorder the palette.
+
+        :param dest_map: A list of indexes into the original palette.
+           e.g. ``[1,0]`` would swap a two item palette, and ``list(range(256))``
+           is the identity transform.
+        :param source_palette: Bytes or None.
+        :returns: An :py:class:`~PIL.Image.Image` object.
+
+        """
+        from . import ImagePalette
+
+        if self.mode not in ("L", "P"):
+            msg = "illegal image mode"
+            raise ValueError(msg)
+
+        bands = 3
+        palette_mode = "RGB"
+        if source_palette is None:
+            if self.mode == "P":
+                self.load()
+                palette_mode = self.im.getpalettemode()
+                if palette_mode == "RGBA":
+                    bands = 4
+                source_palette = self.im.getpalette(palette_mode, palette_mode)
+            else:  # L-mode
+                source_palette = bytearray(i // 3 for i in range(768))
+        elif len(source_palette) > 768:
+            bands = 4
+            palette_mode = "RGBA"
+
+        palette_bytes = b""
+        new_positions = [0] * 256
+
+        # pick only the used colors from the palette
+        for i, oldPosition in enumerate(dest_map):
+            palette_bytes += source_palette[
+                oldPosition * bands : oldPosition * bands + bands
+            ]
+            new_positions[oldPosition] = i
+
+        # replace the palette color id of all pixels with the new id
+
+        # Palette images are [0..255], mapped through a 1 or 3
+        # byte/color map. We need to remap the whole image
+        # from palette 1 to palette 2. 
New_positions is + # an array of indexes into palette 1. Palette 2 is + # palette 1 with any holes removed. + + # We're going to leverage the convert mechanism to use the + # C code to remap the image from palette 1 to palette 2, + # by forcing the source image into 'L' mode and adding a + # mapping 'L' mode palette, then converting back to 'L' + # sans palette thus converting the image bytes, then + # assigning the optimized RGB palette. + + # perf reference, 9500x4000 gif, w/~135 colors + # 14 sec prepatch, 1 sec postpatch with optimization forced. + + mapping_palette = bytearray(new_positions) + + m_im = self.copy() + m_im._mode = "P" + + m_im.palette = ImagePalette.ImagePalette( + palette_mode, palette=mapping_palette * bands + ) + # possibly set palette dirty, then + # m_im.putpalette(mapping_palette, 'L') # converts to 'P' + # or just force it. + # UNDONE -- this is part of the general issue with palettes + m_im.im.putpalette(palette_mode, palette_mode + ";L", m_im.palette.tobytes()) + + m_im = m_im.convert("L") + + m_im.putpalette(palette_bytes, palette_mode) + m_im.palette = ImagePalette.ImagePalette(palette_mode, palette=palette_bytes) + + if "transparency" in self.info: + try: + m_im.info["transparency"] = dest_map.index(self.info["transparency"]) + except ValueError: + if "transparency" in m_im.info: + del m_im.info["transparency"] + + return m_im + + def _get_safe_box( + self, + size: tuple[int, int], + resample: Resampling, + box: tuple[float, float, float, float], + ) -> tuple[int, int, int, int]: + """Expands the box so it includes adjacent pixels + that may be used by resampling with the given resampling filter. + """ + filter_support = _filters_support[resample] - 0.5 + scale_x = (box[2] - box[0]) / size[0] + scale_y = (box[3] - box[1]) / size[1] + support_x = filter_support * scale_x + support_y = filter_support * scale_y + + return ( + max(0, int(box[0] - support_x)), + max(0, int(box[1] - support_y)), + min(self.size[0], math.ceil(box[2] + support_x)), + min(self.size[1], math.ceil(box[3] + support_y)), + ) + + def resize( + self, + size: tuple[int, int] | list[int] | NumpyArray, + resample: int | None = None, + box: tuple[float, float, float, float] | None = None, + reducing_gap: float | None = None, + ) -> Image: + """ + Returns a resized copy of this image. + + :param size: The requested size in pixels, as a tuple or array: + (width, height). + :param resample: An optional resampling filter. This can be + one of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`, + :py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`, + :py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`. + If the image has mode "1" or "P", it is always set to + :py:data:`Resampling.NEAREST`. If the image mode is "BGR;15", + "BGR;16" or "BGR;24", then the default filter is + :py:data:`Resampling.NEAREST`. Otherwise, the default filter is + :py:data:`Resampling.BICUBIC`. See: :ref:`concept-filters`. + :param box: An optional 4-tuple of floats providing + the source image region to be scaled. + The values must be within (0, 0, width, height) rectangle. + If omitted or None, the entire source is used. + :param reducing_gap: Apply optimization by resizing the image + in two steps. First, reducing the image by integer times + using :py:meth:`~PIL.Image.Image.reduce`. + Second, resizing using regular resampling. The last step + changes size no less than by ``reducing_gap`` times. + ``reducing_gap`` may be None (no first step is performed) + or should be greater than 1.0. 
The bigger ``reducing_gap``, + the closer the result to the fair resampling. + The smaller ``reducing_gap``, the faster resizing. + With ``reducing_gap`` greater or equal to 3.0, the result is + indistinguishable from fair resampling in most cases. + The default value is None (no optimization). + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + if resample is None: + bgr = self.mode.startswith("BGR;") + resample = Resampling.NEAREST if bgr else Resampling.BICUBIC + elif resample not in ( + Resampling.NEAREST, + Resampling.BILINEAR, + Resampling.BICUBIC, + Resampling.LANCZOS, + Resampling.BOX, + Resampling.HAMMING, + ): + msg = f"Unknown resampling filter ({resample})." + + filters = [ + f"{filter[1]} ({filter[0]})" + for filter in ( + (Resampling.NEAREST, "Image.Resampling.NEAREST"), + (Resampling.LANCZOS, "Image.Resampling.LANCZOS"), + (Resampling.BILINEAR, "Image.Resampling.BILINEAR"), + (Resampling.BICUBIC, "Image.Resampling.BICUBIC"), + (Resampling.BOX, "Image.Resampling.BOX"), + (Resampling.HAMMING, "Image.Resampling.HAMMING"), + ) + ] + msg += f" Use {', '.join(filters[:-1])} or {filters[-1]}" + raise ValueError(msg) + + if reducing_gap is not None and reducing_gap < 1.0: + msg = "reducing_gap must be 1.0 or greater" + raise ValueError(msg) + + if box is None: + box = (0, 0) + self.size + + size = tuple(size) + if self.size == size and box == (0, 0) + self.size: + return self.copy() + + if self.mode in ("1", "P"): + resample = Resampling.NEAREST + + if self.mode in ["LA", "RGBA"] and resample != Resampling.NEAREST: + im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode]) + im = im.resize(size, resample, box) + return im.convert(self.mode) + + self.load() + + if reducing_gap is not None and resample != Resampling.NEAREST: + factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1 + factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1 + if factor_x > 1 or factor_y > 1: + reduce_box = self._get_safe_box(size, cast(Resampling, resample), box) + factor = (factor_x, factor_y) + self = ( + self.reduce(factor, box=reduce_box) + if callable(self.reduce) + else Image.reduce(self, factor, box=reduce_box) + ) + box = ( + (box[0] - reduce_box[0]) / factor_x, + (box[1] - reduce_box[1]) / factor_y, + (box[2] - reduce_box[0]) / factor_x, + (box[3] - reduce_box[1]) / factor_y, + ) + + return self._new(self.im.resize(size, resample, box)) + + def reduce( + self, + factor: int | tuple[int, int], + box: tuple[int, int, int, int] | None = None, + ) -> Image: + """ + Returns a copy of the image reduced ``factor`` times. + If the size of the image is not dividable by ``factor``, + the resulting size will be rounded up. + + :param factor: A greater than 0 integer or tuple of two integers + for width and height separately. + :param box: An optional 4-tuple of ints providing + the source image region to be reduced. + The values must be within ``(0, 0, width, height)`` rectangle. + If omitted or ``None``, the entire source is used. 
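+
+        For example, a minimal sketch (``hopper.png`` stands in for any
+        image file)::
+
+            from PIL import Image
+
+            with Image.open("hopper.png") as im:
+                # both dimensions are divided by 4, rounding up
+                thumb = im.reduce(4)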
+ """ + if not isinstance(factor, (list, tuple)): + factor = (factor, factor) + + if box is None: + box = (0, 0) + self.size + + if factor == (1, 1) and box == (0, 0) + self.size: + return self.copy() + + if self.mode in ["LA", "RGBA"]: + im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode]) + im = im.reduce(factor, box) + return im.convert(self.mode) + + self.load() + + return self._new(self.im.reduce(factor, box)) + + def rotate( + self, + angle: float, + resample: Resampling = Resampling.NEAREST, + expand: int | bool = False, + center: tuple[float, float] | None = None, + translate: tuple[int, int] | None = None, + fillcolor: float | tuple[float, ...] | str | None = None, + ) -> Image: + """ + Returns a rotated copy of this image. This method returns a + copy of this image, rotated the given number of degrees counter + clockwise around its centre. + + :param angle: In degrees counter clockwise. + :param resample: An optional resampling filter. This can be + one of :py:data:`Resampling.NEAREST` (use nearest neighbour), + :py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2 + environment), or :py:data:`Resampling.BICUBIC` (cubic spline + interpolation in a 4x4 environment). If omitted, or if the image has + mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`. + See :ref:`concept-filters`. + :param expand: Optional expansion flag. If true, expands the output + image to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the + input image. Note that the expand flag assumes rotation around + the center and no translation. + :param center: Optional center of rotation (a 2-tuple). Origin is + the upper left corner. Default is the center of the image. + :param translate: An optional post-rotate translation (a 2-tuple). + :param fillcolor: An optional color for area outside the rotated image. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + angle = angle % 360.0 + + # Fast paths regardless of filter, as long as we're not + # translating or changing the center. + if not (center or translate): + if angle == 0: + return self.copy() + if angle == 180: + return self.transpose(Transpose.ROTATE_180) + if angle in (90, 270) and (expand or self.width == self.height): + return self.transpose( + Transpose.ROTATE_90 if angle == 90 else Transpose.ROTATE_270 + ) + + # Calculate the affine matrix. Note that this is the reverse + # transformation (from destination image to source) because we + # want to interpolate the (discrete) destination pixel from + # the local area around the (floating) source pixel. + + # The matrix we actually want (note that it operates from the right): + # (1, 0, tx) (1, 0, cx) ( cos a, sin a, 0) (1, 0, -cx) + # (0, 1, ty) * (0, 1, cy) * (-sin a, cos a, 0) * (0, 1, -cy) + # (0, 0, 1) (0, 0, 1) ( 0, 0, 1) (0, 0, 1) + + # The reverse matrix is thus: + # (1, 0, cx) ( cos -a, sin -a, 0) (1, 0, -cx) (1, 0, -tx) + # (0, 1, cy) * (-sin -a, cos -a, 0) * (0, 1, -cy) * (0, 1, -ty) + # (0, 0, 1) ( 0, 0, 1) (0, 0, 1) (0, 0, 1) + + # In any case, the final translation may be updated at the end to + # compensate for the expand flag. 
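+
+        # For instance: for angle=180 about center (cx, cy) with no
+        # translation, the steps below reduce to the reverse mapping
+        #     (x, y) -> (2*cx - x, 2*cy - y),
+        # i.e. each destination pixel is sampled from its point reflection.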
+ + w, h = self.size + + if translate is None: + post_trans = (0, 0) + else: + post_trans = translate + if center is None: + center = (w / 2, h / 2) + + angle = -math.radians(angle) + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x: float, y: float, matrix: list[float]) -> tuple[float, float]: + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform( + -center[0] - post_trans[0], -center[1] - post_trans[1], matrix + ) + matrix[2] += center[0] + matrix[5] += center[1] + + if expand: + # calculate output size + xx = [] + yy = [] + for x, y in ((0, 0), (w, 0), (w, h), (0, h)): + transformed_x, transformed_y = transform(x, y, matrix) + xx.append(transformed_x) + yy.append(transformed_y) + nw = math.ceil(max(xx)) - math.floor(min(xx)) + nh = math.ceil(max(yy)) - math.floor(min(yy)) + + # We multiply a translation matrix from the right. Because of its + # special form, this is the same as taking the image of the + # translation vector as new translation vector. + matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix) + w, h = nw, nh + + return self.transform( + (w, h), Transform.AFFINE, matrix, resample, fillcolor=fillcolor + ) + + def save( + self, fp: StrOrBytesPath | IO[bytes], format: str | None = None, **params: Any + ) -> None: + """ + Saves this image under the given filename. If no format is + specified, the format to use is determined from the filename + extension, if possible. + + Keyword options can be used to provide additional instructions + to the writer. If a writer doesn't recognise an option, it is + silently ignored. The available options are described in the + :doc:`image format documentation + <../handbook/image-file-formats>` for each writer. + + You can use a file object instead of a filename. In this case, + you must always specify the format. The file object must + implement the ``seek``, ``tell``, and ``write`` + methods, and be opened in binary mode. + + :param fp: A filename (string), os.PathLike object or file object. + :param format: Optional format override. If omitted, the + format to use is determined from the filename extension. + If a file object was used instead of a filename, this + parameter should always be used. + :param params: Extra parameters to the image writer. These can also be + set on the image itself through ``encoderinfo``. This is useful when + saving multiple images:: + + # Saving XMP data to a single image + from PIL import Image + red = Image.new("RGB", (1, 1), "#f00") + red.save("out.mpo", xmp=b"test") + + # Saving XMP data to the second frame of an image + from PIL import Image + black = Image.new("RGB", (1, 1)) + red = Image.new("RGB", (1, 1), "#f00") + red.encoderinfo = {"xmp": b"test"} + black.save("out.mpo", save_all=True, append_images=[red]) + :returns: None + :exception ValueError: If the output format could not be determined + from the file name. Use the format option to solve this. + :exception OSError: If the file could not be written. The file + may have been created, and may contain partial data. 
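+
+        When saving to a file object, the format cannot be inferred from a
+        filename, so it must always be given explicitly; a minimal sketch::
+
+            from PIL import Image
+
+            im = Image.new("RGB", (1, 1))
+            with open("out.png", "wb") as fp:
+                im.save(fp, format="PNG")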
+ """ + + filename: str | bytes = "" + open_fp = False + if is_path(fp): + filename = os.fspath(fp) + open_fp = True + elif fp == sys.stdout: + try: + fp = sys.stdout.buffer + except AttributeError: + pass + if not filename and hasattr(fp, "name") and is_path(fp.name): + # only set the name for metadata purposes + filename = os.fspath(fp.name) + + preinit() + + filename_ext = os.path.splitext(filename)[1].lower() + ext = filename_ext.decode() if isinstance(filename_ext, bytes) else filename_ext + + if not format: + if ext not in EXTENSION: + init() + try: + format = EXTENSION[ext] + except KeyError as e: + msg = f"unknown file extension: {ext}" + raise ValueError(msg) from e + + from . import ImageFile + + # may mutate self! + if isinstance(self, ImageFile.ImageFile) and os.path.abspath( + filename + ) == os.path.abspath(self.filename): + self._ensure_mutable() + else: + self.load() + + save_all = params.pop("save_all", None) + self._default_encoderinfo = params + encoderinfo = getattr(self, "encoderinfo", {}) + self._attach_default_encoderinfo(self) + self.encoderconfig: tuple[Any, ...] = () + + if format.upper() not in SAVE: + init() + if save_all or ( + save_all is None + and params.get("append_images") + and format.upper() in SAVE_ALL + ): + save_handler = SAVE_ALL[format.upper()] + else: + save_handler = SAVE[format.upper()] + + created = False + if open_fp: + created = not os.path.exists(filename) + if params.get("append", False): + # Open also for reading ("+"), because TIFF save_all + # writer needs to go back and edit the written data. + fp = builtins.open(filename, "r+b") + else: + fp = builtins.open(filename, "w+b") + else: + fp = cast(IO[bytes], fp) + + try: + save_handler(self, fp, filename) + except Exception: + if open_fp: + fp.close() + if created: + try: + os.remove(filename) + except PermissionError: + pass + raise + finally: + self.encoderinfo = encoderinfo + if open_fp: + fp.close() + + def _attach_default_encoderinfo(self, im: Image) -> dict[str, Any]: + encoderinfo = getattr(self, "encoderinfo", {}) + self.encoderinfo = {**im._default_encoderinfo, **encoderinfo} + return encoderinfo + + def seek(self, frame: int) -> None: + """ + Seeks to the given frame in this sequence file. If you seek + beyond the end of the sequence, the method raises an + ``EOFError`` exception. When a sequence file is opened, the + library automatically seeks to frame 0. + + See :py:meth:`~PIL.Image.Image.tell`. + + If defined, :attr:`~PIL.Image.Image.n_frames` refers to the + number of available frames. + + :param frame: Frame number, starting at 0. + :exception EOFError: If the call attempts to seek beyond the end + of the sequence. + """ + + # overridden by file handlers + if frame != 0: + msg = "no more images in file" + raise EOFError(msg) + + def show(self, title: str | None = None) -> None: + """ + Displays this image. This method is mainly intended for debugging purposes. + + This method calls :py:func:`PIL.ImageShow.show` internally. You can use + :py:func:`PIL.ImageShow.register` to override its default behaviour. + + The image is first saved to a temporary file. By default, it will be in + PNG format. + + On Unix, the image is then opened using the **xdg-open**, **display**, + **gm**, **eog** or **xv** utility, depending on which one can be found. + + On macOS, the image is opened with the native Preview application. + + On Windows, the image is opened with the standard PNG display utility. + + :param title: Optional title to use for the image window, where possible. 
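+
+        For example, a sketch of overriding the viewer (``_NullViewer`` is a
+        hypothetical name; it assumes the :py:class:`PIL.ImageShow.Viewer`
+        interface)::
+
+            from PIL import ImageShow
+
+            class _NullViewer(ImageShow.Viewer):
+                def show_image(self, image, **options):
+                    return True  # pretend the image was displayed
+
+            ImageShow.register(_NullViewer(), order=-1)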
+ """ + + _show(self, title=title) + + def split(self) -> tuple[Image, ...]: + """ + Split this image into individual bands. This method returns a + tuple of individual image bands from an image. For example, + splitting an "RGB" image creates three new images each + containing a copy of one of the original bands (red, green, + blue). + + If you need only one band, :py:meth:`~PIL.Image.Image.getchannel` + method can be more convenient and faster. + + :returns: A tuple containing bands. + """ + + self.load() + if self.im.bands == 1: + return (self.copy(),) + return tuple(map(self._new, self.im.split())) + + def getchannel(self, channel: int | str) -> Image: + """ + Returns an image containing a single channel of the source image. + + :param channel: What channel to return. Could be index + (0 for "R" channel of "RGB") or channel name + ("A" for alpha channel of "RGBA"). + :returns: An image in "L" mode. + + .. versionadded:: 4.3.0 + """ + self.load() + + if isinstance(channel, str): + try: + channel = self.getbands().index(channel) + except ValueError as e: + msg = f'The image has no channel "{channel}"' + raise ValueError(msg) from e + + return self._new(self.im.getband(channel)) + + def tell(self) -> int: + """ + Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`. + + If defined, :attr:`~PIL.Image.Image.n_frames` refers to the + number of available frames. + + :returns: Frame number, starting with 0. + """ + return 0 + + def thumbnail( + self, + size: tuple[float, float], + resample: Resampling = Resampling.BICUBIC, + reducing_gap: float | None = 2.0, + ) -> None: + """ + Make this image into a thumbnail. This method modifies the + image to contain a thumbnail version of itself, no larger than + the given size. This method calculates an appropriate thumbnail + size to preserve the aspect of the image, calls the + :py:meth:`~PIL.Image.Image.draft` method to configure the file reader + (where applicable), and finally resizes the image. + + Note that this function modifies the :py:class:`~PIL.Image.Image` + object in place. If you need to use the full resolution image as well, + apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original + image. + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + :param resample: Optional resampling filter. This can be one + of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`, + :py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`, + :py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`. + If omitted, it defaults to :py:data:`Resampling.BICUBIC`. + (was :py:data:`Resampling.NEAREST` prior to version 2.5.0). + See: :ref:`concept-filters`. + :param reducing_gap: Apply optimization by resizing the image + in two steps. First, reducing the image by integer times + using :py:meth:`~PIL.Image.Image.reduce` or + :py:meth:`~PIL.Image.Image.draft` for JPEG images. + Second, resizing using regular resampling. The last step + changes size no less than by ``reducing_gap`` times. + ``reducing_gap`` may be None (no first step is performed) + or should be greater than 1.0. The bigger ``reducing_gap``, + the closer the result to the fair resampling. + The smaller ``reducing_gap``, the faster resizing. + With ``reducing_gap`` greater or equal to 3.0, the result is + indistinguishable from fair resampling in most cases. + The default value is 2.0 (very close to fair resampling + while still being faster in many cases). 
+ :returns: None + """ + + provided_size = tuple(map(math.floor, size)) + + def preserve_aspect_ratio() -> tuple[int, int] | None: + def round_aspect(number: float, key: Callable[[int], float]) -> int: + return max(min(math.floor(number), math.ceil(number), key=key), 1) + + x, y = provided_size + if x >= self.width and y >= self.height: + return None + + aspect = self.width / self.height + if x / y >= aspect: + x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y)) + else: + y = round_aspect( + x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n) + ) + return x, y + + preserved_size = preserve_aspect_ratio() + if preserved_size is None: + return + final_size = preserved_size + + box = None + if reducing_gap is not None: + res = self.draft( + None, (int(size[0] * reducing_gap), int(size[1] * reducing_gap)) + ) + if res is not None: + box = res[1] + + if self.size != final_size: + im = self.resize(final_size, resample, box=box, reducing_gap=reducing_gap) + + self.im = im.im + self._size = final_size + self._mode = self.im.mode + + self.readonly = 0 + + # FIXME: the different transform methods need further explanation + # instead of bloating the method docs, add a separate chapter. + def transform( + self, + size: tuple[int, int], + method: Transform | ImageTransformHandler | SupportsGetData, + data: Sequence[Any] | None = None, + resample: int = Resampling.NEAREST, + fill: int = 1, + fillcolor: float | tuple[float, ...] | str | None = None, + ) -> Image: + """ + Transforms this image. This method creates a new image with the + given size, and the same mode as the original, and copies data + to the new image using the given transform. + + :param size: The output size in pixels, as a 2-tuple: + (width, height). + :param method: The transformation method. This is one of + :py:data:`Transform.EXTENT` (cut out a rectangular subregion), + :py:data:`Transform.AFFINE` (affine transform), + :py:data:`Transform.PERSPECTIVE` (perspective transform), + :py:data:`Transform.QUAD` (map a quadrilateral to a rectangle), or + :py:data:`Transform.MESH` (map a number of source quadrilaterals + in one operation). + + It may also be an :py:class:`~PIL.Image.ImageTransformHandler` + object:: + + class Example(Image.ImageTransformHandler): + def transform(self, size, data, resample, fill=1): + # Return result + + Implementations of :py:class:`~PIL.Image.ImageTransformHandler` + for some of the :py:class:`Transform` methods are provided + in :py:mod:`~PIL.ImageTransform`. + + It may also be an object with a ``method.getdata`` method + that returns a tuple supplying new ``method`` and ``data`` values:: + + class Example: + def getdata(self): + method = Image.Transform.EXTENT + data = (0, 0, 100, 100) + return method, data + :param data: Extra data to the transformation method. + :param resample: Optional resampling filter. It can be one of + :py:data:`Resampling.NEAREST` (use nearest neighbour), + :py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2 + environment), or :py:data:`Resampling.BICUBIC` (cubic spline + interpolation in a 4x4 environment). If omitted, or if the image + has mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`. + See: :ref:`concept-filters`. + :param fill: If ``method`` is an + :py:class:`~PIL.Image.ImageTransformHandler` object, this is one of + the arguments passed to it. Otherwise, it is unused. + :param fillcolor: Optional fill color for the area outside the + transform in the output image. + :returns: An :py:class:`~PIL.Image.Image` object. 
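+
+        For example, an affine shift; a minimal sketch (``hopper.png`` stands
+        in for any image file). The 6-tuple maps each *output* coordinate to
+        its source coordinate, so positive offsets shift the visible content
+        up and to the left::
+
+            from PIL import Image
+
+            with Image.open("hopper.png") as im:
+                shifted = im.transform(
+                    im.size, Image.Transform.AFFINE, (1, 0, 20, 0, 1, 10)
+                )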
+ """ + + if self.mode in ("LA", "RGBA") and resample != Resampling.NEAREST: + return ( + self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode]) + .transform(size, method, data, resample, fill, fillcolor) + .convert(self.mode) + ) + + if isinstance(method, ImageTransformHandler): + return method.transform(size, self, resample=resample, fill=fill) + + if hasattr(method, "getdata"): + # compatibility w. old-style transform objects + method, data = method.getdata() + + if data is None: + msg = "missing method data" + raise ValueError(msg) + + im = new(self.mode, size, fillcolor) + if self.mode == "P" and self.palette: + im.palette = self.palette.copy() + im.info = self.info.copy() + if method == Transform.MESH: + # list of quads + for box, quad in data: + im.__transformer( + box, self, Transform.QUAD, quad, resample, fillcolor is None + ) + else: + im.__transformer( + (0, 0) + size, self, method, data, resample, fillcolor is None + ) + + return im + + def __transformer( + self, + box: tuple[int, int, int, int], + image: Image, + method: Transform, + data: Sequence[float], + resample: int = Resampling.NEAREST, + fill: bool = True, + ) -> None: + w = box[2] - box[0] + h = box[3] - box[1] + + if method == Transform.AFFINE: + data = data[:6] + + elif method == Transform.EXTENT: + # convert extent to an affine transform + x0, y0, x1, y1 = data + xs = (x1 - x0) / w + ys = (y1 - y0) / h + method = Transform.AFFINE + data = (xs, 0, x0, 0, ys, y0) + + elif method == Transform.PERSPECTIVE: + data = data[:8] + + elif method == Transform.QUAD: + # quadrilateral warp. data specifies the four corners + # given as NW, SW, SE, and NE. + nw = data[:2] + sw = data[2:4] + se = data[4:6] + ne = data[6:8] + x0, y0 = nw + As = 1.0 / w + At = 1.0 / h + data = ( + x0, + (ne[0] - x0) * As, + (sw[0] - x0) * At, + (se[0] - sw[0] - ne[0] + x0) * As * At, + y0, + (ne[1] - y0) * As, + (sw[1] - y0) * At, + (se[1] - sw[1] - ne[1] + y0) * As * At, + ) + + else: + msg = "unknown transformation method" + raise ValueError(msg) + + if resample not in ( + Resampling.NEAREST, + Resampling.BILINEAR, + Resampling.BICUBIC, + ): + if resample in (Resampling.BOX, Resampling.HAMMING, Resampling.LANCZOS): + unusable: dict[int, str] = { + Resampling.BOX: "Image.Resampling.BOX", + Resampling.HAMMING: "Image.Resampling.HAMMING", + Resampling.LANCZOS: "Image.Resampling.LANCZOS", + } + msg = unusable[resample] + f" ({resample}) cannot be used." + else: + msg = f"Unknown resampling filter ({resample})." + + filters = [ + f"{filter[1]} ({filter[0]})" + for filter in ( + (Resampling.NEAREST, "Image.Resampling.NEAREST"), + (Resampling.BILINEAR, "Image.Resampling.BILINEAR"), + (Resampling.BICUBIC, "Image.Resampling.BICUBIC"), + ) + ] + msg += f" Use {', '.join(filters[:-1])} or {filters[-1]}" + raise ValueError(msg) + + image.load() + + self.load() + + if image.mode in ("1", "P"): + resample = Resampling.NEAREST + + self.im.transform(box, image.im, method, data, resample, fill) + + def transpose(self, method: Transpose) -> Image: + """ + Transpose image (flip or rotate in 90 degree steps) + + :param method: One of :py:data:`Transpose.FLIP_LEFT_RIGHT`, + :py:data:`Transpose.FLIP_TOP_BOTTOM`, :py:data:`Transpose.ROTATE_90`, + :py:data:`Transpose.ROTATE_180`, :py:data:`Transpose.ROTATE_270`, + :py:data:`Transpose.TRANSPOSE` or :py:data:`Transpose.TRANSVERSE`. + :returns: Returns a flipped or rotated copy of this image. 
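+
+        For example, a minimal sketch (``hopper.png`` stands in for any
+        image file)::
+
+            from PIL import Image
+
+            with Image.open("hopper.png") as im:
+                rotated = im.transpose(Image.Transpose.ROTATE_90)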
+ """ + + self.load() + return self._new(self.im.transpose(method)) + + def effect_spread(self, distance: int) -> Image: + """ + Randomly spread pixels in an image. + + :param distance: Distance to spread pixels. + """ + self.load() + return self._new(self.im.effect_spread(distance)) + + def toqimage(self) -> ImageQt.ImageQt: + """Returns a QImage copy of this image""" + from . import ImageQt + + if not ImageQt.qt_is_installed: + msg = "Qt bindings are not installed" + raise ImportError(msg) + return ImageQt.toqimage(self) + + def toqpixmap(self) -> ImageQt.QPixmap: + """Returns a QPixmap copy of this image""" + from . import ImageQt + + if not ImageQt.qt_is_installed: + msg = "Qt bindings are not installed" + raise ImportError(msg) + return ImageQt.toqpixmap(self) + + +# -------------------------------------------------------------------- +# Abstract handlers. + + +class ImagePointHandler(abc.ABC): + """ + Used as a mixin by point transforms + (for use with :py:meth:`~PIL.Image.Image.point`) + """ + + @abc.abstractmethod + def point(self, im: Image) -> Image: + pass + + +class ImageTransformHandler(abc.ABC): + """ + Used as a mixin by geometry transforms + (for use with :py:meth:`~PIL.Image.Image.transform`) + """ + + @abc.abstractmethod + def transform( + self, + size: tuple[int, int], + image: Image, + **options: Any, + ) -> Image: + pass + + +# -------------------------------------------------------------------- +# Factories + + +def _check_size(size: Any) -> None: + """ + Common check to enforce type and sanity check on size tuples + + :param size: Should be a 2 tuple of (width, height) + :returns: None, or raises a ValueError + """ + + if not isinstance(size, (list, tuple)): + msg = "Size must be a list or tuple" + raise ValueError(msg) + if len(size) != 2: + msg = "Size must be a sequence of length 2" + raise ValueError(msg) + if size[0] < 0 or size[1] < 0: + msg = "Width and height must be >= 0" + raise ValueError(msg) + + +def new( + mode: str, + size: tuple[int, int] | list[int], + color: float | tuple[float, ...] | str | None = 0, +) -> Image: + """ + Creates a new image with the given mode and size. + + :param mode: The mode to use for the new image. See: + :ref:`concept-modes`. + :param size: A 2-tuple, containing (width, height) in pixels. + :param color: What color to use for the image. Default is black. + If given, this should be a single integer or floating point value + for single-band modes, and a tuple for multi-band modes (one value + per band). When creating RGB or HSV images, you can also use color + strings as supported by the ImageColor module. If the color is + None, the image is not initialised. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + if mode in ("BGR;15", "BGR;16", "BGR;24"): + deprecate(mode, 12) + + _check_size(size) + + if color is None: + # don't initialize + return Image()._new(core.new(mode, size)) + + if isinstance(color, str): + # css3-style specifier + + from . import ImageColor + + color = ImageColor.getcolor(color, mode) + + im = Image() + if ( + mode == "P" + and isinstance(color, (list, tuple)) + and all(isinstance(i, int) for i in color) + ): + color_ints: tuple[int, ...] = cast(tuple[int, ...], tuple(color)) + if len(color_ints) == 3 or len(color_ints) == 4: + # RGB or RGBA value for a P image + from . 
import ImagePalette
+
+            im.palette = ImagePalette.ImagePalette()
+            color = im.palette.getcolor(color_ints)
+    return im._new(core.fill(mode, size, color))
+
+
+def frombytes(
+    mode: str,
+    size: tuple[int, int],
+    data: bytes | bytearray | SupportsArrayInterface,
+    decoder_name: str = "raw",
+    *args: Any,
+) -> Image:
+    """
+    Creates a copy of an image memory from pixel data in a buffer.
+
+    In its simplest form, this function takes three arguments
+    (mode, size, and unpacked pixel data).
+
+    You can also use any pixel decoder supported by PIL. For more
+    information on available decoders, see the section
+    :ref:`Writing Your Own File Codec <file-codecs>`.
+
+    Note that this function decodes pixel data only, not entire images.
+    If you have an entire image in a string, wrap it in a
+    :py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load
+    it.
+
+    :param mode: The image mode. See: :ref:`concept-modes`.
+    :param size: The image size.
+    :param data: A byte buffer containing raw data for the given mode.
+    :param decoder_name: What decoder to use.
+    :param args: Additional parameters for the given decoder.
+    :returns: An :py:class:`~PIL.Image.Image` object.
+    """
+
+    _check_size(size)
+
+    im = new(mode, size)
+    if im.width != 0 and im.height != 0:
+        decoder_args: Any = args
+        if len(decoder_args) == 1 and isinstance(decoder_args[0], tuple):
+            # may pass tuple instead of argument list
+            decoder_args = decoder_args[0]
+
+        if decoder_name == "raw" and decoder_args == ():
+            decoder_args = mode
+
+        im.frombytes(data, decoder_name, decoder_args)
+    return im
+
+
+def frombuffer(
+    mode: str,
+    size: tuple[int, int],
+    data: bytes | SupportsArrayInterface,
+    decoder_name: str = "raw",
+    *args: Any,
+) -> Image:
+    """
+    Creates an image memory referencing pixel data in a byte buffer.
+
+    This function is similar to :py:func:`~PIL.Image.frombytes`, but uses data
+    in the byte buffer, where possible. This means that changes to the
+    original buffer object are reflected in this image. Not all modes can
+    share memory; supported modes include "L", "RGBX", "RGBA", and "CMYK".
+
+    Note that this function decodes pixel data only, not entire images.
+    If you have an entire image file in a string, wrap it in a
+    :py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load it.
+
+    The default parameters used for the "raw" decoder differ from those used for
+    :py:func:`~PIL.Image.frombytes`. This is a bug, and will probably be fixed in a
+    future release. The current release issues a warning if you do this; to disable
+    the warning, you should provide the full set of parameters. See below for details.
+
+    :param mode: The image mode. See: :ref:`concept-modes`.
+    :param size: The image size.
+    :param data: A bytes or other buffer object containing raw
+        data for the given mode.
+    :param decoder_name: What decoder to use.
+    :param args: Additional parameters for the given decoder. For the
+        default encoder ("raw"), it's recommended that you provide the
+        full set of parameters::
+
+            frombuffer(mode, size, data, "raw", mode, 0, 1)
+
+    :returns: An :py:class:`~PIL.Image.Image` object.
+
+    .. versionadded:: 1.1.4
+    """
+
+    _check_size(size)
+
+    # may pass tuple instead of argument list
+    if len(args) == 1 and isinstance(args[0], tuple):
+        args = args[0]
+
+    if decoder_name == "raw":
+        if args == ():
+            args = mode, 0, 1
+        if args[0] in _MAPMODES:
+            im = new(mode, (0, 0))
+            im = im._new(core.map_buffer(data, size, decoder_name, 0, args))
+            if mode == "P":
+                from . 
import ImagePalette + + im.palette = ImagePalette.ImagePalette("RGB", im.im.getpalette("RGB")) + im.readonly = 1 + return im + + return frombytes(mode, size, data, decoder_name, args) + + +class SupportsArrayInterface(Protocol): + """ + An object that has an ``__array_interface__`` dictionary. + """ + + @property + def __array_interface__(self) -> dict[str, Any]: + raise NotImplementedError() + + +class SupportsArrowArrayInterface(Protocol): + """ + An object that has an ``__arrow_c_array__`` method corresponding to the arrow c + data interface. + """ + + def __arrow_c_array__( + self, requested_schema: "PyCapsule" = None # type: ignore[name-defined] # noqa: F821, UP037 + ) -> tuple["PyCapsule", "PyCapsule"]: # type: ignore[name-defined] # noqa: F821, UP037 + raise NotImplementedError() + + +def fromarray(obj: SupportsArrayInterface, mode: str | None = None) -> Image: + """ + Creates an image memory from an object exporting the array interface + (using the buffer protocol):: + + from PIL import Image + import numpy as np + a = np.zeros((5, 5)) + im = Image.fromarray(a) + + If ``obj`` is not contiguous, then the ``tobytes`` method is called + and :py:func:`~PIL.Image.frombuffer` is used. + + In the case of NumPy, be aware that Pillow modes do not always correspond + to NumPy dtypes. Pillow modes only offer 1-bit pixels, 8-bit pixels, + 32-bit signed integer pixels, and 32-bit floating point pixels. + + Pillow images can also be converted to arrays:: + + from PIL import Image + import numpy as np + im = Image.open("hopper.jpg") + a = np.asarray(im) + + When converting Pillow images to arrays however, only pixel values are + transferred. This means that P and PA mode images will lose their palette. + + :param obj: Object with array interface + :param mode: Optional mode to use when reading ``obj``. Will be determined from + type if ``None``. Deprecated. + + This will not be used to convert the data after reading, but will be used to + change how the data is read:: + + from PIL import Image + import numpy as np + a = np.full((1, 1), 300) + im = Image.fromarray(a, mode="L") + im.getpixel((0, 0)) # 44 + im = Image.fromarray(a, mode="RGB") + im.getpixel((0, 0)) # (44, 1, 0) + + See: :ref:`concept-modes` for general information about modes. + :returns: An image object. + + .. versionadded:: 1.1.6 + """ + arr = obj.__array_interface__ + shape = arr["shape"] + ndim = len(shape) + strides = arr.get("strides", None) + if mode is None: + try: + typekey = (1, 1) + shape[2:], arr["typestr"] + except KeyError as e: + msg = "Cannot handle this data type" + raise TypeError(msg) from e + try: + mode, rawmode = _fromarray_typemap[typekey] + except KeyError as e: + typekey_shape, typestr = typekey + msg = f"Cannot handle this data type: {typekey_shape}, {typestr}" + raise TypeError(msg) from e + else: + deprecate("'mode' parameter", 13) + rawmode = mode + if mode in ["1", "L", "I", "P", "F"]: + ndmax = 2 + elif mode == "RGB": + ndmax = 3 + else: + ndmax = 4 + if ndim > ndmax: + msg = f"Too many dimensions: {ndim} > {ndmax}." 
+ raise ValueError(msg) + + size = 1 if ndim == 1 else shape[1], shape[0] + if strides is not None: + if hasattr(obj, "tobytes"): + obj = obj.tobytes() + elif hasattr(obj, "tostring"): + obj = obj.tostring() + else: + msg = "'strides' requires either tobytes() or tostring()" + raise ValueError(msg) + + return frombuffer(mode, size, obj, "raw", rawmode, 0, 1) + + +def fromarrow( + obj: SupportsArrowArrayInterface, mode: str, size: tuple[int, int] +) -> Image: + """Creates an image with zero-copy shared memory from an object exporting + the arrow_c_array interface protocol:: + + from PIL import Image + import pyarrow as pa + arr = pa.array([0]*(5*5*4), type=pa.uint8()) + im = Image.fromarrow(arr, 'RGBA', (5, 5)) + + If the data representation of the ``obj`` is not compatible with + Pillow internal storage, a ValueError is raised. + + Pillow images can also be converted to Arrow objects:: + + from PIL import Image + import pyarrow as pa + im = Image.open('hopper.jpg') + arr = pa.array(im) + + As with array support, when converting Pillow images to arrays, + only pixel values are transferred. This means that P and PA mode + images will lose their palette. + + :param obj: Object with an arrow_c_array interface + :param mode: Image mode. + :param size: Image size. This must match the storage of the arrow object. + :returns: An Image object + + Note that according to the Arrow spec, both the producer and the + consumer should consider the exported array to be immutable, as + unsynchronized updates will potentially cause inconsistent data. + + See: :ref:`arrow-support` for more detailed information + + .. versionadded:: 11.2.1 + + """ + if not hasattr(obj, "__arrow_c_array__"): + msg = "arrow_c_array interface not found" + raise ValueError(msg) + + (schema_capsule, array_capsule) = obj.__arrow_c_array__() + _im = core.new_arrow(mode, size, schema_capsule, array_capsule) + if _im: + return Image()._new(_im) + + msg = "new_arrow returned None without an exception" + raise ValueError(msg) + + +def fromqimage(im: ImageQt.QImage) -> ImageFile.ImageFile: + """Creates an image instance from a QImage image""" + from . import ImageQt + + if not ImageQt.qt_is_installed: + msg = "Qt bindings are not installed" + raise ImportError(msg) + return ImageQt.fromqimage(im) + + +def fromqpixmap(im: ImageQt.QPixmap) -> ImageFile.ImageFile: + """Creates an image instance from a QPixmap image""" + from . import ImageQt + + if not ImageQt.qt_is_installed: + msg = "Qt bindings are not installed" + raise ImportError(msg) + return ImageQt.fromqpixmap(im) + + +_fromarray_typemap = { + # (shape, typestr) => mode, rawmode + # first two members of shape are set to one + ((1, 1), "|b1"): ("1", "1;8"), + ((1, 1), "|u1"): ("L", "L"), + ((1, 1), "|i1"): ("I", "I;8"), + ((1, 1), "u2"): ("I", "I;16B"), + ((1, 1), "i2"): ("I", "I;16BS"), + ((1, 1), "u4"): ("I", "I;32B"), + ((1, 1), "i4"): ("I", "I;32BS"), + ((1, 1), "f4"): ("F", "F;32BF"), + ((1, 1), "f8"): ("F", "F;64BF"), + ((1, 1, 2), "|u1"): ("LA", "LA"), + ((1, 1, 3), "|u1"): ("RGB", "RGB"), + ((1, 1, 4), "|u1"): ("RGBA", "RGBA"), + # shortcuts: + ((1, 1), f"{_ENDIAN}i4"): ("I", "I"), + ((1, 1), f"{_ENDIAN}f4"): ("F", "F"), +} + + +def _decompression_bomb_check(size: tuple[int, int]) -> None: + if MAX_IMAGE_PIXELS is None: + return + + pixels = max(1, size[0]) * max(1, size[1]) + + if pixels > 2 * MAX_IMAGE_PIXELS: + msg = ( + f"Image size ({pixels} pixels) exceeds limit of {2 * MAX_IMAGE_PIXELS} " + "pixels, could be decompression bomb DOS attack." 
+ ) + raise DecompressionBombError(msg) + + if pixels > MAX_IMAGE_PIXELS: + warnings.warn( + f"Image size ({pixels} pixels) exceeds limit of {MAX_IMAGE_PIXELS} pixels, " + "could be decompression bomb DOS attack.", + DecompressionBombWarning, + ) + + +def open( + fp: StrOrBytesPath | IO[bytes], + mode: Literal["r"] = "r", + formats: list[str] | tuple[str, ...] | None = None, +) -> ImageFile.ImageFile: + """ + Opens and identifies the given image file. + + This is a lazy operation; this function identifies the file, but + the file remains open and the actual image data is not read from + the file until you try to process the data (or call the + :py:meth:`~PIL.Image.Image.load` method). See + :py:func:`~PIL.Image.new`. See :ref:`file-handling`. + + :param fp: A filename (string), os.PathLike object or a file object. + The file object must implement ``file.read``, + ``file.seek``, and ``file.tell`` methods, + and be opened in binary mode. The file object will also seek to zero + before reading. + :param mode: The mode. If given, this argument must be "r". + :param formats: A list or tuple of formats to attempt to load the file in. + This can be used to restrict the set of formats checked. + Pass ``None`` to try all supported formats. You can print the set of + available formats by running ``python3 -m PIL`` or using + the :py:func:`PIL.features.pilinfo` function. + :returns: An :py:class:`~PIL.Image.Image` object. + :exception FileNotFoundError: If the file cannot be found. + :exception PIL.UnidentifiedImageError: If the image cannot be opened and + identified. + :exception ValueError: If the ``mode`` is not "r", or if a ``StringIO`` + instance is used for ``fp``. + :exception TypeError: If ``formats`` is not ``None``, a list or a tuple. + """ + + if mode != "r": + msg = f"bad mode {repr(mode)}" # type: ignore[unreachable] + raise ValueError(msg) + elif isinstance(fp, io.StringIO): + msg = ( # type: ignore[unreachable] + "StringIO cannot be used to open an image. " + "Binary data must be used instead." + ) + raise ValueError(msg) + + if formats is None: + formats = ID + elif not isinstance(formats, (list, tuple)): + msg = "formats must be a list or tuple" # type: ignore[unreachable] + raise TypeError(msg) + + exclusive_fp = False + filename: str | bytes = "" + if is_path(fp): + filename = os.fspath(fp) + fp = builtins.open(filename, "rb") + exclusive_fp = True + else: + fp = cast(IO[bytes], fp) + + try: + fp.seek(0) + except (AttributeError, io.UnsupportedOperation): + fp = io.BytesIO(fp.read()) + exclusive_fp = True + + prefix = fp.read(16) + + preinit() + + warning_messages: list[str] = [] + + def _open_core( + fp: IO[bytes], + filename: str | bytes, + prefix: bytes, + formats: list[str] | tuple[str, ...], + ) -> ImageFile.ImageFile | None: + for i in formats: + i = i.upper() + if i not in OPEN: + init() + try: + factory, accept = OPEN[i] + result = not accept or accept(prefix) + if isinstance(result, str): + warning_messages.append(result) + elif result: + fp.seek(0) + im = factory(fp, filename) + _decompression_bomb_check(im.size) + return im + except (SyntaxError, IndexError, TypeError, struct.error) as e: + if WARN_POSSIBLE_FORMATS: + warning_messages.append(i + " opening failed. 
" + str(e)) + except BaseException: + if exclusive_fp: + fp.close() + raise + return None + + im = _open_core(fp, filename, prefix, formats) + + if im is None and formats is ID: + checked_formats = ID.copy() + if init(): + im = _open_core( + fp, + filename, + prefix, + tuple(format for format in formats if format not in checked_formats), + ) + + if im: + im._exclusive_fp = exclusive_fp + return im + + if exclusive_fp: + fp.close() + for message in warning_messages: + warnings.warn(message) + msg = "cannot identify image file %r" % (filename if filename else fp) + raise UnidentifiedImageError(msg) + + +# +# Image processing. + + +def alpha_composite(im1: Image, im2: Image) -> Image: + """ + Alpha composite im2 over im1. + + :param im1: The first image. Must have mode RGBA. + :param im2: The second image. Must have mode RGBA, and the same size as + the first image. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + im1.load() + im2.load() + return im1._new(core.alpha_composite(im1.im, im2.im)) + + +def blend(im1: Image, im2: Image, alpha: float) -> Image: + """ + Creates a new image by interpolating between two input images, using + a constant alpha:: + + out = image1 * (1.0 - alpha) + image2 * alpha + + :param im1: The first image. + :param im2: The second image. Must have the same mode and size as + the first image. + :param alpha: The interpolation alpha factor. If alpha is 0.0, a + copy of the first image is returned. If alpha is 1.0, a copy of + the second image is returned. There are no restrictions on the + alpha value. If necessary, the result is clipped to fit into + the allowed output range. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + im1.load() + im2.load() + return im1._new(core.blend(im1.im, im2.im, alpha)) + + +def composite(image1: Image, image2: Image, mask: Image) -> Image: + """ + Create composite image by blending images using a transparency mask. + + :param image1: The first image. + :param image2: The second image. Must have the same mode and + size as the first image. + :param mask: A mask image. This image can have mode + "1", "L", or "RGBA", and must have the same size as the + other two images. + """ + + image = image2.copy() + image.paste(image1, None, mask) + return image + + +def eval(image: Image, *args: Callable[[int], float]) -> Image: + """ + Applies the function (which should take one argument) to each pixel + in the given image. If the image has more than one band, the same + function is applied to each band. Note that the function is + evaluated once for each possible pixel value, so you cannot use + random components or other generators. + + :param image: The input image. + :param function: A function object, taking one integer argument. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + return image.point(args[0]) + + +def merge(mode: str, bands: Sequence[Image]) -> Image: + """ + Merge a set of single band images into a new multiband image. + + :param mode: The mode to use for the output image. See: + :ref:`concept-modes`. + :param bands: A sequence containing one single-band image for + each band in the output image. All bands must have the + same size. + :returns: An :py:class:`~PIL.Image.Image` object. 
+ """ + + if getmodebands(mode) != len(bands) or "*" in mode: + msg = "wrong number of bands" + raise ValueError(msg) + for band in bands[1:]: + if band.mode != getmodetype(mode): + msg = "mode mismatch" + raise ValueError(msg) + if band.size != bands[0].size: + msg = "size mismatch" + raise ValueError(msg) + for band in bands: + band.load() + return bands[0]._new(core.merge(mode, *[b.im for b in bands])) + + +# -------------------------------------------------------------------- +# Plugin registry + + +def register_open( + id: str, + factory: ( + Callable[[IO[bytes], str | bytes], ImageFile.ImageFile] + | type[ImageFile.ImageFile] + ), + accept: Callable[[bytes], bool | str] | None = None, +) -> None: + """ + Register an image file plugin. This function should not be used + in application code. + + :param id: An image format identifier. + :param factory: An image file factory method. + :param accept: An optional function that can be used to quickly + reject images having another format. + """ + id = id.upper() + if id not in ID: + ID.append(id) + OPEN[id] = factory, accept + + +def register_mime(id: str, mimetype: str) -> None: + """ + Registers an image MIME type by populating ``Image.MIME``. This function + should not be used in application code. + + ``Image.MIME`` provides a mapping from image format identifiers to mime + formats, but :py:meth:`~PIL.ImageFile.ImageFile.get_format_mimetype` can + provide a different result for specific images. + + :param id: An image format identifier. + :param mimetype: The image MIME type for this format. + """ + MIME[id.upper()] = mimetype + + +def register_save( + id: str, driver: Callable[[Image, IO[bytes], str | bytes], None] +) -> None: + """ + Registers an image save function. This function should not be + used in application code. + + :param id: An image format identifier. + :param driver: A function to save images in this format. + """ + SAVE[id.upper()] = driver + + +def register_save_all( + id: str, driver: Callable[[Image, IO[bytes], str | bytes], None] +) -> None: + """ + Registers an image function to save all the frames + of a multiframe format. This function should not be + used in application code. + + :param id: An image format identifier. + :param driver: A function to save images in this format. + """ + SAVE_ALL[id.upper()] = driver + + +def register_extension(id: str, extension: str) -> None: + """ + Registers an image extension. This function should not be + used in application code. + + :param id: An image format identifier. + :param extension: An extension used for this format. + """ + EXTENSION[extension.lower()] = id.upper() + + +def register_extensions(id: str, extensions: list[str]) -> None: + """ + Registers image extensions. This function should not be + used in application code. + + :param id: An image format identifier. + :param extensions: A list of extensions used for this format. + """ + for extension in extensions: + register_extension(id, extension) + + +def registered_extensions() -> dict[str, str]: + """ + Returns a dictionary containing all file extensions belonging + to registered plugins + """ + init() + return EXTENSION + + +def register_decoder(name: str, decoder: type[ImageFile.PyDecoder]) -> None: + """ + Registers an image decoder. This function should not be + used in application code. + + :param name: The name of the decoder + :param decoder: An ImageFile.PyDecoder object + + .. 
versionadded:: 4.1.0 + """ + DECODERS[name] = decoder + + +def register_encoder(name: str, encoder: type[ImageFile.PyEncoder]) -> None: + """ + Registers an image encoder. This function should not be + used in application code. + + :param name: The name of the encoder + :param encoder: An ImageFile.PyEncoder object + + .. versionadded:: 4.1.0 + """ + ENCODERS[name] = encoder + + +# -------------------------------------------------------------------- +# Simple display support. + + +def _show(image: Image, **options: Any) -> None: + from . import ImageShow + + ImageShow.show(image, **options) + + +# -------------------------------------------------------------------- +# Effects + + +def effect_mandelbrot( + size: tuple[int, int], extent: tuple[float, float, float, float], quality: int +) -> Image: + """ + Generate a Mandelbrot set covering the given extent. + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + :param extent: The extent to cover, as a 4-tuple: + (x0, y0, x1, y1). + :param quality: Quality. + """ + return Image()._new(core.effect_mandelbrot(size, extent, quality)) + + +def effect_noise(size: tuple[int, int], sigma: float) -> Image: + """ + Generate Gaussian noise centered around 128. + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + :param sigma: Standard deviation of noise. + """ + return Image()._new(core.effect_noise(size, sigma)) + + +def linear_gradient(mode: str) -> Image: + """ + Generate 256x256 linear gradient from black to white, top to bottom. + + :param mode: Input mode. + """ + return Image()._new(core.linear_gradient(mode)) + + +def radial_gradient(mode: str) -> Image: + """ + Generate 256x256 radial gradient from black to white, centre to edge. + + :param mode: Input mode. 
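+
+    For example (a minimal sketch), the gradient can be resized into a
+    full-frame radial mask::
+
+        from PIL import Image
+
+        mask = Image.radial_gradient("L").resize((640, 480))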
+ """ + return Image()._new(core.radial_gradient(mode)) + + +# -------------------------------------------------------------------- +# Resources + + +def _apply_env_variables(env: dict[str, str] | None = None) -> None: + env_dict = env if env is not None else os.environ + + for var_name, setter in [ + ("PILLOW_ALIGNMENT", core.set_alignment), + ("PILLOW_BLOCK_SIZE", core.set_block_size), + ("PILLOW_BLOCKS_MAX", core.set_blocks_max), + ]: + if var_name not in env_dict: + continue + + var = env_dict[var_name].lower() + + units = 1 + for postfix, mul in [("k", 1024), ("m", 1024 * 1024)]: + if var.endswith(postfix): + units = mul + var = var[: -len(postfix)] + + try: + var_int = int(var) * units + except ValueError: + warnings.warn(f"{var_name} is not int") + continue + + try: + setter(var_int) + except ValueError as e: + warnings.warn(f"{var_name}: {e}") + + +_apply_env_variables() +atexit.register(core.clear_cache) + + +if TYPE_CHECKING: + _ExifBase = MutableMapping[int, Any] +else: + _ExifBase = MutableMapping + + +class Exif(_ExifBase): + """ + This class provides read and write access to EXIF image data:: + + from PIL import Image + im = Image.open("exif.png") + exif = im.getexif() # Returns an instance of this class + + Information can be read and written, iterated over or deleted:: + + print(exif[274]) # 1 + exif[274] = 2 + for k, v in exif.items(): + print("Tag", k, "Value", v) # Tag 274 Value 2 + del exif[274] + + To access information beyond IFD0, :py:meth:`~PIL.Image.Exif.get_ifd` + returns a dictionary:: + + from PIL import ExifTags + im = Image.open("exif_gps.jpg") + exif = im.getexif() + gps_ifd = exif.get_ifd(ExifTags.IFD.GPSInfo) + print(gps_ifd) + + Other IFDs include ``ExifTags.IFD.Exif``, ``ExifTags.IFD.MakerNote``, + ``ExifTags.IFD.Interop`` and ``ExifTags.IFD.IFD1``. + + :py:mod:`~PIL.ExifTags` also has enum classes to provide names for data:: + + print(exif[ExifTags.Base.Software]) # PIL + print(gps_ifd[ExifTags.GPS.GPSDateStamp]) # 1999:99:99 99:99:99 + """ + + endian: str | None = None + bigtiff = False + _loaded = False + + def __init__(self) -> None: + self._data: dict[int, Any] = {} + self._hidden_data: dict[int, Any] = {} + self._ifds: dict[int, dict[int, Any]] = {} + self._info: TiffImagePlugin.ImageFileDirectory_v2 | None = None + self._loaded_exif: bytes | None = None + + def _fixup(self, value: Any) -> Any: + try: + if len(value) == 1 and isinstance(value, tuple): + return value[0] + except Exception: + pass + return value + + def _fixup_dict(self, src_dict: dict[int, Any]) -> dict[int, Any]: + # Helper function + # returns a dict with any single item tuples/lists as individual values + return {k: self._fixup(v) for k, v in src_dict.items()} + + def _get_ifd_dict( + self, offset: int, group: int | None = None + ) -> dict[int, Any] | None: + try: + # an offset pointer to the location of the nested embedded IFD. + # It should be a long, but may be corrupted. + self.fp.seek(offset) + except (KeyError, TypeError): + return None + else: + from . 
import TiffImagePlugin + + info = TiffImagePlugin.ImageFileDirectory_v2(self.head, group=group) + info.load(self.fp) + return self._fixup_dict(dict(info)) + + def _get_head(self) -> bytes: + version = b"\x2b" if self.bigtiff else b"\x2a" + if self.endian == "<": + head = b"II" + version + b"\x00" + o32le(8) + else: + head = b"MM\x00" + version + o32be(8) + if self.bigtiff: + head += o32le(8) if self.endian == "<" else o32be(8) + head += b"\x00\x00\x00\x00" + return head + + def load(self, data: bytes) -> None: + # Extract EXIF information. This is highly experimental, + # and is likely to be replaced with something better in a future + # version. + + # The EXIF record consists of a TIFF file embedded in a JPEG + # application marker (!). + if data == self._loaded_exif: + return + self._loaded_exif = data + self._data.clear() + self._hidden_data.clear() + self._ifds.clear() + while data and data.startswith(b"Exif\x00\x00"): + data = data[6:] + if not data: + self._info = None + return + + self.fp: IO[bytes] = io.BytesIO(data) + self.head = self.fp.read(8) + # process dictionary + from . import TiffImagePlugin + + self._info = TiffImagePlugin.ImageFileDirectory_v2(self.head) + self.endian = self._info._endian + self.fp.seek(self._info.next) + self._info.load(self.fp) + + def load_from_fp(self, fp: IO[bytes], offset: int | None = None) -> None: + self._loaded_exif = None + self._data.clear() + self._hidden_data.clear() + self._ifds.clear() + + # process dictionary + from . import TiffImagePlugin + + self.fp = fp + if offset is not None: + self.head = self._get_head() + else: + self.head = self.fp.read(8) + self._info = TiffImagePlugin.ImageFileDirectory_v2(self.head) + if self.endian is None: + self.endian = self._info._endian + if offset is None: + offset = self._info.next + self.fp.tell() + self.fp.seek(offset) + self._info.load(self.fp) + + def _get_merged_dict(self) -> dict[int, Any]: + merged_dict = dict(self) + + # get EXIF extension + if ExifTags.IFD.Exif in self: + ifd = self._get_ifd_dict(self[ExifTags.IFD.Exif], ExifTags.IFD.Exif) + if ifd: + merged_dict.update(ifd) + + # GPS + if ExifTags.IFD.GPSInfo in self: + merged_dict[ExifTags.IFD.GPSInfo] = self._get_ifd_dict( + self[ExifTags.IFD.GPSInfo], ExifTags.IFD.GPSInfo + ) + + return merged_dict + + def tobytes(self, offset: int = 8) -> bytes: + from . 
import TiffImagePlugin
+
+        head = self._get_head()
+        ifd = TiffImagePlugin.ImageFileDirectory_v2(ifh=head)
+        for tag, ifd_dict in self._ifds.items():
+            if tag not in self:
+                ifd[tag] = ifd_dict
+        for tag, value in self.items():
+            if tag in [
+                ExifTags.IFD.Exif,
+                ExifTags.IFD.GPSInfo,
+            ] and not isinstance(value, dict):
+                value = self.get_ifd(tag)
+                if (
+                    tag == ExifTags.IFD.Exif
+                    and ExifTags.IFD.Interop in value
+                    and not isinstance(value[ExifTags.IFD.Interop], dict)
+                ):
+                    value = value.copy()
+                    value[ExifTags.IFD.Interop] = self.get_ifd(ExifTags.IFD.Interop)
+            ifd[tag] = value
+        return b"Exif\x00\x00" + head + ifd.tobytes(offset)
+
+    def get_ifd(self, tag: int) -> dict[int, Any]:
+        if tag not in self._ifds:
+            if tag == ExifTags.IFD.IFD1:
+                if self._info is not None and self._info.next != 0:
+                    ifd = self._get_ifd_dict(self._info.next)
+                    if ifd is not None:
+                        self._ifds[tag] = ifd
+            elif tag in [ExifTags.IFD.Exif, ExifTags.IFD.GPSInfo]:
+                offset = self._hidden_data.get(tag, self.get(tag))
+                if offset is not None:
+                    ifd = self._get_ifd_dict(offset, tag)
+                    if ifd is not None:
+                        self._ifds[tag] = ifd
+            elif tag in [ExifTags.IFD.Interop, ExifTags.IFD.MakerNote]:
+                if ExifTags.IFD.Exif not in self._ifds:
+                    self.get_ifd(ExifTags.IFD.Exif)
+                tag_data = self._ifds[ExifTags.IFD.Exif][tag]
+                if tag == ExifTags.IFD.MakerNote:
+                    from .TiffImagePlugin import ImageFileDirectory_v2
+
+                    if tag_data.startswith(b"FUJIFILM"):
+                        ifd_offset = i32le(tag_data, 8)
+                        ifd_data = tag_data[ifd_offset:]
+
+                        makernote = {}
+                        for i in range(struct.unpack("<H", ifd_data[:2])[0]):
+                            ifd_tag, typ, count, data = struct.unpack(
+                                "<HHL4s", ifd_data[i * 12 + 2 : (i + 1) * 12 + 2]
+                            )
+                            try:
+                                (
+                                    unit_size,
+                                    handler,
+                                ) = ImageFileDirectory_v2._load_dispatch[typ]
+                            except KeyError:
+                                continue
+                            size = count * unit_size
+                            if size > 4:
+                                (offset,) = struct.unpack("<L", data)
+                                data = ifd_data[offset - 12 : offset + size - 12]
+                            else:
+                                data = data[:size]
+
+                            if len(data) != size:
+                                warnings.warn(
+                                    "Possibly corrupt EXIF MakerNote data.  "
+                                    f"Expecting to read {size} bytes but only got "
+                                    f"{len(data)}. Skipping tag {ifd_tag}"
+                                )
+                                continue
+
+                            if not data:
+                                continue
+
+                            makernote[ifd_tag] = handler(
+                                ImageFileDirectory_v2(), data, False
+                            )
+                        self._ifds[tag] = dict(self._fixup_dict(makernote))
+                    elif self.get(0x010F) == "Nintendo":
+                        makernote = {}
+                        for i in range(struct.unpack(">H", tag_data[:2])[0]):
+                            ifd_tag, typ, count, data = struct.unpack(
+                                ">HHL4s", tag_data[i * 12 + 2 : (i + 1) * 12 + 2]
+                            )
+                            if ifd_tag == 0x1101:
+                                # CameraInfo
+                                (offset,) = struct.unpack(">L", data)
+                                self.fp.seek(offset)
+
+                                camerainfo: dict[str, int | bytes] = {
+                                    "ModelID": self.fp.read(4)
+                                }
+
+                                self.fp.read(4)
+                                # Seconds since 2000
+                                camerainfo["TimeStamp"] = i32le(self.fp.read(12))
+
+                                self.fp.read(4)
+                                camerainfo["InternalSerialNumber"] = self.fp.read(4)
+
+                                self.fp.read(12)
+                                parallax = self.fp.read(4)
+                                handler = ImageFileDirectory_v2._load_dispatch[
+                                    TiffTags.FLOAT
+                                ][1]
+                                camerainfo["Parallax"] = handler(
+                                    ImageFileDirectory_v2(), parallax, False
+                                )[0]
+
+                                self.fp.read(4)
+                                camerainfo["Category"] = self.fp.read(2)
+
+                                makernote = {0x1101: camerainfo}
+                        self._ifds[tag] = makernote
+                else:
+                    # Interop
+                    ifd = self._get_ifd_dict(tag_data, tag)
+                    if ifd is not None:
+                        self._ifds[tag] = ifd
+        ifd = self._ifds.setdefault(tag, {})
+        if tag == ExifTags.IFD.Exif and self._hidden_data:
+            ifd = {
+                k: v
+                for (k, v) in ifd.items()
+                if k not in (ExifTags.IFD.Interop, ExifTags.IFD.MakerNote)
+            }
+        return ifd
+
+    def hide_offsets(self) -> None:
+        for tag in (ExifTags.IFD.Exif, ExifTags.IFD.GPSInfo):
+            if tag in self:
+                self._hidden_data[tag] = self[tag]
+                del self[tag]
+
+    def __str__(self) -> str:
+        if self._info is not None:
+            # Load all keys into self._data
+            for tag in self._info:
+                self[tag]
+
+        return str(self._data)
+
+    def __len__(self) -> int:
+        keys = set(self._data)
+        if self._info is not None:
+            keys.update(self._info)
+        return len(keys)
+
+    def __getitem__(self, tag: int) -> Any:
+        if self._info is not None and tag not in self._data and tag in self._info:
+            self._data[tag] = self._fixup(self._info[tag])
+            del self._info[tag]
+        return self._data[tag]
+
+    def __contains__(self, tag: object) -> bool:
+        return tag in self._data or (self._info is not None and tag in self._info)
+
+    def __setitem__(self, tag: int,
value: Any) -> None: + if self._info is not None and tag in self._info: + del self._info[tag] + self._data[tag] = value + + def __delitem__(self, tag: int) -> None: + if self._info is not None and tag in self._info: + del self._info[tag] + else: + del self._data[tag] + + def __iter__(self) -> Iterator[int]: + keys = set(self._data) + if self._info is not None: + keys.update(self._info) + return iter(keys) diff --git a/py311/lib/python3.11/site-packages/PIL/ImageChops.py b/py311/lib/python3.11/site-packages/PIL/ImageChops.py new file mode 100644 index 0000000000000000000000000000000000000000..29a5c995fd802c9be16784f80707cfecb88b2002 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageChops.py @@ -0,0 +1,311 @@ +# +# The Python Imaging Library. +# $Id$ +# +# standard channel operations +# +# History: +# 1996-03-24 fl Created +# 1996-08-13 fl Added logical operations (for "1" images) +# 2000-10-12 fl Added offset method (from Image.py) +# +# Copyright (c) 1997-2000 by Secret Labs AB +# Copyright (c) 1996-2000 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from __future__ import annotations + +from . import Image + + +def constant(image: Image.Image, value: int) -> Image.Image: + """Fill a channel with a given gray level. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return Image.new("L", image.size, value) + + +def duplicate(image: Image.Image) -> Image.Image: + """Copy a channel. Alias for :py:meth:`PIL.Image.Image.copy`. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return image.copy() + + +def invert(image: Image.Image) -> Image.Image: + """ + Invert an image (channel). :: + + out = MAX - image + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image.load() + return image._new(image.im.chop_invert()) + + +def lighter(image1: Image.Image, image2: Image.Image) -> Image.Image: + """ + Compares the two images, pixel by pixel, and returns a new image containing + the lighter values. :: + + out = max(image1, image2) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_lighter(image2.im)) + + +def darker(image1: Image.Image, image2: Image.Image) -> Image.Image: + """ + Compares the two images, pixel by pixel, and returns a new image containing + the darker values. :: + + out = min(image1, image2) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_darker(image2.im)) + + +def difference(image1: Image.Image, image2: Image.Image) -> Image.Image: + """ + Returns the absolute value of the pixel-by-pixel difference between the two + images. :: + + out = abs(image1 - image2) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_difference(image2.im)) + + +def multiply(image1: Image.Image, image2: Image.Image) -> Image.Image: + """ + Superimposes two images on top of each other. + + If you multiply an image with a solid black image, the result is black. If + you multiply with a solid white image, the image is unaffected. :: + + out = image1 * image2 / MAX + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_multiply(image2.im)) + + +def screen(image1: Image.Image, image2: Image.Image) -> Image.Image: + """ + Superimposes two inverted images on top of each other. 
:: + + out = MAX - ((MAX - image1) * (MAX - image2) / MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_screen(image2.im)) + + +def soft_light(image1: Image.Image, image2: Image.Image) -> Image.Image: + """ + Superimposes two images on top of each other using the Soft Light algorithm + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_soft_light(image2.im)) + + +def hard_light(image1: Image.Image, image2: Image.Image) -> Image.Image: + """ + Superimposes two images on top of each other using the Hard Light algorithm + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_hard_light(image2.im)) + + +def overlay(image1: Image.Image, image2: Image.Image) -> Image.Image: + """ + Superimposes two images on top of each other using the Overlay algorithm + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_overlay(image2.im)) + + +def add( + image1: Image.Image, image2: Image.Image, scale: float = 1.0, offset: float = 0 +) -> Image.Image: + """ + Adds two images, dividing the result by scale and adding the + offset. If omitted, scale defaults to 1.0, and offset to 0.0. :: + + out = ((image1 + image2) / scale + offset) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_add(image2.im, scale, offset)) + + +def subtract( + image1: Image.Image, image2: Image.Image, scale: float = 1.0, offset: float = 0 +) -> Image.Image: + """ + Subtracts two images, dividing the result by scale and adding the offset. + If omitted, scale defaults to 1.0, and offset to 0.0. :: + + out = ((image1 - image2) / scale + offset) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_subtract(image2.im, scale, offset)) + + +def add_modulo(image1: Image.Image, image2: Image.Image) -> Image.Image: + """Add two images, without clipping the result. :: + + out = ((image1 + image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_add_modulo(image2.im)) + + +def subtract_modulo(image1: Image.Image, image2: Image.Image) -> Image.Image: + """Subtract two images, without clipping the result. :: + + out = ((image1 - image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_subtract_modulo(image2.im)) + + +def logical_and(image1: Image.Image, image2: Image.Image) -> Image.Image: + """Logical AND between two images. + + Both of the images must have mode "1". If you would like to perform a + logical AND on an image with a mode other than "1", try + :py:meth:`~PIL.ImageChops.multiply` instead, using a black-and-white mask + as the second image. :: + + out = ((image1 and image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_and(image2.im)) + + +def logical_or(image1: Image.Image, image2: Image.Image) -> Image.Image: + """Logical OR between two images. + + Both of the images must have mode "1". 
:: + + out = ((image1 or image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_or(image2.im)) + + +def logical_xor(image1: Image.Image, image2: Image.Image) -> Image.Image: + """Logical XOR between two images. + + Both of the images must have mode "1". :: + + out = ((bool(image1) != bool(image2)) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_xor(image2.im)) + + +def blend(image1: Image.Image, image2: Image.Image, alpha: float) -> Image.Image: + """Blend images using constant transparency weight. Alias for + :py:func:`PIL.Image.blend`. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return Image.blend(image1, image2, alpha) + + +def composite( + image1: Image.Image, image2: Image.Image, mask: Image.Image +) -> Image.Image: + """Create composite using transparency mask. Alias for + :py:func:`PIL.Image.composite`. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return Image.composite(image1, image2, mask) + + +def offset(image: Image.Image, xoffset: int, yoffset: int | None = None) -> Image.Image: + """Returns a copy of the image where data has been offset by the given + distances. Data wraps around the edges. If ``yoffset`` is omitted, it + is assumed to be equal to ``xoffset``. + + :param image: Input image. + :param xoffset: The horizontal distance. + :param yoffset: The vertical distance. If omitted, both + distances are set to the same value. + :rtype: :py:class:`~PIL.Image.Image` + """ + + if yoffset is None: + yoffset = xoffset + image.load() + return image._new(image.im.offset(xoffset, yoffset)) diff --git a/py311/lib/python3.11/site-packages/PIL/ImageCms.py b/py311/lib/python3.11/site-packages/PIL/ImageCms.py new file mode 100644 index 0000000000000000000000000000000000000000..a1584f111d4a6c33a62dcb94ae583a539138f0c3 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageCms.py @@ -0,0 +1,1123 @@ +# The Python Imaging Library. +# $Id$ + +# Optional color management support, based on Kevin Cazabon's PyCMS +# library. + +# Originally released under LGPL. Graciously donated to PIL in +# March 2009, for distribution under the standard PIL license + +# History: + +# 2009-03-08 fl Added to PIL. + +# Copyright (C) 2002-2003 Kevin Cazabon +# Copyright (c) 2009 by Fredrik Lundh +# Copyright (c) 2013 by Eric Soroos + +# See the README file for information on usage and redistribution. See +# below for the original description. +from __future__ import annotations + +import operator +import sys +from enum import IntEnum, IntFlag +from functools import reduce +from typing import Any, Literal, SupportsFloat, SupportsInt, Union + +from . import Image, __version__ +from ._deprecate import deprecate +from ._typing import SupportsRead + +try: + from . import _imagingcms as core + + _CmsProfileCompatible = Union[ + str, SupportsRead[bytes], core.CmsProfile, "ImageCmsProfile" + ] +except ImportError as ex: + # Allow error import for doc purposes, but error out when accessing + # anything in core. + from ._util import DeferredError + + core = DeferredError.new(ex) + +_DESCRIPTION = """ +pyCMS + + a Python / PIL interface to the littleCMS ICC Color Management System + Copyright (C) 2002-2003 Kevin Cazabon + kevin@cazabon.com + https://www.cazabon.com + + pyCMS home page: https://www.cazabon.com/pyCMS + littleCMS home page: https://www.littlecms.com + (littleCMS is Copyright (C) 1998-2001 Marti Maria) + + Originally released under LGPL. 
Graciously donated to PIL in + March 2009, for distribution under the standard PIL license + + The pyCMS.py module provides a "clean" interface between Python/PIL and + pyCMSdll, taking care of some of the more complex handling of the direct + pyCMSdll functions, as well as error-checking and making sure that all + relevant data is kept together. + + While it is possible to call pyCMSdll functions directly, it's not highly + recommended. + + Version History: + + 1.0.0 pil Oct 2013 Port to LCMS 2. + + 0.1.0 pil mod March 10, 2009 + + Renamed display profile to proof profile. The proof + profile is the profile of the device that is being + simulated, not the profile of the device which is + actually used to display/print the final simulation + (that'd be the output profile) - also see LCMSAPI.txt + input colorspace -> using 'renderingIntent' -> proof + colorspace -> using 'proofRenderingIntent' -> output + colorspace + + Added LCMS FLAGS support. + Added FLAGS["SOFTPROOFING"] as default flag for + buildProofTransform (otherwise the proof profile/intent + would be ignored). + + 0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms + + 0.0.2 alpha Jan 6, 2002 + + Added try/except statements around type() checks of + potential CObjects... Python won't let you use type() + on them, and raises a TypeError (stupid, if you ask + me!) + + Added buildProofTransformFromOpenProfiles() function. + Additional fixes in DLL, see DLL code for details. + + 0.0.1 alpha first public release, Dec. 26, 2002 + + Known to-do list with current version (of Python interface, not pyCMSdll): + + none + +""" + +_VERSION = "1.0.0 pil" + + +def __getattr__(name: str) -> Any: + if name == "DESCRIPTION": + deprecate("PIL.ImageCms.DESCRIPTION", 12) + return _DESCRIPTION + elif name == "VERSION": + deprecate("PIL.ImageCms.VERSION", 12) + return _VERSION + elif name == "FLAGS": + deprecate("PIL.ImageCms.FLAGS", 12, "PIL.ImageCms.Flags") + return _FLAGS + msg = f"module '{__name__}' has no attribute '{name}'" + raise AttributeError(msg) + + +# --------------------------------------------------------------------. 
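+# For illustration, the module-level __getattr__ above keeps the legacy
+# constants importable while emitting a warning (a sketch of the expected
+# behaviour, assuming the stock PIL._deprecate helper):
+#
+#     import warnings
+#
+#     from PIL import ImageCms
+#
+#     with warnings.catch_warnings(record=True) as caught:
+#         warnings.simplefilter("always")
+#         ImageCms.FLAGS["SOFTPROOFING"]  # == Flags.SOFTPROOFING == 0x4000
+#     assert issubclass(caught[0].category, DeprecationWarning)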
+ + +# +# intent/direction values + + +class Intent(IntEnum): + PERCEPTUAL = 0 + RELATIVE_COLORIMETRIC = 1 + SATURATION = 2 + ABSOLUTE_COLORIMETRIC = 3 + + +class Direction(IntEnum): + INPUT = 0 + OUTPUT = 1 + PROOF = 2 + + +# +# flags + + +class Flags(IntFlag): + """Flags and documentation are taken from ``lcms2.h``.""" + + NONE = 0 + NOCACHE = 0x0040 + """Inhibit 1-pixel cache""" + NOOPTIMIZE = 0x0100 + """Inhibit optimizations""" + NULLTRANSFORM = 0x0200 + """Don't transform anyway""" + GAMUTCHECK = 0x1000 + """Out of Gamut alarm""" + SOFTPROOFING = 0x4000 + """Do softproofing""" + BLACKPOINTCOMPENSATION = 0x2000 + NOWHITEONWHITEFIXUP = 0x0004 + """Don't fix scum dot""" + HIGHRESPRECALC = 0x0400 + """Use more memory to give better accuracy""" + LOWRESPRECALC = 0x0800 + """Use less memory to minimize resources""" + # this should be 8BITS_DEVICELINK, but that is not a valid name in Python: + USE_8BITS_DEVICELINK = 0x0008 + """Create 8 bits devicelinks""" + GUESSDEVICECLASS = 0x0020 + """Guess device class (for ``transform2devicelink``)""" + KEEP_SEQUENCE = 0x0080 + """Keep profile sequence for devicelink creation""" + FORCE_CLUT = 0x0002 + """Force CLUT optimization""" + CLUT_POST_LINEARIZATION = 0x0001 + """create postlinearization tables if possible""" + CLUT_PRE_LINEARIZATION = 0x0010 + """create prelinearization tables if possible""" + NONEGATIVES = 0x8000 + """Prevent negative numbers in floating point transforms""" + COPY_ALPHA = 0x04000000 + """Alpha channels are copied on ``cmsDoTransform()``""" + NODEFAULTRESOURCEDEF = 0x01000000 + + _GRIDPOINTS_1 = 1 << 16 + _GRIDPOINTS_2 = 2 << 16 + _GRIDPOINTS_4 = 4 << 16 + _GRIDPOINTS_8 = 8 << 16 + _GRIDPOINTS_16 = 16 << 16 + _GRIDPOINTS_32 = 32 << 16 + _GRIDPOINTS_64 = 64 << 16 + _GRIDPOINTS_128 = 128 << 16 + + @staticmethod + def GRIDPOINTS(n: int) -> Flags: + """ + Fine-tune control over number of gridpoints + + :param n: :py:class:`int` in range ``0 <= n <= 255`` + """ + return Flags.NONE | ((n & 0xFF) << 16) + + +_MAX_FLAG = reduce(operator.or_, Flags) + + +_FLAGS = { + "MATRIXINPUT": 1, + "MATRIXOUTPUT": 2, + "MATRIXONLY": (1 | 2), + "NOWHITEONWHITEFIXUP": 4, # Don't hot fix scum dot + # Don't create prelinearization tables on precalculated transforms + # (internal use): + "NOPRELINEARIZATION": 16, + "GUESSDEVICECLASS": 32, # Guess device class (for transform2devicelink) + "NOTCACHE": 64, # Inhibit 1-pixel cache + "NOTPRECALC": 256, + "NULLTRANSFORM": 512, # Don't transform anyway + "HIGHRESPRECALC": 1024, # Use more memory to give better accuracy + "LOWRESPRECALC": 2048, # Use less memory to minimize resources + "WHITEBLACKCOMPENSATION": 8192, + "BLACKPOINTCOMPENSATION": 8192, + "GAMUTCHECK": 4096, # Out of Gamut alarm + "SOFTPROOFING": 16384, # Do softproofing + "PRESERVEBLACK": 32768, # Black preservation + "NODEFAULTRESOURCEDEF": 16777216, # CRD special + "GRIDPOINTS": lambda n: (n & 0xFF) << 16, # Gridpoints +} + + +# --------------------------------------------------------------------. +# Experimental PIL-level API +# --------------------------------------------------------------------. + +## +# Profile. 
+ + +class ImageCmsProfile: + def __init__(self, profile: str | SupportsRead[bytes] | core.CmsProfile) -> None: + """ + :param profile: Either a string representing a filename, + a file like object containing a profile or a + low-level profile object + + """ + self.filename = None + self.product_name = None # profile.product_name + self.product_info = None # profile.product_info + + if isinstance(profile, str): + if sys.platform == "win32": + profile_bytes_path = profile.encode() + try: + profile_bytes_path.decode("ascii") + except UnicodeDecodeError: + with open(profile, "rb") as f: + self.profile = core.profile_frombytes(f.read()) + return + self.filename = profile + self.profile = core.profile_open(profile) + elif hasattr(profile, "read"): + self.profile = core.profile_frombytes(profile.read()) + elif isinstance(profile, core.CmsProfile): + self.profile = profile + else: + msg = "Invalid type for Profile" # type: ignore[unreachable] + raise TypeError(msg) + + def tobytes(self) -> bytes: + """ + Returns the profile in a format suitable for embedding in + saved images. + + :returns: a bytes object containing the ICC profile. + """ + + return core.profile_tobytes(self.profile) + + +class ImageCmsTransform(Image.ImagePointHandler): + """ + Transform. This can be used with the procedural API, or with the standard + :py:func:`~PIL.Image.Image.point` method. + + Will return the output profile in the ``output.info['icc_profile']``. + """ + + def __init__( + self, + input: ImageCmsProfile, + output: ImageCmsProfile, + input_mode: str, + output_mode: str, + intent: Intent = Intent.PERCEPTUAL, + proof: ImageCmsProfile | None = None, + proof_intent: Intent = Intent.ABSOLUTE_COLORIMETRIC, + flags: Flags = Flags.NONE, + ): + supported_modes = ( + "RGB", + "RGBA", + "RGBX", + "CMYK", + "I;16", + "I;16L", + "I;16B", + "YCbCr", + "LAB", + "L", + "1", + ) + for mode in (input_mode, output_mode): + if mode not in supported_modes: + deprecate( + mode, + 12, + { + "L;16": "I;16 or I;16L", + "L:16B": "I;16B", + "YCCA": "YCbCr", + "YCC": "YCbCr", + }.get(mode), + ) + if proof is None: + self.transform = core.buildTransform( + input.profile, output.profile, input_mode, output_mode, intent, flags + ) + else: + self.transform = core.buildProofTransform( + input.profile, + output.profile, + proof.profile, + input_mode, + output_mode, + intent, + proof_intent, + flags, + ) + # Note: inputMode and outputMode are for pyCMS compatibility only + self.input_mode = self.inputMode = input_mode + self.output_mode = self.outputMode = output_mode + + self.output_profile = output + + def point(self, im: Image.Image) -> Image.Image: + return self.apply(im) + + def apply(self, im: Image.Image, imOut: Image.Image | None = None) -> Image.Image: + if imOut is None: + imOut = Image.new(self.output_mode, im.size, None) + self.transform.apply(im.getim(), imOut.getim()) + imOut.info["icc_profile"] = self.output_profile.tobytes() + return imOut + + def apply_in_place(self, im: Image.Image) -> Image.Image: + if im.mode != self.output_mode: + msg = "mode mismatch" + raise ValueError(msg) # wrong output mode + self.transform.apply(im.getim(), im.getim()) + im.info["icc_profile"] = self.output_profile.tobytes() + return im + + +def get_display_profile(handle: SupportsInt | None = None) -> ImageCmsProfile | None: + """ + (experimental) Fetches the profile for the current display device. + + :returns: ``None`` if the profile is not known. + """ + + if sys.platform != "win32": + return None + + from . 
import ImageWin # type: ignore[unused-ignore, unreachable] + + if isinstance(handle, ImageWin.HDC): + profile = core.get_display_profile_win32(int(handle), 1) + else: + profile = core.get_display_profile_win32(int(handle or 0)) + if profile is None: + return None + return ImageCmsProfile(profile) + + +# --------------------------------------------------------------------. +# pyCMS compatible layer +# --------------------------------------------------------------------. + + +class PyCMSError(Exception): + """(pyCMS) Exception class. + This is used for all errors in the pyCMS API.""" + + pass + + +def profileToProfile( + im: Image.Image, + inputProfile: _CmsProfileCompatible, + outputProfile: _CmsProfileCompatible, + renderingIntent: Intent = Intent.PERCEPTUAL, + outputMode: str | None = None, + inPlace: bool = False, + flags: Flags = Flags.NONE, +) -> Image.Image | None: + """ + (pyCMS) Applies an ICC transformation to a given image, mapping from + ``inputProfile`` to ``outputProfile``. + + If the input or output profiles specified are not valid filenames, a + :exc:`PyCMSError` will be raised. If ``inPlace`` is ``True`` and + ``outputMode != im.mode``, a :exc:`PyCMSError` will be raised. + If an error occurs during application of the profiles, + a :exc:`PyCMSError` will be raised. + If ``outputMode`` is not a mode supported by the ``outputProfile`` (or by pyCMS), + a :exc:`PyCMSError` will be raised. + + This function applies an ICC transformation to im from ``inputProfile``'s + color space to ``outputProfile``'s color space using the specified rendering + intent to decide how to handle out-of-gamut colors. + + ``outputMode`` can be used to specify that a color mode conversion is to + be done using these profiles, but the specified profiles must be able + to handle that mode. I.e., if converting im from RGB to CMYK using + profiles, the input profile must handle RGB data, and the output + profile must handle CMYK data. + + :param im: An open :py:class:`~PIL.Image.Image` object (i.e. Image.new(...) + or Image.open(...), etc.) + :param inputProfile: String, as a valid filename path to the ICC input + profile you wish to use for this image, or a profile object + :param outputProfile: String, as a valid filename path to the ICC output + profile you wish to use for this image, or a profile object + :param renderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for the transform + + ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT) + ImageCms.Intent.RELATIVE_COLORIMETRIC = 1 + ImageCms.Intent.SATURATION = 2 + ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :param outputMode: A valid PIL mode for the output image (i.e. "RGB", + "CMYK", etc.). Note: if rendering the image "inPlace", outputMode + MUST be the same mode as the input, or omitted completely. If + omitted, the outputMode will be the same as the mode of the input + image (im.mode) + :param inPlace: Boolean. If ``True``, the original image is modified in-place, + and ``None`` is returned. If ``False`` (default), a new + :py:class:`~PIL.Image.Image` object is returned with the transform applied. + :param flags: Integer (0-...) 
specifying additional flags + :returns: Either None or a new :py:class:`~PIL.Image.Image` object, depending on + the value of ``inPlace`` + :exception PyCMSError: + """ + + if outputMode is None: + outputMode = im.mode + + if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3): + msg = "renderingIntent must be an integer between 0 and 3" + raise PyCMSError(msg) + + if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): + msg = f"flags must be an integer between 0 and {_MAX_FLAG}" + raise PyCMSError(msg) + + try: + if not isinstance(inputProfile, ImageCmsProfile): + inputProfile = ImageCmsProfile(inputProfile) + if not isinstance(outputProfile, ImageCmsProfile): + outputProfile = ImageCmsProfile(outputProfile) + transform = ImageCmsTransform( + inputProfile, + outputProfile, + im.mode, + outputMode, + renderingIntent, + flags=flags, + ) + if inPlace: + transform.apply_in_place(im) + imOut = None + else: + imOut = transform.apply(im) + except (OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + return imOut + + +def getOpenProfile( + profileFilename: str | SupportsRead[bytes] | core.CmsProfile, +) -> ImageCmsProfile: + """ + (pyCMS) Opens an ICC profile file. + + The PyCMSProfile object can be passed back into pyCMS for use in creating + transforms and such (as in ImageCms.buildTransformFromOpenProfiles()). + + If ``profileFilename`` is not a valid filename for an ICC profile, + a :exc:`PyCMSError` will be raised. + + :param profileFilename: String, as a valid filename path to the ICC profile + you wish to open, or a file-like object. + :returns: A CmsProfile class object. + :exception PyCMSError: + """ + + try: + return ImageCmsProfile(profileFilename) + except (OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def buildTransform( + inputProfile: _CmsProfileCompatible, + outputProfile: _CmsProfileCompatible, + inMode: str, + outMode: str, + renderingIntent: Intent = Intent.PERCEPTUAL, + flags: Flags = Flags.NONE, +) -> ImageCmsTransform: + """ + (pyCMS) Builds an ICC transform mapping from the ``inputProfile`` to the + ``outputProfile``. Use applyTransform to apply the transform to a given + image. + + If the input or output profiles specified are not valid filenames, a + :exc:`PyCMSError` will be raised. If an error occurs during creation + of the transform, a :exc:`PyCMSError` will be raised. + + If ``inMode`` or ``outMode`` are not a mode supported by the ``outputProfile`` + (or by pyCMS), a :exc:`PyCMSError` will be raised. + + This function builds and returns an ICC transform from the ``inputProfile`` + to the ``outputProfile`` using the ``renderingIntent`` to determine what to do + with out-of-gamut colors. It will ONLY work for converting images that + are in ``inMode`` to images that are in ``outMode`` color format (PIL mode, + i.e. "RGB", "RGBA", "CMYK", etc.). + + Building the transform is a fair part of the overhead in + ImageCms.profileToProfile(), so if you're planning on converting multiple + images using the same input/output settings, this can save you time. + Once you have a transform object, it can be used with + ImageCms.applyProfile() to convert images without the need to re-compute + the lookup table for the transform. + + The reason pyCMS returns a class object rather than a handle directly + to the transform is that it needs to keep track of the PIL input/output + modes that the transform is meant for. 
These attributes are stored in + the ``inMode`` and ``outMode`` attributes of the object (which can be + manually overridden if you really want to, but I don't know of any + time that would be of use, or would even work). + + :param inputProfile: String, as a valid filename path to the ICC input + profile you wish to use for this transform, or a profile object + :param outputProfile: String, as a valid filename path to the ICC output + profile you wish to use for this transform, or a profile object + :param inMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param outMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param renderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for the transform + + ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT) + ImageCms.Intent.RELATIVE_COLORIMETRIC = 1 + ImageCms.Intent.SATURATION = 2 + ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :param flags: Integer (0-...) specifying additional flags + :returns: A CmsTransform class object. + :exception PyCMSError: + """ + + if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3): + msg = "renderingIntent must be an integer between 0 and 3" + raise PyCMSError(msg) + + if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): + msg = f"flags must be an integer between 0 and {_MAX_FLAG}" + raise PyCMSError(msg) + + try: + if not isinstance(inputProfile, ImageCmsProfile): + inputProfile = ImageCmsProfile(inputProfile) + if not isinstance(outputProfile, ImageCmsProfile): + outputProfile = ImageCmsProfile(outputProfile) + return ImageCmsTransform( + inputProfile, outputProfile, inMode, outMode, renderingIntent, flags=flags + ) + except (OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def buildProofTransform( + inputProfile: _CmsProfileCompatible, + outputProfile: _CmsProfileCompatible, + proofProfile: _CmsProfileCompatible, + inMode: str, + outMode: str, + renderingIntent: Intent = Intent.PERCEPTUAL, + proofRenderingIntent: Intent = Intent.ABSOLUTE_COLORIMETRIC, + flags: Flags = Flags.SOFTPROOFING, +) -> ImageCmsTransform: + """ + (pyCMS) Builds an ICC transform mapping from the ``inputProfile`` to the + ``outputProfile``, but tries to simulate the result that would be + obtained on the ``proofProfile`` device. + + If the input, output, or proof profiles specified are not valid + filenames, a :exc:`PyCMSError` will be raised. + + If an error occurs during creation of the transform, + a :exc:`PyCMSError` will be raised. + + If ``inMode`` or ``outMode`` are not a mode supported by the ``outputProfile`` + (or by pyCMS), a :exc:`PyCMSError` will be raised. + + This function builds and returns an ICC transform from the ``inputProfile`` + to the ``outputProfile``, but tries to simulate the result that would be + obtained on the ``proofProfile`` device using ``renderingIntent`` and + ``proofRenderingIntent`` to determine what to do with out-of-gamut + colors. This is known as "soft-proofing". It will ONLY work for + converting images that are in ``inMode`` to images that are in outMode + color format (PIL mode, i.e. "RGB", "RGBA", "CMYK", etc.). + + Usage of the resulting transform object is exactly the same as with + ImageCms.buildTransform(). 
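+
+    For example (a minimal sketch; the profile filenames are placeholders)::
+
+        proof_transform = ImageCms.buildProofTransform(
+            "sRGB.icc", "monitor.icc", "printer.icc", "RGB", "RGB"
+        )
+        soft_proofed = ImageCms.applyTransform(im, proof_transform)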
+ + Proof profiling is generally used when using an output device to get a + good idea of what the final printed/displayed image would look like on + the ``proofProfile`` device when it's quicker and easier to use the + output device for judging color. Generally, this means that the + output device is a monitor, or a dye-sub printer (etc.), and the simulated + device is something more expensive, complicated, or time consuming + (making it difficult to make a real print for color judgement purposes). + + Soft-proofing basically functions by adjusting the colors on the + output device to match the colors of the device being simulated. However, + when the simulated device has a much wider gamut than the output + device, you may obtain marginal results. + + :param inputProfile: String, as a valid filename path to the ICC input + profile you wish to use for this transform, or a profile object + :param outputProfile: String, as a valid filename path to the ICC output + (monitor, usually) profile you wish to use for this transform, or a + profile object + :param proofProfile: String, as a valid filename path to the ICC proof + profile you wish to use for this transform, or a profile object + :param inMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param outMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param renderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for the input->proof (simulated) transform + + ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT) + ImageCms.Intent.RELATIVE_COLORIMETRIC = 1 + ImageCms.Intent.SATURATION = 2 + ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :param proofRenderingIntent: Integer (0-3) specifying the rendering intent + you wish to use for proof->output transform + + ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT) + ImageCms.Intent.RELATIVE_COLORIMETRIC = 1 + ImageCms.Intent.SATURATION = 2 + ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :param flags: Integer (0-...) specifying additional flags + :returns: A CmsTransform class object. + :exception PyCMSError: + """ + + if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3): + msg = "renderingIntent must be an integer between 0 and 3" + raise PyCMSError(msg) + + if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): + msg = f"flags must be an integer between 0 and {_MAX_FLAG}" + raise PyCMSError(msg) + + try: + if not isinstance(inputProfile, ImageCmsProfile): + inputProfile = ImageCmsProfile(inputProfile) + if not isinstance(outputProfile, ImageCmsProfile): + outputProfile = ImageCmsProfile(outputProfile) + if not isinstance(proofProfile, ImageCmsProfile): + proofProfile = ImageCmsProfile(proofProfile) + return ImageCmsTransform( + inputProfile, + outputProfile, + inMode, + outMode, + renderingIntent, + proofProfile, + proofRenderingIntent, + flags, + ) + except (OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +buildTransformFromOpenProfiles = buildTransform +buildProofTransformFromOpenProfiles = buildProofTransform + + +def applyTransform( + im: Image.Image, transform: ImageCmsTransform, inPlace: bool = False +) -> Image.Image | None: + """ + (pyCMS) Applies a transform to a given image. 
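+
+    For example, one transform can be reused across many images (a minimal
+    sketch; the profile filenames are placeholders)::
+
+        transform = ImageCms.buildTransform(
+            "sRGB.icc", "output.icc", "RGB", "RGB"
+        )
+        converted = [ImageCms.applyTransform(im, transform) for im in frames]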
+ + If ``im.mode != transform.input_mode``, a :exc:`PyCMSError` is raised. + + If ``inPlace`` is ``True`` and ``transform.input_mode != transform.output_mode``, a + :exc:`PyCMSError` is raised. + + If ``im.mode``, ``transform.input_mode`` or ``transform.output_mode`` is not + supported by pyCMSdll or the profiles you used for the transform, a + :exc:`PyCMSError` is raised. + + If an error occurs while the transform is being applied, + a :exc:`PyCMSError` is raised. + + This function applies a pre-calculated transform (from + ImageCms.buildTransform() or ImageCms.buildTransformFromOpenProfiles()) + to an image. The transform can be used for multiple images, saving + considerable calculation time if doing the same conversion multiple times. + + If you want to modify im in-place instead of receiving a new image as + the return value, set ``inPlace`` to ``True``. This can only be done if + ``transform.input_mode`` and ``transform.output_mode`` are the same, because we + can't change the mode in-place (the buffer sizes for some modes are + different). The default behavior is to return a new :py:class:`~PIL.Image.Image` + object of the same dimensions in mode ``transform.output_mode``. + + :param im: An :py:class:`~PIL.Image.Image` object, and ``im.mode`` must be the same + as the ``input_mode`` supported by the transform. + :param transform: A valid CmsTransform class object + :param inPlace: Bool. If ``True``, ``im`` is modified in place and ``None`` is + returned, if ``False``, a new :py:class:`~PIL.Image.Image` object with the + transform applied is returned (and ``im`` is not changed). The default is + ``False``. + :returns: Either ``None``, or a new :py:class:`~PIL.Image.Image` object, + depending on the value of ``inPlace``. The profile will be returned in + the image's ``info['icc_profile']``. + :exception PyCMSError: + """ + + try: + if inPlace: + transform.apply_in_place(im) + imOut = None + else: + imOut = transform.apply(im) + except (TypeError, ValueError) as v: + raise PyCMSError(v) from v + + return imOut + + +def createProfile( + colorSpace: Literal["LAB", "XYZ", "sRGB"], colorTemp: SupportsFloat = 0 +) -> core.CmsProfile: + """ + (pyCMS) Creates a profile. + + If colorSpace not in ``["LAB", "XYZ", "sRGB"]``, + a :exc:`PyCMSError` is raised. + + If using LAB and ``colorTemp`` is not a positive integer, + a :exc:`PyCMSError` is raised. + + If an error occurs while creating the profile, + a :exc:`PyCMSError` is raised. + + Use this function to create common profiles on-the-fly instead of + having to supply a profile on disk and knowing the path to it. It + returns a normal CmsProfile object that can be passed to + ImageCms.buildTransformFromOpenProfiles() to create a transform to apply + to images. + + :param colorSpace: String, the color space of the profile you wish to + create. + Currently only "LAB", "XYZ", and "sRGB" are supported. + :param colorTemp: Positive number for the white point for the profile, in + degrees Kelvin (i.e. 5000, 6500, 9600, etc.). The default is for D50 + illuminant if omitted (5000k). colorTemp is ONLY applied to LAB + profiles, and is ignored for XYZ and sRGB. 
+    :returns: A CmsProfile class object
+    :exception PyCMSError:
+    """
+
+    if colorSpace not in ["LAB", "XYZ", "sRGB"]:
+        msg = (
+            f"Color space not supported for on-the-fly profile creation ({colorSpace})"
+        )
+        raise PyCMSError(msg)
+
+    if colorSpace == "LAB":
+        try:
+            colorTemp = float(colorTemp)
+        except (TypeError, ValueError) as e:
+            msg = f'Color temperature must be numeric, "{colorTemp}" not valid'
+            raise PyCMSError(msg) from e
+
+    try:
+        return core.createProfile(colorSpace, colorTemp)
+    except (TypeError, ValueError) as v:
+        raise PyCMSError(v) from v
+
+
+def getProfileName(profile: _CmsProfileCompatible) -> str:
+    """
+    (pyCMS) Gets the internal product name for the given profile.
+
+    If ``profile`` isn't a valid CmsProfile object or filename to a profile,
+    a :exc:`PyCMSError` is raised. If an error occurs while trying
+    to obtain the name tag, a :exc:`PyCMSError` is raised.
+
+    Use this function to obtain the INTERNAL name of the profile (stored
+    in an ICC tag in the profile itself), usually the one used when the
+    profile was originally created. Sometimes this tag also contains
+    additional information supplied by the creator.
+
+    :param profile: EITHER a valid CmsProfile object, OR a string of the
+        filename of an ICC profile.
+    :returns: A string containing the internal name of the profile as stored
+        in an ICC tag.
+    :exception PyCMSError:
+    """
+
+    try:
+        # add an extra newline to preserve pyCMS compatibility
+        if not isinstance(profile, ImageCmsProfile):
+            profile = ImageCmsProfile(profile)
+        # do it in python, not c.
+        # // name was "%s - %s" (model, manufacturer) || Description ,
+        # // but if the Model and Manufacturer were the same or the model
+        # // was long, Just the model, in 1.x
+        model = profile.profile.model
+        manufacturer = profile.profile.manufacturer
+
+        if not (model or manufacturer):
+            return (profile.profile.profile_description or "") + "\n"
+        if not manufacturer or (model and len(model) > 30):
+            return f"{model}\n"
+        return f"{model} - {manufacturer}\n"
+
+    except (AttributeError, OSError, TypeError, ValueError) as v:
+        raise PyCMSError(v) from v
+
+
+def getProfileInfo(profile: _CmsProfileCompatible) -> str:
+    """
+    (pyCMS) Gets the internal product information for the given profile.
+
+    If ``profile`` isn't a valid CmsProfile object or filename to a profile,
+    a :exc:`PyCMSError` is raised.
+
+    If an error occurs while trying to obtain the info tag,
+    a :exc:`PyCMSError` is raised.
+
+    Use this function to obtain the information stored in the profile's
+    info tag. This often contains details about the profile, and how it
+    was created, as supplied by the creator.
+
+    :param profile: EITHER a valid CmsProfile object, OR a string of the
+        filename of an ICC profile.
+    :returns: A string containing the internal profile information stored in
+        an ICC tag.
+    :exception PyCMSError:
+    """
+
+    try:
+        if not isinstance(profile, ImageCmsProfile):
+            profile = ImageCmsProfile(profile)
+        # add an extra newline to preserve pyCMS compatibility
+        # Python, not C. the white point bits weren't working well,
+        # so skipping.
+ # info was description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint + description = profile.profile.profile_description + cpright = profile.profile.copyright + elements = [element for element in (description, cpright) if element] + return "\r\n\r\n".join(elements) + "\r\n\r\n" + + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileCopyright(profile: _CmsProfileCompatible) -> str: + """ + (pyCMS) Gets the copyright for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the copyright tag, + a :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + copyright tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return (profile.profile.copyright or "") + "\n" + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileManufacturer(profile: _CmsProfileCompatible) -> str: + """ + (pyCMS) Gets the manufacturer for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the manufacturer tag, a + :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + manufacturer tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return (profile.profile.manufacturer or "") + "\n" + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileModel(profile: _CmsProfileCompatible) -> str: + """ + (pyCMS) Gets the model for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the model tag, + a :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + model tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return (profile.profile.model or "") + "\n" + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileDescription(profile: _CmsProfileCompatible) -> str: + """ + (pyCMS) Gets the description for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the description tag, + a :exc:`PyCMSError` is raised. 
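+
+    For example (a minimal sketch; the filename is a placeholder)::
+
+        desc = ImageCms.getProfileDescription("sRGB.icc")
+        # e.g. 'sRGB IEC61966-2.1' plus a trailing newline, which is kept
+        # for pyCMS compatibility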
+
+    Use this function to obtain the information stored in the profile's
+    description tag.
+
+    :param profile: EITHER a valid CmsProfile object, OR a string of the
+        filename of an ICC profile.
+    :returns: A string containing the internal profile information stored in an
+        ICC tag.
+    :exception PyCMSError:
+    """
+
+    try:
+        # add an extra newline to preserve pyCMS compatibility
+        if not isinstance(profile, ImageCmsProfile):
+            profile = ImageCmsProfile(profile)
+        return (profile.profile.profile_description or "") + "\n"
+    except (AttributeError, OSError, TypeError, ValueError) as v:
+        raise PyCMSError(v) from v
+
+
+def getDefaultIntent(profile: _CmsProfileCompatible) -> int:
+    """
+    (pyCMS) Gets the default intent name for the given profile.
+
+    If ``profile`` isn't a valid CmsProfile object or filename to a profile, a
+    :exc:`PyCMSError` is raised.
+
+    If an error occurs while trying to obtain the default intent, a
+    :exc:`PyCMSError` is raised.
+
+    Use this function to determine the default (and usually best optimized)
+    rendering intent for this profile. Most profiles support multiple
+    rendering intents, but are intended mostly for one type of conversion.
+    If you wish to use a different intent than returned, use
+    ImageCms.isIntentSupported() to verify it will work first.
+
+    :param profile: EITHER a valid CmsProfile object, OR a string of the
+        filename of an ICC profile.
+    :returns: Integer 0-3 specifying the default rendering intent for this
+        profile.
+
+            ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
+            ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
+            ImageCms.Intent.SATURATION = 2
+            ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
+
+        see the pyCMS documentation for details on rendering intents and what
+        they do.
+    :exception PyCMSError:
+    """
+
+    try:
+        if not isinstance(profile, ImageCmsProfile):
+            profile = ImageCmsProfile(profile)
+        return profile.profile.rendering_intent
+    except (AttributeError, OSError, TypeError, ValueError) as v:
+        raise PyCMSError(v) from v
+
+
+def isIntentSupported(
+    profile: _CmsProfileCompatible, intent: Intent, direction: Direction
+) -> Literal[-1, 1]:
+    """
+    (pyCMS) Checks if a given intent is supported.
+
+    Use this function to verify that you can use your desired
+    ``intent`` with ``profile``, and that ``profile`` can be used for the
+    input/output/proof profile as you desire.
+
+    Some profiles are created specifically for one "direction", and cannot
+    be used for others. Some profiles can only be used for certain
+    rendering intents, so it's best to either verify this before trying
+    to create a transform with them (using this function), or catch the
+    potential :exc:`PyCMSError` that will occur if they don't
+    support the modes you select.
+
+    :param profile: EITHER a valid CmsProfile object, OR a string of the
+        filename of an ICC profile.
+    :param intent: Integer (0-3) specifying the rendering intent you wish to
+        use with this profile
+
+            ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
+            ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
+            ImageCms.Intent.SATURATION = 2
+            ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
+
+        see the pyCMS documentation for details on rendering intents and what
+        they do.
+    :param direction: Integer specifying if the profile is to be used for
+        input, output, or proof
+
+            INPUT = 0 (or use ImageCms.Direction.INPUT)
+            OUTPUT = 1 (or use ImageCms.Direction.OUTPUT)
+            PROOF = 2 (or use ImageCms.Direction.PROOF)
+
+    :returns: 1 if the intent/direction are supported, -1 if they are not.
+ :exception PyCMSError: + """ + + try: + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + # FIXME: I get different results for the same data w. different + # compilers. Bug in LittleCMS or in the binding? + if profile.profile.is_intent_supported(intent, direction): + return 1 + else: + return -1 + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def versions() -> tuple[str, str | None, str, str]: + """ + (pyCMS) Fetches versions. + """ + + deprecate( + "PIL.ImageCms.versions()", + 12, + '(PIL.features.version("littlecms2"), sys.version, PIL.__version__)', + ) + return _VERSION, core.littlecms_version, sys.version.split()[0], __version__ diff --git a/py311/lib/python3.11/site-packages/PIL/ImageColor.py b/py311/lib/python3.11/site-packages/PIL/ImageColor.py new file mode 100644 index 0000000000000000000000000000000000000000..9a15a8eb7597998f1bc9a01e8eae3588c087838b --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageColor.py @@ -0,0 +1,320 @@ +# +# The Python Imaging Library +# $Id$ +# +# map CSS3-style colour description strings to RGB +# +# History: +# 2002-10-24 fl Added support for CSS-style color strings +# 2002-12-15 fl Added RGBA support +# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2 +# 2004-07-19 fl Fixed gray/grey spelling issues +# 2009-03-05 fl Fixed rounding error in grayscale calculation +# +# Copyright (c) 2002-2004 by Secret Labs AB +# Copyright (c) 2002-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import re +from functools import lru_cache + +from . import Image + + +@lru_cache +def getrgb(color: str) -> tuple[int, int, int] | tuple[int, int, int, int]: + """ + Convert a color string to an RGB or RGBA tuple. If the string cannot be + parsed, this function raises a :py:exc:`ValueError` exception. + + .. 
versionadded:: 1.1.4 + + :param color: A color string + :return: ``(red, green, blue[, alpha])`` + """ + if len(color) > 100: + msg = "color specifier is too long" + raise ValueError(msg) + color = color.lower() + + rgb = colormap.get(color, None) + if rgb: + if isinstance(rgb, tuple): + return rgb + rgb_tuple = getrgb(rgb) + assert len(rgb_tuple) == 3 + colormap[color] = rgb_tuple + return rgb_tuple + + # check for known string formats + if re.match("#[a-f0-9]{3}$", color): + return int(color[1] * 2, 16), int(color[2] * 2, 16), int(color[3] * 2, 16) + + if re.match("#[a-f0-9]{4}$", color): + return ( + int(color[1] * 2, 16), + int(color[2] * 2, 16), + int(color[3] * 2, 16), + int(color[4] * 2, 16), + ) + + if re.match("#[a-f0-9]{6}$", color): + return int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16) + + if re.match("#[a-f0-9]{8}$", color): + return ( + int(color[1:3], 16), + int(color[3:5], 16), + int(color[5:7], 16), + int(color[7:9], 16), + ) + + m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color) + if m: + return int(m.group(1)), int(m.group(2)), int(m.group(3)) + + m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color) + if m: + return ( + int((int(m.group(1)) * 255) / 100.0 + 0.5), + int((int(m.group(2)) * 255) / 100.0 + 0.5), + int((int(m.group(3)) * 255) / 100.0 + 0.5), + ) + + m = re.match( + r"hsl\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color + ) + if m: + from colorsys import hls_to_rgb + + rgb_floats = hls_to_rgb( + float(m.group(1)) / 360.0, + float(m.group(3)) / 100.0, + float(m.group(2)) / 100.0, + ) + return ( + int(rgb_floats[0] * 255 + 0.5), + int(rgb_floats[1] * 255 + 0.5), + int(rgb_floats[2] * 255 + 0.5), + ) + + m = re.match( + r"hs[bv]\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color + ) + if m: + from colorsys import hsv_to_rgb + + rgb_floats = hsv_to_rgb( + float(m.group(1)) / 360.0, + float(m.group(2)) / 100.0, + float(m.group(3)) / 100.0, + ) + return ( + int(rgb_floats[0] * 255 + 0.5), + int(rgb_floats[1] * 255 + 0.5), + int(rgb_floats[2] * 255 + 0.5), + ) + + m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color) + if m: + return int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)) + msg = f"unknown color specifier: {repr(color)}" + raise ValueError(msg) + + +@lru_cache +def getcolor(color: str, mode: str) -> int | tuple[int, ...]: + """ + Same as :py:func:`~PIL.ImageColor.getrgb` for most modes. However, if + ``mode`` is HSV, converts the RGB value to a HSV value, or if ``mode`` is + not color or a palette image, converts the RGB value to a grayscale value. + If the string cannot be parsed, this function raises a :py:exc:`ValueError` + exception. + + .. versionadded:: 1.1.4 + + :param color: A color string + :param mode: Convert result to this mode + :return: ``graylevel, (graylevel, alpha) or (red, green, blue[, alpha])`` + """ + # same as getrgb, but converts the result to the given mode + rgb, alpha = getrgb(color), 255 + if len(rgb) == 4: + alpha = rgb[3] + rgb = rgb[:3] + + if mode == "HSV": + from colorsys import rgb_to_hsv + + r, g, b = rgb + h, s, v = rgb_to_hsv(r / 255, g / 255, b / 255) + return int(h * 255), int(s * 255), int(v * 255) + elif Image.getmodebase(mode) == "L": + r, g, b = rgb + # ITU-R Recommendation 601-2 for nonlinear RGB + # scaled to 24 bits to match the convert's implementation. 
+ graylevel = (r * 19595 + g * 38470 + b * 7471 + 0x8000) >> 16 + if mode[-1] == "A": + return graylevel, alpha + return graylevel + elif mode[-1] == "A": + return rgb + (alpha,) + return rgb + + +colormap: dict[str, str | tuple[int, int, int]] = { + # X11 colour table from https://drafts.csswg.org/css-color-4/, with + # gray/grey spelling issues fixed. This is a superset of HTML 4.0 + # colour names used in CSS 1. + "aliceblue": "#f0f8ff", + "antiquewhite": "#faebd7", + "aqua": "#00ffff", + "aquamarine": "#7fffd4", + "azure": "#f0ffff", + "beige": "#f5f5dc", + "bisque": "#ffe4c4", + "black": "#000000", + "blanchedalmond": "#ffebcd", + "blue": "#0000ff", + "blueviolet": "#8a2be2", + "brown": "#a52a2a", + "burlywood": "#deb887", + "cadetblue": "#5f9ea0", + "chartreuse": "#7fff00", + "chocolate": "#d2691e", + "coral": "#ff7f50", + "cornflowerblue": "#6495ed", + "cornsilk": "#fff8dc", + "crimson": "#dc143c", + "cyan": "#00ffff", + "darkblue": "#00008b", + "darkcyan": "#008b8b", + "darkgoldenrod": "#b8860b", + "darkgray": "#a9a9a9", + "darkgrey": "#a9a9a9", + "darkgreen": "#006400", + "darkkhaki": "#bdb76b", + "darkmagenta": "#8b008b", + "darkolivegreen": "#556b2f", + "darkorange": "#ff8c00", + "darkorchid": "#9932cc", + "darkred": "#8b0000", + "darksalmon": "#e9967a", + "darkseagreen": "#8fbc8f", + "darkslateblue": "#483d8b", + "darkslategray": "#2f4f4f", + "darkslategrey": "#2f4f4f", + "darkturquoise": "#00ced1", + "darkviolet": "#9400d3", + "deeppink": "#ff1493", + "deepskyblue": "#00bfff", + "dimgray": "#696969", + "dimgrey": "#696969", + "dodgerblue": "#1e90ff", + "firebrick": "#b22222", + "floralwhite": "#fffaf0", + "forestgreen": "#228b22", + "fuchsia": "#ff00ff", + "gainsboro": "#dcdcdc", + "ghostwhite": "#f8f8ff", + "gold": "#ffd700", + "goldenrod": "#daa520", + "gray": "#808080", + "grey": "#808080", + "green": "#008000", + "greenyellow": "#adff2f", + "honeydew": "#f0fff0", + "hotpink": "#ff69b4", + "indianred": "#cd5c5c", + "indigo": "#4b0082", + "ivory": "#fffff0", + "khaki": "#f0e68c", + "lavender": "#e6e6fa", + "lavenderblush": "#fff0f5", + "lawngreen": "#7cfc00", + "lemonchiffon": "#fffacd", + "lightblue": "#add8e6", + "lightcoral": "#f08080", + "lightcyan": "#e0ffff", + "lightgoldenrodyellow": "#fafad2", + "lightgreen": "#90ee90", + "lightgray": "#d3d3d3", + "lightgrey": "#d3d3d3", + "lightpink": "#ffb6c1", + "lightsalmon": "#ffa07a", + "lightseagreen": "#20b2aa", + "lightskyblue": "#87cefa", + "lightslategray": "#778899", + "lightslategrey": "#778899", + "lightsteelblue": "#b0c4de", + "lightyellow": "#ffffe0", + "lime": "#00ff00", + "limegreen": "#32cd32", + "linen": "#faf0e6", + "magenta": "#ff00ff", + "maroon": "#800000", + "mediumaquamarine": "#66cdaa", + "mediumblue": "#0000cd", + "mediumorchid": "#ba55d3", + "mediumpurple": "#9370db", + "mediumseagreen": "#3cb371", + "mediumslateblue": "#7b68ee", + "mediumspringgreen": "#00fa9a", + "mediumturquoise": "#48d1cc", + "mediumvioletred": "#c71585", + "midnightblue": "#191970", + "mintcream": "#f5fffa", + "mistyrose": "#ffe4e1", + "moccasin": "#ffe4b5", + "navajowhite": "#ffdead", + "navy": "#000080", + "oldlace": "#fdf5e6", + "olive": "#808000", + "olivedrab": "#6b8e23", + "orange": "#ffa500", + "orangered": "#ff4500", + "orchid": "#da70d6", + "palegoldenrod": "#eee8aa", + "palegreen": "#98fb98", + "paleturquoise": "#afeeee", + "palevioletred": "#db7093", + "papayawhip": "#ffefd5", + "peachpuff": "#ffdab9", + "peru": "#cd853f", + "pink": "#ffc0cb", + "plum": "#dda0dd", + "powderblue": "#b0e0e6", + "purple": "#800080", + 
"rebeccapurple": "#663399", + "red": "#ff0000", + "rosybrown": "#bc8f8f", + "royalblue": "#4169e1", + "saddlebrown": "#8b4513", + "salmon": "#fa8072", + "sandybrown": "#f4a460", + "seagreen": "#2e8b57", + "seashell": "#fff5ee", + "sienna": "#a0522d", + "silver": "#c0c0c0", + "skyblue": "#87ceeb", + "slateblue": "#6a5acd", + "slategray": "#708090", + "slategrey": "#708090", + "snow": "#fffafa", + "springgreen": "#00ff7f", + "steelblue": "#4682b4", + "tan": "#d2b48c", + "teal": "#008080", + "thistle": "#d8bfd8", + "tomato": "#ff6347", + "turquoise": "#40e0d0", + "violet": "#ee82ee", + "wheat": "#f5deb3", + "white": "#ffffff", + "whitesmoke": "#f5f5f5", + "yellow": "#ffff00", + "yellowgreen": "#9acd32", +} diff --git a/py311/lib/python3.11/site-packages/PIL/ImageDraw.py b/py311/lib/python3.11/site-packages/PIL/ImageDraw.py new file mode 100644 index 0000000000000000000000000000000000000000..6cf1ee62659f07b21f257a37565a6f81022d9910 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageDraw.py @@ -0,0 +1,1232 @@ +# +# The Python Imaging Library +# $Id$ +# +# drawing interface operations +# +# History: +# 1996-04-13 fl Created (experimental) +# 1996-08-07 fl Filled polygons, ellipses. +# 1996-08-13 fl Added text support +# 1998-06-28 fl Handle I and F images +# 1998-12-29 fl Added arc; use arc primitive to draw ellipses +# 1999-01-10 fl Added shape stuff (experimental) +# 1999-02-06 fl Added bitmap support +# 1999-02-11 fl Changed all primitives to take options +# 1999-02-20 fl Fixed backwards compatibility +# 2000-10-12 fl Copy on write, when necessary +# 2001-02-18 fl Use default ink for bitmap/text also in fill mode +# 2002-10-24 fl Added support for CSS-style color strings +# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing +# 2002-12-11 fl Refactored low-level drawing API (work in progress) +# 2004-08-26 fl Made Draw() a factory function, added getdraw() support +# 2004-09-04 fl Added width support to line primitive +# 2004-09-10 fl Added font mode handling +# 2006-06-19 fl Added font bearing support (getmask2) +# +# Copyright (c) 1997-2006 by Secret Labs AB +# Copyright (c) 1996-2006 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import math +import struct +from collections.abc import Sequence +from types import ModuleType +from typing import Any, AnyStr, Callable, Union, cast + +from . import Image, ImageColor +from ._deprecate import deprecate +from ._typing import Coords + +# experimental access to the outline API +Outline: Callable[[], Image.core._Outline] = Image.core.outline + +TYPE_CHECKING = False +if TYPE_CHECKING: + from . import ImageDraw2, ImageFont + +_Ink = Union[float, tuple[int, ...], str] + +""" +A simple 2D drawing interface for PIL images. +

+Application code should use the :py:func:`Draw` factory, rather than
+instantiating :py:class:`ImageDraw` directly.
+"""
+
+
+class ImageDraw:
+    font: (
+        ImageFont.ImageFont | ImageFont.FreeTypeFont | ImageFont.TransposedFont | None
+    ) = None
+
+    def __init__(self, im: Image.Image, mode: str | None = None) -> None:
+        """
+        Create a drawing instance.
+
+        :param im: The image to draw in.
+        :param mode: Optional mode to use for color values. For RGB
+           images, this argument can be RGB or RGBA (to blend the
+           drawing into the image). For all other modes, this argument
+           must be the same as the image mode. If omitted, the mode
+           defaults to the mode of the image.
+        """
+        im.load()
+        if im.readonly:
+            im._copy()  # make it writeable
+        blend = 0
+        if mode is None:
+            mode = im.mode
+        if mode != im.mode:
+            if mode == "RGBA" and im.mode == "RGB":
+                blend = 1
+            else:
+                msg = "mode mismatch"
+                raise ValueError(msg)
+        if mode == "P":
+            self.palette = im.palette
+        else:
+            self.palette = None
+        self._image = im
+        self.im = im.im
+        self.draw = Image.core.draw(self.im, blend)
+        self.mode = mode
+        if mode in ("I", "F"):
+            self.ink = self.draw.draw_ink(1)
+        else:
+            self.ink = self.draw.draw_ink(-1)
+        if mode in ("1", "P", "I", "F"):
+            # FIXME: fix Fill2 to properly support matte for I+F images
+            self.fontmode = "1"
+        else:
+            self.fontmode = "L"  # aliasing is okay for other modes
+        self.fill = False
+
+    def getfont(
+        self,
+    ) -> ImageFont.ImageFont | ImageFont.FreeTypeFont | ImageFont.TransposedFont:
+        """
+        Get the current default font.
+
+        To set the default font for this ImageDraw instance::
+
+            from PIL import ImageDraw, ImageFont
+            draw.font = ImageFont.truetype("Tests/fonts/FreeMono.ttf")
+
+        To set the default font for all future ImageDraw instances::
+
+            from PIL import ImageDraw, ImageFont
+            ImageDraw.ImageDraw.font = ImageFont.truetype("Tests/fonts/FreeMono.ttf")
+
+        If the current default font is ``None``,
+        it is initialized with ``ImageFont.load_default()``.
+
+        :returns: An image font."""
+        if not self.font:
+            # FIXME: should add a font repository
+            from . import ImageFont
+
+            self.font = ImageFont.load_default()
+        return self.font
+
+    def _getfont(
+        self, font_size: float | None
+    ) -> ImageFont.ImageFont | ImageFont.FreeTypeFont | ImageFont.TransposedFont:
+        if font_size is not None:
+            from . 
import ImageFont + + return ImageFont.load_default(font_size) + else: + return self.getfont() + + def _getink( + self, ink: _Ink | None, fill: _Ink | None = None + ) -> tuple[int | None, int | None]: + result_ink = None + result_fill = None + if ink is None and fill is None: + if self.fill: + result_fill = self.ink + else: + result_ink = self.ink + else: + if ink is not None: + if isinstance(ink, str): + ink = ImageColor.getcolor(ink, self.mode) + if self.palette and isinstance(ink, tuple): + ink = self.palette.getcolor(ink, self._image) + result_ink = self.draw.draw_ink(ink) + if fill is not None: + if isinstance(fill, str): + fill = ImageColor.getcolor(fill, self.mode) + if self.palette and isinstance(fill, tuple): + fill = self.palette.getcolor(fill, self._image) + result_fill = self.draw.draw_ink(fill) + return result_ink, result_fill + + def arc( + self, + xy: Coords, + start: float, + end: float, + fill: _Ink | None = None, + width: int = 1, + ) -> None: + """Draw an arc.""" + ink, fill = self._getink(fill) + if ink is not None: + self.draw.draw_arc(xy, start, end, ink, width) + + def bitmap( + self, xy: Sequence[int], bitmap: Image.Image, fill: _Ink | None = None + ) -> None: + """Draw a bitmap.""" + bitmap.load() + ink, fill = self._getink(fill) + if ink is None: + ink = fill + if ink is not None: + self.draw.draw_bitmap(xy, bitmap.im, ink) + + def chord( + self, + xy: Coords, + start: float, + end: float, + fill: _Ink | None = None, + outline: _Ink | None = None, + width: int = 1, + ) -> None: + """Draw a chord.""" + ink, fill_ink = self._getink(outline, fill) + if fill_ink is not None: + self.draw.draw_chord(xy, start, end, fill_ink, 1) + if ink is not None and ink != fill_ink and width != 0: + self.draw.draw_chord(xy, start, end, ink, 0, width) + + def ellipse( + self, + xy: Coords, + fill: _Ink | None = None, + outline: _Ink | None = None, + width: int = 1, + ) -> None: + """Draw an ellipse.""" + ink, fill_ink = self._getink(outline, fill) + if fill_ink is not None: + self.draw.draw_ellipse(xy, fill_ink, 1) + if ink is not None and ink != fill_ink and width != 0: + self.draw.draw_ellipse(xy, ink, 0, width) + + def circle( + self, + xy: Sequence[float], + radius: float, + fill: _Ink | None = None, + outline: _Ink | None = None, + width: int = 1, + ) -> None: + """Draw a circle given center coordinates and a radius.""" + ellipse_xy = (xy[0] - radius, xy[1] - radius, xy[0] + radius, xy[1] + radius) + self.ellipse(ellipse_xy, fill, outline, width) + + def line( + self, + xy: Coords, + fill: _Ink | None = None, + width: int = 0, + joint: str | None = None, + ) -> None: + """Draw a line, or a connected sequence of line segments.""" + ink = self._getink(fill)[0] + if ink is not None: + self.draw.draw_lines(xy, ink, width) + if joint == "curve" and width > 4: + points: Sequence[Sequence[float]] + if isinstance(xy[0], (list, tuple)): + points = cast(Sequence[Sequence[float]], xy) + else: + points = [ + cast(Sequence[float], tuple(xy[i : i + 2])) + for i in range(0, len(xy), 2) + ] + for i in range(1, len(points) - 1): + point = points[i] + angles = [ + math.degrees(math.atan2(end[0] - start[0], start[1] - end[1])) + % 360 + for start, end in ( + (points[i - 1], point), + (point, points[i + 1]), + ) + ] + if angles[0] == angles[1]: + # This is a straight line, so no joint is required + continue + + def coord_at_angle( + coord: Sequence[float], angle: float + ) -> tuple[float, ...]: + x, y = coord + angle -= 90 + distance = width / 2 - 1 + return tuple( + p + (math.floor(p_d) if p_d > 0 
else math.ceil(p_d)) + for p, p_d in ( + (x, distance * math.cos(math.radians(angle))), + (y, distance * math.sin(math.radians(angle))), + ) + ) + + flipped = ( + angles[1] > angles[0] and angles[1] - 180 > angles[0] + ) or (angles[1] < angles[0] and angles[1] + 180 > angles[0]) + coords = [ + (point[0] - width / 2 + 1, point[1] - width / 2 + 1), + (point[0] + width / 2 - 1, point[1] + width / 2 - 1), + ] + if flipped: + start, end = (angles[1] + 90, angles[0] + 90) + else: + start, end = (angles[0] - 90, angles[1] - 90) + self.pieslice(coords, start - 90, end - 90, fill) + + if width > 8: + # Cover potential gaps between the line and the joint + if flipped: + gap_coords = [ + coord_at_angle(point, angles[0] + 90), + point, + coord_at_angle(point, angles[1] + 90), + ] + else: + gap_coords = [ + coord_at_angle(point, angles[0] - 90), + point, + coord_at_angle(point, angles[1] - 90), + ] + self.line(gap_coords, fill, width=3) + + def shape( + self, + shape: Image.core._Outline, + fill: _Ink | None = None, + outline: _Ink | None = None, + ) -> None: + """(Experimental) Draw a shape.""" + shape.close() + ink, fill_ink = self._getink(outline, fill) + if fill_ink is not None: + self.draw.draw_outline(shape, fill_ink, 1) + if ink is not None and ink != fill_ink: + self.draw.draw_outline(shape, ink, 0) + + def pieslice( + self, + xy: Coords, + start: float, + end: float, + fill: _Ink | None = None, + outline: _Ink | None = None, + width: int = 1, + ) -> None: + """Draw a pieslice.""" + ink, fill_ink = self._getink(outline, fill) + if fill_ink is not None: + self.draw.draw_pieslice(xy, start, end, fill_ink, 1) + if ink is not None and ink != fill_ink and width != 0: + self.draw.draw_pieslice(xy, start, end, ink, 0, width) + + def point(self, xy: Coords, fill: _Ink | None = None) -> None: + """Draw one or more individual pixels.""" + ink, fill = self._getink(fill) + if ink is not None: + self.draw.draw_points(xy, ink) + + def polygon( + self, + xy: Coords, + fill: _Ink | None = None, + outline: _Ink | None = None, + width: int = 1, + ) -> None: + """Draw a polygon.""" + ink, fill_ink = self._getink(outline, fill) + if fill_ink is not None: + self.draw.draw_polygon(xy, fill_ink, 1) + if ink is not None and ink != fill_ink and width != 0: + if width == 1: + self.draw.draw_polygon(xy, ink, 0, width) + elif self.im is not None: + # To avoid expanding the polygon outwards, + # use the fill as a mask + mask = Image.new("1", self.im.size) + mask_ink = self._getink(1)[0] + draw = Draw(mask) + draw.draw.draw_polygon(xy, mask_ink, 1) + + self.draw.draw_polygon(xy, ink, 0, width * 2 - 1, mask.im) + + def regular_polygon( + self, + bounding_circle: Sequence[Sequence[float] | float], + n_sides: int, + rotation: float = 0, + fill: _Ink | None = None, + outline: _Ink | None = None, + width: int = 1, + ) -> None: + """Draw a regular polygon.""" + xy = _compute_regular_polygon_vertices(bounding_circle, n_sides, rotation) + self.polygon(xy, fill, outline, width) + + def rectangle( + self, + xy: Coords, + fill: _Ink | None = None, + outline: _Ink | None = None, + width: int = 1, + ) -> None: + """Draw a rectangle.""" + ink, fill_ink = self._getink(outline, fill) + if fill_ink is not None: + self.draw.draw_rectangle(xy, fill_ink, 1) + if ink is not None and ink != fill_ink and width != 0: + self.draw.draw_rectangle(xy, ink, 0, width) + + def rounded_rectangle( + self, + xy: Coords, + radius: float = 0, + fill: _Ink | None = None, + outline: _Ink | None = None, + width: int = 1, + *, + corners: tuple[bool, bool, bool, 
bool] | None = None, + ) -> None: + """Draw a rounded rectangle.""" + if isinstance(xy[0], (list, tuple)): + (x0, y0), (x1, y1) = cast(Sequence[Sequence[float]], xy) + else: + x0, y0, x1, y1 = cast(Sequence[float], xy) + if x1 < x0: + msg = "x1 must be greater than or equal to x0" + raise ValueError(msg) + if y1 < y0: + msg = "y1 must be greater than or equal to y0" + raise ValueError(msg) + if corners is None: + corners = (True, True, True, True) + + d = radius * 2 + + x0 = round(x0) + y0 = round(y0) + x1 = round(x1) + y1 = round(y1) + full_x, full_y = False, False + if all(corners): + full_x = d >= x1 - x0 - 1 + if full_x: + # The two left and two right corners are joined + d = x1 - x0 + full_y = d >= y1 - y0 - 1 + if full_y: + # The two top and two bottom corners are joined + d = y1 - y0 + if full_x and full_y: + # If all corners are joined, that is a circle + return self.ellipse(xy, fill, outline, width) + + if d == 0 or not any(corners): + # If the corners have no curve, + # or there are no corners, + # that is a rectangle + return self.rectangle(xy, fill, outline, width) + + r = int(d // 2) + ink, fill_ink = self._getink(outline, fill) + + def draw_corners(pieslice: bool) -> None: + parts: tuple[tuple[tuple[float, float, float, float], int, int], ...] + if full_x: + # Draw top and bottom halves + parts = ( + ((x0, y0, x0 + d, y0 + d), 180, 360), + ((x0, y1 - d, x0 + d, y1), 0, 180), + ) + elif full_y: + # Draw left and right halves + parts = ( + ((x0, y0, x0 + d, y0 + d), 90, 270), + ((x1 - d, y0, x1, y0 + d), 270, 90), + ) + else: + # Draw four separate corners + parts = tuple( + part + for i, part in enumerate( + ( + ((x0, y0, x0 + d, y0 + d), 180, 270), + ((x1 - d, y0, x1, y0 + d), 270, 360), + ((x1 - d, y1 - d, x1, y1), 0, 90), + ((x0, y1 - d, x0 + d, y1), 90, 180), + ) + ) + if corners[i] + ) + for part in parts: + if pieslice: + self.draw.draw_pieslice(*(part + (fill_ink, 1))) + else: + self.draw.draw_arc(*(part + (ink, width))) + + if fill_ink is not None: + draw_corners(True) + + if full_x: + self.draw.draw_rectangle((x0, y0 + r + 1, x1, y1 - r - 1), fill_ink, 1) + elif x1 - r - 1 > x0 + r + 1: + self.draw.draw_rectangle((x0 + r + 1, y0, x1 - r - 1, y1), fill_ink, 1) + if not full_x and not full_y: + left = [x0, y0, x0 + r, y1] + if corners[0]: + left[1] += r + 1 + if corners[3]: + left[3] -= r + 1 + self.draw.draw_rectangle(left, fill_ink, 1) + + right = [x1 - r, y0, x1, y1] + if corners[1]: + right[1] += r + 1 + if corners[2]: + right[3] -= r + 1 + self.draw.draw_rectangle(right, fill_ink, 1) + if ink is not None and ink != fill_ink and width != 0: + draw_corners(False) + + if not full_x: + top = [x0, y0, x1, y0 + width - 1] + if corners[0]: + top[0] += r + 1 + if corners[1]: + top[2] -= r + 1 + self.draw.draw_rectangle(top, ink, 1) + + bottom = [x0, y1 - width + 1, x1, y1] + if corners[3]: + bottom[0] += r + 1 + if corners[2]: + bottom[2] -= r + 1 + self.draw.draw_rectangle(bottom, ink, 1) + if not full_y: + left = [x0, y0, x0 + width - 1, y1] + if corners[0]: + left[1] += r + 1 + if corners[3]: + left[3] -= r + 1 + self.draw.draw_rectangle(left, ink, 1) + + right = [x1 - width + 1, y0, x1, y1] + if corners[1]: + right[1] += r + 1 + if corners[2]: + right[3] -= r + 1 + self.draw.draw_rectangle(right, ink, 1) + + def _multiline_check(self, text: AnyStr) -> bool: + split_character = "\n" if isinstance(text, str) else b"\n" + + return split_character in text + + def text( + self, + xy: tuple[float, float], + text: AnyStr, + fill: _Ink | None = None, + font: ( + 
ImageFont.ImageFont + | ImageFont.FreeTypeFont + | ImageFont.TransposedFont + | None + ) = None, + anchor: str | None = None, + spacing: float = 4, + align: str = "left", + direction: str | None = None, + features: list[str] | None = None, + language: str | None = None, + stroke_width: float = 0, + stroke_fill: _Ink | None = None, + embedded_color: bool = False, + *args: Any, + **kwargs: Any, + ) -> None: + """Draw text.""" + if embedded_color and self.mode not in ("RGB", "RGBA"): + msg = "Embedded color supported only in RGB and RGBA modes" + raise ValueError(msg) + + if font is None: + font = self._getfont(kwargs.get("font_size")) + + if self._multiline_check(text): + return self.multiline_text( + xy, + text, + fill, + font, + anchor, + spacing, + align, + direction, + features, + language, + stroke_width, + stroke_fill, + embedded_color, + ) + + def getink(fill: _Ink | None) -> int: + ink, fill_ink = self._getink(fill) + if ink is None: + assert fill_ink is not None + return fill_ink + return ink + + def draw_text(ink: int, stroke_width: float = 0) -> None: + mode = self.fontmode + if stroke_width == 0 and embedded_color: + mode = "RGBA" + coord = [] + for i in range(2): + coord.append(int(xy[i])) + start = (math.modf(xy[0])[0], math.modf(xy[1])[0]) + try: + mask, offset = font.getmask2( # type: ignore[union-attr,misc] + text, + mode, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + stroke_filled=True, + anchor=anchor, + ink=ink, + start=start, + *args, + **kwargs, + ) + coord = [coord[0] + offset[0], coord[1] + offset[1]] + except AttributeError: + try: + mask = font.getmask( # type: ignore[misc] + text, + mode, + direction, + features, + language, + stroke_width, + anchor, + ink, + start=start, + *args, + **kwargs, + ) + except TypeError: + mask = font.getmask(text) + if mode == "RGBA": + # font.getmask2(mode="RGBA") returns color in RGB bands and mask in A + # extract mask and set text alpha + color, mask = mask, mask.getband(3) + ink_alpha = struct.pack("i", ink)[3] + color.fillband(3, ink_alpha) + x, y = coord + if self.im is not None: + self.im.paste( + color, (x, y, x + mask.size[0], y + mask.size[1]), mask + ) + else: + self.draw.draw_bitmap(coord, mask, ink) + + ink = getink(fill) + if ink is not None: + stroke_ink = None + if stroke_width: + stroke_ink = getink(stroke_fill) if stroke_fill is not None else ink + + if stroke_ink is not None: + # Draw stroked text + draw_text(stroke_ink, stroke_width) + + # Draw normal text + if ink != stroke_ink: + draw_text(ink) + else: + # Only draw normal text + draw_text(ink) + + def _prepare_multiline_text( + self, + xy: tuple[float, float], + text: AnyStr, + font: ( + ImageFont.ImageFont + | ImageFont.FreeTypeFont + | ImageFont.TransposedFont + | None + ), + anchor: str | None, + spacing: float, + align: str, + direction: str | None, + features: list[str] | None, + language: str | None, + stroke_width: float, + embedded_color: bool, + font_size: float | None, + ) -> tuple[ + ImageFont.ImageFont | ImageFont.FreeTypeFont | ImageFont.TransposedFont, + list[tuple[tuple[float, float], str, AnyStr]], + ]: + if anchor is None: + anchor = "lt" if direction == "ttb" else "la" + elif len(anchor) != 2: + msg = "anchor must be a 2 character string" + raise ValueError(msg) + elif anchor[1] in "tb" and direction != "ttb": + msg = "anchor not supported for multiline text" + raise ValueError(msg) + + if font is None: + font = self._getfont(font_size) + + lines = text.split("\n" if isinstance(text, str) else 
b"\n") + line_spacing = ( + self.textbbox((0, 0), "A", font, stroke_width=stroke_width)[3] + + stroke_width + + spacing + ) + + top = xy[1] + parts = [] + if direction == "ttb": + left = xy[0] + for line in lines: + parts.append(((left, top), anchor, line)) + left += line_spacing + else: + widths = [] + max_width: float = 0 + for line in lines: + line_width = self.textlength( + line, + font, + direction=direction, + features=features, + language=language, + embedded_color=embedded_color, + ) + widths.append(line_width) + max_width = max(max_width, line_width) + + if anchor[1] == "m": + top -= (len(lines) - 1) * line_spacing / 2.0 + elif anchor[1] == "d": + top -= (len(lines) - 1) * line_spacing + + for idx, line in enumerate(lines): + left = xy[0] + width_difference = max_width - widths[idx] + + # align by align parameter + if align in ("left", "justify"): + pass + elif align == "center": + left += width_difference / 2.0 + elif align == "right": + left += width_difference + else: + msg = 'align must be "left", "center", "right" or "justify"' + raise ValueError(msg) + + if ( + align == "justify" + and width_difference != 0 + and idx != len(lines) - 1 + ): + words = line.split(" " if isinstance(text, str) else b" ") + if len(words) > 1: + # align left by anchor + if anchor[0] == "m": + left -= max_width / 2.0 + elif anchor[0] == "r": + left -= max_width + + word_widths = [ + self.textlength( + word, + font, + direction=direction, + features=features, + language=language, + embedded_color=embedded_color, + ) + for word in words + ] + word_anchor = "l" + anchor[1] + width_difference = max_width - sum(word_widths) + for i, word in enumerate(words): + parts.append(((left, top), word_anchor, word)) + left += word_widths[i] + width_difference / (len(words) - 1) + top += line_spacing + continue + + # align left by anchor + if anchor[0] == "m": + left -= width_difference / 2.0 + elif anchor[0] == "r": + left -= width_difference + parts.append(((left, top), anchor, line)) + top += line_spacing + + return font, parts + + def multiline_text( + self, + xy: tuple[float, float], + text: AnyStr, + fill: _Ink | None = None, + font: ( + ImageFont.ImageFont + | ImageFont.FreeTypeFont + | ImageFont.TransposedFont + | None + ) = None, + anchor: str | None = None, + spacing: float = 4, + align: str = "left", + direction: str | None = None, + features: list[str] | None = None, + language: str | None = None, + stroke_width: float = 0, + stroke_fill: _Ink | None = None, + embedded_color: bool = False, + *, + font_size: float | None = None, + ) -> None: + font, lines = self._prepare_multiline_text( + xy, + text, + font, + anchor, + spacing, + align, + direction, + features, + language, + stroke_width, + embedded_color, + font_size, + ) + + for xy, anchor, line in lines: + self.text( + xy, + line, + fill, + font, + anchor, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + stroke_fill=stroke_fill, + embedded_color=embedded_color, + ) + + def textlength( + self, + text: AnyStr, + font: ( + ImageFont.ImageFont + | ImageFont.FreeTypeFont + | ImageFont.TransposedFont + | None + ) = None, + direction: str | None = None, + features: list[str] | None = None, + language: str | None = None, + embedded_color: bool = False, + *, + font_size: float | None = None, + ) -> float: + """Get the length of a given string, in pixels with 1/64 precision.""" + if self._multiline_check(text): + msg = "can't measure length of multiline text" + raise ValueError(msg) + if embedded_color and 
self.mode not in ("RGB", "RGBA"): + msg = "Embedded color supported only in RGB and RGBA modes" + raise ValueError(msg) + + if font is None: + font = self._getfont(font_size) + mode = "RGBA" if embedded_color else self.fontmode + return font.getlength(text, mode, direction, features, language) + + def textbbox( + self, + xy: tuple[float, float], + text: AnyStr, + font: ( + ImageFont.ImageFont + | ImageFont.FreeTypeFont + | ImageFont.TransposedFont + | None + ) = None, + anchor: str | None = None, + spacing: float = 4, + align: str = "left", + direction: str | None = None, + features: list[str] | None = None, + language: str | None = None, + stroke_width: float = 0, + embedded_color: bool = False, + *, + font_size: float | None = None, + ) -> tuple[float, float, float, float]: + """Get the bounding box of a given string, in pixels.""" + if embedded_color and self.mode not in ("RGB", "RGBA"): + msg = "Embedded color supported only in RGB and RGBA modes" + raise ValueError(msg) + + if font is None: + font = self._getfont(font_size) + + if self._multiline_check(text): + return self.multiline_textbbox( + xy, + text, + font, + anchor, + spacing, + align, + direction, + features, + language, + stroke_width, + embedded_color, + ) + + mode = "RGBA" if embedded_color else self.fontmode + bbox = font.getbbox( + text, mode, direction, features, language, stroke_width, anchor + ) + return bbox[0] + xy[0], bbox[1] + xy[1], bbox[2] + xy[0], bbox[3] + xy[1] + + def multiline_textbbox( + self, + xy: tuple[float, float], + text: AnyStr, + font: ( + ImageFont.ImageFont + | ImageFont.FreeTypeFont + | ImageFont.TransposedFont + | None + ) = None, + anchor: str | None = None, + spacing: float = 4, + align: str = "left", + direction: str | None = None, + features: list[str] | None = None, + language: str | None = None, + stroke_width: float = 0, + embedded_color: bool = False, + *, + font_size: float | None = None, + ) -> tuple[float, float, float, float]: + font, lines = self._prepare_multiline_text( + xy, + text, + font, + anchor, + spacing, + align, + direction, + features, + language, + stroke_width, + embedded_color, + font_size, + ) + + bbox: tuple[float, float, float, float] | None = None + + for xy, anchor, line in lines: + bbox_line = self.textbbox( + xy, + line, + font, + anchor, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + embedded_color=embedded_color, + ) + if bbox is None: + bbox = bbox_line + else: + bbox = ( + min(bbox[0], bbox_line[0]), + min(bbox[1], bbox_line[1]), + max(bbox[2], bbox_line[2]), + max(bbox[3], bbox_line[3]), + ) + + if bbox is None: + return xy[0], xy[1], xy[0], xy[1] + return bbox + + +def Draw(im: Image.Image, mode: str | None = None) -> ImageDraw: + """ + A simple 2D drawing interface for PIL images. + + :param im: The image to draw in. + :param mode: Optional mode to use for color values. For RGB + images, this argument can be RGB or RGBA (to blend the + drawing into the image). For all other modes, this argument + must be the same as the image mode. If omitted, the mode + defaults to the mode of the image. + """ + try: + return getattr(im, "getdraw")(mode) + except AttributeError: + return ImageDraw(im, mode) + + +def getdraw( + im: Image.Image | None = None, hints: list[str] | None = None +) -> tuple[ImageDraw2.Draw | None, ModuleType]: + """ + :param im: The image to draw in. + :param hints: An optional list of hints. Deprecated. + :returns: A (drawing context, drawing resource factory) tuple. 
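+
+    A minimal sketch (coordinates are illustrative)::
+
+        from PIL import Image, ImageDraw
+
+        with Image.new("RGB", (100, 100)) as im:
+            draw, handler = ImageDraw.getdraw(im)
+            if draw is not None:
+                draw.line([(10, 10), (90, 90)], handler.Pen("red", width=2))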
+ """ + if hints is not None: + deprecate("'hints' parameter", 12) + from . import ImageDraw2 + + draw = ImageDraw2.Draw(im) if im is not None else None + return draw, ImageDraw2 + + +def floodfill( + image: Image.Image, + xy: tuple[int, int], + value: float | tuple[int, ...], + border: float | tuple[int, ...] | None = None, + thresh: float = 0, +) -> None: + """ + .. warning:: This method is experimental. + + Fills a bounded region with a given color. + + :param image: Target image. + :param xy: Seed position (a 2-item coordinate tuple). See + :ref:`coordinate-system`. + :param value: Fill color. + :param border: Optional border value. If given, the region consists of + pixels with a color different from the border color. If not given, + the region consists of pixels having the same color as the seed + pixel. + :param thresh: Optional threshold value which specifies a maximum + tolerable difference of a pixel value from the 'background' in + order for it to be replaced. Useful for filling regions of + non-homogeneous, but similar, colors. + """ + # based on an implementation by Eric S. Raymond + # amended by yo1995 @20180806 + pixel = image.load() + assert pixel is not None + x, y = xy + try: + background = pixel[x, y] + if _color_diff(value, background) <= thresh: + return # seed point already has fill color + pixel[x, y] = value + except (ValueError, IndexError): + return # seed point outside image + edge = {(x, y)} + # use a set to keep record of current and previous edge pixels + # to reduce memory consumption + full_edge = set() + while edge: + new_edge = set() + for x, y in edge: # 4 adjacent method + for s, t in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)): + # If already processed, or if a coordinate is negative, skip + if (s, t) in full_edge or s < 0 or t < 0: + continue + try: + p = pixel[s, t] + except (ValueError, IndexError): + pass + else: + full_edge.add((s, t)) + if border is None: + fill = _color_diff(p, background) <= thresh + else: + fill = p not in (value, border) + if fill: + pixel[s, t] = value + new_edge.add((s, t)) + full_edge = edge # discard pixels processed + edge = new_edge + + +def _compute_regular_polygon_vertices( + bounding_circle: Sequence[Sequence[float] | float], n_sides: int, rotation: float +) -> list[tuple[float, float]]: + """ + Generate a list of vertices for a 2D regular polygon. + + :param bounding_circle: The bounding circle is a sequence defined + by a point and radius. The polygon is inscribed in this circle. + (e.g. ``bounding_circle=(x, y, r)`` or ``((x, y), r)``) + :param n_sides: Number of sides + (e.g. ``n_sides=3`` for a triangle, ``6`` for a hexagon) + :param rotation: Apply an arbitrary rotation to the polygon + (e.g. ``rotation=90``, applies a 90 degree rotation) + :return: List of regular polygon vertices + (e.g. ``[(25, 50), (50, 50), (50, 25), (25, 25)]``) + + How are the vertices computed? + 1. Compute the following variables + - theta: Angle between the apothem & the nearest polygon vertex + - side_length: Length of each polygon edge + - centroid: Center of bounding circle (1st, 2nd elements of bounding_circle) + - polygon_radius: Polygon radius (last element of bounding_circle) + - angles: Location of each polygon vertex in polar grid + (e.g. A square with 0 degree rotation => [225.0, 315.0, 45.0, 135.0]) + + 2. For each angle in angles, get the polygon vertex at that angle + The vertex is computed using the equation below. 
+ X= xcos(φ) + ysin(φ) + Y= −xsin(φ) + ycos(φ) + + Note: + φ = angle in degrees + x = 0 + y = polygon_radius + + The formula above assumes rotation around the origin. + In our case, we are rotating around the centroid. + To account for this, we use the formula below + X = xcos(φ) + ysin(φ) + centroid_x + Y = −xsin(φ) + ycos(φ) + centroid_y + """ + # 1. Error Handling + # 1.1 Check `n_sides` has an appropriate value + if not isinstance(n_sides, int): + msg = "n_sides should be an int" # type: ignore[unreachable] + raise TypeError(msg) + if n_sides < 3: + msg = "n_sides should be an int > 2" + raise ValueError(msg) + + # 1.2 Check `bounding_circle` has an appropriate value + if not isinstance(bounding_circle, (list, tuple)): + msg = "bounding_circle should be a sequence" + raise TypeError(msg) + + if len(bounding_circle) == 3: + if not all(isinstance(i, (int, float)) for i in bounding_circle): + msg = "bounding_circle should only contain numeric data" + raise ValueError(msg) + + *centroid, polygon_radius = cast(list[float], list(bounding_circle)) + elif len(bounding_circle) == 2 and isinstance(bounding_circle[0], (list, tuple)): + if not all( + isinstance(i, (int, float)) for i in bounding_circle[0] + ) or not isinstance(bounding_circle[1], (int, float)): + msg = "bounding_circle should only contain numeric data" + raise ValueError(msg) + + if len(bounding_circle[0]) != 2: + msg = "bounding_circle centre should contain 2D coordinates (e.g. (x, y))" + raise ValueError(msg) + + centroid = cast(list[float], list(bounding_circle[0])) + polygon_radius = cast(float, bounding_circle[1]) + else: + msg = ( + "bounding_circle should contain 2D coordinates " + "and a radius (e.g. (x, y, r) or ((x, y), r) )" + ) + raise ValueError(msg) + + if polygon_radius <= 0: + msg = "bounding_circle radius should be > 0" + raise ValueError(msg) + + # 1.3 Check `rotation` has an appropriate value + if not isinstance(rotation, (int, float)): + msg = "rotation should be an int or float" # type: ignore[unreachable] + raise ValueError(msg) + + # 2. Define Helper Functions + def _apply_rotation(point: list[float], degrees: float) -> tuple[float, float]: + return ( + round( + point[0] * math.cos(math.radians(360 - degrees)) + - point[1] * math.sin(math.radians(360 - degrees)) + + centroid[0], + 2, + ), + round( + point[1] * math.cos(math.radians(360 - degrees)) + + point[0] * math.sin(math.radians(360 - degrees)) + + centroid[1], + 2, + ), + ) + + def _compute_polygon_vertex(angle: float) -> tuple[float, float]: + start_point = [polygon_radius, 0] + return _apply_rotation(start_point, angle) + + def _get_angles(n_sides: int, rotation: float) -> list[float]: + angles = [] + degrees = 360 / n_sides + # Start with the bottom left polygon vertex + current_angle = (270 - 0.5 * degrees) + rotation + for _ in range(n_sides): + angles.append(current_angle) + current_angle += degrees + if current_angle > 360: + current_angle -= 360 + return angles + + # 3. Variable Declarations + angles = _get_angles(n_sides, rotation) + + # 4. Compute Vertices + return [_compute_polygon_vertex(angle) for angle in angles] + + +def _color_diff( + color1: float | tuple[int, ...], color2: float | tuple[int, ...] +) -> float: + """ + Uses 1-norm distance to calculate difference between two values. 
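+
+    For example, ``_color_diff((255, 0, 0), (250, 10, 0))`` is
+    ``|255 - 250| + |0 - 10| + |0 - 0| = 15``.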
+ """ + first = color1 if isinstance(color1, tuple) else (color1,) + second = color2 if isinstance(color2, tuple) else (color2,) + + return sum(abs(first[i] - second[i]) for i in range(len(second))) diff --git a/py311/lib/python3.11/site-packages/PIL/ImageDraw2.py b/py311/lib/python3.11/site-packages/PIL/ImageDraw2.py new file mode 100644 index 0000000000000000000000000000000000000000..3d68658ed5b79a36597e4953b888c41aa82fc7da --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageDraw2.py @@ -0,0 +1,243 @@ +# +# The Python Imaging Library +# $Id$ +# +# WCK-style drawing interface operations +# +# History: +# 2003-12-07 fl created +# 2005-05-15 fl updated; added to PIL as ImageDraw2 +# 2005-05-15 fl added text support +# 2005-05-20 fl added arc/chord/pieslice support +# +# Copyright (c) 2003-2005 by Secret Labs AB +# Copyright (c) 2003-2005 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + + +""" +(Experimental) WCK-style drawing interface operations + +.. seealso:: :py:mod:`PIL.ImageDraw` +""" +from __future__ import annotations + +from typing import Any, AnyStr, BinaryIO + +from . import Image, ImageColor, ImageDraw, ImageFont, ImagePath +from ._typing import Coords, StrOrBytesPath + + +class Pen: + """Stores an outline color and width.""" + + def __init__(self, color: str, width: int = 1, opacity: int = 255) -> None: + self.color = ImageColor.getrgb(color) + self.width = width + + +class Brush: + """Stores a fill color""" + + def __init__(self, color: str, opacity: int = 255) -> None: + self.color = ImageColor.getrgb(color) + + +class Font: + """Stores a TrueType font and color""" + + def __init__( + self, color: str, file: StrOrBytesPath | BinaryIO, size: float = 12 + ) -> None: + # FIXME: add support for bitmap fonts + self.color = ImageColor.getrgb(color) + self.font = ImageFont.truetype(file, size) + + +class Draw: + """ + (Experimental) WCK-style drawing interface + """ + + def __init__( + self, + image: Image.Image | str, + size: tuple[int, int] | list[int] | None = None, + color: float | tuple[float, ...] 
| str | None = None, + ) -> None: + if isinstance(image, str): + if size is None: + msg = "If image argument is mode string, size must be a list or tuple" + raise ValueError(msg) + image = Image.new(image, size, color) + self.draw = ImageDraw.Draw(image) + self.image = image + self.transform: tuple[float, float, float, float, float, float] | None = None + + def flush(self) -> Image.Image: + return self.image + + def render( + self, + op: str, + xy: Coords, + pen: Pen | Brush | None, + brush: Brush | Pen | None = None, + **kwargs: Any, + ) -> None: + # handle color arguments + outline = fill = None + width = 1 + if isinstance(pen, Pen): + outline = pen.color + width = pen.width + elif isinstance(brush, Pen): + outline = brush.color + width = brush.width + if isinstance(brush, Brush): + fill = brush.color + elif isinstance(pen, Brush): + fill = pen.color + # handle transformation + if self.transform: + path = ImagePath.Path(xy) + path.transform(self.transform) + xy = path + # render the item + if op in ("arc", "line"): + kwargs.setdefault("fill", outline) + else: + kwargs.setdefault("fill", fill) + kwargs.setdefault("outline", outline) + if op == "line": + kwargs.setdefault("width", width) + getattr(self.draw, op)(xy, **kwargs) + + def settransform(self, offset: tuple[float, float]) -> None: + """Sets a transformation offset.""" + (xoffset, yoffset) = offset + self.transform = (1, 0, xoffset, 0, 1, yoffset) + + def arc( + self, + xy: Coords, + pen: Pen | Brush | None, + start: float, + end: float, + *options: Any, + ) -> None: + """ + Draws an arc (a portion of a circle outline) between the start and end + angles, inside the given bounding box. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.arc` + """ + self.render("arc", xy, pen, *options, start=start, end=end) + + def chord( + self, + xy: Coords, + pen: Pen | Brush | None, + start: float, + end: float, + *options: Any, + ) -> None: + """ + Same as :py:meth:`~PIL.ImageDraw2.Draw.arc`, but connects the end points + with a straight line. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.chord` + """ + self.render("chord", xy, pen, *options, start=start, end=end) + + def ellipse(self, xy: Coords, pen: Pen | Brush | None, *options: Any) -> None: + """ + Draws an ellipse inside the given bounding box. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.ellipse` + """ + self.render("ellipse", xy, pen, *options) + + def line(self, xy: Coords, pen: Pen | Brush | None, *options: Any) -> None: + """ + Draws a line between the coordinates in the ``xy`` list. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.line` + """ + self.render("line", xy, pen, *options) + + def pieslice( + self, + xy: Coords, + pen: Pen | Brush | None, + start: float, + end: float, + *options: Any, + ) -> None: + """ + Same as arc, but also draws straight lines between the end points and the + center of the bounding box. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.pieslice` + """ + self.render("pieslice", xy, pen, *options, start=start, end=end) + + def polygon(self, xy: Coords, pen: Pen | Brush | None, *options: Any) -> None: + """ + Draws a polygon. + + The polygon outline consists of straight lines between the given + coordinates, plus a straight line between the last and the first + coordinate. + + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.polygon` + """ + self.render("polygon", xy, pen, *options) + + def rectangle(self, xy: Coords, pen: Pen | Brush | None, *options: Any) -> None: + """ + Draws a rectangle. + + .. 
seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.rectangle` + """ + self.render("rectangle", xy, pen, *options) + + def text(self, xy: tuple[float, float], text: AnyStr, font: Font) -> None: + """ + Draws the string at the given position. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.text` + """ + if self.transform: + path = ImagePath.Path(xy) + path.transform(self.transform) + xy = path + self.draw.text(xy, text, font=font.font, fill=font.color) + + def textbbox( + self, xy: tuple[float, float], text: AnyStr, font: Font + ) -> tuple[float, float, float, float]: + """ + Returns bounding box (in pixels) of given text. + + :return: ``(left, top, right, bottom)`` bounding box + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textbbox` + """ + if self.transform: + path = ImagePath.Path(xy) + path.transform(self.transform) + xy = path + return self.draw.textbbox(xy, text, font=font.font) + + def textlength(self, text: AnyStr, font: Font) -> float: + """ + Returns length (in pixels) of given text. + This is the amount by which following text should be offset. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textlength` + """ + return self.draw.textlength(text, font=font.font) diff --git a/py311/lib/python3.11/site-packages/PIL/ImageEnhance.py b/py311/lib/python3.11/site-packages/PIL/ImageEnhance.py new file mode 100644 index 0000000000000000000000000000000000000000..0e7e6dd8ae631ad3577bda1d3e823bd2a3227536 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageEnhance.py @@ -0,0 +1,113 @@ +# +# The Python Imaging Library. +# $Id$ +# +# image enhancement classes +# +# For a background, see "Image Processing By Interpolation and +# Extrapolation", Paul Haeberli and Douglas Voorhies. Available +# at http://www.graficaobscura.com/interp/index.html +# +# History: +# 1996-03-23 fl Created +# 2009-06-16 fl Fixed mean calculation +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +from . import Image, ImageFilter, ImageStat + + +class _Enhance: + image: Image.Image + degenerate: Image.Image + + def enhance(self, factor: float) -> Image.Image: + """ + Returns an enhanced image. + + :param factor: A floating point value controlling the enhancement. + Factor 1.0 always returns a copy of the original image, + lower factors mean less color (brightness, contrast, + etc), and higher values more. There are no restrictions + on this value. + :rtype: :py:class:`~PIL.Image.Image` + """ + return Image.blend(self.degenerate, self.image, factor) + + +class Color(_Enhance): + """Adjust image color balance. + + This class can be used to adjust the colour balance of an image, in + a manner similar to the controls on a colour TV set. An enhancement + factor of 0.0 gives a black and white image. A factor of 1.0 gives + the original image. + """ + + def __init__(self, image: Image.Image) -> None: + self.image = image + self.intermediate_mode = "L" + if "A" in image.getbands(): + self.intermediate_mode = "LA" + + if self.intermediate_mode != image.mode: + image = image.convert(self.intermediate_mode).convert(image.mode) + self.degenerate = image + + +class Contrast(_Enhance): + """Adjust image contrast. + + This class can be used to control the contrast of an image, similar + to the contrast control on a TV set. An enhancement factor of 0.0 + gives a solid gray image. A factor of 1.0 gives the original image. 
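+
+    A minimal sketch (the filename is a placeholder)::
+
+        from PIL import Image, ImageEnhance
+
+        with Image.open("photo.jpg") as im:
+            more_contrast = ImageEnhance.Contrast(im).enhance(1.3)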
+ """ + + def __init__(self, image: Image.Image) -> None: + self.image = image + if image.mode != "L": + image = image.convert("L") + mean = int(ImageStat.Stat(image).mean[0] + 0.5) + self.degenerate = Image.new("L", image.size, mean) + if self.degenerate.mode != self.image.mode: + self.degenerate = self.degenerate.convert(self.image.mode) + + if "A" in self.image.getbands(): + self.degenerate.putalpha(self.image.getchannel("A")) + + +class Brightness(_Enhance): + """Adjust image brightness. + + This class can be used to control the brightness of an image. An + enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the + original image. + """ + + def __init__(self, image: Image.Image) -> None: + self.image = image + self.degenerate = Image.new(image.mode, image.size, 0) + + if "A" in image.getbands(): + self.degenerate.putalpha(image.getchannel("A")) + + +class Sharpness(_Enhance): + """Adjust image sharpness. + + This class can be used to adjust the sharpness of an image. An + enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the + original image, and a factor of 2.0 gives a sharpened image. + """ + + def __init__(self, image: Image.Image) -> None: + self.image = image + self.degenerate = image.filter(ImageFilter.SMOOTH) + + if "A" in image.getbands(): + self.degenerate.putalpha(image.getchannel("A")) diff --git a/py311/lib/python3.11/site-packages/PIL/ImageFile.py b/py311/lib/python3.11/site-packages/PIL/ImageFile.py new file mode 100644 index 0000000000000000000000000000000000000000..bf556a2c69036902d4b842cd8f9fa33ee04fa633 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageFile.py @@ -0,0 +1,922 @@ +# +# The Python Imaging Library. +# $Id$ +# +# base class for image file handlers +# +# history: +# 1995-09-09 fl Created +# 1996-03-11 fl Fixed load mechanism. +# 1996-04-15 fl Added pcx/xbm decoders. +# 1996-04-30 fl Added encoders. +# 1996-12-14 fl Added load helpers +# 1997-01-11 fl Use encode_to_file where possible +# 1997-08-27 fl Flush output in _save +# 1998-03-05 fl Use memory mapping for some modes +# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B" +# 1999-05-31 fl Added image parser +# 2000-10-12 fl Set readonly flag on memory-mapped images +# 2002-03-20 fl Use better messages for common decoder errors +# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available +# 2003-10-30 fl Added StubImageFile class +# 2004-02-25 fl Made incremental parser more robust +# +# Copyright (c) 1997-2004 by Secret Labs AB +# Copyright (c) 1995-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import abc +import io +import itertools +import logging +import os +import struct +from typing import IO, Any, NamedTuple, cast + +from . import ExifTags, Image +from ._deprecate import deprecate +from ._util import DeferredError, is_path + +TYPE_CHECKING = False +if TYPE_CHECKING: + from ._typing import StrOrBytesPath + +logger = logging.getLogger(__name__) + +MAXBLOCK = 65536 + +SAFEBLOCK = 1024 * 1024 + +LOAD_TRUNCATED_IMAGES = False +"""Whether or not to load truncated image files. User code may change this.""" + +ERRORS = { + -1: "image buffer overrun error", + -2: "decoding error", + -3: "unknown error", + -8: "bad configuration", + -9: "out of memory error", +} +""" +Dict of known error codes returned from :meth:`.PyDecoder.decode`, +:meth:`.PyEncoder.encode` :meth:`.PyEncoder.encode_to_pyfd` and +:meth:`.PyEncoder.encode_to_file`. 
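+
+For example, ``ERRORS.get(-2)`` is ``"decoding error"``; codes missing from
+this table fall back to a generic message (see ``_get_oserror`` below).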
+""" + + +# +# -------------------------------------------------------------------- +# Helpers + + +def _get_oserror(error: int, *, encoder: bool) -> OSError: + try: + msg = Image.core.getcodecstatus(error) + except AttributeError: + msg = ERRORS.get(error) + if not msg: + msg = f"{'encoder' if encoder else 'decoder'} error {error}" + msg += f" when {'writing' if encoder else 'reading'} image file" + return OSError(msg) + + +def raise_oserror(error: int) -> OSError: + deprecate( + "raise_oserror", + 12, + action="It is only useful for translating error codes returned by a codec's " + "decode() method, which ImageFile already does automatically.", + ) + raise _get_oserror(error, encoder=False) + + +def _tilesort(t: _Tile) -> int: + # sort on offset + return t[2] + + +class _Tile(NamedTuple): + codec_name: str + extents: tuple[int, int, int, int] | None + offset: int = 0 + args: tuple[Any, ...] | str | None = None + + +# +# -------------------------------------------------------------------- +# ImageFile base class + + +class ImageFile(Image.Image): + """Base class for image file format handlers.""" + + def __init__( + self, fp: StrOrBytesPath | IO[bytes], filename: str | bytes | None = None + ) -> None: + super().__init__() + + self._min_frame = 0 + + self.custom_mimetype: str | None = None + + self.tile: list[_Tile] = [] + """ A list of tile descriptors """ + + self.readonly = 1 # until we know better + + self.decoderconfig: tuple[Any, ...] = () + self.decodermaxblock = MAXBLOCK + + if is_path(fp): + # filename + self.fp = open(fp, "rb") + self.filename = os.fspath(fp) + self._exclusive_fp = True + else: + # stream + self.fp = cast(IO[bytes], fp) + self.filename = filename if filename is not None else "" + # can be overridden + self._exclusive_fp = False + + try: + try: + self._open() + except ( + IndexError, # end of data + TypeError, # end of data (ord) + KeyError, # unsupported mode + EOFError, # got header but not the first frame + struct.error, + ) as v: + raise SyntaxError(v) from v + + if not self.mode or self.size[0] <= 0 or self.size[1] <= 0: + msg = "not identified by this driver" + raise SyntaxError(msg) + except BaseException: + # close the file only if we have opened it this constructor + if self._exclusive_fp: + self.fp.close() + raise + + def _open(self) -> None: + pass + + def _close_fp(self): + if getattr(self, "_fp", False) and not isinstance(self._fp, DeferredError): + if self._fp != self.fp: + self._fp.close() + self._fp = DeferredError(ValueError("Operation on closed image")) + if self.fp: + self.fp.close() + + def close(self) -> None: + """ + Closes the file pointer, if possible. + + This operation will destroy the image core and release its memory. + The image data will be unusable afterward. + + This function is required to close images that have multiple frames or + have not had their file read and closed by the + :py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for + more information. 
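+
+        A minimal sketch (``"anim.gif"`` stands in for any multi-frame
+        file)::
+
+            from PIL import Image
+
+            im = Image.open("anim.gif")
+            try:
+                im.seek(im.n_frames - 1)
+            finally:
+                im.close()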
+ """ + try: + self._close_fp() + self.fp = None + except Exception as msg: + logger.debug("Error closing: %s", msg) + + super().close() + + def get_child_images(self) -> list[ImageFile]: + child_images = [] + exif = self.getexif() + ifds = [] + if ExifTags.Base.SubIFDs in exif: + subifd_offsets = exif[ExifTags.Base.SubIFDs] + if subifd_offsets: + if not isinstance(subifd_offsets, tuple): + subifd_offsets = (subifd_offsets,) + for subifd_offset in subifd_offsets: + ifds.append((exif._get_ifd_dict(subifd_offset), subifd_offset)) + ifd1 = exif.get_ifd(ExifTags.IFD.IFD1) + if ifd1 and ifd1.get(ExifTags.Base.JpegIFOffset): + assert exif._info is not None + ifds.append((ifd1, exif._info.next)) + + offset = None + for ifd, ifd_offset in ifds: + assert self.fp is not None + current_offset = self.fp.tell() + if offset is None: + offset = current_offset + + fp = self.fp + if ifd is not None: + thumbnail_offset = ifd.get(ExifTags.Base.JpegIFOffset) + if thumbnail_offset is not None: + thumbnail_offset += getattr(self, "_exif_offset", 0) + self.fp.seek(thumbnail_offset) + + length = ifd.get(ExifTags.Base.JpegIFByteCount) + assert isinstance(length, int) + data = self.fp.read(length) + fp = io.BytesIO(data) + + with Image.open(fp) as im: + from . import TiffImagePlugin + + if thumbnail_offset is None and isinstance( + im, TiffImagePlugin.TiffImageFile + ): + im._frame_pos = [ifd_offset] + im._seek(0) + im.load() + child_images.append(im) + + if offset is not None: + assert self.fp is not None + self.fp.seek(offset) + return child_images + + def get_format_mimetype(self) -> str | None: + if self.custom_mimetype: + return self.custom_mimetype + if self.format is not None: + return Image.MIME.get(self.format.upper()) + return None + + def __getstate__(self) -> list[Any]: + return super().__getstate__() + [self.filename] + + def __setstate__(self, state: list[Any]) -> None: + self.tile = [] + if len(state) > 5: + self.filename = state[5] + super().__setstate__(state) + + def verify(self) -> None: + """Check file integrity""" + + # raise exception if something's wrong. must be called + # directly after open, and closes file when finished. 
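+        #
+        # Typical use, as a sketch (the path is a placeholder):
+        #
+        #     from PIL import Image
+        #
+        #     with Image.open("test.png") as im:
+        #         im.verify()
+        #     im = Image.open("test.png")  # reopen before loading pixel data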
+ if self._exclusive_fp: + self.fp.close() + self.fp = None + + def load(self) -> Image.core.PixelAccess | None: + """Load image data based on tile list""" + + if not self.tile and self._im is None: + msg = "cannot load this image" + raise OSError(msg) + + pixel = Image.Image.load(self) + if not self.tile: + return pixel + + self.map: mmap.mmap | None = None + use_mmap = self.filename and len(self.tile) == 1 + + readonly = 0 + + # look for read/seek overrides + if hasattr(self, "load_read"): + read = self.load_read + # don't use mmap if there are custom read/seek functions + use_mmap = False + else: + read = self.fp.read + + if hasattr(self, "load_seek"): + seek = self.load_seek + use_mmap = False + else: + seek = self.fp.seek + + if use_mmap: + # try memory mapping + decoder_name, extents, offset, args = self.tile[0] + if isinstance(args, str): + args = (args, 0, 1) + if ( + decoder_name == "raw" + and isinstance(args, tuple) + and len(args) >= 3 + and args[0] == self.mode + and args[0] in Image._MAPMODES + ): + try: + # use mmap, if possible + import mmap + + with open(self.filename) as fp: + self.map = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ) + if offset + self.size[1] * args[1] > self.map.size(): + msg = "buffer is not large enough" + raise OSError(msg) + self.im = Image.core.map_buffer( + self.map, self.size, decoder_name, offset, args + ) + readonly = 1 + # After trashing self.im, + # we might need to reload the palette data. + if self.palette: + self.palette.dirty = 1 + except (AttributeError, OSError, ImportError): + self.map = None + + self.load_prepare() + err_code = -3 # initialize to unknown error + if not self.map: + # sort tiles in file order + self.tile.sort(key=_tilesort) + + # FIXME: This is a hack to handle TIFF's JpegTables tag. 
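+            # A plugin can set ``tile_prefix`` to bytes that must be fed to
+            # the decoder before the tile data itself; per the FIXME above,
+            # TIFF uses this for JPEG tables shared by all tiles.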
+ prefix = getattr(self, "tile_prefix", b"") + + # Remove consecutive duplicates that only differ by their offset + self.tile = [ + list(tiles)[-1] + for _, tiles in itertools.groupby( + self.tile, lambda tile: (tile[0], tile[1], tile[3]) + ) + ] + for i, (decoder_name, extents, offset, args) in enumerate(self.tile): + seek(offset) + decoder = Image._getdecoder( + self.mode, decoder_name, args, self.decoderconfig + ) + try: + decoder.setimage(self.im, extents) + if decoder.pulls_fd: + decoder.setfd(self.fp) + err_code = decoder.decode(b"")[1] + else: + b = prefix + while True: + read_bytes = self.decodermaxblock + if i + 1 < len(self.tile): + next_offset = self.tile[i + 1].offset + if next_offset > offset: + read_bytes = next_offset - offset + try: + s = read(read_bytes) + except (IndexError, struct.error) as e: + # truncated png/gif + if LOAD_TRUNCATED_IMAGES: + break + else: + msg = "image file is truncated" + raise OSError(msg) from e + + if not s: # truncated jpeg + if LOAD_TRUNCATED_IMAGES: + break + else: + msg = ( + "image file is truncated " + f"({len(b)} bytes not processed)" + ) + raise OSError(msg) + + b = b + s + n, err_code = decoder.decode(b) + if n < 0: + break + b = b[n:] + finally: + # Need to cleanup here to prevent leaks + decoder.cleanup() + + self.tile = [] + self.readonly = readonly + + self.load_end() + + if self._exclusive_fp and self._close_exclusive_fp_after_loading: + self.fp.close() + self.fp = None + + if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0: + # still raised if decoder fails to return anything + raise _get_oserror(err_code, encoder=False) + + return Image.Image.load(self) + + def load_prepare(self) -> None: + # create image memory if necessary + if self._im is None: + self.im = Image.core.new(self.mode, self.size) + # create palette (optional) + if self.mode == "P": + Image.Image.load(self) + + def load_end(self) -> None: + # may be overridden + pass + + # may be defined for contained formats + # def load_seek(self, pos: int) -> None: + # pass + + # may be defined for blocked formats (e.g. PNG) + # def load_read(self, read_bytes: int) -> bytes: + # pass + + def _seek_check(self, frame: int) -> bool: + if ( + frame < self._min_frame + # Only check upper limit on frames if additional seek operations + # are not required to do so + or ( + not (hasattr(self, "_n_frames") and self._n_frames is None) + and frame >= getattr(self, "n_frames") + self._min_frame + ) + ): + msg = "attempt to seek outside sequence" + raise EOFError(msg) + + return self.tell() != frame + + +class StubHandler(abc.ABC): + def open(self, im: StubImageFile) -> None: + pass + + @abc.abstractmethod + def load(self, im: StubImageFile) -> Image.Image: + pass + + +class StubImageFile(ImageFile, metaclass=abc.ABCMeta): + """ + Base class for stub image loaders. + + A stub loader is an image loader that can identify files of a + certain format, but relies on external code to load the file. + """ + + @abc.abstractmethod + def _open(self) -> None: + pass + + def load(self) -> Image.core.PixelAccess | None: + loader = self._load() + if loader is None: + msg = f"cannot find loader for this {self.format} file" + raise OSError(msg) + image = loader.load(self) + assert image is not None + # become the other object (!) 
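+        # Rebind this stub's class and attribute dict to those of the loaded
+        # image, so existing references to the stub now behave as the real
+        # image object.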
+        self.__class__ = image.__class__  # type: ignore[assignment]
+        self.__dict__ = image.__dict__
+        return image.load()
+
+    @abc.abstractmethod
+    def _load(self) -> StubHandler | None:
+        """(Hook) Find actual image loader."""
+        pass
+
+
+class Parser:
+    """
+    Incremental image parser. This class implements the standard
+    feed/close consumer interface.
+    """
+
+    incremental = None
+    image: Image.Image | None = None
+    data: bytes | None = None
+    decoder: Image.core.ImagingDecoder | PyDecoder | None = None
+    offset = 0
+    finished = 0
+
+    def reset(self) -> None:
+        """
+        (Consumer) Reset the parser. Note that you can only call this
+        method immediately after you've created a parser; parser
+        instances cannot be reused.
+        """
+        assert self.data is None, "cannot reuse parsers"
+
+    def feed(self, data: bytes) -> None:
+        """
+        (Consumer) Feed data to the parser.
+
+        :param data: A string buffer.
+        :exception OSError: If the parser failed to parse the image file.
+        """
+        # collect data
+
+        if self.finished:
+            return
+
+        if self.data is None:
+            self.data = data
+        else:
+            self.data = self.data + data
+
+        # parse what we have
+        if self.decoder:
+            if self.offset > 0:
+                # skip header
+                skip = min(len(self.data), self.offset)
+                self.data = self.data[skip:]
+                self.offset = self.offset - skip
+                if self.offset > 0 or not self.data:
+                    return
+
+            n, e = self.decoder.decode(self.data)
+
+            if n < 0:
+                # end of stream
+                self.data = None
+                self.finished = 1
+                if e < 0:
+                    # decoding error
+                    self.image = None
+                    raise _get_oserror(e, encoder=False)
+                else:
+                    # end of image
+                    return
+            self.data = self.data[n:]
+
+        elif self.image:
+            # if we end up here with no decoder, this file cannot
+            # be incrementally parsed. wait until we've gotten all
+            # available data
+            pass
+
+        else:
+            # attempt to open this file
+            try:
+                with io.BytesIO(self.data) as fp:
+                    im = Image.open(fp)
+            except OSError:
+                pass  # not enough data
+            else:
+                flag = hasattr(im, "load_seek") or hasattr(im, "load_read")
+                if flag or len(im.tile) != 1:
+                    # custom load code, or multiple tiles
+                    self.decoder = None
+                else:
+                    # initialize decoder
+                    im.load_prepare()
+                    d, e, o, a = im.tile[0]
+                    im.tile = []
+                    self.decoder = Image._getdecoder(im.mode, d, a, im.decoderconfig)
+                    self.decoder.setimage(im.im, e)
+
+                    # calculate decoder offset
+                    self.offset = o
+                    if self.offset <= len(self.data):
+                        self.data = self.data[self.offset :]
+                        self.offset = 0
+
+                self.image = im
+
+    def __enter__(self) -> Parser:
+        return self
+
+    def __exit__(self, *args: object) -> None:
+        self.close()
+
+    def close(self) -> Image.Image:
+        """
+        (Consumer) Close the stream.
+
+        :returns: An image object.
+        :exception OSError: If the parser failed to parse the image file either
+                            because it cannot be identified or cannot be
+                            decoded.
+        """
+        # finish decoding
+        if self.decoder:
+            # get rid of what's left in the buffers
+            self.feed(b"")
+            self.data = self.decoder = None
+            if not self.finished:
+                msg = "image was incomplete"
+                raise OSError(msg)
+        if not self.image:
+            msg = "cannot parse this image"
+            raise OSError(msg)
+        if self.data:
+            # incremental parsing not possible; reopen the file
+            # now that we have all data
+            with io.BytesIO(self.data) as fp:
+                try:
+                    self.image = Image.open(fp)
+                finally:
+                    self.image.load()
+        return self.image
+
+
+# --------------------------------------------------------------------
+
+
+def _save(im: Image.Image, fp: IO[bytes], tile: list[_Tile], bufsize: int = 0) -> None:
+    """Helper to save image based on tile list
+
+    :param im: Image object.
+ :param fp: File object. + :param tile: Tile list. + :param bufsize: Optional buffer size + """ + + im.load() + if not hasattr(im, "encoderconfig"): + im.encoderconfig = () + tile.sort(key=_tilesort) + # FIXME: make MAXBLOCK a configuration parameter + # It would be great if we could have the encoder specify what it needs + # But, it would need at least the image size in most cases. RawEncode is + # a tricky case. + bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c + try: + fh = fp.fileno() + fp.flush() + _encode_tile(im, fp, tile, bufsize, fh) + except (AttributeError, io.UnsupportedOperation) as exc: + _encode_tile(im, fp, tile, bufsize, None, exc) + if hasattr(fp, "flush"): + fp.flush() + + +def _encode_tile( + im: Image.Image, + fp: IO[bytes], + tile: list[_Tile], + bufsize: int, + fh: int | None, + exc: BaseException | None = None, +) -> None: + for encoder_name, extents, offset, args in tile: + if offset > 0: + fp.seek(offset) + encoder = Image._getencoder(im.mode, encoder_name, args, im.encoderconfig) + try: + encoder.setimage(im.im, extents) + if encoder.pushes_fd: + encoder.setfd(fp) + errcode = encoder.encode_to_pyfd()[1] + else: + if exc: + # compress to Python file-compatible object + while True: + errcode, data = encoder.encode(bufsize)[1:] + fp.write(data) + if errcode: + break + else: + # slight speedup: compress to real file object + assert fh is not None + errcode = encoder.encode_to_file(fh, bufsize) + if errcode < 0: + raise _get_oserror(errcode, encoder=True) from exc + finally: + encoder.cleanup() + + +def _safe_read(fp: IO[bytes], size: int) -> bytes: + """ + Reads large blocks in a safe way. Unlike fp.read(n), this function + doesn't trust the user. If the requested size is larger than + SAFEBLOCK, the file is read block by block. + + :param fp: File handle. Must implement a read method. + :param size: Number of bytes to read. + :returns: A string containing size bytes of data. 
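+
+    For example, a request for ``3 * SAFEBLOCK`` bytes is served as at least
+    three separate reads of at most ``SAFEBLOCK`` bytes each.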
+
+    Raises an OSError if the file is truncated and the read cannot be completed
+
+    """
+    if size <= 0:
+        return b""
+    if size <= SAFEBLOCK:
+        data = fp.read(size)
+        if len(data) < size:
+            msg = "Truncated File Read"
+            raise OSError(msg)
+        return data
+    blocks: list[bytes] = []
+    remaining_size = size
+    while remaining_size > 0:
+        block = fp.read(min(remaining_size, SAFEBLOCK))
+        if not block:
+            break
+        blocks.append(block)
+        remaining_size -= len(block)
+    if sum(len(block) for block in blocks) < size:
+        msg = "Truncated File Read"
+        raise OSError(msg)
+    return b"".join(blocks)
+
+
+class PyCodecState:
+    def __init__(self) -> None:
+        self.xsize = 0
+        self.ysize = 0
+        self.xoff = 0
+        self.yoff = 0
+
+    def extents(self) -> tuple[int, int, int, int]:
+        return self.xoff, self.yoff, self.xoff + self.xsize, self.yoff + self.ysize
+
+
+class PyCodec:
+    fd: IO[bytes] | None
+
+    def __init__(self, mode: str, *args: Any) -> None:
+        self.im: Image.core.ImagingCore | None = None
+        self.state = PyCodecState()
+        self.fd = None
+        self.mode = mode
+        self.init(args)
+
+    def init(self, args: tuple[Any, ...]) -> None:
+        """
+        Override to perform codec specific initialization
+
+        :param args: Tuple of arg items from the tile entry
+        :returns: None
+        """
+        self.args = args
+
+    def cleanup(self) -> None:
+        """
+        Override to perform codec specific cleanup
+
+        :returns: None
+        """
+        pass
+
+    def setfd(self, fd: IO[bytes]) -> None:
+        """
+        Called from ImageFile to set the Python file-like object
+
+        :param fd: A Python file-like object
+        :returns: None
+        """
+        self.fd = fd
+
+    def setimage(
+        self,
+        im: Image.core.ImagingCore,
+        extents: tuple[int, int, int, int] | None = None,
+    ) -> None:
+        """
+        Called from ImageFile to set the core output image for the codec
+
+        :param im: A core image object
+        :param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle
+            for this tile
+        :returns: None
+        """
+
+        # following c code
+        self.im = im
+
+        if extents:
+            (x0, y0, x1, y1) = extents
+        else:
+            (x0, y0, x1, y1) = (0, 0, 0, 0)
+
+        if x0 == 0 and x1 == 0:
+            self.state.xsize, self.state.ysize = self.im.size
+        else:
+            self.state.xoff = x0
+            self.state.yoff = y0
+            self.state.xsize = x1 - x0
+            self.state.ysize = y1 - y0
+
+        if self.state.xsize <= 0 or self.state.ysize <= 0:
+            msg = "Size cannot be negative"
+            raise ValueError(msg)
+
+        if (
+            self.state.xsize + self.state.xoff > self.im.size[0]
+            or self.state.ysize + self.state.yoff > self.im.size[1]
+        ):
+            msg = "Tile cannot extend outside image"
+            raise ValueError(msg)
+
+
+class PyDecoder(PyCodec):
+    """
+    Python implementation of a format decoder. Override this class and
+    add the decoding logic in the :meth:`decode` method.
+
+    See :ref:`Writing Your Own File Codec in Python<file-codecs-py>`
+    """
+
+    _pulls_fd = False
+
+    @property
+    def pulls_fd(self) -> bool:
+        return self._pulls_fd
+
+    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
+        """
+        Override to perform the decoding process.
+
+        :param buffer: A bytes object with the data to be decoded.
+        :returns: A tuple of ``(bytes consumed, errcode)``.
+            If finished with decoding return -1 for the bytes consumed.
+            Err codes are from :data:`.ImageFile.ERRORS`.
+        """
+        msg = "unavailable in base decoder"
+        raise NotImplementedError(msg)
+
+    def set_as_raw(
+        self, data: bytes, rawmode: str | None = None, extra: tuple[Any, ...] = ()
+    ) -> None:
+        """
+        Convenience method to set the internal image from a stream of raw data
+
+        :param data: Bytes to be set
+        :param rawmode: The rawmode to be used for the decoder.
+            If not specified, it will default to the mode of the image
+        :param extra: Extra arguments for the decoder.
+        :returns: None
+        """
+
+        if not rawmode:
+            rawmode = self.mode
+        d = Image._getdecoder(self.mode, "raw", rawmode, extra)
+        assert self.im is not None
+        d.setimage(self.im, self.state.extents())
+        s = d.decode(data)
+
+        if s[0] >= 0:
+            msg = "not enough image data"
+            raise ValueError(msg)
+        if s[1] != 0:
+            msg = "cannot decode image data"
+            raise ValueError(msg)
+
+
+class PyEncoder(PyCodec):
+    """
+    Python implementation of a format encoder. Override this class and
+    add the encoding logic in the :meth:`encode` method.
+
+    See :ref:`Writing Your Own File Codec in Python<file-codecs-py>`
+    """
+
+    _pushes_fd = False
+
+    @property
+    def pushes_fd(self) -> bool:
+        return self._pushes_fd
+
+    def encode(self, bufsize: int) -> tuple[int, int, bytes]:
+        """
+        Override to perform the encoding process.
+
+        :param bufsize: Buffer size.
+        :returns: A tuple of ``(bytes encoded, errcode, bytes)``.
+            If finished with encoding return 1 for the error code.
+            Err codes are from :data:`.ImageFile.ERRORS`.
+        """
+        msg = "unavailable in base encoder"
+        raise NotImplementedError(msg)
+
+    def encode_to_pyfd(self) -> tuple[int, int]:
+        """
+        If ``pushes_fd`` is ``True``, then this method will be used,
+        and ``encode()`` will only be called once.
+
+        :returns: A tuple of ``(bytes consumed, errcode)``.
+            Err codes are from :data:`.ImageFile.ERRORS`.
+        """
+        if not self.pushes_fd:
+            return 0, -8  # bad configuration
+        bytes_consumed, errcode, data = self.encode(0)
+        if data:
+            assert self.fd is not None
+            self.fd.write(data)
+        return bytes_consumed, errcode
+
+    def encode_to_file(self, fh: int, bufsize: int) -> int:
+        """
+        :param fh: File handle.
+        :param bufsize: Buffer size.
+
+        :returns: If finished successfully, return 0.
+            Otherwise, return an error code. Err codes are from
+            :data:`.ImageFile.ERRORS`.
+        """
+        errcode = 0
+        while errcode == 0:
+            status, errcode, buf = self.encode(bufsize)
+            if status > 0:
+                os.write(fh, buf[status:])
+        return errcode
diff --git a/py311/lib/python3.11/site-packages/PIL/ImageFilter.py b/py311/lib/python3.11/site-packages/PIL/ImageFilter.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9ed54ab20a132aa9d2aec894dd846736923f70e
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/PIL/ImageFilter.py
@@ -0,0 +1,604 @@
+#
+# The Python Imaging Library.
+# $Id$
+#
+# standard filters
+#
+# History:
+# 1995-11-27 fl  Created
+# 2002-06-08 fl  Added rank and mode filters
+# 2003-09-15 fl  Fixed rank calculation in rank filter; added expand call
+#
+# Copyright (c) 1997-2003 by Secret Labs AB.
+# Copyright (c) 1995-2002 by Fredrik Lundh.
+#
+# See the README file for information on usage and redistribution.
+#
+from __future__ import annotations
+
+import abc
+import functools
+from collections.abc import Sequence
+from types import ModuleType
+from typing import Any, Callable, cast
+
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from . import _imaging
+    from ._typing import NumpyArray
+
+
+class Filter(abc.ABC):
+    @abc.abstractmethod
+    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
+        pass
+
+
+class MultibandFilter(Filter):
+    pass
+
+
+class BuiltinFilter(MultibandFilter):
+    filterargs: tuple[Any, ...]
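+    # Concrete subclasses such as BLUR and SHARPEN below supply
+    # ``filterargs`` as (size, scale, offset, kernel), mirroring the
+    # arguments of ``Kernel``.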
+ + def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore: + if image.mode == "P": + msg = "cannot filter palette images" + raise ValueError(msg) + return image.filter(*self.filterargs) + + +class Kernel(BuiltinFilter): + """ + Create a convolution kernel. This only supports 3x3 and 5x5 integer and floating + point kernels. + + Kernels can only be applied to "L" and "RGB" images. + + :param size: Kernel size, given as (width, height). This must be (3,3) or (5,5). + :param kernel: A sequence containing kernel weights. The kernel will be flipped + vertically before being applied to the image. + :param scale: Scale factor. If given, the result for each pixel is divided by this + value. The default is the sum of the kernel weights. + :param offset: Offset. If given, this value is added to the result, after it has + been divided by the scale factor. + """ + + name = "Kernel" + + def __init__( + self, + size: tuple[int, int], + kernel: Sequence[float], + scale: float | None = None, + offset: float = 0, + ) -> None: + if scale is None: + # default scale is sum of kernel + scale = functools.reduce(lambda a, b: a + b, kernel) + if size[0] * size[1] != len(kernel): + msg = "not enough coefficients in kernel" + raise ValueError(msg) + self.filterargs = size, scale, offset, kernel + + +class RankFilter(Filter): + """ + Create a rank filter. The rank filter sorts all pixels in + a window of the given size, and returns the ``rank``'th value. + + :param size: The kernel size, in pixels. + :param rank: What pixel value to pick. Use 0 for a min filter, + ``size * size / 2`` for a median filter, ``size * size - 1`` + for a max filter, etc. + """ + + name = "Rank" + + def __init__(self, size: int, rank: int) -> None: + self.size = size + self.rank = rank + + def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore: + if image.mode == "P": + msg = "cannot filter palette images" + raise ValueError(msg) + image = image.expand(self.size // 2, self.size // 2) + return image.rankfilter(self.size, self.rank) + + +class MedianFilter(RankFilter): + """ + Create a median filter. Picks the median pixel value in a window with the + given size. + + :param size: The kernel size, in pixels. + """ + + name = "Median" + + def __init__(self, size: int = 3) -> None: + self.size = size + self.rank = size * size // 2 + + +class MinFilter(RankFilter): + """ + Create a min filter. Picks the lowest pixel value in a window with the + given size. + + :param size: The kernel size, in pixels. + """ + + name = "Min" + + def __init__(self, size: int = 3) -> None: + self.size = size + self.rank = 0 + + +class MaxFilter(RankFilter): + """ + Create a max filter. Picks the largest pixel value in a window with the + given size. + + :param size: The kernel size, in pixels. + """ + + name = "Max" + + def __init__(self, size: int = 3) -> None: + self.size = size + self.rank = size * size - 1 + + +class ModeFilter(Filter): + """ + Create a mode filter. Picks the most frequent pixel value in a box with the + given size. Pixel values that occur only once or twice are ignored; if no + pixel value occurs more than twice, the original pixel value is preserved. + + :param size: The kernel size, in pixels. 
+    """
+
+    name = "Mode"
+
+    def __init__(self, size: int = 3) -> None:
+        self.size = size
+
+    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
+        return image.modefilter(self.size)
+
+
+class GaussianBlur(MultibandFilter):
+    """Blurs the image with a sequence of extended box filters, which
+    approximates a Gaussian kernel. For details on accuracy see
+    <https://www.mia.uni-saarland.de/Publications/gwosdek-ssvm11.pdf>
+
+    :param radius: Standard deviation of the Gaussian kernel. Either a sequence of two
+        numbers for x and y, or a single number for both.
+    """
+
+    name = "GaussianBlur"
+
+    def __init__(self, radius: float | Sequence[float] = 2) -> None:
+        self.radius = radius
+
+    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
+        xy = self.radius
+        if isinstance(xy, (int, float)):
+            xy = (xy, xy)
+        if xy == (0, 0):
+            return image.copy()
+        return image.gaussian_blur(xy)
+
+
+class BoxBlur(MultibandFilter):
+    """Blurs the image by setting each pixel to the average value of the pixels
+    in a square box extending radius pixels in each direction.
+    Supports float radius of arbitrary size. Uses an optimized implementation
+    which runs in linear time relative to the size of the image
+    for any radius value.
+
+    :param radius: Size of the box in a direction. Either a sequence of two numbers for
+        x and y, or a single number for both.
+
+        Radius 0 does not blur, returns an identical image.
+        Radius 1 takes 1 pixel in each direction, i.e. 9 pixels in total.
+    """
+
+    name = "BoxBlur"
+
+    def __init__(self, radius: float | Sequence[float]) -> None:
+        xy = radius if isinstance(radius, (tuple, list)) else (radius, radius)
+        if xy[0] < 0 or xy[1] < 0:
+            msg = "radius must be >= 0"
+            raise ValueError(msg)
+        self.radius = radius
+
+    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
+        xy = self.radius
+        if isinstance(xy, (int, float)):
+            xy = (xy, xy)
+        if xy == (0, 0):
+            return image.copy()
+        return image.box_blur(xy)
+
+
+class UnsharpMask(MultibandFilter):
+    """Unsharp mask filter.
+
+    See Wikipedia's entry on `digital unsharp masking`_ for an explanation of
+    the parameters.
+
+    :param radius: Blur Radius
+    :param percent: Unsharp strength, in percent
+    :param threshold: Threshold controls the minimum brightness change that
+        will be sharpened
+
+    .. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking
+
+    """
+
+    name = "UnsharpMask"
+
+    def __init__(
+        self, radius: float = 2, percent: int = 150, threshold: int = 3
+    ) -> None:
+        self.radius = radius
+        self.percent = percent
+        self.threshold = threshold
+
+    def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
+        return image.unsharp_mask(self.radius, self.percent, self.threshold)
+
+
+class BLUR(BuiltinFilter):
+    name = "Blur"
+    # fmt: off
+    filterargs = (5, 5), 16, 0, (
+        1, 1, 1, 1, 1,
+        1, 0, 0, 0, 1,
+        1, 0, 0, 0, 1,
+        1, 0, 0, 0, 1,
+        1, 1, 1, 1, 1,
+    )
+    # fmt: on
+
+
+class CONTOUR(BuiltinFilter):
+    name = "Contour"
+    # fmt: off
+    filterargs = (3, 3), 1, 255, (
+        -1, -1, -1,
+        -1,  8, -1,
+        -1, -1, -1,
+    )
+    # fmt: on
+
+
+class DETAIL(BuiltinFilter):
+    name = "Detail"
+    # fmt: off
+    filterargs = (3, 3), 6, 0, (
+        0, -1,  0,
+        -1, 10, -1,
+        0, -1,  0,
+    )
+    # fmt: on
+
+
+class EDGE_ENHANCE(BuiltinFilter):
+    name = "Edge-enhance"
+    # fmt: off
+    filterargs = (3, 3), 2, 0, (
+        -1, -1, -1,
+        -1, 10, -1,
+        -1, -1, -1,
+    )
+    # fmt: on
+
+
+class EDGE_ENHANCE_MORE(BuiltinFilter):
+    name = "Edge-enhance More"
+    # fmt: off
+    filterargs = (3, 3), 1, 0, (
+        -1, -1, -1,
+        -1,  9, -1,
+        -1, -1, -1,
+    )
+    # fmt: on
+
+
+class EMBOSS(BuiltinFilter):
+    name = "Emboss"
+    # fmt: off
+    filterargs = (3, 3), 1, 128, (
+        -1,  0,  0,
+        0,  1,  0,
+        0,  0,  0,
+    )
+    # fmt: on
+
+
+class FIND_EDGES(BuiltinFilter):
+    name = "Find Edges"
+    # fmt: off
+    filterargs = (3, 3), 1, 0, (
+        -1, -1, -1,
+        -1,  8, -1,
+        -1, -1, -1,
+    )
+    # fmt: on
+
+
+class SHARPEN(BuiltinFilter):
+    name = "Sharpen"
+    # fmt: off
+    filterargs = (3, 3), 16, 0, (
+        -2, -2, -2,
+        -2, 32, -2,
+        -2, -2, -2,
+    )
+    # fmt: on
+
+
+class SMOOTH(BuiltinFilter):
+    name = "Smooth"
+    # fmt: off
+    filterargs = (3, 3), 13, 0, (
+        1,  1,  1,
+        1,  5,  1,
+        1,  1,  1,
+    )
+    # fmt: on
+
+
+class SMOOTH_MORE(BuiltinFilter):
+    name = "Smooth More"
+    # fmt: off
+    filterargs = (5, 5), 100, 0, (
+        1,  1,  1,  1,  1,
+        1,  5,  5,  5,  1,
+        1,  5, 44,  5,  1,
+        1,  5,  5,  5,  1,
+        1,  1,  1,  1,  1,
+    )
+    # fmt: on
+
+
+class Color3DLUT(MultibandFilter):
+    """Three-dimensional color lookup table.
+
+    Transforms 3-channel pixels using the values of the channels as coordinates
+    in the 3D lookup table and interpolating the nearest elements.
+
+    This method allows you to apply almost any color transformation
+    in constant time by using pre-calculated decimated tables.
+
+    .. versionadded:: 5.2.0
+
+    :param size: Size of the table. One int or tuple of (int, int, int).
+        Minimal size in any dimension is 2, maximum is 65.
+    :param table: Flat lookup table. A list of ``channels * size**3``
+        float elements or a list of ``size**3`` channels-sized
+        tuples with floats. Channels are changed first,
+        then first dimension, then second, then third.
+        Value 0.0 corresponds to the lowest value of output, 1.0 the highest.
+    :param channels: Number of channels in the table. Could be 3 or 4.
+        Default is 3.
+    :param target_mode: A mode for the result image. Should have at least
+        ``channels`` channels. Default is ``None``,
+        which means the mode won't be changed.
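+
+    A sketch of an identity table built by hand and applied to an RGB image
+    ``im`` (an assumed variable); note the channel-first ordering described
+    above::
+
+        size = 5
+        table = [
+            (r / (size - 1), g / (size - 1), b / (size - 1))
+            for b in range(size)
+            for g in range(size)
+            for r in range(size)
+        ]
+        im.filter(Color3DLUT(size, table))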
+ """ + + name = "Color 3D LUT" + + def __init__( + self, + size: int | tuple[int, int, int], + table: Sequence[float] | Sequence[Sequence[int]] | NumpyArray, + channels: int = 3, + target_mode: str | None = None, + **kwargs: bool, + ) -> None: + if channels not in (3, 4): + msg = "Only 3 or 4 output channels are supported" + raise ValueError(msg) + self.size = size = self._check_size(size) + self.channels = channels + self.mode = target_mode + + # Hidden flag `_copy_table=False` could be used to avoid extra copying + # of the table if the table is specially made for the constructor. + copy_table = kwargs.get("_copy_table", True) + items = size[0] * size[1] * size[2] + wrong_size = False + + numpy: ModuleType | None = None + if hasattr(table, "shape"): + try: + import numpy + except ImportError: + pass + + if numpy and isinstance(table, numpy.ndarray): + numpy_table: NumpyArray = table + if copy_table: + numpy_table = numpy_table.copy() + + if numpy_table.shape in [ + (items * channels,), + (items, channels), + (size[2], size[1], size[0], channels), + ]: + table = numpy_table.reshape(items * channels) + else: + wrong_size = True + + else: + if copy_table: + table = list(table) + + # Convert to a flat list + if table and isinstance(table[0], (list, tuple)): + raw_table = cast(Sequence[Sequence[int]], table) + flat_table: list[int] = [] + for pixel in raw_table: + if len(pixel) != channels: + msg = ( + "The elements of the table should " + f"have a length of {channels}." + ) + raise ValueError(msg) + flat_table.extend(pixel) + table = flat_table + + if wrong_size or len(table) != items * channels: + msg = ( + "The table should have either channels * size**3 float items " + "or size**3 items of channels-sized tuples with floats. " + f"Table should be: {channels}x{size[0]}x{size[1]}x{size[2]}. " + f"Actual length: {len(table)}" + ) + raise ValueError(msg) + self.table = table + + @staticmethod + def _check_size(size: Any) -> tuple[int, int, int]: + try: + _, _, _ = size + except ValueError as e: + msg = "Size should be either an integer or a tuple of three integers." + raise ValueError(msg) from e + except TypeError: + size = (size, size, size) + size = tuple(int(x) for x in size) + for size_1d in size: + if not 2 <= size_1d <= 65: + msg = "Size should be in [2, 65] range." + raise ValueError(msg) + return size + + @classmethod + def generate( + cls, + size: int | tuple[int, int, int], + callback: Callable[[float, float, float], tuple[float, ...]], + channels: int = 3, + target_mode: str | None = None, + ) -> Color3DLUT: + """Generates new LUT using provided callback. + + :param size: Size of the table. Passed to the constructor. + :param callback: Function with three parameters which correspond + three color channels. Will be called ``size**3`` + times with values from 0.0 to 1.0 and should return + a tuple with ``channels`` elements. + :param channels: The number of channels which should return callback. + :param target_mode: Passed to the constructor of the resulting + lookup table. 
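+
+        For example, ``Color3DLUT.generate(17, lambda r, g, b: (b, g, r))``
+        builds a table that swaps the red and blue channels.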
+ """ + size_1d, size_2d, size_3d = cls._check_size(size) + if channels not in (3, 4): + msg = "Only 3 or 4 output channels are supported" + raise ValueError(msg) + + table: list[float] = [0] * (size_1d * size_2d * size_3d * channels) + idx_out = 0 + for b in range(size_3d): + for g in range(size_2d): + for r in range(size_1d): + table[idx_out : idx_out + channels] = callback( + r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1) + ) + idx_out += channels + + return cls( + (size_1d, size_2d, size_3d), + table, + channels=channels, + target_mode=target_mode, + _copy_table=False, + ) + + def transform( + self, + callback: Callable[..., tuple[float, ...]], + with_normals: bool = False, + channels: int | None = None, + target_mode: str | None = None, + ) -> Color3DLUT: + """Transforms the table values using provided callback and returns + a new LUT with altered values. + + :param callback: A function which takes old lookup table values + and returns a new set of values. The number + of arguments which function should take is + ``self.channels`` or ``3 + self.channels`` + if ``with_normals`` flag is set. + Should return a tuple of ``self.channels`` or + ``channels`` elements if it is set. + :param with_normals: If true, ``callback`` will be called with + coordinates in the color cube as the first + three arguments. Otherwise, ``callback`` + will be called only with actual color values. + :param channels: The number of channels in the resulting lookup table. + :param target_mode: Passed to the constructor of the resulting + lookup table. + """ + if channels not in (None, 3, 4): + msg = "Only 3 or 4 output channels are supported" + raise ValueError(msg) + ch_in = self.channels + ch_out = channels or ch_in + size_1d, size_2d, size_3d = self.size + + table: list[float] = [0] * (size_1d * size_2d * size_3d * ch_out) + idx_in = 0 + idx_out = 0 + for b in range(size_3d): + for g in range(size_2d): + for r in range(size_1d): + values = self.table[idx_in : idx_in + ch_in] + if with_normals: + values = callback( + r / (size_1d - 1), + g / (size_2d - 1), + b / (size_3d - 1), + *values, + ) + else: + values = callback(*values) + table[idx_out : idx_out + ch_out] = values + idx_in += ch_in + idx_out += ch_out + + return type(self)( + self.size, + table, + channels=ch_out, + target_mode=target_mode or self.mode, + _copy_table=False, + ) + + def __repr__(self) -> str: + r = [ + f"{self.__class__.__name__} from {self.table.__class__.__name__}", + "size={:d}x{:d}x{:d}".format(*self.size), + f"channels={self.channels:d}", + ] + if self.mode: + r.append(f"target_mode={self.mode}") + return "<{}>".format(" ".join(r)) + + def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore: + from . import Image + + return image.color_lut_3d( + self.mode or image.mode, + Image.Resampling.BILINEAR, + self.channels, + self.size, + self.table, + ) diff --git a/py311/lib/python3.11/site-packages/PIL/ImageFont.py b/py311/lib/python3.11/site-packages/PIL/ImageFont.py new file mode 100644 index 0000000000000000000000000000000000000000..329c463ff864191849506bd61f7c0559af671f8f --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageFont.py @@ -0,0 +1,1339 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# PIL raster font management +# +# History: +# 1996-08-07 fl created (experimental) +# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3 +# 1999-02-06 fl rewrote most font management stuff in C +# 1999-03-17 fl take pth files into account in load_path (from Richard Jones) +# 2001-02-17 fl added freetype support +# 2001-05-09 fl added TransposedFont wrapper class +# 2002-03-04 fl make sure we have a "L" or "1" font +# 2002-12-04 fl skip non-directory entries in the system path +# 2003-04-29 fl add embedded default font +# 2003-09-27 fl added support for truetype charmap encodings +# +# Todo: +# Adapt to PILFONT2 format (16-bit fonts, compressed, single file) +# +# Copyright (c) 1997-2003 by Secret Labs AB +# Copyright (c) 1996-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from __future__ import annotations + +import base64 +import os +import sys +import warnings +from enum import IntEnum +from io import BytesIO +from types import ModuleType +from typing import IO, Any, BinaryIO, TypedDict, cast + +from . import Image, features +from ._typing import StrOrBytesPath +from ._util import DeferredError, is_path + +TYPE_CHECKING = False +if TYPE_CHECKING: + from . import ImageFile + from ._imaging import ImagingFont + from ._imagingft import Font + + +class Axis(TypedDict): + minimum: int | None + default: int | None + maximum: int | None + name: bytes | None + + +class Layout(IntEnum): + BASIC = 0 + RAQM = 1 + + +MAX_STRING_LENGTH = 1_000_000 + + +core: ModuleType | DeferredError +try: + from . import _imagingft as core +except ImportError as ex: + core = DeferredError.new(ex) + + +def _string_length_check(text: str | bytes | bytearray) -> None: + if MAX_STRING_LENGTH is not None and len(text) > MAX_STRING_LENGTH: + msg = "too many characters in string" + raise ValueError(msg) + + +# FIXME: add support for pilfont2 format (see FontFile.py) + +# -------------------------------------------------------------------- +# Font metrics format: +# "PILfont" LF +# fontdescriptor LF +# (optional) key=value... LF +# "DATA" LF +# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox) +# +# To place a character, cut out srcbox and paste at dstbox, +# relative to the character position. Then move the character +# position according to dx, dy. 
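+#
+# A sketch of decoding one glyph record (assuming the big-endian 16-bit
+# values written by FontFile.py):
+#
+#     dx, dy, dst_x0, dst_y0, dst_x1, dst_y1, src_x0, src_y0, src_x1, src_y1 = \
+#         struct.unpack(">10h", data[ch * 20 : (ch + 1) * 20])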
+# -------------------------------------------------------------------- + + +class ImageFont: + """PIL font wrapper""" + + font: ImagingFont + + def _load_pilfont(self, filename: str) -> None: + with open(filename, "rb") as fp: + image: ImageFile.ImageFile | None = None + root = os.path.splitext(filename)[0] + + for ext in (".png", ".gif", ".pbm"): + if image: + image.close() + try: + fullname = root + ext + image = Image.open(fullname) + except Exception: + pass + else: + if image and image.mode in ("1", "L"): + break + else: + if image: + image.close() + + msg = f"cannot find glyph data file {root}.{{gif|pbm|png}}" + raise OSError(msg) + + self.file = fullname + + self._load_pilfont_data(fp, image) + image.close() + + def _load_pilfont_data(self, file: IO[bytes], image: Image.Image) -> None: + # read PILfont header + if file.readline() != b"PILfont\n": + msg = "Not a PILfont file" + raise SyntaxError(msg) + file.readline().split(b";") + self.info = [] # FIXME: should be a dictionary + while True: + s = file.readline() + if not s or s == b"DATA\n": + break + self.info.append(s) + + # read PILfont metrics + data = file.read(256 * 20) + + # check image + if image.mode not in ("1", "L"): + msg = "invalid font image mode" + raise TypeError(msg) + + image.load() + + self.font = Image.core.font(image.im, data) + + def getmask( + self, text: str | bytes, mode: str = "", *args: Any, **kwargs: Any + ) -> Image.core.ImagingCore: + """ + Create a bitmap for the text. + + If the font uses antialiasing, the bitmap should have mode ``L`` and use a + maximum value of 255. Otherwise, it should have mode ``1``. + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + .. versionadded:: 1.1.5 + + :return: An internal PIL storage memory instance as defined by the + :py:mod:`PIL.Image.core` interface module. + """ + _string_length_check(text) + Image._decompression_bomb_check(self.font.getsize(text)) + return self.font.getmask(text, mode) + + def getbbox( + self, text: str | bytes | bytearray, *args: Any, **kwargs: Any + ) -> tuple[int, int, int, int]: + """ + Returns bounding box (in pixels) of given text. + + .. versionadded:: 9.2.0 + + :param text: Text to render. + + :return: ``(left, top, right, bottom)`` bounding box + """ + _string_length_check(text) + width, height = self.font.getsize(text) + return 0, 0, width, height + + def getlength( + self, text: str | bytes | bytearray, *args: Any, **kwargs: Any + ) -> int: + """ + Returns length (in pixels) of given text. + This is the amount by which following text should be offset. + + .. versionadded:: 9.2.0 + """ + _string_length_check(text) + width, height = self.font.getsize(text) + return width + + +## +# Wrapper for FreeType fonts. Application code should use the +# truetype factory function to create font objects. 
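+#
+# A minimal usage sketch (the font filename is illustrative; any TrueType
+# or OpenType file will do):
+#
+#     font = ImageFont.truetype("DejaVuSans.ttf", size=24)
+#     print(font.getname(), font.getbbox("Hello"))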
+ + +class FreeTypeFont: + """FreeType font wrapper (requires _imagingft service)""" + + font: Font + font_bytes: bytes + + def __init__( + self, + font: StrOrBytesPath | BinaryIO, + size: float = 10, + index: int = 0, + encoding: str = "", + layout_engine: Layout | None = None, + ) -> None: + # FIXME: use service provider instead + + if isinstance(core, DeferredError): + raise core.ex + + if size <= 0: + msg = f"font size must be greater than 0, not {size}" + raise ValueError(msg) + + self.path = font + self.size = size + self.index = index + self.encoding = encoding + + try: + from packaging.version import parse as parse_version + except ImportError: + pass + else: + if freetype_version := features.version_module("freetype2"): + if parse_version(freetype_version) < parse_version("2.9.1"): + warnings.warn( + "Support for FreeType 2.9.0 is deprecated and will be removed " + "in Pillow 12 (2025-10-15). Please upgrade to FreeType 2.9.1 " + "or newer, preferably FreeType 2.10.4 which fixes " + "CVE-2020-15999.", + DeprecationWarning, + ) + + if layout_engine not in (Layout.BASIC, Layout.RAQM): + layout_engine = Layout.BASIC + if core.HAVE_RAQM: + layout_engine = Layout.RAQM + elif layout_engine == Layout.RAQM and not core.HAVE_RAQM: + warnings.warn( + "Raqm layout was requested, but Raqm is not available. " + "Falling back to basic layout." + ) + layout_engine = Layout.BASIC + + self.layout_engine = layout_engine + + def load_from_bytes(f: IO[bytes]) -> None: + self.font_bytes = f.read() + self.font = core.getfont( + "", size, index, encoding, self.font_bytes, layout_engine + ) + + if is_path(font): + font = os.fspath(font) + if sys.platform == "win32": + font_bytes_path = font if isinstance(font, bytes) else font.encode() + try: + font_bytes_path.decode("ascii") + except UnicodeDecodeError: + # FreeType cannot load fonts with non-ASCII characters on Windows + # So load it into memory first + with open(font, "rb") as f: + load_from_bytes(f) + return + self.font = core.getfont( + font, size, index, encoding, layout_engine=layout_engine + ) + else: + load_from_bytes(cast(IO[bytes], font)) + + def __getstate__(self) -> list[Any]: + return [self.path, self.size, self.index, self.encoding, self.layout_engine] + + def __setstate__(self, state: list[Any]) -> None: + path, size, index, encoding, layout_engine = state + FreeTypeFont.__init__(self, path, size, index, encoding, layout_engine) + + def getname(self) -> tuple[str | None, str | None]: + """ + :return: A tuple of the font family (e.g. Helvetica) and the font style + (e.g. Bold) + """ + return self.font.family, self.font.style + + def getmetrics(self) -> tuple[int, int]: + """ + :return: A tuple of the font ascent (the distance from the baseline to + the highest outline point) and descent (the distance from the + baseline to the lowest outline point, a negative value) + """ + return self.font.ascent, self.font.descent + + def getlength( + self, + text: str | bytes, + mode: str = "", + direction: str | None = None, + features: list[str] | None = None, + language: str | None = None, + ) -> float: + """ + Returns length (in pixels with 1/64 precision) of given text when rendered + in font with provided direction, features, and language. + + This is the amount by which following text should be offset. + Text bounding box may extend past the length in some fonts, + e.g. when using italics or accents. + + The result is returned as a float; it is a whole number if using basic layout. 
+ + Note that the sum of two lengths may not equal the length of a concatenated + string due to kerning. If you need to adjust for kerning, include the following + character and subtract its length. + + For example, instead of :: + + hello = font.getlength("Hello") + world = font.getlength("World") + hello_world = hello + world # not adjusted for kerning + assert hello_world == font.getlength("HelloWorld") # may fail + + use :: + + hello = font.getlength("HelloW") - font.getlength("W") # adjusted for kerning + world = font.getlength("World") + hello_world = hello + world # adjusted for kerning + assert hello_world == font.getlength("HelloWorld") # True + + or disable kerning with (requires libraqm) :: + + hello = draw.textlength("Hello", font, features=["-kern"]) + world = draw.textlength("World", font, features=["-kern"]) + hello_world = hello + world # kerning is disabled, no need to adjust + assert hello_world == draw.textlength("HelloWorld", font, features=["-kern"]) + + .. versionadded:: 8.0.0 + + :param text: Text to measure. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + `_ + Requires libraqm. + + :return: Either width for horizontal text, or height for vertical text. + """ + _string_length_check(text) + return self.font.getlength(text, mode, direction, features, language) / 64 + + def getbbox( + self, + text: str | bytes, + mode: str = "", + direction: str | None = None, + features: list[str] | None = None, + language: str | None = None, + stroke_width: float = 0, + anchor: str | None = None, + ) -> tuple[float, float, float, float]: + """ + Returns bounding box (in pixels) of given text relative to given anchor + when rendered in font with provided direction, features, and language. + + Use :py:meth:`getlength()` to get the offset of following text with + 1/64 pixel precision. The bounding box includes extra margins for + some fonts, e.g. italics or accents. + + .. versionadded:: 8.0.0 + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + :param features: A list of OpenType font features to be used during text + layout. 
This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + `_ + Requires libraqm. + + :param stroke_width: The width of the text stroke. + + :param anchor: The text anchor alignment. Determines the relative location of + the anchor to the text. The default alignment is top left, + specifically ``la`` for horizontal text and ``lt`` for + vertical text. See :ref:`text-anchors` for details. + + :return: ``(left, top, right, bottom)`` bounding box + """ + _string_length_check(text) + size, offset = self.font.getsize( + text, mode, direction, features, language, anchor + ) + left, top = offset[0] - stroke_width, offset[1] - stroke_width + width, height = size[0] + 2 * stroke_width, size[1] + 2 * stroke_width + return left, top, left + width, top + height + + def getmask( + self, + text: str | bytes, + mode: str = "", + direction: str | None = None, + features: list[str] | None = None, + language: str | None = None, + stroke_width: float = 0, + anchor: str | None = None, + ink: int = 0, + start: tuple[float, float] | None = None, + ) -> Image.core.ImagingCore: + """ + Create a bitmap for the text. + + If the font uses antialiasing, the bitmap should have mode ``L`` and use a + maximum value of 255. If the font has embedded color data, the bitmap + should have mode ``RGBA``. Otherwise, it should have mode ``1``. + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + .. versionadded:: 1.1.5 + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + `_ + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. versionadded:: 6.2.0 + + :param anchor: The text anchor alignment. Determines the relative location of + the anchor to the text. The default alignment is top left, + specifically ``la`` for horizontal text and ``lt`` for + vertical text. 
See :ref:`text-anchors` for details. + + .. versionadded:: 8.0.0 + + :param ink: Foreground ink for rendering in RGBA mode. + + .. versionadded:: 8.0.0 + + :param start: Tuple of horizontal and vertical offset, as text may render + differently when starting at fractional coordinates. + + .. versionadded:: 9.4.0 + + :return: An internal PIL storage memory instance as defined by the + :py:mod:`PIL.Image.core` interface module. + """ + return self.getmask2( + text, + mode, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + anchor=anchor, + ink=ink, + start=start, + )[0] + + def getmask2( + self, + text: str | bytes, + mode: str = "", + direction: str | None = None, + features: list[str] | None = None, + language: str | None = None, + stroke_width: float = 0, + anchor: str | None = None, + ink: int = 0, + start: tuple[float, float] | None = None, + *args: Any, + **kwargs: Any, + ) -> tuple[Image.core.ImagingCore, tuple[int, int]]: + """ + Create a bitmap for the text. + + If the font uses antialiasing, the bitmap should have mode ``L`` and use a + maximum value of 255. If the font has embedded color data, the bitmap + should have mode ``RGBA``. Otherwise, it should have mode ``1``. + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + .. versionadded:: 1.1.5 + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + `_ + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. versionadded:: 6.2.0 + + :param anchor: The text anchor alignment. Determines the relative location of + the anchor to the text. The default alignment is top left, + specifically ``la`` for horizontal text and ``lt`` for + vertical text. See :ref:`text-anchors` for details. + + .. versionadded:: 8.0.0 + + :param ink: Foreground ink for rendering in RGBA mode. + + .. versionadded:: 8.0.0 + + :param start: Tuple of horizontal and vertical offset, as text may render + differently when starting at fractional coordinates. + + .. 
versionadded:: 9.4.0 + + :return: A tuple of an internal PIL storage memory instance as defined by the + :py:mod:`PIL.Image.core` interface module, and the text offset, the + gap between the starting coordinate and the first marking + """ + _string_length_check(text) + if start is None: + start = (0, 0) + + def fill(width: int, height: int) -> Image.core.ImagingCore: + size = (width, height) + Image._decompression_bomb_check(size) + return Image.core.fill("RGBA" if mode == "RGBA" else "L", size) + + return self.font.render( + text, + fill, + mode, + direction, + features, + language, + stroke_width, + kwargs.get("stroke_filled", False), + anchor, + ink, + start, + ) + + def font_variant( + self, + font: StrOrBytesPath | BinaryIO | None = None, + size: float | None = None, + index: int | None = None, + encoding: str | None = None, + layout_engine: Layout | None = None, + ) -> FreeTypeFont: + """ + Create a copy of this FreeTypeFont object, + using any specified arguments to override the settings. + + Parameters are identical to the parameters used to initialize this + object. + + :return: A FreeTypeFont object. + """ + if font is None: + try: + font = BytesIO(self.font_bytes) + except AttributeError: + font = self.path + return FreeTypeFont( + font=font, + size=self.size if size is None else size, + index=self.index if index is None else index, + encoding=self.encoding if encoding is None else encoding, + layout_engine=layout_engine or self.layout_engine, + ) + + def get_variation_names(self) -> list[bytes]: + """ + :returns: A list of the named styles in a variation font. + :exception OSError: If the font is not a variation font. + """ + try: + names = self.font.getvarnames() + except AttributeError as e: + msg = "FreeType 2.9.1 or greater is required" + raise NotImplementedError(msg) from e + return [name.replace(b"\x00", b"") for name in names] + + def set_variation_by_name(self, name: str | bytes) -> None: + """ + :param name: The name of the style. + :exception OSError: If the font is not a variation font. + """ + names = self.get_variation_names() + if not isinstance(name, bytes): + name = name.encode() + index = names.index(name) + 1 + + if index == getattr(self, "_last_variation_index", None): + # When the same name is set twice in a row, + # there is an 'unknown freetype error' + # https://savannah.nongnu.org/bugs/?56186 + return + self._last_variation_index = index + + self.font.setvarname(index) + + def get_variation_axes(self) -> list[Axis]: + """ + :returns: A list of the axes in a variation font. + :exception OSError: If the font is not a variation font. + """ + try: + axes = self.font.getvaraxes() + except AttributeError as e: + msg = "FreeType 2.9.1 or greater is required" + raise NotImplementedError(msg) from e + for axis in axes: + if axis["name"]: + axis["name"] = axis["name"].replace(b"\x00", b"") + return axes + + def set_variation_by_axes(self, axes: list[float]) -> None: + """ + :param axes: A list of values for each axis. + :exception OSError: If the font is not a variation font. + """ + try: + self.font.setvaraxes(axes) + except AttributeError as e: + msg = "FreeType 2.9.1 or greater is required" + raise NotImplementedError(msg) from e + + +class TransposedFont: + """Wrapper for writing rotated or mirrored text""" + + def __init__( + self, font: ImageFont | FreeTypeFont, orientation: Image.Transpose | None = None + ): + """ + Wrapper that creates a transposed font from any existing font + object. + + :param font: A font object. 
+ :param orientation: An optional orientation. If given, this should + be one of Image.Transpose.FLIP_LEFT_RIGHT, Image.Transpose.FLIP_TOP_BOTTOM, + Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_180, or + Image.Transpose.ROTATE_270. + """ + self.font = font + self.orientation = orientation # any 'transpose' argument, or None + + def getmask( + self, text: str | bytes, mode: str = "", *args: Any, **kwargs: Any + ) -> Image.core.ImagingCore: + im = self.font.getmask(text, mode, *args, **kwargs) + if self.orientation is not None: + return im.transpose(self.orientation) + return im + + def getbbox( + self, text: str | bytes, *args: Any, **kwargs: Any + ) -> tuple[int, int, float, float]: + # TransposedFont doesn't support getmask2, move top-left point to (0, 0) + # this has no effect on ImageFont and simulates anchor="lt" for FreeTypeFont + left, top, right, bottom = self.font.getbbox(text, *args, **kwargs) + width = right - left + height = bottom - top + if self.orientation in (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270): + return 0, 0, height, width + return 0, 0, width, height + + def getlength(self, text: str | bytes, *args: Any, **kwargs: Any) -> float: + if self.orientation in (Image.Transpose.ROTATE_90, Image.Transpose.ROTATE_270): + msg = "text length is undefined for text rotated by 90 or 270 degrees" + raise ValueError(msg) + return self.font.getlength(text, *args, **kwargs) + + +def load(filename: str) -> ImageFont: + """ + Load a font file. This function loads a font object from the given + bitmap font file, and returns the corresponding font object. For loading TrueType + or OpenType fonts instead, see :py:func:`~PIL.ImageFont.truetype`. + + :param filename: Name of font file. + :return: A font object. + :exception OSError: If the file could not be read. + """ + f = ImageFont() + f._load_pilfont(filename) + return f + + +def truetype( + font: StrOrBytesPath | BinaryIO, + size: float = 10, + index: int = 0, + encoding: str = "", + layout_engine: Layout | None = None, +) -> FreeTypeFont: + """ + Load a TrueType or OpenType font from a file or file-like object, + and create a font object. This function loads a font object from the given + file or file-like object, and creates a font object for a font of the given + size. For loading bitmap fonts instead, see :py:func:`~PIL.ImageFont.load` + and :py:func:`~PIL.ImageFont.load_path`. + + Pillow uses FreeType to open font files. On Windows, be aware that FreeType + will keep the file open as long as the FreeTypeFont object exists. Windows + limits the number of files that can be open in C at once to 512, so if many + fonts are opened simultaneously and that limit is approached, an + ``OSError`` may be thrown, reporting that FreeType "cannot open resource". + A workaround would be to copy the file(s) into memory, and open that instead. + + This function requires the _imagingft service. + + :param font: A filename or file-like object containing a TrueType font. + If the file is not found in this filename, the loader may also + search in other directories, such as: + + * The :file:`fonts/` directory on Windows, + * :file:`/Library/Fonts/`, :file:`/System/Library/Fonts/` + and :file:`~/Library/Fonts/` on macOS. + * :file:`~/.local/share/fonts`, :file:`/usr/local/share/fonts`, + and :file:`/usr/share/fonts` on Linux; or those specified by + the ``XDG_DATA_HOME`` and ``XDG_DATA_DIRS`` environment variables + for user-installed and system-wide fonts, respectively. + + :param size: The requested size, in pixels. 
+ :param index: Which font face to load (default is first available face). + :param encoding: Which font encoding to use (default is Unicode). Possible + encodings include (see the FreeType documentation for more + information): + + * "unic" (Unicode) + * "symb" (Microsoft Symbol) + * "ADOB" (Adobe Standard) + * "ADBE" (Adobe Expert) + * "ADBC" (Adobe Custom) + * "armn" (Apple Roman) + * "sjis" (Shift JIS) + * "gb " (PRC) + * "big5" + * "wans" (Extended Wansung) + * "joha" (Johab) + * "lat1" (Latin-1) + + This specifies the character set to use. It does not alter the + encoding of any text provided in subsequent operations. + :param layout_engine: Which layout engine to use, if available: + :attr:`.ImageFont.Layout.BASIC` or :attr:`.ImageFont.Layout.RAQM`. + If it is available, Raqm layout will be used by default. + Otherwise, basic layout will be used. + + Raqm layout is recommended for all non-English text. If Raqm layout + is not required, basic layout will have better performance. + + You can check support for Raqm layout using + :py:func:`PIL.features.check_feature` with ``feature="raqm"``. + + .. versionadded:: 4.2.0 + :return: A font object. + :exception OSError: If the file could not be read. + :exception ValueError: If the font size is not greater than zero. + """ + + def freetype(font: StrOrBytesPath | BinaryIO) -> FreeTypeFont: + return FreeTypeFont(font, size, index, encoding, layout_engine) + + try: + return freetype(font) + except OSError: + if not is_path(font): + raise + ttf_filename = os.path.basename(font) + + dirs = [] + if sys.platform == "win32": + # check the windows font repository + # NOTE: must use uppercase WINDIR, to work around bugs in + # 1.5.2's os.environ.get() + windir = os.environ.get("WINDIR") + if windir: + dirs.append(os.path.join(windir, "fonts")) + elif sys.platform in ("linux", "linux2"): + data_home = os.environ.get("XDG_DATA_HOME") + if not data_home: + # The freedesktop spec defines the following default directory for + # when XDG_DATA_HOME is unset or empty. This user-level directory + # takes precedence over system-level directories. + data_home = os.path.expanduser("~/.local/share") + xdg_dirs = [data_home] + + data_dirs = os.environ.get("XDG_DATA_DIRS") + if not data_dirs: + # Similarly, defaults are defined for the system-level directories + data_dirs = "/usr/local/share:/usr/share" + xdg_dirs += data_dirs.split(":") + + dirs += [os.path.join(xdg_dir, "fonts") for xdg_dir in xdg_dirs] + elif sys.platform == "darwin": + dirs += [ + "/Library/Fonts", + "/System/Library/Fonts", + os.path.expanduser("~/Library/Fonts"), + ] + + ext = os.path.splitext(ttf_filename)[1] + first_font_with_a_different_extension = None + for directory in dirs: + for walkroot, walkdir, walkfilenames in os.walk(directory): + for walkfilename in walkfilenames: + if ext and walkfilename == ttf_filename: + return freetype(os.path.join(walkroot, walkfilename)) + elif not ext and os.path.splitext(walkfilename)[0] == ttf_filename: + fontpath = os.path.join(walkroot, walkfilename) + if os.path.splitext(fontpath)[1] == ".ttf": + return freetype(fontpath) + if not ext and first_font_with_a_different_extension is None: + first_font_with_a_different_extension = fontpath + if first_font_with_a_different_extension: + return freetype(first_font_with_a_different_extension) + raise + + +def load_path(filename: str | bytes) -> ImageFont: + """ + Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a + bitmap font along the Python path. 
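To make the directory-walking fallback in truetype() above concrete, here is a minimal usage sketch; "DejaVuSans.ttf" is an assumed example of a font installed in one of the platform directories listed in the docstring, not something this module ships::

    from PIL import ImageFont

    # A bare filename is enough: truetype() first tries the path as given
    # and, on OSError, walks WINDIR\fonts, the XDG data dirs, or the macOS
    # Library/Fonts folders until a matching filename is found.
    font = ImageFont.truetype("DejaVuSans.ttf", size=24)
    print(font.getname())  # e.g. ('DejaVu Sans', 'Book')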
+ + :param filename: Name of font file. + :return: A font object. + :exception OSError: If the file could not be read. + """ + if not isinstance(filename, str): + filename = filename.decode("utf-8") + for directory in sys.path: + try: + return load(os.path.join(directory, filename)) + except OSError: + pass + msg = f'cannot find font file "{filename}" in sys.path' + if os.path.exists(filename): + msg += f', did you mean ImageFont.load("{filename}") instead?' + + raise OSError(msg) + + +def load_default_imagefont() -> ImageFont: + f = ImageFont() + f._load_pilfont_data( + # courB08 + BytesIO( + base64.b64decode( + b""" +UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA +BgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL +AAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA +AAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB +ACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A +BAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB +//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA +AAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH +AAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA +ZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv +AAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/ +/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5 +AAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA +AP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG +AAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA +BgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA +AMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA +2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF +AAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA//// ++gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA +////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA +BgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv +AAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA +AAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA +AUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA +BQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP// 
+//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA +AP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF +AAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB +mwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn +AAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA +AAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7 +AAYAAgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA +Av/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB +//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA +AAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ +AAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC +DgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ +AAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/ ++wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5 +AAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/ +///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG +AAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA +BQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA +Am0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC +eQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG +AAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA//// ++gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA +////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA +BgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT +AAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A +AALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA +Au4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA +Bf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP// +//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA +AP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ +AAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA +LQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5 +AAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA 
+AABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5 +AAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA +AP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG +AAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA +EgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK +AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA +pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG +AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA//// ++QAGAAIAzgAKANUAEw== +""" + ) + ), + Image.open( + BytesIO( + base64.b64decode( + b""" +iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u +Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9 +M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g +LeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F +IUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA +Bu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791 +NAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx +in0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9 +SjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY +AYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt +y8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG +ABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY +lODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H +/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3 +AAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47 +c4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/ +/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw +pEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv +oJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR +evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA +AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v// +Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR +w7IkEbzhVQAAAABJRU5ErkJggg== +""" + ) + ) + ), + ) + return f + + +def load_default(size: float | None = None) -> FreeTypeFont | ImageFont: + """If FreeType support is available, load a version of Aileron Regular, + https://dotcolon.net/fonts/aileron, with a more limited character set. + + Otherwise, load a "better than nothing" font. + + .. versionadded:: 1.1.4 + + :param size: The font size of Aileron Regular. + + .. versionadded:: 10.1.0 + + :return: A font object. 
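A brief sketch of how the two default-font paths above behave in practice; which object comes back depends on whether Pillow was built with FreeType::

    from PIL import Image, ImageDraw, ImageFont

    # Returns the embedded Aileron Regular as a FreeTypeFont when FreeType
    # is available, else the bitmap ImageFont from load_default_imagefont().
    font = ImageFont.load_default()

    im = Image.new("RGB", (160, 40), "white")
    ImageDraw.Draw(im).text((8, 12), "Hello", fill="black", font=font)
    # Passing size= forces the FreeType path and so requires FreeType support.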
+ """ + if isinstance(core, ModuleType) or size is not None: + return truetype( + BytesIO( + base64.b64decode( + b""" +AAEAAAAPAIAAAwBwRkZUTYwDlUAAADFoAAAAHEdERUYAqADnAAAo8AAAACRHUE9ThhmITwAAKfgAA +AduR1NVQnHxefoAACkUAAAA4k9TLzJovoHLAAABeAAAAGBjbWFw5lFQMQAAA6gAAAGqZ2FzcP//AA +MAACjoAAAACGdseWYmRXoPAAAGQAAAHfhoZWFkE18ayQAAAPwAAAA2aGhlYQboArEAAAE0AAAAJGh +tdHjjERZ8AAAB2AAAAdBsb2NhuOexrgAABVQAAADqbWF4cAC7AEYAAAFYAAAAIG5hbWUr+h5lAAAk +OAAAA6Jwb3N0D3oPTQAAJ9wAAAEKAAEAAAABGhxJDqIhXw889QALA+gAAAAA0Bqf2QAAAADhCh2h/ +2r/LgOxAyAAAAAIAAIAAAAAAAAAAQAAA8r/GgAAA7j/av9qA7EAAQAAAAAAAAAAAAAAAAAAAHQAAQ +AAAHQAQwAFAAAAAAACAAAAAQABAAAAQAAAAAAAAAADAfoBkAAFAAgCigJYAAAASwKKAlgAAAFeADI +BPgAAAAAFAAAAAAAAAAAAAAcAAAAAAAAAAAAAAABVS1dOAEAAIPsCAwL/GgDIA8oA5iAAAJMAAAAA +AhICsgAAACAAAwH0AAAAAAAAAU0AAADYAAAA8gA5AVMAVgJEAEYCRAA1AuQAKQKOAEAAsAArATsAZ +AE7AB4CMABVAkQAUADc/+EBEgAgANwAJQEv//sCRAApAkQAggJEADwCRAAtAkQAIQJEADkCRAArAk +QAMgJEACwCRAAxANwAJQDc/+ECRABnAkQAUAJEAEQB8wAjA1QANgJ/AB0CcwBkArsALwLFAGQCSwB +kAjcAZALGAC8C2gBkAQgAZAIgADcCYQBkAj8AZANiAGQCzgBkAuEALwJWAGQC3QAvAmsAZAJJADQC +ZAAiAqoAXgJuACADuAAaAnEAGQJFABMCTwAuATMAYgEv//sBJwAiAkQAUAH0ADIBLAApAhMAJAJjA +EoCEQAeAmcAHgIlAB4BIgAVAmcAHgJRAEoA7gA+AOn/8wIKAEoA9wBGA1cASgJRAEoCSgAeAmMASg +JnAB4BSgBKAcsAGAE5ABQCUABCAgIAAQMRAAEB4v/6AgEAAQHOABQBLwBAAPoAYAEvACECRABNA0Y +AJAItAHgBKgAcAkQAUAEsAHQAygAgAi0AOQD3ADYA9wAWAaEANgGhABYCbAAlAYMAeAGDADkA6/9q +AhsAFAIKABUB/QAVAAAAAwAAAAMAAAAcAAEAAAAAAKQAAwABAAAAHAAEAIgAAAAeABAAAwAOAH4Aq +QCrALEAtAC3ALsgGSAdICYgOiBEISL7Av//AAAAIACpAKsAsAC0ALcAuyAYIBwgJiA5IEQhIvsB// +//4/+5/7j/tP+y/7D/reBR4E/gR+A14CzfTwVxAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAEGAAABAAAAAAAAAAECAAAAAgAAAAAAAAAAAAAAAAAAAAEAAAMEBQYHCAkKCwwNDg8QERIT +FBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9AQUJDREVGR0hJSktMT +U5PUFFSU1RVVldYWVpbXF1eX2BhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGQAAA +AAAAAAYnFmAAAAAABlAAAAAAAAAAAAAAAAAAAAAAAAAAAAY2htAAAAAAAAAABrbGlqAAAAAHAAbm9 +ycwBnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAmACYAJgAmAD4AUgCCAMoBCgFO +AVwBcgGIAaYBvAHKAdYB6AH2AgwCIAJKAogCpgLWAw4DIgNkA5wDugPUA+gD/AQQBEYEogS8BPoFJ +gVSBWoFgAWwBcoF1gX6BhQGJAZMBmgGiga0BuIHGgdUB2YHkAeiB8AH3AfyCAoIHAgqCDoITghcCG +oIogjSCPoJKglYCXwJwgnqCgIKKApACl4Klgq8CtwLDAs8C1YLjAuyC9oL7gwMDCYMSAxgDKAMrAz +qDQoNTA1mDYQNoA2uDcAN2g3oDfYODA4iDkoOXA5sDnoOnA7EDvwAAAAFAAAAAAH0ArwAAwAGAAkA +DAAPAAAxESERAxMhExcRASELARETAfT6qv6syKr+jgFUqsiqArz9RAGLAP/+1P8B/v3VAP8BLP4CA +P8AAgA5//IAuQKyAAMACwAANyMDMwIyFhQGIiY0oE4MZk84JCQ4JLQB/v3AJDgkJDgAAgBWAeUBPA +LfAAMABwAAEyMnMxcjJzOmRgpagkYKWgHl+vr6AAAAAAIARgAAAf4CsgAbAB8AAAEHMxUjByM3Iwc +jNyM1MzcjNTM3MwczNzMHMxUrAQczAZgdZXEvOi9bLzovWmYdZXEvOi9bLzovWp9bHlsBn4w429vb +2ziMONvb29s4jAAAAAMANf+mAg4DDAAfACYALAAAJRQGBxUjNS4BJzMeARcRLgE0Njc1MxUeARcjJ +icVHgEBFBYXNQ4BExU+ATU0Ag5xWDpgcgRcBz41Xl9oVTpVYwpcC1ttXP6cLTQuM5szOrVRZwlOTQ +ZqVzZECAEAGlukZAlOTQdrUG8O7iNlAQgxNhDlCDj+8/YGOjReAAAAAAUAKf/yArsCvAAHAAsAFQA +dACcAABIyFhQGIiY0EyMBMwQiBhUUFjI2NTQSMhYUBiImNDYiBhUUFjI2NTR5iFBQiFCVVwHAV/5c +OiMjOiPmiFBQiFCxOiMjOiMCvFaSVlaS/ZoCsjIzMC80NC8w/uNWklZWkhozMC80NC8wAAAAAgBA/ +/ICbgLAACIALgAAARUjEQYjIiY1NDY3LgE1NDYzMhcVJiMiBhUUFhcWOwE1MxUFFBYzMjc1IyIHDg +ECbmBcYYOOVkg7R4hsQjY4Q0RNRD4SLDxW/pJUXzksPCkUUk0BgUb+zBVUZ0BkDw5RO1huCkULQzp +COAMBcHDHRz0J/AIHRQAAAAEAKwHlAIUC3wADAAATIycze0YKWgHl+gAAAAABAGT/sAEXAwwACQAA +EzMGEBcjLgE0Nt06dXU6OUBAAwzG/jDGVePs4wAAAAEAHv+wANEDDAAJAAATMx4BFAYHIzYQHjo5Q +EA5OnUDDFXj7ONVxgHQAAAAAQBVAFIB2wHbAA4AAAE3FwcXBycHJzcnNxcnMwEtmxOfcTJjYzJxnx +ObCj4BKD07KYolmZkliik7PbMAAQBQAFUB9AIlAAsAAAEjFSM1IzUzNTMVMwH0tTq1tTq1AR/Kyjj 
+OzgAAAAAB/+H/iACMAGQABAAANwcjNzOMWlFOXVrS3AAAAQAgAP8A8gE3AAMAABMjNTPy0tIA/zgA +AQAl//IApQByAAcAADYyFhQGIiY0STgkJDgkciQ4JCQ4AAAAAf/7/+IBNALQAAMAABcjEzM5Pvs+H +gLuAAAAAAIAKf/yAhsCwAADAAcAABIgECA2IBAgKQHy/g5gATL+zgLA/TJEAkYAAAAAAQCCAAABlg +KyAAgAAAERIxEHNTc2MwGWVr6SIygCsv1OAldxW1sWAAEAPAAAAg4CwAAZAAA3IRUhNRM+ATU0JiM +iDwEjNz4BMzIWFRQGB7kBUv4x+kI2QTt+EAFWAQp8aGVtSl5GRjEA/0RVLzlLmAoKa3FsUkNxXQAA +AAEALf/yAhYCwAAqAAABHgEVFAYjIi8BMxceATMyNjU0KwE1MzI2NTQmIyIGDwEjNz4BMzIWFRQGA +YxBSZJo2RUBVgEHV0JBUaQREUBUQzc5TQcBVgEKfGhfcEMBbxJbQl1x0AoKRkZHPn9GSD80QUVCCg +pfbGBPOlgAAAACACEAAAIkArIACgAPAAAlIxUjNSE1ATMRMyMRBg8BAiRXVv6qAVZWV60dHLCurq4 +rAdn+QgFLMibzAAABADn/8gIZArIAHQAAATIWFRQGIyIvATMXFjMyNjU0JiMiByMTIRUhBzc2ATNv +d5Fl1RQBVgIad0VSTkVhL1IwAYj+vh8rMAHHgGdtgcUKCoFXTU5bYgGRRvAuHQAAAAACACv/8gITA +sAAFwAjAAABMhYVFAYjIhE0NjMyFh8BIycmIyIDNzYTMjY1NCYjIgYVFBYBLmp7imr0l3RZdAgBXA +IYZ5wKJzU6QVNJSz5SUAHSgWltiQFGxcNlVQoKdv7sPiz+ZF1LTmJbU0lhAAAAAQAyAAACGgKyAAY +AAAEVASMBITUCGv6oXAFL/oECsij9dgJsRgAAAAMALP/xAhgCwAAWACAALAAAAR4BFRQGIyImNTQ2 +Ny4BNTQ2MhYVFAYmIgYVFBYyNjU0AzI2NTQmIyIGFRQWAZQ5S5BmbIpPOjA7ecp5P2F8Q0J8RIVJS +0pLTEtOAW0TXTxpZ2ZqPF0SE1A3VWVlVTdQ/UU0N0RENzT9/ko+Ok1NOj1LAAIAMf/yAhkCwAAXAC +MAAAEyERQGIyImLwEzFxYzMhMHBiMiJjU0NhMyNjU0JiMiBhUUFgEl9Jd0WXQIAVwCGGecCic1SWp +7imo+UlBAQVNJAsD+usXDZVUKCnYBFD4sgWltif5kW1NJYV1LTmIAAAACACX/8gClAiAABwAPAAAS +MhYUBiImNBIyFhQGIiY0STgkJDgkJDgkJDgkAiAkOCQkOP52JDgkJDgAAAAC/+H/iAClAiAABwAMA +AASMhYUBiImNBMHIzczSTgkJDgkaFpSTl4CICQ4JCQ4/mba5gAAAQBnAB4B+AH0AAYAAAENARUlNS +UB+P6qAVb+bwGRAbCmpkbJRMkAAAIAUAC7AfQBuwADAAcAAAEhNSERITUhAfT+XAGk/lwBpAGDOP8 +AOAABAEQAHgHVAfQABgAAARUFNS0BNQHV/m8BVv6qAStEyUSmpkYAAAAAAgAj//IB1ALAABgAIAAA +ATIWFRQHDgEHIz4BNz4BNTQmIyIGByM+ARIyFhQGIiY0AQRibmktIAJWBSEqNig+NTlHBFoDezQ4J +CQ4JALAZ1BjaS03JS1DMD5LLDQ/SUVgcv2yJDgkJDgAAAAAAgA2/5gDFgKYADYAQgAAAQMGFRQzMj +Y1NCYjIg4CFRQWMzI2NxcGIyImNTQ+AjMyFhUUBiMiJwcGIyImNTQ2MzIfATcHNzYmIyIGFRQzMjY +Cej8EJjJJlnBAfGQ+oHtAhjUYg5OPx0h2k06Os3xRWQsVLjY5VHtdPBwJETcJDyUoOkZEJz8B0f74 +EQ8kZl6EkTFZjVOLlyknMVm1pmCiaTq4lX6CSCknTVRmmR8wPdYnQzxuSWVGAAIAHQAAAncCsgAHA +AoAACUjByMTMxMjATMDAcj+UVz4dO5d/sjPZPT0ArL9TgE6ATQAAAADAGQAAAJMArIAEAAbACcAAA +EeARUUBgcGKwERMzIXFhUUJRUzMjc2NTQnJiMTPgE1NCcmKwEVMzIBvkdHZkwiNt7LOSGq/oeFHBt +hahIlSTM+cB8Yj5UWAW8QT0VYYgwFArIEF5Fv1eMED2NfDAL93AU+N24PBP0AAAAAAQAv//ICjwLA +ABsAAAEyFh8BIycmIyIGFRQWMzI/ATMHDgEjIiY1NDYBdX+PCwFWAiKiaHx5ZaIiAlYBCpWBk6a0A +sCAagoKpqN/gaOmCgplhcicn8sAAAIAZAAAAp8CsgAMABkAAAEeARUUBgcGKwERMzITPgE1NCYnJi +sBETMyAY59lJp8IzXN0jUVWmdjWRs5d3I4Aq4QqJWUug8EArL9mQ+PeHGHDgX92gAAAAABAGQAAAI +vArIACwAAJRUhESEVIRUhFSEVAi/+NQHB/pUBTf6zRkYCskbwRvAAAAABAGQAAAIlArIACQAAExUh +FSERIxEhFboBQ/69VgHBAmzwRv7KArJGAAAAAAEAL//yAo8CwAAfAAABMxEjNQcGIyImNTQ2MzIWH +wEjJyYjIgYVFBYzMjY1IwGP90wfPnWTprSSf48LAVYCIqJofHllVG+hAU3+s3hARsicn8uAagoKpq +N/gaN1XAAAAAEAZAAAAowCsgALAAABESMRIREjETMRIRECjFb+hFZWAXwCsv1OAS7+0gKy/sQBPAA +AAAABAGQAAAC6ArIAAwAAMyMRM7pWVgKyAAABADf/8gHoArIAEwAAAREUBw4BIyImLwEzFxYzMjc2 +NREB6AIFcGpgbQIBVgIHfXQKAQKy/lYxIltob2EpKYyEFD0BpwAAAAABAGQAAAJ0ArIACwAACQEjA +wcVIxEzEQEzATsBJ3ntQlZWAVVlAWH+nwEnR+ACsv6RAW8AAQBkAAACLwKyAAUAACUVIREzEQIv/j +VWRkYCsv2UAAABAGQAAAMUArIAFAAAAREjETQ3BgcDIwMmJxYVESMRMxsBAxRWAiMxemx8NxsCVo7 +MywKy/U4BY7ZLco7+nAFmoFxLtP6dArL9lwJpAAAAAAEAZAAAAoACsgANAAAhIwEWFREjETMBJjUR +MwKAhP67A1aEAUUDVAJeeov+pwKy/aJ5jAFZAAAAAgAv//ICuwLAAAkAEwAAEiAWFRQGICY1NBIyN +jU0JiIGFRTbATSsrP7MrNrYenrYegLAxaKhxsahov47nIeIm5uIhwACAGQAAAJHArIADgAYAAABHg +EVFAYHBisBESMRMzITNjQnJisBETMyAZRUX2VOHzuAVtY7GlxcGDWIiDUCrgtnVlVpCgT+5gKy/rU +V1BUF/vgAAAACAC//zAK9AsAAEgAcAAAlFhcHJiMiBwYjIiY1NDYgFhUUJRQWMjY1NCYiBgI9PUMx 
+UDcfKh8omqysATSs/dR62Hp62HpICTg7NgkHxqGixcWitbWHnJyHiJubAAIAZAAAAlgCsgAXACMAA +CUWFyMmJyYnJisBESMRMzIXHgEVFAYHFiUzMjc+ATU0JyYrAQIqDCJfGQwNWhAhglbiOx9QXEY1Tv +6bhDATMj1lGSyMtYgtOXR0BwH+1wKyBApbU0BSESRAAgVAOGoQBAABADT/8gIoAsAAJQAAATIWFyM +uASMiBhUUFhceARUUBiMiJiczHgEzMjY1NCYnLgE1NDYBOmd2ClwGS0E6SUNRdW+HZnKKC1wPWkQ9 +Uk1cZGuEAsBwXUJHNjQ3OhIbZVZZbm5kREo+NT5DFRdYUFdrAAAAAAEAIgAAAmQCsgAHAAABIxEjE +SM1IQJk9lb2AkICbP2UAmxGAAEAXv/yAmQCsgAXAAABERQHDgEiJicmNREzERQXHgEyNjc2NRECZA +IIgfCBCAJWAgZYmlgGAgKy/k0qFFxzc1wUKgGz/lUrEkRQUEQSKwGrAAAAAAEAIAAAAnoCsgAGAAA +hIwMzGwEzAYJ07l3N1FwCsv2PAnEAAAEAGgAAA7ECsgAMAAABAyMLASMDMxsBMxsBA7HAcZyicrZi +kaB0nJkCsv1OAlP9rQKy/ZsCW/2kAmYAAAEAGQAAAm8CsgALAAAhCwEjEwMzGwEzAxMCCsrEY/bkY +re+Y/D6AST+3AFcAVb+5gEa/q3+oQAAAQATAAACUQKyAAgAAAERIxEDMxsBMwFdVvRjwLphARD+8A +EQAaL+sQFPAAABAC4AAAI5ArIACQAAJRUhNQEhNSEVAQI5/fUBof57Aen+YUZGQgIqRkX92QAAAAA +BAGL/sAEFAwwABwAAARUjETMVIxEBBWlpowMMOP0UOANcAAAB//v/4gE0AtAAAwAABSMDMwE0Pvs+ +HgLuAAAAAQAi/7AAxQMMAAcAABcjNTMRIzUzxaNpaaNQOALsOAABAFAA1wH0AmgABgAAJQsBIxMzE +wGwjY1GsESw1wFZ/qcBkf5vAAAAAQAy/6oBwv/iAAMAAAUhNSEBwv5wAZBWOAAAAAEAKQJEALYCsg +ADAAATIycztjhVUAJEbgAAAAACACT/8gHQAiAAHQAlAAAhJwcGIyImNTQ2OwE1NCcmIyIHIz4BMzI +XFh0BFBcnMjY9ASYVFAF6CR0wVUtgkJoiAgdgaQlaBm1Zrg4DCuQ9R+5MOSFQR1tbDiwUUXBUXowf +J8c9SjRORzYSgVwAAAAAAgBK//ICRQLfABEAHgAAATIWFRQGIyImLwEVIxEzETc2EzI2NTQmIyIGH +QEUFgFUcYCVbiNJEyNWVigySElcU01JXmECIJd4i5QTEDRJAt/+3jkq/hRuZV55ZWsdX14AAQAe// +IB9wIgABgAAAEyFhcjJiMiBhUUFjMyNjczDgEjIiY1NDYBF152DFocbEJXU0A1Rw1aE3pbaoKQAiB +oWH5qZm1tPDlaXYuLgZcAAAACAB7/8gIZAt8AEQAeAAABESM1BwYjIiY1NDYzMhYfAREDMjY9ATQm +IyIGFRQWAhlWKDJacYCVbiNJEyOnSV5hQUlcUwLf/SFVOSqXeIuUExA0ARb9VWVrHV9ebmVeeQACA +B7/8gH9AiAAFQAbAAABFAchHgEzMjY3Mw4BIyImNTQ2MzIWJyIGByEmAf0C/oAGUkA1SwlaD4FXbI +WObmt45UBVBwEqDQEYFhNjWD84W16Oh3+akU9aU60AAAEAFQAAARoC8gAWAAATBh0BMxUjESMRIzU +zNTQ3PgEzMhcVJqcDbW1WOTkDB0k8Hx5oAngVITRC/jQBzEIsJRs5PwVHEwAAAAIAHv8uAhkCIAAi +AC8AAAERFAcOASMiLwEzFx4BMzI2NzY9AQcGIyImNTQ2MzIWHwE1AzI2PQE0JiMiBhUUFgIZAQSEd +NwRAVcBBU5DTlUDASgyWnGAlW4jSRMjp0leYUFJXFMCEv5wSh1zeq8KCTI8VU0ZIQk5Kpd4i5QTED +RJ/iJlax1fXm5lXnkAAQBKAAACCgLkABcAAAEWFREjETQnLgEHDgEdASMRMxE3NjMyFgIIAlYCBDs +6RVRWViE5UVViAYUbQP7WASQxGzI7AQJyf+kC5P7TPSxUAAACAD4AAACsAsAABwALAAASMhYUBiIm +NBMjETNeLiAgLiBiVlYCwCAuICAu/WACEgAC//P/LgCnAsAABwAVAAASMhYUBiImNBcRFAcGIyInN +RY3NjURWS4gIC4gYgMLcRwNSgYCAsAgLiAgLo79wCUbZAJGBzMOHgJEAAAAAQBKAAACCALfAAsAAC +EnBxUjETMREzMHEwGTwTJWVvdu9/rgN6kC3/4oAQv6/ugAAQBG//wA3gLfAA8AABMRFBceATcVBiM +iJicmNRGcAQIcIxkkKi4CAQLf/bkhERoSBD4EJC8SNAJKAAAAAQBKAAADEAIgACQAAAEWFREjETQn +JiMiFREjETQnJiMiFREjETMVNzYzMhYXNzYzMhYDCwVWBAxedFYEDF50VlYiJko7ThAvJkpEVAGfI +jn+vAEcQyRZ1v76ARxDJFnW/voCEk08HzYtRB9HAAAAAAEASgAAAgoCIAAWAAABFhURIxE0JyYjIg +YdASMRMxU3NjMyFgIIAlYCCXBEVVZWITlRVWIBhRtA/tYBJDEbbHR/6QISWz0sVAAAAAACAB7/8gI +sAiAABwARAAASIBYUBiAmNBIyNjU0JiIGFRSlAQCHh/8Ah7ieWlqeWgIgn/Cfn/D+s3ZfYHV1YF8A +AgBK/zwCRQIgABEAHgAAATIWFRQGIyImLwERIxEzFTc2EzI2NTQmIyIGHQEUFgFUcYCVbiNJEyNWV +igySElcU01JXmECIJd4i5QTEDT+8wLWVTkq/hRuZV55ZWsdX14AAgAe/zwCGQIgABEAHgAAAREjEQ +cGIyImNTQ2MzIWHwE1AzI2PQE0JiMiBhUUFgIZVigyWnGAlW4jSRMjp0leYUFJXFMCEv0qARk5Kpd +4i5QTEDRJ/iJlax1fXm5lXnkAAQBKAAABPgIeAA0AAAEyFxUmBhURIxEzFTc2ARoWDkdXVlYwIwIe +B0EFVlf+0gISU0cYAAEAGP/yAa0CIAAjAAATMhYXIyYjIgYVFBYXHgEVFAYjIiYnMxYzMjY1NCYnL +gE1NDbkV2MJWhNdKy04PF1XbVhWbgxaE2ktOjlEUllkAiBaS2MrJCUoEBlPQkhOVFZoKCUmLhIWSE +BIUwAAAAEAFP/4ARQCiQAXAAATERQXHgE3FQYjIiYnJjURIzUzNTMVMxWxAQMmMx8qMjMEAUdHVmM +BzP7PGw4mFgY/BSwxDjQBNUJ7e0IAAAABAEL/8gICAhIAFwAAAREjNQcGIyImJyY1ETMRFBceATMy +Nj0BAgJWITlRT2EKBVYEBkA1RFECEv3uWj4qTToiOQE+/tIlJC43c4DpAAAAAAEAAQAAAfwCEgAGA 
+AABAyMDMxsBAfzJaclfop8CEv3uAhL+LQHTAAABAAEAAAMLAhIADAAAAQMjCwEjAzMbATMbAQMLqW +Z2dmapY3t0a3Z7AhL97gG+/kICEv5AAcD+QwG9AAAB//oAAAHWAhIACwAAARMjJwcjEwMzFzczARq +8ZIuKY763ZoWFYwEO/vLV1QEMAQbNzQAAAQAB/y4B+wISABEAAAEDDgEjIic1FjMyNj8BAzMbAQH7 +2iFZQB8NDRIpNhQH02GenQIS/cFVUAJGASozEwIt/i4B0gABABQAAAGxAg4ACQAAJRUhNQEhNSEVA +QGx/mMBNP7iAYL+zkREQgGIREX+ewAAAAABAED/sAEOAwwALAAAASMiBhUUFxYVFAYHHgEVFAcGFR +QWOwEVIyImNTQ3NjU0JzU2NTQnJjU0NjsBAQ4MKiMLDS4pKS4NCyMqDAtERAwLUlILDERECwLUGBk +WTlsgKzUFBTcrIFtOFhkYOC87GFVMIkUIOAhFIkxVGDsvAAAAAAEAYP84AJoDIAADAAAXIxEzmjo6 +yAPoAAEAIf+wAO8DDAAsAAATFQYVFBcWFRQGKwE1MzI2NTQnJjU0NjcuATU0NzY1NCYrATUzMhYVF +AcGFRTvUgsMREQLDCojCw0uKSkuDQsjKgwLREQMCwF6OAhFIkxVGDsvOBgZFk5bICs1BQU3KyBbTh +YZGDgvOxhVTCJFAAABAE0A3wH2AWQAEwAAATMUIyImJyYjIhUjNDMyFhcWMzIBvjhuGywtQR0xOG4 +bLC1BHTEBZIURGCNMhREYIwAAAwAk/94DIgLoAAcAEQApAAAAIBYQBiAmECQgBhUUFiA2NTQlMhYX +IyYjIgYUFjMyNjczDgEjIiY1NDYBAQFE3d3+vN0CB/7wubkBELn+xVBnD1wSWDo+QTcqOQZcEmZWX +HN2Aujg/rbg4AFKpr+Mjb6+jYxbWEldV5ZZNShLVn5na34AAgB4AFIB9AGeAAUACwAAAQcXIyc3Mw +cXIyc3AUqJiUmJifOJiUmJiQGepqampqampqYAAAIAHAHSAQ4CwAAHAA8AABIyFhQGIiY0NiIGFBY +yNjRgakREakSTNCEhNCECwEJqQkJqCiM4IyM4AAAAAAIAUAAAAfQCCwALAA8AAAEzFSMVIzUjNTM1 +MxMhNSEBP7W1OrW1OrX+XAGkAVs4tLQ4sP31OAAAAQB0AkQBAQKyAAMAABMjNzOsOD1QAkRuAAAAA +AEAIADsAKoBdgAHAAASMhYUBiImNEg6KCg6KAF2KDooKDoAAAIAOQBSAbUBngAFAAsAACUHIzcnMw +UHIzcnMwELiUmJiUkBM4lJiYlJ+KampqampqYAAAABADYB5QDhAt8ABAAAEzczByM2Xk1OXQHv8Po +AAQAWAeUAwQLfAAQAABMHIzczwV5NTl0C1fD6AAIANgHlAYsC3wAEAAkAABM3MwcjPwEzByM2Xk1O +XapeTU5dAe/w+grw+gAAAgAWAeUBawLfAAQACQAAEwcjNzMXByM3M8FeTU5dql5NTl0C1fD6CvD6A +AADACX/8gI1AHIABwAPABcAADYyFhQGIiY0NjIWFAYiJjQ2MhYUBiImNEk4JCQ4JOw4JCQ4JOw4JC +Q4JHIkOCQkOCQkOCQkOCQkOCQkOAAAAAEAeABSAUoBngAFAAABBxcjJzcBSomJSYmJAZ6mpqamAAA +AAAEAOQBSAQsBngAFAAAlByM3JzMBC4lJiYlJ+KampgAAAf9qAAABgQKyAAMAACsBATM/VwHAVwKy +AAAAAAIAFAHIAdwClAAHABQAABMVIxUjNSM1BRUjNwcjJxcjNTMXN9pKMkoByDICKzQqATJLKysCl +CmjoykBy46KiY3Lm5sAAQAVAAABvALyABgAAAERIxEjESMRIzUzNTQ3NjMyFxUmBgcGHQEBvFbCVj +k5AxHHHx5iVgcDAg798gHM/jQBzEIOJRuWBUcIJDAVIRYAAAABABX//AHkAvIAJQAAJR4BNxUGIyI +mJyY1ESYjIgcGHQEzFSMRIxEjNTM1NDc2MzIXERQBowIcIxkkKi4CAR4nXgwDbW1WLy8DEbNdOmYa +EQQ/BCQvEjQCFQZWFSEWQv40AcxCDiUblhP9uSEAAAAAAAAWAQ4AAQAAAAAAAAATACgAAQAAAAAAA +QAHAEwAAQAAAAAAAgAHAGQAAQAAAAAAAwAaAKIAAQAAAAAABAAHAM0AAQAAAAAABQA8AU8AAQAAAA +AABgAPAawAAQAAAAAACAALAdQAAQAAAAAACQALAfgAAQAAAAAACwAXAjQAAQAAAAAADAAXAnwAAwA +BBAkAAAAmAAAAAwABBAkAAQAOADwAAwABBAkAAgAOAFQAAwABBAkAAwA0AGwAAwABBAkABAAOAL0A +AwABBAkABQB4ANUAAwABBAkABgAeAYwAAwABBAkACAAWAbwAAwABBAkACQAWAeAAAwABBAkACwAuA +gQAAwABBAkADAAuAkwATgBvACAAUgBpAGcAaAB0AHMAIABSAGUAcwBlAHIAdgBlAGQALgAATm8gUm +lnaHRzIFJlc2VydmVkLgAAQQBpAGwAZQByAG8AbgAAQWlsZXJvbgAAUgBlAGcAdQBsAGEAcgAAUmV +ndWxhcgAAMQAuADEAMAAyADsAVQBLAFcATgA7AEEAaQBsAGUAcgBvAG4ALQBSAGUAZwB1AGwAYQBy +AAAxLjEwMjtVS1dOO0FpbGVyb24tUmVndWxhcgAAQQBpAGwAZQByAG8AbgAAQWlsZXJvbgAAVgBlA +HIAcwBpAG8AbgAgADEALgAxADAAMgA7AFAAUwAgADAAMAAxAC4AMQAwADIAOwBoAG8AdABjAG8Abg +B2ACAAMQAuADAALgA3ADAAOwBtAGEAawBlAG8AdABmAC4AbABpAGIAMgAuADUALgA1ADgAMwAyADk +AAFZlcnNpb24gMS4xMDI7UFMgMDAxLjEwMjtob3Rjb252IDEuMC43MDttYWtlb3RmLmxpYjIuNS41 +ODMyOQAAQQBpAGwAZQByAG8AbgAtAFIAZQBnAHUAbABhAHIAAEFpbGVyb24tUmVndWxhcgAAUwBvA +HIAYQAgAFMAYQBnAGEAbgBvAABTb3JhIFNhZ2FubwAAUwBvAHIAYQAgAFMAYQBnAGEAbgBvAABTb3 +JhIFNhZ2FubwAAaAB0AHQAcAA6AC8ALwB3AHcAdwAuAGQAbwB0AGMAbwBsAG8AbgAuAG4AZQB0AAB +odHRwOi8vd3d3LmRvdGNvbG9uLm5ldAAAaAB0AHQAcAA6AC8ALwB3AHcAdwAuAGQAbwB0AGMAbwBs +AG8AbgAuAG4AZQB0AABodHRwOi8vd3d3LmRvdGNvbG9uLm5ldAAAAAACAAAAAAAA/4MAMgAAAAAAA +AAAAAAAAAAAAAAAAAAAAHQAAAABAAIAAwAEAAUABgAHAAgACQAKAAsADAANAA4ADwAQABEAEgATAB 
+QAFQAWABcAGAAZABoAGwAcAB0AHgAfACAAIQAiACMAJAAlACYAJwAoACkAKgArACwALQAuAC8AMAA +xADIAMwA0ADUANgA3ADgAOQA6ADsAPAA9AD4APwBAAEEAQgBDAEQARQBGAEcASABJAEoASwBMAE0A +TgBPAFAAUQBSAFMAVABVAFYAVwBYAFkAWgBbAFwAXQBeAF8AYABhAIsAqQCDAJMAjQDDAKoAtgC3A +LQAtQCrAL4AvwC8AIwAwADBAAAAAAAB//8AAgABAAAADAAAABwAAAACAAIAAwBxAAEAcgBzAAIABA +AAAAIAAAABAAAACgBMAGYAAkRGTFQADmxhdG4AGgAEAAAAAP//AAEAAAAWAANDQVQgAB5NT0wgABZ +ST00gABYAAP//AAEAAAAA//8AAgAAAAEAAmxpZ2EADmxvY2wAFAAAAAEAAQAAAAEAAAACAAYAEAAG +AAAAAgASADQABAAAAAEATAADAAAAAgAQABYAAQAcAAAAAQABAE8AAQABAGcAAQABAE8AAwAAAAIAE +AAWAAEAHAAAAAEAAQAvAAEAAQBnAAEAAQAvAAEAGgABAAgAAgAGAAwAcwACAE8AcgACAEwAAQABAE +kAAAABAAAACgBGAGAAAkRGTFQADmxhdG4AHAAEAAAAAP//AAIAAAABABYAA0NBVCAAFk1PTCAAFlJ +PTSAAFgAA//8AAgAAAAEAAmNwc3AADmtlcm4AFAAAAAEAAAAAAAEAAQACAAYADgABAAAAAQASAAIA +AAACAB4ANgABAAoABQAFAAoAAgABACQAPQAAAAEAEgAEAAAAAQAMAAEAOP/nAAEAAQAkAAIGigAEA +AAFJAXKABoAGQAA//gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAD/sv+4/+z/7v/MAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAD/xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/9T/6AAAAAD/8QAA +ABD/vQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/7gAAAAAAAAAAAAAAAAAA//MAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABIAAAAAAAAAAP/5AAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/gAAD/4AAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA//L/9AAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAA/+gAAAAAAAkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/zAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/mAAAAAAAAAAAAAAAAAAD +/4gAA//AAAAAA//YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/+AAAAAAAAP/OAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/zv/qAAAAAP/0AAAACAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/ZAAD/egAA/1kAAAAA/5D/rgAAAAAAAAAAAA +AAAAAAAAAAAAAAAAD/9AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAD/8AAA/7b/8P+wAAD/8P/E/98AAAAA/8P/+P/0//oAAAAAAAAAAAAA//gA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/+AAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/w//C/9MAAP/SAAD/9wAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAD/yAAA/+kAAAAA//QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/9wAAAAD//QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAP/2AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAP/cAAAAAAAAAAAAAAAA/7YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAP/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/6AAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAkAFAAEAAAAAQACwAAABcA +BgAAAAAAAAAIAA4AAAAAAAsAEgAAAAAAAAATABkAAwANAAAAAQAJAAAAAAAAAAAAAAAAAAAAGAAAA +AAABwAAAAAAAAAAAAAAFQAFAAAAAAAYABgAAAAUAAAACgAAAAwAAgAPABEAFgAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAEAEQBdAAYAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAcAAAAAAAAABwAAAAAACAAAAAAAAAAAAAcAAAAHAAAAEwAJ +ABUADgAPAAAACwAQAAAAAAAAAAAAAAAAAAUAGAACAAIAAgAAAAIAGAAXAAAAGAAAABYAFgACABYAA +gAWAAAAEQADAAoAFAAMAA0ABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAASAAAAEgAGAAEAHgAkAC +YAJwApACoALQAuAC8AMgAzADcAOAA5ADoAPAA9AEUASABOAE8AUgBTAFUAVwBZAFoAWwBcAF0AcwA +AAAAAAQAAAADa3tfFAAAAANAan9kAAAAA4QodoQ== +""" + ) + ), + 10 if size is None else size, + layout_engine=Layout.BASIC, + ) + return load_default_imagefont() diff --git a/py311/lib/python3.11/site-packages/PIL/ImageGrab.py b/py311/lib/python3.11/site-packages/PIL/ImageGrab.py new file mode 100644 index 
0000000000000000000000000000000000000000..1eb4507344cd412bed5c113d4c52e39f26cecab2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageGrab.py @@ -0,0 +1,196 @@ +# +# The Python Imaging Library +# $Id$ +# +# screen grabber +# +# History: +# 2001-04-26 fl created +# 2001-09-17 fl use builtin driver, if present +# 2002-11-19 fl added grabclipboard support +# +# Copyright (c) 2001-2002 by Secret Labs AB +# Copyright (c) 2001-2002 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import io +import os +import shutil +import subprocess +import sys +import tempfile + +from . import Image + +TYPE_CHECKING = False +if TYPE_CHECKING: + from . import ImageWin + + +def grab( + bbox: tuple[int, int, int, int] | None = None, + include_layered_windows: bool = False, + all_screens: bool = False, + xdisplay: str | None = None, + window: int | ImageWin.HWND | None = None, +) -> Image.Image: + im: Image.Image + if xdisplay is None: + if sys.platform == "darwin": + fh, filepath = tempfile.mkstemp(".png") + os.close(fh) + args = ["screencapture"] + if bbox: + left, top, right, bottom = bbox + args += ["-R", f"{left},{top},{right-left},{bottom-top}"] + subprocess.call(args + ["-x", filepath]) + im = Image.open(filepath) + im.load() + os.unlink(filepath) + if bbox: + im_resized = im.resize((right - left, bottom - top)) + im.close() + return im_resized + return im + elif sys.platform == "win32": + if window is not None: + all_screens = -1 + offset, size, data = Image.core.grabscreen_win32( + include_layered_windows, + all_screens, + int(window) if window is not None else 0, + ) + im = Image.frombytes( + "RGB", + size, + data, + # RGB, 32-bit line padding, origin lower left corner + "raw", + "BGR", + (size[0] * 3 + 3) & -4, + -1, + ) + if bbox: + x0, y0 = offset + left, top, right, bottom = bbox + im = im.crop((left - x0, top - y0, right - x0, bottom - y0)) + return im + # Cast to Optional[str] needed for Windows and macOS. 
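Caller-side, the platform branches above are invisible; a minimal capture sketch (the coordinates are illustrative)::

    from PIL import ImageGrab

    # Full screen: macOS shells out to screencapture, Windows uses the
    # builtin grabscreen_win32 binding shown above, and other platforms
    # continue into the X11/Wayland logic below.
    im = ImageGrab.grab()

    # bbox is (left, top, right, bottom) in screen coordinates.
    region = ImageGrab.grab(bbox=(0, 0, 640, 480))
    region.save("region.png")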
+ display_name: str | None = xdisplay + try: + if not Image.core.HAVE_XCB: + msg = "Pillow was built without XCB support" + raise OSError(msg) + size, data = Image.core.grabscreen_x11(display_name) + except OSError: + if display_name is None and sys.platform not in ("darwin", "win32"): + if shutil.which("gnome-screenshot"): + args = ["gnome-screenshot", "-f"] + elif shutil.which("grim"): + args = ["grim"] + elif shutil.which("spectacle"): + args = ["spectacle", "-n", "-b", "-f", "-o"] + else: + raise + fh, filepath = tempfile.mkstemp(".png") + os.close(fh) + subprocess.call(args + [filepath]) + im = Image.open(filepath) + im.load() + os.unlink(filepath) + if bbox: + im_cropped = im.crop(bbox) + im.close() + return im_cropped + return im + else: + raise + else: + im = Image.frombytes("RGB", size, data, "raw", "BGRX", size[0] * 4, 1) + if bbox: + im = im.crop(bbox) + return im + + +def grabclipboard() -> Image.Image | list[str] | None: + if sys.platform == "darwin": + p = subprocess.run( + ["osascript", "-e", "get the clipboard as «class PNGf»"], + capture_output=True, + ) + if p.returncode != 0: + return None + + import binascii + + data = io.BytesIO(binascii.unhexlify(p.stdout[11:-3])) + return Image.open(data) + elif sys.platform == "win32": + fmt, data = Image.core.grabclipboard_win32() + if fmt == "file": # CF_HDROP + import struct + + o = struct.unpack_from("I", data)[0] + if data[16] == 0: + files = data[o:].decode("mbcs").split("\0") + else: + files = data[o:].decode("utf-16le").split("\0") + return files[: files.index("")] + if isinstance(data, bytes): + data = io.BytesIO(data) + if fmt == "png": + from . import PngImagePlugin + + return PngImagePlugin.PngImageFile(data) + elif fmt == "DIB": + from . import BmpImagePlugin + + return BmpImagePlugin.DibImageFile(data) + return None + else: + if os.getenv("WAYLAND_DISPLAY"): + session_type = "wayland" + elif os.getenv("DISPLAY"): + session_type = "x11" + else: # Session type check failed + session_type = None + + if shutil.which("wl-paste") and session_type in ("wayland", None): + args = ["wl-paste", "-t", "image"] + elif shutil.which("xclip") and session_type in ("x11", None): + args = ["xclip", "-selection", "clipboard", "-t", "image/png", "-o"] + else: + msg = "wl-paste or xclip is required for ImageGrab.grabclipboard() on Linux" + raise NotImplementedError(msg) + + p = subprocess.run(args, capture_output=True) + if p.returncode != 0: + err = p.stderr + for silent_error in [ + # wl-paste, when the clipboard is empty + b"Nothing is copied", + # Ubuntu/Debian wl-paste, when the clipboard is empty + b"No selection", + # Ubuntu/Debian wl-paste, when an image isn't available + b"No suitable type of content copied", + # wl-paste or Ubuntu/Debian xclip, when an image isn't available + b" not available", + # xclip, when an image isn't available + b"cannot convert ", + # xclip, when the clipboard isn't initialized + b"xclip: Error: There is no owner for the ", + ]: + if silent_error in err: + return None + msg = f"{args[0]} error" + if err: + msg += f": {err.strip().decode()}" + raise ChildProcessError(msg) + + data = io.BytesIO(p.stdout) + im = Image.open(data) + im.load() + return im diff --git a/py311/lib/python3.11/site-packages/PIL/ImageMath.py b/py311/lib/python3.11/site-packages/PIL/ImageMath.py new file mode 100644 index 0000000000000000000000000000000000000000..c33809ced890e437d10102a6c065d9efe3207685 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageMath.py @@ -0,0 +1,368 @@ +# +# The Python Imaging Library +# $Id$ +# 
+# a simple math add-on for the Python Imaging Library +# +# History: +# 1999-02-15 fl Original PIL Plus release +# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6 +# 2005-09-12 fl Fixed int() and float() for Python 2.4.1 +# +# Copyright (c) 1999-2005 by Secret Labs AB +# Copyright (c) 2005 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import builtins +from types import CodeType +from typing import Any, Callable + +from . import Image, _imagingmath +from ._deprecate import deprecate + + +class _Operand: + """Wraps an image operand, providing standard operators""" + + def __init__(self, im: Image.Image): + self.im = im + + def __fixup(self, im1: _Operand | float) -> Image.Image: + # convert image to suitable mode + if isinstance(im1, _Operand): + # argument was an image. + if im1.im.mode in ("1", "L"): + return im1.im.convert("I") + elif im1.im.mode in ("I", "F"): + return im1.im + else: + msg = f"unsupported mode: {im1.im.mode}" + raise ValueError(msg) + else: + # argument was a constant + if isinstance(im1, (int, float)) and self.im.mode in ("1", "L", "I"): + return Image.new("I", self.im.size, im1) + else: + return Image.new("F", self.im.size, im1) + + def apply( + self, + op: str, + im1: _Operand | float, + im2: _Operand | float | None = None, + mode: str | None = None, + ) -> _Operand: + im_1 = self.__fixup(im1) + if im2 is None: + # unary operation + out = Image.new(mode or im_1.mode, im_1.size, None) + try: + op = getattr(_imagingmath, f"{op}_{im_1.mode}") + except AttributeError as e: + msg = f"bad operand type for '{op}'" + raise TypeError(msg) from e + _imagingmath.unop(op, out.getim(), im_1.getim()) + else: + # binary operation + im_2 = self.__fixup(im2) + if im_1.mode != im_2.mode: + # convert both arguments to floating point + if im_1.mode != "F": + im_1 = im_1.convert("F") + if im_2.mode != "F": + im_2 = im_2.convert("F") + if im_1.size != im_2.size: + # crop both arguments to a common size + size = ( + min(im_1.size[0], im_2.size[0]), + min(im_1.size[1], im_2.size[1]), + ) + if im_1.size != size: + im_1 = im_1.crop((0, 0) + size) + if im_2.size != size: + im_2 = im_2.crop((0, 0) + size) + out = Image.new(mode or im_1.mode, im_1.size, None) + try: + op = getattr(_imagingmath, f"{op}_{im_1.mode}") + except AttributeError as e: + msg = f"bad operand type for '{op}'" + raise TypeError(msg) from e + _imagingmath.binop(op, out.getim(), im_1.getim(), im_2.getim()) + return _Operand(out) + + # unary operators + def __bool__(self) -> bool: + # an image is "true" if it contains at least one non-zero pixel + return self.im.getbbox() is not None + + def __abs__(self) -> _Operand: + return self.apply("abs", self) + + def __pos__(self) -> _Operand: + return self + + def __neg__(self) -> _Operand: + return self.apply("neg", self) + + # binary operators + def __add__(self, other: _Operand | float) -> _Operand: + return self.apply("add", self, other) + + def __radd__(self, other: _Operand | float) -> _Operand: + return self.apply("add", other, self) + + def __sub__(self, other: _Operand | float) -> _Operand: + return self.apply("sub", self, other) + + def __rsub__(self, other: _Operand | float) -> _Operand: + return self.apply("sub", other, self) + + def __mul__(self, other: _Operand | float) -> _Operand: + return self.apply("mul", self, other) + + def __rmul__(self, other: _Operand | float) -> _Operand: + return self.apply("mul", other, self) + + def __truediv__(self, other: _Operand | float) -> 
_Operand: + return self.apply("div", self, other) + + def __rtruediv__(self, other: _Operand | float) -> _Operand: + return self.apply("div", other, self) + + def __mod__(self, other: _Operand | float) -> _Operand: + return self.apply("mod", self, other) + + def __rmod__(self, other: _Operand | float) -> _Operand: + return self.apply("mod", other, self) + + def __pow__(self, other: _Operand | float) -> _Operand: + return self.apply("pow", self, other) + + def __rpow__(self, other: _Operand | float) -> _Operand: + return self.apply("pow", other, self) + + # bitwise + def __invert__(self) -> _Operand: + return self.apply("invert", self) + + def __and__(self, other: _Operand | float) -> _Operand: + return self.apply("and", self, other) + + def __rand__(self, other: _Operand | float) -> _Operand: + return self.apply("and", other, self) + + def __or__(self, other: _Operand | float) -> _Operand: + return self.apply("or", self, other) + + def __ror__(self, other: _Operand | float) -> _Operand: + return self.apply("or", other, self) + + def __xor__(self, other: _Operand | float) -> _Operand: + return self.apply("xor", self, other) + + def __rxor__(self, other: _Operand | float) -> _Operand: + return self.apply("xor", other, self) + + def __lshift__(self, other: _Operand | float) -> _Operand: + return self.apply("lshift", self, other) + + def __rshift__(self, other: _Operand | float) -> _Operand: + return self.apply("rshift", self, other) + + # logical + def __eq__(self, other: _Operand | float) -> _Operand: # type: ignore[override] + return self.apply("eq", self, other) + + def __ne__(self, other: _Operand | float) -> _Operand: # type: ignore[override] + return self.apply("ne", self, other) + + def __lt__(self, other: _Operand | float) -> _Operand: + return self.apply("lt", self, other) + + def __le__(self, other: _Operand | float) -> _Operand: + return self.apply("le", self, other) + + def __gt__(self, other: _Operand | float) -> _Operand: + return self.apply("gt", self, other) + + def __ge__(self, other: _Operand | float) -> _Operand: + return self.apply("ge", self, other) + + +# conversions +def imagemath_int(self: _Operand) -> _Operand: + return _Operand(self.im.convert("I")) + + +def imagemath_float(self: _Operand) -> _Operand: + return _Operand(self.im.convert("F")) + + +# logical +def imagemath_equal(self: _Operand, other: _Operand | float | None) -> _Operand: + return self.apply("eq", self, other, mode="I") + + +def imagemath_notequal(self: _Operand, other: _Operand | float | None) -> _Operand: + return self.apply("ne", self, other, mode="I") + + +def imagemath_min(self: _Operand, other: _Operand | float | None) -> _Operand: + return self.apply("min", self, other) + + +def imagemath_max(self: _Operand, other: _Operand | float | None) -> _Operand: + return self.apply("max", self, other) + + +def imagemath_convert(self: _Operand, mode: str) -> _Operand: + return _Operand(self.im.convert(mode)) + + +ops = { + "int": imagemath_int, + "float": imagemath_float, + "equal": imagemath_equal, + "notequal": imagemath_notequal, + "min": imagemath_min, + "max": imagemath_max, + "convert": imagemath_convert, +} + + +def lambda_eval( + expression: Callable[[dict[str, Any]], Any], + options: dict[str, Any] = {}, + **kw: Any, +) -> Any: + """ + Returns the result of an image function. + + :py:mod:`~PIL.ImageMath` only supports single-layer images. To process multi-band + images, use the :py:meth:`~PIL.Image.Image.split` method or + :py:func:`~PIL.Image.merge` function. 
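For example, averaging two single-band images through ``lambda_eval`` looks like this (a sketch; any same-size "L" or "I" images work)::

    from PIL import Image, ImageMath

    im1 = Image.new("L", (4, 4), 40)
    im2 = Image.new("L", (4, 4), 60)

    # The callable receives a dict of wrapped _Operand images; ordinary
    # arithmetic on them dispatches to the C-level unop/binop kernels above.
    out = ImageMath.lambda_eval(lambda a: (a["x"] + a["y"]) / 2, x=im1, y=im2)
    print(out.getpixel((0, 0)))  # 50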
+
+    :param expression: A function that receives a dictionary.
+    :param options: Values to add to the function's dictionary. Deprecated.
+                    You can instead use one or more keyword arguments.
+    :param **kw: Values to add to the function's dictionary.
+    :return: The expression result. This is usually an image object, but can
+             also be an integer, a floating point value, or a pixel tuple,
+             depending on the expression.
+    """
+
+    if options:
+        deprecate(
+            "ImageMath.lambda_eval options",
+            12,
+            "ImageMath.lambda_eval keyword arguments",
+        )
+
+    args: dict[str, Any] = ops.copy()
+    args.update(options)
+    args.update(kw)
+    for k, v in args.items():
+        if isinstance(v, Image.Image):
+            args[k] = _Operand(v)
+
+    out = expression(args)
+    try:
+        return out.im
+    except AttributeError:
+        return out
+
+
+def unsafe_eval(
+    expression: str,
+    options: dict[str, Any] = {},
+    **kw: Any,
+) -> Any:
+    """
+    Evaluates an image expression. This uses Python's ``eval()`` function to process
+    the expression string, and carries the security risks of doing so. It is not
+    recommended to process expressions without considering this.
+    :py:meth:`~lambda_eval` is a more secure alternative.
+
+    :py:mod:`~PIL.ImageMath` only supports single-layer images. To process multi-band
+    images, use the :py:meth:`~PIL.Image.Image.split` method or
+    :py:func:`~PIL.Image.merge` function.
+
+    :param expression: A string containing a Python-style expression.
+    :param options: Values to add to the evaluation context. Deprecated.
+                    You can instead use one or more keyword arguments.
+    :param **kw: Values to add to the evaluation context.
+    :return: The evaluated expression. This is usually an image object, but can
+             also be an integer, a floating point value, or a pixel tuple,
+             depending on the expression.
+    """
+
+    if options:
+        deprecate(
+            "ImageMath.unsafe_eval options",
+            12,
+            "ImageMath.unsafe_eval keyword arguments",
+        )
+
+    # build execution namespace
+    args: dict[str, Any] = ops.copy()
+    for k in [*options, *kw]:
+        if "__" in k or hasattr(builtins, k):
+            msg = f"'{k}' not allowed"
+            raise ValueError(msg)
+
+    args.update(options)
+    args.update(kw)
+    for k, v in args.items():
+        if isinstance(v, Image.Image):
+            args[k] = _Operand(v)
+
+    compiled_code = compile(expression, "<string>", "eval")
+
+    def scan(code: CodeType) -> None:
+        for const in code.co_consts:
+            if type(const) is type(compiled_code):
+                scan(const)
+
+        for name in code.co_names:
+            if name not in args and name != "abs":
+                msg = f"'{name}' not allowed"
+                raise ValueError(msg)
+
+    scan(compiled_code)
+    out = builtins.eval(expression, {"__builtins": {"abs": abs}}, args)
+    try:
+        return out.im
+    except AttributeError:
+        return out
+
+
+def eval(
+    expression: str,
+    _dict: dict[str, Any] = {},
+    **kw: Any,
+) -> Any:
+    """
+    Evaluates an image expression.
+
+    Deprecated. Use lambda_eval() or unsafe_eval() instead.
+
+    :param expression: A string containing a Python-style expression.
+    :param _dict: Values to add to the evaluation context. You
+                  can either use a dictionary, or one or more keyword
+                  arguments.
+    :return: The evaluated expression. This is usually an image object, but can
+             also be an integer, a floating point value, or a pixel tuple,
+             depending on the expression.
+
+    .. deprecated:: 10.3.0
+    """
+
+    deprecate(
+        "ImageMath.eval",
+        12,
+        "ImageMath.lambda_eval or ImageMath.unsafe_eval",
+    )
+    return unsafe_eval(expression, _dict, **kw)
diff --git a/py311/lib/python3.11/site-packages/PIL/ImageMode.py b/py311/lib/python3.11/site-packages/PIL/ImageMode.py
new file mode 100644
index 0000000000000000000000000000000000000000..92a08d2cbcb4f8f2f7e24a265b83cd6c1012b41f
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/PIL/ImageMode.py
@@ -0,0 +1,92 @@
+#
+# The Python Imaging Library.
+# $Id$
+#
+# standard mode descriptors
+#
+# History:
+# 2006-03-20 fl Added
+#
+# Copyright (c) 2006 by Secret Labs AB.
+# Copyright (c) 2006 by Fredrik Lundh.
+#
+# See the README file for information on usage and redistribution.
+#
+from __future__ import annotations
+
+import sys
+from functools import lru_cache
+from typing import NamedTuple
+
+from ._deprecate import deprecate
+
+
+class ModeDescriptor(NamedTuple):
+    """Wrapper for mode strings."""
+
+    mode: str
+    bands: tuple[str, ...]
+    basemode: str
+    basetype: str
+    typestr: str
+
+    def __str__(self) -> str:
+        return self.mode
+
+
+@lru_cache
+def getmode(mode: str) -> ModeDescriptor:
+    """Gets a mode descriptor for the given mode."""
+    endian = "<" if sys.byteorder == "little" else ">"
+
+    modes = {
+        # core modes
+        # Bits need to be extended to bytes
+        "1": ("L", "L", ("1",), "|b1"),
+        "L": ("L", "L", ("L",), "|u1"),
+        "I": ("L", "I", ("I",), f"{endian}i4"),
+        "F": ("L", "F", ("F",), f"{endian}f4"),
+        "P": ("P", "L", ("P",), "|u1"),
+        "RGB": ("RGB", "L", ("R", "G", "B"), "|u1"),
+        "RGBX": ("RGB", "L", ("R", "G", "B", "X"), "|u1"),
+        "RGBA": ("RGB", "L", ("R", "G", "B", "A"), "|u1"),
+        "CMYK": ("RGB", "L", ("C", "M", "Y", "K"), "|u1"),
+        "YCbCr": ("RGB", "L", ("Y", "Cb", "Cr"), "|u1"),
+        # UNDONE - unsigned |u1i1i1
+        "LAB": ("RGB", "L", ("L", "A", "B"), "|u1"),
+        "HSV": ("RGB", "L", ("H", "S", "V"), "|u1"),
+        # extra experimental modes
+        "RGBa": ("RGB", "L", ("R", "G", "B", "a"), "|u1"),
+        "BGR;15": ("RGB", "L", ("B", "G", "R"), "|u1"),
+        "BGR;16": ("RGB", "L", ("B", "G", "R"), "|u1"),
+        "BGR;24": ("RGB", "L", ("B", "G", "R"), "|u1"),
+        "LA": ("L", "L", ("L", "A"), "|u1"),
+        "La": ("L", "L", ("L", "a"), "|u1"),
+        "PA": ("RGB", "L", ("P", "A"), "|u1"),
+    }
+    if mode in modes:
+        if mode in ("BGR;15", "BGR;16", "BGR;24"):
+            deprecate(mode, 12)
+        base_mode, base_type, bands, type_str = modes[mode]
+        return ModeDescriptor(mode, bands, base_mode, base_type, type_str)
+
+    mapping_modes = {
+        # I;16 == I;16L, and I;32 == I;32L
+        "I;16": "<u2",
+        "I;16S": "<i2",
+        "I;16L": "<u2",
+        "I;16LS": "<i2",
+        "I;16B": ">u2",
+        "I;16BS": ">i2",
+        "I;16N": f"{endian}u2",
+        "I;16NS": f"{endian}i2",
+        "I;32": "<u4",
+        "I;32B": ">u4",
+        "I;32L": "<u4",
+        "I;32S": "<i4",
+        "I;32BS": ">i4",
+        "I;32LS": "<i4",
+    }
+
+    type_str = mapping_modes[mode]
+    bands = ("I",)
+    return ModeDescriptor(mode, bands, "L", "L", type_str)
diff --git a/py311/lib/python3.11/site-packages/PIL/ImageMorph.py b/py311/lib/python3.11/site-packages/PIL/ImageMorph.py
new file mode 100644
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/PIL/ImageMorph.py
+#
+# The Python Imaging Library
+#
+# A binary morphology add-on for the Python Imaging Library
+#
+# History:
+#   2014-06-04 Initial version.
+#
+# Copyright (c) 2014 Dov Grobgeld <dov.grobgeld@gmail.com>
+#
+from __future__ import annotations
+
+import re
+
+from . import Image, _imagingmorph
+
+LUT_SIZE = 1 << 9
+
+# fmt: off
+ROTATION_MATRIX = [
+    6, 3, 0,
+    7, 4, 1,
+    8, 5, 2,
+]
+MIRROR_MATRIX = [
+    2, 1, 0,
+    5, 4, 3,
+    8, 7, 6,
+]
+# fmt: on
+
+
+class LutBuilder:
+    """A class for building a MorphLut from a descriptive language
+
+    The input patterns argument is a list of pattern strings like this::
+
+        4:(...
+           .1.
+           111)->1
+
+    (whitespace, including line breaks, is ignored). The leading option 4
+    describes a series of symmetry operations (in this case a
+    4-rotation); the pattern itself is described by:
+
+    - . or X - Ignore
+    - 1 - Pixel is on
+    - 0 - Pixel is off
+
+    The result of the operation is given after the "->" string.
+
+    If no pattern matches, the current pixel value is returned
+    unchanged.
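To make the 3x3 encoding concrete: each LUT index is a 9-bit neighbourhood, one bit per pixel, with bit 4 as the centre pixel (hence LUT_SIZE == 1 << 9 == 512). The helper below is purely illustrative, not part of the module::

    # Decode a LUT index into the three rows of its 3x3 neighbourhood.
    def neighbourhood(index: int) -> list[str]:
        bits = [(index >> i) & 1 for i in range(9)]
        return ["".join(map(str, bits[r : r + 3])) for r in (0, 3, 6)]

    print(neighbourhood(1 << 4))  # ['000', '010', '000'] -- only the centre is on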
+ + Operations: + + - 4 - 4 way rotation + - N - Negate + - 1 - Dummy op for no other operation (an op must always be given) + - M - Mirroring + + Example:: + + lb = LutBuilder(patterns = ["4:(... .1. 111)->1"]) + lut = lb.build_lut() + + """ + + def __init__( + self, patterns: list[str] | None = None, op_name: str | None = None + ) -> None: + if patterns is not None: + self.patterns = patterns + else: + self.patterns = [] + self.lut: bytearray | None = None + if op_name is not None: + known_patterns = { + "corner": ["1:(... ... ...)->0", "4:(00. 01. ...)->1"], + "dilation4": ["4:(... .0. .1.)->1"], + "dilation8": ["4:(... .0. .1.)->1", "4:(... .0. ..1)->1"], + "erosion4": ["4:(... .1. .0.)->0"], + "erosion8": ["4:(... .1. .0.)->0", "4:(... .1. ..0)->0"], + "edge": [ + "1:(... ... ...)->0", + "4:(.0. .1. ...)->1", + "4:(01. .1. ...)->1", + ], + } + if op_name not in known_patterns: + msg = f"Unknown pattern {op_name}!" + raise Exception(msg) + + self.patterns = known_patterns[op_name] + + def add_patterns(self, patterns: list[str]) -> None: + self.patterns += patterns + + def build_default_lut(self) -> None: + symbols = [0, 1] + m = 1 << 4 # pos of current pixel + self.lut = bytearray(symbols[(i & m) > 0] for i in range(LUT_SIZE)) + + def get_lut(self) -> bytearray | None: + return self.lut + + def _string_permute(self, pattern: str, permutation: list[int]) -> str: + """string_permute takes a pattern and a permutation and returns the + string permuted according to the permutation list. + """ + assert len(permutation) == 9 + return "".join(pattern[p] for p in permutation) + + def _pattern_permute( + self, basic_pattern: str, options: str, basic_result: int + ) -> list[tuple[str, int]]: + """pattern_permute takes a basic pattern and its result and clones + the pattern according to the modifications described in the $options + parameter. It returns a list of all cloned patterns.""" + patterns = [(basic_pattern, basic_result)] + + # rotations + if "4" in options: + res = patterns[-1][1] + for i in range(4): + patterns.append( + (self._string_permute(patterns[-1][0], ROTATION_MATRIX), res) + ) + # mirror + if "M" in options: + n = len(patterns) + for pattern, res in patterns[:n]: + patterns.append((self._string_permute(pattern, MIRROR_MATRIX), res)) + + # negate + if "N" in options: + n = len(patterns) + for pattern, res in patterns[:n]: + # Swap 0 and 1 + pattern = pattern.replace("0", "Z").replace("1", "0").replace("Z", "1") + res = 1 - int(res) + patterns.append((pattern, res)) + + return patterns + + def build_lut(self) -> bytearray: + """Compile all patterns into a morphology lut. + + TBD :Build based on (file) morphlut:modify_lut + """ + self.build_default_lut() + assert self.lut is not None + patterns = [] + + # Parse and create symmetries of the patterns strings + for p in self.patterns: + m = re.search(r"(\w*):?\s*\((.+?)\)\s*->\s*(\d)", p.replace("\n", "")) + if not m: + msg = 'Syntax error in pattern "' + p + '"' + raise Exception(msg) + options = m.group(1) + pattern = m.group(2) + result = int(m.group(3)) + + # Get rid of spaces + pattern = pattern.replace(" ", "").replace("\n", "") + + patterns += self._pattern_permute(pattern, options, result) + + # compile the patterns into regular expressions for speed + compiled_patterns = [] + for pattern in patterns: + p = pattern[0].replace(".", "X").replace("X", "[01]") + compiled_patterns.append((re.compile(p), pattern[1])) + + # Step through table and find patterns that match. + # Note that all the patterns are searched. 
The last one + # caught overrides + for i in range(LUT_SIZE): + # Build the bit pattern + bitpattern = bin(i)[2:] + bitpattern = ("0" * (9 - len(bitpattern)) + bitpattern)[::-1] + + for pattern, r in compiled_patterns: + if pattern.match(bitpattern): + self.lut[i] = [0, 1][r] + + return self.lut + + +class MorphOp: + """A class for binary morphological operators""" + + def __init__( + self, + lut: bytearray | None = None, + op_name: str | None = None, + patterns: list[str] | None = None, + ) -> None: + """Create a binary morphological operator""" + self.lut = lut + if op_name is not None: + self.lut = LutBuilder(op_name=op_name).build_lut() + elif patterns is not None: + self.lut = LutBuilder(patterns=patterns).build_lut() + + def apply(self, image: Image.Image) -> tuple[int, Image.Image]: + """Run a single morphological operation on an image + + Returns a tuple of the number of changed pixels and the + morphed image""" + if self.lut is None: + msg = "No operator loaded" + raise Exception(msg) + + if image.mode != "L": + msg = "Image mode must be L" + raise ValueError(msg) + outimage = Image.new(image.mode, image.size, None) + count = _imagingmorph.apply(bytes(self.lut), image.getim(), outimage.getim()) + return count, outimage + + def match(self, image: Image.Image) -> list[tuple[int, int]]: + """Get a list of coordinates matching the morphological operation on + an image. + + Returns a list of tuples of (x,y) coordinates + of all matching pixels. See :ref:`coordinate-system`.""" + if self.lut is None: + msg = "No operator loaded" + raise Exception(msg) + + if image.mode != "L": + msg = "Image mode must be L" + raise ValueError(msg) + return _imagingmorph.match(bytes(self.lut), image.getim()) + + def get_on_pixels(self, image: Image.Image) -> list[tuple[int, int]]: + """Get a list of all turned on pixels in a binary image + + Returns a list of tuples of (x,y) coordinates + of all matching pixels. See :ref:`coordinate-system`.""" + + if image.mode != "L": + msg = "Image mode must be L" + raise ValueError(msg) + return _imagingmorph.get_on_pixels(image.getim()) + + def load_lut(self, filename: str) -> None: + """Load an operator from an mrl file""" + with open(filename, "rb") as f: + self.lut = bytearray(f.read()) + + if len(self.lut) != LUT_SIZE: + self.lut = None + msg = "Wrong size operator file!" + raise Exception(msg) + + def save_lut(self, filename: str) -> None: + """Save an operator to an mrl file""" + if self.lut is None: + msg = "No operator loaded" + raise Exception(msg) + with open(filename, "wb") as f: + f.write(self.lut) + + def set_lut(self, lut: bytearray | None) -> None: + """Set the lut from an external source""" + self.lut = lut diff --git a/py311/lib/python3.11/site-packages/PIL/ImageOps.py b/py311/lib/python3.11/site-packages/PIL/ImageOps.py new file mode 100644 index 0000000000000000000000000000000000000000..da28854b5745459cc682beabb53480855092e7a4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageOps.py @@ -0,0 +1,745 @@ +# +# The Python Imaging Library. +# $Id$ +# +# standard image operations +# +# History: +# 2001-10-20 fl Created +# 2001-10-23 fl Added autocontrast operator +# 2001-12-18 fl Added Kevin's fit operator +# 2004-03-14 fl Fixed potential division by zero in equalize +# 2005-05-05 fl Fixed equalize for low number of values +# +# Copyright (c) 2001-2004 by Secret Labs AB +# Copyright (c) 2001-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. 
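Before moving on to ImageOps: a usage sketch for the MorphOp class defined above, eroding a small binary blob (the image content is illustrative)::

    from PIL import Image, ImageMorph

    # MorphOp expects an "L"-mode image; treat it as binary (0 or 255).
    im = Image.new("L", (16, 16), 0)
    im.paste(255, (4, 4, 12, 12))  # an 8x8 white square

    op = ImageMorph.MorphOp(op_name="erosion8")
    changed, eroded = op.apply(im)
    print(changed)  # expected 28: the square's one-pixel border is removed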
+# +from __future__ import annotations + +import functools +import operator +import re +from collections.abc import Sequence +from typing import Literal, Protocol, cast, overload + +from . import ExifTags, Image, ImagePalette + +# +# helpers + + +def _border(border: int | tuple[int, ...]) -> tuple[int, int, int, int]: + if isinstance(border, tuple): + if len(border) == 2: + left, top = right, bottom = border + elif len(border) == 4: + left, top, right, bottom = border + else: + left = top = right = bottom = border + return left, top, right, bottom + + +def _color(color: str | int | tuple[int, ...], mode: str) -> int | tuple[int, ...]: + if isinstance(color, str): + from . import ImageColor + + color = ImageColor.getcolor(color, mode) + return color + + +def _lut(image: Image.Image, lut: list[int]) -> Image.Image: + if image.mode == "P": + # FIXME: apply to lookup table, not image data + msg = "mode P support coming soon" + raise NotImplementedError(msg) + elif image.mode in ("L", "RGB"): + if image.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return image.point(lut) + else: + msg = f"not supported for mode {image.mode}" + raise OSError(msg) + + +# +# actions + + +def autocontrast( + image: Image.Image, + cutoff: float | tuple[float, float] = 0, + ignore: int | Sequence[int] | None = None, + mask: Image.Image | None = None, + preserve_tone: bool = False, +) -> Image.Image: + """ + Maximize (normalize) image contrast. This function calculates a + histogram of the input image (or mask region), removes ``cutoff`` percent of the + lightest and darkest pixels from the histogram, and remaps the image + so that the darkest pixel becomes black (0), and the lightest + becomes white (255). + + :param image: The image to process. + :param cutoff: The percent to cut off from the histogram on the low and + high ends. Either a tuple of (low, high), or a single + number for both. + :param ignore: The background pixel value (use None for no background). + :param mask: Histogram used in contrast operation is computed using pixels + within the mask. If no mask is given the entire image is used + for histogram computation. + :param preserve_tone: Preserve image tone in Photoshop-like style autocontrast. + + .. versionadded:: 8.2.0 + + :return: An image. 
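+
+    A minimal usage sketch (illustrative; the filename is a placeholder)::
+
+        from PIL import Image, ImageOps
+
+        with Image.open("photo.jpg") as im:
+            # clip 2% of the darkest and lightest pixels before remapping
+            out = ImageOps.autocontrast(im, cutoff=2)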
+ """ + if preserve_tone: + histogram = image.convert("L").histogram(mask) + else: + histogram = image.histogram(mask) + + lut = [] + for layer in range(0, len(histogram), 256): + h = histogram[layer : layer + 256] + if ignore is not None: + # get rid of outliers + if isinstance(ignore, int): + h[ignore] = 0 + else: + for ix in ignore: + h[ix] = 0 + if cutoff: + # cut off pixels from both ends of the histogram + if not isinstance(cutoff, tuple): + cutoff = (cutoff, cutoff) + # get number of pixels + n = 0 + for ix in range(256): + n = n + h[ix] + # remove cutoff% pixels from the low end + cut = int(n * cutoff[0] // 100) + for lo in range(256): + if cut > h[lo]: + cut = cut - h[lo] + h[lo] = 0 + else: + h[lo] -= cut + cut = 0 + if cut <= 0: + break + # remove cutoff% samples from the high end + cut = int(n * cutoff[1] // 100) + for hi in range(255, -1, -1): + if cut > h[hi]: + cut = cut - h[hi] + h[hi] = 0 + else: + h[hi] -= cut + cut = 0 + if cut <= 0: + break + # find lowest/highest samples after preprocessing + for lo in range(256): + if h[lo]: + break + for hi in range(255, -1, -1): + if h[hi]: + break + if hi <= lo: + # don't bother + lut.extend(list(range(256))) + else: + scale = 255.0 / (hi - lo) + offset = -lo * scale + for ix in range(256): + ix = int(ix * scale + offset) + if ix < 0: + ix = 0 + elif ix > 255: + ix = 255 + lut.append(ix) + return _lut(image, lut) + + +def colorize( + image: Image.Image, + black: str | tuple[int, ...], + white: str | tuple[int, ...], + mid: str | int | tuple[int, ...] | None = None, + blackpoint: int = 0, + whitepoint: int = 255, + midpoint: int = 127, +) -> Image.Image: + """ + Colorize grayscale image. + This function calculates a color wedge which maps all black pixels in + the source image to the first color and all white pixels to the + second color. If ``mid`` is specified, it uses three-color mapping. + The ``black`` and ``white`` arguments should be RGB tuples or color names; + optionally you can use three-color mapping by also specifying ``mid``. + Mapping positions for any of the colors can be specified + (e.g. ``blackpoint``), where these parameters are the integer + value corresponding to where the corresponding color should be mapped. + These parameters must have logical order, such that + ``blackpoint <= midpoint <= whitepoint`` (if ``mid`` is specified). + + :param image: The image to colorize. + :param black: The color to use for black input pixels. + :param white: The color to use for white input pixels. + :param mid: The color to use for midtone input pixels. + :param blackpoint: an int value [0, 255] for the black mapping. + :param whitepoint: an int value [0, 255] for the white mapping. + :param midpoint: an int value [0, 255] for the midtone mapping. + :return: An image. 
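+
+    For example, a sketch of a simple sepia-style mapping (the hex colors are
+    illustrative)::
+
+        from PIL import Image, ImageOps
+
+        gray = Image.open("photo.jpg").convert("L")
+        toned = ImageOps.colorize(gray, black="#402810", white="#fff0c0")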
+ """ + + # Initial asserts + assert image.mode == "L" + if mid is None: + assert 0 <= blackpoint <= whitepoint <= 255 + else: + assert 0 <= blackpoint <= midpoint <= whitepoint <= 255 + + # Define colors from arguments + rgb_black = cast(Sequence[int], _color(black, "RGB")) + rgb_white = cast(Sequence[int], _color(white, "RGB")) + rgb_mid = cast(Sequence[int], _color(mid, "RGB")) if mid is not None else None + + # Empty lists for the mapping + red = [] + green = [] + blue = [] + + # Create the low-end values + for i in range(blackpoint): + red.append(rgb_black[0]) + green.append(rgb_black[1]) + blue.append(rgb_black[2]) + + # Create the mapping (2-color) + if rgb_mid is None: + range_map = range(whitepoint - blackpoint) + + for i in range_map: + red.append( + rgb_black[0] + i * (rgb_white[0] - rgb_black[0]) // len(range_map) + ) + green.append( + rgb_black[1] + i * (rgb_white[1] - rgb_black[1]) // len(range_map) + ) + blue.append( + rgb_black[2] + i * (rgb_white[2] - rgb_black[2]) // len(range_map) + ) + + # Create the mapping (3-color) + else: + range_map1 = range(midpoint - blackpoint) + range_map2 = range(whitepoint - midpoint) + + for i in range_map1: + red.append( + rgb_black[0] + i * (rgb_mid[0] - rgb_black[0]) // len(range_map1) + ) + green.append( + rgb_black[1] + i * (rgb_mid[1] - rgb_black[1]) // len(range_map1) + ) + blue.append( + rgb_black[2] + i * (rgb_mid[2] - rgb_black[2]) // len(range_map1) + ) + for i in range_map2: + red.append(rgb_mid[0] + i * (rgb_white[0] - rgb_mid[0]) // len(range_map2)) + green.append( + rgb_mid[1] + i * (rgb_white[1] - rgb_mid[1]) // len(range_map2) + ) + blue.append(rgb_mid[2] + i * (rgb_white[2] - rgb_mid[2]) // len(range_map2)) + + # Create the high-end values + for i in range(256 - whitepoint): + red.append(rgb_white[0]) + green.append(rgb_white[1]) + blue.append(rgb_white[2]) + + # Return converted image + image = image.convert("RGB") + return _lut(image, red + green + blue) + + +def contain( + image: Image.Image, size: tuple[int, int], method: int = Image.Resampling.BICUBIC +) -> Image.Image: + """ + Returns a resized version of the image, set to the maximum width and height + within the requested size, while maintaining the original aspect ratio. + + :param image: The image to resize. + :param size: The requested output size in pixels, given as a + (width, height) tuple. + :param method: Resampling method to use. Default is + :py:attr:`~PIL.Image.Resampling.BICUBIC`. + See :ref:`concept-filters`. + :return: An image. + """ + + im_ratio = image.width / image.height + dest_ratio = size[0] / size[1] + + if im_ratio != dest_ratio: + if im_ratio > dest_ratio: + new_height = round(image.height / image.width * size[0]) + if new_height != size[1]: + size = (size[0], new_height) + else: + new_width = round(image.width / image.height * size[1]) + if new_width != size[0]: + size = (new_width, size[1]) + return image.resize(size, resample=method) + + +def cover( + image: Image.Image, size: tuple[int, int], method: int = Image.Resampling.BICUBIC +) -> Image.Image: + """ + Returns a resized version of the image, so that the requested size is + covered, while maintaining the original aspect ratio. + + :param image: The image to resize. + :param size: The requested output size in pixels, given as a + (width, height) tuple. + :param method: Resampling method to use. Default is + :py:attr:`~PIL.Image.Resampling.BICUBIC`. + See :ref:`concept-filters`. + :return: An image. 
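+
+    A sketch contrasting this with :py:func:`~PIL.ImageOps.contain` (sizes are
+    illustrative)::
+
+        from PIL import Image, ImageOps
+
+        with Image.open("photo.jpg") as im:
+            inside = ImageOps.contain(im, (128, 128))  # no larger than 128x128
+            covering = ImageOps.cover(im, (128, 128))  # at least 128x128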
+ """ + + im_ratio = image.width / image.height + dest_ratio = size[0] / size[1] + + if im_ratio != dest_ratio: + if im_ratio < dest_ratio: + new_height = round(image.height / image.width * size[0]) + if new_height != size[1]: + size = (size[0], new_height) + else: + new_width = round(image.width / image.height * size[1]) + if new_width != size[0]: + size = (new_width, size[1]) + return image.resize(size, resample=method) + + +def pad( + image: Image.Image, + size: tuple[int, int], + method: int = Image.Resampling.BICUBIC, + color: str | int | tuple[int, ...] | None = None, + centering: tuple[float, float] = (0.5, 0.5), +) -> Image.Image: + """ + Returns a resized and padded version of the image, expanded to fill the + requested aspect ratio and size. + + :param image: The image to resize and crop. + :param size: The requested output size in pixels, given as a + (width, height) tuple. + :param method: Resampling method to use. Default is + :py:attr:`~PIL.Image.Resampling.BICUBIC`. + See :ref:`concept-filters`. + :param color: The background color of the padded image. + :param centering: Control the position of the original image within the + padded version. + + (0.5, 0.5) will keep the image centered + (0, 0) will keep the image aligned to the top left + (1, 1) will keep the image aligned to the bottom + right + :return: An image. + """ + + resized = contain(image, size, method) + if resized.size == size: + out = resized + else: + out = Image.new(image.mode, size, color) + if resized.palette: + palette = resized.getpalette() + if palette is not None: + out.putpalette(palette) + if resized.width != size[0]: + x = round((size[0] - resized.width) * max(0, min(centering[0], 1))) + out.paste(resized, (x, 0)) + else: + y = round((size[1] - resized.height) * max(0, min(centering[1], 1))) + out.paste(resized, (0, y)) + return out + + +def crop(image: Image.Image, border: int = 0) -> Image.Image: + """ + Remove border from image. The same amount of pixels are removed + from all four sides. This function works on all image modes. + + .. seealso:: :py:meth:`~PIL.Image.Image.crop` + + :param image: The image to crop. + :param border: The number of pixels to remove. + :return: An image. + """ + left, top, right, bottom = _border(border) + return image.crop((left, top, image.size[0] - right, image.size[1] - bottom)) + + +def scale( + image: Image.Image, factor: float, resample: int = Image.Resampling.BICUBIC +) -> Image.Image: + """ + Returns a rescaled image by a specific factor given in parameter. + A factor greater than 1 expands the image, between 0 and 1 contracts the + image. + + :param image: The image to rescale. + :param factor: The expansion factor, as a float. + :param resample: Resampling method to use. Default is + :py:attr:`~PIL.Image.Resampling.BICUBIC`. + See :ref:`concept-filters`. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + if factor == 1: + return image.copy() + elif factor <= 0: + msg = "the factor must be greater than 0" + raise ValueError(msg) + else: + size = (round(factor * image.width), round(factor * image.height)) + return image.resize(size, resample) + + +class SupportsGetMesh(Protocol): + """ + An object that supports the ``getmesh`` method, taking an image as an + argument, and returning a list of tuples. Each tuple contains two tuples, + the source box as a tuple of 4 integers, and a tuple of 8 integers for the + final quadrilateral, in order of top left, bottom left, bottom right, top + right. 
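+
+    A minimal sketch of a conforming deformer for use with
+    :py:func:`deform` (an identity mesh, purely illustrative)::
+
+        class IdentityDeformer:
+            def getmesh(self, image):
+                w, h = image.size
+                # one quad mapping the whole image onto itself
+                return [((0, 0, w, h), (0, 0, 0, h, w, h, w, 0))]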
+ """ + + def getmesh( + self, image: Image.Image + ) -> list[ + tuple[tuple[int, int, int, int], tuple[int, int, int, int, int, int, int, int]] + ]: ... + + +def deform( + image: Image.Image, + deformer: SupportsGetMesh, + resample: int = Image.Resampling.BILINEAR, +) -> Image.Image: + """ + Deform the image. + + :param image: The image to deform. + :param deformer: A deformer object. Any object that implements a + ``getmesh`` method can be used. + :param resample: An optional resampling filter. Same values possible as + in the PIL.Image.transform function. + :return: An image. + """ + return image.transform( + image.size, Image.Transform.MESH, deformer.getmesh(image), resample + ) + + +def equalize(image: Image.Image, mask: Image.Image | None = None) -> Image.Image: + """ + Equalize the image histogram. This function applies a non-linear + mapping to the input image, in order to create a uniform + distribution of grayscale values in the output image. + + :param image: The image to equalize. + :param mask: An optional mask. If given, only the pixels selected by + the mask are included in the analysis. + :return: An image. + """ + if image.mode == "P": + image = image.convert("RGB") + h = image.histogram(mask) + lut = [] + for b in range(0, len(h), 256): + histo = [_f for _f in h[b : b + 256] if _f] + if len(histo) <= 1: + lut.extend(list(range(256))) + else: + step = (functools.reduce(operator.add, histo) - histo[-1]) // 255 + if not step: + lut.extend(list(range(256))) + else: + n = step // 2 + for i in range(256): + lut.append(n // step) + n = n + h[i + b] + return _lut(image, lut) + + +def expand( + image: Image.Image, + border: int | tuple[int, ...] = 0, + fill: str | int | tuple[int, ...] = 0, +) -> Image.Image: + """ + Add border to the image + + :param image: The image to expand. + :param border: Border width, in pixels. + :param fill: Pixel fill value (a color value). Default is 0 (black). + :return: An image. + """ + left, top, right, bottom = _border(border) + width = left + image.size[0] + right + height = top + image.size[1] + bottom + color = _color(fill, image.mode) + if image.palette: + palette = ImagePalette.ImagePalette(palette=image.getpalette()) + if isinstance(color, tuple) and (len(color) == 3 or len(color) == 4): + color = palette.getcolor(color) + else: + palette = None + out = Image.new(image.mode, (width, height), color) + if palette: + out.putpalette(palette.palette) + out.paste(image, (left, top)) + return out + + +def fit( + image: Image.Image, + size: tuple[int, int], + method: int = Image.Resampling.BICUBIC, + bleed: float = 0.0, + centering: tuple[float, float] = (0.5, 0.5), +) -> Image.Image: + """ + Returns a resized and cropped version of the image, cropped to the + requested aspect ratio and size. + + This function was contributed by Kevin Cazabon. + + :param image: The image to resize and crop. + :param size: The requested output size in pixels, given as a + (width, height) tuple. + :param method: Resampling method to use. Default is + :py:attr:`~PIL.Image.Resampling.BICUBIC`. + See :ref:`concept-filters`. + :param bleed: Remove a border around the outside of the image from all + four edges. The value is a decimal percentage (use 0.01 for + one percent). The default value is 0 (no border). + Cannot be greater than or equal to 0.5. + :param centering: Control the cropping position. Use (0.5, 0.5) for + center cropping (e.g. if cropping the width, take 50% off + of the left side, and therefore 50% off the right side). 
+ (0.0, 0.0) will crop from the top left corner (i.e. if + cropping the width, take all of the crop off of the right + side, and if cropping the height, take all of it off the + bottom). (1.0, 0.0) will crop from the bottom left + corner, etc. (i.e. if cropping the width, take all of the + crop off the left side, and if cropping the height take + none from the top, and therefore all off the bottom). + :return: An image. + """ + + # by Kevin Cazabon, Feb 17/2000 + # kevin@cazabon.com + # https://www.cazabon.com + + centering_x, centering_y = centering + + if not 0.0 <= centering_x <= 1.0: + centering_x = 0.5 + if not 0.0 <= centering_y <= 1.0: + centering_y = 0.5 + + if not 0.0 <= bleed < 0.5: + bleed = 0.0 + + # calculate the area to use for resizing and cropping, subtracting + # the 'bleed' around the edges + + # number of pixels to trim off on Top and Bottom, Left and Right + bleed_pixels = (bleed * image.size[0], bleed * image.size[1]) + + live_size = ( + image.size[0] - bleed_pixels[0] * 2, + image.size[1] - bleed_pixels[1] * 2, + ) + + # calculate the aspect ratio of the live_size + live_size_ratio = live_size[0] / live_size[1] + + # calculate the aspect ratio of the output image + output_ratio = size[0] / size[1] + + # figure out if the sides or top/bottom will be cropped off + if live_size_ratio == output_ratio: + # live_size is already the needed ratio + crop_width = live_size[0] + crop_height = live_size[1] + elif live_size_ratio >= output_ratio: + # live_size is wider than what's needed, crop the sides + crop_width = output_ratio * live_size[1] + crop_height = live_size[1] + else: + # live_size is taller than what's needed, crop the top and bottom + crop_width = live_size[0] + crop_height = live_size[0] / output_ratio + + # make the crop + crop_left = bleed_pixels[0] + (live_size[0] - crop_width) * centering_x + crop_top = bleed_pixels[1] + (live_size[1] - crop_height) * centering_y + + crop = (crop_left, crop_top, crop_left + crop_width, crop_top + crop_height) + + # resize the image and return it + return image.resize(size, method, box=crop) + + +def flip(image: Image.Image) -> Image.Image: + """ + Flip the image vertically (top to bottom). + + :param image: The image to flip. + :return: An image. + """ + return image.transpose(Image.Transpose.FLIP_TOP_BOTTOM) + + +def grayscale(image: Image.Image) -> Image.Image: + """ + Convert the image to grayscale. + + :param image: The image to convert. + :return: An image. + """ + return image.convert("L") + + +def invert(image: Image.Image) -> Image.Image: + """ + Invert (negate) the image. + + :param image: The image to invert. + :return: An image. + """ + lut = list(range(255, -1, -1)) + return image.point(lut) if image.mode == "1" else _lut(image, lut) + + +def mirror(image: Image.Image) -> Image.Image: + """ + Flip image horizontally (left to right). + + :param image: The image to mirror. + :return: An image. + """ + return image.transpose(Image.Transpose.FLIP_LEFT_RIGHT) + + +def posterize(image: Image.Image, bits: int) -> Image.Image: + """ + Reduce the number of bits for each color channel. + + :param image: The image to posterize. + :param bits: The number of bits to keep for each channel (1-8). + :return: An image. + """ + mask = ~(2 ** (8 - bits) - 1) + lut = [i & mask for i in range(256)] + return _lut(image, lut) + + +def solarize(image: Image.Image, threshold: int = 128) -> Image.Image: + """ + Invert all pixel values above a threshold. + + :param image: The image to solarize. 
+    :param threshold: All pixels above this grayscale level are inverted.
+    :return: An image.
+    """
+    lut = []
+    for i in range(256):
+        if i < threshold:
+            lut.append(i)
+        else:
+            lut.append(255 - i)
+    return _lut(image, lut)
+
+
+@overload
+def exif_transpose(image: Image.Image, *, in_place: Literal[True]) -> None: ...
+
+
+@overload
+def exif_transpose(
+    image: Image.Image, *, in_place: Literal[False] = False
+) -> Image.Image: ...
+
+
+def exif_transpose(image: Image.Image, *, in_place: bool = False) -> Image.Image | None:
+    """
+    If an image has an EXIF Orientation tag, other than 1, transpose the image
+    accordingly, and remove the orientation data.
+
+    :param image: The image to transpose.
+    :param in_place: Boolean. Keyword-only argument.
+        If ``True``, the original image is modified in-place, and ``None`` is returned.
+        If ``False`` (default), a new :py:class:`~PIL.Image.Image` object is returned
+        with the transposition applied. If there is no transposition, a copy of the
+        image will be returned.
+    """
+    image.load()
+    image_exif = image.getexif()
+    orientation = image_exif.get(ExifTags.Base.Orientation, 1)
+    method = {
+        2: Image.Transpose.FLIP_LEFT_RIGHT,
+        3: Image.Transpose.ROTATE_180,
+        4: Image.Transpose.FLIP_TOP_BOTTOM,
+        5: Image.Transpose.TRANSPOSE,
+        6: Image.Transpose.ROTATE_270,
+        7: Image.Transpose.TRANSVERSE,
+        8: Image.Transpose.ROTATE_90,
+    }.get(orientation)
+    if method is not None:
+        if in_place:
+            image.im = image.im.transpose(method)
+            image._size = image.im.size
+        else:
+            transposed_image = image.transpose(method)
+        exif_image = image if in_place else transposed_image
+
+        exif = exif_image.getexif()
+        if ExifTags.Base.Orientation in exif:
+            del exif[ExifTags.Base.Orientation]
+            if "exif" in exif_image.info:
+                exif_image.info["exif"] = exif.tobytes()
+            elif "Raw profile type exif" in exif_image.info:
+                exif_image.info["Raw profile type exif"] = exif.tobytes().hex()
+            for key in ("XML:com.adobe.xmp", "xmp"):
+                if key in exif_image.info:
+                    for pattern in (
+                        r'tiff:Orientation="([0-9])"',
+                        r"<tiff:Orientation>([0-9])</tiff:Orientation>",
+                    ):
+                        value = exif_image.info[key]
+                        if isinstance(value, str):
+                            value = re.sub(pattern, "", value)
+                        elif isinstance(value, tuple):
+                            value = tuple(
+                                re.sub(pattern.encode(), b"", v) for v in value
+                            )
+                        else:
+                            value = re.sub(pattern.encode(), b"", value)
+                        exif_image.info[key] = value
+        if not in_place:
+            return transposed_image
+    elif not in_place:
+        return image.copy()
+    return None
diff --git a/py311/lib/python3.11/site-packages/PIL/ImagePalette.py b/py311/lib/python3.11/site-packages/PIL/ImagePalette.py
new file mode 100644
index 0000000000000000000000000000000000000000..103697117b92a3dca9794fbea5b9c92306b9b198
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/PIL/ImagePalette.py
@@ -0,0 +1,286 @@
+#
+# The Python Imaging Library.
+# $Id$
+#
+# image palette object
+#
+# History:
+# 1996-03-11 fl Rewritten.
+# 1997-01-03 fl Up and running.
+# 1997-08-23 fl Added load hack
+# 2001-04-16 fl Fixed randint shadow bug in random()
+#
+# Copyright (c) 1997-2001 by Secret Labs AB
+# Copyright (c) 1996-1997 by Fredrik Lundh
+#
+# See the README file for information on usage and redistribution.
+#
+from __future__ import annotations
+
+import array
+from collections.abc import Sequence
+from typing import IO
+
+from . import GimpGradientFile, GimpPaletteFile, ImageColor, PaletteFile
+
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from . 
import Image + + +class ImagePalette: + """ + Color palette for palette mapped images + + :param mode: The mode to use for the palette. See: + :ref:`concept-modes`. Defaults to "RGB" + :param palette: An optional palette. If given, it must be a bytearray, + an array or a list of ints between 0-255. The list must consist of + all channels for one color followed by the next color (e.g. RGBRGBRGB). + Defaults to an empty palette. + """ + + def __init__( + self, + mode: str = "RGB", + palette: Sequence[int] | bytes | bytearray | None = None, + ) -> None: + self.mode = mode + self.rawmode: str | None = None # if set, palette contains raw data + self.palette = palette or bytearray() + self.dirty: int | None = None + + @property + def palette(self) -> Sequence[int] | bytes | bytearray: + return self._palette + + @palette.setter + def palette(self, palette: Sequence[int] | bytes | bytearray) -> None: + self._colors: dict[tuple[int, ...], int] | None = None + self._palette = palette + + @property + def colors(self) -> dict[tuple[int, ...], int]: + if self._colors is None: + mode_len = len(self.mode) + self._colors = {} + for i in range(0, len(self.palette), mode_len): + color = tuple(self.palette[i : i + mode_len]) + if color in self._colors: + continue + self._colors[color] = i // mode_len + return self._colors + + @colors.setter + def colors(self, colors: dict[tuple[int, ...], int]) -> None: + self._colors = colors + + def copy(self) -> ImagePalette: + new = ImagePalette() + + new.mode = self.mode + new.rawmode = self.rawmode + if self.palette is not None: + new.palette = self.palette[:] + new.dirty = self.dirty + + return new + + def getdata(self) -> tuple[str, Sequence[int] | bytes | bytearray]: + """ + Get palette contents in format suitable for the low-level + ``im.putpalette`` primitive. + + .. warning:: This method is experimental. + """ + if self.rawmode: + return self.rawmode, self.palette + return self.mode, self.tobytes() + + def tobytes(self) -> bytes: + """Convert palette to bytes. + + .. warning:: This method is experimental. + """ + if self.rawmode: + msg = "palette contains raw palette data" + raise ValueError(msg) + if isinstance(self.palette, bytes): + return self.palette + arr = array.array("B", self.palette) + return arr.tobytes() + + # Declare tostring as an alias for tobytes + tostring = tobytes + + def _new_color_index( + self, image: Image.Image | None = None, e: Exception | None = None + ) -> int: + if not isinstance(self.palette, bytearray): + self._palette = bytearray(self.palette) + index = len(self.palette) // 3 + special_colors: tuple[int | tuple[int, ...] | None, ...] = () + if image: + special_colors = ( + image.info.get("background"), + image.info.get("transparency"), + ) + while index in special_colors: + index += 1 + if index >= 256: + if image: + # Search for an unused index + for i, count in reversed(list(enumerate(image.histogram()))): + if count == 0 and i not in special_colors: + index = i + break + if index >= 256: + msg = "cannot allocate more than 256 colors" + raise ValueError(msg) from e + return index + + def getcolor( + self, + color: tuple[int, ...], + image: Image.Image | None = None, + ) -> int: + """Given an rgb tuple, allocate palette entry. + + .. warning:: This method is experimental. 
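+
+        A usage sketch (illustrative)::
+
+            from PIL import ImagePalette
+
+            palette = ImagePalette.ImagePalette("RGB")
+            red = palette.getcolor((255, 0, 0))  # allocates an index for red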
+ """ + if self.rawmode: + msg = "palette contains raw palette data" + raise ValueError(msg) + if isinstance(color, tuple): + if self.mode == "RGB": + if len(color) == 4: + if color[3] != 255: + msg = "cannot add non-opaque RGBA color to RGB palette" + raise ValueError(msg) + color = color[:3] + elif self.mode == "RGBA": + if len(color) == 3: + color += (255,) + try: + return self.colors[color] + except KeyError as e: + # allocate new color slot + index = self._new_color_index(image, e) + assert isinstance(self._palette, bytearray) + self.colors[color] = index + if index * 3 < len(self.palette): + self._palette = ( + self._palette[: index * 3] + + bytes(color) + + self._palette[index * 3 + 3 :] + ) + else: + self._palette += bytes(color) + self.dirty = 1 + return index + else: + msg = f"unknown color specifier: {repr(color)}" # type: ignore[unreachable] + raise ValueError(msg) + + def save(self, fp: str | IO[str]) -> None: + """Save palette to text file. + + .. warning:: This method is experimental. + """ + if self.rawmode: + msg = "palette contains raw palette data" + raise ValueError(msg) + if isinstance(fp, str): + fp = open(fp, "w") + fp.write("# Palette\n") + fp.write(f"# Mode: {self.mode}\n") + for i in range(256): + fp.write(f"{i}") + for j in range(i * len(self.mode), (i + 1) * len(self.mode)): + try: + fp.write(f" {self.palette[j]}") + except IndexError: + fp.write(" 0") + fp.write("\n") + fp.close() + + +# -------------------------------------------------------------------- +# Internal + + +def raw(rawmode: str, data: Sequence[int] | bytes | bytearray) -> ImagePalette: + palette = ImagePalette() + palette.rawmode = rawmode + palette.palette = data + palette.dirty = 1 + return palette + + +# -------------------------------------------------------------------- +# Factories + + +def make_linear_lut(black: int, white: float) -> list[int]: + if black == 0: + return [int(white * i // 255) for i in range(256)] + + msg = "unavailable when black is non-zero" + raise NotImplementedError(msg) # FIXME + + +def make_gamma_lut(exp: float) -> list[int]: + return [int(((i / 255.0) ** exp) * 255.0 + 0.5) for i in range(256)] + + +def negative(mode: str = "RGB") -> ImagePalette: + palette = list(range(256 * len(mode))) + palette.reverse() + return ImagePalette(mode, [i // len(mode) for i in palette]) + + +def random(mode: str = "RGB") -> ImagePalette: + from random import randint + + palette = [randint(0, 255) for _ in range(256 * len(mode))] + return ImagePalette(mode, palette) + + +def sepia(white: str = "#fff0c0") -> ImagePalette: + bands = [make_linear_lut(0, band) for band in ImageColor.getrgb(white)] + return ImagePalette("RGB", [bands[i % 3][i // 3] for i in range(256 * 3)]) + + +def wedge(mode: str = "RGB") -> ImagePalette: + palette = list(range(256 * len(mode))) + return ImagePalette(mode, [i // len(mode) for i in palette]) + + +def load(filename: str) -> tuple[bytes, str]: + # FIXME: supports GIMP gradients only + + with open(filename, "rb") as fp: + paletteHandlers: list[ + type[ + GimpPaletteFile.GimpPaletteFile + | GimpGradientFile.GimpGradientFile + | PaletteFile.PaletteFile + ] + ] = [ + GimpPaletteFile.GimpPaletteFile, + GimpGradientFile.GimpGradientFile, + PaletteFile.PaletteFile, + ] + for paletteHandler in paletteHandlers: + try: + fp.seek(0) + lut = paletteHandler(fp).getpalette() + if lut: + break + except (SyntaxError, ValueError): + pass + else: + msg = "cannot load palette" + raise OSError(msg) + + return lut # data, rawmode diff --git 
a/py311/lib/python3.11/site-packages/PIL/ImagePath.py b/py311/lib/python3.11/site-packages/PIL/ImagePath.py new file mode 100644 index 0000000000000000000000000000000000000000..77e8a609a552ae7d8c6b87e78a36ecbfc1cdce89 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImagePath.py @@ -0,0 +1,20 @@ +# +# The Python Imaging Library +# $Id$ +# +# path interface +# +# History: +# 1996-11-04 fl Created +# 2002-04-14 fl Added documentation stub class +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +from . import Image + +Path = Image.core.path diff --git a/py311/lib/python3.11/site-packages/PIL/ImageQt.py b/py311/lib/python3.11/site-packages/PIL/ImageQt.py new file mode 100644 index 0000000000000000000000000000000000000000..df7a57b652cd18b43c33774ca0006546ff4af274 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageQt.py @@ -0,0 +1,220 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a simple Qt image interface. +# +# history: +# 2006-06-03 fl: created +# 2006-06-04 fl: inherit from QImage instead of wrapping it +# 2006-06-05 fl: removed toimage helper; move string support to ImageQt +# 2013-11-13 fl: add support for Qt5 (aurelien.ballier@cyclonit.com) +# +# Copyright (c) 2006 by Secret Labs AB +# Copyright (c) 2006 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import sys +from io import BytesIO +from typing import Any, Callable, Union + +from . import Image +from ._util import is_path + +TYPE_CHECKING = False +if TYPE_CHECKING: + import PyQt6 + import PySide6 + + from . import ImageFile + + QBuffer: type + QByteArray = Union[PyQt6.QtCore.QByteArray, PySide6.QtCore.QByteArray] + QIODevice = Union[PyQt6.QtCore.QIODevice, PySide6.QtCore.QIODevice] + QImage = Union[PyQt6.QtGui.QImage, PySide6.QtGui.QImage] + QPixmap = Union[PyQt6.QtGui.QPixmap, PySide6.QtGui.QPixmap] + +qt_version: str | None +qt_versions = [ + ["6", "PyQt6"], + ["side6", "PySide6"], +] + +# If a version has already been imported, attempt it first +qt_versions.sort(key=lambda version: version[1] in sys.modules, reverse=True) +for version, qt_module in qt_versions: + try: + qRgba: Callable[[int, int, int, int], int] + if qt_module == "PyQt6": + from PyQt6.QtCore import QBuffer, QIODevice + from PyQt6.QtGui import QImage, QPixmap, qRgba + elif qt_module == "PySide6": + from PySide6.QtCore import QBuffer, QIODevice + from PySide6.QtGui import QImage, QPixmap, qRgba + except (ImportError, RuntimeError): + continue + qt_is_installed = True + qt_version = version + break +else: + qt_is_installed = False + qt_version = None + + +def rgb(r: int, g: int, b: int, a: int = 255) -> int: + """(Internal) Turns an RGB color into a Qt compatible color integer.""" + # use qRgb to pack the colors, and then turn the resulting long + # into a negative integer with the same bitpattern. 
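+    # Masking with 0xFFFFFFFF keeps only the low 32 bits, so the value is a
+    # non-negative integer that fits in an unsigned 32-bit ARGB word.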
+ return qRgba(r, g, b, a) & 0xFFFFFFFF + + +def fromqimage(im: QImage | QPixmap) -> ImageFile.ImageFile: + """ + :param im: QImage or PIL ImageQt object + """ + buffer = QBuffer() + qt_openmode: object + if qt_version == "6": + try: + qt_openmode = getattr(QIODevice, "OpenModeFlag") + except AttributeError: + qt_openmode = getattr(QIODevice, "OpenMode") + else: + qt_openmode = QIODevice + buffer.open(getattr(qt_openmode, "ReadWrite")) + # preserve alpha channel with png + # otherwise ppm is more friendly with Image.open + if im.hasAlphaChannel(): + im.save(buffer, "png") + else: + im.save(buffer, "ppm") + + b = BytesIO() + b.write(buffer.data()) + buffer.close() + b.seek(0) + + return Image.open(b) + + +def fromqpixmap(im: QPixmap) -> ImageFile.ImageFile: + return fromqimage(im) + + +def align8to32(bytes: bytes, width: int, mode: str) -> bytes: + """ + converts each scanline of data from 8 bit to 32 bit aligned + """ + + bits_per_pixel = {"1": 1, "L": 8, "P": 8, "I;16": 16}[mode] + + # calculate bytes per line and the extra padding if needed + bits_per_line = bits_per_pixel * width + full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8) + bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0) + + extra_padding = -bytes_per_line % 4 + + # already 32 bit aligned by luck + if not extra_padding: + return bytes + + new_data = [ + bytes[i * bytes_per_line : (i + 1) * bytes_per_line] + b"\x00" * extra_padding + for i in range(len(bytes) // bytes_per_line) + ] + + return b"".join(new_data) + + +def _toqclass_helper(im: Image.Image | str | QByteArray) -> dict[str, Any]: + data = None + colortable = None + exclusive_fp = False + + # handle filename, if given instead of image name + if hasattr(im, "toUtf8"): + # FIXME - is this really the best way to do this? + im = str(im.toUtf8(), "utf-8") + if is_path(im): + im = Image.open(im) + exclusive_fp = True + assert isinstance(im, Image.Image) + + qt_format = getattr(QImage, "Format") if qt_version == "6" else QImage + if im.mode == "1": + format = getattr(qt_format, "Format_Mono") + elif im.mode == "L": + format = getattr(qt_format, "Format_Indexed8") + colortable = [rgb(i, i, i) for i in range(256)] + elif im.mode == "P": + format = getattr(qt_format, "Format_Indexed8") + palette = im.getpalette() + assert palette is not None + colortable = [rgb(*palette[i : i + 3]) for i in range(0, len(palette), 3)] + elif im.mode == "RGB": + # Populate the 4th channel with 255 + im = im.convert("RGBA") + + data = im.tobytes("raw", "BGRA") + format = getattr(qt_format, "Format_RGB32") + elif im.mode == "RGBA": + data = im.tobytes("raw", "BGRA") + format = getattr(qt_format, "Format_ARGB32") + elif im.mode == "I;16": + im = im.point(lambda i: i * 256) + + format = getattr(qt_format, "Format_Grayscale16") + else: + if exclusive_fp: + im.close() + msg = f"unsupported image mode {repr(im.mode)}" + raise ValueError(msg) + + size = im.size + __data = data or align8to32(im.tobytes(), size[0], im.mode) + if exclusive_fp: + im.close() + return {"data": __data, "size": size, "format": format, "colortable": colortable} + + +if qt_is_installed: + + class ImageQt(QImage): # type: ignore[misc] + def __init__(self, im: Image.Image | str | QByteArray) -> None: + """ + An PIL image wrapper for Qt. This is a subclass of PyQt's QImage + class. + + :param im: A PIL Image object, or a file name (given either as + Python string or a PyQt string object). + """ + im_data = _toqclass_helper(im) + # must keep a reference, or Qt will crash! 
+ # All QImage constructors that take data operate on an existing + # buffer, so this buffer has to hang on for the life of the image. + # Fixes https://github.com/python-pillow/Pillow/issues/1370 + self.__data = im_data["data"] + super().__init__( + self.__data, + im_data["size"][0], + im_data["size"][1], + im_data["format"], + ) + if im_data["colortable"]: + self.setColorTable(im_data["colortable"]) + + +def toqimage(im: Image.Image | str | QByteArray) -> ImageQt: + return ImageQt(im) + + +def toqpixmap(im: Image.Image | str | QByteArray) -> QPixmap: + qimage = toqimage(im) + pixmap = getattr(QPixmap, "fromImage")(qimage) + if qt_version == "6": + pixmap.detach() + return pixmap diff --git a/py311/lib/python3.11/site-packages/PIL/ImageSequence.py b/py311/lib/python3.11/site-packages/PIL/ImageSequence.py new file mode 100644 index 0000000000000000000000000000000000000000..a6fc340d55f5516934349b3fa62c1276a204b03b --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageSequence.py @@ -0,0 +1,86 @@ +# +# The Python Imaging Library. +# $Id$ +# +# sequence support classes +# +# history: +# 1997-02-20 fl Created +# +# Copyright (c) 1997 by Secret Labs AB. +# Copyright (c) 1997 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +## +from __future__ import annotations + +from typing import Callable + +from . import Image + + +class Iterator: + """ + This class implements an iterator object that can be used to loop + over an image sequence. + + You can use the ``[]`` operator to access elements by index. This operator + will raise an :py:exc:`IndexError` if you try to access a nonexistent + frame. + + :param im: An image object. + """ + + def __init__(self, im: Image.Image) -> None: + if not hasattr(im, "seek"): + msg = "im must have seek method" + raise AttributeError(msg) + self.im = im + self.position = getattr(self.im, "_min_frame", 0) + + def __getitem__(self, ix: int) -> Image.Image: + try: + self.im.seek(ix) + return self.im + except EOFError as e: + msg = "end of sequence" + raise IndexError(msg) from e + + def __iter__(self) -> Iterator: + return self + + def __next__(self) -> Image.Image: + try: + self.im.seek(self.position) + self.position += 1 + return self.im + except EOFError as e: + msg = "end of sequence" + raise StopIteration(msg) from e + + +def all_frames( + im: Image.Image | list[Image.Image], + func: Callable[[Image.Image], Image.Image] | None = None, +) -> list[Image.Image]: + """ + Applies a given function to all frames in an image or a list of images. + The frames are returned as a list of separate images. + + :param im: An image, or a list of images. + :param func: The function to apply to all of the image frames. + :returns: A list of images. + """ + if not isinstance(im, list): + im = [im] + + ims = [] + for imSequence in im: + current = imSequence.tell() + + ims += [im_frame.copy() for im_frame in Iterator(imSequence)] + + imSequence.seek(current) + return [func(im) for im in ims] if func else ims diff --git a/py311/lib/python3.11/site-packages/PIL/ImageShow.py b/py311/lib/python3.11/site-packages/PIL/ImageShow.py new file mode 100644 index 0000000000000000000000000000000000000000..7705608e3eccd5e82cfca87daa1264df2c81dacd --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageShow.py @@ -0,0 +1,362 @@ +# +# The Python Imaging Library. +# $Id$ +# +# im.show() drivers +# +# History: +# 2008-04-06 fl Created +# +# Copyright (c) Secret Labs AB 2008. 
+# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import abc +import os +import shutil +import subprocess +import sys +from shlex import quote +from typing import Any + +from . import Image + +_viewers = [] + + +def register(viewer: type[Viewer] | Viewer, order: int = 1) -> None: + """ + The :py:func:`register` function is used to register additional viewers:: + + from PIL import ImageShow + ImageShow.register(MyViewer()) # MyViewer will be used as a last resort + ImageShow.register(MySecondViewer(), 0) # MySecondViewer will be prioritised + ImageShow.register(ImageShow.XVViewer(), 0) # XVViewer will be prioritised + + :param viewer: The viewer to be registered. + :param order: + Zero or a negative integer to prepend this viewer to the list, + a positive integer to append it. + """ + if isinstance(viewer, type) and issubclass(viewer, Viewer): + viewer = viewer() + if order > 0: + _viewers.append(viewer) + else: + _viewers.insert(0, viewer) + + +def show(image: Image.Image, title: str | None = None, **options: Any) -> bool: + r""" + Display a given image. + + :param image: An image object. + :param title: Optional title. Not all viewers can display the title. + :param \**options: Additional viewer options. + :returns: ``True`` if a suitable viewer was found, ``False`` otherwise. + """ + for viewer in _viewers: + if viewer.show(image, title=title, **options): + return True + return False + + +class Viewer: + """Base class for viewers.""" + + # main api + + def show(self, image: Image.Image, **options: Any) -> int: + """ + The main function for displaying an image. + Converts the given image to the target format and displays it. + """ + + if not ( + image.mode in ("1", "RGBA") + or (self.format == "PNG" and image.mode in ("I;16", "LA")) + ): + base = Image.getmodebase(image.mode) + if image.mode != base: + image = image.convert(base) + + return self.show_image(image, **options) + + # hook methods + + format: str | None = None + """The format to convert the image into.""" + options: dict[str, Any] = {} + """Additional options used to convert the image.""" + + def get_format(self, image: Image.Image) -> str | None: + """Return format name, or ``None`` to save as PGM/PPM.""" + return self.format + + def get_command(self, file: str, **options: Any) -> str: + """ + Returns the command used to display the file. + Not implemented in the base class. + """ + msg = "unavailable in base viewer" + raise NotImplementedError(msg) + + def save_image(self, image: Image.Image) -> str: + """Save to temporary file and return filename.""" + return image._dump(format=self.get_format(image), **self.options) + + def show_image(self, image: Image.Image, **options: Any) -> int: + """Display the given image.""" + return self.show_file(self.save_image(image), **options) + + def show_file(self, path: str, **options: Any) -> int: + """ + Display given file. 
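+
+        The command returned by :py:meth:`get_command` is run synchronously
+        through ``os.system``, so this blocks until that command exits.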
+ """ + if not os.path.exists(path): + raise FileNotFoundError + os.system(self.get_command(path, **options)) # nosec + return 1 + + +# -------------------------------------------------------------------- + + +class WindowsViewer(Viewer): + """The default viewer on Windows is the default system application for PNG files.""" + + format = "PNG" + options = {"compress_level": 1, "save_all": True} + + def get_command(self, file: str, **options: Any) -> str: + return ( + f'start "Pillow" /WAIT "{file}" ' + "&& ping -n 4 127.0.0.1 >NUL " + f'&& del /f "{file}"' + ) + + def show_file(self, path: str, **options: Any) -> int: + """ + Display given file. + """ + if not os.path.exists(path): + raise FileNotFoundError + subprocess.Popen( + self.get_command(path, **options), + shell=True, + creationflags=getattr(subprocess, "CREATE_NO_WINDOW"), + ) # nosec + return 1 + + +if sys.platform == "win32": + register(WindowsViewer) + + +class MacViewer(Viewer): + """The default viewer on macOS using ``Preview.app``.""" + + format = "PNG" + options = {"compress_level": 1, "save_all": True} + + def get_command(self, file: str, **options: Any) -> str: + # on darwin open returns immediately resulting in the temp + # file removal while app is opening + command = "open -a Preview.app" + command = f"({command} {quote(file)}; sleep 20; rm -f {quote(file)})&" + return command + + def show_file(self, path: str, **options: Any) -> int: + """ + Display given file. + """ + if not os.path.exists(path): + raise FileNotFoundError + subprocess.call(["open", "-a", "Preview.app", path]) + + pyinstaller = getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS") + executable = (not pyinstaller and sys.executable) or shutil.which("python3") + if executable: + subprocess.Popen( + [ + executable, + "-c", + "import os, sys, time; time.sleep(20); os.remove(sys.argv[1])", + path, + ] + ) + return 1 + + +if sys.platform == "darwin": + register(MacViewer) + + +class UnixViewer(abc.ABC, Viewer): + format = "PNG" + options = {"compress_level": 1, "save_all": True} + + @abc.abstractmethod + def get_command_ex(self, file: str, **options: Any) -> tuple[str, str]: + pass + + def get_command(self, file: str, **options: Any) -> str: + command = self.get_command_ex(file, **options)[0] + return f"{command} {quote(file)}" + + +class XDGViewer(UnixViewer): + """ + The freedesktop.org ``xdg-open`` command. + """ + + def get_command_ex(self, file: str, **options: Any) -> tuple[str, str]: + command = executable = "xdg-open" + return command, executable + + def show_file(self, path: str, **options: Any) -> int: + """ + Display given file. + """ + if not os.path.exists(path): + raise FileNotFoundError + subprocess.Popen(["xdg-open", path]) + return 1 + + +class DisplayViewer(UnixViewer): + """ + The ImageMagick ``display`` command. + This viewer supports the ``title`` parameter. + """ + + def get_command_ex( + self, file: str, title: str | None = None, **options: Any + ) -> tuple[str, str]: + command = executable = "display" + if title: + command += f" -title {quote(title)}" + return command, executable + + def show_file(self, path: str, **options: Any) -> int: + """ + Display given file. 
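+
+        If a ``title`` option is given, it is passed to the viewer via
+        ``display -title``.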
+ """ + if not os.path.exists(path): + raise FileNotFoundError + args = ["display"] + title = options.get("title") + if title: + args += ["-title", title] + args.append(path) + + subprocess.Popen(args) + return 1 + + +class GmDisplayViewer(UnixViewer): + """The GraphicsMagick ``gm display`` command.""" + + def get_command_ex(self, file: str, **options: Any) -> tuple[str, str]: + executable = "gm" + command = "gm display" + return command, executable + + def show_file(self, path: str, **options: Any) -> int: + """ + Display given file. + """ + if not os.path.exists(path): + raise FileNotFoundError + subprocess.Popen(["gm", "display", path]) + return 1 + + +class EogViewer(UnixViewer): + """The GNOME Image Viewer ``eog`` command.""" + + def get_command_ex(self, file: str, **options: Any) -> tuple[str, str]: + executable = "eog" + command = "eog -n" + return command, executable + + def show_file(self, path: str, **options: Any) -> int: + """ + Display given file. + """ + if not os.path.exists(path): + raise FileNotFoundError + subprocess.Popen(["eog", "-n", path]) + return 1 + + +class XVViewer(UnixViewer): + """ + The X Viewer ``xv`` command. + This viewer supports the ``title`` parameter. + """ + + def get_command_ex( + self, file: str, title: str | None = None, **options: Any + ) -> tuple[str, str]: + # note: xv is pretty outdated. most modern systems have + # imagemagick's display command instead. + command = executable = "xv" + if title: + command += f" -name {quote(title)}" + return command, executable + + def show_file(self, path: str, **options: Any) -> int: + """ + Display given file. + """ + if not os.path.exists(path): + raise FileNotFoundError + args = ["xv"] + title = options.get("title") + if title: + args += ["-name", title] + args.append(path) + + subprocess.Popen(args) + return 1 + + +if sys.platform not in ("win32", "darwin"): # unixoids + if shutil.which("xdg-open"): + register(XDGViewer) + if shutil.which("display"): + register(DisplayViewer) + if shutil.which("gm"): + register(GmDisplayViewer) + if shutil.which("eog"): + register(EogViewer) + if shutil.which("xv"): + register(XVViewer) + + +class IPythonViewer(Viewer): + """The viewer for IPython frontends.""" + + def show_image(self, image: Image.Image, **options: Any) -> int: + ipython_display(image) + return 1 + + +try: + from IPython.display import display as ipython_display +except ImportError: + pass +else: + register(IPythonViewer) + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("Syntax: python3 ImageShow.py imagefile [title]") + sys.exit() + + with Image.open(sys.argv[1]) as im: + print(show(im, *sys.argv[2:])) diff --git a/py311/lib/python3.11/site-packages/PIL/ImageStat.py b/py311/lib/python3.11/site-packages/PIL/ImageStat.py new file mode 100644 index 0000000000000000000000000000000000000000..8bc504526f0a00cde1229798234d4f0c5db95138 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageStat.py @@ -0,0 +1,160 @@ +# +# The Python Imaging Library. +# $Id$ +# +# global image statistics +# +# History: +# 1996-04-05 fl Created +# 1997-05-21 fl Added mask; added rms, var, stddev attributes +# 1997-08-05 fl Added median +# 1998-07-05 hk Fixed integer overflow error +# +# Notes: +# This class shows how to implement delayed evaluation of attributes. +# To get a certain value, simply access the corresponding attribute. +# The __getattr__ dispatcher takes care of the rest. +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996-97. 
+# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import math +from functools import cached_property + +from . import Image + + +class Stat: + def __init__( + self, image_or_list: Image.Image | list[int], mask: Image.Image | None = None + ) -> None: + """ + Calculate statistics for the given image. If a mask is included, + only the regions covered by that mask are included in the + statistics. You can also pass in a previously calculated histogram. + + :param image: A PIL image, or a precalculated histogram. + + .. note:: + + For a PIL image, calculations rely on the + :py:meth:`~PIL.Image.Image.histogram` method. The pixel counts are + grouped into 256 bins, even if the image has more than 8 bits per + channel. So ``I`` and ``F`` mode images have a maximum ``mean``, + ``median`` and ``rms`` of 255, and cannot have an ``extrema`` maximum + of more than 255. + + :param mask: An optional mask. + """ + if isinstance(image_or_list, Image.Image): + self.h = image_or_list.histogram(mask) + elif isinstance(image_or_list, list): + self.h = image_or_list + else: + msg = "first argument must be image or list" # type: ignore[unreachable] + raise TypeError(msg) + self.bands = list(range(len(self.h) // 256)) + + @cached_property + def extrema(self) -> list[tuple[int, int]]: + """ + Min/max values for each band in the image. + + .. note:: + This relies on the :py:meth:`~PIL.Image.Image.histogram` method, and + simply returns the low and high bins used. This is correct for + images with 8 bits per channel, but fails for other modes such as + ``I`` or ``F``. Instead, use :py:meth:`~PIL.Image.Image.getextrema` to + return per-band extrema for the image. This is more correct and + efficient because, for non-8-bit modes, the histogram method uses + :py:meth:`~PIL.Image.Image.getextrema` to determine the bins used. 
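+
+        A usage sketch (illustrative)::
+
+            from PIL import Image, ImageStat
+
+            with Image.open("photo.jpg") as im:
+                stat = ImageStat.Stat(im)
+                print(stat.extrema)      # histogram-based, capped at 255
+                print(im.getextrema())   # exact per-band extrema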
+ """ + + def minmax(histogram: list[int]) -> tuple[int, int]: + res_min, res_max = 255, 0 + for i in range(256): + if histogram[i]: + res_min = i + break + for i in range(255, -1, -1): + if histogram[i]: + res_max = i + break + return res_min, res_max + + return [minmax(self.h[i:]) for i in range(0, len(self.h), 256)] + + @cached_property + def count(self) -> list[int]: + """Total number of pixels for each band in the image.""" + return [sum(self.h[i : i + 256]) for i in range(0, len(self.h), 256)] + + @cached_property + def sum(self) -> list[float]: + """Sum of all pixels for each band in the image.""" + + v = [] + for i in range(0, len(self.h), 256): + layer_sum = 0.0 + for j in range(256): + layer_sum += j * self.h[i + j] + v.append(layer_sum) + return v + + @cached_property + def sum2(self) -> list[float]: + """Squared sum of all pixels for each band in the image.""" + + v = [] + for i in range(0, len(self.h), 256): + sum2 = 0.0 + for j in range(256): + sum2 += (j**2) * float(self.h[i + j]) + v.append(sum2) + return v + + @cached_property + def mean(self) -> list[float]: + """Average (arithmetic mean) pixel level for each band in the image.""" + return [self.sum[i] / self.count[i] for i in self.bands] + + @cached_property + def median(self) -> list[int]: + """Median pixel level for each band in the image.""" + + v = [] + for i in self.bands: + s = 0 + half = self.count[i] // 2 + b = i * 256 + for j in range(256): + s = s + self.h[b + j] + if s > half: + break + v.append(j) + return v + + @cached_property + def rms(self) -> list[float]: + """RMS (root-mean-square) for each band in the image.""" + return [math.sqrt(self.sum2[i] / self.count[i]) for i in self.bands] + + @cached_property + def var(self) -> list[float]: + """Variance for each band in the image.""" + return [ + (self.sum2[i] - (self.sum[i] ** 2.0) / self.count[i]) / self.count[i] + for i in self.bands + ] + + @cached_property + def stddev(self) -> list[float]: + """Standard deviation for each band in the image.""" + return [math.sqrt(self.var[i]) for i in self.bands] + + +Global = Stat # compatibility diff --git a/py311/lib/python3.11/site-packages/PIL/ImageTk.py b/py311/lib/python3.11/site-packages/PIL/ImageTk.py new file mode 100644 index 0000000000000000000000000000000000000000..3a4cb81e9ef5ef4abe617d4a364074c2203571ad --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageTk.py @@ -0,0 +1,266 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a Tk display interface +# +# History: +# 96-04-08 fl Created +# 96-09-06 fl Added getimage method +# 96-11-01 fl Rewritten, removed image attribute and crop method +# 97-05-09 fl Use PyImagingPaste method instead of image type +# 97-05-12 fl Minor tweaks to match the IFUNC95 interface +# 97-05-17 fl Support the "pilbitmap" booster patch +# 97-06-05 fl Added file= and data= argument to image constructors +# 98-03-09 fl Added width and height methods to Image classes +# 98-07-02 fl Use default mode for "P" images without palette attribute +# 98-07-02 fl Explicitly destroy Tkinter image objects +# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch) +# 99-07-26 fl Automatically hook into Tkinter (if possible) +# 99-08-15 fl Hook uses _imagingtk instead of _imaging +# +# Copyright (c) 1997-1999 by Secret Labs AB +# Copyright (c) 1996-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import tkinter +from io import BytesIO +from typing import Any + +from . 
import Image, ImageFile + +TYPE_CHECKING = False +if TYPE_CHECKING: + from ._typing import CapsuleType + +# -------------------------------------------------------------------- +# Check for Tkinter interface hooks + + +def _get_image_from_kw(kw: dict[str, Any]) -> ImageFile.ImageFile | None: + source = None + if "file" in kw: + source = kw.pop("file") + elif "data" in kw: + source = BytesIO(kw.pop("data")) + if not source: + return None + return Image.open(source) + + +def _pyimagingtkcall( + command: str, photo: PhotoImage | tkinter.PhotoImage, ptr: CapsuleType +) -> None: + tk = photo.tk + try: + tk.call(command, photo, repr(ptr)) + except tkinter.TclError: + # activate Tkinter hook + # may raise an error if it cannot attach to Tkinter + from . import _imagingtk + + _imagingtk.tkinit(tk.interpaddr()) + tk.call(command, photo, repr(ptr)) + + +# -------------------------------------------------------------------- +# PhotoImage + + +class PhotoImage: + """ + A Tkinter-compatible photo image. This can be used + everywhere Tkinter expects an image object. If the image is an RGBA + image, pixels having alpha 0 are treated as transparent. + + The constructor takes either a PIL image, or a mode and a size. + Alternatively, you can use the ``file`` or ``data`` options to initialize + the photo image object. + + :param image: Either a PIL image, or a mode string. If a mode string is + used, a size must also be given. + :param size: If the first argument is a mode string, this defines the size + of the image. + :keyword file: A filename to load the image from (using + ``Image.open(file)``). + :keyword data: An 8-bit string containing image data (as loaded from an + image file). + """ + + def __init__( + self, + image: Image.Image | str | None = None, + size: tuple[int, int] | None = None, + **kw: Any, + ) -> None: + # Tk compatibility: file or data + if image is None: + image = _get_image_from_kw(kw) + + if image is None: + msg = "Image is required" + raise ValueError(msg) + elif isinstance(image, str): + mode = image + image = None + + if size is None: + msg = "If first argument is mode, size is required" + raise ValueError(msg) + else: + # got an image instead of a mode + mode = image.mode + if mode == "P": + # palette mapped data + image.apply_transparency() + image.load() + mode = image.palette.mode if image.palette else "RGB" + size = image.size + kw["width"], kw["height"] = size + + if mode not in ["1", "L", "RGB", "RGBA"]: + mode = Image.getmodebase(mode) + + self.__mode = mode + self.__size = size + self.__photo = tkinter.PhotoImage(**kw) + self.tk = self.__photo.tk + if image: + self.paste(image) + + def __del__(self) -> None: + try: + name = self.__photo.name + except AttributeError: + return + self.__photo.name = None + try: + self.__photo.tk.call("image", "delete", name) + except Exception: + pass # ignore internal errors + + def __str__(self) -> str: + """ + Get the Tkinter photo image identifier. This method is automatically + called by Tkinter whenever a PhotoImage object is passed to a Tkinter + method. + + :return: A Tkinter photo image identifier (a string). + """ + return str(self.__photo) + + def width(self) -> int: + """ + Get the width of the image. + + :return: The width, in pixels. + """ + return self.__size[0] + + def height(self) -> int: + """ + Get the height of the image. + + :return: The height, in pixels. + """ + return self.__size[1] + + def paste(self, im: Image.Image) -> None: + """ + Paste a PIL image into the photo image. 
Note that this can + be very slow if the photo image is displayed. + + :param im: A PIL image. The size must match the target region. If the + mode does not match, the image is converted to the mode of + the bitmap image. + """ + # convert to blittable + ptr = im.getim() + image = im.im + if not image.isblock() or im.mode != self.__mode: + block = Image.core.new_block(self.__mode, im.size) + image.convert2(block, image) # convert directly between buffers + ptr = block.ptr + + _pyimagingtkcall("PyImagingPhoto", self.__photo, ptr) + + +# -------------------------------------------------------------------- +# BitmapImage + + +class BitmapImage: + """ + A Tkinter-compatible bitmap image. This can be used everywhere Tkinter + expects an image object. + + The given image must have mode "1". Pixels having value 0 are treated as + transparent. Options, if any, are passed on to Tkinter. The most commonly + used option is ``foreground``, which is used to specify the color for the + non-transparent parts. See the Tkinter documentation for information on + how to specify colours. + + :param image: A PIL image. + """ + + def __init__(self, image: Image.Image | None = None, **kw: Any) -> None: + # Tk compatibility: file or data + if image is None: + image = _get_image_from_kw(kw) + + if image is None: + msg = "Image is required" + raise ValueError(msg) + self.__mode = image.mode + self.__size = image.size + + self.__photo = tkinter.BitmapImage(data=image.tobitmap(), **kw) + + def __del__(self) -> None: + try: + name = self.__photo.name + except AttributeError: + return + self.__photo.name = None + try: + self.__photo.tk.call("image", "delete", name) + except Exception: + pass # ignore internal errors + + def width(self) -> int: + """ + Get the width of the image. + + :return: The width, in pixels. + """ + return self.__size[0] + + def height(self) -> int: + """ + Get the height of the image. + + :return: The height, in pixels. + """ + return self.__size[1] + + def __str__(self) -> str: + """ + Get the Tkinter bitmap image identifier. This method is automatically + called by Tkinter whenever a BitmapImage object is passed to a Tkinter + method. + + :return: A Tkinter bitmap image identifier (a string). + """ + return str(self.__photo) + + +def getimage(photo: PhotoImage) -> Image.Image: + """Copies the contents of a PhotoImage to a PIL image memory.""" + im = Image.new("RGBA", (photo.width(), photo.height())) + + _pyimagingtkcall("PyImagingPhotoGet", photo, im.getim()) + + return im diff --git a/py311/lib/python3.11/site-packages/PIL/ImageTransform.py b/py311/lib/python3.11/site-packages/PIL/ImageTransform.py new file mode 100644 index 0000000000000000000000000000000000000000..fb144ff38a1ee7ff77cc01f3b941756a60b2b4cd --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageTransform.py @@ -0,0 +1,136 @@ +# +# The Python Imaging Library. +# $Id$ +# +# transform wrappers +# +# History: +# 2002-04-08 fl Created +# +# Copyright (c) 2002 by Secret Labs AB +# Copyright (c) 2002 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +from collections.abc import Sequence +from typing import Any + +from . 
import Image + + +class Transform(Image.ImageTransformHandler): + """Base class for other transforms defined in :py:mod:`~PIL.ImageTransform`.""" + + method: Image.Transform + + def __init__(self, data: Sequence[Any]) -> None: + self.data = data + + def getdata(self) -> tuple[Image.Transform, Sequence[int]]: + return self.method, self.data + + def transform( + self, + size: tuple[int, int], + image: Image.Image, + **options: Any, + ) -> Image.Image: + """Perform the transform. Called from :py:meth:`.Image.transform`.""" + # can be overridden + method, data = self.getdata() + return image.transform(size, method, data, **options) + + +class AffineTransform(Transform): + """ + Define an affine image transform. + + This function takes a 6-tuple (a, b, c, d, e, f) which contain the first + two rows from the inverse of an affine transform matrix. For each pixel + (x, y) in the output image, the new value is taken from a position (a x + + b y + c, d x + e y + f) in the input image, rounded to nearest pixel. + + This function can be used to scale, translate, rotate, and shear the + original image. + + See :py:meth:`.Image.transform` + + :param matrix: A 6-tuple (a, b, c, d, e, f) containing the first two rows + from the inverse of an affine transform matrix. + """ + + method = Image.Transform.AFFINE + + +class PerspectiveTransform(Transform): + """ + Define a perspective image transform. + + This function takes an 8-tuple (a, b, c, d, e, f, g, h). For each pixel + (x, y) in the output image, the new value is taken from a position + ((a x + b y + c) / (g x + h y + 1), (d x + e y + f) / (g x + h y + 1)) in + the input image, rounded to nearest pixel. + + This function can be used to scale, translate, rotate, and shear the + original image. + + See :py:meth:`.Image.transform` + + :param matrix: An 8-tuple (a, b, c, d, e, f, g, h). + """ + + method = Image.Transform.PERSPECTIVE + + +class ExtentTransform(Transform): + """ + Define a transform to extract a subregion from an image. + + Maps a rectangle (defined by two corners) from the image to a rectangle of + the given size. The resulting image will contain data sampled from between + the corners, such that (x0, y0) in the input image will end up at (0,0) in + the output image, and (x1, y1) at size. + + This method can be used to crop, stretch, shrink, or mirror an arbitrary + rectangle in the current image. It is slightly slower than crop, but about + as fast as a corresponding resize operation. + + See :py:meth:`.Image.transform` + + :param bbox: A 4-tuple (x0, y0, x1, y1) which specifies two points in the + input image's coordinate system. See :ref:`coordinate-system`. + """ + + method = Image.Transform.EXTENT + + +class QuadTransform(Transform): + """ + Define a quad image transform. + + Maps a quadrilateral (a region defined by four corners) from the image to a + rectangle of the given size. + + See :py:meth:`.Image.transform` + + :param xy: An 8-tuple (x0, y0, x1, y1, x2, y2, x3, y3) which contain the + upper left, lower left, lower right, and upper right corner of the + source quadrilateral. + """ + + method = Image.Transform.QUAD + + +class MeshTransform(Transform): + """ + Define a mesh image transform. A mesh transform consists of one or more + individual quad transforms. + + See :py:meth:`.Image.transform` + + :param data: A list of (bbox, quad) tuples. 
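+
+        For example (illustrative values only),
+        ``[((0, 0, 100, 100), (0, 0, 0, 100, 100, 100, 100, 0))]`` defines a
+        single quad: the bbox is the target rectangle in the output image,
+        and the quad gives the source corners in the same order as
+        :py:class:`~PIL.ImageTransform.QuadTransform`.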
+ """ + + method = Image.Transform.MESH diff --git a/py311/lib/python3.11/site-packages/PIL/ImageWin.py b/py311/lib/python3.11/site-packages/PIL/ImageWin.py new file mode 100644 index 0000000000000000000000000000000000000000..98c28f29f1dbbb069b68dc9359051b6629148f0d --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImageWin.py @@ -0,0 +1,247 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a Windows DIB display interface +# +# History: +# 1996-05-20 fl Created +# 1996-09-20 fl Fixed subregion exposure +# 1997-09-21 fl Added draw primitive (for tzPrint) +# 2003-05-21 fl Added experimental Window/ImageWindow classes +# 2003-09-05 fl Added fromstring/tostring methods +# +# Copyright (c) Secret Labs AB 1997-2003. +# Copyright (c) Fredrik Lundh 1996-2003. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +from . import Image + + +class HDC: + """ + Wraps an HDC integer. The resulting object can be passed to the + :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose` + methods. + """ + + def __init__(self, dc: int) -> None: + self.dc = dc + + def __int__(self) -> int: + return self.dc + + +class HWND: + """ + Wraps an HWND integer. The resulting object can be passed to the + :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose` + methods, instead of a DC. + """ + + def __init__(self, wnd: int) -> None: + self.wnd = wnd + + def __int__(self) -> int: + return self.wnd + + +class Dib: + """ + A Windows bitmap with the given mode and size. The mode can be one of "1", + "L", "P", or "RGB". + + If the display requires a palette, this constructor creates a suitable + palette and associates it with the image. For an "L" image, 128 graylevels + are allocated. For an "RGB" image, a 6x6x6 colour cube is used, together + with 20 graylevels. + + To make sure that palettes work properly under Windows, you must call the + ``palette`` method upon certain events from Windows. + + :param image: Either a PIL image, or a mode string. If a mode string is + used, a size must also be given. The mode can be one of "1", + "L", "P", or "RGB". + :param size: If the first argument is a mode string, this + defines the size of the image. + """ + + def __init__( + self, image: Image.Image | str, size: tuple[int, int] | None = None + ) -> None: + if isinstance(image, str): + mode = image + image = "" + if size is None: + msg = "If first argument is mode, size is required" + raise ValueError(msg) + else: + mode = image.mode + size = image.size + if mode not in ["1", "L", "P", "RGB"]: + mode = Image.getmodebase(mode) + self.image = Image.core.display(mode, size) + self.mode = mode + self.size = size + if image: + assert not isinstance(image, str) + self.paste(image) + + def expose(self, handle: int | HDC | HWND) -> None: + """ + Copy the bitmap contents to a device context. + + :param handle: Device context (HDC), cast to a Python integer, or an + HDC or HWND instance. In PythonWin, you can use + ``CDC.GetHandleAttrib()`` to get a suitable handle. + """ + handle_int = int(handle) + if isinstance(handle, HWND): + dc = self.image.getdc(handle_int) + try: + self.image.expose(dc) + finally: + self.image.releasedc(handle_int, dc) + else: + self.image.expose(handle_int) + + def draw( + self, + handle: int | HDC | HWND, + dst: tuple[int, int, int, int], + src: tuple[int, int, int, int] | None = None, + ) -> None: + """ + Same as expose, but allows you to specify where to draw the image, and + what part of it to draw. 
+ + The destination and source areas are given as 4-tuple rectangles. If + the source is omitted, the entire image is copied. If the source and + the destination have different sizes, the image is resized as + necessary. + """ + if src is None: + src = (0, 0) + self.size + handle_int = int(handle) + if isinstance(handle, HWND): + dc = self.image.getdc(handle_int) + try: + self.image.draw(dc, dst, src) + finally: + self.image.releasedc(handle_int, dc) + else: + self.image.draw(handle_int, dst, src) + + def query_palette(self, handle: int | HDC | HWND) -> int: + """ + Installs the palette associated with the image in the given device + context. + + This method should be called upon **QUERYNEWPALETTE** and + **PALETTECHANGED** events from Windows. If this method returns a + non-zero value, one or more display palette entries were changed, and + the image should be redrawn. + + :param handle: Device context (HDC), cast to a Python integer, or an + HDC or HWND instance. + :return: The number of entries that were changed (if one or more entries, + this indicates that the image should be redrawn). + """ + handle_int = int(handle) + if isinstance(handle, HWND): + handle = self.image.getdc(handle_int) + try: + result = self.image.query_palette(handle) + finally: + self.image.releasedc(handle, handle) + else: + result = self.image.query_palette(handle_int) + return result + + def paste( + self, im: Image.Image, box: tuple[int, int, int, int] | None = None + ) -> None: + """ + Paste a PIL image into the bitmap image. + + :param im: A PIL image. The size must match the target region. + If the mode does not match, the image is converted to the + mode of the bitmap image. + :param box: A 4-tuple defining the left, upper, right, and + lower pixel coordinate. See :ref:`coordinate-system`. If + None is given instead of a tuple, all of the image is + assumed. + """ + im.load() + if self.mode != im.mode: + im = im.convert(self.mode) + if box: + self.image.paste(im.im, box) + else: + self.image.paste(im.im) + + def frombytes(self, buffer: bytes) -> None: + """ + Load display memory contents from byte data. + + :param buffer: A buffer containing display data (usually + data returned from :py:func:`~PIL.ImageWin.Dib.tobytes`) + """ + self.image.frombytes(buffer) + + def tobytes(self) -> bytes: + """ + Copy display memory contents to bytes object. + + :return: A bytes object containing display data. 
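+            The data can be loaded back into the bitmap with
+            :py:func:`~PIL.ImageWin.Dib.frombytes`.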
+ """ + return self.image.tobytes() + + +class Window: + """Create a Window with the given title size.""" + + def __init__( + self, title: str = "PIL", width: int | None = None, height: int | None = None + ) -> None: + self.hwnd = Image.core.createwindow( + title, self.__dispatcher, width or 0, height or 0 + ) + + def __dispatcher(self, action: str, *args: int) -> None: + getattr(self, f"ui_handle_{action}")(*args) + + def ui_handle_clear(self, dc: int, x0: int, y0: int, x1: int, y1: int) -> None: + pass + + def ui_handle_damage(self, x0: int, y0: int, x1: int, y1: int) -> None: + pass + + def ui_handle_destroy(self) -> None: + pass + + def ui_handle_repair(self, dc: int, x0: int, y0: int, x1: int, y1: int) -> None: + pass + + def ui_handle_resize(self, width: int, height: int) -> None: + pass + + def mainloop(self) -> None: + Image.core.eventloop() + + +class ImageWindow(Window): + """Create an image window which displays the given image.""" + + def __init__(self, image: Image.Image | Dib, title: str = "PIL") -> None: + if not isinstance(image, Dib): + image = Dib(image) + self.image = image + width, height = image.size + super().__init__(title, width=width, height=height) + + def ui_handle_repair(self, dc: int, x0: int, y0: int, x1: int, y1: int) -> None: + self.image.draw(dc, (x0, y0, x1, y1)) diff --git a/py311/lib/python3.11/site-packages/PIL/ImtImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/ImtImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..c4eccee3423dc6c273bdc1ea88eda5ef4e17cf7d --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/ImtImagePlugin.py @@ -0,0 +1,103 @@ +# +# The Python Imaging Library. +# $Id$ +# +# IM Tools support for PIL +# +# history: +# 1996-05-27 fl Created (read 8-bit images only) +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.2) +# +# Copyright (c) Secret Labs AB 1997-2001. +# Copyright (c) Fredrik Lundh 1996-2001. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import re + +from . import Image, ImageFile + +# +# -------------------------------------------------------------------- + +field = re.compile(rb"([a-z]*) ([^ \r\n]*)") + + +## +# Image plugin for IM Tools images. + + +class ImtImageFile(ImageFile.ImageFile): + format = "IMT" + format_description = "IM Tools" + + def _open(self) -> None: + # Quick rejection: if there's not a LF among the first + # 100 bytes, this is (probably) not a text header. 
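+        # (The header is a sequence of plain-text "key value" lines; a
+        # form feed byte, 0x0C, marks where the raw pixel data begins.)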
+ + assert self.fp is not None + + buffer = self.fp.read(100) + if b"\n" not in buffer: + msg = "not an IM file" + raise SyntaxError(msg) + + xsize = ysize = 0 + + while True: + if buffer: + s = buffer[:1] + buffer = buffer[1:] + else: + s = self.fp.read(1) + if not s: + break + + if s == b"\x0c": + # image data begins + self.tile = [ + ImageFile._Tile( + "raw", + (0, 0) + self.size, + self.fp.tell() - len(buffer), + self.mode, + ) + ] + + break + + else: + # read key/value pair + if b"\n" not in buffer: + buffer += self.fp.read(100) + lines = buffer.split(b"\n") + s += lines.pop(0) + buffer = b"\n".join(lines) + if len(s) == 1 or len(s) > 100: + break + if s[0] == ord(b"*"): + continue # comment + + m = field.match(s) + if not m: + break + k, v = m.group(1, 2) + if k == b"width": + xsize = int(v) + self._size = xsize, ysize + elif k == b"height": + ysize = int(v) + self._size = xsize, ysize + elif k == b"pixel" and v == b"n8": + self._mode = "L" + + +# +# -------------------------------------------------------------------- + +Image.register_open(ImtImageFile.format, ImtImageFile) + +# +# no extension registered (".im" is simply too common) diff --git a/py311/lib/python3.11/site-packages/PIL/IptcImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/IptcImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..fc024d668f2f42c4b0ea4c90c702ccc6d7c528f0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/IptcImagePlugin.py @@ -0,0 +1,250 @@ +# +# The Python Imaging Library. +# $Id$ +# +# IPTC/NAA file handling +# +# history: +# 1995-10-01 fl Created +# 1998-03-09 fl Cleaned up and added to PIL +# 2002-06-18 fl Added getiptcinfo helper +# +# Copyright (c) Secret Labs AB 1997-2002. +# Copyright (c) Fredrik Lundh 1995. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +from collections.abc import Sequence +from io import BytesIO +from typing import cast + +from . import Image, ImageFile +from ._binary import i16be as i16 +from ._binary import i32be as i32 +from ._deprecate import deprecate + +COMPRESSION = {1: "raw", 5: "jpeg"} + + +def __getattr__(name: str) -> bytes: + if name == "PAD": + deprecate("IptcImagePlugin.PAD", 12) + return b"\0\0\0\0" + msg = f"module '{__name__}' has no attribute '{name}'" + raise AttributeError(msg) + + +# +# Helpers + + +def _i(c: bytes) -> int: + return i32((b"\0\0\0\0" + c)[-4:]) + + +def _i8(c: int | bytes) -> int: + return c if isinstance(c, int) else c[0] + + +def i(c: bytes) -> int: + """.. deprecated:: 10.2.0""" + deprecate("IptcImagePlugin.i", 12) + return _i(c) + + +def dump(c: Sequence[int | bytes]) -> None: + """.. deprecated:: 10.2.0""" + deprecate("IptcImagePlugin.dump", 12) + for i in c: + print(f"{_i8(i):02x}", end=" ") + print() + + +## +# Image plugin for IPTC/NAA datastreams. To read IPTC/NAA fields +# from TIFF and JPEG files, use the getiptcinfo function. 
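+# Fields are stored in the info dictionary under (record, dataset) tuple
+# keys; e.g. (2, 120) is Caption/Abstract in the IIM specification.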
+ + +class IptcImageFile(ImageFile.ImageFile): + format = "IPTC" + format_description = "IPTC/NAA" + + def getint(self, key: tuple[int, int]) -> int: + return _i(self.info[key]) + + def field(self) -> tuple[tuple[int, int] | None, int]: + # + # get a IPTC field header + s = self.fp.read(5) + if not s.strip(b"\x00"): + return None, 0 + + tag = s[1], s[2] + + # syntax + if s[0] != 0x1C or tag[0] not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 240]: + msg = "invalid IPTC/NAA file" + raise SyntaxError(msg) + + # field size + size = s[3] + if size > 132: + msg = "illegal field length in IPTC/NAA file" + raise OSError(msg) + elif size == 128: + size = 0 + elif size > 128: + size = _i(self.fp.read(size - 128)) + else: + size = i16(s, 3) + + return tag, size + + def _open(self) -> None: + # load descriptive fields + while True: + offset = self.fp.tell() + tag, size = self.field() + if not tag or tag == (8, 10): + break + if size: + tagdata = self.fp.read(size) + else: + tagdata = None + if tag in self.info: + if isinstance(self.info[tag], list): + self.info[tag].append(tagdata) + else: + self.info[tag] = [self.info[tag], tagdata] + else: + self.info[tag] = tagdata + + # mode + layers = self.info[(3, 60)][0] + component = self.info[(3, 60)][1] + if (3, 65) in self.info: + id = self.info[(3, 65)][0] - 1 + else: + id = 0 + if layers == 1 and not component: + self._mode = "L" + elif layers == 3 and component: + self._mode = "RGB"[id] + elif layers == 4 and component: + self._mode = "CMYK"[id] + + # size + self._size = self.getint((3, 20)), self.getint((3, 30)) + + # compression + try: + compression = COMPRESSION[self.getint((3, 120))] + except KeyError as e: + msg = "Unknown IPTC image compression" + raise OSError(msg) from e + + # tile + if tag == (8, 10): + self.tile = [ + ImageFile._Tile("iptc", (0, 0) + self.size, offset, compression) + ] + + def load(self) -> Image.core.PixelAccess | None: + if len(self.tile) != 1 or self.tile[0][0] != "iptc": + return ImageFile.ImageFile.load(self) + + offset, compression = self.tile[0][2:] + + self.fp.seek(offset) + + # Copy image data to temporary file + o = BytesIO() + if compression == "raw": + # To simplify access to the extracted file, + # prepend a PPM header + o.write(b"P5\n%d %d\n255\n" % self.size) + while True: + type, size = self.field() + if type != (8, 10): + break + while size > 0: + s = self.fp.read(min(size, 8192)) + if not s: + break + o.write(s) + size -= len(s) + + with Image.open(o) as _im: + _im.load() + self.im = _im.im + self.tile = [] + return Image.Image.load(self) + + +Image.register_open(IptcImageFile.format, IptcImageFile) + +Image.register_extension(IptcImageFile.format, ".iim") + + +def getiptcinfo( + im: ImageFile.ImageFile, +) -> dict[tuple[int, int], bytes | list[bytes]] | None: + """ + Get IPTC information from TIFF, JPEG, or IPTC file. + + :param im: An image containing IPTC data. + :returns: A dictionary containing IPTC information, or None if + no IPTC information block was found. + """ + from . 
import JpegImagePlugin, TiffImagePlugin + + data = None + + info: dict[tuple[int, int], bytes | list[bytes]] = {} + if isinstance(im, IptcImageFile): + # return info dictionary right away + for k, v in im.info.items(): + if isinstance(k, tuple): + info[k] = v + return info + + elif isinstance(im, JpegImagePlugin.JpegImageFile): + # extract the IPTC/NAA resource + photoshop = im.info.get("photoshop") + if photoshop: + data = photoshop.get(0x0404) + + elif isinstance(im, TiffImagePlugin.TiffImageFile): + # get raw data from the IPTC/NAA tag (PhotoShop tags the data + # as 4-byte integers, so we cannot use the get method...) + try: + data = im.tag_v2._tagdata[TiffImagePlugin.IPTC_NAA_CHUNK] + except KeyError: + pass + + if data is None: + return None # no properties + + # create an IptcImagePlugin object without initializing it + class FakeImage: + pass + + fake_im = FakeImage() + fake_im.__class__ = IptcImageFile # type: ignore[assignment] + iptc_im = cast(IptcImageFile, fake_im) + + # parse the IPTC information chunk + iptc_im.info = {} + iptc_im.fp = BytesIO(data) + + try: + iptc_im._open() + except (IndexError, KeyError): + pass # expected failure + + for k, v in iptc_im.info.items(): + if isinstance(k, tuple): + info[k] = v + return info diff --git a/py311/lib/python3.11/site-packages/PIL/Jpeg2KImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/Jpeg2KImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..e0f4ecae595d3f1aef3f529d91efeefa560c5134 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/Jpeg2KImagePlugin.py @@ -0,0 +1,442 @@ +# +# The Python Imaging Library +# $Id$ +# +# JPEG2000 file handling +# +# History: +# 2014-03-12 ajh Created +# 2021-06-30 rogermb Extract dpi information from the 'resc' header box +# +# Copyright (c) 2014 Coriolis Systems Limited +# Copyright (c) 2014 Alastair Houghton +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import io +import os +import struct +from collections.abc import Callable +from typing import IO, cast + +from . import Image, ImageFile, ImagePalette, _binary + + +class BoxReader: + """ + A small helper class to read fields stored in JPEG2000 header boxes + and to easily step into and read sub-boxes. + """ + + def __init__(self, fp: IO[bytes], length: int = -1) -> None: + self.fp = fp + self.has_length = length >= 0 + self.length = length + self.remaining_in_box = -1 + + def _can_read(self, num_bytes: int) -> bool: + if self.has_length and self.fp.tell() + num_bytes > self.length: + # Outside box: ensure we don't read past the known file length + return False + if self.remaining_in_box >= 0: + # Inside box contents: ensure read does not go past box boundaries + return num_bytes <= self.remaining_in_box + else: + return True # No length known, just read + + def _read_bytes(self, num_bytes: int) -> bytes: + if not self._can_read(num_bytes): + msg = "Not enough data in header" + raise SyntaxError(msg) + + data = self.fp.read(num_bytes) + if len(data) < num_bytes: + msg = f"Expected to read {num_bytes} bytes but only got {len(data)}." 
+ raise OSError(msg) + + if self.remaining_in_box > 0: + self.remaining_in_box -= num_bytes + return data + + def read_fields(self, field_format: str) -> tuple[int | bytes, ...]: + size = struct.calcsize(field_format) + data = self._read_bytes(size) + return struct.unpack(field_format, data) + + def read_boxes(self) -> BoxReader: + size = self.remaining_in_box + data = self._read_bytes(size) + return BoxReader(io.BytesIO(data), size) + + def has_next_box(self) -> bool: + if self.has_length: + return self.fp.tell() + self.remaining_in_box < self.length + else: + return True + + def next_box_type(self) -> bytes: + # Skip the rest of the box if it has not been read + if self.remaining_in_box > 0: + self.fp.seek(self.remaining_in_box, os.SEEK_CUR) + self.remaining_in_box = -1 + + # Read the length and type of the next box + lbox, tbox = cast(tuple[int, bytes], self.read_fields(">I4s")) + if lbox == 1: + lbox = cast(int, self.read_fields(">Q")[0]) + hlen = 16 + else: + hlen = 8 + + if lbox < hlen or not self._can_read(lbox - hlen): + msg = "Invalid header length" + raise SyntaxError(msg) + + self.remaining_in_box = lbox - hlen + return tbox + + +def _parse_codestream(fp: IO[bytes]) -> tuple[tuple[int, int], str]: + """Parse the JPEG 2000 codestream to extract the size and component + count from the SIZ marker segment, returning a PIL (size, mode) tuple.""" + + hdr = fp.read(2) + lsiz = _binary.i16be(hdr) + siz = hdr + fp.read(lsiz - 2) + lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, _, _, _, _, csiz = struct.unpack_from( + ">HHIIIIIIIIH", siz + ) + + size = (xsiz - xosiz, ysiz - yosiz) + if csiz == 1: + ssiz = struct.unpack_from(">B", siz, 38) + if (ssiz[0] & 0x7F) + 1 > 8: + mode = "I;16" + else: + mode = "L" + elif csiz == 2: + mode = "LA" + elif csiz == 3: + mode = "RGB" + elif csiz == 4: + mode = "RGBA" + else: + msg = "unable to determine J2K image mode" + raise SyntaxError(msg) + + return size, mode + + +def _res_to_dpi(num: int, denom: int, exp: int) -> float | None: + """Convert JPEG2000's (numerator, denominator, exponent-base-10) resolution, + calculated as (num / denom) * 10^exp and stored in dots per meter, + to floating-point dots per inch.""" + if denom == 0: + return None + return (254 * num * (10**exp)) / (10000 * denom) + + +def _parse_jp2_header( + fp: IO[bytes], +) -> tuple[ + tuple[int, int], + str, + str | None, + tuple[float, float] | None, + ImagePalette.ImagePalette | None, +]: + """Parse the JP2 header box to extract size, component count, + color space information, and optionally DPI information, + returning a (size, mode, mimetype, dpi) tuple.""" + + # Find the JP2 header box + reader = BoxReader(fp) + header = None + mimetype = None + while reader.has_next_box(): + tbox = reader.next_box_type() + + if tbox == b"jp2h": + header = reader.read_boxes() + break + elif tbox == b"ftyp": + if reader.read_fields(">4s")[0] == b"jpx ": + mimetype = "image/jpx" + assert header is not None + + size = None + mode = None + bpc = None + nc = None + dpi = None # 2-tuple of DPI info, or None + palette = None + + while header.has_next_box(): + tbox = header.next_box_type() + + if tbox == b"ihdr": + height, width, nc, bpc = header.read_fields(">IIHB") + assert isinstance(height, int) + assert isinstance(width, int) + assert isinstance(bpc, int) + size = (width, height) + if nc == 1 and (bpc & 0x7F) > 8: + mode = "I;16" + elif nc == 1: + mode = "L" + elif nc == 2: + mode = "LA" + elif nc == 3: + mode = "RGB" + elif nc == 4: + mode = "RGBA" + elif tbox == b"colr" and nc == 4: + meth, _, _, enumcs 
= header.read_fields(">BBBI") + if meth == 1 and enumcs == 12: + mode = "CMYK" + elif tbox == b"pclr" and mode in ("L", "LA"): + ne, npc = header.read_fields(">HB") + assert isinstance(ne, int) + assert isinstance(npc, int) + max_bitdepth = 0 + for bitdepth in header.read_fields(">" + ("B" * npc)): + assert isinstance(bitdepth, int) + if bitdepth > max_bitdepth: + max_bitdepth = bitdepth + if max_bitdepth <= 8: + palette = ImagePalette.ImagePalette("RGBA" if npc == 4 else "RGB") + for i in range(ne): + color: list[int] = [] + for value in header.read_fields(">" + ("B" * npc)): + assert isinstance(value, int) + color.append(value) + palette.getcolor(tuple(color)) + mode = "P" if mode == "L" else "PA" + elif tbox == b"res ": + res = header.read_boxes() + while res.has_next_box(): + tres = res.next_box_type() + if tres == b"resc": + vrcn, vrcd, hrcn, hrcd, vrce, hrce = res.read_fields(">HHHHBB") + assert isinstance(vrcn, int) + assert isinstance(vrcd, int) + assert isinstance(hrcn, int) + assert isinstance(hrcd, int) + assert isinstance(vrce, int) + assert isinstance(hrce, int) + hres = _res_to_dpi(hrcn, hrcd, hrce) + vres = _res_to_dpi(vrcn, vrcd, vrce) + if hres is not None and vres is not None: + dpi = (hres, vres) + break + + if size is None or mode is None: + msg = "Malformed JP2 header" + raise SyntaxError(msg) + + return size, mode, mimetype, dpi, palette + + +## +# Image plugin for JPEG2000 images. + + +class Jpeg2KImageFile(ImageFile.ImageFile): + format = "JPEG2000" + format_description = "JPEG 2000 (ISO 15444)" + + def _open(self) -> None: + sig = self.fp.read(4) + if sig == b"\xff\x4f\xff\x51": + self.codec = "j2k" + self._size, self._mode = _parse_codestream(self.fp) + self._parse_comment() + else: + sig = sig + self.fp.read(8) + + if sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a": + self.codec = "jp2" + header = _parse_jp2_header(self.fp) + self._size, self._mode, self.custom_mimetype, dpi, self.palette = header + if dpi is not None: + self.info["dpi"] = dpi + if self.fp.read(12).endswith(b"jp2c\xff\x4f\xff\x51"): + hdr = self.fp.read(2) + length = _binary.i16be(hdr) + self.fp.seek(length - 2, os.SEEK_CUR) + self._parse_comment() + else: + msg = "not a JPEG 2000 file" + raise SyntaxError(msg) + + self._reduce = 0 + self.layers = 0 + + fd = -1 + length = -1 + + try: + fd = self.fp.fileno() + length = os.fstat(fd).st_size + except Exception: + fd = -1 + try: + pos = self.fp.tell() + self.fp.seek(0, io.SEEK_END) + length = self.fp.tell() + self.fp.seek(pos) + except Exception: + length = -1 + + self.tile = [ + ImageFile._Tile( + "jpeg2k", + (0, 0) + self.size, + 0, + (self.codec, self._reduce, self.layers, fd, length), + ) + ] + + def _parse_comment(self) -> None: + while True: + marker = self.fp.read(2) + if not marker: + break + typ = marker[1] + if typ in (0x90, 0xD9): + # Start of tile or end of codestream + break + hdr = self.fp.read(2) + length = _binary.i16be(hdr) + if typ == 0x64: + # Comment + self.info["comment"] = self.fp.read(length - 2)[2:] + break + else: + self.fp.seek(length - 2, os.SEEK_CUR) + + @property # type: ignore[override] + def reduce( + self, + ) -> ( + Callable[[int | tuple[int, int], tuple[int, int, int, int] | None], Image.Image] + | int + ): + # https://github.com/python-pillow/Pillow/issues/4343 found that the + # new Image 'reduce' method was shadowed by this plugin's 'reduce' + # property. 
This attempts to allow for both scenarios + return self._reduce or super().reduce + + @reduce.setter + def reduce(self, value: int) -> None: + self._reduce = value + + def load(self) -> Image.core.PixelAccess | None: + if self.tile and self._reduce: + power = 1 << self._reduce + adjust = power >> 1 + self._size = ( + int((self.size[0] + adjust) / power), + int((self.size[1] + adjust) / power), + ) + + # Update the reduce and layers settings + t = self.tile[0] + assert isinstance(t[3], tuple) + t3 = (t[3][0], self._reduce, self.layers, t[3][3], t[3][4]) + self.tile = [ImageFile._Tile(t[0], (0, 0) + self.size, t[2], t3)] + + return ImageFile.ImageFile.load(self) + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith( + (b"\xff\x4f\xff\x51", b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a") + ) + + +# ------------------------------------------------------------ +# Save support + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + # Get the keyword arguments + info = im.encoderinfo + + if isinstance(filename, str): + filename = filename.encode() + if filename.endswith(b".j2k") or info.get("no_jp2", False): + kind = "j2k" + else: + kind = "jp2" + + offset = info.get("offset", None) + tile_offset = info.get("tile_offset", None) + tile_size = info.get("tile_size", None) + quality_mode = info.get("quality_mode", "rates") + quality_layers = info.get("quality_layers", None) + if quality_layers is not None and not ( + isinstance(quality_layers, (list, tuple)) + and all( + isinstance(quality_layer, (int, float)) for quality_layer in quality_layers + ) + ): + msg = "quality_layers must be a sequence of numbers" + raise ValueError(msg) + + num_resolutions = info.get("num_resolutions", 0) + cblk_size = info.get("codeblock_size", None) + precinct_size = info.get("precinct_size", None) + irreversible = info.get("irreversible", False) + progression = info.get("progression", "LRCP") + cinema_mode = info.get("cinema_mode", "no") + mct = info.get("mct", 0) + signed = info.get("signed", False) + comment = info.get("comment") + if isinstance(comment, str): + comment = comment.encode() + plt = info.get("plt", False) + + fd = -1 + if hasattr(fp, "fileno"): + try: + fd = fp.fileno() + except Exception: + fd = -1 + + im.encoderconfig = ( + offset, + tile_offset, + tile_size, + quality_mode, + quality_layers, + num_resolutions, + cblk_size, + precinct_size, + irreversible, + progression, + cinema_mode, + mct, + signed, + fd, + comment, + plt, + ) + + ImageFile._save(im, fp, [ImageFile._Tile("jpeg2k", (0, 0) + im.size, 0, kind)]) + + +# ------------------------------------------------------------ +# Registry stuff + + +Image.register_open(Jpeg2KImageFile.format, Jpeg2KImageFile, _accept) +Image.register_save(Jpeg2KImageFile.format, _save) + +Image.register_extensions( + Jpeg2KImageFile.format, [".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c"] +) + +Image.register_mime(Jpeg2KImageFile.format, "image/jp2") diff --git a/py311/lib/python3.11/site-packages/PIL/JpegImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/JpegImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..defe9f773f9215c9f5f31f918edfdaeda6474a16 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/JpegImagePlugin.py @@ -0,0 +1,902 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# JPEG (JFIF) file handling +# +# See "Digital Compression and Coding of Continuous-Tone Still Images, +# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1) +# +# History: +# 1995-09-09 fl Created +# 1995-09-13 fl Added full parser +# 1996-03-25 fl Added hack to use the IJG command line utilities +# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug +# 1996-05-28 fl Added draft support, JFIF version (0.1) +# 1996-12-30 fl Added encoder options, added progression property (0.2) +# 1997-08-27 fl Save mode 1 images as BW (0.3) +# 1998-07-12 fl Added YCbCr to draft and save methods (0.4) +# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1) +# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2) +# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3) +# 2003-04-25 fl Added experimental EXIF decoder (0.5) +# 2003-06-06 fl Added experimental EXIF GPSinfo decoder +# 2003-09-13 fl Extract COM markers +# 2009-09-06 fl Added icc_profile support (from Florian Hoech) +# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6) +# 2009-03-08 fl Added subsampling support (from Justin Huff). +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-1996 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import array +import io +import math +import os +import struct +import subprocess +import sys +import tempfile +import warnings +from typing import IO, Any + +from . import Image, ImageFile +from ._binary import i16be as i16 +from ._binary import i32be as i32 +from ._binary import o8 +from ._binary import o16be as o16 +from ._deprecate import deprecate +from .JpegPresets import presets + +TYPE_CHECKING = False +if TYPE_CHECKING: + from .MpoImagePlugin import MpoImageFile + +# +# Parser + + +def Skip(self: JpegImageFile, marker: int) -> None: + n = i16(self.fp.read(2)) - 2 + ImageFile._safe_read(self.fp, n) + + +def APP(self: JpegImageFile, marker: int) -> None: + # + # Application marker. Store these in the APP dictionary. + # Also look for well-known application markers. 
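+    # (The low nibble of the marker code selects the segment number:
+    # 0xFFE0 is APP0, 0xFFE1 is APP1, ..., 0xFFEF is APP15.)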
+ + n = i16(self.fp.read(2)) - 2 + s = ImageFile._safe_read(self.fp, n) + + app = f"APP{marker & 15}" + + self.app[app] = s # compatibility + self.applist.append((app, s)) + + if marker == 0xFFE0 and s.startswith(b"JFIF"): + # extract JFIF information + self.info["jfif"] = version = i16(s, 5) # version + self.info["jfif_version"] = divmod(version, 256) + # extract JFIF properties + try: + jfif_unit = s[7] + jfif_density = i16(s, 8), i16(s, 10) + except Exception: + pass + else: + if jfif_unit == 1: + self.info["dpi"] = jfif_density + elif jfif_unit == 2: # cm + # 1 dpcm = 2.54 dpi + self.info["dpi"] = tuple(d * 2.54 for d in jfif_density) + self.info["jfif_unit"] = jfif_unit + self.info["jfif_density"] = jfif_density + elif marker == 0xFFE1 and s.startswith(b"Exif\0\0"): + # extract EXIF information + if "exif" in self.info: + self.info["exif"] += s[6:] + else: + self.info["exif"] = s + self._exif_offset = self.fp.tell() - n + 6 + elif marker == 0xFFE1 and s.startswith(b"http://ns.adobe.com/xap/1.0/\x00"): + self.info["xmp"] = s.split(b"\x00", 1)[1] + elif marker == 0xFFE2 and s.startswith(b"FPXR\0"): + # extract FlashPix information (incomplete) + self.info["flashpix"] = s # FIXME: value will change + elif marker == 0xFFE2 and s.startswith(b"ICC_PROFILE\0"): + # Since an ICC profile can be larger than the maximum size of + # a JPEG marker (64K), we need provisions to split it into + # multiple markers. The format defined by the ICC specifies + # one or more APP2 markers containing the following data: + # Identifying string ASCII "ICC_PROFILE\0" (12 bytes) + # Marker sequence number 1, 2, etc (1 byte) + # Number of markers Total of APP2's used (1 byte) + # Profile data (remainder of APP2 data) + # Decoders should use the marker sequence numbers to + # reassemble the profile, rather than assuming that the APP2 + # markers appear in the correct sequence. + self.icclist.append(s) + elif marker == 0xFFED and s.startswith(b"Photoshop 3.0\x00"): + # parse the image resource block + offset = 14 + photoshop = self.info.setdefault("photoshop", {}) + while s[offset : offset + 4] == b"8BIM": + try: + offset += 4 + # resource code + code = i16(s, offset) + offset += 2 + # resource name (usually empty) + name_len = s[offset] + # name = s[offset+1:offset+1+name_len] + offset += 1 + name_len + offset += offset & 1 # align + # resource data block + size = i32(s, offset) + offset += 4 + data = s[offset : offset + size] + if code == 0x03ED: # ResolutionInfo + photoshop[code] = { + "XResolution": i32(data, 0) / 65536, + "DisplayedUnitsX": i16(data, 4), + "YResolution": i32(data, 8) / 65536, + "DisplayedUnitsY": i16(data, 12), + } + else: + photoshop[code] = data + offset += size + offset += offset & 1 # align + except struct.error: + break # insufficient data + + elif marker == 0xFFEE and s.startswith(b"Adobe"): + self.info["adobe"] = i16(s, 5) + # extract Adobe custom properties + try: + adobe_transform = s[11] + except IndexError: + pass + else: + self.info["adobe_transform"] = adobe_transform + elif marker == 0xFFE2 and s.startswith(b"MPF\0"): + # extract MPO information + self.info["mp"] = s[4:] + # offset is current location minus buffer size + # plus constant header size + self.info["mpoffset"] = self.fp.tell() - n + 4 + + +def COM(self: JpegImageFile, marker: int) -> None: + # + # Comment marker. Store these in the APP dictionary. 
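+    # Only the last COM segment read ends up in info["comment"]; each
+    # one is also appended to applist for later inspection.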
+ n = i16(self.fp.read(2)) - 2 + s = ImageFile._safe_read(self.fp, n) + + self.info["comment"] = s + self.app["COM"] = s # compatibility + self.applist.append(("COM", s)) + + +def SOF(self: JpegImageFile, marker: int) -> None: + # + # Start of frame marker. Defines the size and mode of the + # image. JPEG is colour blind, so we use some simple + # heuristics to map the number of layers to an appropriate + # mode. Note that this could be made a bit brighter, by + # looking for JFIF and Adobe APP markers. + + n = i16(self.fp.read(2)) - 2 + s = ImageFile._safe_read(self.fp, n) + self._size = i16(s, 3), i16(s, 1) + + self.bits = s[0] + if self.bits != 8: + msg = f"cannot handle {self.bits}-bit layers" + raise SyntaxError(msg) + + self.layers = s[5] + if self.layers == 1: + self._mode = "L" + elif self.layers == 3: + self._mode = "RGB" + elif self.layers == 4: + self._mode = "CMYK" + else: + msg = f"cannot handle {self.layers}-layer images" + raise SyntaxError(msg) + + if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]: + self.info["progressive"] = self.info["progression"] = 1 + + if self.icclist: + # fixup icc profile + self.icclist.sort() # sort by sequence number + if self.icclist[0][13] == len(self.icclist): + profile = [p[14:] for p in self.icclist] + icc_profile = b"".join(profile) + else: + icc_profile = None # wrong number of fragments + self.info["icc_profile"] = icc_profile + self.icclist = [] + + for i in range(6, len(s), 3): + t = s[i : i + 3] + # 4-tuples: id, vsamp, hsamp, qtable + self.layer.append((t[0], t[1] // 16, t[1] & 15, t[2])) + + +def DQT(self: JpegImageFile, marker: int) -> None: + # + # Define quantization table. Note that there might be more + # than one table in each marker. + + # FIXME: The quantization tables can be used to estimate the + # compression quality. 
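+    # Each table starts with a single byte: the high nibble gives the
+    # value precision (0 for 8-bit, otherwise 16-bit) and the low nibble
+    # the table id. The 64 values follow in zigzag order and are
+    # unshuffled below using zigzag_index.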
+ + n = i16(self.fp.read(2)) - 2 + s = ImageFile._safe_read(self.fp, n) + while len(s): + v = s[0] + precision = 1 if (v // 16 == 0) else 2 # in bytes + qt_length = 1 + precision * 64 + if len(s) < qt_length: + msg = "bad quantization table marker" + raise SyntaxError(msg) + data = array.array("B" if precision == 1 else "H", s[1:qt_length]) + if sys.byteorder == "little" and precision > 1: + data.byteswap() # the values are always big-endian + self.quantization[v & 15] = [data[i] for i in zigzag_index] + s = s[qt_length:] + + +# +# JPEG marker table + +MARKER = { + 0xFFC0: ("SOF0", "Baseline DCT", SOF), + 0xFFC1: ("SOF1", "Extended Sequential DCT", SOF), + 0xFFC2: ("SOF2", "Progressive DCT", SOF), + 0xFFC3: ("SOF3", "Spatial lossless", SOF), + 0xFFC4: ("DHT", "Define Huffman table", Skip), + 0xFFC5: ("SOF5", "Differential sequential DCT", SOF), + 0xFFC6: ("SOF6", "Differential progressive DCT", SOF), + 0xFFC7: ("SOF7", "Differential spatial", SOF), + 0xFFC8: ("JPG", "Extension", None), + 0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF), + 0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF), + 0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF), + 0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip), + 0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF), + 0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF), + 0xFFCF: ("SOF15", "Differential spatial (AC)", SOF), + 0xFFD0: ("RST0", "Restart 0", None), + 0xFFD1: ("RST1", "Restart 1", None), + 0xFFD2: ("RST2", "Restart 2", None), + 0xFFD3: ("RST3", "Restart 3", None), + 0xFFD4: ("RST4", "Restart 4", None), + 0xFFD5: ("RST5", "Restart 5", None), + 0xFFD6: ("RST6", "Restart 6", None), + 0xFFD7: ("RST7", "Restart 7", None), + 0xFFD8: ("SOI", "Start of image", None), + 0xFFD9: ("EOI", "End of image", None), + 0xFFDA: ("SOS", "Start of scan", Skip), + 0xFFDB: ("DQT", "Define quantization table", DQT), + 0xFFDC: ("DNL", "Define number of lines", Skip), + 0xFFDD: ("DRI", "Define restart interval", Skip), + 0xFFDE: ("DHP", "Define hierarchical progression", SOF), + 0xFFDF: ("EXP", "Expand reference component", Skip), + 0xFFE0: ("APP0", "Application segment 0", APP), + 0xFFE1: ("APP1", "Application segment 1", APP), + 0xFFE2: ("APP2", "Application segment 2", APP), + 0xFFE3: ("APP3", "Application segment 3", APP), + 0xFFE4: ("APP4", "Application segment 4", APP), + 0xFFE5: ("APP5", "Application segment 5", APP), + 0xFFE6: ("APP6", "Application segment 6", APP), + 0xFFE7: ("APP7", "Application segment 7", APP), + 0xFFE8: ("APP8", "Application segment 8", APP), + 0xFFE9: ("APP9", "Application segment 9", APP), + 0xFFEA: ("APP10", "Application segment 10", APP), + 0xFFEB: ("APP11", "Application segment 11", APP), + 0xFFEC: ("APP12", "Application segment 12", APP), + 0xFFED: ("APP13", "Application segment 13", APP), + 0xFFEE: ("APP14", "Application segment 14", APP), + 0xFFEF: ("APP15", "Application segment 15", APP), + 0xFFF0: ("JPG0", "Extension 0", None), + 0xFFF1: ("JPG1", "Extension 1", None), + 0xFFF2: ("JPG2", "Extension 2", None), + 0xFFF3: ("JPG3", "Extension 3", None), + 0xFFF4: ("JPG4", "Extension 4", None), + 0xFFF5: ("JPG5", "Extension 5", None), + 0xFFF6: ("JPG6", "Extension 6", None), + 0xFFF7: ("JPG7", "Extension 7", None), + 0xFFF8: ("JPG8", "Extension 8", None), + 0xFFF9: ("JPG9", "Extension 9", None), + 0xFFFA: ("JPG10", "Extension 10", None), + 0xFFFB: ("JPG11", "Extension 11", None), + 0xFFFC: ("JPG12", "Extension 12", None), + 0xFFFD: ("JPG13", "Extension 13", None), + 0xFFFE: ("COM", "Comment", COM), +} + + 
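+# A minimal sketch of how the table above drives parsing (illustrative,
+# not part of the module): JpegImageFile._open reads a marker code from
+# the stream, looks it up here, and dispatches when a handler is defined:
+#
+#     name, description, handler = MARKER[0xFFDB]  # -> ("DQT", ..., DQT)
+#     if handler is not None:
+#         handler(self, 0xFFDB)
+#
+# Entries mapped to None, such as SOI, EOI and RST0-7, have no handler;
+# the reader's main loop deals with them directly.
+
+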
+def _accept(prefix: bytes) -> bool: + # Magic number was taken from https://en.wikipedia.org/wiki/JPEG + return prefix.startswith(b"\xff\xd8\xff") + + +## +# Image plugin for JPEG and JFIF images. + + +class JpegImageFile(ImageFile.ImageFile): + format = "JPEG" + format_description = "JPEG (ISO 10918)" + + def _open(self) -> None: + s = self.fp.read(3) + + if not _accept(s): + msg = "not a JPEG file" + raise SyntaxError(msg) + s = b"\xff" + + # Create attributes + self.bits = self.layers = 0 + self._exif_offset = 0 + + # JPEG specifics (internal) + self.layer: list[tuple[int, int, int, int]] = [] + self._huffman_dc: dict[Any, Any] = {} + self._huffman_ac: dict[Any, Any] = {} + self.quantization: dict[int, list[int]] = {} + self.app: dict[str, bytes] = {} # compatibility + self.applist: list[tuple[str, bytes]] = [] + self.icclist: list[bytes] = [] + + while True: + i = s[0] + if i == 0xFF: + s = s + self.fp.read(1) + i = i16(s) + else: + # Skip non-0xFF junk + s = self.fp.read(1) + continue + + if i in MARKER: + name, description, handler = MARKER[i] + if handler is not None: + handler(self, i) + if i == 0xFFDA: # start of scan + rawmode = self.mode + if self.mode == "CMYK": + rawmode = "CMYK;I" # assume adobe conventions + self.tile = [ + ImageFile._Tile("jpeg", (0, 0) + self.size, 0, (rawmode, "")) + ] + # self.__offset = self.fp.tell() + break + s = self.fp.read(1) + elif i in {0, 0xFFFF}: + # padded marker or junk; move on + s = b"\xff" + elif i == 0xFF00: # Skip extraneous data (escaped 0xFF) + s = self.fp.read(1) + else: + msg = "no marker found" + raise SyntaxError(msg) + + self._read_dpi_from_exif() + + def __getattr__(self, name: str) -> Any: + if name in ("huffman_ac", "huffman_dc"): + deprecate(name, 12) + return getattr(self, "_" + name) + raise AttributeError(name) + + def __getstate__(self) -> list[Any]: + return super().__getstate__() + [self.layers, self.layer] + + def __setstate__(self, state: list[Any]) -> None: + self.layers, self.layer = state[6:] + super().__setstate__(state) + + def load_read(self, read_bytes: int) -> bytes: + """ + internal: read more image data + For premature EOF and LOAD_TRUNCATED_IMAGES adds EOI marker + so libjpeg can finish decoding + """ + s = self.fp.read(read_bytes) + + if not s and ImageFile.LOAD_TRUNCATED_IMAGES and not hasattr(self, "_ended"): + # Premature EOF. 
+ # Pretend file is finished adding EOI marker + self._ended = True + return b"\xff\xd9" + + return s + + def draft( + self, mode: str | None, size: tuple[int, int] | None + ) -> tuple[str, tuple[int, int, float, float]] | None: + if len(self.tile) != 1: + return None + + # Protect from second call + if self.decoderconfig: + return None + + d, e, o, a = self.tile[0] + scale = 1 + original_size = self.size + + assert isinstance(a, tuple) + if a[0] == "RGB" and mode in ["L", "YCbCr"]: + self._mode = mode + a = mode, "" + + if size: + scale = min(self.size[0] // size[0], self.size[1] // size[1]) + for s in [8, 4, 2, 1]: + if scale >= s: + break + assert e is not None + e = ( + e[0], + e[1], + (e[2] - e[0] + s - 1) // s + e[0], + (e[3] - e[1] + s - 1) // s + e[1], + ) + self._size = ((self.size[0] + s - 1) // s, (self.size[1] + s - 1) // s) + scale = s + + self.tile = [ImageFile._Tile(d, e, o, a)] + self.decoderconfig = (scale, 0) + + box = (0, 0, original_size[0] / scale, original_size[1] / scale) + return self.mode, box + + def load_djpeg(self) -> None: + # ALTERNATIVE: handle JPEGs via the IJG command line utilities + + f, path = tempfile.mkstemp() + os.close(f) + if os.path.exists(self.filename): + subprocess.check_call(["djpeg", "-outfile", path, self.filename]) + else: + try: + os.unlink(path) + except OSError: + pass + + msg = "Invalid Filename" + raise ValueError(msg) + + try: + with Image.open(path) as _im: + _im.load() + self.im = _im.im + finally: + try: + os.unlink(path) + except OSError: + pass + + self._mode = self.im.mode + self._size = self.im.size + + self.tile = [] + + def _getexif(self) -> dict[int, Any] | None: + return _getexif(self) + + def _read_dpi_from_exif(self) -> None: + # If DPI isn't in JPEG header, fetch from EXIF + if "dpi" in self.info or "exif" not in self.info: + return + try: + exif = self.getexif() + resolution_unit = exif[0x0128] + x_resolution = exif[0x011A] + try: + dpi = float(x_resolution[0]) / x_resolution[1] + except TypeError: + dpi = x_resolution + if math.isnan(dpi): + msg = "DPI is not a number" + raise ValueError(msg) + if resolution_unit == 3: # cm + # 1 dpcm = 2.54 dpi + dpi *= 2.54 + self.info["dpi"] = dpi, dpi + except ( + struct.error, # truncated EXIF + KeyError, # dpi not included + SyntaxError, # invalid/unreadable EXIF + TypeError, # dpi is an invalid float + ValueError, # dpi is an invalid float + ZeroDivisionError, # invalid dpi rational value + ): + self.info["dpi"] = 72, 72 + + def _getmp(self) -> dict[int, Any] | None: + return _getmp(self) + + +def _getexif(self: JpegImageFile) -> dict[int, Any] | None: + if "exif" not in self.info: + return None + return self.getexif()._get_merged_dict() + + +def _getmp(self: JpegImageFile) -> dict[int, Any] | None: + # Extract MP information. This method was inspired by the "highly + # experimental" _getexif version that's been in use for years now, + # itself based on the ImageFileDirectory class in the TIFF plugin. + + # The MP record essentially consists of a TIFF file embedded in a JPEG + # application marker. + try: + data = self.info["mp"] + except KeyError: + return None + file_contents = io.BytesIO(data) + head = file_contents.read(8) + endianness = ">" if head.startswith(b"\x4d\x4d\x00\x2a") else "<" + # process dictionary + from . 
import TiffImagePlugin + + try: + info = TiffImagePlugin.ImageFileDirectory_v2(head) + file_contents.seek(info.next) + info.load(file_contents) + mp = dict(info) + except Exception as e: + msg = "malformed MP Index (unreadable directory)" + raise SyntaxError(msg) from e + # it's an error not to have a number of images + try: + quant = mp[0xB001] + except KeyError as e: + msg = "malformed MP Index (no number of images)" + raise SyntaxError(msg) from e + # get MP entries + mpentries = [] + try: + rawmpentries = mp[0xB002] + for entrynum in range(quant): + unpackedentry = struct.unpack_from( + f"{endianness}LLLHH", rawmpentries, entrynum * 16 + ) + labels = ("Attribute", "Size", "DataOffset", "EntryNo1", "EntryNo2") + mpentry = dict(zip(labels, unpackedentry)) + mpentryattr = { + "DependentParentImageFlag": bool(mpentry["Attribute"] & (1 << 31)), + "DependentChildImageFlag": bool(mpentry["Attribute"] & (1 << 30)), + "RepresentativeImageFlag": bool(mpentry["Attribute"] & (1 << 29)), + "Reserved": (mpentry["Attribute"] & (3 << 27)) >> 27, + "ImageDataFormat": (mpentry["Attribute"] & (7 << 24)) >> 24, + "MPType": mpentry["Attribute"] & 0x00FFFFFF, + } + if mpentryattr["ImageDataFormat"] == 0: + mpentryattr["ImageDataFormat"] = "JPEG" + else: + msg = "unsupported picture format in MPO" + raise SyntaxError(msg) + mptypemap = { + 0x000000: "Undefined", + 0x010001: "Large Thumbnail (VGA Equivalent)", + 0x010002: "Large Thumbnail (Full HD Equivalent)", + 0x020001: "Multi-Frame Image (Panorama)", + 0x020002: "Multi-Frame Image: (Disparity)", + 0x020003: "Multi-Frame Image: (Multi-Angle)", + 0x030000: "Baseline MP Primary Image", + } + mpentryattr["MPType"] = mptypemap.get(mpentryattr["MPType"], "Unknown") + mpentry["Attribute"] = mpentryattr + mpentries.append(mpentry) + mp[0xB002] = mpentries + except KeyError as e: + msg = "malformed MP Index (bad MP Entry)" + raise SyntaxError(msg) from e + # Next we should try and parse the individual image unique ID list; + # we don't because I've never seen this actually used in a real MPO + # file and so can't test it. + return mp + + +# -------------------------------------------------------------------- +# stuff to save JPEG files + +RAWMODE = { + "1": "L", + "L": "L", + "RGB": "RGB", + "RGBX": "RGB", + "CMYK": "CMYK;I", # assume adobe conventions + "YCbCr": "YCbCr", +} + +# fmt: off +zigzag_index = ( + 0, 1, 5, 6, 14, 15, 27, 28, + 2, 4, 7, 13, 16, 26, 29, 42, + 3, 8, 12, 17, 25, 30, 41, 43, + 9, 11, 18, 24, 31, 40, 44, 53, + 10, 19, 23, 32, 39, 45, 52, 54, + 20, 22, 33, 38, 46, 51, 55, 60, + 21, 34, 37, 47, 50, 56, 59, 61, + 35, 36, 48, 49, 57, 58, 62, 63, +) + +samplings = { + (1, 1, 1, 1, 1, 1): 0, + (2, 1, 1, 1, 1, 1): 1, + (2, 2, 1, 1, 1, 1): 2, +} +# fmt: on + + +def get_sampling(im: Image.Image) -> int: + # There's no subsampling when images have only 1 layer + # (grayscale images) or when they are CMYK (4 layers), + # so set subsampling to the default value. + # + # NOTE: currently Pillow can't encode JPEG to YCCK format. + # If YCCK support is added in the future, subsampling code will have + # to be updated (here and in JpegEncode.c) to deal with 4 layers. 
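+    # The keys of the module-level samplings dict are the two sampling
+    # factors of each of the three components, concatenated:
+    # (1, 1, 1, 1, 1, 1) is 4:4:4, (2, 1, ...) is 4:2:2, (2, 2, ...) 4:2:0.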
+ if not isinstance(im, JpegImageFile) or im.layers in (1, 4): + return -1 + sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3] + return samplings.get(sampling, -1) + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + if im.width == 0 or im.height == 0: + msg = "cannot write empty image as JPEG" + raise ValueError(msg) + + try: + rawmode = RAWMODE[im.mode] + except KeyError as e: + msg = f"cannot write mode {im.mode} as JPEG" + raise OSError(msg) from e + + info = im.encoderinfo + + dpi = [round(x) for x in info.get("dpi", (0, 0))] + + quality = info.get("quality", -1) + subsampling = info.get("subsampling", -1) + qtables = info.get("qtables") + + if quality == "keep": + quality = -1 + subsampling = "keep" + qtables = "keep" + elif quality in presets: + preset = presets[quality] + quality = -1 + subsampling = preset.get("subsampling", -1) + qtables = preset.get("quantization") + elif not isinstance(quality, int): + msg = "Invalid quality setting" + raise ValueError(msg) + else: + if subsampling in presets: + subsampling = presets[subsampling].get("subsampling", -1) + if isinstance(qtables, str) and qtables in presets: + qtables = presets[qtables].get("quantization") + + if subsampling == "4:4:4": + subsampling = 0 + elif subsampling == "4:2:2": + subsampling = 1 + elif subsampling == "4:2:0": + subsampling = 2 + elif subsampling == "4:1:1": + # For compatibility. Before Pillow 4.3, 4:1:1 actually meant 4:2:0. + # Set 4:2:0 if someone is still using that value. + subsampling = 2 + elif subsampling == "keep": + if im.format != "JPEG": + msg = "Cannot use 'keep' when original image is not a JPEG" + raise ValueError(msg) + subsampling = get_sampling(im) + + def validate_qtables( + qtables: ( + str | tuple[list[int], ...] 
| list[list[int]] | dict[int, list[int]] | None + ), + ) -> list[list[int]] | None: + if qtables is None: + return qtables + if isinstance(qtables, str): + try: + lines = [ + int(num) + for line in qtables.splitlines() + for num in line.split("#", 1)[0].split() + ] + except ValueError as e: + msg = "Invalid quantization table" + raise ValueError(msg) from e + else: + qtables = [lines[s : s + 64] for s in range(0, len(lines), 64)] + if isinstance(qtables, (tuple, list, dict)): + if isinstance(qtables, dict): + qtables = [ + qtables[key] for key in range(len(qtables)) if key in qtables + ] + elif isinstance(qtables, tuple): + qtables = list(qtables) + if not (0 < len(qtables) < 5): + msg = "None or too many quantization tables" + raise ValueError(msg) + for idx, table in enumerate(qtables): + try: + if len(table) != 64: + msg = "Invalid quantization table" + raise TypeError(msg) + table_array = array.array("H", table) + except TypeError as e: + msg = "Invalid quantization table" + raise ValueError(msg) from e + else: + qtables[idx] = list(table_array) + return qtables + + if qtables == "keep": + if im.format != "JPEG": + msg = "Cannot use 'keep' when original image is not a JPEG" + raise ValueError(msg) + qtables = getattr(im, "quantization", None) + qtables = validate_qtables(qtables) + + extra = info.get("extra", b"") + + MAX_BYTES_IN_MARKER = 65533 + if xmp := info.get("xmp"): + overhead_len = 29 # b"http://ns.adobe.com/xap/1.0/\x00" + max_data_bytes_in_marker = MAX_BYTES_IN_MARKER - overhead_len + if len(xmp) > max_data_bytes_in_marker: + msg = "XMP data is too long" + raise ValueError(msg) + size = o16(2 + overhead_len + len(xmp)) + extra += b"\xff\xe1" + size + b"http://ns.adobe.com/xap/1.0/\x00" + xmp + + if icc_profile := info.get("icc_profile"): + overhead_len = 14 # b"ICC_PROFILE\0" + o8(i) + o8(len(markers)) + max_data_bytes_in_marker = MAX_BYTES_IN_MARKER - overhead_len + markers = [] + while icc_profile: + markers.append(icc_profile[:max_data_bytes_in_marker]) + icc_profile = icc_profile[max_data_bytes_in_marker:] + i = 1 + for marker in markers: + size = o16(2 + overhead_len + len(marker)) + extra += ( + b"\xff\xe2" + + size + + b"ICC_PROFILE\0" + + o8(i) + + o8(len(markers)) + + marker + ) + i += 1 + + comment = info.get("comment", im.info.get("comment")) + + # "progressive" is the official name, but older documentation + # says "progression" + # FIXME: issue a warning if the wrong form is used (post-1.1.7) + progressive = info.get("progressive", False) or info.get("progression", False) + + optimize = info.get("optimize", False) + + exif = info.get("exif", b"") + if isinstance(exif, Image.Exif): + exif = exif.tobytes() + if len(exif) > MAX_BYTES_IN_MARKER: + msg = "EXIF data is too long" + raise ValueError(msg) + + # get keyword arguments + im.encoderconfig = ( + quality, + progressive, + info.get("smooth", 0), + optimize, + info.get("keep_rgb", False), + info.get("streamtype", 0), + dpi, + subsampling, + info.get("restart_marker_blocks", 0), + info.get("restart_marker_rows", 0), + qtables, + comment, + extra, + exif, + ) + + # if we optimize, libjpeg needs a buffer big enough to hold the whole image + # in a shot. Guessing on the size, at im.size bytes. (raw pixel size is + # channels*size, this is a value that's been used in a django patch. 
+ # https://github.com/matthewwithanm/django-imagekit/issues/50 + if optimize or progressive: + # CMYK can be bigger + if im.mode == "CMYK": + bufsize = 4 * im.size[0] * im.size[1] + # keep sets quality to -1, but the actual value may be high. + elif quality >= 95 or quality == -1: + bufsize = 2 * im.size[0] * im.size[1] + else: + bufsize = im.size[0] * im.size[1] + if exif: + bufsize += len(exif) + 5 + if extra: + bufsize += len(extra) + 1 + else: + # The EXIF info needs to be written as one block, + APP1, + one spare byte. + # Ensure that our buffer is big enough. Same with the icc_profile block. + bufsize = max(len(exif) + 5, len(extra) + 1) + + ImageFile._save( + im, fp, [ImageFile._Tile("jpeg", (0, 0) + im.size, 0, rawmode)], bufsize + ) + + +def _save_cjpeg(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + # ALTERNATIVE: handle JPEGs via the IJG command line utilities. + tempfile = im._dump() + subprocess.check_call(["cjpeg", "-outfile", filename, tempfile]) + try: + os.unlink(tempfile) + except OSError: + pass + + +## +# Factory for making JPEG and MPO instances +def jpeg_factory( + fp: IO[bytes], filename: str | bytes | None = None +) -> JpegImageFile | MpoImageFile: + im = JpegImageFile(fp, filename) + try: + mpheader = im._getmp() + if mpheader is not None and mpheader[45057] > 1: + for segment, content in im.applist: + if segment == "APP1" and b' hdrgm:Version="' in content: + # Ultra HDR images are not yet supported + return im + # It's actually an MPO + from .MpoImagePlugin import MpoImageFile + + # Don't reload everything, just convert it. + im = MpoImageFile.adopt(im, mpheader) + except (TypeError, IndexError): + # It is really a JPEG + pass + except SyntaxError: + warnings.warn( + "Image appears to be a malformed MPO file, it will be " + "interpreted as a base JPEG file" + ) + return im + + +# --------------------------------------------------------------------- +# Registry stuff + +Image.register_open(JpegImageFile.format, jpeg_factory, _accept) +Image.register_save(JpegImageFile.format, _save) + +Image.register_extensions(JpegImageFile.format, [".jfif", ".jpe", ".jpg", ".jpeg"]) + +Image.register_mime(JpegImageFile.format, "image/jpeg") diff --git a/py311/lib/python3.11/site-packages/PIL/JpegPresets.py b/py311/lib/python3.11/site-packages/PIL/JpegPresets.py new file mode 100644 index 0000000000000000000000000000000000000000..d0e64a35ee1b6fe3ac6da792682a3129253993bb --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/JpegPresets.py @@ -0,0 +1,242 @@ +""" +JPEG quality settings equivalent to the Photoshop settings. +Can be used when saving JPEG files. + +The following presets are available by default: +``web_low``, ``web_medium``, ``web_high``, ``web_very_high``, ``web_maximum``, +``low``, ``medium``, ``high``, ``maximum``. +More presets can be added to the :py:data:`presets` dict if needed. + +To apply the preset, specify:: + + quality="preset_name" + +To apply only the quantization table:: + + qtables="preset_name" + +To apply only the subsampling setting:: + + subsampling="preset_name" + +Example:: + + im.save("image_name.jpg", quality="web_high") + +Subsampling +----------- + +Subsampling is the practice of encoding images by implementing less resolution +for chroma information than for luma information. +(ref.: https://en.wikipedia.org/wiki/Chroma_subsampling) + +Possible subsampling values are 0, 1 and 2 that correspond to 4:4:4, 4:2:2 and +4:2:0. + +You can get the subsampling of a JPEG with the +:func:`.JpegImagePlugin.get_sampling` function. 
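+
+For example, a minimal sketch (assuming ``image_name.jpg`` is an existing
+JPEG file on disk)::
+
+    from PIL import Image
+    from PIL.JpegImagePlugin import get_sampling
+
+    with Image.open("image_name.jpg") as im:
+        print(get_sampling(im))  # 0, 1 or 2, or -1 if unknown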
+ +In JPEG compressed data a JPEG marker is used instead of an EXIF tag. +(ref.: https://exiv2.org/tags.html) + + +Quantization tables +------------------- + +They are values use by the DCT (Discrete cosine transform) to remove +*unnecessary* information from the image (the lossy part of the compression). +(ref.: https://en.wikipedia.org/wiki/Quantization_matrix#Quantization_matrices, +https://en.wikipedia.org/wiki/JPEG#Quantization) + +You can get the quantization tables of a JPEG with:: + + im.quantization + +This will return a dict with a number of lists. You can pass this dict +directly as the qtables argument when saving a JPEG. + +The quantization table format in presets is a list with sublists. These formats +are interchangeable. + +Libjpeg ref.: +https://web.archive.org/web/20120328125543/http://www.jpegcameras.com/libjpeg/libjpeg-3.html + +""" + +from __future__ import annotations + +# fmt: off +presets = { + 'web_low': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [20, 16, 25, 39, 50, 46, 62, 68, + 16, 18, 23, 38, 38, 53, 65, 68, + 25, 23, 31, 38, 53, 65, 68, 68, + 39, 38, 38, 53, 65, 68, 68, 68, + 50, 38, 53, 65, 68, 68, 68, 68, + 46, 53, 65, 68, 68, 68, 68, 68, + 62, 65, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68], + [21, 25, 32, 38, 54, 68, 68, 68, + 25, 28, 24, 38, 54, 68, 68, 68, + 32, 24, 32, 43, 66, 68, 68, 68, + 38, 38, 43, 53, 68, 68, 68, 68, + 54, 54, 66, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68] + ]}, + 'web_medium': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [16, 11, 11, 16, 23, 27, 31, 30, + 11, 12, 12, 15, 20, 23, 23, 30, + 11, 12, 13, 16, 23, 26, 35, 47, + 16, 15, 16, 23, 26, 37, 47, 64, + 23, 20, 23, 26, 39, 51, 64, 64, + 27, 23, 26, 37, 51, 64, 64, 64, + 31, 23, 35, 47, 64, 64, 64, 64, + 30, 30, 47, 64, 64, 64, 64, 64], + [17, 15, 17, 21, 20, 26, 38, 48, + 15, 19, 18, 17, 20, 26, 35, 43, + 17, 18, 20, 22, 26, 30, 46, 53, + 21, 17, 22, 28, 30, 39, 53, 64, + 20, 20, 26, 30, 39, 48, 64, 64, + 26, 26, 30, 39, 48, 63, 64, 64, + 38, 35, 46, 53, 64, 64, 64, 64, + 48, 43, 53, 64, 64, 64, 64, 64] + ]}, + 'web_high': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [6, 4, 4, 6, 9, 11, 12, 16, + 4, 5, 5, 6, 8, 10, 12, 12, + 4, 5, 5, 6, 10, 12, 14, 19, + 6, 6, 6, 11, 12, 15, 19, 28, + 9, 8, 10, 12, 16, 20, 27, 31, + 11, 10, 12, 15, 20, 27, 31, 31, + 12, 12, 14, 19, 27, 31, 31, 31, + 16, 12, 19, 28, 31, 31, 31, 31], + [7, 7, 13, 24, 26, 31, 31, 31, + 7, 12, 16, 21, 31, 31, 31, 31, + 13, 16, 17, 31, 31, 31, 31, 31, + 24, 21, 31, 31, 31, 31, 31, 31, + 26, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31] + ]}, + 'web_very_high': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 4, 5, 7, 9, + 2, 2, 2, 4, 5, 7, 9, 12, + 3, 3, 4, 5, 8, 10, 12, 12, + 4, 4, 5, 7, 10, 12, 12, 12, + 5, 5, 7, 9, 12, 12, 12, 12, + 6, 6, 9, 12, 12, 12, 12, 12], + [3, 3, 5, 9, 13, 15, 15, 15, + 3, 4, 6, 11, 14, 12, 12, 12, + 5, 6, 9, 14, 12, 12, 12, 12, + 9, 11, 14, 12, 12, 12, 12, 12, + 13, 14, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'web_maximum': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 2, + 1, 1, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 1, 1, 2, 2, 3, + 1, 1, 1, 1, 2, 2, 3, 3, + 1, 1, 1, 
2, 2, 3, 3, 3, + 1, 1, 2, 2, 3, 3, 3, 3], + [1, 1, 1, 2, 2, 3, 3, 3, + 1, 1, 1, 2, 3, 3, 3, 3, + 1, 1, 1, 3, 3, 3, 3, 3, + 2, 2, 3, 3, 3, 3, 3, 3, + 2, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3] + ]}, + 'low': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [18, 14, 14, 21, 30, 35, 34, 17, + 14, 16, 16, 19, 26, 23, 12, 12, + 14, 16, 17, 21, 23, 12, 12, 12, + 21, 19, 21, 23, 12, 12, 12, 12, + 30, 26, 23, 12, 12, 12, 12, 12, + 35, 23, 12, 12, 12, 12, 12, 12, + 34, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12], + [20, 19, 22, 27, 20, 20, 17, 17, + 19, 25, 23, 14, 14, 12, 12, 12, + 22, 23, 14, 14, 12, 12, 12, 12, + 27, 14, 14, 12, 12, 12, 12, 12, + 20, 14, 12, 12, 12, 12, 12, 12, + 20, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'medium': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [12, 8, 8, 12, 17, 21, 24, 17, + 8, 9, 9, 11, 15, 19, 12, 12, + 8, 9, 10, 12, 19, 12, 12, 12, + 12, 11, 12, 21, 12, 12, 12, 12, + 17, 15, 19, 12, 12, 12, 12, 12, + 21, 19, 12, 12, 12, 12, 12, 12, + 24, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12], + [13, 11, 13, 16, 20, 20, 17, 17, + 11, 14, 14, 14, 14, 12, 12, 12, + 13, 14, 14, 14, 12, 12, 12, 12, + 16, 14, 14, 12, 12, 12, 12, 12, + 20, 14, 12, 12, 12, 12, 12, 12, + 20, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'high': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [6, 4, 4, 6, 9, 11, 12, 16, + 4, 5, 5, 6, 8, 10, 12, 12, + 4, 5, 5, 6, 10, 12, 12, 12, + 6, 6, 6, 11, 12, 12, 12, 12, + 9, 8, 10, 12, 12, 12, 12, 12, + 11, 10, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 12, 12, 12, + 16, 12, 12, 12, 12, 12, 12, 12], + [7, 7, 13, 24, 20, 20, 17, 17, + 7, 12, 16, 14, 14, 12, 12, 12, + 13, 16, 14, 14, 12, 12, 12, 12, + 24, 14, 14, 12, 12, 12, 12, 12, + 20, 14, 12, 12, 12, 12, 12, 12, + 20, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'maximum': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 4, 5, 7, 9, + 2, 2, 2, 4, 5, 7, 9, 12, + 3, 3, 4, 5, 8, 10, 12, 12, + 4, 4, 5, 7, 10, 12, 12, 12, + 5, 5, 7, 9, 12, 12, 12, 12, + 6, 6, 9, 12, 12, 12, 12, 12], + [3, 3, 5, 9, 13, 15, 15, 15, + 3, 4, 6, 10, 14, 12, 12, 12, + 5, 6, 9, 14, 12, 12, 12, 12, + 9, 10, 14, 12, 12, 12, 12, 12, + 13, 14, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12] + ]}, +} +# fmt: on diff --git a/py311/lib/python3.11/site-packages/PIL/McIdasImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/McIdasImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..9a47933b69cbdc628faafb67b2fca8de703abfc1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/McIdasImagePlugin.py @@ -0,0 +1,78 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Basic McIdas support for PIL +# +# History: +# 1997-05-05 fl Created (8-bit images only) +# 2009-03-08 fl Added 16/32-bit support. +# +# Thanks to Richard Jones and Craig Swank for specs and samples. +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import struct + +from . 
import Image, ImageFile + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(b"\x00\x00\x00\x00\x00\x00\x00\x04") + + +## +# Image plugin for McIdas area images. + + +class McIdasImageFile(ImageFile.ImageFile): + format = "MCIDAS" + format_description = "McIdas area file" + + def _open(self) -> None: + # parse area file directory + assert self.fp is not None + + s = self.fp.read(256) + if not _accept(s) or len(s) != 256: + msg = "not an McIdas area file" + raise SyntaxError(msg) + + self.area_descriptor_raw = s + self.area_descriptor = w = [0, *struct.unpack("!64i", s)] + + # get mode + if w[11] == 1: + mode = rawmode = "L" + elif w[11] == 2: + mode = rawmode = "I;16B" + elif w[11] == 4: + # FIXME: add memory map support + mode = "I" + rawmode = "I;32B" + else: + msg = "unsupported McIdas format" + raise SyntaxError(msg) + + self._mode = mode + self._size = w[10], w[9] + + offset = w[34] + w[15] + stride = w[15] + w[10] * w[11] * w[14] + + self.tile = [ + ImageFile._Tile("raw", (0, 0) + self.size, offset, (rawmode, stride, 1)) + ] + + +# -------------------------------------------------------------------- +# registry + +Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept) + +# no default extension diff --git a/py311/lib/python3.11/site-packages/PIL/MicImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/MicImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..9ce38c427b6c19be9e0c5092181a54b936a7a2f3 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/MicImagePlugin.py @@ -0,0 +1,102 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Microsoft Image Composer support for PIL +# +# Notes: +# uses TiffImagePlugin.py to read the actual image streams +# +# History: +# 97-01-20 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import olefile + +from . import Image, TiffImagePlugin + +# +# -------------------------------------------------------------------- + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(olefile.MAGIC) + + +## +# Image plugin for Microsoft's Image Composer file format. + + +class MicImageFile(TiffImagePlugin.TiffImageFile): + format = "MIC" + format_description = "Microsoft Image Composer" + _close_exclusive_fp_after_loading = False + + def _open(self) -> None: + # read the OLE directory and see if this is a likely + # to be a Microsoft Image Composer file + + try: + self.ole = olefile.OleFileIO(self.fp) + except OSError as e: + msg = "not an MIC file; invalid OLE file" + raise SyntaxError(msg) from e + + # find ACI subfiles with Image members (maybe not the + # best way to identify MIC files, but what the... ;-) + + self.images = [ + path + for path in self.ole.listdir() + if path[1:] and path[0].endswith(".ACI") and path[1] == "Image" + ] + + # if we didn't find any images, this is probably not + # an MIC file. 
+ if not self.images: + msg = "not an MIC file; no image entries" + raise SyntaxError(msg) + + self.frame = -1 + self._n_frames = len(self.images) + self.is_animated = self._n_frames > 1 + + self.__fp = self.fp + self.seek(0) + + def seek(self, frame: int) -> None: + if not self._seek_check(frame): + return + filename = self.images[frame] + self.fp = self.ole.openstream(filename) + + TiffImagePlugin.TiffImageFile._open(self) + + self.frame = frame + + def tell(self) -> int: + return self.frame + + def close(self) -> None: + self.__fp.close() + self.ole.close() + super().close() + + def __exit__(self, *args: object) -> None: + self.__fp.close() + self.ole.close() + super().__exit__() + + +# +# -------------------------------------------------------------------- + +Image.register_open(MicImageFile.format, MicImageFile, _accept) + +Image.register_extension(MicImageFile.format, ".mic") diff --git a/py311/lib/python3.11/site-packages/PIL/MpegImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/MpegImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..47ebe9d62c4edd3b5e97f760ff7e9b0417e5b5ab --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/MpegImagePlugin.py @@ -0,0 +1,84 @@ +# +# The Python Imaging Library. +# $Id$ +# +# MPEG file handling +# +# History: +# 95-09-09 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1995. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +from . import Image, ImageFile +from ._binary import i8 +from ._typing import SupportsRead + +# +# Bitstream parser + + +class BitStream: + def __init__(self, fp: SupportsRead[bytes]) -> None: + self.fp = fp + self.bits = 0 + self.bitbuffer = 0 + + def next(self) -> int: + return i8(self.fp.read(1)) + + def peek(self, bits: int) -> int: + while self.bits < bits: + self.bitbuffer = (self.bitbuffer << 8) + self.next() + self.bits += 8 + return self.bitbuffer >> (self.bits - bits) & (1 << bits) - 1 + + def skip(self, bits: int) -> None: + while self.bits < bits: + self.bitbuffer = (self.bitbuffer << 8) + i8(self.fp.read(1)) + self.bits += 8 + self.bits = self.bits - bits + + def read(self, bits: int) -> int: + v = self.peek(bits) + self.bits = self.bits - bits + return v + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(b"\x00\x00\x01\xb3") + + +## +# Image plugin for MPEG streams. This plugin can identify a stream, +# but it cannot read it. + + +class MpegImageFile(ImageFile.ImageFile): + format = "MPEG" + format_description = "MPEG" + + def _open(self) -> None: + assert self.fp is not None + + s = BitStream(self.fp) + if s.read(32) != 0x1B3: + msg = "not an MPEG file" + raise SyntaxError(msg) + + self._mode = "RGB" + self._size = s.read(12), s.read(12) + + +# -------------------------------------------------------------------- +# Registry stuff + +Image.register_open(MpegImageFile.format, MpegImageFile, _accept) + +Image.register_extensions(MpegImageFile.format, [".mpg", ".mpeg"]) + +Image.register_mime(MpegImageFile.format, "video/mpeg") diff --git a/py311/lib/python3.11/site-packages/PIL/MpoImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/MpoImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..b1ae07873ac215b7abeeed9fe32d0f17db45d124 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/MpoImagePlugin.py @@ -0,0 +1,202 @@ +# +# The Python Imaging Library. 
+# $Id$
+#
+# MPO file handling
+#
+# See "Multi-Picture Format" (CIPA DC-007-Translation 2009, Standard of the
+# Camera & Imaging Products Association)
+#
+# The multi-picture object combines multiple JPEG images (with a modified EXIF
+# data format) into a single file. While it can theoretically be used much like
+# a GIF animation, it is commonly used to represent 3D photographs and is (as
+# of this writing) the most commonly used format by 3D cameras.
+#
+# History:
+# 2014-03-13 Feneric Created
+#
+# See the README file for information on usage and redistribution.
+#
+from __future__ import annotations
+
+import os
+import struct
+from typing import IO, Any, cast
+
+from . import (
+    Image,
+    ImageFile,
+    ImageSequence,
+    JpegImagePlugin,
+    TiffImagePlugin,
+)
+from ._binary import o32le
+from ._util import DeferredError
+
+
+def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
+    JpegImagePlugin._save(im, fp, filename)
+
+
+def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
+    append_images = im.encoderinfo.get("append_images", [])
+    if not append_images and not getattr(im, "is_animated", False):
+        _save(im, fp, filename)
+        return
+
+    mpf_offset = 28
+    offsets: list[int] = []
+    im_sequences = [im, *append_images]
+    total = sum(getattr(seq, "n_frames", 1) for seq in im_sequences)
+    for im_sequence in im_sequences:
+        for im_frame in ImageSequence.Iterator(im_sequence):
+            if not offsets:
+                # APP2 marker
+                ifd_length = 66 + 16 * total
+                im_frame.encoderinfo["extra"] = (
+                    b"\xff\xe2"
+                    + struct.pack(">H", 6 + ifd_length)
+                    + b"MPF\0"
+                    + b" " * ifd_length
+                )
+                exif = im_frame.encoderinfo.get("exif")
+                if isinstance(exif, Image.Exif):
+                    exif = exif.tobytes()
+                    im_frame.encoderinfo["exif"] = exif
+                if exif:
+                    mpf_offset += 4 + len(exif)
+
+                JpegImagePlugin._save(im_frame, fp, filename)
+                offsets.append(fp.tell())
+            else:
+                encoderinfo = im_frame._attach_default_encoderinfo(im)
+                im_frame.save(fp, "JPEG")
+                im_frame.encoderinfo = encoderinfo
+                offsets.append(fp.tell() - offsets[-1])
+
+    ifd = TiffImagePlugin.ImageFileDirectory_v2()
+    ifd[0xB000] = b"0100"
+    ifd[0xB001] = len(offsets)
+
+    mpentries = b""
+    data_offset = 0
+    for i, size in enumerate(offsets):
+        if i == 0:
+            mptype = 0x030000  # Baseline MP Primary Image
+        else:
+            mptype = 0x000000  # Undefined
+        mpentries += struct.pack("<LLLHH", mptype, size, data_offset, 0, 0)
+        if i == 0:
+            data_offset -= mpf_offset
+        data_offset += size
+    ifd[0xB002] = mpentries
+
+    fp.seek(mpf_offset)
+    fp.write(b"II\x2a\x00" + o32le(8) + ifd.tobytes(8))
+    fp.seek(0, os.SEEK_END)
+
+
+class MpoImageFile(JpegImagePlugin.JpegImageFile):
+    format = "MPO"
+    format_description = "MPO (CIPA DC-007)"
+    _close_exclusive_fp_after_loading = False
+
+    def _open(self) -> None:
+        self.fp.seek(0)  # prep the fp in order to pass the JPEG test
+        JpegImagePlugin.JpegImageFile._open(self)
+        self._after_jpeg_open()
+
+    def _after_jpeg_open(self, mpheader: dict[int, Any] | None = None) -> None:
+        self.mpinfo = mpheader if mpheader is not None else self._getmp()
+        if self.mpinfo is None:
+            msg = "Image appears to be a malformed MPO file"
+            raise ValueError(msg)
+        self.n_frames = self.mpinfo[0xB001]
+        self.__mpoffsets = [
+            mpent["DataOffset"] + self.info["mpoffset"] for mpent in self.mpinfo[0xB002]
+        ]
+        self.__mpoffsets[0] = 0
+        # Note that the following assertion will only be invalid if something
+        # gets broken within JpegImagePlugin. 
+ assert self.n_frames == len(self.__mpoffsets) + del self.info["mpoffset"] # no longer needed + self.is_animated = self.n_frames > 1 + self._fp = self.fp # FIXME: hack + self._fp.seek(self.__mpoffsets[0]) # get ready to read first frame + self.__frame = 0 + self.offset = 0 + # for now we can only handle reading and individual frame extraction + self.readonly = 1 + + def load_seek(self, pos: int) -> None: + if isinstance(self._fp, DeferredError): + raise self._fp.ex + self._fp.seek(pos) + + def seek(self, frame: int) -> None: + if not self._seek_check(frame): + return + if isinstance(self._fp, DeferredError): + raise self._fp.ex + self.fp = self._fp + self.offset = self.__mpoffsets[frame] + + original_exif = self.info.get("exif") + if "exif" in self.info: + del self.info["exif"] + + self.fp.seek(self.offset + 2) # skip SOI marker + if not self.fp.read(2): + msg = "No data found for frame" + raise ValueError(msg) + self.fp.seek(self.offset) + JpegImagePlugin.JpegImageFile._open(self) + if self.info.get("exif") != original_exif: + self._reload_exif() + + self.tile = [ + ImageFile._Tile("jpeg", (0, 0) + self.size, self.offset, self.tile[0][-1]) + ] + self.__frame = frame + + def tell(self) -> int: + return self.__frame + + @staticmethod + def adopt( + jpeg_instance: JpegImagePlugin.JpegImageFile, + mpheader: dict[int, Any] | None = None, + ) -> MpoImageFile: + """ + Transform the instance of JpegImageFile into + an instance of MpoImageFile. + After the call, the JpegImageFile is extended + to be an MpoImageFile. + + This is essentially useful when opening a JPEG + file that reveals itself as an MPO, to avoid + double call to _open. + """ + jpeg_instance.__class__ = MpoImageFile + mpo_instance = cast(MpoImageFile, jpeg_instance) + mpo_instance._after_jpeg_open(mpheader) + return mpo_instance + + +# --------------------------------------------------------------------- +# Registry stuff + +# Note that since MPO shares a factory with JPEG, we do not need to do a +# separate registration for it here. +# Image.register_open(MpoImageFile.format, +# JpegImagePlugin.jpeg_factory, _accept) +Image.register_save(MpoImageFile.format, _save) +Image.register_save_all(MpoImageFile.format, _save_all) + +Image.register_extension(MpoImageFile.format, ".mpo") + +Image.register_mime(MpoImageFile.format, "image/mpo") diff --git a/py311/lib/python3.11/site-packages/PIL/MspImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/MspImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..277087a8677708a3a5fe21a3f6d2c3b27f880d03 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/MspImagePlugin.py @@ -0,0 +1,200 @@ +# +# The Python Imaging Library. +# +# MSP file handling +# +# This is the format used by the Paint program in Windows 1 and 2. +# +# History: +# 95-09-05 fl Created +# 97-01-03 fl Read/write MSP images +# 17-02-21 es Fixed RLE interpretation +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1995-97. +# Copyright (c) Eric Soroos 2017. +# +# See the README file for information on usage and redistribution. +# +# More info on this format: https://archive.org/details/gg243631 +# Page 313: +# Figure 205. Windows Paint Version 1: "DanM" Format +# Figure 206. Windows Paint Version 2: "LinS" Format. Used in Windows V2.03 +# +# See also: https://www.fileformat.info/format/mspaint/egff.htm +from __future__ import annotations + +import io +import struct +from typing import IO + +from . 
import Image, ImageFile +from ._binary import i16le as i16 +from ._binary import o16le as o16 + +# +# read MSP files + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith((b"DanM", b"LinS")) + + +## +# Image plugin for Windows MSP images. This plugin supports both +# uncompressed (Windows 1.0). + + +class MspImageFile(ImageFile.ImageFile): + format = "MSP" + format_description = "Windows Paint" + + def _open(self) -> None: + # Header + assert self.fp is not None + + s = self.fp.read(32) + if not _accept(s): + msg = "not an MSP file" + raise SyntaxError(msg) + + # Header checksum + checksum = 0 + for i in range(0, 32, 2): + checksum = checksum ^ i16(s, i) + if checksum != 0: + msg = "bad MSP checksum" + raise SyntaxError(msg) + + self._mode = "1" + self._size = i16(s, 4), i16(s, 6) + + if s.startswith(b"DanM"): + self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 32, "1")] + else: + self.tile = [ImageFile._Tile("MSP", (0, 0) + self.size, 32)] + + +class MspDecoder(ImageFile.PyDecoder): + # The algo for the MSP decoder is from + # https://www.fileformat.info/format/mspaint/egff.htm + # cc-by-attribution -- That page references is taken from the + # Encyclopedia of Graphics File Formats and is licensed by + # O'Reilly under the Creative Common/Attribution license + # + # For RLE encoded files, the 32byte header is followed by a scan + # line map, encoded as one 16bit word of encoded byte length per + # line. + # + # NOTE: the encoded length of the line can be 0. This was not + # handled in the previous version of this encoder, and there's no + # mention of how to handle it in the documentation. From the few + # examples I've seen, I've assumed that it is a fill of the + # background color, in this case, white. + # + # + # Pseudocode of the decoder: + # Read a BYTE value as the RunType + # If the RunType value is zero + # Read next byte as the RunCount + # Read the next byte as the RunValue + # Write the RunValue byte RunCount times + # If the RunType value is non-zero + # Use this value as the RunCount + # Read and write the next RunCount bytes literally + # + # e.g.: + # 0x00 03 ff 05 00 01 02 03 04 + # would yield the bytes: + # 0xff ff ff 00 01 02 03 04 + # + # which are then interpreted as a bit packed mode '1' image + + _pulls_fd = True + + def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]: + assert self.fd is not None + + img = io.BytesIO() + blank_line = bytearray((0xFF,) * ((self.state.xsize + 7) // 8)) + try: + self.fd.seek(32) + rowmap = struct.unpack_from( + f"<{self.state.ysize}H", self.fd.read(self.state.ysize * 2) + ) + except struct.error as e: + msg = "Truncated MSP file in row map" + raise OSError(msg) from e + + for x, rowlen in enumerate(rowmap): + try: + if rowlen == 0: + img.write(blank_line) + continue + row = self.fd.read(rowlen) + if len(row) != rowlen: + msg = f"Truncated MSP file, expected {rowlen} bytes on row {x}" + raise OSError(msg) + idx = 0 + while idx < rowlen: + runtype = row[idx] + idx += 1 + if runtype == 0: + (runcount, runval) = struct.unpack_from("Bc", row, idx) + img.write(runval * runcount) + idx += 2 + else: + runcount = runtype + img.write(row[idx : idx + runcount]) + idx += runcount + + except struct.error as e: + msg = f"Corrupted MSP file in row {x}" + raise OSError(msg) from e + + self.set_as_raw(img.getvalue(), "1") + + return -1, 0 + + +Image.register_decoder("MSP", MspDecoder) + + +# +# write MSP files (uncompressed only) + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> 
None: + if im.mode != "1": + msg = f"cannot write mode {im.mode} as MSP" + raise OSError(msg) + + # create MSP header + header = [0] * 16 + + header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1 + header[2], header[3] = im.size + header[4], header[5] = 1, 1 + header[6], header[7] = 1, 1 + header[8], header[9] = im.size + + checksum = 0 + for h in header: + checksum = checksum ^ h + header[12] = checksum # FIXME: is this the right field? + + # header + for h in header: + fp.write(o16(h)) + + # image body + ImageFile._save(im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 32, "1")]) + + +# +# registry + +Image.register_open(MspImageFile.format, MspImageFile, _accept) +Image.register_save(MspImageFile.format, _save) + +Image.register_extension(MspImageFile.format, ".msp") diff --git a/py311/lib/python3.11/site-packages/PIL/PSDraw.py b/py311/lib/python3.11/site-packages/PIL/PSDraw.py new file mode 100644 index 0000000000000000000000000000000000000000..7fd4c5c94cfa7ec46332f4da78f3e402fd5b311b --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/PSDraw.py @@ -0,0 +1,237 @@ +# +# The Python Imaging Library +# $Id$ +# +# Simple PostScript graphics interface +# +# History: +# 1996-04-20 fl Created +# 1999-01-10 fl Added gsave/grestore to image method +# 2005-05-04 fl Fixed floating point issue in image (from Eric Etheridge) +# +# Copyright (c) 1997-2005 by Secret Labs AB. All rights reserved. +# Copyright (c) 1996 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import sys +from typing import IO + +from . import EpsImagePlugin + +TYPE_CHECKING = False + + +## +# Simple PostScript graphics interface. + + +class PSDraw: + """ + Sets up printing to the given file. If ``fp`` is omitted, + ``sys.stdout.buffer`` is assumed. + """ + + def __init__(self, fp: IO[bytes] | None = None) -> None: + if not fp: + fp = sys.stdout.buffer + self.fp = fp + + def begin_document(self, id: str | None = None) -> None: + """Set up printing of a document. (Write PostScript DSC header.)""" + # FIXME: incomplete + self.fp.write( + b"%!PS-Adobe-3.0\n" + b"save\n" + b"/showpage { } def\n" + b"%%EndComments\n" + b"%%BeginDocument\n" + ) + # self.fp.write(ERROR_PS) # debugging! + self.fp.write(EDROFF_PS) + self.fp.write(VDI_PS) + self.fp.write(b"%%EndProlog\n") + self.isofont: dict[bytes, int] = {} + + def end_document(self) -> None: + """Ends printing. (Write PostScript DSC footer.)""" + self.fp.write(b"%%EndDocument\nrestore showpage\n%%End\n") + if hasattr(self.fp, "flush"): + self.fp.flush() + + def setfont(self, font: str, size: int) -> None: + """ + Selects which font to use. + + :param font: A PostScript font name + :param size: Size in points. + """ + font_bytes = bytes(font, "UTF-8") + if font_bytes not in self.isofont: + # reencode font + self.fp.write( + b"/PSDraw-%s ISOLatin1Encoding /%s E\n" % (font_bytes, font_bytes) + ) + self.isofont[font_bytes] = 1 + # rough + self.fp.write(b"/F0 %d /PSDraw-%s F\n" % (size, font_bytes)) + + def line(self, xy0: tuple[int, int], xy1: tuple[int, int]) -> None: + """ + Draws a line between the two points. Coordinates are given in + PostScript point coordinates (72 points per inch, (0, 0) is the lower + left corner of the page). + """ + self.fp.write(b"%d %d %d %d Vl\n" % (*xy0, *xy1)) + + def rectangle(self, box: tuple[int, int, int, int]) -> None: + """ + Draws a rectangle. + + :param box: A tuple of four integers, specifying left, bottom, width and + height. 
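+
+        A short sketch (assuming ``ps`` is a :py:class:`PSDraw` instance
+        with a document already begun)::
+
+            # a filled 2 x 1 inch rectangle, one inch in from the
+            # lower left corner of the page
+            ps.rectangle((72, 72, 144, 72))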
+ """ + self.fp.write(b"%d %d M 0 %d %d Vr\n" % box) + + def text(self, xy: tuple[int, int], text: str) -> None: + """ + Draws text at the given position. You must use + :py:meth:`~PIL.PSDraw.PSDraw.setfont` before calling this method. + """ + text_bytes = bytes(text, "UTF-8") + text_bytes = b"\\(".join(text_bytes.split(b"(")) + text_bytes = b"\\)".join(text_bytes.split(b")")) + self.fp.write(b"%d %d M (%s) S\n" % (xy + (text_bytes,))) + + if TYPE_CHECKING: + from . import Image + + def image( + self, box: tuple[int, int, int, int], im: Image.Image, dpi: int | None = None + ) -> None: + """Draw a PIL image, centered in the given box.""" + # default resolution depends on mode + if not dpi: + if im.mode == "1": + dpi = 200 # fax + else: + dpi = 100 # grayscale + # image size (on paper) + x = im.size[0] * 72 / dpi + y = im.size[1] * 72 / dpi + # max allowed size + xmax = float(box[2] - box[0]) + ymax = float(box[3] - box[1]) + if x > xmax: + y = y * xmax / x + x = xmax + if y > ymax: + x = x * ymax / y + y = ymax + dx = (xmax - x) / 2 + box[0] + dy = (ymax - y) / 2 + box[1] + self.fp.write(b"gsave\n%f %f translate\n" % (dx, dy)) + if (x, y) != im.size: + # EpsImagePlugin._save prints the image at (0,0,xsize,ysize) + sx = x / im.size[0] + sy = y / im.size[1] + self.fp.write(b"%f %f scale\n" % (sx, sy)) + EpsImagePlugin._save(im, self.fp, "", 0) + self.fp.write(b"\ngrestore\n") + + +# -------------------------------------------------------------------- +# PostScript driver + +# +# EDROFF.PS -- PostScript driver for Edroff 2 +# +# History: +# 94-01-25 fl: created (edroff 2.04) +# +# Copyright (c) Fredrik Lundh 1994. +# + + +EDROFF_PS = b"""\ +/S { show } bind def +/P { moveto show } bind def +/M { moveto } bind def +/X { 0 rmoveto } bind def +/Y { 0 exch rmoveto } bind def +/E { findfont + dup maxlength dict begin + { + 1 index /FID ne { def } { pop pop } ifelse + } forall + /Encoding exch def + dup /FontName exch def + currentdict end definefont pop +} bind def +/F { findfont exch scalefont dup setfont + [ exch /setfont cvx ] cvx bind def +} bind def +""" + +# +# VDI.PS -- PostScript driver for VDI meta commands +# +# History: +# 94-01-25 fl: created (edroff 2.04) +# +# Copyright (c) Fredrik Lundh 1994. 
+#
+
+VDI_PS = b"""\
+/Vm { moveto } bind def
+/Va { newpath arcn stroke } bind def
+/Vl { moveto lineto stroke } bind def
+/Vc { newpath 0 360 arc closepath } bind def
+/Vr {   exch dup 0 rlineto
+        exch dup 0 exch rlineto
+        exch neg 0 rlineto
+        0 exch neg rlineto
+        setgray fill } bind def
+/Tm matrix def
+/Ve {   Tm currentmatrix pop
+        translate scale newpath 0 0 .5 0 360 arc closepath
+        Tm setmatrix
+} bind def
+/Vf { currentgray exch setgray fill setgray } bind def
+"""

+
+#
+# ERROR.PS -- Error handler
+#
+# History:
+# 89-11-21 fl: created (pslist 1.10)
+#
+
+ERROR_PS = b"""\
+/landscape false def
+/errorBUF 200 string def
+/errorNL { currentpoint 10 sub exch pop 72 exch moveto } def
+errordict begin /handleerror {
+    initmatrix /Courier findfont 10 scalefont setfont
+    newpath 72 720 moveto $error begin /newerror false def
+    (PostScript Error) show errorNL errorNL
+    (Error: ) show
+    /errorname load errorBUF cvs show errorNL errorNL
+    (Command: ) show
+    /command load dup type /stringtype ne { errorBUF cvs } if show
+    errorNL errorNL
+    (VMstatus: ) show
+    vmstatus errorBUF cvs show ( bytes available, ) show
+    errorBUF cvs show ( bytes used at level ) show
+    errorBUF cvs show errorNL errorNL
+    (Operand stack: ) show errorNL /ostack load {
+        dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
+    } forall errorNL
+    (Execution stack: ) show errorNL /estack load {
+        dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
+    } forall
+    end showpage
+} def end
+"""
diff --git a/py311/lib/python3.11/site-packages/PIL/PaletteFile.py b/py311/lib/python3.11/site-packages/PIL/PaletteFile.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a26e5d4e223ba0bc80ad1bfb37b4c3927e222ac
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/PIL/PaletteFile.py
@@ -0,0 +1,54 @@
+#
+# Python Imaging Library
+# $Id$
+#
+# stuff to read simple, teragon-style palette files
+#
+# History:
+# 97-08-23 fl Created
+#
+# Copyright (c) Secret Labs AB 1997.
+# Copyright (c) Fredrik Lundh 1997.
+#
+# See the README file for information on usage and redistribution.
+#
+from __future__ import annotations
+
+from typing import IO
+
+from ._binary import o8
+
+
+class PaletteFile:
+    """File handler for Teragon-style palette files."""
+
+    rawmode = "RGB"
+
+    def __init__(self, fp: IO[bytes]) -> None:
+        palette = [o8(i) * 3 for i in range(256)]
+
+        while True:
+            s = fp.readline()
+
+            if not s:
+                break
+            if s.startswith(b"#"):
+                continue
+            if len(s) > 100:
+                msg = "bad palette file"
+                raise SyntaxError(msg)
+
+            v = [int(x) for x in s.split()]
+            try:
+                [i, r, g, b] = v
+            except ValueError:
+                [i, r] = v
+                g = b = r
+
+            if 0 <= i <= 255:
+                palette[i] = o8(r) + o8(g) + o8(b)
+
+        self.palette = b"".join(palette)
+
+    def getpalette(self) -> tuple[bytes, str]:
+        return self.palette, self.rawmode
diff --git a/py311/lib/python3.11/site-packages/PIL/PalmImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/PalmImagePlugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..15f71290816c5fa6a5178842260a1520eb0b372f
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/PIL/PalmImagePlugin.py
@@ -0,0 +1,217 @@
+#
+# The Python Imaging Library.
+# $Id$
+#
+
+##
+# Image plugin for Palm pixmap images (output only).
+##
+from __future__ import annotations
+
+from typing import IO
+
+from . 
import Image, ImageFile +from ._binary import o8 +from ._binary import o16be as o16b + +# fmt: off +_Palm8BitColormapValues = ( + (255, 255, 255), (255, 204, 255), (255, 153, 255), (255, 102, 255), + (255, 51, 255), (255, 0, 255), (255, 255, 204), (255, 204, 204), + (255, 153, 204), (255, 102, 204), (255, 51, 204), (255, 0, 204), + (255, 255, 153), (255, 204, 153), (255, 153, 153), (255, 102, 153), + (255, 51, 153), (255, 0, 153), (204, 255, 255), (204, 204, 255), + (204, 153, 255), (204, 102, 255), (204, 51, 255), (204, 0, 255), + (204, 255, 204), (204, 204, 204), (204, 153, 204), (204, 102, 204), + (204, 51, 204), (204, 0, 204), (204, 255, 153), (204, 204, 153), + (204, 153, 153), (204, 102, 153), (204, 51, 153), (204, 0, 153), + (153, 255, 255), (153, 204, 255), (153, 153, 255), (153, 102, 255), + (153, 51, 255), (153, 0, 255), (153, 255, 204), (153, 204, 204), + (153, 153, 204), (153, 102, 204), (153, 51, 204), (153, 0, 204), + (153, 255, 153), (153, 204, 153), (153, 153, 153), (153, 102, 153), + (153, 51, 153), (153, 0, 153), (102, 255, 255), (102, 204, 255), + (102, 153, 255), (102, 102, 255), (102, 51, 255), (102, 0, 255), + (102, 255, 204), (102, 204, 204), (102, 153, 204), (102, 102, 204), + (102, 51, 204), (102, 0, 204), (102, 255, 153), (102, 204, 153), + (102, 153, 153), (102, 102, 153), (102, 51, 153), (102, 0, 153), + (51, 255, 255), (51, 204, 255), (51, 153, 255), (51, 102, 255), + (51, 51, 255), (51, 0, 255), (51, 255, 204), (51, 204, 204), + (51, 153, 204), (51, 102, 204), (51, 51, 204), (51, 0, 204), + (51, 255, 153), (51, 204, 153), (51, 153, 153), (51, 102, 153), + (51, 51, 153), (51, 0, 153), (0, 255, 255), (0, 204, 255), + (0, 153, 255), (0, 102, 255), (0, 51, 255), (0, 0, 255), + (0, 255, 204), (0, 204, 204), (0, 153, 204), (0, 102, 204), + (0, 51, 204), (0, 0, 204), (0, 255, 153), (0, 204, 153), + (0, 153, 153), (0, 102, 153), (0, 51, 153), (0, 0, 153), + (255, 255, 102), (255, 204, 102), (255, 153, 102), (255, 102, 102), + (255, 51, 102), (255, 0, 102), (255, 255, 51), (255, 204, 51), + (255, 153, 51), (255, 102, 51), (255, 51, 51), (255, 0, 51), + (255, 255, 0), (255, 204, 0), (255, 153, 0), (255, 102, 0), + (255, 51, 0), (255, 0, 0), (204, 255, 102), (204, 204, 102), + (204, 153, 102), (204, 102, 102), (204, 51, 102), (204, 0, 102), + (204, 255, 51), (204, 204, 51), (204, 153, 51), (204, 102, 51), + (204, 51, 51), (204, 0, 51), (204, 255, 0), (204, 204, 0), + (204, 153, 0), (204, 102, 0), (204, 51, 0), (204, 0, 0), + (153, 255, 102), (153, 204, 102), (153, 153, 102), (153, 102, 102), + (153, 51, 102), (153, 0, 102), (153, 255, 51), (153, 204, 51), + (153, 153, 51), (153, 102, 51), (153, 51, 51), (153, 0, 51), + (153, 255, 0), (153, 204, 0), (153, 153, 0), (153, 102, 0), + (153, 51, 0), (153, 0, 0), (102, 255, 102), (102, 204, 102), + (102, 153, 102), (102, 102, 102), (102, 51, 102), (102, 0, 102), + (102, 255, 51), (102, 204, 51), (102, 153, 51), (102, 102, 51), + (102, 51, 51), (102, 0, 51), (102, 255, 0), (102, 204, 0), + (102, 153, 0), (102, 102, 0), (102, 51, 0), (102, 0, 0), + (51, 255, 102), (51, 204, 102), (51, 153, 102), (51, 102, 102), + (51, 51, 102), (51, 0, 102), (51, 255, 51), (51, 204, 51), + (51, 153, 51), (51, 102, 51), (51, 51, 51), (51, 0, 51), + (51, 255, 0), (51, 204, 0), (51, 153, 0), (51, 102, 0), + (51, 51, 0), (51, 0, 0), (0, 255, 102), (0, 204, 102), + (0, 153, 102), (0, 102, 102), (0, 51, 102), (0, 0, 102), + (0, 255, 51), (0, 204, 51), (0, 153, 51), (0, 102, 51), + (0, 51, 51), (0, 0, 51), (0, 255, 0), (0, 204, 0), + (0, 153, 0), (0, 
102, 0), (0, 51, 0), (17, 17, 17), + (34, 34, 34), (68, 68, 68), (85, 85, 85), (119, 119, 119), + (136, 136, 136), (170, 170, 170), (187, 187, 187), (221, 221, 221), + (238, 238, 238), (192, 192, 192), (128, 0, 0), (128, 0, 128), + (0, 128, 0), (0, 128, 128), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)) +# fmt: on + + +# so build a prototype image to be used for palette resampling +def build_prototype_image() -> Image.Image: + image = Image.new("L", (1, len(_Palm8BitColormapValues))) + image.putdata(list(range(len(_Palm8BitColormapValues)))) + palettedata: tuple[int, ...] = () + for colormapValue in _Palm8BitColormapValues: + palettedata += colormapValue + palettedata += (0, 0, 0) * (256 - len(_Palm8BitColormapValues)) + image.putpalette(palettedata) + return image + + +Palm8BitColormapImage = build_prototype_image() + +# OK, we now have in Palm8BitColormapImage, +# a "P"-mode image with the right palette +# +# -------------------------------------------------------------------- + +_FLAGS = {"custom-colormap": 0x4000, "is-compressed": 0x8000, "has-transparent": 0x2000} + +_COMPRESSION_TYPES = {"none": 0xFF, "rle": 0x01, "scanline": 0x00} + + +# +# -------------------------------------------------------------------- + +## +# (Internal) Image save plugin for the Palm format. + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + if im.mode == "P": + rawmode = "P" + bpp = 8 + version = 1 + + elif im.mode == "L": + if im.encoderinfo.get("bpp") in (1, 2, 4): + # this is 8-bit grayscale, so we shift it to get the high-order bits, + # and invert it because + # Palm does grayscale from white (0) to black (1) + bpp = im.encoderinfo["bpp"] + maxval = (1 << bpp) - 1 + shift = 8 - bpp + im = im.point(lambda x: maxval - (x >> shift)) + elif im.info.get("bpp") in (1, 2, 4): + # here we assume that even though the inherent mode is 8-bit grayscale, + # only the lower bpp bits are significant. + # We invert them to match the Palm. 
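+            # (a worked example, assuming the low bits run dark to light
+            # as usual: with bpp=2, maxval is 3, so a stored level of 3
+            # becomes 3 - 3 = 0 and a stored 0 becomes 3, i.e. white maps
+            # to 0, matching the Palm convention above)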
+ bpp = im.info["bpp"] + maxval = (1 << bpp) - 1 + im = im.point(lambda x: maxval - (x & maxval)) + else: + msg = f"cannot write mode {im.mode} as Palm" + raise OSError(msg) + + # we ignore the palette here + im._mode = "P" + rawmode = f"P;{bpp}" + version = 1 + + elif im.mode == "1": + # monochrome -- write it inverted, as is the Palm standard + rawmode = "1;I" + bpp = 1 + version = 0 + + else: + msg = f"cannot write mode {im.mode} as Palm" + raise OSError(msg) + + # + # make sure image data is available + im.load() + + # write header + + cols = im.size[0] + rows = im.size[1] + + rowbytes = int((cols + (16 // bpp - 1)) / (16 // bpp)) * 2 + transparent_index = 0 + compression_type = _COMPRESSION_TYPES["none"] + + flags = 0 + if im.mode == "P": + flags |= _FLAGS["custom-colormap"] + colormap = im.im.getpalette() + colors = len(colormap) // 3 + colormapsize = 4 * colors + 2 + else: + colormapsize = 0 + + if "offset" in im.info: + offset = (rowbytes * rows + 16 + 3 + colormapsize) // 4 + else: + offset = 0 + + fp.write(o16b(cols) + o16b(rows) + o16b(rowbytes) + o16b(flags)) + fp.write(o8(bpp)) + fp.write(o8(version)) + fp.write(o16b(offset)) + fp.write(o8(transparent_index)) + fp.write(o8(compression_type)) + fp.write(o16b(0)) # reserved by Palm + + # now write colormap if necessary + + if colormapsize: + fp.write(o16b(colors)) + for i in range(colors): + fp.write(o8(i)) + fp.write(colormap[3 * i : 3 * i + 3]) + + # now convert data to raw form + ImageFile._save( + im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, rowbytes, 1))] + ) + + if hasattr(fp, "flush"): + fp.flush() + + +# +# -------------------------------------------------------------------- + +Image.register_save("Palm", _save) + +Image.register_extension("Palm", ".palm") + +Image.register_mime("Palm", "image/palm") diff --git a/py311/lib/python3.11/site-packages/PIL/PcdImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/PcdImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..3aa249988c8b035822ab994866e405990d1cc96e --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/PcdImagePlugin.py @@ -0,0 +1,64 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PCD file handling +# +# History: +# 96-05-10 fl Created +# 96-05-27 fl Added draft mode (128x192, 256x384) +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +from . import Image, ImageFile + +## +# Image plugin for PhotoCD images. This plugin only reads the 768x512 +# image from the file; higher resolutions are encoded in a proprietary +# encoding. + + +class PcdImageFile(ImageFile.ImageFile): + format = "PCD" + format_description = "Kodak PhotoCD" + + def _open(self) -> None: + # rough + assert self.fp is not None + + self.fp.seek(2048) + s = self.fp.read(2048) + + if not s.startswith(b"PCD_"): + msg = "not a PCD file" + raise SyntaxError(msg) + + orientation = s[1538] & 3 + self.tile_post_rotate = None + if orientation == 1: + self.tile_post_rotate = 90 + elif orientation == 3: + self.tile_post_rotate = -90 + + self._mode = "RGB" + self._size = 768, 512 # FIXME: not correct for rotated images! 
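+        # the 768x512 base image begins 96 sectors of 2048 bytes into
+        # the file, hence the tile offset below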
+ self.tile = [ImageFile._Tile("pcd", (0, 0) + self.size, 96 * 2048)] + + def load_end(self) -> None: + if self.tile_post_rotate: + # Handle rotated PCDs + self.im = self.im.rotate(self.tile_post_rotate) + self._size = self.im.size + + +# +# registry + +Image.register_open(PcdImageFile.format, PcdImageFile) + +Image.register_extension(PcdImageFile.format, ".pcd") diff --git a/py311/lib/python3.11/site-packages/PIL/PcfFontFile.py b/py311/lib/python3.11/site-packages/PIL/PcfFontFile.py new file mode 100644 index 0000000000000000000000000000000000000000..0d1968b140a93fb3d1a026d2fd9186e8696e2d1b --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/PcfFontFile.py @@ -0,0 +1,254 @@ +# +# THIS IS WORK IN PROGRESS +# +# The Python Imaging Library +# $Id$ +# +# portable compiled font file parser +# +# history: +# 1997-08-19 fl created +# 2003-09-13 fl fixed loading of unicode fonts +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1997-2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import io +from typing import BinaryIO, Callable + +from . import FontFile, Image +from ._binary import i8 +from ._binary import i16be as b16 +from ._binary import i16le as l16 +from ._binary import i32be as b32 +from ._binary import i32le as l32 + +# -------------------------------------------------------------------- +# declarations + +PCF_MAGIC = 0x70636601 # "\x01fcp" + +PCF_PROPERTIES = 1 << 0 +PCF_ACCELERATORS = 1 << 1 +PCF_METRICS = 1 << 2 +PCF_BITMAPS = 1 << 3 +PCF_INK_METRICS = 1 << 4 +PCF_BDF_ENCODINGS = 1 << 5 +PCF_SWIDTHS = 1 << 6 +PCF_GLYPH_NAMES = 1 << 7 +PCF_BDF_ACCELERATORS = 1 << 8 + +BYTES_PER_ROW: list[Callable[[int], int]] = [ + lambda bits: ((bits + 7) >> 3), + lambda bits: ((bits + 15) >> 3) & ~1, + lambda bits: ((bits + 31) >> 3) & ~3, + lambda bits: ((bits + 63) >> 3) & ~7, +] + + +def sz(s: bytes, o: int) -> bytes: + return s[o : s.index(b"\0", o)] + + +class PcfFontFile(FontFile.FontFile): + """Font file plugin for the X11 PCF format.""" + + name = "name" + + def __init__(self, fp: BinaryIO, charset_encoding: str = "iso8859-1"): + self.charset_encoding = charset_encoding + + magic = l32(fp.read(4)) + if magic != PCF_MAGIC: + msg = "not a PCF file" + raise SyntaxError(msg) + + super().__init__() + + count = l32(fp.read(4)) + self.toc = {} + for i in range(count): + type = l32(fp.read(4)) + self.toc[type] = l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4)) + + self.fp = fp + + self.info = self._load_properties() + + metrics = self._load_metrics() + bitmaps = self._load_bitmaps(metrics) + encoding = self._load_encoding() + + # + # create glyph structure + + for ch, ix in enumerate(encoding): + if ix is not None: + ( + xsize, + ysize, + left, + right, + width, + ascent, + descent, + attributes, + ) = metrics[ix] + self.glyph[ch] = ( + (width, 0), + (left, descent - ysize, xsize + left, descent), + (0, 0, xsize, ysize), + bitmaps[ix], + ) + + def _getformat( + self, tag: int + ) -> tuple[BinaryIO, int, Callable[[bytes], int], Callable[[bytes], int]]: + format, size, offset = self.toc[tag] + + fp = self.fp + fp.seek(offset) + + format = l32(fp.read(4)) + + if format & 4: + i16, i32 = b16, b32 + else: + i16, i32 = l16, l32 + + return fp, format, i16, i32 + + def _load_properties(self) -> dict[bytes, bytes | int]: + # + # font properties + + properties = {} + + fp, format, i16, i32 = self._getformat(PCF_PROPERTIES) + + nprops = i32(fp.read(4)) + + # read property description + p = 
[(i32(fp.read(4)), i8(fp.read(1)), i32(fp.read(4))) for _ in range(nprops)] + + if nprops & 3: + fp.seek(4 - (nprops & 3), io.SEEK_CUR) # pad + + data = fp.read(i32(fp.read(4))) + + for k, s, v in p: + property_value: bytes | int = sz(data, v) if s else v + properties[sz(data, k)] = property_value + + return properties + + def _load_metrics(self) -> list[tuple[int, int, int, int, int, int, int, int]]: + # + # font metrics + + metrics: list[tuple[int, int, int, int, int, int, int, int]] = [] + + fp, format, i16, i32 = self._getformat(PCF_METRICS) + + append = metrics.append + + if (format & 0xFF00) == 0x100: + # "compressed" metrics + for i in range(i16(fp.read(2))): + left = i8(fp.read(1)) - 128 + right = i8(fp.read(1)) - 128 + width = i8(fp.read(1)) - 128 + ascent = i8(fp.read(1)) - 128 + descent = i8(fp.read(1)) - 128 + xsize = right - left + ysize = ascent + descent + append((xsize, ysize, left, right, width, ascent, descent, 0)) + + else: + # "jumbo" metrics + for i in range(i32(fp.read(4))): + left = i16(fp.read(2)) + right = i16(fp.read(2)) + width = i16(fp.read(2)) + ascent = i16(fp.read(2)) + descent = i16(fp.read(2)) + attributes = i16(fp.read(2)) + xsize = right - left + ysize = ascent + descent + append((xsize, ysize, left, right, width, ascent, descent, attributes)) + + return metrics + + def _load_bitmaps( + self, metrics: list[tuple[int, int, int, int, int, int, int, int]] + ) -> list[Image.Image]: + # + # bitmap data + + fp, format, i16, i32 = self._getformat(PCF_BITMAPS) + + nbitmaps = i32(fp.read(4)) + + if nbitmaps != len(metrics): + msg = "Wrong number of bitmaps" + raise OSError(msg) + + offsets = [i32(fp.read(4)) for _ in range(nbitmaps)] + + bitmap_sizes = [i32(fp.read(4)) for _ in range(4)] + + # byteorder = format & 4 # non-zero => MSB + bitorder = format & 8 # non-zero => MSB + padindex = format & 3 + + bitmapsize = bitmap_sizes[padindex] + offsets.append(bitmapsize) + + data = fp.read(bitmapsize) + + pad = BYTES_PER_ROW[padindex] + mode = "1;R" + if bitorder: + mode = "1" + + bitmaps = [] + for i in range(nbitmaps): + xsize, ysize = metrics[i][:2] + b, e = offsets[i : i + 2] + bitmaps.append( + Image.frombytes("1", (xsize, ysize), data[b:e], "raw", mode, pad(xsize)) + ) + + return bitmaps + + def _load_encoding(self) -> list[int | None]: + fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS) + + first_col, last_col = i16(fp.read(2)), i16(fp.read(2)) + first_row, last_row = i16(fp.read(2)), i16(fp.read(2)) + + i16(fp.read(2)) # default + + nencoding = (last_col - first_col + 1) * (last_row - first_row + 1) + + # map character code to bitmap index + encoding: list[int | None] = [None] * min(256, nencoding) + + encoding_offsets = [i16(fp.read(2)) for _ in range(nencoding)] + + for i in range(first_col, len(encoding)): + try: + encoding_offset = encoding_offsets[ + ord(bytearray([i]).decode(self.charset_encoding)) + ] + if encoding_offset != 0xFFFF: + encoding[i] = encoding_offset + except UnicodeDecodeError: + # character is not supported in selected encoding + pass + + return encoding diff --git a/py311/lib/python3.11/site-packages/PIL/PcxImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/PcxImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..458d586c463ce5355d807f2a2dd583545ccc8229 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/PcxImagePlugin.py @@ -0,0 +1,228 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# PCX file handling +# +# This format was originally used by ZSoft's popular PaintBrush +# program for the IBM PC. It is also supported by many MS-DOS and +# Windows applications, including the Windows PaintBrush program in +# Windows 3. +# +# history: +# 1995-09-01 fl Created +# 1996-05-20 fl Fixed RGB support +# 1997-01-03 fl Fixed 2-bit and 4-bit support +# 1999-02-03 fl Fixed 8-bit support (broken in 1.0b1) +# 1999-02-07 fl Added write support +# 2002-06-09 fl Made 2-bit and 4-bit support a bit more robust +# 2002-07-30 fl Seek from to current position, not beginning of file +# 2003-06-03 fl Extract DPI settings (info["dpi"]) +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import io +import logging +from typing import IO + +from . import Image, ImageFile, ImagePalette +from ._binary import i16le as i16 +from ._binary import o8 +from ._binary import o16le as o16 + +logger = logging.getLogger(__name__) + + +def _accept(prefix: bytes) -> bool: + return prefix[0] == 10 and prefix[1] in [0, 2, 3, 5] + + +## +# Image plugin for Paintbrush images. + + +class PcxImageFile(ImageFile.ImageFile): + format = "PCX" + format_description = "Paintbrush" + + def _open(self) -> None: + # header + assert self.fp is not None + + s = self.fp.read(68) + if not _accept(s): + msg = "not a PCX file" + raise SyntaxError(msg) + + # image + bbox = i16(s, 4), i16(s, 6), i16(s, 8) + 1, i16(s, 10) + 1 + if bbox[2] <= bbox[0] or bbox[3] <= bbox[1]: + msg = "bad PCX image size" + raise SyntaxError(msg) + logger.debug("BBox: %s %s %s %s", *bbox) + + offset = self.fp.tell() + 60 + + # format + version = s[1] + bits = s[3] + planes = s[65] + provided_stride = i16(s, 66) + logger.debug( + "PCX version %s, bits %s, planes %s, stride %s", + version, + bits, + planes, + provided_stride, + ) + + self.info["dpi"] = i16(s, 12), i16(s, 14) + + if bits == 1 and planes == 1: + mode = rawmode = "1" + + elif bits == 1 and planes in (2, 4): + mode = "P" + rawmode = f"P;{planes}L" + self.palette = ImagePalette.raw("RGB", s[16:64]) + + elif version == 5 and bits == 8 and planes == 1: + mode = rawmode = "L" + # FIXME: hey, this doesn't work with the incremental loader !!! + self.fp.seek(-769, io.SEEK_END) + s = self.fp.read(769) + if len(s) == 769 and s[0] == 12: + # check if the palette is linear grayscale + for i in range(256): + if s[i * 3 + 1 : i * 3 + 4] != o8(i) * 3: + mode = rawmode = "P" + break + if mode == "P": + self.palette = ImagePalette.raw("RGB", s[1:]) + + elif version == 5 and bits == 8 and planes == 3: + mode = "RGB" + rawmode = "RGB;L" + + else: + msg = "unknown PCX mode" + raise OSError(msg) + + self._mode = mode + self._size = bbox[2] - bbox[0], bbox[3] - bbox[1] + + # Don't trust the passed in stride. + # Calculate the approximate position for ourselves. 
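+        # (a file-controlled stride allowed a buffer over-read in the
+        # decoder)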
+ # CVE-2020-35653 + stride = (self._size[0] * bits + 7) // 8 + + # While the specification states that this must be even, + # not all images follow this + if provided_stride != stride: + stride += stride % 2 + + bbox = (0, 0) + self.size + logger.debug("size: %sx%s", *self.size) + + self.tile = [ImageFile._Tile("pcx", bbox, offset, (rawmode, planes * stride))] + + +# -------------------------------------------------------------------- +# save PCX files + + +SAVE = { + # mode: (version, bits, planes, raw mode) + "1": (2, 1, 1, "1"), + "L": (5, 8, 1, "L"), + "P": (5, 8, 1, "P"), + "RGB": (5, 8, 3, "RGB;L"), +} + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + try: + version, bits, planes, rawmode = SAVE[im.mode] + except KeyError as e: + msg = f"Cannot save {im.mode} images as PCX" + raise ValueError(msg) from e + + # bytes per plane + stride = (im.size[0] * bits + 7) // 8 + # stride should be even + stride += stride % 2 + # Stride needs to be kept in sync with the PcxEncode.c version. + # Ideally it should be passed in in the state, but the bytes value + # gets overwritten. + + logger.debug( + "PcxImagePlugin._save: xwidth: %d, bits: %d, stride: %d", + im.size[0], + bits, + stride, + ) + + # under windows, we could determine the current screen size with + # "Image.core.display_mode()[1]", but I think that's overkill... + + screen = im.size + + dpi = 100, 100 + + # PCX header + fp.write( + o8(10) + + o8(version) + + o8(1) + + o8(bits) + + o16(0) + + o16(0) + + o16(im.size[0] - 1) + + o16(im.size[1] - 1) + + o16(dpi[0]) + + o16(dpi[1]) + + b"\0" * 24 + + b"\xff" * 24 + + b"\0" + + o8(planes) + + o16(stride) + + o16(1) + + o16(screen[0]) + + o16(screen[1]) + + b"\0" * 54 + ) + + assert fp.tell() == 128 + + ImageFile._save( + im, fp, [ImageFile._Tile("pcx", (0, 0) + im.size, 0, (rawmode, bits * planes))] + ) + + if im.mode == "P": + # colour palette + fp.write(o8(12)) + palette = im.im.getpalette("RGB", "RGB") + palette += b"\x00" * (768 - len(palette)) + fp.write(palette) # 768 bytes + elif im.mode == "L": + # grayscale palette + fp.write(o8(12)) + for i in range(256): + fp.write(o8(i) * 3) + + +# -------------------------------------------------------------------- +# registry + + +Image.register_open(PcxImageFile.format, PcxImageFile, _accept) +Image.register_save(PcxImageFile.format, _save) + +Image.register_extension(PcxImageFile.format, ".pcx") + +Image.register_mime(PcxImageFile.format, "image/x-pcx") diff --git a/py311/lib/python3.11/site-packages/PIL/PdfImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/PdfImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..e9c20ddc159e150676ead01cf314420008f05232 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/PdfImagePlugin.py @@ -0,0 +1,311 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PDF (Acrobat) file handling +# +# History: +# 1996-07-16 fl Created +# 1997-01-18 fl Fixed header +# 2004-02-21 fl Fixes for 1/L/CMYK images, etc. +# 2004-02-24 fl Fixes for 1 and P images. +# +# Copyright (c) 1997-2004 by Secret Labs AB. All rights reserved. +# Copyright (c) 1996-1997 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +## +# Image plugin for PDF images (output only). +## +from __future__ import annotations + +import io +import math +import os +import time +from typing import IO, Any + +from . 
import Image, ImageFile, ImageSequence, PdfParser, __version__, features + +# +# -------------------------------------------------------------------- + +# object ids: +# 1. catalogue +# 2. pages +# 3. image +# 4. page +# 5. page contents + + +def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + _save(im, fp, filename, save_all=True) + + +## +# (Internal) Image save plugin for the PDF format. + + +def _write_image( + im: Image.Image, + filename: str | bytes, + existing_pdf: PdfParser.PdfParser, + image_refs: list[PdfParser.IndirectReference], +) -> tuple[PdfParser.IndirectReference, str]: + # FIXME: Should replace ASCIIHexDecode with RunLengthDecode + # (packbits) or LZWDecode (tiff/lzw compression). Note that + # PDF 1.2 also supports Flatedecode (zip compression). + + params = None + decode = None + + # + # Get image characteristics + + width, height = im.size + + dict_obj: dict[str, Any] = {"BitsPerComponent": 8} + if im.mode == "1": + if features.check("libtiff"): + decode_filter = "CCITTFaxDecode" + dict_obj["BitsPerComponent"] = 1 + params = PdfParser.PdfArray( + [ + PdfParser.PdfDict( + { + "K": -1, + "BlackIs1": True, + "Columns": width, + "Rows": height, + } + ) + ] + ) + else: + decode_filter = "DCTDecode" + dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceGray") + procset = "ImageB" # grayscale + elif im.mode == "L": + decode_filter = "DCTDecode" + # params = f"<< /Predictor 15 /Columns {width-2} >>" + dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceGray") + procset = "ImageB" # grayscale + elif im.mode == "LA": + decode_filter = "JPXDecode" + # params = f"<< /Predictor 15 /Columns {width-2} >>" + procset = "ImageB" # grayscale + dict_obj["SMaskInData"] = 1 + elif im.mode == "P": + decode_filter = "ASCIIHexDecode" + palette = im.getpalette() + assert palette is not None + dict_obj["ColorSpace"] = [ + PdfParser.PdfName("Indexed"), + PdfParser.PdfName("DeviceRGB"), + len(palette) // 3 - 1, + PdfParser.PdfBinary(palette), + ] + procset = "ImageI" # indexed color + + if "transparency" in im.info: + smask = im.convert("LA").getchannel("A") + smask.encoderinfo = {} + + image_ref = _write_image(smask, filename, existing_pdf, image_refs)[0] + dict_obj["SMask"] = image_ref + elif im.mode == "RGB": + decode_filter = "DCTDecode" + dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceRGB") + procset = "ImageC" # color images + elif im.mode == "RGBA": + decode_filter = "JPXDecode" + procset = "ImageC" # color images + dict_obj["SMaskInData"] = 1 + elif im.mode == "CMYK": + decode_filter = "DCTDecode" + dict_obj["ColorSpace"] = PdfParser.PdfName("DeviceCMYK") + procset = "ImageC" # color images + decode = [1, 0, 1, 0, 1, 0, 1, 0] + else: + msg = f"cannot save mode {im.mode}" + raise ValueError(msg) + + # + # image + + op = io.BytesIO() + + if decode_filter == "ASCIIHexDecode": + ImageFile._save(im, op, [ImageFile._Tile("hex", (0, 0) + im.size, 0, im.mode)]) + elif decode_filter == "CCITTFaxDecode": + im.save( + op, + "TIFF", + compression="group4", + # use a single strip + strip_size=math.ceil(width / 8) * height, + ) + elif decode_filter == "DCTDecode": + Image.SAVE["JPEG"](im, op, filename) + elif decode_filter == "JPXDecode": + del dict_obj["BitsPerComponent"] + Image.SAVE["JPEG2000"](im, op, filename) + else: + msg = f"unsupported PDF filter ({decode_filter})" + raise ValueError(msg) + + stream = op.getvalue() + filter: PdfParser.PdfArray | PdfParser.PdfName + if decode_filter == "CCITTFaxDecode": + stream = stream[8:] + filter = 
PdfParser.PdfArray([PdfParser.PdfName(decode_filter)]) + else: + filter = PdfParser.PdfName(decode_filter) + + image_ref = image_refs.pop(0) + existing_pdf.write_obj( + image_ref, + stream=stream, + Type=PdfParser.PdfName("XObject"), + Subtype=PdfParser.PdfName("Image"), + Width=width, # * 72.0 / x_resolution, + Height=height, # * 72.0 / y_resolution, + Filter=filter, + Decode=decode, + DecodeParms=params, + **dict_obj, + ) + + return image_ref, procset + + +def _save( + im: Image.Image, fp: IO[bytes], filename: str | bytes, save_all: bool = False +) -> None: + is_appending = im.encoderinfo.get("append", False) + filename_str = filename.decode() if isinstance(filename, bytes) else filename + if is_appending: + existing_pdf = PdfParser.PdfParser(f=fp, filename=filename_str, mode="r+b") + else: + existing_pdf = PdfParser.PdfParser(f=fp, filename=filename_str, mode="w+b") + + dpi = im.encoderinfo.get("dpi") + if dpi: + x_resolution = dpi[0] + y_resolution = dpi[1] + else: + x_resolution = y_resolution = im.encoderinfo.get("resolution", 72.0) + + info = { + "title": ( + None if is_appending else os.path.splitext(os.path.basename(filename))[0] + ), + "author": None, + "subject": None, + "keywords": None, + "creator": None, + "producer": None, + "creationDate": None if is_appending else time.gmtime(), + "modDate": None if is_appending else time.gmtime(), + } + for k, default in info.items(): + v = im.encoderinfo.get(k) if k in im.encoderinfo else default + if v: + existing_pdf.info[k[0].upper() + k[1:]] = v + + # + # make sure image data is available + im.load() + + existing_pdf.start_writing() + existing_pdf.write_header() + existing_pdf.write_comment(f"created by Pillow {__version__} PDF driver") + + # + # pages + ims = [im] + if save_all: + append_images = im.encoderinfo.get("append_images", []) + for append_im in append_images: + append_im.encoderinfo = im.encoderinfo.copy() + ims.append(append_im) + number_of_pages = 0 + image_refs = [] + page_refs = [] + contents_refs = [] + for im in ims: + im_number_of_pages = 1 + if save_all: + im_number_of_pages = getattr(im, "n_frames", 1) + number_of_pages += im_number_of_pages + for i in range(im_number_of_pages): + image_refs.append(existing_pdf.next_object_id(0)) + if im.mode == "P" and "transparency" in im.info: + image_refs.append(existing_pdf.next_object_id(0)) + + page_refs.append(existing_pdf.next_object_id(0)) + contents_refs.append(existing_pdf.next_object_id(0)) + existing_pdf.pages.append(page_refs[-1]) + + # + # catalog and list of pages + existing_pdf.write_catalog() + + page_number = 0 + for im_sequence in ims: + im_pages: ImageSequence.Iterator | list[Image.Image] = ( + ImageSequence.Iterator(im_sequence) if save_all else [im_sequence] + ) + for im in im_pages: + image_ref, procset = _write_image(im, filename, existing_pdf, image_refs) + + # + # page + + existing_pdf.write_page( + page_refs[page_number], + Resources=PdfParser.PdfDict( + ProcSet=[PdfParser.PdfName("PDF"), PdfParser.PdfName(procset)], + XObject=PdfParser.PdfDict(image=image_ref), + ), + MediaBox=[ + 0, + 0, + im.width * 72.0 / x_resolution, + im.height * 72.0 / y_resolution, + ], + Contents=contents_refs[page_number], + ) + + # + # page contents + + page_contents = b"q %f 0 0 %f 0 0 cm /image Do Q\n" % ( + im.width * 72.0 / x_resolution, + im.height * 72.0 / y_resolution, + ) + + existing_pdf.write_obj(contents_refs[page_number], stream=page_contents) + + page_number += 1 + + # + # trailer + existing_pdf.write_xref_and_trailer() + if hasattr(fp, "flush"): + fp.flush() + 
existing_pdf.close() + + +# +# -------------------------------------------------------------------- + + +Image.register_save("PDF", _save) +Image.register_save_all("PDF", _save_all) + +Image.register_extension("PDF", ".pdf") + +Image.register_mime("PDF", "application/pdf") diff --git a/py311/lib/python3.11/site-packages/PIL/PdfParser.py b/py311/lib/python3.11/site-packages/PIL/PdfParser.py new file mode 100644 index 0000000000000000000000000000000000000000..73d8c21c023c95304c6fddfc2e5fce6d331401b1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/PdfParser.py @@ -0,0 +1,1074 @@ +from __future__ import annotations + +import calendar +import codecs +import collections +import mmap +import os +import re +import time +import zlib +from typing import IO, Any, NamedTuple, Union + + +# see 7.9.2.2 Text String Type on page 86 and D.3 PDFDocEncoding Character Set +# on page 656 +def encode_text(s: str) -> bytes: + return codecs.BOM_UTF16_BE + s.encode("utf_16_be") + + +PDFDocEncoding = { + 0x16: "\u0017", + 0x18: "\u02d8", + 0x19: "\u02c7", + 0x1A: "\u02c6", + 0x1B: "\u02d9", + 0x1C: "\u02dd", + 0x1D: "\u02db", + 0x1E: "\u02da", + 0x1F: "\u02dc", + 0x80: "\u2022", + 0x81: "\u2020", + 0x82: "\u2021", + 0x83: "\u2026", + 0x84: "\u2014", + 0x85: "\u2013", + 0x86: "\u0192", + 0x87: "\u2044", + 0x88: "\u2039", + 0x89: "\u203a", + 0x8A: "\u2212", + 0x8B: "\u2030", + 0x8C: "\u201e", + 0x8D: "\u201c", + 0x8E: "\u201d", + 0x8F: "\u2018", + 0x90: "\u2019", + 0x91: "\u201a", + 0x92: "\u2122", + 0x93: "\ufb01", + 0x94: "\ufb02", + 0x95: "\u0141", + 0x96: "\u0152", + 0x97: "\u0160", + 0x98: "\u0178", + 0x99: "\u017d", + 0x9A: "\u0131", + 0x9B: "\u0142", + 0x9C: "\u0153", + 0x9D: "\u0161", + 0x9E: "\u017e", + 0xA0: "\u20ac", +} + + +def decode_text(b: bytes) -> str: + if b[: len(codecs.BOM_UTF16_BE)] == codecs.BOM_UTF16_BE: + return b[len(codecs.BOM_UTF16_BE) :].decode("utf_16_be") + else: + return "".join(PDFDocEncoding.get(byte, chr(byte)) for byte in b) + + +class PdfFormatError(RuntimeError): + """An error that probably indicates a syntactic or semantic error in the + PDF file structure""" + + pass + + +def check_format_condition(condition: bool, error_message: str) -> None: + if not condition: + raise PdfFormatError(error_message) + + +class IndirectReferenceTuple(NamedTuple): + object_id: int + generation: int + + +class IndirectReference(IndirectReferenceTuple): + def __str__(self) -> str: + return f"{self.object_id} {self.generation} R" + + def __bytes__(self) -> bytes: + return self.__str__().encode("us-ascii") + + def __eq__(self, other: object) -> bool: + if self.__class__ is not other.__class__: + return False + assert isinstance(other, IndirectReference) + return other.object_id == self.object_id and other.generation == self.generation + + def __ne__(self, other: object) -> bool: + return not (self == other) + + def __hash__(self) -> int: + return hash((self.object_id, self.generation)) + + +class IndirectObjectDef(IndirectReference): + def __str__(self) -> str: + return f"{self.object_id} {self.generation} obj" + + +class XrefTable: + def __init__(self) -> None: + self.existing_entries: dict[int, tuple[int, int]] = ( + {} + ) # object ID => (offset, generation) + self.new_entries: dict[int, tuple[int, int]] = ( + {} + ) # object ID => (offset, generation) + self.deleted_entries = {0: 65536} # object ID => generation + self.reading_finished = False + + def __setitem__(self, key: int, value: tuple[int, int]) -> None: + if self.reading_finished: + self.new_entries[key] = value + else: + 
self.existing_entries[key] = value + if key in self.deleted_entries: + del self.deleted_entries[key] + + def __getitem__(self, key: int) -> tuple[int, int]: + try: + return self.new_entries[key] + except KeyError: + return self.existing_entries[key] + + def __delitem__(self, key: int) -> None: + if key in self.new_entries: + generation = self.new_entries[key][1] + 1 + del self.new_entries[key] + self.deleted_entries[key] = generation + elif key in self.existing_entries: + generation = self.existing_entries[key][1] + 1 + self.deleted_entries[key] = generation + elif key in self.deleted_entries: + generation = self.deleted_entries[key] + else: + msg = f"object ID {key} cannot be deleted because it doesn't exist" + raise IndexError(msg) + + def __contains__(self, key: int) -> bool: + return key in self.existing_entries or key in self.new_entries + + def __len__(self) -> int: + return len( + set(self.existing_entries.keys()) + | set(self.new_entries.keys()) + | set(self.deleted_entries.keys()) + ) + + def keys(self) -> set[int]: + return ( + set(self.existing_entries.keys()) - set(self.deleted_entries.keys()) + ) | set(self.new_entries.keys()) + + def write(self, f: IO[bytes]) -> int: + keys = sorted(set(self.new_entries.keys()) | set(self.deleted_entries.keys())) + deleted_keys = sorted(set(self.deleted_entries.keys())) + startxref = f.tell() + f.write(b"xref\n") + while keys: + # find a contiguous sequence of object IDs + prev: int | None = None + for index, key in enumerate(keys): + if prev is None or prev + 1 == key: + prev = key + else: + contiguous_keys = keys[:index] + keys = keys[index:] + break + else: + contiguous_keys = keys + keys = [] + f.write(b"%d %d\n" % (contiguous_keys[0], len(contiguous_keys))) + for object_id in contiguous_keys: + if object_id in self.new_entries: + f.write(b"%010d %05d n \n" % self.new_entries[object_id]) + else: + this_deleted_object_id = deleted_keys.pop(0) + check_format_condition( + object_id == this_deleted_object_id, + f"expected the next deleted object ID to be {object_id}, " + f"instead found {this_deleted_object_id}", + ) + try: + next_in_linked_list = deleted_keys[0] + except IndexError: + next_in_linked_list = 0 + f.write( + b"%010d %05d f \n" + % (next_in_linked_list, self.deleted_entries[object_id]) + ) + return startxref + + +class PdfName: + name: bytes + + def __init__(self, name: PdfName | bytes | str) -> None: + if isinstance(name, PdfName): + self.name = name.name + elif isinstance(name, bytes): + self.name = name + else: + self.name = name.encode("us-ascii") + + def name_as_str(self) -> str: + return self.name.decode("us-ascii") + + def __eq__(self, other: object) -> bool: + return ( + isinstance(other, PdfName) and other.name == self.name + ) or other == self.name + + def __hash__(self) -> int: + return hash(self.name) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({repr(self.name)})" + + @classmethod + def from_pdf_stream(cls, data: bytes) -> PdfName: + return cls(PdfParser.interpret_name(data)) + + allowed_chars = set(range(33, 127)) - {ord(c) for c in "#%/()<>[]{}"} + + def __bytes__(self) -> bytes: + result = bytearray(b"/") + for b in self.name: + if b in self.allowed_chars: + result.append(b) + else: + result.extend(b"#%02X" % b) + return bytes(result) + + +class PdfArray(list[Any]): + def __bytes__(self) -> bytes: + return b"[ " + b" ".join(pdf_repr(x) for x in self) + b" ]" + + +TYPE_CHECKING = False +if TYPE_CHECKING: + _DictBase = collections.UserDict[Union[str, bytes], Any] +else: + _DictBase = 
collections.UserDict + + +class PdfDict(_DictBase): + def __setattr__(self, key: str, value: Any) -> None: + if key == "data": + collections.UserDict.__setattr__(self, key, value) + else: + self[key.encode("us-ascii")] = value + + def __getattr__(self, key: str) -> str | time.struct_time: + try: + value = self[key.encode("us-ascii")] + except KeyError as e: + raise AttributeError(key) from e + if isinstance(value, bytes): + value = decode_text(value) + if key.endswith("Date"): + if value.startswith("D:"): + value = value[2:] + + relationship = "Z" + if len(value) > 17: + relationship = value[14] + offset = int(value[15:17]) * 60 + if len(value) > 20: + offset += int(value[18:20]) + + format = "%Y%m%d%H%M%S"[: len(value) - 2] + value = time.strptime(value[: len(format) + 2], format) + if relationship in ["+", "-"]: + offset *= 60 + if relationship == "+": + offset *= -1 + value = time.gmtime(calendar.timegm(value) + offset) + return value + + def __bytes__(self) -> bytes: + out = bytearray(b"<<") + for key, value in self.items(): + if value is None: + continue + value = pdf_repr(value) + out.extend(b"\n") + out.extend(bytes(PdfName(key))) + out.extend(b" ") + out.extend(value) + out.extend(b"\n>>") + return bytes(out) + + +class PdfBinary: + def __init__(self, data: list[int] | bytes) -> None: + self.data = data + + def __bytes__(self) -> bytes: + return b"<%s>" % b"".join(b"%02X" % b for b in self.data) + + +class PdfStream: + def __init__(self, dictionary: PdfDict, buf: bytes) -> None: + self.dictionary = dictionary + self.buf = buf + + def decode(self) -> bytes: + try: + filter = self.dictionary[b"Filter"] + except KeyError: + return self.buf + if filter == b"FlateDecode": + try: + expected_length = self.dictionary[b"DL"] + except KeyError: + expected_length = self.dictionary[b"Length"] + return zlib.decompress(self.buf, bufsize=int(expected_length)) + else: + msg = f"stream filter {repr(filter)} unknown/unsupported" + raise NotImplementedError(msg) + + +def pdf_repr(x: Any) -> bytes: + if x is True: + return b"true" + elif x is False: + return b"false" + elif x is None: + return b"null" + elif isinstance(x, (PdfName, PdfDict, PdfArray, PdfBinary)): + return bytes(x) + elif isinstance(x, (int, float)): + return str(x).encode("us-ascii") + elif isinstance(x, time.struct_time): + return b"(D:" + time.strftime("%Y%m%d%H%M%SZ", x).encode("us-ascii") + b")" + elif isinstance(x, dict): + return bytes(PdfDict(x)) + elif isinstance(x, list): + return bytes(PdfArray(x)) + elif isinstance(x, str): + return pdf_repr(encode_text(x)) + elif isinstance(x, bytes): + # XXX escape more chars? 
handle binary garbage + x = x.replace(b"\\", b"\\\\") + x = x.replace(b"(", b"\\(") + x = x.replace(b")", b"\\)") + return b"(" + x + b")" + else: + return bytes(x) + + +class PdfParser: + """Based on + https://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf + Supports PDF up to 1.4 + """ + + def __init__( + self, + filename: str | None = None, + f: IO[bytes] | None = None, + buf: bytes | bytearray | None = None, + start_offset: int = 0, + mode: str = "rb", + ) -> None: + if buf and f: + msg = "specify buf or f or filename, but not both buf and f" + raise RuntimeError(msg) + self.filename = filename + self.buf: bytes | bytearray | mmap.mmap | None = buf + self.f = f + self.start_offset = start_offset + self.should_close_buf = False + self.should_close_file = False + if filename is not None and f is None: + self.f = f = open(filename, mode) + self.should_close_file = True + if f is not None: + self.buf = self.get_buf_from_file(f) + self.should_close_buf = True + if not filename and hasattr(f, "name"): + self.filename = f.name + self.cached_objects: dict[IndirectReference, Any] = {} + self.root_ref: IndirectReference | None + self.info_ref: IndirectReference | None + self.pages_ref: IndirectReference | None + self.last_xref_section_offset: int | None + if self.buf: + self.read_pdf_info() + else: + self.file_size_total = self.file_size_this = 0 + self.root = PdfDict() + self.root_ref = None + self.info = PdfDict() + self.info_ref = None + self.page_tree_root = PdfDict() + self.pages: list[IndirectReference] = [] + self.orig_pages: list[IndirectReference] = [] + self.pages_ref = None + self.last_xref_section_offset = None + self.trailer_dict: dict[bytes, Any] = {} + self.xref_table = XrefTable() + self.xref_table.reading_finished = True + if f: + self.seek_end() + + def __enter__(self) -> PdfParser: + return self + + def __exit__(self, *args: object) -> None: + self.close() + + def start_writing(self) -> None: + self.close_buf() + self.seek_end() + + def close_buf(self) -> None: + if isinstance(self.buf, mmap.mmap): + self.buf.close() + self.buf = None + + def close(self) -> None: + if self.should_close_buf: + self.close_buf() + if self.f is not None and self.should_close_file: + self.f.close() + self.f = None + + def seek_end(self) -> None: + assert self.f is not None + self.f.seek(0, os.SEEK_END) + + def write_header(self) -> None: + assert self.f is not None + self.f.write(b"%PDF-1.4\n") + + def write_comment(self, s: str) -> None: + assert self.f is not None + self.f.write(f"% {s}\n".encode()) + + def write_catalog(self) -> IndirectReference: + assert self.f is not None + self.del_root() + self.root_ref = self.next_object_id(self.f.tell()) + self.pages_ref = self.next_object_id(0) + self.rewrite_pages() + self.write_obj(self.root_ref, Type=PdfName(b"Catalog"), Pages=self.pages_ref) + self.write_obj( + self.pages_ref, + Type=PdfName(b"Pages"), + Count=len(self.pages), + Kids=self.pages, + ) + return self.root_ref + + def rewrite_pages(self) -> None: + pages_tree_nodes_to_delete = [] + for i, page_ref in enumerate(self.orig_pages): + page_info = self.cached_objects[page_ref] + del self.xref_table[page_ref.object_id] + pages_tree_nodes_to_delete.append(page_info[PdfName(b"Parent")]) + if page_ref not in self.pages: + # the page has been deleted + continue + # make dict keys into strings for passing to write_page + stringified_page_info = {} + for key, value in page_info.items(): + # key should be a PdfName + stringified_page_info[key.name_as_str()] = value + 
stringified_page_info["Parent"] = self.pages_ref + new_page_ref = self.write_page(None, **stringified_page_info) + for j, cur_page_ref in enumerate(self.pages): + if cur_page_ref == page_ref: + # replace the page reference with the new one + self.pages[j] = new_page_ref + # delete redundant Pages tree nodes from xref table + for pages_tree_node_ref in pages_tree_nodes_to_delete: + while pages_tree_node_ref: + pages_tree_node = self.cached_objects[pages_tree_node_ref] + if pages_tree_node_ref.object_id in self.xref_table: + del self.xref_table[pages_tree_node_ref.object_id] + pages_tree_node_ref = pages_tree_node.get(b"Parent", None) + self.orig_pages = [] + + def write_xref_and_trailer( + self, new_root_ref: IndirectReference | None = None + ) -> None: + assert self.f is not None + if new_root_ref: + self.del_root() + self.root_ref = new_root_ref + if self.info: + self.info_ref = self.write_obj(None, self.info) + start_xref = self.xref_table.write(self.f) + num_entries = len(self.xref_table) + trailer_dict: dict[str | bytes, Any] = { + b"Root": self.root_ref, + b"Size": num_entries, + } + if self.last_xref_section_offset is not None: + trailer_dict[b"Prev"] = self.last_xref_section_offset + if self.info: + trailer_dict[b"Info"] = self.info_ref + self.last_xref_section_offset = start_xref + self.f.write( + b"trailer\n" + + bytes(PdfDict(trailer_dict)) + + b"\nstartxref\n%d\n%%%%EOF" % start_xref + ) + + def write_page( + self, ref: int | IndirectReference | None, *objs: Any, **dict_obj: Any + ) -> IndirectReference: + obj_ref = self.pages[ref] if isinstance(ref, int) else ref + if "Type" not in dict_obj: + dict_obj["Type"] = PdfName(b"Page") + if "Parent" not in dict_obj: + dict_obj["Parent"] = self.pages_ref + return self.write_obj(obj_ref, *objs, **dict_obj) + + def write_obj( + self, ref: IndirectReference | None, *objs: Any, **dict_obj: Any + ) -> IndirectReference: + assert self.f is not None + f = self.f + if ref is None: + ref = self.next_object_id(f.tell()) + else: + self.xref_table[ref.object_id] = (f.tell(), ref.generation) + f.write(bytes(IndirectObjectDef(*ref))) + stream = dict_obj.pop("stream", None) + if stream is not None: + dict_obj["Length"] = len(stream) + if dict_obj: + f.write(pdf_repr(dict_obj)) + for obj in objs: + f.write(pdf_repr(obj)) + if stream is not None: + f.write(b"stream\n") + f.write(stream) + f.write(b"\nendstream\n") + f.write(b"endobj\n") + return ref + + def del_root(self) -> None: + if self.root_ref is None: + return + del self.xref_table[self.root_ref.object_id] + del self.xref_table[self.root[b"Pages"].object_id] + + @staticmethod + def get_buf_from_file(f: IO[bytes]) -> bytes | mmap.mmap: + if hasattr(f, "getbuffer"): + return f.getbuffer() + elif hasattr(f, "getvalue"): + return f.getvalue() + else: + try: + return mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) + except ValueError: # cannot mmap an empty file + return b"" + + def read_pdf_info(self) -> None: + assert self.buf is not None + self.file_size_total = len(self.buf) + self.file_size_this = self.file_size_total - self.start_offset + self.read_trailer() + check_format_condition( + self.trailer_dict.get(b"Root") is not None, "Root is missing" + ) + self.root_ref = self.trailer_dict[b"Root"] + assert self.root_ref is not None + self.info_ref = self.trailer_dict.get(b"Info", None) + self.root = PdfDict(self.read_indirect(self.root_ref)) + if self.info_ref is None: + self.info = PdfDict() + else: + self.info = PdfDict(self.read_indirect(self.info_ref)) + check_format_condition(b"Type" in 
self.root, "/Type missing in Root") + check_format_condition( + self.root[b"Type"] == b"Catalog", "/Type in Root is not /Catalog" + ) + check_format_condition( + self.root.get(b"Pages") is not None, "/Pages missing in Root" + ) + check_format_condition( + isinstance(self.root[b"Pages"], IndirectReference), + "/Pages in Root is not an indirect reference", + ) + self.pages_ref = self.root[b"Pages"] + assert self.pages_ref is not None + self.page_tree_root = self.read_indirect(self.pages_ref) + self.pages = self.linearize_page_tree(self.page_tree_root) + # save the original list of page references + # in case the user modifies, adds or deletes some pages + # and we need to rewrite the pages and their list + self.orig_pages = self.pages[:] + + def next_object_id(self, offset: int | None = None) -> IndirectReference: + try: + # TODO: support reuse of deleted objects + reference = IndirectReference(max(self.xref_table.keys()) + 1, 0) + except ValueError: + reference = IndirectReference(1, 0) + if offset is not None: + self.xref_table[reference.object_id] = (offset, 0) + return reference + + delimiter = rb"[][()<>{}/%]" + delimiter_or_ws = rb"[][()<>{}/%\000\011\012\014\015\040]" + whitespace = rb"[\000\011\012\014\015\040]" + whitespace_or_hex = rb"[\000\011\012\014\015\0400-9a-fA-F]" + whitespace_optional = whitespace + b"*" + whitespace_mandatory = whitespace + b"+" + # No "\012" aka "\n" or "\015" aka "\r": + whitespace_optional_no_nl = rb"[\000\011\014\040]*" + newline_only = rb"[\r\n]+" + newline = whitespace_optional_no_nl + newline_only + whitespace_optional_no_nl + re_trailer_end = re.compile( + whitespace_mandatory + + rb"trailer" + + whitespace_optional + + rb"<<(.*>>)" + + newline + + rb"startxref" + + newline + + rb"([0-9]+)" + + newline + + rb"%%EOF" + + whitespace_optional + + rb"$", + re.DOTALL, + ) + re_trailer_prev = re.compile( + whitespace_optional + + rb"trailer" + + whitespace_optional + + rb"<<(.*?>>)" + + newline + + rb"startxref" + + newline + + rb"([0-9]+)" + + newline + + rb"%%EOF" + + whitespace_optional, + re.DOTALL, + ) + + def read_trailer(self) -> None: + assert self.buf is not None + search_start_offset = len(self.buf) - 16384 + if search_start_offset < self.start_offset: + search_start_offset = self.start_offset + m = self.re_trailer_end.search(self.buf, search_start_offset) + check_format_condition(m is not None, "trailer end not found") + # make sure we found the LAST trailer + last_match = m + while m: + last_match = m + m = self.re_trailer_end.search(self.buf, m.start() + 16) + if not m: + m = last_match + assert m is not None + trailer_data = m.group(1) + self.last_xref_section_offset = int(m.group(2)) + self.trailer_dict = self.interpret_trailer(trailer_data) + self.xref_table = XrefTable() + self.read_xref_table(xref_section_offset=self.last_xref_section_offset) + if b"Prev" in self.trailer_dict: + self.read_prev_trailer(self.trailer_dict[b"Prev"]) + + def read_prev_trailer(self, xref_section_offset: int) -> None: + assert self.buf is not None + trailer_offset = self.read_xref_table(xref_section_offset=xref_section_offset) + m = self.re_trailer_prev.search( + self.buf[trailer_offset : trailer_offset + 16384] + ) + check_format_condition(m is not None, "previous trailer not found") + assert m is not None + trailer_data = m.group(1) + check_format_condition( + int(m.group(2)) == xref_section_offset, + "xref section offset in previous trailer doesn't match what was expected", + ) + trailer_dict = self.interpret_trailer(trailer_data) + if b"Prev" in 
trailer_dict: + self.read_prev_trailer(trailer_dict[b"Prev"]) + + re_whitespace_optional = re.compile(whitespace_optional) + re_name = re.compile( + whitespace_optional + + rb"/([!-$&'*-.0-;=?-Z\\^-z|~]+)(?=" + + delimiter_or_ws + + rb")" + ) + re_dict_start = re.compile(whitespace_optional + rb"<<") + re_dict_end = re.compile(whitespace_optional + rb">>" + whitespace_optional) + + @classmethod + def interpret_trailer(cls, trailer_data: bytes) -> dict[bytes, Any]: + trailer = {} + offset = 0 + while True: + m = cls.re_name.match(trailer_data, offset) + if not m: + m = cls.re_dict_end.match(trailer_data, offset) + check_format_condition( + m is not None and m.end() == len(trailer_data), + "name not found in trailer, remaining data: " + + repr(trailer_data[offset:]), + ) + break + key = cls.interpret_name(m.group(1)) + assert isinstance(key, bytes) + value, value_offset = cls.get_value(trailer_data, m.end()) + trailer[key] = value + if value_offset is None: + break + offset = value_offset + check_format_condition( + b"Size" in trailer and isinstance(trailer[b"Size"], int), + "/Size not in trailer or not an integer", + ) + check_format_condition( + b"Root" in trailer and isinstance(trailer[b"Root"], IndirectReference), + "/Root not in trailer or not an indirect reference", + ) + return trailer + + re_hashes_in_name = re.compile(rb"([^#]*)(#([0-9a-fA-F]{2}))?") + + @classmethod + def interpret_name(cls, raw: bytes, as_text: bool = False) -> str | bytes: + name = b"" + for m in cls.re_hashes_in_name.finditer(raw): + if m.group(3): + name += m.group(1) + bytearray.fromhex(m.group(3).decode("us-ascii")) + else: + name += m.group(1) + if as_text: + return name.decode("utf-8") + else: + return bytes(name) + + re_null = re.compile(whitespace_optional + rb"null(?=" + delimiter_or_ws + rb")") + re_true = re.compile(whitespace_optional + rb"true(?=" + delimiter_or_ws + rb")") + re_false = re.compile(whitespace_optional + rb"false(?=" + delimiter_or_ws + rb")") + re_int = re.compile( + whitespace_optional + rb"([-+]?[0-9]+)(?=" + delimiter_or_ws + rb")" + ) + re_real = re.compile( + whitespace_optional + + rb"([-+]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+))(?=" + + delimiter_or_ws + + rb")" + ) + re_array_start = re.compile(whitespace_optional + rb"\[") + re_array_end = re.compile(whitespace_optional + rb"]") + re_string_hex = re.compile( + whitespace_optional + rb"<(" + whitespace_or_hex + rb"*)>" + ) + re_string_lit = re.compile(whitespace_optional + rb"\(") + re_indirect_reference = re.compile( + whitespace_optional + + rb"([-+]?[0-9]+)" + + whitespace_mandatory + + rb"([-+]?[0-9]+)" + + whitespace_mandatory + + rb"R(?=" + + delimiter_or_ws + + rb")" + ) + re_indirect_def_start = re.compile( + whitespace_optional + + rb"([-+]?[0-9]+)" + + whitespace_mandatory + + rb"([-+]?[0-9]+)" + + whitespace_mandatory + + rb"obj(?=" + + delimiter_or_ws + + rb")" + ) + re_indirect_def_end = re.compile( + whitespace_optional + rb"endobj(?=" + delimiter_or_ws + rb")" + ) + re_comment = re.compile( + rb"(" + whitespace_optional + rb"%[^\r\n]*" + newline + rb")*" + ) + re_stream_start = re.compile(whitespace_optional + rb"stream\r?\n") + re_stream_end = re.compile( + whitespace_optional + rb"endstream(?=" + delimiter_or_ws + rb")" + ) + + @classmethod + def get_value( + cls, + data: bytes | bytearray | mmap.mmap, + offset: int, + expect_indirect: IndirectReference | None = None, + max_nesting: int = -1, + ) -> tuple[Any, int | None]: + if max_nesting == 0: + return None, None + m = cls.re_comment.match(data, offset) + if m: + 
offset = m.end() + m = cls.re_indirect_def_start.match(data, offset) + if m: + check_format_condition( + int(m.group(1)) > 0, + "indirect object definition: object ID must be greater than 0", + ) + check_format_condition( + int(m.group(2)) >= 0, + "indirect object definition: generation must be non-negative", + ) + check_format_condition( + expect_indirect is None + or expect_indirect + == IndirectReference(int(m.group(1)), int(m.group(2))), + "indirect object definition different than expected", + ) + object, object_offset = cls.get_value( + data, m.end(), max_nesting=max_nesting - 1 + ) + if object_offset is None: + return object, None + m = cls.re_indirect_def_end.match(data, object_offset) + check_format_condition( + m is not None, "indirect object definition end not found" + ) + assert m is not None + return object, m.end() + check_format_condition( + not expect_indirect, "indirect object definition not found" + ) + m = cls.re_indirect_reference.match(data, offset) + if m: + check_format_condition( + int(m.group(1)) > 0, + "indirect object reference: object ID must be greater than 0", + ) + check_format_condition( + int(m.group(2)) >= 0, + "indirect object reference: generation must be non-negative", + ) + return IndirectReference(int(m.group(1)), int(m.group(2))), m.end() + m = cls.re_dict_start.match(data, offset) + if m: + offset = m.end() + result: dict[Any, Any] = {} + m = cls.re_dict_end.match(data, offset) + current_offset: int | None = offset + while not m: + assert current_offset is not None + key, current_offset = cls.get_value( + data, current_offset, max_nesting=max_nesting - 1 + ) + if current_offset is None: + return result, None + value, current_offset = cls.get_value( + data, current_offset, max_nesting=max_nesting - 1 + ) + result[key] = value + if current_offset is None: + return result, None + m = cls.re_dict_end.match(data, current_offset) + current_offset = m.end() + m = cls.re_stream_start.match(data, current_offset) + if m: + stream_len = result.get(b"Length") + if stream_len is None or not isinstance(stream_len, int): + msg = f"bad or missing Length in stream dict ({stream_len})" + raise PdfFormatError(msg) + stream_data = data[m.end() : m.end() + stream_len] + m = cls.re_stream_end.match(data, m.end() + stream_len) + check_format_condition(m is not None, "stream end not found") + assert m is not None + current_offset = m.end() + return PdfStream(PdfDict(result), stream_data), current_offset + return PdfDict(result), current_offset + m = cls.re_array_start.match(data, offset) + if m: + offset = m.end() + results = [] + m = cls.re_array_end.match(data, offset) + current_offset = offset + while not m: + assert current_offset is not None + value, current_offset = cls.get_value( + data, current_offset, max_nesting=max_nesting - 1 + ) + results.append(value) + if current_offset is None: + return results, None + m = cls.re_array_end.match(data, current_offset) + return results, m.end() + m = cls.re_null.match(data, offset) + if m: + return None, m.end() + m = cls.re_true.match(data, offset) + if m: + return True, m.end() + m = cls.re_false.match(data, offset) + if m: + return False, m.end() + m = cls.re_name.match(data, offset) + if m: + return PdfName(cls.interpret_name(m.group(1))), m.end() + m = cls.re_int.match(data, offset) + if m: + return int(m.group(1)), m.end() + m = cls.re_real.match(data, offset) + if m: + # XXX Decimal instead of float??? 
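+            # re_real accepts forms such as b"3.14", b"-0.5", b"+1." and
+            # b".25"; float() handles all of them, at the cost of the
+            # binary rounding that decimal.Decimal would avoid.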
+ return float(m.group(1)), m.end() + m = cls.re_string_hex.match(data, offset) + if m: + # filter out whitespace + hex_string = bytearray( + b for b in m.group(1) if b in b"0123456789abcdefABCDEF" + ) + if len(hex_string) % 2 == 1: + # append a 0 if the length is not even - yes, at the end + hex_string.append(ord(b"0")) + return bytearray.fromhex(hex_string.decode("us-ascii")), m.end() + m = cls.re_string_lit.match(data, offset) + if m: + return cls.get_literal_string(data, m.end()) + # return None, offset # fallback (only for debugging) + msg = f"unrecognized object: {repr(data[offset : offset + 32])}" + raise PdfFormatError(msg) + + re_lit_str_token = re.compile( + rb"(\\[nrtbf()\\])|(\\[0-9]{1,3})|(\\(\r\n|\r|\n))|(\r\n|\r|\n)|(\()|(\))" + ) + escaped_chars = { + b"n": b"\n", + b"r": b"\r", + b"t": b"\t", + b"b": b"\b", + b"f": b"\f", + b"(": b"(", + b")": b")", + b"\\": b"\\", + ord(b"n"): b"\n", + ord(b"r"): b"\r", + ord(b"t"): b"\t", + ord(b"b"): b"\b", + ord(b"f"): b"\f", + ord(b"("): b"(", + ord(b")"): b")", + ord(b"\\"): b"\\", + } + + @classmethod + def get_literal_string( + cls, data: bytes | bytearray | mmap.mmap, offset: int + ) -> tuple[bytes, int]: + nesting_depth = 0 + result = bytearray() + for m in cls.re_lit_str_token.finditer(data, offset): + result.extend(data[offset : m.start()]) + if m.group(1): + result.extend(cls.escaped_chars[m.group(1)[1]]) + elif m.group(2): + result.append(int(m.group(2)[1:], 8)) + elif m.group(3): + pass + elif m.group(5): + result.extend(b"\n") + elif m.group(6): + result.extend(b"(") + nesting_depth += 1 + elif m.group(7): + if nesting_depth == 0: + return bytes(result), m.end() + result.extend(b")") + nesting_depth -= 1 + offset = m.end() + msg = "unfinished literal string" + raise PdfFormatError(msg) + + re_xref_section_start = re.compile(whitespace_optional + rb"xref" + newline) + re_xref_subsection_start = re.compile( + whitespace_optional + + rb"([0-9]+)" + + whitespace_mandatory + + rb"([0-9]+)" + + whitespace_optional + + newline_only + ) + re_xref_entry = re.compile(rb"([0-9]{10}) ([0-9]{5}) ([fn])( \r| \n|\r\n)") + + def read_xref_table(self, xref_section_offset: int) -> int: + assert self.buf is not None + subsection_found = False + m = self.re_xref_section_start.match( + self.buf, xref_section_offset + self.start_offset + ) + check_format_condition(m is not None, "xref section start not found") + assert m is not None + offset = m.end() + while True: + m = self.re_xref_subsection_start.match(self.buf, offset) + if not m: + check_format_condition( + subsection_found, "xref subsection start not found" + ) + break + subsection_found = True + offset = m.end() + first_object = int(m.group(1)) + num_objects = int(m.group(2)) + for i in range(first_object, first_object + num_objects): + m = self.re_xref_entry.match(self.buf, offset) + check_format_condition(m is not None, "xref entry not found") + assert m is not None + offset = m.end() + is_free = m.group(3) == b"f" + if not is_free: + generation = int(m.group(2)) + new_entry = (int(m.group(1)), generation) + if i not in self.xref_table: + self.xref_table[i] = new_entry + return offset + + def read_indirect(self, ref: IndirectReference, max_nesting: int = -1) -> Any: + offset, generation = self.xref_table[ref[0]] + check_format_condition( + generation == ref[1], + f"expected to find generation {ref[1]} for object ID {ref[0]} in xref " + f"table, instead found generation {generation} at offset {offset}", + ) + assert self.buf is not None + value = self.get_value( + self.buf, + offset + 
self.start_offset,
+            expect_indirect=IndirectReference(*ref),
+            max_nesting=max_nesting,
+        )[0]
+        self.cached_objects[ref] = value
+        return value
+
+    def linearize_page_tree(
+        self, node: PdfDict | None = None
+    ) -> list[IndirectReference]:
+        page_node = node if node is not None else self.page_tree_root
+        check_format_condition(
+            page_node[b"Type"] == b"Pages", "/Type of page tree node is not /Pages"
+        )
+        pages = []
+        for kid in page_node[b"Kids"]:
+            kid_object = self.read_indirect(kid)
+            if kid_object[b"Type"] == b"Page":
+                pages.append(kid)
+            else:
+                pages.extend(self.linearize_page_tree(node=kid_object))
+        return pages
diff --git a/py311/lib/python3.11/site-packages/PIL/PixarImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/PixarImagePlugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2b6d0a97e4bd230134d4741fc997baca5b4507f
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/PIL/PixarImagePlugin.py
@@ -0,0 +1,72 @@
+#
+# The Python Imaging Library.
+# $Id$
+#
+# PIXAR raster support for PIL
+#
+# history:
+# 97-01-29 fl Created
+#
+# notes:
+# This is incomplete; it is based on a few samples created with
+# Photoshop 2.5 and 3.0, and a summary description provided by
+# Greg Coats. Hopefully, "L" and
+# "RGBA" support will be added in future versions.
+#
+# Copyright (c) Secret Labs AB 1997.
+# Copyright (c) Fredrik Lundh 1997.
+#
+# See the README file for information on usage and redistribution.
+#
+from __future__ import annotations
+
+from . import Image, ImageFile
+from ._binary import i16le as i16
+
+#
+# helpers
+
+
+def _accept(prefix: bytes) -> bool:
+    return prefix.startswith(b"\200\350\000\000")
+
+
+##
+# Image plugin for PIXAR raster images.
+
+
+class PixarImageFile(ImageFile.ImageFile):
+    format = "PIXAR"
+    format_description = "PIXAR raster image"
+
+    def _open(self) -> None:
+        # assuming a 4-byte magic label
+        assert self.fp is not None
+
+        s = self.fp.read(4)
+        if not _accept(s):
+            msg = "not a PIXAR file"
+            raise SyntaxError(msg)
+
+        # read rest of header
+        s = s + self.fp.read(508)
+
+        self._size = i16(s, 418), i16(s, 416)
+
+        # get channel/depth descriptions
+        mode = i16(s, 424), i16(s, 426)
+
+        if mode == (14, 2):
+            self._mode = "RGB"
+        # FIXME: to be continued...
+
+        # create tile descriptor (assuming "dumped")
+        self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 1024, self.mode)]
+
+
+#
+# --------------------------------------------------------------------
+
+Image.register_open(PixarImageFile.format, PixarImageFile, _accept)
+
+Image.register_extension(PixarImageFile.format, ".pxr")
diff --git a/py311/lib/python3.11/site-packages/PIL/PngImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/PngImagePlugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b9a89aef0dd15a292b5e2e55b6e56369dc31b62
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/PIL/PngImagePlugin.py
@@ -0,0 +1,1551 @@
+#
+# The Python Imaging Library.
+# $Id$
+#
+# PNG support code
+#
+# See "PNG (Portable Network Graphics) Specification, version 1.0;
+# W3C Recommendation", 1996-10-01, Thomas Boutell (ed.).
+#
+# history:
+# 1996-05-06 fl Created (couldn't resist it)
+# 1996-12-14 fl Upgraded, added read and verify support (0.2)
+# 1996-12-15 fl Separate PNG stream parser
+# 1996-12-29 fl Added write support, added getchunks
+# 1996-12-30 fl Eliminated circular references in decoder (0.3)
+# 1998-07-12 fl Read/write 16-bit images as mode I (0.4)
+# 2001-02-08 fl Added transparency support (from Zircon) (0.5)
+# 2001-04-16 fl Don't close data source in "open" method (0.6)
+# 2004-02-24 fl Don't even pretend to support interlaced files (0.7)
+# 2004-08-31 fl Do basic sanity check on chunk identifiers (0.8)
+# 2004-09-20 fl Added PngInfo chunk container
+# 2004-12-18 fl Added DPI read support (based on code by Niki Spahiev)
+# 2008-08-13 fl Added tRNS support for RGB images
+# 2009-03-06 fl Support for preserving ICC profiles (by Florian Hoech)
+# 2009-03-08 fl Added zTXt support (from Lowell Alleman)
+# 2009-03-29 fl Read interlaced PNG files (from Conrado Porto Lopes Gouvua)
+#
+# Copyright (c) 1997-2009 by Secret Labs AB
+# Copyright (c) 1996 by Fredrik Lundh
+#
+# See the README file for information on usage and redistribution.
+#
+from __future__ import annotations
+
+import itertools
+import logging
+import re
+import struct
+import warnings
+import zlib
+from collections.abc import Callable
+from enum import IntEnum
+from typing import IO, Any, NamedTuple, NoReturn, cast
+
+from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence
+from ._binary import i16be as i16
+from ._binary import i32be as i32
+from ._binary import o8
+from ._binary import o16be as o16
+from ._binary import o32be as o32
+from ._deprecate import deprecate
+from ._util import DeferredError
+
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from . import _imaging
+
+logger = logging.getLogger(__name__)
+
+is_cid = re.compile(rb"\w\w\w\w").match
+
+
+_MAGIC = b"\211PNG\r\n\032\n"
+
+
+_MODES = {
+    # supported bits/color combinations, and corresponding modes/rawmodes
+    # Grayscale
+    (1, 0): ("1", "1"),
+    (2, 0): ("L", "L;2"),
+    (4, 0): ("L", "L;4"),
+    (8, 0): ("L", "L"),
+    (16, 0): ("I;16", "I;16B"),
+    # Truecolour
+    (8, 2): ("RGB", "RGB"),
+    (16, 2): ("RGB", "RGB;16B"),
+    # Indexed-colour
+    (1, 3): ("P", "P;1"),
+    (2, 3): ("P", "P;2"),
+    (4, 3): ("P", "P;4"),
+    (8, 3): ("P", "P"),
+    # Grayscale with alpha
+    (8, 4): ("LA", "LA"),
+    (16, 4): ("RGBA", "LA;16B"),  # LA;16B->LA not yet available
+    # Truecolour with alpha
+    (8, 6): ("RGBA", "RGBA"),
+    (16, 6): ("RGBA", "RGBA;16B"),
+}
+
+
+_simple_palette = re.compile(b"^\xff*\x00\xff*$")
+
+MAX_TEXT_CHUNK = ImageFile.SAFEBLOCK
+"""
+Maximum decompressed size for an iTXt or zTXt chunk.
+Eliminates decompression bombs where compressed chunks can expand 1000x.
+See :ref:`Text in PNG File Format`.
+"""
+MAX_TEXT_MEMORY = 64 * MAX_TEXT_CHUNK
+"""
+Set the maximum total text chunk size.
+See :ref:`Text in PNG File Format`.
+"""
+
+
+# APNG frame disposal modes
+class Disposal(IntEnum):
+    OP_NONE = 0
+    """
+    No disposal is done on this frame before rendering the next frame.
+    See :ref:`Saving APNG sequences`.
+    """
+    OP_BACKGROUND = 1
+    """
+    This frame’s modified region is cleared to fully transparent black before rendering
+    the next frame.
+    See :ref:`Saving APNG sequences`.
+    """
+    OP_PREVIOUS = 2
+    """
+    This frame’s modified region is reverted to the previous frame’s contents before
+    rendering the next frame.
+    See :ref:`Saving APNG sequences`.
+    """
+
+
+# APNG frame blend modes
+class Blend(IntEnum):
+    OP_SOURCE = 0
+    """
+    All color components of this frame, including alpha, overwrite the previous output
+    image contents.
+    See :ref:`Saving APNG sequences`.
+    """
+    OP_OVER = 1
+    """
+    This frame should be alpha composited with the previous output image contents.
+    See :ref:`Saving APNG sequences`.
+    """
+
+
+def _safe_zlib_decompress(s: bytes) -> bytes:
+    dobj = zlib.decompressobj()
+    plaintext = dobj.decompress(s, MAX_TEXT_CHUNK)
+    if dobj.unconsumed_tail:
+        msg = "Decompressed data too large for PngImagePlugin.MAX_TEXT_CHUNK"
+        raise ValueError(msg)
+    return plaintext
+
+
+def _crc32(data: bytes, seed: int = 0) -> int:
+    return zlib.crc32(data, seed) & 0xFFFFFFFF
+
+
+# --------------------------------------------------------------------
+# Support classes. Suitable for PNG and related formats like MNG etc.
+
+
+class ChunkStream:
+    def __init__(self, fp: IO[bytes]) -> None:
+        self.fp: IO[bytes] | None = fp
+        self.queue: list[tuple[bytes, int, int]] | None = []
+
+    def read(self) -> tuple[bytes, int, int]:
+        """Fetch a new chunk. Returns header information."""
+        cid = None
+
+        assert self.fp is not None
+        if self.queue:
+            cid, pos, length = self.queue.pop()
+            self.fp.seek(pos)
+        else:
+            s = self.fp.read(8)
+            cid = s[4:]
+            pos = self.fp.tell()
+            length = i32(s)
+
+        if not is_cid(cid):
+            if not ImageFile.LOAD_TRUNCATED_IMAGES:
+                msg = f"broken PNG file (chunk {repr(cid)})"
+                raise SyntaxError(msg)
+
+        return cid, pos, length
+
+    def __enter__(self) -> ChunkStream:
+        return self
+
+    def __exit__(self, *args: object) -> None:
+        self.close()
+
+    def close(self) -> None:
+        self.queue = self.fp = None
+
+    def push(self, cid: bytes, pos: int, length: int) -> None:
+        assert self.queue is not None
+        self.queue.append((cid, pos, length))
+
+    def call(self, cid: bytes, pos: int, length: int) -> bytes:
+        """Call the appropriate chunk handler"""
+
+        logger.debug("STREAM %r %s %s", cid, pos, length)
+        return getattr(self, f"chunk_{cid.decode('ascii')}")(pos, length)
+
+    def crc(self, cid: bytes, data: bytes) -> None:
+        """Read and verify checksum"""
+
+        # Skip CRC checks for ancillary chunks if allowed to load truncated
+        # images
+        # bit 5 of the first byte is 1 [specs, section 5.4]
+        if ImageFile.LOAD_TRUNCATED_IMAGES and (cid[0] >> 5 & 1):
+            self.crc_skip(cid, data)
+            return
+
+        assert self.fp is not None
+        try:
+            crc1 = _crc32(data, _crc32(cid))
+            crc2 = i32(self.fp.read(4))
+            if crc1 != crc2:
+                msg = f"broken PNG file (bad header checksum in {repr(cid)})"
+                raise SyntaxError(msg)
+        except struct.error as e:
+            msg = f"broken PNG file (incomplete checksum in {repr(cid)})"
+            raise SyntaxError(msg) from e
+
+    def crc_skip(self, cid: bytes, data: bytes) -> None:
+        """Read checksum"""
+
+        assert self.fp is not None
+        self.fp.read(4)
+
+    def verify(self, endchunk: bytes = b"IEND") -> list[bytes]:
+        # Simple approach; just calculate checksum for all remaining
+        # blocks. Must be called directly after open.
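+        #
+        # Typical use is via PngImageFile.verify() (the file name below is
+        # only illustrative):
+        #
+        #     with Image.open("example.png") as im:
+        #         im.verify()  # raises SyntaxError on a bad chunk checksum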
+ + cids = [] + + assert self.fp is not None + while True: + try: + cid, pos, length = self.read() + except struct.error as e: + msg = "truncated PNG file" + raise OSError(msg) from e + + if cid == endchunk: + break + self.crc(cid, ImageFile._safe_read(self.fp, length)) + cids.append(cid) + + return cids + + +class iTXt(str): + """ + Subclass of string to allow iTXt chunks to look like strings while + keeping their extra information + + """ + + lang: str | bytes | None + tkey: str | bytes | None + + @staticmethod + def __new__( + cls, text: str, lang: str | None = None, tkey: str | None = None + ) -> iTXt: + """ + :param cls: the class to use when creating the instance + :param text: value for this key + :param lang: language code + :param tkey: UTF-8 version of the key name + """ + + self = str.__new__(cls, text) + self.lang = lang + self.tkey = tkey + return self + + +class PngInfo: + """ + PNG chunk container (for use with save(pnginfo=)) + + """ + + def __init__(self) -> None: + self.chunks: list[tuple[bytes, bytes, bool]] = [] + + def add(self, cid: bytes, data: bytes, after_idat: bool = False) -> None: + """Appends an arbitrary chunk. Use with caution. + + :param cid: a byte string, 4 bytes long. + :param data: a byte string of the encoded data + :param after_idat: for use with private chunks. Whether the chunk + should be written after IDAT + + """ + + self.chunks.append((cid, data, after_idat)) + + def add_itxt( + self, + key: str | bytes, + value: str | bytes, + lang: str | bytes = "", + tkey: str | bytes = "", + zip: bool = False, + ) -> None: + """Appends an iTXt chunk. + + :param key: latin-1 encodable text key name + :param value: value for this key + :param lang: language code + :param tkey: UTF-8 version of the key name + :param zip: compression flag + + """ + + if not isinstance(key, bytes): + key = key.encode("latin-1", "strict") + if not isinstance(value, bytes): + value = value.encode("utf-8", "strict") + if not isinstance(lang, bytes): + lang = lang.encode("utf-8", "strict") + if not isinstance(tkey, bytes): + tkey = tkey.encode("utf-8", "strict") + + if zip: + self.add( + b"iTXt", + key + b"\0\x01\0" + lang + b"\0" + tkey + b"\0" + zlib.compress(value), + ) + else: + self.add(b"iTXt", key + b"\0\0\0" + lang + b"\0" + tkey + b"\0" + value) + + def add_text( + self, key: str | bytes, value: str | bytes | iTXt, zip: bool = False + ) -> None: + """Appends a text chunk. 
+ + :param key: latin-1 encodable text key name + :param value: value for this key, text or an + :py:class:`PIL.PngImagePlugin.iTXt` instance + :param zip: compression flag + + """ + if isinstance(value, iTXt): + return self.add_itxt( + key, + value, + value.lang if value.lang is not None else b"", + value.tkey if value.tkey is not None else b"", + zip=zip, + ) + + # The tEXt chunk stores latin-1 text + if not isinstance(value, bytes): + try: + value = value.encode("latin-1", "strict") + except UnicodeError: + return self.add_itxt(key, value, zip=zip) + + if not isinstance(key, bytes): + key = key.encode("latin-1", "strict") + + if zip: + self.add(b"zTXt", key + b"\0\0" + zlib.compress(value)) + else: + self.add(b"tEXt", key + b"\0" + value) + + +# -------------------------------------------------------------------- +# PNG image stream (IHDR/IEND) + + +class _RewindState(NamedTuple): + info: dict[str | tuple[int, int], Any] + tile: list[ImageFile._Tile] + seq_num: int | None + + +class PngStream(ChunkStream): + def __init__(self, fp: IO[bytes]) -> None: + super().__init__(fp) + + # local copies of Image attributes + self.im_info: dict[str | tuple[int, int], Any] = {} + self.im_text: dict[str, str | iTXt] = {} + self.im_size = (0, 0) + self.im_mode = "" + self.im_tile: list[ImageFile._Tile] = [] + self.im_palette: tuple[str, bytes] | None = None + self.im_custom_mimetype: str | None = None + self.im_n_frames: int | None = None + self._seq_num: int | None = None + self.rewind_state = _RewindState({}, [], None) + + self.text_memory = 0 + + def check_text_memory(self, chunklen: int) -> None: + self.text_memory += chunklen + if self.text_memory > MAX_TEXT_MEMORY: + msg = ( + "Too much memory used in text chunks: " + f"{self.text_memory}>MAX_TEXT_MEMORY" + ) + raise ValueError(msg) + + def save_rewind(self) -> None: + self.rewind_state = _RewindState( + self.im_info.copy(), + self.im_tile, + self._seq_num, + ) + + def rewind(self) -> None: + self.im_info = self.rewind_state.info.copy() + self.im_tile = self.rewind_state.tile + self._seq_num = self.rewind_state.seq_num + + def chunk_iCCP(self, pos: int, length: int) -> bytes: + # ICC profile + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + # according to PNG spec, the iCCP chunk contains: + # Profile name 1-79 bytes (character string) + # Null separator 1 byte (null character) + # Compression method 1 byte (0) + # Compressed profile n bytes (zlib with deflate compression) + i = s.find(b"\0") + logger.debug("iCCP profile name %r", s[:i]) + comp_method = s[i + 1] + logger.debug("Compression method %s", comp_method) + if comp_method != 0: + msg = f"Unknown compression method {comp_method} in iCCP chunk" + raise SyntaxError(msg) + try: + icc_profile = _safe_zlib_decompress(s[i + 2 :]) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + icc_profile = None + else: + raise + except zlib.error: + icc_profile = None # FIXME + self.im_info["icc_profile"] = icc_profile + return s + + def chunk_IHDR(self, pos: int, length: int) -> bytes: + # image header + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + if length < 13: + if ImageFile.LOAD_TRUNCATED_IMAGES: + return s + msg = "Truncated IHDR chunk" + raise ValueError(msg) + self.im_size = i32(s, 0), i32(s, 4) + try: + self.im_mode, self.im_rawmode = _MODES[(s[8], s[9])] + except Exception: + pass + if s[12]: + self.im_info["interlace"] = 1 + if s[11]: + msg = "unknown filter category" + raise SyntaxError(msg) + return s + + def chunk_IDAT(self, pos: 
int, length: int) -> NoReturn: + # image data + if "bbox" in self.im_info: + tile = [ImageFile._Tile("zip", self.im_info["bbox"], pos, self.im_rawmode)] + else: + if self.im_n_frames is not None: + self.im_info["default_image"] = True + tile = [ImageFile._Tile("zip", (0, 0) + self.im_size, pos, self.im_rawmode)] + self.im_tile = tile + self.im_idat = length + msg = "image data found" + raise EOFError(msg) + + def chunk_IEND(self, pos: int, length: int) -> NoReturn: + msg = "end of PNG image" + raise EOFError(msg) + + def chunk_PLTE(self, pos: int, length: int) -> bytes: + # palette + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + if self.im_mode == "P": + self.im_palette = "RGB", s + return s + + def chunk_tRNS(self, pos: int, length: int) -> bytes: + # transparency + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + if self.im_mode == "P": + if _simple_palette.match(s): + # tRNS contains only one full-transparent entry, + # other entries are full opaque + i = s.find(b"\0") + if i >= 0: + self.im_info["transparency"] = i + else: + # otherwise, we have a byte string with one alpha value + # for each palette entry + self.im_info["transparency"] = s + elif self.im_mode in ("1", "L", "I;16"): + self.im_info["transparency"] = i16(s) + elif self.im_mode == "RGB": + self.im_info["transparency"] = i16(s), i16(s, 2), i16(s, 4) + return s + + def chunk_gAMA(self, pos: int, length: int) -> bytes: + # gamma setting + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + self.im_info["gamma"] = i32(s) / 100000.0 + return s + + def chunk_cHRM(self, pos: int, length: int) -> bytes: + # chromaticity, 8 unsigned ints, actual value is scaled by 100,000 + # WP x,y, Red x,y, Green x,y Blue x,y + + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + raw_vals = struct.unpack(f">{len(s) // 4}I", s) + self.im_info["chromaticity"] = tuple(elt / 100000.0 for elt in raw_vals) + return s + + def chunk_sRGB(self, pos: int, length: int) -> bytes: + # srgb rendering intent, 1 byte + # 0 perceptual + # 1 relative colorimetric + # 2 saturation + # 3 absolute colorimetric + + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + if length < 1: + if ImageFile.LOAD_TRUNCATED_IMAGES: + return s + msg = "Truncated sRGB chunk" + raise ValueError(msg) + self.im_info["srgb"] = s[0] + return s + + def chunk_pHYs(self, pos: int, length: int) -> bytes: + # pixels per unit + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + if length < 9: + if ImageFile.LOAD_TRUNCATED_IMAGES: + return s + msg = "Truncated pHYs chunk" + raise ValueError(msg) + px, py = i32(s, 0), i32(s, 4) + unit = s[8] + if unit == 1: # meter + dpi = px * 0.0254, py * 0.0254 + self.im_info["dpi"] = dpi + elif unit == 0: + self.im_info["aspect"] = px, py + return s + + def chunk_tEXt(self, pos: int, length: int) -> bytes: + # text + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + try: + k, v = s.split(b"\0", 1) + except ValueError: + # fallback for broken tEXt tags + k = s + v = b"" + if k: + k_str = k.decode("latin-1", "strict") + v_str = v.decode("latin-1", "replace") + + self.im_info[k_str] = v if k == b"exif" else v_str + self.im_text[k_str] = v_str + self.check_text_memory(len(v_str)) + + return s + + def chunk_zTXt(self, pos: int, length: int) -> bytes: + # compressed text + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + try: + k, v = s.split(b"\0", 1) + except ValueError: + k = s + 
v = b"" + if v: + comp_method = v[0] + else: + comp_method = 0 + if comp_method != 0: + msg = f"Unknown compression method {comp_method} in zTXt chunk" + raise SyntaxError(msg) + try: + v = _safe_zlib_decompress(v[1:]) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + v = b"" + else: + raise + except zlib.error: + v = b"" + + if k: + k_str = k.decode("latin-1", "strict") + v_str = v.decode("latin-1", "replace") + + self.im_info[k_str] = self.im_text[k_str] = v_str + self.check_text_memory(len(v_str)) + + return s + + def chunk_iTXt(self, pos: int, length: int) -> bytes: + # international text + assert self.fp is not None + r = s = ImageFile._safe_read(self.fp, length) + try: + k, r = r.split(b"\0", 1) + except ValueError: + return s + if len(r) < 2: + return s + cf, cm, r = r[0], r[1], r[2:] + try: + lang, tk, v = r.split(b"\0", 2) + except ValueError: + return s + if cf != 0: + if cm == 0: + try: + v = _safe_zlib_decompress(v) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + return s + else: + raise + except zlib.error: + return s + else: + return s + if k == b"XML:com.adobe.xmp": + self.im_info["xmp"] = v + try: + k_str = k.decode("latin-1", "strict") + lang_str = lang.decode("utf-8", "strict") + tk_str = tk.decode("utf-8", "strict") + v_str = v.decode("utf-8", "strict") + except UnicodeError: + return s + + self.im_info[k_str] = self.im_text[k_str] = iTXt(v_str, lang_str, tk_str) + self.check_text_memory(len(v_str)) + + return s + + def chunk_eXIf(self, pos: int, length: int) -> bytes: + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + self.im_info["exif"] = b"Exif\x00\x00" + s + return s + + # APNG chunks + def chunk_acTL(self, pos: int, length: int) -> bytes: + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + if length < 8: + if ImageFile.LOAD_TRUNCATED_IMAGES: + return s + msg = "APNG contains truncated acTL chunk" + raise ValueError(msg) + if self.im_n_frames is not None: + self.im_n_frames = None + warnings.warn("Invalid APNG, will use default PNG image if possible") + return s + n_frames = i32(s) + if n_frames == 0 or n_frames > 0x80000000: + warnings.warn("Invalid APNG, will use default PNG image if possible") + return s + self.im_n_frames = n_frames + self.im_info["loop"] = i32(s, 4) + self.im_custom_mimetype = "image/apng" + return s + + def chunk_fcTL(self, pos: int, length: int) -> bytes: + assert self.fp is not None + s = ImageFile._safe_read(self.fp, length) + if length < 26: + if ImageFile.LOAD_TRUNCATED_IMAGES: + return s + msg = "APNG contains truncated fcTL chunk" + raise ValueError(msg) + seq = i32(s) + if (self._seq_num is None and seq != 0) or ( + self._seq_num is not None and self._seq_num != seq - 1 + ): + msg = "APNG contains frame sequence errors" + raise SyntaxError(msg) + self._seq_num = seq + width, height = i32(s, 4), i32(s, 8) + px, py = i32(s, 12), i32(s, 16) + im_w, im_h = self.im_size + if px + width > im_w or py + height > im_h: + msg = "APNG contains invalid frames" + raise SyntaxError(msg) + self.im_info["bbox"] = (px, py, px + width, py + height) + delay_num, delay_den = i16(s, 20), i16(s, 22) + if delay_den == 0: + delay_den = 100 + self.im_info["duration"] = float(delay_num) / float(delay_den) * 1000 + self.im_info["disposal"] = s[24] + self.im_info["blend"] = s[25] + return s + + def chunk_fdAT(self, pos: int, length: int) -> bytes: + assert self.fp is not None + if length < 4: + if ImageFile.LOAD_TRUNCATED_IMAGES: + s = ImageFile._safe_read(self.fp, length) + return s + msg = 
"APNG contains truncated fDAT chunk" + raise ValueError(msg) + s = ImageFile._safe_read(self.fp, 4) + seq = i32(s) + if self._seq_num != seq - 1: + msg = "APNG contains frame sequence errors" + raise SyntaxError(msg) + self._seq_num = seq + return self.chunk_IDAT(pos + 4, length - 4) + + +# -------------------------------------------------------------------- +# PNG reader + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(_MAGIC) + + +## +# Image plugin for PNG images. + + +class PngImageFile(ImageFile.ImageFile): + format = "PNG" + format_description = "Portable network graphics" + + def _open(self) -> None: + if not _accept(self.fp.read(8)): + msg = "not a PNG file" + raise SyntaxError(msg) + self._fp = self.fp + self.__frame = 0 + + # + # Parse headers up to the first IDAT or fDAT chunk + + self.private_chunks: list[tuple[bytes, bytes] | tuple[bytes, bytes, bool]] = [] + self.png: PngStream | None = PngStream(self.fp) + + while True: + # + # get next chunk + + cid, pos, length = self.png.read() + + try: + s = self.png.call(cid, pos, length) + except EOFError: + break + except AttributeError: + logger.debug("%r %s %s (unknown)", cid, pos, length) + s = ImageFile._safe_read(self.fp, length) + if cid[1:2].islower(): + self.private_chunks.append((cid, s)) + + self.png.crc(cid, s) + + # + # Copy relevant attributes from the PngStream. An alternative + # would be to let the PngStream class modify these attributes + # directly, but that introduces circular references which are + # difficult to break if things go wrong in the decoder... + # (believe me, I've tried ;-) + + self._mode = self.png.im_mode + self._size = self.png.im_size + self.info = self.png.im_info + self._text: dict[str, str | iTXt] | None = None + self.tile = self.png.im_tile + self.custom_mimetype = self.png.im_custom_mimetype + self.n_frames = self.png.im_n_frames or 1 + self.default_image = self.info.get("default_image", False) + + if self.png.im_palette: + rawmode, data = self.png.im_palette + self.palette = ImagePalette.raw(rawmode, data) + + if cid == b"fdAT": + self.__prepare_idat = length - 4 + else: + self.__prepare_idat = length # used by load_prepare() + + if self.png.im_n_frames is not None: + self._close_exclusive_fp_after_loading = False + self.png.save_rewind() + self.__rewind_idat = self.__prepare_idat + self.__rewind = self._fp.tell() + if self.default_image: + # IDAT chunk contains default image and not first animation frame + self.n_frames += 1 + self._seek(0) + self.is_animated = self.n_frames > 1 + + @property + def text(self) -> dict[str, str | iTXt]: + # experimental + if self._text is None: + # iTxt, tEXt and zTXt chunks may appear at the end of the file + # So load the file to ensure that they are read + if self.is_animated: + frame = self.__frame + # for APNG, seek to the final frame before loading + self.seek(self.n_frames - 1) + self.load() + if self.is_animated: + self.seek(frame) + assert self._text is not None + return self._text + + def verify(self) -> None: + """Verify PNG file""" + + if self.fp is None: + msg = "verify must be called directly after open" + raise RuntimeError(msg) + + # back up to beginning of IDAT block + self.fp.seek(self.tile[0][2] - 8) + + assert self.png is not None + self.png.verify() + self.png.close() + + if self._exclusive_fp: + self.fp.close() + self.fp = None + + def seek(self, frame: int) -> None: + if not self._seek_check(frame): + return + if frame < self.__frame: + self._seek(0, True) + + last_frame = self.__frame + for f in range(self.__frame + 
1, frame + 1): + try: + self._seek(f) + except EOFError as e: + self.seek(last_frame) + msg = "no more images in APNG file" + raise EOFError(msg) from e + + def _seek(self, frame: int, rewind: bool = False) -> None: + assert self.png is not None + if isinstance(self._fp, DeferredError): + raise self._fp.ex + + self.dispose: _imaging.ImagingCore | None + dispose_extent = None + if frame == 0: + if rewind: + self._fp.seek(self.__rewind) + self.png.rewind() + self.__prepare_idat = self.__rewind_idat + self._im = None + self.info = self.png.im_info + self.tile = self.png.im_tile + self.fp = self._fp + self._prev_im = None + self.dispose = None + self.default_image = self.info.get("default_image", False) + self.dispose_op = self.info.get("disposal") + self.blend_op = self.info.get("blend") + dispose_extent = self.info.get("bbox") + self.__frame = 0 + else: + if frame != self.__frame + 1: + msg = f"cannot seek to frame {frame}" + raise ValueError(msg) + + # ensure previous frame was loaded + self.load() + + if self.dispose: + self.im.paste(self.dispose, self.dispose_extent) + self._prev_im = self.im.copy() + + self.fp = self._fp + + # advance to the next frame + if self.__prepare_idat: + ImageFile._safe_read(self.fp, self.__prepare_idat) + self.__prepare_idat = 0 + frame_start = False + while True: + self.fp.read(4) # CRC + + try: + cid, pos, length = self.png.read() + except (struct.error, SyntaxError): + break + + if cid == b"IEND": + msg = "No more images in APNG file" + raise EOFError(msg) + if cid == b"fcTL": + if frame_start: + # there must be at least one fdAT chunk between fcTL chunks + msg = "APNG missing frame data" + raise SyntaxError(msg) + frame_start = True + + try: + self.png.call(cid, pos, length) + except UnicodeDecodeError: + break + except EOFError: + if cid == b"fdAT": + length -= 4 + if frame_start: + self.__prepare_idat = length + break + ImageFile._safe_read(self.fp, length) + except AttributeError: + logger.debug("%r %s %s (unknown)", cid, pos, length) + ImageFile._safe_read(self.fp, length) + + self.__frame = frame + self.tile = self.png.im_tile + self.dispose_op = self.info.get("disposal") + self.blend_op = self.info.get("blend") + dispose_extent = self.info.get("bbox") + + if not self.tile: + msg = "image not found in APNG frame" + raise EOFError(msg) + if dispose_extent: + self.dispose_extent: tuple[float, float, float, float] = dispose_extent + + # setup frame disposal (actual disposal done when needed in the next _seek()) + if self._prev_im is None and self.dispose_op == Disposal.OP_PREVIOUS: + self.dispose_op = Disposal.OP_BACKGROUND + + self.dispose = None + if self.dispose_op == Disposal.OP_PREVIOUS: + if self._prev_im: + self.dispose = self._prev_im.copy() + self.dispose = self._crop(self.dispose, self.dispose_extent) + elif self.dispose_op == Disposal.OP_BACKGROUND: + self.dispose = Image.core.fill(self.mode, self.size) + self.dispose = self._crop(self.dispose, self.dispose_extent) + + def tell(self) -> int: + return self.__frame + + def load_prepare(self) -> None: + """internal: prepare to read PNG file""" + + if self.info.get("interlace"): + self.decoderconfig = self.decoderconfig + (1,) + + self.__idat = self.__prepare_idat # used by load_read() + ImageFile.ImageFile.load_prepare(self) + + def load_read(self, read_bytes: int) -> bytes: + """internal: read more image data""" + + assert self.png is not None + while self.__idat == 0: + # end of chunk, skip forward to next one + + self.fp.read(4) # CRC + + cid, pos, length = self.png.read() + + if cid not in 
[b"IDAT", b"DDAT", b"fdAT"]: + self.png.push(cid, pos, length) + return b"" + + if cid == b"fdAT": + try: + self.png.call(cid, pos, length) + except EOFError: + pass + self.__idat = length - 4 # sequence_num has already been read + else: + self.__idat = length # empty chunks are allowed + + # read more data from this chunk + if read_bytes <= 0: + read_bytes = self.__idat + else: + read_bytes = min(read_bytes, self.__idat) + + self.__idat = self.__idat - read_bytes + + return self.fp.read(read_bytes) + + def load_end(self) -> None: + """internal: finished reading image data""" + assert self.png is not None + if self.__idat != 0: + self.fp.read(self.__idat) + while True: + self.fp.read(4) # CRC + + try: + cid, pos, length = self.png.read() + except (struct.error, SyntaxError): + break + + if cid == b"IEND": + break + elif cid == b"fcTL" and self.is_animated: + # start of the next frame, stop reading + self.__prepare_idat = 0 + self.png.push(cid, pos, length) + break + + try: + self.png.call(cid, pos, length) + except UnicodeDecodeError: + break + except EOFError: + if cid == b"fdAT": + length -= 4 + try: + ImageFile._safe_read(self.fp, length) + except OSError as e: + if ImageFile.LOAD_TRUNCATED_IMAGES: + break + else: + raise e + except AttributeError: + logger.debug("%r %s %s (unknown)", cid, pos, length) + s = ImageFile._safe_read(self.fp, length) + if cid[1:2].islower(): + self.private_chunks.append((cid, s, True)) + self._text = self.png.im_text + if not self.is_animated: + self.png.close() + self.png = None + else: + if self._prev_im and self.blend_op == Blend.OP_OVER: + updated = self._crop(self.im, self.dispose_extent) + if self.im.mode == "RGB" and "transparency" in self.info: + mask = updated.convert_transparent( + "RGBA", self.info["transparency"] + ) + else: + if self.im.mode == "P" and "transparency" in self.info: + t = self.info["transparency"] + if isinstance(t, bytes): + updated.putpalettealphas(t) + elif isinstance(t, int): + updated.putpalettealpha(t) + mask = updated.convert("RGBA") + self._prev_im.paste(updated, self.dispose_extent, mask) + self.im = self._prev_im + + def _getexif(self) -> dict[int, Any] | None: + if "exif" not in self.info: + self.load() + if "exif" not in self.info and "Raw profile type exif" not in self.info: + return None + return self.getexif()._get_merged_dict() + + def getexif(self) -> Image.Exif: + if "exif" not in self.info: + self.load() + + return super().getexif() + + +# -------------------------------------------------------------------- +# PNG writer + +_OUTMODES = { + # supported PIL modes, and corresponding rawmode, bit depth and color type + "1": ("1", b"\x01", b"\x00"), + "L;1": ("L;1", b"\x01", b"\x00"), + "L;2": ("L;2", b"\x02", b"\x00"), + "L;4": ("L;4", b"\x04", b"\x00"), + "L": ("L", b"\x08", b"\x00"), + "LA": ("LA", b"\x08", b"\x04"), + "I": ("I;16B", b"\x10", b"\x00"), + "I;16": ("I;16B", b"\x10", b"\x00"), + "I;16B": ("I;16B", b"\x10", b"\x00"), + "P;1": ("P;1", b"\x01", b"\x03"), + "P;2": ("P;2", b"\x02", b"\x03"), + "P;4": ("P;4", b"\x04", b"\x03"), + "P": ("P", b"\x08", b"\x03"), + "RGB": ("RGB", b"\x08", b"\x02"), + "RGBA": ("RGBA", b"\x08", b"\x06"), +} + + +def putchunk(fp: IO[bytes], cid: bytes, *data: bytes) -> None: + """Write a PNG chunk (including CRC field)""" + + byte_data = b"".join(data) + + fp.write(o32(len(byte_data)) + cid) + fp.write(byte_data) + crc = _crc32(byte_data, _crc32(cid)) + fp.write(o32(crc)) + + +class _idat: + # wrap output from the encoder in IDAT chunks + + def __init__(self, fp: IO[bytes], 
chunk: Callable[..., None]) -> None: + self.fp = fp + self.chunk = chunk + + def write(self, data: bytes) -> None: + self.chunk(self.fp, b"IDAT", data) + + +class _fdat: + # wrap encoder output in fdAT chunks + + def __init__(self, fp: IO[bytes], chunk: Callable[..., None], seq_num: int) -> None: + self.fp = fp + self.chunk = chunk + self.seq_num = seq_num + + def write(self, data: bytes) -> None: + self.chunk(self.fp, b"fdAT", o32(self.seq_num), data) + self.seq_num += 1 + + +class _Frame(NamedTuple): + im: Image.Image + bbox: tuple[int, int, int, int] | None + encoderinfo: dict[str, Any] + + +def _write_multiple_frames( + im: Image.Image, + fp: IO[bytes], + chunk: Callable[..., None], + mode: str, + rawmode: str, + default_image: Image.Image | None, + append_images: list[Image.Image], +) -> Image.Image | None: + duration = im.encoderinfo.get("duration") + loop = im.encoderinfo.get("loop", im.info.get("loop", 0)) + disposal = im.encoderinfo.get("disposal", im.info.get("disposal", Disposal.OP_NONE)) + blend = im.encoderinfo.get("blend", im.info.get("blend", Blend.OP_SOURCE)) + + if default_image: + chain = itertools.chain(append_images) + else: + chain = itertools.chain([im], append_images) + + im_frames: list[_Frame] = [] + frame_count = 0 + for im_seq in chain: + for im_frame in ImageSequence.Iterator(im_seq): + if im_frame.mode == mode: + im_frame = im_frame.copy() + else: + im_frame = im_frame.convert(mode) + encoderinfo = im.encoderinfo.copy() + if isinstance(duration, (list, tuple)): + encoderinfo["duration"] = duration[frame_count] + elif duration is None and "duration" in im_frame.info: + encoderinfo["duration"] = im_frame.info["duration"] + if isinstance(disposal, (list, tuple)): + encoderinfo["disposal"] = disposal[frame_count] + if isinstance(blend, (list, tuple)): + encoderinfo["blend"] = blend[frame_count] + frame_count += 1 + + if im_frames: + previous = im_frames[-1] + prev_disposal = previous.encoderinfo.get("disposal") + prev_blend = previous.encoderinfo.get("blend") + if prev_disposal == Disposal.OP_PREVIOUS and len(im_frames) < 2: + prev_disposal = Disposal.OP_BACKGROUND + + if prev_disposal == Disposal.OP_BACKGROUND: + base_im = previous.im.copy() + dispose = Image.core.fill("RGBA", im.size, (0, 0, 0, 0)) + bbox = previous.bbox + if bbox: + dispose = dispose.crop(bbox) + else: + bbox = (0, 0) + im.size + base_im.paste(dispose, bbox) + elif prev_disposal == Disposal.OP_PREVIOUS: + base_im = im_frames[-2].im + else: + base_im = previous.im + delta = ImageChops.subtract_modulo( + im_frame.convert("RGBA"), base_im.convert("RGBA") + ) + bbox = delta.getbbox(alpha_only=False) + if ( + not bbox + and prev_disposal == encoderinfo.get("disposal") + and prev_blend == encoderinfo.get("blend") + and "duration" in encoderinfo + ): + previous.encoderinfo["duration"] += encoderinfo["duration"] + continue + else: + bbox = None + im_frames.append(_Frame(im_frame, bbox, encoderinfo)) + + if len(im_frames) == 1 and not default_image: + return im_frames[0].im + + # animation control + chunk( + fp, + b"acTL", + o32(len(im_frames)), # 0: num_frames + o32(loop), # 4: num_plays + ) + + # default image IDAT (if it exists) + if default_image: + if im.mode != mode: + im = im.convert(mode) + ImageFile._save( + im, + cast(IO[bytes], _idat(fp, chunk)), + [ImageFile._Tile("zip", (0, 0) + im.size, 0, rawmode)], + ) + + seq_num = 0 + for frame, frame_data in enumerate(im_frames): + im_frame = frame_data.im + if not frame_data.bbox: + bbox = (0, 0) + im_frame.size + else: + bbox = frame_data.bbox + 
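# Note (illustrative, not upstream code): frames after the first are + # cropped to their changed region, e.g. bbox (10, 20, 110, 220) gives + # a 100x200 sub-image whose origin (10, 20) becomes the fcTL + # x_offset/y_offset written below. +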
im_frame = im_frame.crop(bbox) + size = im_frame.size + encoderinfo = frame_data.encoderinfo + frame_duration = int(round(encoderinfo.get("duration", 0))) + frame_disposal = encoderinfo.get("disposal", disposal) + frame_blend = encoderinfo.get("blend", blend) + # frame control + chunk( + fp, + b"fcTL", + o32(seq_num), # sequence_number + o32(size[0]), # width + o32(size[1]), # height + o32(bbox[0]), # x_offset + o32(bbox[1]), # y_offset + o16(frame_duration), # delay_numerator + o16(1000), # delay_denominator + o8(frame_disposal), # dispose_op + o8(frame_blend), # blend_op + ) + seq_num += 1 + # frame data + if frame == 0 and not default_image: + # first frame must be in IDAT chunks for backwards compatibility + ImageFile._save( + im_frame, + cast(IO[bytes], _idat(fp, chunk)), + [ImageFile._Tile("zip", (0, 0) + im_frame.size, 0, rawmode)], + ) + else: + fdat_chunks = _fdat(fp, chunk, seq_num) + ImageFile._save( + im_frame, + cast(IO[bytes], fdat_chunks), + [ImageFile._Tile("zip", (0, 0) + im_frame.size, 0, rawmode)], + ) + seq_num = fdat_chunks.seq_num + return None + + +def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + _save(im, fp, filename, save_all=True) + + +def _save( + im: Image.Image, + fp: IO[bytes], + filename: str | bytes, + chunk: Callable[..., None] = putchunk, + save_all: bool = False, +) -> None: + # save an image to disk (called by the save method) + + if save_all: + default_image = im.encoderinfo.get( + "default_image", im.info.get("default_image") + ) + modes = set() + sizes = set() + append_images = im.encoderinfo.get("append_images", []) + for im_seq in itertools.chain([im], append_images): + for im_frame in ImageSequence.Iterator(im_seq): + modes.add(im_frame.mode) + sizes.add(im_frame.size) + for mode in ("RGBA", "RGB", "P"): + if mode in modes: + break + else: + mode = modes.pop() + size = tuple(max(frame_size[i] for frame_size in sizes) for i in range(2)) + else: + size = im.size + mode = im.mode + + outmode = mode + if mode == "P": + # + # attempt to minimize storage requirements for palette images + if "bits" in im.encoderinfo: + # number of bits specified by user + colors = min(1 << im.encoderinfo["bits"], 256) + else: + # check palette contents + if im.palette: + colors = max(min(len(im.palette.getdata()[1]) // 3, 256), 1) + else: + colors = 256 + + if colors <= 16: + if colors <= 2: + bits = 1 + elif colors <= 4: + bits = 2 + else: + bits = 4 + outmode += f";{bits}" + + # encoder options + im.encoderconfig = ( + im.encoderinfo.get("optimize", False), + im.encoderinfo.get("compress_level", -1), + im.encoderinfo.get("compress_type", -1), + im.encoderinfo.get("dictionary", b""), + ) + + # get the corresponding PNG mode + try: + rawmode, bit_depth, color_type = _OUTMODES[outmode] + except KeyError as e: + msg = f"cannot write mode {mode} as PNG" + raise OSError(msg) from e + if outmode == "I": + deprecate("Saving I mode images as PNG", 13, stacklevel=4) + + # + # write minimal PNG file + + fp.write(_MAGIC) + + chunk( + fp, + b"IHDR", + o32(size[0]), # 0: size + o32(size[1]), + bit_depth, + color_type, + b"\0", # 10: compression + b"\0", # 11: filter category + b"\0", # 12: interlace flag + ) + + chunks = [b"cHRM", b"cICP", b"gAMA", b"sBIT", b"sRGB", b"tIME"] + + icc = im.encoderinfo.get("icc_profile", im.info.get("icc_profile")) + if icc: + # ICC profile + # according to PNG spec, the iCCP chunk contains: + # Profile name 1-79 bytes (character string) + # Null separator 1 byte (null character) + # Compression method 1 byte (0) + # 
Compressed profile n bytes (zlib with deflate compression) + name = b"ICC Profile" + data = name + b"\0\0" + zlib.compress(icc) + chunk(fp, b"iCCP", data) + + # You must either have sRGB or iCCP. + # Disallow sRGB chunks when an iCCP-chunk has been emitted. + chunks.remove(b"sRGB") + + info = im.encoderinfo.get("pnginfo") + if info: + chunks_multiple_allowed = [b"sPLT", b"iTXt", b"tEXt", b"zTXt"] + for info_chunk in info.chunks: + cid, data = info_chunk[:2] + if cid in chunks: + chunks.remove(cid) + chunk(fp, cid, data) + elif cid in chunks_multiple_allowed: + chunk(fp, cid, data) + elif cid[1:2].islower(): + # Private chunk + after_idat = len(info_chunk) == 3 and info_chunk[2] + if not after_idat: + chunk(fp, cid, data) + + if im.mode == "P": + palette_byte_number = colors * 3 + palette_bytes = im.im.getpalette("RGB")[:palette_byte_number] + while len(palette_bytes) < palette_byte_number: + palette_bytes += b"\0" + chunk(fp, b"PLTE", palette_bytes) + + transparency = im.encoderinfo.get("transparency", im.info.get("transparency", None)) + + if transparency or transparency == 0: + if im.mode == "P": + # limit to actual palette size + alpha_bytes = colors + if isinstance(transparency, bytes): + chunk(fp, b"tRNS", transparency[:alpha_bytes]) + else: + transparency = max(0, min(255, transparency)) + alpha = b"\xff" * transparency + b"\0" + chunk(fp, b"tRNS", alpha[:alpha_bytes]) + elif im.mode in ("1", "L", "I", "I;16"): + transparency = max(0, min(65535, transparency)) + chunk(fp, b"tRNS", o16(transparency)) + elif im.mode == "RGB": + red, green, blue = transparency + chunk(fp, b"tRNS", o16(red) + o16(green) + o16(blue)) + else: + if "transparency" in im.encoderinfo: + # don't bother with transparency if it's an RGBA + # and it's in the info dict. It's probably just stale. 
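+ # Illustrative (not upstream code): e.g. im.save(fp, "PNG", + # transparency=128) on an "L" image emits tRNS as o16(128) and an + # "RGB" image emits three o16 values, while alpha modes land here.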
+ msg = "cannot use transparency for this mode" + raise OSError(msg) + else: + if im.mode == "P" and im.im.getpalettemode() == "RGBA": + alpha = im.im.getpalette("RGBA", "A") + alpha_bytes = colors + chunk(fp, b"tRNS", alpha[:alpha_bytes]) + + dpi = im.encoderinfo.get("dpi") + if dpi: + chunk( + fp, + b"pHYs", + o32(int(dpi[0] / 0.0254 + 0.5)), + o32(int(dpi[1] / 0.0254 + 0.5)), + b"\x01", + ) + + if info: + chunks = [b"bKGD", b"hIST"] + for info_chunk in info.chunks: + cid, data = info_chunk[:2] + if cid in chunks: + chunks.remove(cid) + chunk(fp, cid, data) + + exif = im.encoderinfo.get("exif") + if exif: + if isinstance(exif, Image.Exif): + exif = exif.tobytes(8) + if exif.startswith(b"Exif\x00\x00"): + exif = exif[6:] + chunk(fp, b"eXIf", exif) + + single_im: Image.Image | None = im + if save_all: + single_im = _write_multiple_frames( + im, fp, chunk, mode, rawmode, default_image, append_images + ) + if single_im: + ImageFile._save( + single_im, + cast(IO[bytes], _idat(fp, chunk)), + [ImageFile._Tile("zip", (0, 0) + single_im.size, 0, rawmode)], + ) + + if info: + for info_chunk in info.chunks: + cid, data = info_chunk[:2] + if cid[1:2].islower(): + # Private chunk + after_idat = len(info_chunk) == 3 and info_chunk[2] + if after_idat: + chunk(fp, cid, data) + + chunk(fp, b"IEND", b"") + + if hasattr(fp, "flush"): + fp.flush() + + +# -------------------------------------------------------------------- +# PNG chunk converter + + +def getchunks(im: Image.Image, **params: Any) -> list[tuple[bytes, bytes, bytes]]: + """Return a list of PNG chunks representing this image.""" + from io import BytesIO + + chunks = [] + + def append(fp: IO[bytes], cid: bytes, *data: bytes) -> None: + byte_data = b"".join(data) + crc = o32(_crc32(byte_data, _crc32(cid))) + chunks.append((cid, byte_data, crc)) + + fp = BytesIO() + + try: + im.encoderinfo = params + _save(im, fp, "", append) + finally: + del im.encoderinfo + + return chunks + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(PngImageFile.format, PngImageFile, _accept) +Image.register_save(PngImageFile.format, _save) +Image.register_save_all(PngImageFile.format, _save_all) + +Image.register_extensions(PngImageFile.format, [".png", ".apng"]) + +Image.register_mime(PngImageFile.format, "image/png") diff --git a/py311/lib/python3.11/site-packages/PIL/PpmImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/PpmImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..db34d107a4f4a9a7a27fe332e211133d88870fce --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/PpmImagePlugin.py @@ -0,0 +1,375 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PPM support for PIL +# +# History: +# 96-03-24 fl Created +# 98-03-06 fl Write RGBA images (as RGB, that is) +# +# Copyright (c) Secret Labs AB 1997-98. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import math +from typing import IO + +from . 
import Image, ImageFile +from ._binary import i16be as i16 +from ._binary import o8 +from ._binary import o32le as o32 + +# +# -------------------------------------------------------------------- + +b_whitespace = b"\x20\x09\x0a\x0b\x0c\x0d" + +MODES = { + # standard + b"P1": "1", + b"P2": "L", + b"P3": "RGB", + b"P4": "1", + b"P5": "L", + b"P6": "RGB", + # extensions + b"P0CMYK": "CMYK", + b"Pf": "F", + # PIL extensions (for test purposes only) + b"PyP": "P", + b"PyRGBA": "RGBA", + b"PyCMYK": "CMYK", +} + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(b"P") and prefix[1] in b"0123456fy" + + +## +# Image plugin for PBM, PGM, and PPM images. + + +class PpmImageFile(ImageFile.ImageFile): + format = "PPM" + format_description = "Pbmplus image" + + def _read_magic(self) -> bytes: + assert self.fp is not None + + magic = b"" + # read until whitespace or longest available magic number + for _ in range(6): + c = self.fp.read(1) + if not c or c in b_whitespace: + break + magic += c + return magic + + def _read_token(self) -> bytes: + assert self.fp is not None + + token = b"" + while len(token) <= 10: # read until next whitespace or limit of 10 characters + c = self.fp.read(1) + if not c: + break + elif c in b_whitespace: # token ended + if not token: + # skip whitespace at start + continue + break + elif c == b"#": + # ignores rest of the line; stops at CR, LF or EOF + while self.fp.read(1) not in b"\r\n": + pass + continue + token += c + if not token: + # Token was not even 1 byte + msg = "Reached EOF while reading header" + raise ValueError(msg) + elif len(token) > 10: + msg_too_long = b"Token too long in file header: %s" % token + raise ValueError(msg_too_long) + return token + + def _open(self) -> None: + assert self.fp is not None + + magic_number = self._read_magic() + try: + mode = MODES[magic_number] + except KeyError: + msg = "not a PPM file" + raise SyntaxError(msg) + self._mode = mode + + if magic_number in (b"P1", b"P4"): + self.custom_mimetype = "image/x-portable-bitmap" + elif magic_number in (b"P2", b"P5"): + self.custom_mimetype = "image/x-portable-graymap" + elif magic_number in (b"P3", b"P6"): + self.custom_mimetype = "image/x-portable-pixmap" + + self._size = int(self._read_token()), int(self._read_token()) + + decoder_name = "raw" + if magic_number in (b"P1", b"P2", b"P3"): + decoder_name = "ppm_plain" + + args: str | tuple[str | int, ...] 
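+ # Worked example (not upstream code): a header of b"P6 640 480 255" + # parses to size (640, 480) with the fast "raw" decoder, while + # b"P5 4 4 65535" promotes the mode to "I" and selects rawmode "I;16B".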
+ if mode == "1": + args = "1;I" + elif mode == "F": + scale = float(self._read_token()) + if scale == 0.0 or not math.isfinite(scale): + msg = "scale must be finite and non-zero" + raise ValueError(msg) + self.info["scale"] = abs(scale) + + rawmode = "F;32F" if scale < 0 else "F;32BF" + args = (rawmode, 0, -1) + else: + maxval = int(self._read_token()) + if not 0 < maxval < 65536: + msg = "maxval must be greater than 0 and less than 65536" + raise ValueError(msg) + if maxval > 255 and mode == "L": + self._mode = "I" + + rawmode = mode + if decoder_name != "ppm_plain": + # If maxval matches a bit depth, use the raw decoder directly + if maxval == 65535 and mode == "L": + rawmode = "I;16B" + elif maxval != 255: + decoder_name = "ppm" + + args = rawmode if decoder_name == "raw" else (rawmode, maxval) + self.tile = [ + ImageFile._Tile(decoder_name, (0, 0) + self.size, self.fp.tell(), args) + ] + + +# +# -------------------------------------------------------------------- + + +class PpmPlainDecoder(ImageFile.PyDecoder): + _pulls_fd = True + _comment_spans: bool + + def _read_block(self) -> bytes: + assert self.fd is not None + + return self.fd.read(ImageFile.SAFEBLOCK) + + def _find_comment_end(self, block: bytes, start: int = 0) -> int: + a = block.find(b"\n", start) + b = block.find(b"\r", start) + return min(a, b) if a * b > 0 else max(a, b) # lowest nonnegative index (or -1) + + def _ignore_comments(self, block: bytes) -> bytes: + if self._comment_spans: + # Finish current comment + while block: + comment_end = self._find_comment_end(block) + if comment_end != -1: + # Comment ends in this block + # Delete tail of comment + block = block[comment_end + 1 :] + break + else: + # Comment spans whole block + # So read the next block, looking for the end + block = self._read_block() + + # Search for any further comments + self._comment_spans = False + while True: + comment_start = block.find(b"#") + if comment_start == -1: + # No comment found + break + comment_end = self._find_comment_end(block, comment_start) + if comment_end != -1: + # Comment ends in this block + # Delete comment + block = block[:comment_start] + block[comment_end + 1 :] + else: + # Comment continues to next block(s) + block = block[:comment_start] + self._comment_spans = True + break + return block + + def _decode_bitonal(self) -> bytearray: + """ + This is a separate method because in the plain PBM format, all data tokens are + exactly one byte, so the inter-token whitespace is optional. 
+ """ + data = bytearray() + total_bytes = self.state.xsize * self.state.ysize + + while len(data) != total_bytes: + block = self._read_block() # read next block + if not block: + # eof + break + + block = self._ignore_comments(block) + + tokens = b"".join(block.split()) + for token in tokens: + if token not in (48, 49): + msg = b"Invalid token for this mode: %s" % bytes([token]) + raise ValueError(msg) + data = (data + tokens)[:total_bytes] + invert = bytes.maketrans(b"01", b"\xff\x00") + return data.translate(invert) + + def _decode_blocks(self, maxval: int) -> bytearray: + data = bytearray() + max_len = 10 + out_byte_count = 4 if self.mode == "I" else 1 + out_max = 65535 if self.mode == "I" else 255 + bands = Image.getmodebands(self.mode) + total_bytes = self.state.xsize * self.state.ysize * bands * out_byte_count + + half_token = b"" + while len(data) != total_bytes: + block = self._read_block() # read next block + if not block: + if half_token: + block = bytearray(b" ") # flush half_token + else: + # eof + break + + block = self._ignore_comments(block) + + if half_token: + block = half_token + block # stitch half_token to new block + half_token = b"" + + tokens = block.split() + + if block and not block[-1:].isspace(): # block might split token + half_token = tokens.pop() # save half token for later + if len(half_token) > max_len: # prevent buildup of half_token + msg = ( + b"Token too long found in data: %s" % half_token[: max_len + 1] + ) + raise ValueError(msg) + + for token in tokens: + if len(token) > max_len: + msg = b"Token too long found in data: %s" % token[: max_len + 1] + raise ValueError(msg) + value = int(token) + if value < 0: + msg_str = f"Channel value is negative: {value}" + raise ValueError(msg_str) + if value > maxval: + msg_str = f"Channel value too large for this mode: {value}" + raise ValueError(msg_str) + value = round(value / maxval * out_max) + data += o32(value) if self.mode == "I" else o8(value) + if len(data) == total_bytes: # finished! 
+ break + return data + + def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]: + self._comment_spans = False + if self.mode == "1": + data = self._decode_bitonal() + rawmode = "1;8" + else: + maxval = self.args[-1] + data = self._decode_blocks(maxval) + rawmode = "I;32" if self.mode == "I" else self.mode + self.set_as_raw(bytes(data), rawmode) + return -1, 0 + + +class PpmDecoder(ImageFile.PyDecoder): + _pulls_fd = True + + def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]: + assert self.fd is not None + + data = bytearray() + maxval = self.args[-1] + in_byte_count = 1 if maxval < 256 else 2 + out_byte_count = 4 if self.mode == "I" else 1 + out_max = 65535 if self.mode == "I" else 255 + bands = Image.getmodebands(self.mode) + dest_length = self.state.xsize * self.state.ysize * bands * out_byte_count + while len(data) < dest_length: + pixels = self.fd.read(in_byte_count * bands) + if len(pixels) < in_byte_count * bands: + # eof + break + for b in range(bands): + value = ( + pixels[b] if in_byte_count == 1 else i16(pixels, b * in_byte_count) + ) + value = min(out_max, round(value / maxval * out_max)) + data += o32(value) if self.mode == "I" else o8(value) + rawmode = "I;32" if self.mode == "I" else self.mode + self.set_as_raw(bytes(data), rawmode) + return -1, 0 + + +# +# -------------------------------------------------------------------- + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + if im.mode == "1": + rawmode, head = "1;I", b"P4" + elif im.mode == "L": + rawmode, head = "L", b"P5" + elif im.mode in ("I", "I;16"): + rawmode, head = "I;16B", b"P5" + elif im.mode in ("RGB", "RGBA"): + rawmode, head = "RGB", b"P6" + elif im.mode == "F": + rawmode, head = "F;32F", b"Pf" + else: + msg = f"cannot write mode {im.mode} as PPM" + raise OSError(msg) + fp.write(head + b"\n%d %d\n" % im.size) + if head == b"P6": + fp.write(b"255\n") + elif head == b"P5": + if rawmode == "L": + fp.write(b"255\n") + else: + fp.write(b"65535\n") + elif head == b"Pf": + fp.write(b"-1.0\n") + row_order = -1 if im.mode == "F" else 1 + ImageFile._save( + im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, row_order))] + ) + + +# +# -------------------------------------------------------------------- + + +Image.register_open(PpmImageFile.format, PpmImageFile, _accept) +Image.register_save(PpmImageFile.format, _save) + +Image.register_decoder("ppm", PpmDecoder) +Image.register_decoder("ppm_plain", PpmPlainDecoder) + +Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm", ".pnm", ".pfm"]) + +Image.register_mime(PpmImageFile.format, "image/x-portable-anymap") diff --git a/py311/lib/python3.11/site-packages/PIL/PsdImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/PsdImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..f49aaeeb1f55cd2b6b17d9aa10f68876384fd410 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/PsdImagePlugin.py @@ -0,0 +1,333 @@ +# +# The Python Imaging Library +# $Id$ +# +# Adobe PSD 2.5/3.0 file handling +# +# History: +# 1995-09-01 fl Created +# 1997-01-03 fl Read most PSD images +# 1997-01-18 fl Fixed P and CMYK support +# 2001-10-21 fl Added seek/tell support (for layers) +# +# Copyright (c) 1997-2001 by Secret Labs AB. +# Copyright (c) 1995-2001 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. 
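+ # + # Usage sketch (illustrative; "art.psd" is a hypothetical layered + # file): + # from PIL import Image + # with Image.open("art.psd") as im: + #     n = im.n_frames  # number of layers found in the file + #     im.seek(2)  # layers are addressed 1..n, as tell() reports + #     im.load()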
+# +from __future__ import annotations + +import io +from functools import cached_property +from typing import IO + +from . import Image, ImageFile, ImagePalette +from ._binary import i8 +from ._binary import i16be as i16 +from ._binary import i32be as i32 +from ._binary import si16be as si16 +from ._binary import si32be as si32 +from ._util import DeferredError + +MODES = { + # (photoshop mode, bits) -> (pil mode, required channels) + (0, 1): ("1", 1), + (0, 8): ("L", 1), + (1, 8): ("L", 1), + (2, 8): ("P", 1), + (3, 8): ("RGB", 3), + (4, 8): ("CMYK", 4), + (7, 8): ("L", 1), # FIXME: multilayer + (8, 8): ("L", 1), # duotone + (9, 8): ("LAB", 3), +} + + +# --------------------------------------------------------------------. +# read PSD images + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(b"8BPS") + + +## +# Image plugin for Photoshop images. + + +class PsdImageFile(ImageFile.ImageFile): + format = "PSD" + format_description = "Adobe Photoshop" + _close_exclusive_fp_after_loading = False + + def _open(self) -> None: + read = self.fp.read + + # + # header + + s = read(26) + if not _accept(s) or i16(s, 4) != 1: + msg = "not a PSD file" + raise SyntaxError(msg) + + psd_bits = i16(s, 22) + psd_channels = i16(s, 12) + psd_mode = i16(s, 24) + + mode, channels = MODES[(psd_mode, psd_bits)] + + if channels > psd_channels: + msg = "not enough channels" + raise OSError(msg) + if mode == "RGB" and psd_channels == 4: + mode = "RGBA" + channels = 4 + + self._mode = mode + self._size = i32(s, 18), i32(s, 14) + + # + # color mode data + + size = i32(read(4)) + if size: + data = read(size) + if mode == "P" and size == 768: + self.palette = ImagePalette.raw("RGB;L", data) + + # + # image resources + + self.resources = [] + + size = i32(read(4)) + if size: + # load resources + end = self.fp.tell() + size + while self.fp.tell() < end: + read(4) # signature + id = i16(read(2)) + name = read(i8(read(1))) + if not (len(name) & 1): + read(1) # padding + data = read(i32(read(4))) + if len(data) & 1: + read(1) # padding + self.resources.append((id, name, data)) + if id == 1039: # ICC profile + self.info["icc_profile"] = data + + # + # layer and mask information + + self._layers_position = None + + size = i32(read(4)) + if size: + end = self.fp.tell() + size + size = i32(read(4)) + if size: + self._layers_position = self.fp.tell() + self._layers_size = size + self.fp.seek(end) + self._n_frames: int | None = None + + # + # image descriptor + + self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels) + + # keep the file open + self._fp = self.fp + self.frame = 1 + self._min_frame = 1 + + @cached_property + def layers( + self, + ) -> list[tuple[str, str, tuple[int, int, int, int], list[ImageFile._Tile]]]: + layers = [] + if self._layers_position is not None: + if isinstance(self._fp, DeferredError): + raise self._fp.ex + self._fp.seek(self._layers_position) + _layer_data = io.BytesIO(ImageFile._safe_read(self._fp, self._layers_size)) + layers = _layerinfo(_layer_data, self._layers_size) + self._n_frames = len(layers) + return layers + + @property + def n_frames(self) -> int: + if self._n_frames is None: + self._n_frames = len(self.layers) + return self._n_frames + + @property + def is_animated(self) -> bool: + return len(self.layers) > 1 + + def seek(self, layer: int) -> None: + if not self._seek_check(layer): + return + if isinstance(self._fp, DeferredError): + raise self._fp.ex + + # seek to given layer (1..max) + _, mode, _, tile = self.layers[layer - 1] + self._mode = mode + 
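# Note (illustrative, not upstream code): each self.layers entry is a + # (name, mode, bbox, tile) tuple built by _layerinfo(), so seeking + # just swaps in that layer's mode and tile list and reuses self._fp. +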
self.tile = tile + self.frame = layer + self.fp = self._fp + + def tell(self) -> int: + # return layer number (0=image, 1..max=layers) + return self.frame + + +def _layerinfo( + fp: IO[bytes], ct_bytes: int +) -> list[tuple[str, str, tuple[int, int, int, int], list[ImageFile._Tile]]]: + # read layerinfo block + layers = [] + + def read(size: int) -> bytes: + return ImageFile._safe_read(fp, size) + + ct = si16(read(2)) + + # sanity check + if ct_bytes < (abs(ct) * 20): + msg = "Layer block too short for number of layers requested" + raise SyntaxError(msg) + + for _ in range(abs(ct)): + # bounding box + y0 = si32(read(4)) + x0 = si32(read(4)) + y1 = si32(read(4)) + x1 = si32(read(4)) + + # image info + bands = [] + ct_types = i16(read(2)) + if ct_types > 4: + fp.seek(ct_types * 6 + 12, io.SEEK_CUR) + size = i32(read(4)) + fp.seek(size, io.SEEK_CUR) + continue + + for _ in range(ct_types): + type = i16(read(2)) + + if type == 65535: + b = "A" + else: + b = "RGBA"[type] + + bands.append(b) + read(4) # size + + # figure out the image mode + bands.sort() + if bands == ["R"]: + mode = "L" + elif bands == ["B", "G", "R"]: + mode = "RGB" + elif bands == ["A", "B", "G", "R"]: + mode = "RGBA" + else: + mode = "" # unknown + + # skip over blend flags and extra information + read(12) # filler + name = "" + size = i32(read(4)) # length of the extra data field + if size: + data_end = fp.tell() + size + + length = i32(read(4)) + if length: + fp.seek(length - 16, io.SEEK_CUR) + + length = i32(read(4)) + if length: + fp.seek(length, io.SEEK_CUR) + + length = i8(read(1)) + if length: + # Don't know the proper encoding, + # Latin-1 should be a good guess + name = read(length).decode("latin-1", "replace") + + fp.seek(data_end) + layers.append((name, mode, (x0, y0, x1, y1))) + + # get tiles + layerinfo = [] + for i, (name, mode, bbox) in enumerate(layers): + tile = [] + for m in mode: + t = _maketile(fp, m, bbox, 1) + if t: + tile.extend(t) + layerinfo.append((name, mode, bbox, tile)) + + return layerinfo + + +def _maketile( + file: IO[bytes], mode: str, bbox: tuple[int, int, int, int], channels: int +) -> list[ImageFile._Tile]: + tiles = [] + read = file.read + + compression = i16(read(2)) + + xsize = bbox[2] - bbox[0] + ysize = bbox[3] - bbox[1] + + offset = file.tell() + + if compression == 0: + # + # raw compression + for channel in range(channels): + layer = mode[channel] + if mode == "CMYK": + layer += ";I" + tiles.append(ImageFile._Tile("raw", bbox, offset, layer)) + offset = offset + xsize * ysize + + elif compression == 1: + # + # packbits compression + i = 0 + bytecount = read(channels * ysize * 2) + offset = file.tell() + for channel in range(channels): + layer = mode[channel] + if mode == "CMYK": + layer += ";I" + tiles.append(ImageFile._Tile("packbits", bbox, offset, layer)) + for y in range(ysize): + offset = offset + i16(bytecount, i) + i += 2 + + file.seek(offset) + + if offset & 1: + read(1) # padding + + return tiles + + +# -------------------------------------------------------------------- +# registry + + +Image.register_open(PsdImageFile.format, PsdImageFile, _accept) + +Image.register_extension(PsdImageFile.format, ".psd") + +Image.register_mime(PsdImageFile.format, "image/vnd.adobe.photoshop") diff --git a/py311/lib/python3.11/site-packages/PIL/QoiImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/QoiImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..dba5d809fef75e281ac10f92f1868c58b1b4508c --- /dev/null +++ 
b/py311/lib/python3.11/site-packages/PIL/QoiImagePlugin.py @@ -0,0 +1,234 @@ +# +# The Python Imaging Library. +# +# QOI support for PIL +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import os +from typing import IO + +from . import Image, ImageFile +from ._binary import i32be as i32 +from ._binary import o8 +from ._binary import o32be as o32 + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(b"qoif") + + +class QoiImageFile(ImageFile.ImageFile): + format = "QOI" + format_description = "Quite OK Image" + + def _open(self) -> None: + if not _accept(self.fp.read(4)): + msg = "not a QOI file" + raise SyntaxError(msg) + + self._size = i32(self.fp.read(4)), i32(self.fp.read(4)) + + channels = self.fp.read(1)[0] + self._mode = "RGB" if channels == 3 else "RGBA" + + self.fp.seek(1, os.SEEK_CUR) # colorspace + self.tile = [ImageFile._Tile("qoi", (0, 0) + self._size, self.fp.tell())] + + +class QoiDecoder(ImageFile.PyDecoder): + _pulls_fd = True + _previous_pixel: bytes | bytearray | None = None + _previously_seen_pixels: dict[int, bytes | bytearray] = {} + + def _add_to_previous_pixels(self, value: bytes | bytearray) -> None: + self._previous_pixel = value + + r, g, b, a = value + hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64 + self._previously_seen_pixels[hash_value] = value + + def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]: + assert self.fd is not None + + self._previously_seen_pixels = {} + self._previous_pixel = bytearray((0, 0, 0, 255)) + + data = bytearray() + bands = Image.getmodebands(self.mode) + dest_length = self.state.xsize * self.state.ysize * bands + while len(data) < dest_length: + byte = self.fd.read(1)[0] + value: bytes | bytearray + if byte == 0b11111110 and self._previous_pixel: # QOI_OP_RGB + value = bytearray(self.fd.read(3)) + self._previous_pixel[3:] + elif byte == 0b11111111: # QOI_OP_RGBA + value = self.fd.read(4) + else: + op = byte >> 6 + if op == 0: # QOI_OP_INDEX + op_index = byte & 0b00111111 + value = self._previously_seen_pixels.get( + op_index, bytearray((0, 0, 0, 0)) + ) + elif op == 1 and self._previous_pixel: # QOI_OP_DIFF + value = bytearray( + ( + (self._previous_pixel[0] + ((byte & 0b00110000) >> 4) - 2) + % 256, + (self._previous_pixel[1] + ((byte & 0b00001100) >> 2) - 2) + % 256, + (self._previous_pixel[2] + (byte & 0b00000011) - 2) % 256, + self._previous_pixel[3], + ) + ) + elif op == 2 and self._previous_pixel: # QOI_OP_LUMA + second_byte = self.fd.read(1)[0] + diff_green = (byte & 0b00111111) - 32 + diff_red = ((second_byte & 0b11110000) >> 4) - 8 + diff_blue = (second_byte & 0b00001111) - 8 + + value = bytearray( + tuple( + (self._previous_pixel[i] + diff_green + diff) % 256 + for i, diff in enumerate((diff_red, 0, diff_blue)) + ) + ) + value += self._previous_pixel[3:] + elif op == 3 and self._previous_pixel: # QOI_OP_RUN + run_length = (byte & 0b00111111) + 1 + value = self._previous_pixel + if bands == 3: + value = value[:3] + data += value * run_length + continue + self._add_to_previous_pixels(value) + + if bands == 3: + value = value[:3] + data += value + self.set_as_raw(data) + return -1, 0 + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + if im.mode == "RGB": + channels = 3 + elif im.mode == "RGBA": + channels = 4 + else: + msg = "Unsupported QOI image mode" + raise ValueError(msg) + + colorspace = 0 if im.encoderinfo.get("colorspace") == "sRGB" else 1 + + fp.write(b"qoif") + 
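# Header sketch (illustrative, not upstream code): the 14-byte QOI + # header is the magic, two big-endian u32 dimensions, a channel count + # and a colorspace byte, so a 640x480 sRGB RGB image writes + # b"qoif" + o32(640) + o32(480) + o8(3) + o8(0). +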
fp.write(o32(im.size[0])) + fp.write(o32(im.size[1])) + fp.write(o8(channels)) + fp.write(o8(colorspace)) + + ImageFile._save(im, fp, [ImageFile._Tile("qoi", (0, 0) + im.size)]) + + +class QoiEncoder(ImageFile.PyEncoder): + _pushes_fd = True + _previous_pixel: tuple[int, int, int, int] | None = None + _previously_seen_pixels: dict[int, tuple[int, int, int, int]] = {} + _run = 0 + + def _write_run(self) -> bytes: + data = o8(0b11000000 | (self._run - 1)) # QOI_OP_RUN + self._run = 0 + return data + + def _delta(self, left: int, right: int) -> int: + result = (left - right) & 255 + if result >= 128: + result -= 256 + return result + + def encode(self, bufsize: int) -> tuple[int, int, bytes]: + assert self.im is not None + + self._previously_seen_pixels = {0: (0, 0, 0, 0)} + self._previous_pixel = (0, 0, 0, 255) + + data = bytearray() + w, h = self.im.size + bands = Image.getmodebands(self.mode) + + for y in range(h): + for x in range(w): + pixel = self.im.getpixel((x, y)) + if bands == 3: + pixel = (*pixel, 255) + + if pixel == self._previous_pixel: + self._run += 1 + if self._run == 62: + data += self._write_run() + else: + if self._run: + data += self._write_run() + + r, g, b, a = pixel + hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64 + if self._previously_seen_pixels.get(hash_value) == pixel: + data += o8(hash_value) # QOI_OP_INDEX + elif self._previous_pixel: + self._previously_seen_pixels[hash_value] = pixel + + prev_r, prev_g, prev_b, prev_a = self._previous_pixel + if prev_a == a: + delta_r = self._delta(r, prev_r) + delta_g = self._delta(g, prev_g) + delta_b = self._delta(b, prev_b) + + if ( + -2 <= delta_r < 2 + and -2 <= delta_g < 2 + and -2 <= delta_b < 2 + ): + data += o8( + 0b01000000 + | (delta_r + 2) << 4 + | (delta_g + 2) << 2 + | (delta_b + 2) + ) # QOI_OP_DIFF + else: + delta_gr = self._delta(delta_r, delta_g) + delta_gb = self._delta(delta_b, delta_g) + if ( + -8 <= delta_gr < 8 + and -32 <= delta_g < 32 + and -8 <= delta_gb < 8 + ): + data += o8( + 0b10000000 | (delta_g + 32) + ) # QOI_OP_LUMA + data += o8((delta_gr + 8) << 4 | (delta_gb + 8)) + else: + data += o8(0b11111110) # QOI_OP_RGB + data += bytes(pixel[:3]) + else: + data += o8(0b11111111) # QOI_OP_RGBA + data += bytes(pixel) + + self._previous_pixel = pixel + + if self._run: + data += self._write_run() + data += bytes((0, 0, 0, 0, 0, 0, 0, 1)) # padding + + return len(data), 0, data + + +Image.register_open(QoiImageFile.format, QoiImageFile, _accept) +Image.register_decoder("qoi", QoiDecoder) +Image.register_extension(QoiImageFile.format, ".qoi") + +Image.register_save(QoiImageFile.format, _save) +Image.register_encoder("qoi", QoiEncoder) diff --git a/py311/lib/python3.11/site-packages/PIL/SgiImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/SgiImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..853022150ae849e490378e41e831897050c207a2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/SgiImagePlugin.py @@ -0,0 +1,231 @@ +# +# The Python Imaging Library. +# $Id$ +# +# SGI image file handling +# +# See "The SGI Image File Format (Draft version 0.97)", Paul Haeberli. +# +# +# +# History: +# 2017-22-07 mb Add RLE decompression +# 2016-16-10 mb Add save method without compression +# 1995-09-10 fl Created +# +# Copyright (c) 2016 by Mickael Bonfill. +# Copyright (c) 2008 by Karsten Hiddemann. +# Copyright (c) 1997 by Secret Labs AB. +# Copyright (c) 1995 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. 
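+ # + # Usage sketch (illustrative): im.save("out.sgi") writes 8-bit + # channels; passing bpc=2, e.g. im.save("out.sgi", bpc=2), selects the + # 16-bit "L;16B" channel layout in _save() below.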
+# +from __future__ import annotations + +import os +import struct +from typing import IO + +from . import Image, ImageFile +from ._binary import i16be as i16 +from ._binary import o8 + + +def _accept(prefix: bytes) -> bool: + return len(prefix) >= 2 and i16(prefix) == 474 + + +MODES = { + (1, 1, 1): "L", + (1, 2, 1): "L", + (2, 1, 1): "L;16B", + (2, 2, 1): "L;16B", + (1, 3, 3): "RGB", + (2, 3, 3): "RGB;16B", + (1, 3, 4): "RGBA", + (2, 3, 4): "RGBA;16B", +} + + +## +# Image plugin for SGI images. +class SgiImageFile(ImageFile.ImageFile): + format = "SGI" + format_description = "SGI Image File Format" + + def _open(self) -> None: + # HEAD + assert self.fp is not None + + headlen = 512 + s = self.fp.read(headlen) + + if not _accept(s): + msg = "Not an SGI image file" + raise ValueError(msg) + + # compression : verbatim or RLE + compression = s[2] + + # bpc : 1 or 2 bytes (8bits or 16bits) + bpc = s[3] + + # dimension : 1, 2 or 3 (depending on xsize, ysize and zsize) + dimension = i16(s, 4) + + # xsize : width + xsize = i16(s, 6) + + # ysize : height + ysize = i16(s, 8) + + # zsize : channels count + zsize = i16(s, 10) + + # determine mode from bits/zsize + try: + rawmode = MODES[(bpc, dimension, zsize)] + except KeyError: + msg = "Unsupported SGI image mode" + raise ValueError(msg) + + self._size = xsize, ysize + self._mode = rawmode.split(";")[0] + if self.mode == "RGB": + self.custom_mimetype = "image/rgb" + + # orientation -1 : scanlines begins at the bottom-left corner + orientation = -1 + + # decoder info + if compression == 0: + pagesize = xsize * ysize * bpc + if bpc == 2: + self.tile = [ + ImageFile._Tile( + "SGI16", + (0, 0) + self.size, + headlen, + (self.mode, 0, orientation), + ) + ] + else: + self.tile = [] + offset = headlen + for layer in self.mode: + self.tile.append( + ImageFile._Tile( + "raw", (0, 0) + self.size, offset, (layer, 0, orientation) + ) + ) + offset += pagesize + elif compression == 1: + self.tile = [ + ImageFile._Tile( + "sgi_rle", (0, 0) + self.size, headlen, (rawmode, orientation, bpc) + ) + ] + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + if im.mode not in {"RGB", "RGBA", "L"}: + msg = "Unsupported SGI image mode" + raise ValueError(msg) + + # Get the keyword arguments + info = im.encoderinfo + + # Byte-per-pixel precision, 1 = 8bits per pixel + bpc = info.get("bpc", 1) + + if bpc not in (1, 2): + msg = "Unsupported number of bytes per pixel" + raise ValueError(msg) + + # Flip the image, since the origin of SGI file is the bottom-left corner + orientation = -1 + # Define the file as SGI File Format + magic_number = 474 + # Run-Length Encoding Compression - Unsupported at this time + rle = 0 + + # X Dimension = width / Y Dimension = height + x, y = im.size + # Z Dimension: Number of channels + z = len(im.mode) + # Number of dimensions (x,y,z) + if im.mode == "L": + dimension = 1 if y == 1 else 2 + else: + dimension = 3 + + # Minimum Byte value + pinmin = 0 + # Maximum Byte value (255 = 8bits per pixel) + pinmax = 255 + # Image name (79 characters max, truncated below in write) + img_name = os.path.splitext(os.path.basename(filename))[0] + if isinstance(img_name, str): + img_name = img_name.encode("ascii", "ignore") + # Standard representation of pixel in the file + colormap = 0 + fp.write(struct.pack(">h", magic_number)) + fp.write(o8(rle)) + fp.write(o8(bpc)) + fp.write(struct.pack(">H", dimension)) + fp.write(struct.pack(">H", x)) + fp.write(struct.pack(">H", y)) + fp.write(struct.pack(">H", z)) + fp.write(struct.pack(">l", 
pinmin)) + fp.write(struct.pack(">l", pinmax)) + fp.write(struct.pack("4s", b"")) # dummy + fp.write(struct.pack("79s", img_name)) # truncates to 79 chars + fp.write(struct.pack("s", b"")) # force null byte after img_name + fp.write(struct.pack(">l", colormap)) + fp.write(struct.pack("404s", b"")) # dummy + + rawmode = "L" + if bpc == 2: + rawmode = "L;16B" + + for channel in im.split(): + fp.write(channel.tobytes("raw", rawmode, 0, orientation)) + + if hasattr(fp, "flush"): + fp.flush() + + +class SGI16Decoder(ImageFile.PyDecoder): + _pulls_fd = True + + def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]: + assert self.fd is not None + assert self.im is not None + + rawmode, stride, orientation = self.args + pagesize = self.state.xsize * self.state.ysize + zsize = len(self.mode) + self.fd.seek(512) + + for band in range(zsize): + channel = Image.new("L", (self.state.xsize, self.state.ysize)) + channel.frombytes( + self.fd.read(2 * pagesize), "raw", "L;16B", stride, orientation + ) + self.im.putband(channel.im, band) + + return -1, 0 + + +# +# registry + + +Image.register_decoder("SGI16", SGI16Decoder) +Image.register_open(SgiImageFile.format, SgiImageFile, _accept) +Image.register_save(SgiImageFile.format, _save) +Image.register_mime(SgiImageFile.format, "image/sgi") + +Image.register_extensions(SgiImageFile.format, [".bw", ".rgb", ".rgba", ".sgi"]) + +# End of file diff --git a/py311/lib/python3.11/site-packages/PIL/SpiderImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/SpiderImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..868019e80a80cffc5e9f193ddbf96a0ba64ad9ea --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/SpiderImagePlugin.py @@ -0,0 +1,331 @@ +# +# The Python Imaging Library. +# +# SPIDER image file handling +# +# History: +# 2004-08-02 Created BB +# 2006-03-02 added save method +# 2006-03-13 added support for stack images +# +# Copyright (c) 2004 by Health Research Inc. (HRI) RENSSELAER, NY 12144. +# Copyright (c) 2004 by William Baxter. +# Copyright (c) 2004 by Secret Labs AB. +# Copyright (c) 2004 by Fredrik Lundh. +# + +## +# Image plugin for the Spider image format. This format is used +# by the SPIDER software, in processing image data from electron +# microscopy and tomography. +## + +# +# SpiderImagePlugin.py +# +# The Spider image format is used by SPIDER software, in processing +# image data from electron microscopy and tomography. +# +# Spider home page: +# https://spider.wadsworth.org/spider_doc/spider/docs/spider.html +# +# Details about the Spider image format: +# https://spider.wadsworth.org/spider_doc/spider/docs/image_doc.html +# +from __future__ import annotations + +import os +import struct +import sys +from typing import IO, Any, cast + +from . import Image, ImageFile +from ._util import DeferredError + +TYPE_CHECKING = False + + +def isInt(f: Any) -> int: + try: + i = int(f) + if f - i == 0: + return 1 + else: + return 0 + except (ValueError, OverflowError): + return 0 + + +iforms = [1, 3, -11, -12, -21, -22] + + +# There is no magic number to identify Spider files, so just check a +# series of header locations to see if they have reasonable values. +# Returns no. 
of bytes in the header, if it is a valid Spider header, +# otherwise returns 0 + + +def isSpiderHeader(t: tuple[float, ...]) -> int: + h = (99,) + t # add 1 value so can use spider header index start=1 + # header values 1,2,5,12,13,22,23 should be integers + for i in [1, 2, 5, 12, 13, 22, 23]: + if not isInt(h[i]): + return 0 + # check iform + iform = int(h[5]) + if iform not in iforms: + return 0 + # check other header values + labrec = int(h[13]) # no. records in file header + labbyt = int(h[22]) # total no. of bytes in header + lenbyt = int(h[23]) # record length in bytes + if labbyt != (labrec * lenbyt): + return 0 + # looks like a valid header + return labbyt + + +def isSpiderImage(filename: str) -> int: + with open(filename, "rb") as fp: + f = fp.read(92) # read 23 * 4 bytes + t = struct.unpack(">23f", f) # try big-endian first + hdrlen = isSpiderHeader(t) + if hdrlen == 0: + t = struct.unpack("<23f", f) # little-endian + hdrlen = isSpiderHeader(t) + return hdrlen + + +class SpiderImageFile(ImageFile.ImageFile): + format = "SPIDER" + format_description = "Spider 2D image" + _close_exclusive_fp_after_loading = False + + def _open(self) -> None: + # check header + n = 27 * 4 # read 27 float values + f = self.fp.read(n) + + try: + self.bigendian = 1 + t = struct.unpack(">27f", f) # try big-endian first + hdrlen = isSpiderHeader(t) + if hdrlen == 0: + self.bigendian = 0 + t = struct.unpack("<27f", f) # little-endian + hdrlen = isSpiderHeader(t) + if hdrlen == 0: + msg = "not a valid Spider file" + raise SyntaxError(msg) + except struct.error as e: + msg = "not a valid Spider file" + raise SyntaxError(msg) from e + + h = (99,) + t # add 1 value : spider header index starts at 1 + iform = int(h[5]) + if iform != 1: + msg = "not a Spider 2D image" + raise SyntaxError(msg) + + self._size = int(h[12]), int(h[2]) # size in pixels (width, height) + self.istack = int(h[24]) + self.imgnumber = int(h[27]) + + if self.istack == 0 and self.imgnumber == 0: + # stk=0, img=0: a regular 2D image + offset = hdrlen + self._nimages = 1 + elif self.istack > 0 and self.imgnumber == 0: + # stk>0, img=0: Opening the stack for the first time + self.imgbytes = int(h[12]) * int(h[2]) * 4 + self.hdrlen = hdrlen + self._nimages = int(h[26]) + # Point to the first image in the stack + offset = hdrlen * 2 + self.imgnumber = 1 + elif self.istack == 0 and self.imgnumber > 0: + # stk=0, img>0: an image within the stack + offset = hdrlen + self.stkoffset + self.istack = 2 # So Image knows it's still a stack + else: + msg = "inconsistent stack header values" + raise SyntaxError(msg) + + if self.bigendian: + self.rawmode = "F;32BF" + else: + self.rawmode = "F;32F" + self._mode = "F" + + self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, offset, self.rawmode)] + self._fp = self.fp # FIXME: hack + + @property + def n_frames(self) -> int: + return self._nimages + + @property + def is_animated(self) -> bool: + return self._nimages > 1 + + # 1st image index is zero (although SPIDER imgnumber starts at 1) + def tell(self) -> int: + if self.imgnumber < 1: + return 0 + else: + return self.imgnumber - 1 + + def seek(self, frame: int) -> None: + if self.istack == 0: + msg = "attempt to seek in a non-stack file" + raise EOFError(msg) + if not self._seek_check(frame): + return + if isinstance(self._fp, DeferredError): + raise self._fp.ex + self.stkoffset = self.hdrlen + frame * (self.hdrlen + self.imgbytes) + self.fp = self._fp + self.fp.seek(self.stkoffset) + self._open() + + # returns a byte image after rescaling to 0..255 + 
def convert2byte(self, depth: int = 255) -> Image.Image: + extrema = self.getextrema() + assert isinstance(extrema[0], float) + minimum, maximum = cast(tuple[float, float], extrema) + m: float = 1 + if maximum != minimum: + m = depth / (maximum - minimum) + b = -m * minimum + return self.point(lambda i: i * m + b).convert("L") + + if TYPE_CHECKING: + from . import ImageTk + + # returns a ImageTk.PhotoImage object, after rescaling to 0..255 + def tkPhotoImage(self) -> ImageTk.PhotoImage: + from . import ImageTk + + return ImageTk.PhotoImage(self.convert2byte(), palette=256) + + +# -------------------------------------------------------------------- +# Image series + + +# given a list of filenames, return a list of images +def loadImageSeries(filelist: list[str] | None = None) -> list[Image.Image] | None: + """create a list of :py:class:`~PIL.Image.Image` objects for use in a montage""" + if filelist is None or len(filelist) < 1: + return None + + byte_imgs = [] + for img in filelist: + if not os.path.exists(img): + print(f"unable to find {img}") + continue + try: + with Image.open(img) as im: + assert isinstance(im, SpiderImageFile) + byte_im = im.convert2byte() + except Exception: + if not isSpiderImage(img): + print(f"{img} is not a Spider image file") + continue + byte_im.info["filename"] = img + byte_imgs.append(byte_im) + return byte_imgs + + +# -------------------------------------------------------------------- +# For saving images in Spider format + + +def makeSpiderHeader(im: Image.Image) -> list[bytes]: + nsam, nrow = im.size + lenbyt = nsam * 4 # There are labrec records in the header + labrec = int(1024 / lenbyt) + if 1024 % lenbyt != 0: + labrec += 1 + labbyt = labrec * lenbyt + nvalues = int(labbyt / 4) + if nvalues < 23: + return [] + + hdr = [0.0] * nvalues + + # NB these are Fortran indices + hdr[1] = 1.0 # nslice (=1 for an image) + hdr[2] = float(nrow) # number of rows per slice + hdr[3] = float(nrow) # number of records in the image + hdr[5] = 1.0 # iform for 2D image + hdr[12] = float(nsam) # number of pixels per line + hdr[13] = float(labrec) # number of records in file header + hdr[22] = float(labbyt) # total number of bytes in header + hdr[23] = float(lenbyt) # record length in bytes + + # adjust for Fortran indexing + hdr = hdr[1:] + hdr.append(0.0) + # pack binary data into a string + return [struct.pack("f", v) for v in hdr] + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + if im.mode != "F": + im = im.convert("F") + + hdr = makeSpiderHeader(im) + if len(hdr) < 256: + msg = "Error creating Spider header" + raise OSError(msg) + + # write the SPIDER header + fp.writelines(hdr) + + rawmode = "F;32NF" # 32-bit native floating point + ImageFile._save(im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, rawmode)]) + + +def _save_spider(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + # get the filename extension and register it with Image + filename_ext = os.path.splitext(filename)[1] + ext = filename_ext.decode() if isinstance(filename_ext, bytes) else filename_ext + Image.register_extension(SpiderImageFile.format, ext) + _save(im, fp, filename) + + +# -------------------------------------------------------------------- + + +Image.register_open(SpiderImageFile.format, SpiderImageFile) +Image.register_save(SpiderImageFile.format, _save_spider) + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("Syntax: python3 SpiderImagePlugin.py [infile] [outfile]") + sys.exit() + + filename = sys.argv[1] + if not 
isSpiderImage(filename): + print("input image must be in Spider format") + sys.exit() + + with Image.open(filename) as im: + print(f"image: {im}") + print(f"format: {im.format}") + print(f"size: {im.size}") + print(f"mode: {im.mode}") + print("max, min: ", end=" ") + print(im.getextrema()) + + if len(sys.argv) > 2: + outfile = sys.argv[2] + + # perform some image operation + im = im.transpose(Image.Transpose.FLIP_LEFT_RIGHT) + print( + f"saving a flipped version of {os.path.basename(filename)} " + f"as {outfile} " + ) + im.save(outfile, SpiderImageFile.format) diff --git a/py311/lib/python3.11/site-packages/PIL/SunImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/SunImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..8912379ea3e7801cdac9a557d2bc0c557bce8991 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/SunImagePlugin.py @@ -0,0 +1,145 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Sun image file handling +# +# History: +# 1995-09-10 fl Created +# 1996-05-28 fl Fixed 32-bit alignment +# 1998-12-29 fl Import ImagePalette module +# 2001-12-18 fl Fixed palette loading (from Jean-Claude Rimbault) +# +# Copyright (c) 1997-2001 by Secret Labs AB +# Copyright (c) 1995-1996 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +from . import Image, ImageFile, ImagePalette +from ._binary import i32be as i32 + + +def _accept(prefix: bytes) -> bool: + return len(prefix) >= 4 and i32(prefix) == 0x59A66A95 + + +## +# Image plugin for Sun raster files. + + +class SunImageFile(ImageFile.ImageFile): + format = "SUN" + format_description = "Sun Raster File" + + def _open(self) -> None: + # The Sun Raster file header is 32 bytes in length + # and has the following format: + + # typedef struct _SunRaster + # { + # DWORD MagicNumber; /* Magic (identification) number */ + # DWORD Width; /* Width of image in pixels */ + # DWORD Height; /* Height of image in pixels */ + # DWORD Depth; /* Number of bits per pixel */ + # DWORD Length; /* Size of image data in bytes */ + # DWORD Type; /* Type of raster file */ + # DWORD ColorMapType; /* Type of color map */ + # DWORD ColorMapLength; /* Size of the color map in bytes */ + # } SUNRASTER; + + assert self.fp is not None + + # HEAD + s = self.fp.read(32) + if not _accept(s): + msg = "not an SUN raster file" + raise SyntaxError(msg) + + offset = 32 + + self._size = i32(s, 4), i32(s, 8) + + depth = i32(s, 12) + # data_length = i32(s, 16) # unreliable, ignore. 
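+        # (The Length header field cannot be trusted: old-style Type 0
+        # files may record 0 here, so the decoder works out the data size
+        # from the width, height and depth fields instead.)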
+ file_type = i32(s, 20) + palette_type = i32(s, 24) # 0: None, 1: RGB, 2: Raw/arbitrary + palette_length = i32(s, 28) + + if depth == 1: + self._mode, rawmode = "1", "1;I" + elif depth == 4: + self._mode, rawmode = "L", "L;4" + elif depth == 8: + self._mode = rawmode = "L" + elif depth == 24: + if file_type == 3: + self._mode, rawmode = "RGB", "RGB" + else: + self._mode, rawmode = "RGB", "BGR" + elif depth == 32: + if file_type == 3: + self._mode, rawmode = "RGB", "RGBX" + else: + self._mode, rawmode = "RGB", "BGRX" + else: + msg = "Unsupported Mode/Bit Depth" + raise SyntaxError(msg) + + if palette_length: + if palette_length > 1024: + msg = "Unsupported Color Palette Length" + raise SyntaxError(msg) + + if palette_type != 1: + msg = "Unsupported Palette Type" + raise SyntaxError(msg) + + offset = offset + palette_length + self.palette = ImagePalette.raw("RGB;L", self.fp.read(palette_length)) + if self.mode == "L": + self._mode = "P" + rawmode = rawmode.replace("L", "P") + + # 16 bit boundaries on stride + stride = ((self.size[0] * depth + 15) // 16) * 2 + + # file type: Type is the version (or flavor) of the bitmap + # file. The following values are typically found in the Type + # field: + # 0000h Old + # 0001h Standard + # 0002h Byte-encoded + # 0003h RGB format + # 0004h TIFF format + # 0005h IFF format + # FFFFh Experimental + + # Old and standard are the same, except for the length tag. + # byte-encoded is run-length-encoded + # RGB looks similar to standard, but RGB byte order + # TIFF and IFF mean that they were converted from T/IFF + # Experimental means that it's something else. + # (https://www.fileformat.info/format/sunraster/egff.htm) + + if file_type in (0, 1, 3, 4, 5): + self.tile = [ + ImageFile._Tile("raw", (0, 0) + self.size, offset, (rawmode, stride)) + ] + elif file_type == 2: + self.tile = [ + ImageFile._Tile("sun_rle", (0, 0) + self.size, offset, rawmode) + ] + else: + msg = "Unsupported Sun Raster file type" + raise SyntaxError(msg) + + +# +# registry + + +Image.register_open(SunImageFile.format, SunImageFile, _accept) + +Image.register_extension(SunImageFile.format, ".ras") diff --git a/py311/lib/python3.11/site-packages/PIL/TarIO.py b/py311/lib/python3.11/site-packages/PIL/TarIO.py new file mode 100644 index 0000000000000000000000000000000000000000..86490a496f3f106fcc042c03fb235ed5fb41f3a7 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/TarIO.py @@ -0,0 +1,61 @@ +# +# The Python Imaging Library. +# $Id$ +# +# read files from within a tar file +# +# History: +# 95-06-18 fl Created +# 96-05-28 fl Open files in binary mode +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1995-96. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import io + +from . import ContainerIO + + +class TarIO(ContainerIO.ContainerIO[bytes]): + """A file object that provides read access to a given member of a TAR file.""" + + def __init__(self, tarfile: str, file: str) -> None: + """ + Create file object. + + :param tarfile: Name of TAR file. + :param file: Name of member file. 
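+
+        A minimal usage sketch (archive and member names are
+        illustrative)::
+
+            from PIL import Image, TarIO
+
+            fp = TarIO.TarIO("images.tar", "hopper.jpg")
+            with Image.open(fp) as im:
+                im.load()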
+ """ + self.fh = open(tarfile, "rb") + + while True: + s = self.fh.read(512) + if len(s) != 512: + self.fh.close() + + msg = "unexpected end of tar file" + raise OSError(msg) + + name = s[:100].decode("utf-8") + i = name.find("\0") + if i == 0: + self.fh.close() + + msg = "cannot find subfile" + raise OSError(msg) + if i > 0: + name = name[:i] + + size = int(s[124:135], 8) + + if file == name: + break + + self.fh.seek((size + 511) & (~511), io.SEEK_CUR) + + # Open region + super().__init__(self.fh, self.fh.tell(), size) diff --git a/py311/lib/python3.11/site-packages/PIL/TgaImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/TgaImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..90d5b5cf4ee17fc050784bf591adae3247407b56 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/TgaImagePlugin.py @@ -0,0 +1,264 @@ +# +# The Python Imaging Library. +# $Id$ +# +# TGA file handling +# +# History: +# 95-09-01 fl created (reads 24-bit files only) +# 97-01-04 fl support more TGA versions, including compressed images +# 98-07-04 fl fixed orientation and alpha layer bugs +# 98-09-11 fl fixed orientation for runlength decoder +# +# Copyright (c) Secret Labs AB 1997-98. +# Copyright (c) Fredrik Lundh 1995-97. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import warnings +from typing import IO + +from . import Image, ImageFile, ImagePalette +from ._binary import i16le as i16 +from ._binary import o8 +from ._binary import o16le as o16 + +# +# -------------------------------------------------------------------- +# Read RGA file + + +MODES = { + # map imagetype/depth to rawmode + (1, 8): "P", + (3, 1): "1", + (3, 8): "L", + (3, 16): "LA", + (2, 16): "BGRA;15Z", + (2, 24): "BGR", + (2, 32): "BGRA", +} + + +## +# Image plugin for Targa files. + + +class TgaImageFile(ImageFile.ImageFile): + format = "TGA" + format_description = "Targa" + + def _open(self) -> None: + # process header + assert self.fp is not None + + s = self.fp.read(18) + + id_len = s[0] + + colormaptype = s[1] + imagetype = s[2] + + depth = s[16] + + flags = s[17] + + self._size = i16(s, 12), i16(s, 14) + + # validate header fields + if ( + colormaptype not in (0, 1) + or self.size[0] <= 0 + or self.size[1] <= 0 + or depth not in (1, 8, 16, 24, 32) + ): + msg = "not a TGA file" + raise SyntaxError(msg) + + # image mode + if imagetype in (3, 11): + self._mode = "L" + if depth == 1: + self._mode = "1" # ??? 
+ elif depth == 16: + self._mode = "LA" + elif imagetype in (1, 9): + self._mode = "P" if colormaptype else "L" + elif imagetype in (2, 10): + self._mode = "RGB" if depth == 24 else "RGBA" + else: + msg = "unknown TGA mode" + raise SyntaxError(msg) + + # orientation + orientation = flags & 0x30 + self._flip_horizontally = orientation in [0x10, 0x30] + if orientation in [0x20, 0x30]: + orientation = 1 + elif orientation in [0, 0x10]: + orientation = -1 + else: + msg = "unknown TGA orientation" + raise SyntaxError(msg) + + self.info["orientation"] = orientation + + if imagetype & 8: + self.info["compression"] = "tga_rle" + + if id_len: + self.info["id_section"] = self.fp.read(id_len) + + if colormaptype: + # read palette + start, size, mapdepth = i16(s, 3), i16(s, 5), s[7] + if mapdepth == 16: + self.palette = ImagePalette.raw( + "BGRA;15Z", bytes(2 * start) + self.fp.read(2 * size) + ) + self.palette.mode = "RGBA" + elif mapdepth == 24: + self.palette = ImagePalette.raw( + "BGR", bytes(3 * start) + self.fp.read(3 * size) + ) + elif mapdepth == 32: + self.palette = ImagePalette.raw( + "BGRA", bytes(4 * start) + self.fp.read(4 * size) + ) + else: + msg = "unknown TGA map depth" + raise SyntaxError(msg) + + # setup tile descriptor + try: + rawmode = MODES[(imagetype & 7, depth)] + if imagetype & 8: + # compressed + self.tile = [ + ImageFile._Tile( + "tga_rle", + (0, 0) + self.size, + self.fp.tell(), + (rawmode, orientation, depth), + ) + ] + else: + self.tile = [ + ImageFile._Tile( + "raw", + (0, 0) + self.size, + self.fp.tell(), + (rawmode, 0, orientation), + ) + ] + except KeyError: + pass # cannot decode + + def load_end(self) -> None: + if self._flip_horizontally: + self.im = self.im.transpose(Image.Transpose.FLIP_LEFT_RIGHT) + + +# +# -------------------------------------------------------------------- +# Write TGA file + + +SAVE = { + "1": ("1", 1, 0, 3), + "L": ("L", 8, 0, 3), + "LA": ("LA", 16, 0, 3), + "P": ("P", 8, 1, 1), + "RGB": ("BGR", 24, 0, 2), + "RGBA": ("BGRA", 32, 0, 2), +} + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + try: + rawmode, bits, colormaptype, imagetype = SAVE[im.mode] + except KeyError as e: + msg = f"cannot write mode {im.mode} as TGA" + raise OSError(msg) from e + + if "rle" in im.encoderinfo: + rle = im.encoderinfo["rle"] + else: + compression = im.encoderinfo.get("compression", im.info.get("compression")) + rle = compression == "tga_rle" + if rle: + imagetype += 8 + + id_section = im.encoderinfo.get("id_section", im.info.get("id_section", "")) + id_len = len(id_section) + if id_len > 255: + id_len = 255 + id_section = id_section[:255] + warnings.warn("id_section has been trimmed to 255 characters") + + if colormaptype: + palette = im.im.getpalette("RGB", "BGR") + colormaplength, colormapentry = len(palette) // 3, 24 + else: + colormaplength, colormapentry = 0, 0 + + if im.mode in ("LA", "RGBA"): + flags = 8 + else: + flags = 0 + + orientation = im.encoderinfo.get("orientation", im.info.get("orientation", -1)) + if orientation > 0: + flags = flags | 0x20 + + fp.write( + o8(id_len) + + o8(colormaptype) + + o8(imagetype) + + o16(0) # colormapfirst + + o16(colormaplength) + + o8(colormapentry) + + o16(0) + + o16(0) + + o16(im.size[0]) + + o16(im.size[1]) + + o8(bits) + + o8(flags) + ) + + if id_section: + fp.write(id_section) + + if colormaptype: + fp.write(palette) + + if rle: + ImageFile._save( + im, + fp, + [ImageFile._Tile("tga_rle", (0, 0) + im.size, 0, (rawmode, orientation))], + ) + else: + ImageFile._save( + im, + fp, + 
[ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, orientation))], + ) + + # write targa version 2 footer + fp.write(b"\000" * 8 + b"TRUEVISION-XFILE." + b"\000") + + +# +# -------------------------------------------------------------------- +# Registry + + +Image.register_open(TgaImageFile.format, TgaImageFile) +Image.register_save(TgaImageFile.format, _save) + +Image.register_extensions(TgaImageFile.format, [".tga", ".icb", ".vda", ".vst"]) + +Image.register_mime(TgaImageFile.format, "image/x-tga") diff --git a/py311/lib/python3.11/site-packages/PIL/TiffImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/TiffImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..daf20f2e899608c28905272de7b5c0620ef5938c --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/TiffImagePlugin.py @@ -0,0 +1,2339 @@ +# +# The Python Imaging Library. +# $Id$ +# +# TIFF file handling +# +# TIFF is a flexible, if somewhat aged, image file format originally +# defined by Aldus. Although TIFF supports a wide variety of pixel +# layouts and compression methods, the name doesn't really stand for +# "thousands of incompatible file formats," it just feels that way. +# +# To read TIFF data from a stream, the stream must be seekable. For +# progressive decoding, make sure to use TIFF files where the tag +# directory is placed first in the file. +# +# History: +# 1995-09-01 fl Created +# 1996-05-04 fl Handle JPEGTABLES tag +# 1996-05-18 fl Fixed COLORMAP support +# 1997-01-05 fl Fixed PREDICTOR support +# 1997-08-27 fl Added support for rational tags (from Perry Stoll) +# 1998-01-10 fl Fixed seek/tell (from Jan Blom) +# 1998-07-15 fl Use private names for internal variables +# 1999-06-13 fl Rewritten for PIL 1.0 (1.0) +# 2000-10-11 fl Additional fixes for Python 2.0 (1.1) +# 2001-04-17 fl Fixed rewind support (seek to frame 0) (1.2) +# 2001-05-12 fl Added write support for more tags (from Greg Couch) (1.3) +# 2001-12-18 fl Added workaround for broken Matrox library +# 2002-01-18 fl Don't mess up if photometric tag is missing (D. Alan Stewart) +# 2003-05-19 fl Check FILLORDER tag +# 2003-09-26 fl Added RGBa support +# 2004-02-24 fl Added DPI support; fixed rational write support +# 2005-02-07 fl Added workaround for broken Corel Draw 10 files +# 2006-01-09 fl Added support for float/double tags (from Russell Nelson) +# +# Copyright (c) 1997-2006 by Secret Labs AB. All rights reserved. +# Copyright (c) 1995-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import io +import itertools +import logging +import math +import os +import struct +import warnings +from collections.abc import Iterator, MutableMapping +from fractions import Fraction +from numbers import Number, Rational +from typing import IO, Any, Callable, NoReturn, cast + +from . import ExifTags, Image, ImageFile, ImageOps, ImagePalette, TiffTags +from ._binary import i16be as i16 +from ._binary import i32be as i32 +from ._binary import o8 +from ._deprecate import deprecate +from ._typing import StrOrBytesPath +from ._util import DeferredError, is_path +from .TiffTags import TYPES + +TYPE_CHECKING = False +if TYPE_CHECKING: + from ._typing import Buffer, IntegralLike + +logger = logging.getLogger(__name__) + +# Set these to true to force use of libtiff for reading or writing. 
+READ_LIBTIFF = False +WRITE_LIBTIFF = False +STRIP_SIZE = 65536 + +II = b"II" # little-endian (Intel style) +MM = b"MM" # big-endian (Motorola style) + +# +# -------------------------------------------------------------------- +# Read TIFF files + +# a few tag names, just to make the code below a bit more readable +OSUBFILETYPE = 255 +IMAGEWIDTH = 256 +IMAGELENGTH = 257 +BITSPERSAMPLE = 258 +COMPRESSION = 259 +PHOTOMETRIC_INTERPRETATION = 262 +FILLORDER = 266 +IMAGEDESCRIPTION = 270 +STRIPOFFSETS = 273 +SAMPLESPERPIXEL = 277 +ROWSPERSTRIP = 278 +STRIPBYTECOUNTS = 279 +X_RESOLUTION = 282 +Y_RESOLUTION = 283 +PLANAR_CONFIGURATION = 284 +RESOLUTION_UNIT = 296 +TRANSFERFUNCTION = 301 +SOFTWARE = 305 +DATE_TIME = 306 +ARTIST = 315 +PREDICTOR = 317 +COLORMAP = 320 +TILEWIDTH = 322 +TILELENGTH = 323 +TILEOFFSETS = 324 +TILEBYTECOUNTS = 325 +SUBIFD = 330 +EXTRASAMPLES = 338 +SAMPLEFORMAT = 339 +JPEGTABLES = 347 +YCBCRSUBSAMPLING = 530 +REFERENCEBLACKWHITE = 532 +COPYRIGHT = 33432 +IPTC_NAA_CHUNK = 33723 # newsphoto properties +PHOTOSHOP_CHUNK = 34377 # photoshop properties +ICCPROFILE = 34675 +EXIFIFD = 34665 +XMP = 700 +JPEGQUALITY = 65537 # pseudo-tag by libtiff + +# https://github.com/imagej/ImageJA/blob/master/src/main/java/ij/io/TiffDecoder.java +IMAGEJ_META_DATA_BYTE_COUNTS = 50838 +IMAGEJ_META_DATA = 50839 + +COMPRESSION_INFO = { + # Compression => pil compression name + 1: "raw", + 2: "tiff_ccitt", + 3: "group3", + 4: "group4", + 5: "tiff_lzw", + 6: "tiff_jpeg", # obsolete + 7: "jpeg", + 8: "tiff_adobe_deflate", + 32771: "tiff_raw_16", # 16-bit padding + 32773: "packbits", + 32809: "tiff_thunderscan", + 32946: "tiff_deflate", + 34676: "tiff_sgilog", + 34677: "tiff_sgilog24", + 34925: "lzma", + 50000: "zstd", + 50001: "webp", +} + +COMPRESSION_INFO_REV = {v: k for k, v in COMPRESSION_INFO.items()} + +OPEN_INFO = { + # (ByteOrder, PhotoInterpretation, SampleFormat, FillOrder, BitsPerSample, + # ExtraSamples) => mode, rawmode + (II, 0, (1,), 1, (1,), ()): ("1", "1;I"), + (MM, 0, (1,), 1, (1,), ()): ("1", "1;I"), + (II, 0, (1,), 2, (1,), ()): ("1", "1;IR"), + (MM, 0, (1,), 2, (1,), ()): ("1", "1;IR"), + (II, 1, (1,), 1, (1,), ()): ("1", "1"), + (MM, 1, (1,), 1, (1,), ()): ("1", "1"), + (II, 1, (1,), 2, (1,), ()): ("1", "1;R"), + (MM, 1, (1,), 2, (1,), ()): ("1", "1;R"), + (II, 0, (1,), 1, (2,), ()): ("L", "L;2I"), + (MM, 0, (1,), 1, (2,), ()): ("L", "L;2I"), + (II, 0, (1,), 2, (2,), ()): ("L", "L;2IR"), + (MM, 0, (1,), 2, (2,), ()): ("L", "L;2IR"), + (II, 1, (1,), 1, (2,), ()): ("L", "L;2"), + (MM, 1, (1,), 1, (2,), ()): ("L", "L;2"), + (II, 1, (1,), 2, (2,), ()): ("L", "L;2R"), + (MM, 1, (1,), 2, (2,), ()): ("L", "L;2R"), + (II, 0, (1,), 1, (4,), ()): ("L", "L;4I"), + (MM, 0, (1,), 1, (4,), ()): ("L", "L;4I"), + (II, 0, (1,), 2, (4,), ()): ("L", "L;4IR"), + (MM, 0, (1,), 2, (4,), ()): ("L", "L;4IR"), + (II, 1, (1,), 1, (4,), ()): ("L", "L;4"), + (MM, 1, (1,), 1, (4,), ()): ("L", "L;4"), + (II, 1, (1,), 2, (4,), ()): ("L", "L;4R"), + (MM, 1, (1,), 2, (4,), ()): ("L", "L;4R"), + (II, 0, (1,), 1, (8,), ()): ("L", "L;I"), + (MM, 0, (1,), 1, (8,), ()): ("L", "L;I"), + (II, 0, (1,), 2, (8,), ()): ("L", "L;IR"), + (MM, 0, (1,), 2, (8,), ()): ("L", "L;IR"), + (II, 1, (1,), 1, (8,), ()): ("L", "L"), + (MM, 1, (1,), 1, (8,), ()): ("L", "L"), + (II, 1, (2,), 1, (8,), ()): ("L", "L"), + (MM, 1, (2,), 1, (8,), ()): ("L", "L"), + (II, 1, (1,), 2, (8,), ()): ("L", "L;R"), + (MM, 1, (1,), 2, (8,), ()): ("L", "L;R"), + (II, 1, (1,), 1, (12,), ()): ("I;16", "I;12"), + (II, 0, (1,), 1, (16,), ()): ("I;16", 
"I;16"), + (II, 1, (1,), 1, (16,), ()): ("I;16", "I;16"), + (MM, 1, (1,), 1, (16,), ()): ("I;16B", "I;16B"), + (II, 1, (1,), 2, (16,), ()): ("I;16", "I;16R"), + (II, 1, (2,), 1, (16,), ()): ("I", "I;16S"), + (MM, 1, (2,), 1, (16,), ()): ("I", "I;16BS"), + (II, 0, (3,), 1, (32,), ()): ("F", "F;32F"), + (MM, 0, (3,), 1, (32,), ()): ("F", "F;32BF"), + (II, 1, (1,), 1, (32,), ()): ("I", "I;32N"), + (II, 1, (2,), 1, (32,), ()): ("I", "I;32S"), + (MM, 1, (2,), 1, (32,), ()): ("I", "I;32BS"), + (II, 1, (3,), 1, (32,), ()): ("F", "F;32F"), + (MM, 1, (3,), 1, (32,), ()): ("F", "F;32BF"), + (II, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"), + (MM, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"), + (II, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), + (MM, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), + (II, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"), + (MM, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"), + (II, 2, (1,), 1, (8, 8, 8, 8), ()): ("RGBA", "RGBA"), # missing ExtraSamples + (MM, 2, (1,), 1, (8, 8, 8, 8), ()): ("RGBA", "RGBA"), # missing ExtraSamples + (II, 2, (1,), 1, (8, 8, 8, 8), (0,)): ("RGB", "RGBX"), + (MM, 2, (1,), 1, (8, 8, 8, 8), (0,)): ("RGB", "RGBX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("RGB", "RGBXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("RGB", "RGBXX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("RGB", "RGBXXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("RGB", "RGBXXX"), + (II, 2, (1,), 1, (8, 8, 8, 8), (1,)): ("RGBA", "RGBa"), + (MM, 2, (1,), 1, (8, 8, 8, 8), (1,)): ("RGBA", "RGBa"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8), (1, 0)): ("RGBA", "RGBaX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (1, 0)): ("RGBA", "RGBaX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (1, 0, 0)): ("RGBA", "RGBaXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (1, 0, 0)): ("RGBA", "RGBaXX"), + (II, 2, (1,), 1, (8, 8, 8, 8), (2,)): ("RGBA", "RGBA"), + (MM, 2, (1,), 1, (8, 8, 8, 8), (2,)): ("RGBA", "RGBA"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8), (2, 0)): ("RGBA", "RGBAX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (2, 0)): ("RGBA", "RGBAX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (2, 0, 0)): ("RGBA", "RGBAXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (2, 0, 0)): ("RGBA", "RGBAXX"), + (II, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"), # Corel Draw 10 + (MM, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"), # Corel Draw 10 + (II, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16L"), + (MM, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), (0,)): ("RGB", "RGBX;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), (0,)): ("RGB", "RGBX;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16B"), + (II, 3, (1,), 1, (1,), ()): ("P", "P;1"), + (MM, 3, (1,), 1, (1,), ()): ("P", "P;1"), + (II, 3, (1,), 2, (1,), ()): ("P", "P;1R"), + (MM, 3, (1,), 2, (1,), ()): ("P", "P;1R"), + (II, 3, (1,), 1, (2,), ()): ("P", "P;2"), + (MM, 3, (1,), 1, (2,), ()): ("P", "P;2"), + (II, 3, (1,), 2, (2,), ()): ("P", "P;2R"), + (MM, 3, (1,), 2, (2,), ()): ("P", "P;2R"), + (II, 3, (1,), 1, (4,), ()): ("P", "P;4"), + (MM, 3, (1,), 1, (4,), ()): ("P", "P;4"), + (II, 3, (1,), 2, (4,), ()): ("P", "P;4R"), + (MM, 3, (1,), 2, (4,), ()): ("P", "P;4R"), + (II, 3, (1,), 1, (8,), ()): 
("P", "P"), + (MM, 3, (1,), 1, (8,), ()): ("P", "P"), + (II, 3, (1,), 1, (8, 8), (0,)): ("P", "PX"), + (II, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"), + (MM, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"), + (II, 3, (1,), 2, (8,), ()): ("P", "P;R"), + (MM, 3, (1,), 2, (8,), ()): ("P", "P;R"), + (II, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"), + (MM, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"), + (II, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"), + (MM, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"), + (II, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"), + (MM, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"), + (II, 5, (1,), 1, (16, 16, 16, 16), ()): ("CMYK", "CMYK;16L"), + (MM, 5, (1,), 1, (16, 16, 16, 16), ()): ("CMYK", "CMYK;16B"), + (II, 6, (1,), 1, (8,), ()): ("L", "L"), + (MM, 6, (1,), 1, (8,), ()): ("L", "L"), + # JPEG compressed images handled by LibTiff and auto-converted to RGBX + # Minimal Baseline TIFF requires YCbCr images to have 3 SamplesPerPixel + (II, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGBX"), + (MM, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGBX"), + (II, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"), + (MM, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"), +} + +MAX_SAMPLESPERPIXEL = max(len(key_tp[4]) for key_tp in OPEN_INFO) + +PREFIXES = [ + b"MM\x00\x2a", # Valid TIFF header with big-endian byte order + b"II\x2a\x00", # Valid TIFF header with little-endian byte order + b"MM\x2a\x00", # Invalid TIFF header, assume big-endian + b"II\x00\x2a", # Invalid TIFF header, assume little-endian + b"MM\x00\x2b", # BigTIFF with big-endian byte order + b"II\x2b\x00", # BigTIFF with little-endian byte order +] + +if not getattr(Image.core, "libtiff_support_custom_tags", True): + deprecate("Support for LibTIFF earlier than version 4", 12) + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(tuple(PREFIXES)) + + +def _limit_rational( + val: float | Fraction | IFDRational, max_val: int +) -> tuple[IntegralLike, IntegralLike]: + inv = abs(val) > 1 + n_d = IFDRational(1 / val if inv else val).limit_rational(max_val) + return n_d[::-1] if inv else n_d + + +def _limit_signed_rational( + val: IFDRational, max_val: int, min_val: int +) -> tuple[IntegralLike, IntegralLike]: + frac = Fraction(val) + n_d: tuple[IntegralLike, IntegralLike] = frac.numerator, frac.denominator + + if min(float(i) for i in n_d) < min_val: + n_d = _limit_rational(val, abs(min_val)) + + n_d_float = tuple(float(i) for i in n_d) + if max(n_d_float) > max_val: + n_d = _limit_rational(n_d_float[0] / n_d_float[1], max_val) + + return n_d + + +## +# Wrapper for TIFF IFDs. + +_load_dispatch = {} +_write_dispatch = {} + + +def _delegate(op: str) -> Any: + def delegate( + self: IFDRational, *args: tuple[float, ...] + ) -> bool | float | Fraction: + return getattr(self._val, op)(*args) + + return delegate + + +class IFDRational(Rational): + """Implements a rational class where 0/0 is a legal value to match + the in the wild use of exif rationals. + + e.g., DigitalZoomRatio - 0.00/0.00 indicates that no digital zoom was used + """ + + """ If the denominator is 0, store this as a float('nan'), otherwise store + as a fractions.Fraction(). 
Delegate as appropriate + + """ + + __slots__ = ("_numerator", "_denominator", "_val") + + def __init__( + self, value: float | Fraction | IFDRational, denominator: int = 1 + ) -> None: + """ + :param value: either an integer numerator, a + float/rational/other number, or an IFDRational + :param denominator: Optional integer denominator + """ + self._val: Fraction | float + if isinstance(value, IFDRational): + self._numerator = value.numerator + self._denominator = value.denominator + self._val = value._val + return + + if isinstance(value, Fraction): + self._numerator = value.numerator + self._denominator = value.denominator + else: + if TYPE_CHECKING: + self._numerator = cast(IntegralLike, value) + else: + self._numerator = value + self._denominator = denominator + + if denominator == 0: + self._val = float("nan") + elif denominator == 1: + self._val = Fraction(value) + elif int(value) == value: + self._val = Fraction(int(value), denominator) + else: + self._val = Fraction(value / denominator) + + @property + def numerator(self) -> IntegralLike: + return self._numerator + + @property + def denominator(self) -> int: + return self._denominator + + def limit_rational(self, max_denominator: int) -> tuple[IntegralLike, int]: + """ + + :param max_denominator: Integer, the maximum denominator value + :returns: Tuple of (numerator, denominator) + """ + + if self.denominator == 0: + return self.numerator, self.denominator + + assert isinstance(self._val, Fraction) + f = self._val.limit_denominator(max_denominator) + return f.numerator, f.denominator + + def __repr__(self) -> str: + return str(float(self._val)) + + def __hash__(self) -> int: # type: ignore[override] + return self._val.__hash__() + + def __eq__(self, other: object) -> bool: + val = self._val + if isinstance(other, IFDRational): + other = other._val + if isinstance(other, float): + val = float(val) + return val == other + + def __getstate__(self) -> list[float | Fraction | IntegralLike]: + return [self._val, self._numerator, self._denominator] + + def __setstate__(self, state: list[float | Fraction | IntegralLike]) -> None: + IFDRational.__init__(self, 0) + _val, _numerator, _denominator = state + assert isinstance(_val, (float, Fraction)) + self._val = _val + if TYPE_CHECKING: + self._numerator = cast(IntegralLike, _numerator) + else: + self._numerator = _numerator + assert isinstance(_denominator, int) + self._denominator = _denominator + + """ a = ['add','radd', 'sub', 'rsub', 'mul', 'rmul', + 'truediv', 'rtruediv', 'floordiv', 'rfloordiv', + 'mod','rmod', 'pow','rpow', 'pos', 'neg', + 'abs', 'trunc', 'lt', 'gt', 'le', 'ge', 'bool', + 'ceil', 'floor', 'round'] + print("\n".join("__%s__ = _delegate('__%s__')" % (s,s) for s in a)) + """ + + __add__ = _delegate("__add__") + __radd__ = _delegate("__radd__") + __sub__ = _delegate("__sub__") + __rsub__ = _delegate("__rsub__") + __mul__ = _delegate("__mul__") + __rmul__ = _delegate("__rmul__") + __truediv__ = _delegate("__truediv__") + __rtruediv__ = _delegate("__rtruediv__") + __floordiv__ = _delegate("__floordiv__") + __rfloordiv__ = _delegate("__rfloordiv__") + __mod__ = _delegate("__mod__") + __rmod__ = _delegate("__rmod__") + __pow__ = _delegate("__pow__") + __rpow__ = _delegate("__rpow__") + __pos__ = _delegate("__pos__") + __neg__ = _delegate("__neg__") + __abs__ = _delegate("__abs__") + __trunc__ = _delegate("__trunc__") + __lt__ = _delegate("__lt__") + __gt__ = _delegate("__gt__") + __le__ = _delegate("__le__") + __ge__ = _delegate("__ge__") + __bool__ = _delegate("__bool__") + 
__ceil__ = _delegate("__ceil__") + __floor__ = _delegate("__floor__") + __round__ = _delegate("__round__") + # Python >= 3.11 + if hasattr(Fraction, "__int__"): + __int__ = _delegate("__int__") + + +_LoaderFunc = Callable[["ImageFileDirectory_v2", bytes, bool], Any] + + +def _register_loader(idx: int, size: int) -> Callable[[_LoaderFunc], _LoaderFunc]: + def decorator(func: _LoaderFunc) -> _LoaderFunc: + from .TiffTags import TYPES + + if func.__name__.startswith("load_"): + TYPES[idx] = func.__name__[5:].replace("_", " ") + _load_dispatch[idx] = size, func # noqa: F821 + return func + + return decorator + + +def _register_writer(idx: int) -> Callable[[Callable[..., Any]], Callable[..., Any]]: + def decorator(func: Callable[..., Any]) -> Callable[..., Any]: + _write_dispatch[idx] = func # noqa: F821 + return func + + return decorator + + +def _register_basic(idx_fmt_name: tuple[int, str, str]) -> None: + from .TiffTags import TYPES + + idx, fmt, name = idx_fmt_name + TYPES[idx] = name + size = struct.calcsize(f"={fmt}") + + def basic_handler( + self: ImageFileDirectory_v2, data: bytes, legacy_api: bool = True + ) -> tuple[Any, ...]: + return self._unpack(f"{len(data) // size}{fmt}", data) + + _load_dispatch[idx] = size, basic_handler # noqa: F821 + _write_dispatch[idx] = lambda self, *values: ( # noqa: F821 + b"".join(self._pack(fmt, value) for value in values) + ) + + +if TYPE_CHECKING: + _IFDv2Base = MutableMapping[int, Any] +else: + _IFDv2Base = MutableMapping + + +class ImageFileDirectory_v2(_IFDv2Base): + """This class represents a TIFF tag directory. To speed things up, we + don't decode tags unless they're asked for. + + Exposes a dictionary interface of the tags in the directory:: + + ifd = ImageFileDirectory_v2() + ifd[key] = 'Some Data' + ifd.tagtype[key] = TiffTags.ASCII + print(ifd[key]) + 'Some Data' + + Individual values are returned as the strings or numbers, sequences are + returned as tuples of the values. + + The tiff metadata type of each item is stored in a dictionary of + tag types in + :attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v2.tagtype`. The types + are read from a tiff file, guessed from the type added, or added + manually. + + Data Structures: + + * ``self.tagtype = {}`` + + * Key: numerical TIFF tag number + * Value: integer corresponding to the data type from + :py:data:`.TiffTags.TYPES` + + .. versionadded:: 3.0.0 + + 'Internal' data structures: + + * ``self._tags_v2 = {}`` + + * Key: numerical TIFF tag number + * Value: decoded data, as tuple for multiple values + + * ``self._tagdata = {}`` + + * Key: numerical TIFF tag number + * Value: undecoded byte string from file + + * ``self._tags_v1 = {}`` + + * Key: numerical TIFF tag number + * Value: decoded data in the v1 format + + Tags will be found in the private attributes ``self._tagdata``, and in + ``self._tags_v2`` once decoded. + + ``self.legacy_api`` is a value for internal use, and shouldn't be changed + from outside code. In cooperation with + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1`, if ``legacy_api`` + is true, then decoded tags will be populated into both ``_tags_v1`` and + ``_tags_v2``. ``_tags_v2`` will be used if this IFD is used in the TIFF + save routine. Tags should be read from ``_tags_v1`` if + ``legacy_api == true``. 
+ + """ + + _load_dispatch: dict[int, tuple[int, _LoaderFunc]] = {} + _write_dispatch: dict[int, Callable[..., Any]] = {} + + def __init__( + self, + ifh: bytes = b"II\x2a\x00\x00\x00\x00\x00", + prefix: bytes | None = None, + group: int | None = None, + ) -> None: + """Initialize an ImageFileDirectory. + + To construct an ImageFileDirectory from a real file, pass the 8-byte + magic header to the constructor. To only set the endianness, pass it + as the 'prefix' keyword argument. + + :param ifh: One of the accepted magic headers (cf. PREFIXES); also sets + endianness. + :param prefix: Override the endianness of the file. + """ + if not _accept(ifh): + msg = f"not a TIFF file (header {repr(ifh)} not valid)" + raise SyntaxError(msg) + self._prefix = prefix if prefix is not None else ifh[:2] + if self._prefix == MM: + self._endian = ">" + elif self._prefix == II: + self._endian = "<" + else: + msg = "not a TIFF IFD" + raise SyntaxError(msg) + self._bigtiff = ifh[2] == 43 + self.group = group + self.tagtype: dict[int, int] = {} + """ Dictionary of tag types """ + self.reset() + self.next = ( + self._unpack("Q", ifh[8:])[0] + if self._bigtiff + else self._unpack("L", ifh[4:])[0] + ) + self._legacy_api = False + + prefix = property(lambda self: self._prefix) + offset = property(lambda self: self._offset) + + @property + def legacy_api(self) -> bool: + return self._legacy_api + + @legacy_api.setter + def legacy_api(self, value: bool) -> NoReturn: + msg = "Not allowing setting of legacy api" + raise Exception(msg) + + def reset(self) -> None: + self._tags_v1: dict[int, Any] = {} # will remain empty if legacy_api is false + self._tags_v2: dict[int, Any] = {} # main tag storage + self._tagdata: dict[int, bytes] = {} + self.tagtype = {} # added 2008-06-05 by Florian Hoech + self._next = None + self._offset: int | None = None + + def __str__(self) -> str: + return str(dict(self)) + + def named(self) -> dict[str, Any]: + """ + :returns: dict of name|key: value + + Returns the complete tag dictionary, with named tags where possible. 
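+
+        e.g. ``{"ImageWidth": 800, "ImageLength": 600, ...}`` (tag names
+        and values here are illustrative)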
+ """ + return { + TiffTags.lookup(code, self.group).name: value + for code, value in self.items() + } + + def __len__(self) -> int: + return len(set(self._tagdata) | set(self._tags_v2)) + + def __getitem__(self, tag: int) -> Any: + if tag not in self._tags_v2: # unpack on the fly + data = self._tagdata[tag] + typ = self.tagtype[tag] + size, handler = self._load_dispatch[typ] + self[tag] = handler(self, data, self.legacy_api) # check type + val = self._tags_v2[tag] + if self.legacy_api and not isinstance(val, (tuple, bytes)): + val = (val,) + return val + + def __contains__(self, tag: object) -> bool: + return tag in self._tags_v2 or tag in self._tagdata + + def __setitem__(self, tag: int, value: Any) -> None: + self._setitem(tag, value, self.legacy_api) + + def _setitem(self, tag: int, value: Any, legacy_api: bool) -> None: + basetypes = (Number, bytes, str) + + info = TiffTags.lookup(tag, self.group) + values = [value] if isinstance(value, basetypes) else value + + if tag not in self.tagtype: + if info.type: + self.tagtype[tag] = info.type + else: + self.tagtype[tag] = TiffTags.UNDEFINED + if all(isinstance(v, IFDRational) for v in values): + for v in values: + assert isinstance(v, IFDRational) + if v < 0: + self.tagtype[tag] = TiffTags.SIGNED_RATIONAL + break + else: + self.tagtype[tag] = TiffTags.RATIONAL + elif all(isinstance(v, int) for v in values): + short = True + signed_short = True + long = True + for v in values: + assert isinstance(v, int) + if short and not (0 <= v < 2**16): + short = False + if signed_short and not (-(2**15) < v < 2**15): + signed_short = False + if long and v < 0: + long = False + if short: + self.tagtype[tag] = TiffTags.SHORT + elif signed_short: + self.tagtype[tag] = TiffTags.SIGNED_SHORT + elif long: + self.tagtype[tag] = TiffTags.LONG + else: + self.tagtype[tag] = TiffTags.SIGNED_LONG + elif all(isinstance(v, float) for v in values): + self.tagtype[tag] = TiffTags.DOUBLE + elif all(isinstance(v, str) for v in values): + self.tagtype[tag] = TiffTags.ASCII + elif all(isinstance(v, bytes) for v in values): + self.tagtype[tag] = TiffTags.BYTE + + if self.tagtype[tag] == TiffTags.UNDEFINED: + values = [ + v.encode("ascii", "replace") if isinstance(v, str) else v + for v in values + ] + elif self.tagtype[tag] == TiffTags.RATIONAL: + values = [float(v) if isinstance(v, int) else v for v in values] + + is_ifd = self.tagtype[tag] == TiffTags.LONG and isinstance(values, dict) + if not is_ifd: + values = tuple( + info.cvt_enum(value) if isinstance(value, str) else value + for value in values + ) + + dest = self._tags_v1 if legacy_api else self._tags_v2 + + # Three branches: + # Spec'd length == 1, Actual length 1, store as element + # Spec'd length == 1, Actual > 1, Warn and truncate. Formerly barfed. + # No Spec, Actual length 1, Formerly (<4.2) returned a 1 element tuple. + # Don't mess with the legacy api, since it's frozen. + if not is_ifd and ( + (info.length == 1) + or self.tagtype[tag] == TiffTags.BYTE + or (info.length is None and len(values) == 1 and not legacy_api) + ): + # Don't mess with the legacy api, since it's frozen. 
+ if legacy_api and self.tagtype[tag] in [ + TiffTags.RATIONAL, + TiffTags.SIGNED_RATIONAL, + ]: # rationals + values = (values,) + try: + (dest[tag],) = values + except ValueError: + # We've got a builtin tag with 1 expected entry + warnings.warn( + f"Metadata Warning, tag {tag} had too many entries: " + f"{len(values)}, expected 1" + ) + dest[tag] = values[0] + + else: + # Spec'd length > 1 or undefined + # Unspec'd, and length > 1 + dest[tag] = values + + def __delitem__(self, tag: int) -> None: + self._tags_v2.pop(tag, None) + self._tags_v1.pop(tag, None) + self._tagdata.pop(tag, None) + + def __iter__(self) -> Iterator[int]: + return iter(set(self._tagdata) | set(self._tags_v2)) + + def _unpack(self, fmt: str, data: bytes) -> tuple[Any, ...]: + return struct.unpack(self._endian + fmt, data) + + def _pack(self, fmt: str, *values: Any) -> bytes: + return struct.pack(self._endian + fmt, *values) + + list( + map( + _register_basic, + [ + (TiffTags.SHORT, "H", "short"), + (TiffTags.LONG, "L", "long"), + (TiffTags.SIGNED_BYTE, "b", "signed byte"), + (TiffTags.SIGNED_SHORT, "h", "signed short"), + (TiffTags.SIGNED_LONG, "l", "signed long"), + (TiffTags.FLOAT, "f", "float"), + (TiffTags.DOUBLE, "d", "double"), + (TiffTags.IFD, "L", "long"), + (TiffTags.LONG8, "Q", "long8"), + ], + ) + ) + + @_register_loader(1, 1) # Basic type, except for the legacy API. + def load_byte(self, data: bytes, legacy_api: bool = True) -> bytes: + return data + + @_register_writer(1) # Basic type, except for the legacy API. + def write_byte(self, data: bytes | int | IFDRational) -> bytes: + if isinstance(data, IFDRational): + data = int(data) + if isinstance(data, int): + data = bytes((data,)) + return data + + @_register_loader(2, 1) + def load_string(self, data: bytes, legacy_api: bool = True) -> str: + if data.endswith(b"\0"): + data = data[:-1] + return data.decode("latin-1", "replace") + + @_register_writer(2) + def write_string(self, value: str | bytes | int) -> bytes: + # remerge of https://github.com/python-pillow/Pillow/pull/1416 + if isinstance(value, int): + value = str(value) + if not isinstance(value, bytes): + value = value.encode("ascii", "replace") + return value + b"\0" + + @_register_loader(5, 8) + def load_rational( + self, data: bytes, legacy_api: bool = True + ) -> tuple[tuple[int, int] | IFDRational, ...]: + vals = self._unpack(f"{len(data) // 4}L", data) + + def combine(a: int, b: int) -> tuple[int, int] | IFDRational: + return (a, b) if legacy_api else IFDRational(a, b) + + return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2])) + + @_register_writer(5) + def write_rational(self, *values: IFDRational) -> bytes: + return b"".join( + self._pack("2L", *_limit_rational(frac, 2**32 - 1)) for frac in values + ) + + @_register_loader(7, 1) + def load_undefined(self, data: bytes, legacy_api: bool = True) -> bytes: + return data + + @_register_writer(7) + def write_undefined(self, value: bytes | int | IFDRational) -> bytes: + if isinstance(value, IFDRational): + value = int(value) + if isinstance(value, int): + value = str(value).encode("ascii", "replace") + return value + + @_register_loader(10, 8) + def load_signed_rational( + self, data: bytes, legacy_api: bool = True + ) -> tuple[tuple[int, int] | IFDRational, ...]: + vals = self._unpack(f"{len(data) // 4}l", data) + + def combine(a: int, b: int) -> tuple[int, int] | IFDRational: + return (a, b) if legacy_api else IFDRational(a, b) + + return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2])) + + 
@_register_writer(10)
+    def write_signed_rational(self, *values: IFDRational) -> bytes:
+        return b"".join(
+            self._pack("2l", *_limit_signed_rational(frac, 2**31 - 1, -(2**31)))
+            for frac in values
+        )
+
+    def _ensure_read(self, fp: IO[bytes], size: int) -> bytes:
+        ret = fp.read(size)
+        if len(ret) != size:
+            msg = (
+                "Corrupt EXIF data. "
+                f"Expecting to read {size} bytes but only got {len(ret)}. "
+            )
+            raise OSError(msg)
+        return ret
+
+    def load(self, fp: IO[bytes]) -> None:
+        self.reset()
+        self._offset = fp.tell()
+
+        try:
+            tag_count = (
+                self._unpack("Q", self._ensure_read(fp, 8))
+                if self._bigtiff
+                else self._unpack("H", self._ensure_read(fp, 2))
+            )[0]
+            for i in range(tag_count):
+                tag, typ, count, data = (
+                    self._unpack("HHQ8s", self._ensure_read(fp, 20))
+                    if self._bigtiff
+                    else self._unpack("HHL4s", self._ensure_read(fp, 12))
+                )
+
+                tagname = TiffTags.lookup(tag, self.group).name
+                typname = TYPES.get(typ, "unknown")
+                msg = f"tag: {tagname} ({tag}) - type: {typname} ({typ})"
+
+                try:
+                    unit_size, handler = self._load_dispatch[typ]
+                except KeyError:
+                    logger.debug("%s - unsupported type %s", msg, typ)
+                    continue  # ignore unsupported type
+                size = count * unit_size
+                if size > (8 if self._bigtiff else 4):
+                    here = fp.tell()
+                    (offset,) = self._unpack("Q" if self._bigtiff else "L", data)
+                    msg += f" Tag Location: {here} - Data Location: {offset}"
+                    fp.seek(offset)
+                    data = ImageFile._safe_read(fp, size)
+                    fp.seek(here)
+                else:
+                    data = data[:size]
+
+                if len(data) != size:
+                    warnings.warn(
+                        "Possibly corrupt EXIF data. "
+                        f"Expecting to read {size} bytes but only got {len(data)}."
+                        f" Skipping tag {tag}"
+                    )
+                    logger.debug(msg)
+                    continue
+
+                if not data:
+                    logger.debug(msg)
+                    continue
+
+                self._tagdata[tag] = data
+                self.tagtype[tag] = typ
+
+                msg += " - value: "
+                msg += f"<table: {size} bytes>" if size > 32 else repr(data)
+
+                logger.debug(msg)
+
+            (self.next,) = (
+                self._unpack("Q", self._ensure_read(fp, 8))
+                if self._bigtiff
+                else self._unpack("L", self._ensure_read(fp, 4))
+            )
+        except OSError as msg:
+            warnings.warn(str(msg))
+            return
+
+    def _get_ifh(self) -> bytes:
+        ifh = self._prefix + self._pack("H", 43 if self._bigtiff else 42)
+        if self._bigtiff:
+            ifh += self._pack("HH", 8, 0)
+        ifh += self._pack("Q", 16) if self._bigtiff else self._pack("L", 8)
+
+        return ifh
+
+    def tobytes(self, offset: int = 0) -> bytes:
+        # FIXME What about tagdata?
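+        # On-disk layout written below: an entry count, then the
+        # fixed-size entries (12 bytes each for classic TIFF, 20 for
+        # BigTIFF), a next-IFD offset, and finally any values too large
+        # to fit inline within their entries.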
+        result = self._pack("Q" if self._bigtiff else "H", len(self._tags_v2))
+
+        entries: list[tuple[int, int, int, bytes, bytes]] = []
+
+        fmt = "Q" if self._bigtiff else "L"
+        fmt_size = 8 if self._bigtiff else 4
+        offset += (
+            len(result) + len(self._tags_v2) * (20 if self._bigtiff else 12) + fmt_size
+        )
+        stripoffsets = None
+
+        # pass 1: convert tags to binary format
+        # always write tags in ascending order
+        for tag, value in sorted(self._tags_v2.items()):
+            if tag == STRIPOFFSETS:
+                stripoffsets = len(entries)
+            typ = self.tagtype[tag]
+            logger.debug("Tag %s, Type: %s, Value: %s", tag, typ, repr(value))
+            is_ifd = typ == TiffTags.LONG and isinstance(value, dict)
+            if is_ifd:
+                ifd = ImageFileDirectory_v2(self._get_ifh(), group=tag)
+                values = self._tags_v2[tag]
+                for ifd_tag, ifd_value in values.items():
+                    ifd[ifd_tag] = ifd_value
+                data = ifd.tobytes(offset)
+            else:
+                values = value if isinstance(value, tuple) else (value,)
+                data = self._write_dispatch[typ](self, *values)
+
+            tagname = TiffTags.lookup(tag, self.group).name
+            typname = "ifd" if is_ifd else TYPES.get(typ, "unknown")
+            msg = f"save: {tagname} ({tag}) - type: {typname} ({typ}) - value: "
+            msg += f"<table: {len(data)} bytes>" if len(data) >= 16 else str(values)
+            logger.debug(msg)
+
+            # count is sum of lengths for string and arbitrary data
+            if is_ifd:
+                count = 1
+            elif typ in [TiffTags.BYTE, TiffTags.ASCII, TiffTags.UNDEFINED]:
+                count = len(data)
+            else:
+                count = len(values)
+            # figure out if data fits into the entry
+            if len(data) <= fmt_size:
+                entries.append((tag, typ, count, data.ljust(fmt_size, b"\0"), b""))
+            else:
+                entries.append((tag, typ, count, self._pack(fmt, offset), data))
+                offset += (len(data) + 1) // 2 * 2  # pad to word
+
+        # update strip offset data to point beyond auxiliary data
+        if stripoffsets is not None:
+            tag, typ, count, value, data = entries[stripoffsets]
+            if data:
+                size, handler = self._load_dispatch[typ]
+                values = [val + offset for val in handler(self, data, self.legacy_api)]
+                data = self._write_dispatch[typ](self, *values)
+            else:
+                value = self._pack(fmt, self._unpack(fmt, value)[0] + offset)
+            entries[stripoffsets] = tag, typ, count, value, data
+
+        # pass 2: write entries to file
+        for tag, typ, count, value, data in entries:
+            logger.debug("%s %s %s %s %s", tag, typ, count, repr(value), repr(data))
+            result += self._pack(
+                "HHQ8s" if self._bigtiff else "HHL4s", tag, typ, count, value
+            )
+
+        # -- overwrite here for multi-page --
+        result += self._pack(fmt, 0)  # end of entries
+
+        # pass 3: write auxiliary data to file
+        for tag, typ, count, value, data in entries:
+            result += data
+            if len(data) & 1:
+                result += b"\0"
+
+        return result
+
+    def save(self, fp: IO[bytes]) -> int:
+        if fp.tell() == 0:  # skip TIFF header on subsequent pages
+            fp.write(self._get_ifh())
+
+        offset = fp.tell()
+        result = self.tobytes(offset)
+        fp.write(result)
+        return offset + len(result)
+
+
+ImageFileDirectory_v2._load_dispatch = _load_dispatch
+ImageFileDirectory_v2._write_dispatch = _write_dispatch
+for idx, name in TYPES.items():
+    name = name.replace(" ", "_")
+    setattr(ImageFileDirectory_v2, f"load_{name}", _load_dispatch[idx][1])
+    setattr(ImageFileDirectory_v2, f"write_{name}", _write_dispatch[idx])
+del _load_dispatch, _write_dispatch, idx, name
+
+
+# Legacy ImageFileDirectory support.
+class ImageFileDirectory_v1(ImageFileDirectory_v2):
+    """This class represents the **legacy** interface to a TIFF tag directory.
+ + Exposes a dictionary interface of the tags in the directory:: + + ifd = ImageFileDirectory_v1() + ifd[key] = 'Some Data' + ifd.tagtype[key] = TiffTags.ASCII + print(ifd[key]) + ('Some Data',) + + Also contains a dictionary of tag types as read from the tiff image file, + :attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v1.tagtype`. + + Values are returned as a tuple. + + .. deprecated:: 3.0.0 + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self._legacy_api = True + + tags = property(lambda self: self._tags_v1) + tagdata = property(lambda self: self._tagdata) + + # defined in ImageFileDirectory_v2 + tagtype: dict[int, int] + """Dictionary of tag types""" + + @classmethod + def from_v2(cls, original: ImageFileDirectory_v2) -> ImageFileDirectory_v1: + """Returns an + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` + instance with the same data as is contained in the original + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + instance. + + :returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` + + """ + + ifd = cls(prefix=original.prefix) + ifd._tagdata = original._tagdata + ifd.tagtype = original.tagtype + ifd.next = original.next # an indicator for multipage tiffs + return ifd + + def to_v2(self) -> ImageFileDirectory_v2: + """Returns an + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + instance with the same data as is contained in the original + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` + instance. + + :returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + + """ + + ifd = ImageFileDirectory_v2(prefix=self.prefix) + ifd._tagdata = dict(self._tagdata) + ifd.tagtype = dict(self.tagtype) + ifd._tags_v2 = dict(self._tags_v2) + return ifd + + def __contains__(self, tag: object) -> bool: + return tag in self._tags_v1 or tag in self._tagdata + + def __len__(self) -> int: + return len(set(self._tagdata) | set(self._tags_v1)) + + def __iter__(self) -> Iterator[int]: + return iter(set(self._tagdata) | set(self._tags_v1)) + + def __setitem__(self, tag: int, value: Any) -> None: + for legacy_api in (False, True): + self._setitem(tag, value, legacy_api) + + def __getitem__(self, tag: int) -> Any: + if tag not in self._tags_v1: # unpack on the fly + data = self._tagdata[tag] + typ = self.tagtype[tag] + size, handler = self._load_dispatch[typ] + for legacy in (False, True): + self._setitem(tag, handler(self, data, legacy), legacy) + val = self._tags_v1[tag] + if not isinstance(val, (tuple, bytes)): + val = (val,) + return val + + +# undone -- switch this pointer +ImageFileDirectory = ImageFileDirectory_v1 + + +## +# Image plugin for TIFF files. 
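+#
+# Pages of a multi-page TIFF are reached through seek()/tell(), e.g.
+# (a sketch; the filename is illustrative):
+#
+#     with Image.open("multipage.tif") as im:
+#         for i in range(im.n_frames):
+#             im.seek(i)
+#             im.load()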
+ + +class TiffImageFile(ImageFile.ImageFile): + format = "TIFF" + format_description = "Adobe TIFF" + _close_exclusive_fp_after_loading = False + + def __init__( + self, + fp: StrOrBytesPath | IO[bytes], + filename: str | bytes | None = None, + ) -> None: + self.tag_v2: ImageFileDirectory_v2 + """ Image file directory (tag dictionary) """ + + self.tag: ImageFileDirectory_v1 + """ Legacy tag entries """ + + super().__init__(fp, filename) + + def _open(self) -> None: + """Open the first image in a TIFF file""" + + # Header + ifh = self.fp.read(8) + if ifh[2] == 43: + ifh += self.fp.read(8) + + self.tag_v2 = ImageFileDirectory_v2(ifh) + + # setup frame pointers + self.__first = self.__next = self.tag_v2.next + self.__frame = -1 + self._fp = self.fp + self._frame_pos: list[int] = [] + self._n_frames: int | None = None + + logger.debug("*** TiffImageFile._open ***") + logger.debug("- __first: %s", self.__first) + logger.debug("- ifh: %s", repr(ifh)) # Use repr to avoid str(bytes) + + # and load the first frame + self._seek(0) + + @property + def n_frames(self) -> int: + current_n_frames = self._n_frames + if current_n_frames is None: + current = self.tell() + self._seek(len(self._frame_pos)) + while self._n_frames is None: + self._seek(self.tell() + 1) + self.seek(current) + assert self._n_frames is not None + return self._n_frames + + def seek(self, frame: int) -> None: + """Select a given frame as current image""" + if not self._seek_check(frame): + return + self._seek(frame) + if self._im is not None and ( + self.im.size != self._tile_size + or self.im.mode != self.mode + or self.readonly + ): + self._im = None + + def _seek(self, frame: int) -> None: + if isinstance(self._fp, DeferredError): + raise self._fp.ex + self.fp = self._fp + + while len(self._frame_pos) <= frame: + if not self.__next: + msg = "no more images in TIFF file" + raise EOFError(msg) + logger.debug( + "Seeking to frame %s, on frame %s, __next %s, location: %s", + frame, + self.__frame, + self.__next, + self.fp.tell(), + ) + if self.__next >= 2**63: + msg = "Unable to seek to frame" + raise ValueError(msg) + self.fp.seek(self.__next) + self._frame_pos.append(self.__next) + logger.debug("Loading tags, location: %s", self.fp.tell()) + self.tag_v2.load(self.fp) + if self.tag_v2.next in self._frame_pos: + # This IFD has already been processed + # Declare this to be the end of the image + self.__next = 0 + else: + self.__next = self.tag_v2.next + if self.__next == 0: + self._n_frames = frame + 1 + if len(self._frame_pos) == 1: + self.is_animated = self.__next != 0 + self.__frame += 1 + self.fp.seek(self._frame_pos[frame]) + self.tag_v2.load(self.fp) + if XMP in self.tag_v2: + xmp = self.tag_v2[XMP] + if isinstance(xmp, tuple) and len(xmp) == 1: + xmp = xmp[0] + self.info["xmp"] = xmp + elif "xmp" in self.info: + del self.info["xmp"] + self._reload_exif() + # fill the legacy tag/ifd entries + self.tag = self.ifd = ImageFileDirectory_v1.from_v2(self.tag_v2) + self.__frame = frame + self._setup() + + def tell(self) -> int: + """Return the current frame number""" + return self.__frame + + def get_photoshop_blocks(self) -> dict[int, dict[str, bytes]]: + """ + Returns a dictionary of Photoshop "Image Resource Blocks". + The keys are the image resource ID. For more information, see + https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#50577409_pgfId-1037727 + + :returns: Photoshop "Image Resource Blocks" in a dictionary. 
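+
+        e.g. ``{1039: {"data": b"..."}}``, where 1039 is the resource ID
+        of an ICC profile (the entry shown is illustrative)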
+        """
+        blocks = {}
+        val = self.tag_v2.get(ExifTags.Base.ImageResources)
+        if val:
+            while val.startswith(b"8BIM"):
+                id = i16(val[4:6])
+                n = math.ceil((val[6] + 1) / 2) * 2
+                size = i32(val[6 + n : 10 + n])
+                data = val[10 + n : 10 + n + size]
+                blocks[id] = {"data": data}
+
+                val = val[math.ceil((10 + n + size) / 2) * 2 :]
+        return blocks
+
+    def load(self) -> Image.core.PixelAccess | None:
+        if self.tile and self.use_load_libtiff:
+            return self._load_libtiff()
+        return super().load()
+
+    def load_prepare(self) -> None:
+        if self._im is None:
+            Image._decompression_bomb_check(self._tile_size)
+            self.im = Image.core.new(self.mode, self._tile_size)
+        ImageFile.ImageFile.load_prepare(self)
+
+    def load_end(self) -> None:
+        # allow closing if we're on the first frame; there's no next frame.
+        # This is the ImageFile.load path only; libtiff is handled below.
+        if not self.is_animated:
+            self._close_exclusive_fp_after_loading = True
+
+            # load IFD data from fp before it is closed
+            exif = self.getexif()
+            for key in TiffTags.TAGS_V2_GROUPS:
+                if key not in exif:
+                    continue
+                exif.get_ifd(key)
+
+        ImageOps.exif_transpose(self, in_place=True)
+        if ExifTags.Base.Orientation in self.tag_v2:
+            del self.tag_v2[ExifTags.Base.Orientation]
+
+    def _load_libtiff(self) -> Image.core.PixelAccess | None:
+        """Overload method triggered when we detect a compressed tiff.
+        Calls out to libtiff."""
+
+        Image.Image.load(self)
+
+        self.load_prepare()
+
+        if not len(self.tile) == 1:
+            msg = "Not exactly one tile"
+            raise OSError(msg)
+
+        # (self._compression, (extents tuple),
+        #  0, (rawmode, self._compression, fp))
+        extents = self.tile[0][1]
+        args = self.tile[0][3]
+
+        # To be nice on memory footprint, if there's a
+        # file descriptor, use that instead of reading
+        # into a string in python.
+        try:
+            fp = hasattr(self.fp, "fileno") and self.fp.fileno()
+            # flush the file descriptor, prevents error on pypy 2.4+
+            # should also eliminate the need for fp.tell
+            # in _seek
+            if hasattr(self.fp, "flush"):
+                self.fp.flush()
+        except OSError:
+            # io.BytesIO has a fileno, but raises an OSError if
+            # it doesn't use a file descriptor.
+            fp = False
+
+        if fp:
+            assert isinstance(args, tuple)
+            args_list = list(args)
+            args_list[2] = fp
+            args = tuple(args_list)
+
+        decoder = Image._getdecoder(self.mode, "libtiff", args, self.decoderconfig)
+        try:
+            decoder.setimage(self.im, extents)
+        except ValueError as e:
+            msg = "Couldn't set the image"
+            raise OSError(msg) from e
+
+        close_self_fp = self._exclusive_fp and not self.is_animated
+        if hasattr(self.fp, "getvalue"):
+            # We've got a StringIO-like thing passed in. Yay for all in memory.
+            # The decoder needs the entire file in one shot, so there's not
+            # a lot we can do here other than give it the entire file,
+            # unless we could do something like get the address of the
+            # underlying string for stringio.
+            #
+            # Rearranging for supporting byteio items, since they have a fileno
+            # that raises an OSError if there's no underlying fp. Easier to
+            # deal with here by reordering.
+            logger.debug("have getvalue. just sending in a string from getvalue")
+            n, err = decoder.decode(self.fp.getvalue())
+        elif fp:
+            # we've got an actual file on disk, pass in the fp.
+            logger.debug("have fileno, calling fileno version of the decoder.")
+            if not close_self_fp:
+                self.fp.seek(0)
+            # Save and restore the file position, because libtiff will move it
+            # outside of the Python runtime, and that will confuse
+            # io.BufferedReader and possibly others.
+            # NOTE: This must use os.lseek(), and not fp.tell()/fp.seek(),
+            # because the buffered read head may already differ from the
+            # actual file position, and fp.seek() may just adjust its internal
+            # pointer and not actually seek the OS file handle.
+            pos = os.lseek(fp, 0, os.SEEK_CUR)
+            # 4 bytes, otherwise the trace might error out
+            n, err = decoder.decode(b"fpfp")
+            os.lseek(fp, pos, os.SEEK_SET)
+        else:
+            # we have something else.
+            logger.debug("don't have fileno or getvalue. just reading")
+            self.fp.seek(0)
+            # UNDONE -- so much for that buffer size thing.
+            n, err = decoder.decode(self.fp.read())
+
+        self.tile = []
+        self.readonly = 0
+
+        self.load_end()
+
+        if close_self_fp:
+            self.fp.close()
+            self.fp = None  # might be shared
+
+        if err < 0:
+            msg = f"decoder error {err}"
+            raise OSError(msg)
+
+        return Image.Image.load(self)
+
+    def _setup(self) -> None:
+        """Setup this image object based on current tags"""
+
+        if 0xBC01 in self.tag_v2:
+            msg = "Windows Media Photo files not yet supported"
+            raise OSError(msg)
+
+        # extract relevant tags
+        self._compression = COMPRESSION_INFO[self.tag_v2.get(COMPRESSION, 1)]
+        self._planar_configuration = self.tag_v2.get(PLANAR_CONFIGURATION, 1)
+
+        # photometric is a required tag, but not everyone is reading
+        # the specification
+        photo = self.tag_v2.get(PHOTOMETRIC_INTERPRETATION, 0)
+
+        # old style jpeg compression images most certainly are YCbCr
+        if self._compression == "tiff_jpeg":
+            photo = 6
+
+        fillorder = self.tag_v2.get(FILLORDER, 1)
+
+        logger.debug("*** Summary ***")
+        logger.debug("- compression: %s", self._compression)
+        logger.debug("- photometric_interpretation: %s", photo)
+        logger.debug("- planar_configuration: %s", self._planar_configuration)
+        logger.debug("- fill_order: %s", fillorder)
+        logger.debug("- YCbCr subsampling: %s", self.tag_v2.get(YCBCRSUBSAMPLING))
+
+        # size
+        try:
+            xsize = self.tag_v2[IMAGEWIDTH]
+            ysize = self.tag_v2[IMAGELENGTH]
+        except KeyError as e:
+            msg = "Missing dimensions"
+            raise TypeError(msg) from e
+        if not isinstance(xsize, int) or not isinstance(ysize, int):
+            msg = "Invalid dimensions"
+            raise ValueError(msg)
+        self._tile_size = xsize, ysize
+        orientation = self.tag_v2.get(ExifTags.Base.Orientation)
+        if orientation in (5, 6, 7, 8):
+            self._size = ysize, xsize
+        else:
+            self._size = xsize, ysize
+
+        logger.debug("- size: %s", self.size)
+
+        sample_format = self.tag_v2.get(SAMPLEFORMAT, (1,))
+        if len(sample_format) > 1 and max(sample_format) == min(sample_format) == 1:
+            # SAMPLEFORMAT is properly per band, so an RGB image will
+            # be (1,1,1). But we don't support per band pixel types,
+            # and anything more than one band is a uint8. So, just
+            # take the first element. Revisit this if adding support
+            # for more exotic images.
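+            # (e.g. an RGB file normally stores SAMPLEFORMAT=(1, 1, 1); it is
+            # collapsed to (1,) here before being used in the mode key below.)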
+            sample_format = (1,)
+
+        bps_tuple = self.tag_v2.get(BITSPERSAMPLE, (1,))
+        extra_tuple = self.tag_v2.get(EXTRASAMPLES, ())
+        if photo in (2, 6, 8):  # RGB, YCbCr, LAB
+            bps_count = 3
+        elif photo == 5:  # CMYK
+            bps_count = 4
+        else:
+            bps_count = 1
+        bps_count += len(extra_tuple)
+        bps_actual_count = len(bps_tuple)
+        samples_per_pixel = self.tag_v2.get(
+            SAMPLESPERPIXEL,
+            3 if self._compression == "tiff_jpeg" and photo in (2, 6) else 1,
+        )
+
+        if samples_per_pixel > MAX_SAMPLESPERPIXEL:
+            # DOS check, samples_per_pixel can be a Long, and we extend the tuple below
+            logger.error(
+                "More samples per pixel than can be decoded: %s", samples_per_pixel
+            )
+            msg = "Invalid value for samples per pixel"
+            raise SyntaxError(msg)
+
+        if samples_per_pixel < bps_actual_count:
+            # If a file has more values in bps_tuple than expected,
+            # remove the excess.
+            bps_tuple = bps_tuple[:samples_per_pixel]
+        elif samples_per_pixel > bps_actual_count and bps_actual_count == 1:
+            # If a file has only one value in bps_tuple, when it should have more,
+            # presume it is the same number of bits for all of the samples.
+            bps_tuple = bps_tuple * samples_per_pixel
+
+        if len(bps_tuple) != samples_per_pixel:
+            msg = "unknown data organization"
+            raise SyntaxError(msg)
+
+        # mode: check photometric interpretation and bits per pixel
+        key = (
+            self.tag_v2.prefix,
+            photo,
+            sample_format,
+            fillorder,
+            bps_tuple,
+            extra_tuple,
+        )
+        logger.debug("format key: %s", key)
+        try:
+            self._mode, rawmode = OPEN_INFO[key]
+        except KeyError as e:
+            logger.debug("- unsupported format")
+            msg = "unknown pixel mode"
+            raise SyntaxError(msg) from e
+
+        logger.debug("- raw mode: %s", rawmode)
+        logger.debug("- pil mode: %s", self.mode)
+
+        self.info["compression"] = self._compression
+
+        xres = self.tag_v2.get(X_RESOLUTION, 1)
+        yres = self.tag_v2.get(Y_RESOLUTION, 1)
+
+        if xres and yres:
+            resunit = self.tag_v2.get(RESOLUTION_UNIT)
+            if resunit == 2:  # dots per inch
+                self.info["dpi"] = (xres, yres)
+            elif resunit == 3:  # dots per centimeter; convert to dpi
+                self.info["dpi"] = (xres * 2.54, yres * 2.54)
+            elif resunit is None:  # used to default to 1, but now 2
+                self.info["dpi"] = (xres, yres)
+                # For backward compatibility,
+                # we also preserve the old behavior
+                self.info["resolution"] = xres, yres
+            else:  # No absolute unit of measurement
+                self.info["resolution"] = xres, yres
+
+        # build tile descriptors
+        x = y = layer = 0
+        self.tile = []
+        self.use_load_libtiff = READ_LIBTIFF or self._compression != "raw"
+        if self.use_load_libtiff:
+            # Decoder expects entire file as one tile.
+            # There's a buffer size limit in load (64k)
+            # so large g4 images will fail if we use that
+            # function.
+            #
+            # Setup the one tile for the whole image, then
+            # use the _load_libtiff function.
+
+            # libtiff handles the fillmode for us, so 1;IR should
+            # actually be 1;I. Including the R double reverses the
+            # bits, so stripes of the image are reversed. See
+            # https://github.com/python-pillow/Pillow/issues/279
+            if fillorder == 2:
+                # Replace fillorder with fillorder=1
+                key = key[:3] + (1,) + key[4:]
+                logger.debug("format key: %s", key)
+                # this should always work, since all the
+                # fillorder==2 modes have a corresponding
+                # fillorder=1 mode
+                self._mode, rawmode = OPEN_INFO[key]
+            # YCbCr images with new jpeg compression with pixels in one plane
+            # unpacked straight into RGB values
+            if (
+                photo == 6
+                and self._compression == "jpeg"
+                and self._planar_configuration == 1
+            ):
+                rawmode = "RGB"
+            # libtiff always returns the bytes in native order.
+            # we're expecting image byte order. So, if the rawmode
+            # contains I;16, we need to convert from native to image
+            # byte order.
+            elif rawmode == "I;16":
+                rawmode = "I;16N"
+            elif rawmode.endswith((";16B", ";16L")):
+                rawmode = rawmode[:-1] + "N"
+
+            # Offset in the tile tuple is 0, we go from 0,0 to
+            # w,h, and we only do this once -- eds
+            a = (rawmode, self._compression, False, self.tag_v2.offset)
+            self.tile.append(ImageFile._Tile("libtiff", (0, 0, xsize, ysize), 0, a))
+
+        elif STRIPOFFSETS in self.tag_v2 or TILEOFFSETS in self.tag_v2:
+            # striped image
+            if STRIPOFFSETS in self.tag_v2:
+                offsets = self.tag_v2[STRIPOFFSETS]
+                h = self.tag_v2.get(ROWSPERSTRIP, ysize)
+                w = xsize
+            else:
+                # tiled image
+                offsets = self.tag_v2[TILEOFFSETS]
+                tilewidth = self.tag_v2.get(TILEWIDTH)
+                h = self.tag_v2.get(TILELENGTH)
+                if not isinstance(tilewidth, int) or not isinstance(h, int):
+                    msg = "Invalid tile dimensions"
+                    raise ValueError(msg)
+                w = tilewidth
+
+            if w == xsize and h == ysize and self._planar_configuration != 2:
+                # Every tile covers the image. Only use the last offset
+                offsets = offsets[-1:]
+
+            for offset in offsets:
+                if x + w > xsize:
+                    stride = w * sum(bps_tuple) / 8  # bytes per line
+                else:
+                    stride = 0
+
+                tile_rawmode = rawmode
+                if self._planar_configuration == 2:
+                    # each band on its own layer
+                    tile_rawmode = rawmode[layer]
+                    # adjust stride width accordingly
+                    stride /= bps_count
+
+                args = (tile_rawmode, int(stride), 1)
+                self.tile.append(
+                    ImageFile._Tile(
+                        self._compression,
+                        (x, y, min(x + w, xsize), min(y + h, ysize)),
+                        offset,
+                        args,
+                    )
+                )
+                x += w
+                if x >= xsize:
+                    x, y = 0, y + h
+                    if y >= ysize:
+                        y = 0
+                        layer += 1
+        else:
+            logger.debug("- unsupported data organization")
+            msg = "unknown data organization"
+            raise SyntaxError(msg)
+
+        # Fix up info.
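+        # (After this point, im.info may carry "compression", "dpi" or
+        # "resolution", and "icc_profile" for callers to inspect.)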
+ if ICCPROFILE in self.tag_v2: + self.info["icc_profile"] = self.tag_v2[ICCPROFILE] + + # fixup palette descriptor + + if self.mode in ["P", "PA"]: + palette = [o8(b // 256) for b in self.tag_v2[COLORMAP]] + self.palette = ImagePalette.raw("RGB;L", b"".join(palette)) + + +# +# -------------------------------------------------------------------- +# Write TIFF files + +# little endian is default except for image modes with +# explicit big endian byte-order + +SAVE_INFO = { + # mode => rawmode, byteorder, photometrics, + # sampleformat, bitspersample, extra + "1": ("1", II, 1, 1, (1,), None), + "L": ("L", II, 1, 1, (8,), None), + "LA": ("LA", II, 1, 1, (8, 8), 2), + "P": ("P", II, 3, 1, (8,), None), + "PA": ("PA", II, 3, 1, (8, 8), 2), + "I": ("I;32S", II, 1, 2, (32,), None), + "I;16": ("I;16", II, 1, 1, (16,), None), + "I;16L": ("I;16L", II, 1, 1, (16,), None), + "F": ("F;32F", II, 1, 3, (32,), None), + "RGB": ("RGB", II, 2, 1, (8, 8, 8), None), + "RGBX": ("RGBX", II, 2, 1, (8, 8, 8, 8), 0), + "RGBA": ("RGBA", II, 2, 1, (8, 8, 8, 8), 2), + "CMYK": ("CMYK", II, 5, 1, (8, 8, 8, 8), None), + "YCbCr": ("YCbCr", II, 6, 1, (8, 8, 8), None), + "LAB": ("LAB", II, 8, 1, (8, 8, 8), None), + "I;16B": ("I;16B", MM, 1, 1, (16,), None), +} + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + try: + rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode] + except KeyError as e: + msg = f"cannot write mode {im.mode} as TIFF" + raise OSError(msg) from e + + encoderinfo = im.encoderinfo + encoderconfig = im.encoderconfig + + ifd = ImageFileDirectory_v2(prefix=prefix) + if encoderinfo.get("big_tiff"): + ifd._bigtiff = True + + try: + compression = encoderinfo["compression"] + except KeyError: + compression = im.info.get("compression") + if isinstance(compression, int): + # compression value may be from BMP. Ignore it + compression = None + if compression is None: + compression = "raw" + elif compression == "tiff_jpeg": + # OJPEG is obsolete, so use new-style JPEG compression instead + compression = "jpeg" + elif compression == "tiff_deflate": + compression = "tiff_adobe_deflate" + + libtiff = WRITE_LIBTIFF or compression != "raw" + + # required for color libtiff images + ifd[PLANAR_CONFIGURATION] = 1 + + ifd[IMAGEWIDTH] = im.size[0] + ifd[IMAGELENGTH] = im.size[1] + + # write any arbitrary tags passed in as an ImageFileDirectory + if "tiffinfo" in encoderinfo: + info = encoderinfo["tiffinfo"] + elif "exif" in encoderinfo: + info = encoderinfo["exif"] + if isinstance(info, bytes): + exif = Image.Exif() + exif.load(info) + info = exif + else: + info = {} + logger.debug("Tiffinfo Keys: %s", list(info)) + if isinstance(info, ImageFileDirectory_v1): + info = info.to_v2() + for key in info: + if isinstance(info, Image.Exif) and key in TiffTags.TAGS_V2_GROUPS: + ifd[key] = info.get_ifd(key) + else: + ifd[key] = info.get(key) + try: + ifd.tagtype[key] = info.tagtype[key] + except Exception: + pass # might not be an IFD. Might not have populated type + + legacy_ifd = {} + if hasattr(im, "tag"): + legacy_ifd = im.tag.to_v2() + + supplied_tags = {**legacy_ifd, **getattr(im, "tag_v2", {})} + for tag in ( + # IFD offset that may not be correct in the saved image + EXIFIFD, + # Determined by the image format and should not be copied from legacy_ifd. 
+ SAMPLEFORMAT, + ): + if tag in supplied_tags: + del supplied_tags[tag] + + # additions written by Greg Couch, gregc@cgl.ucsf.edu + # inspired by image-sig posting from Kevin Cazabon, kcazabon@home.com + if hasattr(im, "tag_v2"): + # preserve tags from original TIFF image file + for key in ( + RESOLUTION_UNIT, + X_RESOLUTION, + Y_RESOLUTION, + IPTC_NAA_CHUNK, + PHOTOSHOP_CHUNK, + XMP, + ): + if key in im.tag_v2: + if key == IPTC_NAA_CHUNK and im.tag_v2.tagtype[key] not in ( + TiffTags.BYTE, + TiffTags.UNDEFINED, + ): + del supplied_tags[key] + else: + ifd[key] = im.tag_v2[key] + ifd.tagtype[key] = im.tag_v2.tagtype[key] + + # preserve ICC profile (should also work when saving other formats + # which support profiles as TIFF) -- 2008-06-06 Florian Hoech + icc = encoderinfo.get("icc_profile", im.info.get("icc_profile")) + if icc: + ifd[ICCPROFILE] = icc + + for key, name in [ + (IMAGEDESCRIPTION, "description"), + (X_RESOLUTION, "resolution"), + (Y_RESOLUTION, "resolution"), + (X_RESOLUTION, "x_resolution"), + (Y_RESOLUTION, "y_resolution"), + (RESOLUTION_UNIT, "resolution_unit"), + (SOFTWARE, "software"), + (DATE_TIME, "date_time"), + (ARTIST, "artist"), + (COPYRIGHT, "copyright"), + ]: + if name in encoderinfo: + ifd[key] = encoderinfo[name] + + dpi = encoderinfo.get("dpi") + if dpi: + ifd[RESOLUTION_UNIT] = 2 + ifd[X_RESOLUTION] = dpi[0] + ifd[Y_RESOLUTION] = dpi[1] + + if bits != (1,): + ifd[BITSPERSAMPLE] = bits + if len(bits) != 1: + ifd[SAMPLESPERPIXEL] = len(bits) + if extra is not None: + ifd[EXTRASAMPLES] = extra + if format != 1: + ifd[SAMPLEFORMAT] = format + + if PHOTOMETRIC_INTERPRETATION not in ifd: + ifd[PHOTOMETRIC_INTERPRETATION] = photo + elif im.mode in ("1", "L") and ifd[PHOTOMETRIC_INTERPRETATION] == 0: + if im.mode == "1": + inverted_im = im.copy() + px = inverted_im.load() + if px is not None: + for y in range(inverted_im.height): + for x in range(inverted_im.width): + px[x, y] = 0 if px[x, y] == 255 else 255 + im = inverted_im + else: + im = ImageOps.invert(im) + + if im.mode in ["P", "PA"]: + lut = im.im.getpalette("RGB", "RGB;L") + colormap = [] + colors = len(lut) // 3 + for i in range(3): + colormap += [v * 256 for v in lut[colors * i : colors * (i + 1)]] + colormap += [0] * (256 - colors) + ifd[COLORMAP] = colormap + # data orientation + w, h = ifd[IMAGEWIDTH], ifd[IMAGELENGTH] + stride = len(bits) * ((w * bits[0] + 7) // 8) + if ROWSPERSTRIP not in ifd: + # aim for given strip size (64 KB by default) when using libtiff writer + if libtiff: + im_strip_size = encoderinfo.get("strip_size", STRIP_SIZE) + rows_per_strip = 1 if stride == 0 else min(im_strip_size // stride, h) + # JPEG encoder expects multiple of 8 rows + if compression == "jpeg": + rows_per_strip = min(((rows_per_strip + 7) // 8) * 8, h) + else: + rows_per_strip = h + if rows_per_strip == 0: + rows_per_strip = 1 + ifd[ROWSPERSTRIP] = rows_per_strip + strip_byte_counts = 1 if stride == 0 else stride * ifd[ROWSPERSTRIP] + strips_per_image = (h + ifd[ROWSPERSTRIP] - 1) // ifd[ROWSPERSTRIP] + if strip_byte_counts >= 2**16: + ifd.tagtype[STRIPBYTECOUNTS] = TiffTags.LONG + ifd[STRIPBYTECOUNTS] = (strip_byte_counts,) * (strips_per_image - 1) + ( + stride * h - strip_byte_counts * (strips_per_image - 1), + ) + ifd[STRIPOFFSETS] = tuple( + range(0, strip_byte_counts * strips_per_image, strip_byte_counts) + ) # this is adjusted by IFD writer + # no compression by default: + ifd[COMPRESSION] = COMPRESSION_INFO_REV.get(compression, 1) + + if im.mode == "YCbCr": + for tag, default_value in { + YCBCRSUBSAMPLING: 
(1, 1), + REFERENCEBLACKWHITE: (0, 255, 128, 255, 128, 255), + }.items(): + ifd.setdefault(tag, default_value) + + blocklist = [TILEWIDTH, TILELENGTH, TILEOFFSETS, TILEBYTECOUNTS] + if libtiff: + if "quality" in encoderinfo: + quality = encoderinfo["quality"] + if not isinstance(quality, int) or quality < 0 or quality > 100: + msg = "Invalid quality setting" + raise ValueError(msg) + if compression != "jpeg": + msg = "quality setting only supported for 'jpeg' compression" + raise ValueError(msg) + ifd[JPEGQUALITY] = quality + + logger.debug("Saving using libtiff encoder") + logger.debug("Items: %s", sorted(ifd.items())) + _fp = 0 + if hasattr(fp, "fileno"): + try: + fp.seek(0) + _fp = fp.fileno() + except io.UnsupportedOperation: + pass + + # optional types for non core tags + types = {} + # STRIPOFFSETS and STRIPBYTECOUNTS are added by the library + # based on the data in the strip. + # OSUBFILETYPE is deprecated. + # The other tags expect arrays with a certain length (fixed or depending on + # BITSPERSAMPLE, etc), passing arrays with a different length will result in + # segfaults. Block these tags until we add extra validation. + # SUBIFD may also cause a segfault. + blocklist += [ + OSUBFILETYPE, + REFERENCEBLACKWHITE, + STRIPBYTECOUNTS, + STRIPOFFSETS, + TRANSFERFUNCTION, + SUBIFD, + ] + + # bits per sample is a single short in the tiff directory, not a list. + atts: dict[int, Any] = {BITSPERSAMPLE: bits[0]} + # Merge the ones that we have with (optional) more bits from + # the original file, e.g x,y resolution so that we can + # save(load('')) == original file. + for tag, value in itertools.chain(ifd.items(), supplied_tags.items()): + # Libtiff can only process certain core items without adding + # them to the custom dictionary. + # Custom items are supported for int, float, unicode, string and byte + # values. Other types and tuples require a tagtype. + if tag not in TiffTags.LIBTIFF_CORE: + if not getattr(Image.core, "libtiff_support_custom_tags", False): + continue + + if tag in TiffTags.TAGS_V2_GROUPS: + types[tag] = TiffTags.LONG8 + elif tag in ifd.tagtype: + types[tag] = ifd.tagtype[tag] + elif not (isinstance(value, (int, float, str, bytes))): + continue + else: + type = TiffTags.lookup(tag).type + if type: + types[tag] = type + if tag not in atts and tag not in blocklist: + if isinstance(value, str): + atts[tag] = value.encode("ascii", "replace") + b"\0" + elif isinstance(value, IFDRational): + atts[tag] = float(value) + else: + atts[tag] = value + + if SAMPLEFORMAT in atts and len(atts[SAMPLEFORMAT]) == 1: + atts[SAMPLEFORMAT] = atts[SAMPLEFORMAT][0] + + logger.debug("Converted items: %s", sorted(atts.items())) + + # libtiff always expects the bytes in native order. + # we're storing image byte order. So, if the rawmode + # contains I;16, we need to convert from native to image + # byte order. + if im.mode in ("I;16", "I;16B", "I;16L"): + rawmode = "I;16N" + + # Pass tags as sorted list so that the tags are set in a fixed order. + # This is required by libtiff for some tags. For example, the JPEGQUALITY + # pseudo tag requires that the COMPRESS tag was already set. 
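+        # (Plain numeric sorting already guarantees that here: COMPRESSION is
+        # tag 259, while JPEGQUALITY is pseudo tag 65537.)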
+ tags = list(atts.items()) + tags.sort() + a = (rawmode, compression, _fp, filename, tags, types) + encoder = Image._getencoder(im.mode, "libtiff", a, encoderconfig) + encoder.setimage(im.im, (0, 0) + im.size) + while True: + errcode, data = encoder.encode(ImageFile.MAXBLOCK)[1:] + if not _fp: + fp.write(data) + if errcode: + break + if errcode < 0: + msg = f"encoder error {errcode} when writing image file" + raise OSError(msg) + + else: + for tag in blocklist: + del ifd[tag] + offset = ifd.save(fp) + + ImageFile._save( + im, + fp, + [ImageFile._Tile("raw", (0, 0) + im.size, offset, (rawmode, stride, 1))], + ) + + # -- helper for multi-page save -- + if "_debug_multipage" in encoderinfo: + # just to access o32 and o16 (using correct byte order) + setattr(im, "_debug_multipage", ifd) + + +class AppendingTiffWriter(io.BytesIO): + fieldSizes = [ + 0, # None + 1, # byte + 1, # ascii + 2, # short + 4, # long + 8, # rational + 1, # sbyte + 1, # undefined + 2, # sshort + 4, # slong + 8, # srational + 4, # float + 8, # double + 4, # ifd + 2, # unicode + 4, # complex + 8, # long8 + ] + + Tags = { + 273, # StripOffsets + 288, # FreeOffsets + 324, # TileOffsets + 519, # JPEGQTables + 520, # JPEGDCTables + 521, # JPEGACTables + } + + def __init__(self, fn: StrOrBytesPath | IO[bytes], new: bool = False) -> None: + self.f: IO[bytes] + if is_path(fn): + self.name = fn + self.close_fp = True + try: + self.f = open(fn, "w+b" if new else "r+b") + except OSError: + self.f = open(fn, "w+b") + else: + self.f = cast(IO[bytes], fn) + self.close_fp = False + self.beginning = self.f.tell() + self.setup() + + def setup(self) -> None: + # Reset everything. + self.f.seek(self.beginning, os.SEEK_SET) + + self.whereToWriteNewIFDOffset: int | None = None + self.offsetOfNewPage = 0 + + self.IIMM = iimm = self.f.read(4) + self._bigtiff = b"\x2b" in iimm + if not iimm: + # empty file - first page + self.isFirst = True + return + + self.isFirst = False + if iimm not in PREFIXES: + msg = "Invalid TIFF file header" + raise RuntimeError(msg) + + self.setEndian("<" if iimm.startswith(II) else ">") + + if self._bigtiff: + self.f.seek(4, os.SEEK_CUR) + self.skipIFDs() + self.goToEnd() + + def finalize(self) -> None: + if self.isFirst: + return + + # fix offsets + self.f.seek(self.offsetOfNewPage) + + iimm = self.f.read(4) + if not iimm: + # Make it easy to finish a frame without committing to a new one. + return + + if iimm != self.IIMM: + msg = "IIMM of new page doesn't match IIMM of first page" + raise RuntimeError(msg) + + if self._bigtiff: + self.f.seek(4, os.SEEK_CUR) + ifd_offset = self._read(8 if self._bigtiff else 4) + ifd_offset += self.offsetOfNewPage + assert self.whereToWriteNewIFDOffset is not None + self.f.seek(self.whereToWriteNewIFDOffset) + self._write(ifd_offset, 8 if self._bigtiff else 4) + self.f.seek(ifd_offset) + self.fixIFD() + + def newFrame(self) -> None: + # Call this to finish a frame. + self.finalize() + self.setup() + + def __enter__(self) -> AppendingTiffWriter: + return self + + def __exit__(self, *args: object) -> None: + if self.close_fp: + self.close() + + def tell(self) -> int: + return self.f.tell() - self.offsetOfNewPage + + def seek(self, offset: int, whence: int = io.SEEK_SET) -> int: + """ + :param offset: Distance to seek. + :param whence: Whether the distance is relative to the start, + end or current position. + :returns: The resulting position, relative to the start. 
+ """ + if whence == os.SEEK_SET: + offset += self.offsetOfNewPage + + self.f.seek(offset, whence) + return self.tell() + + def goToEnd(self) -> None: + self.f.seek(0, os.SEEK_END) + pos = self.f.tell() + + # pad to 16 byte boundary + pad_bytes = 16 - pos % 16 + if 0 < pad_bytes < 16: + self.f.write(bytes(pad_bytes)) + self.offsetOfNewPage = self.f.tell() + + def setEndian(self, endian: str) -> None: + self.endian = endian + self.longFmt = f"{self.endian}L" + self.shortFmt = f"{self.endian}H" + self.tagFormat = f"{self.endian}HH" + ("Q" if self._bigtiff else "L") + + def skipIFDs(self) -> None: + while True: + ifd_offset = self._read(8 if self._bigtiff else 4) + if ifd_offset == 0: + self.whereToWriteNewIFDOffset = self.f.tell() - ( + 8 if self._bigtiff else 4 + ) + break + + self.f.seek(ifd_offset) + num_tags = self._read(8 if self._bigtiff else 2) + self.f.seek(num_tags * (20 if self._bigtiff else 12), os.SEEK_CUR) + + def write(self, data: Buffer, /) -> int: + return self.f.write(data) + + def _fmt(self, field_size: int) -> str: + try: + return {2: "H", 4: "L", 8: "Q"}[field_size] + except KeyError: + msg = "offset is not supported" + raise RuntimeError(msg) + + def _read(self, field_size: int) -> int: + (value,) = struct.unpack( + self.endian + self._fmt(field_size), self.f.read(field_size) + ) + return value + + def readShort(self) -> int: + return self._read(2) + + def readLong(self) -> int: + return self._read(4) + + @staticmethod + def _verify_bytes_written(bytes_written: int | None, expected: int) -> None: + if bytes_written is not None and bytes_written != expected: + msg = f"wrote only {bytes_written} bytes but wanted {expected}" + raise RuntimeError(msg) + + def _rewriteLast( + self, value: int, field_size: int, new_field_size: int = 0 + ) -> None: + self.f.seek(-field_size, os.SEEK_CUR) + if not new_field_size: + new_field_size = field_size + bytes_written = self.f.write( + struct.pack(self.endian + self._fmt(new_field_size), value) + ) + self._verify_bytes_written(bytes_written, new_field_size) + + def rewriteLastShortToLong(self, value: int) -> None: + self._rewriteLast(value, 2, 4) + + def rewriteLastShort(self, value: int) -> None: + return self._rewriteLast(value, 2) + + def rewriteLastLong(self, value: int) -> None: + return self._rewriteLast(value, 4) + + def _write(self, value: int, field_size: int) -> None: + bytes_written = self.f.write( + struct.pack(self.endian + self._fmt(field_size), value) + ) + self._verify_bytes_written(bytes_written, field_size) + + def writeShort(self, value: int) -> None: + self._write(value, 2) + + def writeLong(self, value: int) -> None: + self._write(value, 4) + + def close(self) -> None: + self.finalize() + if self.close_fp: + self.f.close() + + def fixIFD(self) -> None: + num_tags = self._read(8 if self._bigtiff else 2) + + for i in range(num_tags): + tag, field_type, count = struct.unpack( + self.tagFormat, self.f.read(12 if self._bigtiff else 8) + ) + + field_size = self.fieldSizes[field_type] + total_size = field_size * count + fmt_size = 8 if self._bigtiff else 4 + is_local = total_size <= fmt_size + if not is_local: + offset = self._read(fmt_size) + self.offsetOfNewPage + self._rewriteLast(offset, fmt_size) + + if tag in self.Tags: + cur_pos = self.f.tell() + + logger.debug( + "fixIFD: %s (%d) - type: %s (%d) - type size: %d - count: %d", + TiffTags.lookup(tag).name, + tag, + TYPES.get(field_type, "unknown"), + field_type, + field_size, + count, + ) + + if is_local: + self._fixOffsets(count, field_size) + self.f.seek(cur_pos + 
fmt_size)
+                else:
+                    self.f.seek(offset)
+                    self._fixOffsets(count, field_size)
+                    self.f.seek(cur_pos)
+
+            elif is_local:
+                # skip the locally stored value that is not an offset
+                self.f.seek(fmt_size, os.SEEK_CUR)
+
+    def _fixOffsets(self, count: int, field_size: int) -> None:
+        for i in range(count):
+            offset = self._read(field_size)
+            offset += self.offsetOfNewPage
+
+            new_field_size = 0
+            if self._bigtiff and field_size in (2, 4) and offset >= 2**32:
+                # offset is now too large - we must convert long to long8
+                new_field_size = 8
+            elif field_size == 2 and offset >= 2**16:
+                # offset is now too large - we must convert short to long
+                new_field_size = 4
+            if new_field_size:
+                if count != 1:
+                    msg = "not implemented"
+                    raise RuntimeError(msg)  # XXX TODO
+
+                # simple case - the offset is just one and therefore it is
+                # local (not referenced with another offset)
+                self._rewriteLast(offset, field_size, new_field_size)
+                # Move back past the new offset, past 'count', and before 'field_type'
+                rewind = -new_field_size - 4 - 2
+                self.f.seek(rewind, os.SEEK_CUR)
+                self.writeShort(new_field_size)  # rewrite the type
+                self.f.seek(2 - rewind, os.SEEK_CUR)
+            else:
+                self._rewriteLast(offset, field_size)
+
+    def fixOffsets(
+        self, count: int, isShort: bool = False, isLong: bool = False
+    ) -> None:
+        if isShort:
+            field_size = 2
+        elif isLong:
+            field_size = 4
+        else:
+            field_size = 0
+        return self._fixOffsets(count, field_size)
+
+
+def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
+    append_images = list(im.encoderinfo.get("append_images", []))
+    if not hasattr(im, "n_frames") and not append_images:
+        return _save(im, fp, filename)
+
+    cur_idx = im.tell()
+    try:
+        with AppendingTiffWriter(fp) as tf:
+            for ims in [im] + append_images:
+                encoderinfo = ims._attach_default_encoderinfo(im)
+                if not hasattr(ims, "encoderconfig"):
+                    ims.encoderconfig = ()
+                nfr = getattr(ims, "n_frames", 1)
+
+                for idx in range(nfr):
+                    ims.seek(idx)
+                    ims.load()
+                    _save(ims, tf, filename)
+                    tf.newFrame()
+                ims.encoderinfo = encoderinfo
+    finally:
+        im.seek(cur_idx)
+
+
+#
+# --------------------------------------------------------------------
+# Register
+
+Image.register_open(TiffImageFile.format, TiffImageFile, _accept)
+Image.register_save(TiffImageFile.format, _save)
+Image.register_save_all(TiffImageFile.format, _save_all)
+
+Image.register_extensions(TiffImageFile.format, [".tif", ".tiff"])
+
+Image.register_mime(TiffImageFile.format, "image/tiff")
diff --git a/py311/lib/python3.11/site-packages/PIL/TiffTags.py b/py311/lib/python3.11/site-packages/PIL/TiffTags.py
new file mode 100644
index 0000000000000000000000000000000000000000..86adaa45857338f9fa8864296077393a3dc3350f
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/PIL/TiffTags.py
@@ -0,0 +1,562 @@
+#
+# The Python Imaging Library.
+# $Id$
+#
+# TIFF tags
+#
+# This module provides clear-text names for various well-known
+# TIFF tags. The TIFF codec works just fine without it.
+#
+# Copyright (c) Secret Labs AB 1999.
+#
+# See the README file for information on usage and redistribution.
+#
+
+##
+# This module provides constants and clear-text names for various
+# well-known TIFF tags.
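+#
+# e.g. once _populate() below has run at import time, TAGS_V2[256].name
+# is "ImageWidth" and the legacy TAGS[259] entry is "Compression".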
+##
+from __future__ import annotations
+
+from typing import NamedTuple
+
+
+class _TagInfo(NamedTuple):
+    value: int | None
+    name: str
+    type: int | None
+    length: int | None
+    enum: dict[str, int]
+
+
+class TagInfo(_TagInfo):
+    __slots__: list[str] = []
+
+    def __new__(
+        cls,
+        value: int | None = None,
+        name: str = "unknown",
+        type: int | None = None,
+        length: int | None = None,
+        enum: dict[str, int] | None = None,
+    ) -> TagInfo:
+        return super().__new__(cls, value, name, type, length, enum or {})
+
+    def cvt_enum(self, value: str) -> int | str:
+        # Using get will call hash(value), which can be expensive
+        # for some types (e.g. Fraction). Since self.enum is rarely
+        # used, it's usually better to test it first.
+        return self.enum.get(value, value) if self.enum else value
+
+
+def lookup(tag: int, group: int | None = None) -> TagInfo:
+    """
+    :param tag: Integer tag number
+    :param group: Which :py:data:`~PIL.TiffTags.TAGS_V2_GROUPS` to look in
+
+    .. versionadded:: 8.3.0
+
+    :returns: A ``TagInfo`` namedtuple, from the ``TAGS_V2`` info if possible,
+        otherwise just populating the value and name from ``TAGS``.
+        If the tag is not recognized, "unknown" is returned for the name.
+
+    """
+
+    if group is not None:
+        info = TAGS_V2_GROUPS[group].get(tag) if group in TAGS_V2_GROUPS else None
+    else:
+        info = TAGS_V2.get(tag)
+    return info or TagInfo(tag, TAGS.get(tag, "unknown"))
+
+
+##
+# Map tag numbers to tag info.
+#
+# id: (Name, Type, Length[, enum_values])
+#
+# The length here differs from the length in the tiff spec. For
+# numbers, the tiff spec is for the number of fields returned. We
+# agree here. For string-like types, the tiff spec uses the length of
+# field in bytes. In Pillow, we are using the number of expected
+# fields, in general 1 for string-like types.
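+
+# A lookup() usage sketch (illustrative):
+#
+#     from PIL import TiffTags
+#
+#     info = TiffTags.lookup(33432)
+#     info.name   # "Copyright"
+#     info.type   # TiffTags.ASCII, i.e. 2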
+ + +BYTE = 1 +ASCII = 2 +SHORT = 3 +LONG = 4 +RATIONAL = 5 +SIGNED_BYTE = 6 +UNDEFINED = 7 +SIGNED_SHORT = 8 +SIGNED_LONG = 9 +SIGNED_RATIONAL = 10 +FLOAT = 11 +DOUBLE = 12 +IFD = 13 +LONG8 = 16 + +_tags_v2: dict[int, tuple[str, int, int] | tuple[str, int, int, dict[str, int]]] = { + 254: ("NewSubfileType", LONG, 1), + 255: ("SubfileType", SHORT, 1), + 256: ("ImageWidth", LONG, 1), + 257: ("ImageLength", LONG, 1), + 258: ("BitsPerSample", SHORT, 0), + 259: ( + "Compression", + SHORT, + 1, + { + "Uncompressed": 1, + "CCITT 1d": 2, + "Group 3 Fax": 3, + "Group 4 Fax": 4, + "LZW": 5, + "JPEG": 6, + "PackBits": 32773, + }, + ), + 262: ( + "PhotometricInterpretation", + SHORT, + 1, + { + "WhiteIsZero": 0, + "BlackIsZero": 1, + "RGB": 2, + "RGB Palette": 3, + "Transparency Mask": 4, + "CMYK": 5, + "YCbCr": 6, + "CieLAB": 8, + "CFA": 32803, # TIFF/EP, Adobe DNG + "LinearRaw": 32892, # Adobe DNG + }, + ), + 263: ("Threshholding", SHORT, 1), + 264: ("CellWidth", SHORT, 1), + 265: ("CellLength", SHORT, 1), + 266: ("FillOrder", SHORT, 1), + 269: ("DocumentName", ASCII, 1), + 270: ("ImageDescription", ASCII, 1), + 271: ("Make", ASCII, 1), + 272: ("Model", ASCII, 1), + 273: ("StripOffsets", LONG, 0), + 274: ("Orientation", SHORT, 1), + 277: ("SamplesPerPixel", SHORT, 1), + 278: ("RowsPerStrip", LONG, 1), + 279: ("StripByteCounts", LONG, 0), + 280: ("MinSampleValue", SHORT, 0), + 281: ("MaxSampleValue", SHORT, 0), + 282: ("XResolution", RATIONAL, 1), + 283: ("YResolution", RATIONAL, 1), + 284: ("PlanarConfiguration", SHORT, 1, {"Contiguous": 1, "Separate": 2}), + 285: ("PageName", ASCII, 1), + 286: ("XPosition", RATIONAL, 1), + 287: ("YPosition", RATIONAL, 1), + 288: ("FreeOffsets", LONG, 1), + 289: ("FreeByteCounts", LONG, 1), + 290: ("GrayResponseUnit", SHORT, 1), + 291: ("GrayResponseCurve", SHORT, 0), + 292: ("T4Options", LONG, 1), + 293: ("T6Options", LONG, 1), + 296: ("ResolutionUnit", SHORT, 1, {"none": 1, "inch": 2, "cm": 3}), + 297: ("PageNumber", SHORT, 2), + 301: ("TransferFunction", SHORT, 0), + 305: ("Software", ASCII, 1), + 306: ("DateTime", ASCII, 1), + 315: ("Artist", ASCII, 1), + 316: ("HostComputer", ASCII, 1), + 317: ("Predictor", SHORT, 1, {"none": 1, "Horizontal Differencing": 2}), + 318: ("WhitePoint", RATIONAL, 2), + 319: ("PrimaryChromaticities", RATIONAL, 6), + 320: ("ColorMap", SHORT, 0), + 321: ("HalftoneHints", SHORT, 2), + 322: ("TileWidth", LONG, 1), + 323: ("TileLength", LONG, 1), + 324: ("TileOffsets", LONG, 0), + 325: ("TileByteCounts", LONG, 0), + 330: ("SubIFDs", LONG, 0), + 332: ("InkSet", SHORT, 1), + 333: ("InkNames", ASCII, 1), + 334: ("NumberOfInks", SHORT, 1), + 336: ("DotRange", SHORT, 0), + 337: ("TargetPrinter", ASCII, 1), + 338: ("ExtraSamples", SHORT, 0), + 339: ("SampleFormat", SHORT, 0), + 340: ("SMinSampleValue", DOUBLE, 0), + 341: ("SMaxSampleValue", DOUBLE, 0), + 342: ("TransferRange", SHORT, 6), + 347: ("JPEGTables", UNDEFINED, 1), + # obsolete JPEG tags + 512: ("JPEGProc", SHORT, 1), + 513: ("JPEGInterchangeFormat", LONG, 1), + 514: ("JPEGInterchangeFormatLength", LONG, 1), + 515: ("JPEGRestartInterval", SHORT, 1), + 517: ("JPEGLosslessPredictors", SHORT, 0), + 518: ("JPEGPointTransforms", SHORT, 0), + 519: ("JPEGQTables", LONG, 0), + 520: ("JPEGDCTables", LONG, 0), + 521: ("JPEGACTables", LONG, 0), + 529: ("YCbCrCoefficients", RATIONAL, 3), + 530: ("YCbCrSubSampling", SHORT, 2), + 531: ("YCbCrPositioning", SHORT, 1), + 532: ("ReferenceBlackWhite", RATIONAL, 6), + 700: ("XMP", BYTE, 0), + 33432: ("Copyright", ASCII, 1), + 33723: ("IptcNaaInfo", 
UNDEFINED, 1), + 34377: ("PhotoshopInfo", BYTE, 0), + # FIXME add more tags here + 34665: ("ExifIFD", LONG, 1), + 34675: ("ICCProfile", UNDEFINED, 1), + 34853: ("GPSInfoIFD", LONG, 1), + 36864: ("ExifVersion", UNDEFINED, 1), + 37724: ("ImageSourceData", UNDEFINED, 1), + 40965: ("InteroperabilityIFD", LONG, 1), + 41730: ("CFAPattern", UNDEFINED, 1), + # MPInfo + 45056: ("MPFVersion", UNDEFINED, 1), + 45057: ("NumberOfImages", LONG, 1), + 45058: ("MPEntry", UNDEFINED, 1), + 45059: ("ImageUIDList", UNDEFINED, 0), # UNDONE, check + 45060: ("TotalFrames", LONG, 1), + 45313: ("MPIndividualNum", LONG, 1), + 45569: ("PanOrientation", LONG, 1), + 45570: ("PanOverlap_H", RATIONAL, 1), + 45571: ("PanOverlap_V", RATIONAL, 1), + 45572: ("BaseViewpointNum", LONG, 1), + 45573: ("ConvergenceAngle", SIGNED_RATIONAL, 1), + 45574: ("BaselineLength", RATIONAL, 1), + 45575: ("VerticalDivergence", SIGNED_RATIONAL, 1), + 45576: ("AxisDistance_X", SIGNED_RATIONAL, 1), + 45577: ("AxisDistance_Y", SIGNED_RATIONAL, 1), + 45578: ("AxisDistance_Z", SIGNED_RATIONAL, 1), + 45579: ("YawAngle", SIGNED_RATIONAL, 1), + 45580: ("PitchAngle", SIGNED_RATIONAL, 1), + 45581: ("RollAngle", SIGNED_RATIONAL, 1), + 40960: ("FlashPixVersion", UNDEFINED, 1), + 50741: ("MakerNoteSafety", SHORT, 1, {"Unsafe": 0, "Safe": 1}), + 50780: ("BestQualityScale", RATIONAL, 1), + 50838: ("ImageJMetaDataByteCounts", LONG, 0), # Can be more than one + 50839: ("ImageJMetaData", UNDEFINED, 1), # see Issue #2006 +} +_tags_v2_groups = { + # ExifIFD + 34665: { + 36864: ("ExifVersion", UNDEFINED, 1), + 40960: ("FlashPixVersion", UNDEFINED, 1), + 40965: ("InteroperabilityIFD", LONG, 1), + 41730: ("CFAPattern", UNDEFINED, 1), + }, + # GPSInfoIFD + 34853: { + 0: ("GPSVersionID", BYTE, 4), + 1: ("GPSLatitudeRef", ASCII, 2), + 2: ("GPSLatitude", RATIONAL, 3), + 3: ("GPSLongitudeRef", ASCII, 2), + 4: ("GPSLongitude", RATIONAL, 3), + 5: ("GPSAltitudeRef", BYTE, 1), + 6: ("GPSAltitude", RATIONAL, 1), + 7: ("GPSTimeStamp", RATIONAL, 3), + 8: ("GPSSatellites", ASCII, 0), + 9: ("GPSStatus", ASCII, 2), + 10: ("GPSMeasureMode", ASCII, 2), + 11: ("GPSDOP", RATIONAL, 1), + 12: ("GPSSpeedRef", ASCII, 2), + 13: ("GPSSpeed", RATIONAL, 1), + 14: ("GPSTrackRef", ASCII, 2), + 15: ("GPSTrack", RATIONAL, 1), + 16: ("GPSImgDirectionRef", ASCII, 2), + 17: ("GPSImgDirection", RATIONAL, 1), + 18: ("GPSMapDatum", ASCII, 0), + 19: ("GPSDestLatitudeRef", ASCII, 2), + 20: ("GPSDestLatitude", RATIONAL, 3), + 21: ("GPSDestLongitudeRef", ASCII, 2), + 22: ("GPSDestLongitude", RATIONAL, 3), + 23: ("GPSDestBearingRef", ASCII, 2), + 24: ("GPSDestBearing", RATIONAL, 1), + 25: ("GPSDestDistanceRef", ASCII, 2), + 26: ("GPSDestDistance", RATIONAL, 1), + 27: ("GPSProcessingMethod", UNDEFINED, 0), + 28: ("GPSAreaInformation", UNDEFINED, 0), + 29: ("GPSDateStamp", ASCII, 11), + 30: ("GPSDifferential", SHORT, 1), + }, + # InteroperabilityIFD + 40965: {1: ("InteropIndex", ASCII, 1), 2: ("InteropVersion", UNDEFINED, 1)}, +} + +# Legacy Tags structure +# these tags aren't included above, but were in the previous versions +TAGS: dict[int | tuple[int, int], str] = { + 347: "JPEGTables", + 700: "XMP", + # Additional Exif Info + 32932: "Wang Annotation", + 33434: "ExposureTime", + 33437: "FNumber", + 33445: "MD FileTag", + 33446: "MD ScalePixel", + 33447: "MD ColorTable", + 33448: "MD LabName", + 33449: "MD SampleInfo", + 33450: "MD PrepDate", + 33451: "MD PrepTime", + 33452: "MD FileUnits", + 33550: "ModelPixelScaleTag", + 33723: "IptcNaaInfo", + 33918: "INGR Packet Data Tag", + 33919: "INGR Flag 
Registers", + 33920: "IrasB Transformation Matrix", + 33922: "ModelTiepointTag", + 34264: "ModelTransformationTag", + 34377: "PhotoshopInfo", + 34735: "GeoKeyDirectoryTag", + 34736: "GeoDoubleParamsTag", + 34737: "GeoAsciiParamsTag", + 34850: "ExposureProgram", + 34852: "SpectralSensitivity", + 34855: "ISOSpeedRatings", + 34856: "OECF", + 34864: "SensitivityType", + 34865: "StandardOutputSensitivity", + 34866: "RecommendedExposureIndex", + 34867: "ISOSpeed", + 34868: "ISOSpeedLatitudeyyy", + 34869: "ISOSpeedLatitudezzz", + 34908: "HylaFAX FaxRecvParams", + 34909: "HylaFAX FaxSubAddress", + 34910: "HylaFAX FaxRecvTime", + 36864: "ExifVersion", + 36867: "DateTimeOriginal", + 36868: "DateTimeDigitized", + 37121: "ComponentsConfiguration", + 37122: "CompressedBitsPerPixel", + 37724: "ImageSourceData", + 37377: "ShutterSpeedValue", + 37378: "ApertureValue", + 37379: "BrightnessValue", + 37380: "ExposureBiasValue", + 37381: "MaxApertureValue", + 37382: "SubjectDistance", + 37383: "MeteringMode", + 37384: "LightSource", + 37385: "Flash", + 37386: "FocalLength", + 37396: "SubjectArea", + 37500: "MakerNote", + 37510: "UserComment", + 37520: "SubSec", + 37521: "SubSecTimeOriginal", + 37522: "SubsecTimeDigitized", + 40960: "FlashPixVersion", + 40961: "ColorSpace", + 40962: "PixelXDimension", + 40963: "PixelYDimension", + 40964: "RelatedSoundFile", + 40965: "InteroperabilityIFD", + 41483: "FlashEnergy", + 41484: "SpatialFrequencyResponse", + 41486: "FocalPlaneXResolution", + 41487: "FocalPlaneYResolution", + 41488: "FocalPlaneResolutionUnit", + 41492: "SubjectLocation", + 41493: "ExposureIndex", + 41495: "SensingMethod", + 41728: "FileSource", + 41729: "SceneType", + 41730: "CFAPattern", + 41985: "CustomRendered", + 41986: "ExposureMode", + 41987: "WhiteBalance", + 41988: "DigitalZoomRatio", + 41989: "FocalLengthIn35mmFilm", + 41990: "SceneCaptureType", + 41991: "GainControl", + 41992: "Contrast", + 41993: "Saturation", + 41994: "Sharpness", + 41995: "DeviceSettingDescription", + 41996: "SubjectDistanceRange", + 42016: "ImageUniqueID", + 42032: "CameraOwnerName", + 42033: "BodySerialNumber", + 42034: "LensSpecification", + 42035: "LensMake", + 42036: "LensModel", + 42037: "LensSerialNumber", + 42112: "GDAL_METADATA", + 42113: "GDAL_NODATA", + 42240: "Gamma", + 50215: "Oce Scanjob Description", + 50216: "Oce Application Selector", + 50217: "Oce Identification Number", + 50218: "Oce ImageLogic Characteristics", + # Adobe DNG + 50706: "DNGVersion", + 50707: "DNGBackwardVersion", + 50708: "UniqueCameraModel", + 50709: "LocalizedCameraModel", + 50710: "CFAPlaneColor", + 50711: "CFALayout", + 50712: "LinearizationTable", + 50713: "BlackLevelRepeatDim", + 50714: "BlackLevel", + 50715: "BlackLevelDeltaH", + 50716: "BlackLevelDeltaV", + 50717: "WhiteLevel", + 50718: "DefaultScale", + 50719: "DefaultCropOrigin", + 50720: "DefaultCropSize", + 50721: "ColorMatrix1", + 50722: "ColorMatrix2", + 50723: "CameraCalibration1", + 50724: "CameraCalibration2", + 50725: "ReductionMatrix1", + 50726: "ReductionMatrix2", + 50727: "AnalogBalance", + 50728: "AsShotNeutral", + 50729: "AsShotWhiteXY", + 50730: "BaselineExposure", + 50731: "BaselineNoise", + 50732: "BaselineSharpness", + 50733: "BayerGreenSplit", + 50734: "LinearResponseLimit", + 50735: "CameraSerialNumber", + 50736: "LensInfo", + 50737: "ChromaBlurRadius", + 50738: "AntiAliasStrength", + 50740: "DNGPrivateData", + 50778: "CalibrationIlluminant1", + 50779: "CalibrationIlluminant2", + 50784: "Alias Layer Metadata", +} + +TAGS_V2: dict[int, TagInfo] = {} 
+TAGS_V2_GROUPS: dict[int, dict[int, TagInfo]] = {} + + +def _populate() -> None: + for k, v in _tags_v2.items(): + # Populate legacy structure. + TAGS[k] = v[0] + if len(v) == 4: + for sk, sv in v[3].items(): + TAGS[(k, sv)] = sk + + TAGS_V2[k] = TagInfo(k, *v) + + for group, tags in _tags_v2_groups.items(): + TAGS_V2_GROUPS[group] = {k: TagInfo(k, *v) for k, v in tags.items()} + + +_populate() +## +# Map type numbers to type names -- defined in ImageFileDirectory. + +TYPES: dict[int, str] = {} + +# +# These tags are handled by default in libtiff, without +# adding to the custom dictionary. From tif_dir.c, searching for +# case TIFFTAG in the _TIFFVSetField function: +# Line: item. +# 148: case TIFFTAG_SUBFILETYPE: +# 151: case TIFFTAG_IMAGEWIDTH: +# 154: case TIFFTAG_IMAGELENGTH: +# 157: case TIFFTAG_BITSPERSAMPLE: +# 181: case TIFFTAG_COMPRESSION: +# 202: case TIFFTAG_PHOTOMETRIC: +# 205: case TIFFTAG_THRESHHOLDING: +# 208: case TIFFTAG_FILLORDER: +# 214: case TIFFTAG_ORIENTATION: +# 221: case TIFFTAG_SAMPLESPERPIXEL: +# 228: case TIFFTAG_ROWSPERSTRIP: +# 238: case TIFFTAG_MINSAMPLEVALUE: +# 241: case TIFFTAG_MAXSAMPLEVALUE: +# 244: case TIFFTAG_SMINSAMPLEVALUE: +# 247: case TIFFTAG_SMAXSAMPLEVALUE: +# 250: case TIFFTAG_XRESOLUTION: +# 256: case TIFFTAG_YRESOLUTION: +# 262: case TIFFTAG_PLANARCONFIG: +# 268: case TIFFTAG_XPOSITION: +# 271: case TIFFTAG_YPOSITION: +# 274: case TIFFTAG_RESOLUTIONUNIT: +# 280: case TIFFTAG_PAGENUMBER: +# 284: case TIFFTAG_HALFTONEHINTS: +# 288: case TIFFTAG_COLORMAP: +# 294: case TIFFTAG_EXTRASAMPLES: +# 298: case TIFFTAG_MATTEING: +# 305: case TIFFTAG_TILEWIDTH: +# 316: case TIFFTAG_TILELENGTH: +# 327: case TIFFTAG_TILEDEPTH: +# 333: case TIFFTAG_DATATYPE: +# 344: case TIFFTAG_SAMPLEFORMAT: +# 361: case TIFFTAG_IMAGEDEPTH: +# 364: case TIFFTAG_SUBIFD: +# 376: case TIFFTAG_YCBCRPOSITIONING: +# 379: case TIFFTAG_YCBCRSUBSAMPLING: +# 383: case TIFFTAG_TRANSFERFUNCTION: +# 389: case TIFFTAG_REFERENCEBLACKWHITE: +# 393: case TIFFTAG_INKNAMES: + +# Following pseudo-tags are also handled by default in libtiff: +# TIFFTAG_JPEGQUALITY 65537 + +# some of these are not in our TAGS_V2 dict and were included from tiff.h + +# This list also exists in encode.c +LIBTIFF_CORE = { + 255, + 256, + 257, + 258, + 259, + 262, + 263, + 266, + 274, + 277, + 278, + 280, + 281, + 340, + 341, + 282, + 283, + 284, + 286, + 287, + 296, + 297, + 321, + 320, + 338, + 32995, + 322, + 323, + 32998, + 32996, + 339, + 32997, + 330, + 531, + 530, + 301, + 532, + 333, + # as above + 269, # this has been in our tests forever, and works + 65537, +} + +LIBTIFF_CORE.remove(255) # We don't have support for subfiletypes +LIBTIFF_CORE.remove(322) # We don't have support for writing tiled images with libtiff +LIBTIFF_CORE.remove(323) # Tiled images +LIBTIFF_CORE.remove(333) # Ink Names either + +# Note to advanced users: There may be combinations of these +# parameters and values that when added properly, will work and +# produce valid tiff images that may work in your application. +# It is safe to add and remove tags from this set from Pillow's point +# of view so long as you test against libtiff. diff --git a/py311/lib/python3.11/site-packages/PIL/WalImageFile.py b/py311/lib/python3.11/site-packages/PIL/WalImageFile.py new file mode 100644 index 0000000000000000000000000000000000000000..87e32878b1970876913e88fd5e8480d6813392c4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/WalImageFile.py @@ -0,0 +1,127 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# WAL file handling +# +# History: +# 2003-04-23 fl created +# +# Copyright (c) 2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +""" +This reader is based on the specification available from: +https://www.flipcode.com/archives/Quake_2_BSP_File_Format.shtml +and has been tested with a few sample files found using google. + +.. note:: + This format cannot be automatically recognized, so the reader + is not registered for use with :py:func:`PIL.Image.open()`. + To open a WAL file, use the :py:func:`PIL.WalImageFile.open()` function instead. +""" +from __future__ import annotations + +from typing import IO + +from . import Image, ImageFile +from ._binary import i32le as i32 +from ._typing import StrOrBytesPath + + +class WalImageFile(ImageFile.ImageFile): + format = "WAL" + format_description = "Quake2 Texture" + + def _open(self) -> None: + self._mode = "P" + + # read header fields + header = self.fp.read(32 + 24 + 32 + 12) + self._size = i32(header, 32), i32(header, 36) + Image._decompression_bomb_check(self.size) + + # load pixel data + offset = i32(header, 40) + self.fp.seek(offset) + + # strings are null-terminated + self.info["name"] = header[:32].split(b"\0", 1)[0] + next_name = header[56 : 56 + 32].split(b"\0", 1)[0] + if next_name: + self.info["next_name"] = next_name + + def load(self) -> Image.core.PixelAccess | None: + if self._im is None: + self.im = Image.core.new(self.mode, self.size) + self.frombytes(self.fp.read(self.size[0] * self.size[1])) + self.putpalette(quake2palette) + return Image.Image.load(self) + + +def open(filename: StrOrBytesPath | IO[bytes]) -> WalImageFile: + """ + Load texture from a Quake2 WAL texture file. + + By default, a Quake2 standard palette is attached to the texture. + To override the palette, use the :py:func:`PIL.Image.Image.putpalette()` method. + + :param filename: WAL file name, or an opened file handle. + :returns: An image instance. 
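+
+    Example (an illustrative sketch; assumes a file "texture.wal" exists)::
+
+        from PIL import WalImageFile
+
+        im = WalImageFile.open("texture.wal")
+        im.save("texture.png")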
+ """ + return WalImageFile(filename) + + +quake2palette = ( + # default palette taken from piffo 0.93 by Hans Häggström + b"\x01\x01\x01\x0b\x0b\x0b\x12\x12\x12\x17\x17\x17\x1b\x1b\x1b\x1e" + b"\x1e\x1e\x22\x22\x22\x26\x26\x26\x29\x29\x29\x2c\x2c\x2c\x2f\x2f" + b"\x2f\x32\x32\x32\x35\x35\x35\x37\x37\x37\x3a\x3a\x3a\x3c\x3c\x3c" + b"\x24\x1e\x13\x22\x1c\x12\x20\x1b\x12\x1f\x1a\x10\x1d\x19\x10\x1b" + b"\x17\x0f\x1a\x16\x0f\x18\x14\x0d\x17\x13\x0d\x16\x12\x0d\x14\x10" + b"\x0b\x13\x0f\x0b\x10\x0d\x0a\x0f\x0b\x0a\x0d\x0b\x07\x0b\x0a\x07" + b"\x23\x23\x26\x22\x22\x25\x22\x20\x23\x21\x1f\x22\x20\x1e\x20\x1f" + b"\x1d\x1e\x1d\x1b\x1c\x1b\x1a\x1a\x1a\x19\x19\x18\x17\x17\x17\x16" + b"\x16\x14\x14\x14\x13\x13\x13\x10\x10\x10\x0f\x0f\x0f\x0d\x0d\x0d" + b"\x2d\x28\x20\x29\x24\x1c\x27\x22\x1a\x25\x1f\x17\x38\x2e\x1e\x31" + b"\x29\x1a\x2c\x25\x17\x26\x20\x14\x3c\x30\x14\x37\x2c\x13\x33\x28" + b"\x12\x2d\x24\x10\x28\x1f\x0f\x22\x1a\x0b\x1b\x14\x0a\x13\x0f\x07" + b"\x31\x1a\x16\x30\x17\x13\x2e\x16\x10\x2c\x14\x0d\x2a\x12\x0b\x27" + b"\x0f\x0a\x25\x0f\x07\x21\x0d\x01\x1e\x0b\x01\x1c\x0b\x01\x1a\x0b" + b"\x01\x18\x0a\x01\x16\x0a\x01\x13\x0a\x01\x10\x07\x01\x0d\x07\x01" + b"\x29\x23\x1e\x27\x21\x1c\x26\x20\x1b\x25\x1f\x1a\x23\x1d\x19\x21" + b"\x1c\x18\x20\x1b\x17\x1e\x19\x16\x1c\x18\x14\x1b\x17\x13\x19\x14" + b"\x10\x17\x13\x0f\x14\x10\x0d\x12\x0f\x0b\x0f\x0b\x0a\x0b\x0a\x07" + b"\x26\x1a\x0f\x23\x19\x0f\x20\x17\x0f\x1c\x16\x0f\x19\x13\x0d\x14" + b"\x10\x0b\x10\x0d\x0a\x0b\x0a\x07\x33\x22\x1f\x35\x29\x26\x37\x2f" + b"\x2d\x39\x35\x34\x37\x39\x3a\x33\x37\x39\x30\x34\x36\x2b\x31\x34" + b"\x27\x2e\x31\x22\x2b\x2f\x1d\x28\x2c\x17\x25\x2a\x0f\x20\x26\x0d" + b"\x1e\x25\x0b\x1c\x22\x0a\x1b\x20\x07\x19\x1e\x07\x17\x1b\x07\x14" + b"\x18\x01\x12\x16\x01\x0f\x12\x01\x0b\x0d\x01\x07\x0a\x01\x01\x01" + b"\x2c\x21\x21\x2a\x1f\x1f\x29\x1d\x1d\x27\x1c\x1c\x26\x1a\x1a\x24" + b"\x18\x18\x22\x17\x17\x21\x16\x16\x1e\x13\x13\x1b\x12\x12\x18\x10" + b"\x10\x16\x0d\x0d\x12\x0b\x0b\x0d\x0a\x0a\x0a\x07\x07\x01\x01\x01" + b"\x2e\x30\x29\x2d\x2e\x27\x2b\x2c\x26\x2a\x2a\x24\x28\x29\x23\x27" + b"\x27\x21\x26\x26\x1f\x24\x24\x1d\x22\x22\x1c\x1f\x1f\x1a\x1c\x1c" + b"\x18\x19\x19\x16\x17\x17\x13\x13\x13\x10\x0f\x0f\x0d\x0b\x0b\x0a" + b"\x30\x1e\x1b\x2d\x1c\x19\x2c\x1a\x17\x2a\x19\x14\x28\x17\x13\x26" + b"\x16\x10\x24\x13\x0f\x21\x12\x0d\x1f\x10\x0b\x1c\x0f\x0a\x19\x0d" + b"\x0a\x16\x0b\x07\x12\x0a\x07\x0f\x07\x01\x0a\x01\x01\x01\x01\x01" + b"\x28\x29\x38\x26\x27\x36\x25\x26\x34\x24\x24\x31\x22\x22\x2f\x20" + b"\x21\x2d\x1e\x1f\x2a\x1d\x1d\x27\x1b\x1b\x25\x19\x19\x21\x17\x17" + b"\x1e\x14\x14\x1b\x13\x12\x17\x10\x0f\x13\x0d\x0b\x0f\x0a\x07\x07" + b"\x2f\x32\x29\x2d\x30\x26\x2b\x2e\x24\x29\x2c\x21\x27\x2a\x1e\x25" + b"\x28\x1c\x23\x26\x1a\x21\x25\x18\x1e\x22\x14\x1b\x1f\x10\x19\x1c" + b"\x0d\x17\x1a\x0a\x13\x17\x07\x10\x13\x01\x0d\x0f\x01\x0a\x0b\x01" + b"\x01\x3f\x01\x13\x3c\x0b\x1b\x39\x10\x20\x35\x14\x23\x31\x17\x23" + b"\x2d\x18\x23\x29\x18\x3f\x3f\x3f\x3f\x3f\x39\x3f\x3f\x31\x3f\x3f" + b"\x2a\x3f\x3f\x20\x3f\x3f\x14\x3f\x3c\x12\x3f\x39\x0f\x3f\x35\x0b" + b"\x3f\x32\x07\x3f\x2d\x01\x3d\x2a\x01\x3b\x26\x01\x39\x21\x01\x37" + b"\x1d\x01\x34\x1a\x01\x32\x16\x01\x2f\x12\x01\x2d\x0f\x01\x2a\x0b" + b"\x01\x27\x07\x01\x23\x01\x01\x1d\x01\x01\x17\x01\x01\x10\x01\x01" + b"\x3d\x01\x01\x19\x19\x3f\x3f\x01\x01\x01\x01\x3f\x16\x16\x13\x10" + b"\x10\x0f\x0d\x0d\x0b\x3c\x2e\x2a\x36\x27\x20\x30\x21\x18\x29\x1b" + b"\x10\x3c\x39\x37\x37\x32\x2f\x31\x2c\x28\x2b\x26\x21\x30\x22\x20" +) diff --git a/py311/lib/python3.11/site-packages/PIL/WebPImagePlugin.py 
b/py311/lib/python3.11/site-packages/PIL/WebPImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..1716a18ccda48ea47c6176c22914ab1f8b035e7c --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/WebPImagePlugin.py @@ -0,0 +1,320 @@ +from __future__ import annotations + +from io import BytesIO +from typing import IO, Any + +from . import Image, ImageFile + +try: + from . import _webp + + SUPPORTED = True +except ImportError: + SUPPORTED = False + + +_VP8_MODES_BY_IDENTIFIER = { + b"VP8 ": "RGB", + b"VP8X": "RGBA", + b"VP8L": "RGBA", # lossless +} + + +def _accept(prefix: bytes) -> bool | str: + is_riff_file_format = prefix.startswith(b"RIFF") + is_webp_file = prefix[8:12] == b"WEBP" + is_valid_vp8_mode = prefix[12:16] in _VP8_MODES_BY_IDENTIFIER + + if is_riff_file_format and is_webp_file and is_valid_vp8_mode: + if not SUPPORTED: + return ( + "image file could not be identified because WEBP support not installed" + ) + return True + return False + + +class WebPImageFile(ImageFile.ImageFile): + format = "WEBP" + format_description = "WebP image" + __loaded = 0 + __logical_frame = 0 + + def _open(self) -> None: + # Use the newer AnimDecoder API to parse the (possibly) animated file, + # and access muxed chunks like ICC/EXIF/XMP. + self._decoder = _webp.WebPAnimDecoder(self.fp.read()) + + # Get info from decoder + self._size, loop_count, bgcolor, frame_count, mode = self._decoder.get_info() + self.info["loop"] = loop_count + bg_a, bg_r, bg_g, bg_b = ( + (bgcolor >> 24) & 0xFF, + (bgcolor >> 16) & 0xFF, + (bgcolor >> 8) & 0xFF, + bgcolor & 0xFF, + ) + self.info["background"] = (bg_r, bg_g, bg_b, bg_a) + self.n_frames = frame_count + self.is_animated = self.n_frames > 1 + self._mode = "RGB" if mode == "RGBX" else mode + self.rawmode = mode + + # Attempt to read ICC / EXIF / XMP chunks from file + icc_profile = self._decoder.get_chunk("ICCP") + exif = self._decoder.get_chunk("EXIF") + xmp = self._decoder.get_chunk("XMP ") + if icc_profile: + self.info["icc_profile"] = icc_profile + if exif: + self.info["exif"] = exif + if xmp: + self.info["xmp"] = xmp + + # Initialize seek state + self._reset(reset=False) + + def _getexif(self) -> dict[int, Any] | None: + if "exif" not in self.info: + return None + return self.getexif()._get_merged_dict() + + def seek(self, frame: int) -> None: + if not self._seek_check(frame): + return + + # Set logical frame to requested position + self.__logical_frame = frame + + def _reset(self, reset: bool = True) -> None: + if reset: + self._decoder.reset() + self.__physical_frame = 0 + self.__loaded = -1 + self.__timestamp = 0 + + def _get_next(self) -> tuple[bytes, int, int]: + # Get next frame + ret = self._decoder.get_next() + self.__physical_frame += 1 + + # Check if an error occurred + if ret is None: + self._reset() # Reset just to be safe + self.seek(0) + msg = "failed to decode next frame in WebP file" + raise EOFError(msg) + + # Compute duration + data, timestamp = ret + duration = timestamp - self.__timestamp + self.__timestamp = timestamp + + # libwebp gives frame end, adjust to start of frame + timestamp -= duration + return data, timestamp, duration + + def _seek(self, frame: int) -> None: + if self.__physical_frame == frame: + return # Nothing to do + if frame < self.__physical_frame: + self._reset() # Rewind to beginning + while self.__physical_frame < frame: + self._get_next() # Advance to the requested frame + + def load(self) -> Image.core.PixelAccess | None: + if self.__loaded != self.__logical_frame: + 
self._seek(self.__logical_frame) + + # We need to load the image data for this frame + data, timestamp, duration = self._get_next() + self.info["timestamp"] = timestamp + self.info["duration"] = duration + self.__loaded = self.__logical_frame + + # Set tile + if self.fp and self._exclusive_fp: + self.fp.close() + self.fp = BytesIO(data) + self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 0, self.rawmode)] + + return super().load() + + def load_seek(self, pos: int) -> None: + pass + + def tell(self) -> int: + return self.__logical_frame + + +def _convert_frame(im: Image.Image) -> Image.Image: + # Make sure image mode is supported + if im.mode not in ("RGBX", "RGBA", "RGB"): + im = im.convert("RGBA" if im.has_transparency_data else "RGB") + return im + + +def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + encoderinfo = im.encoderinfo.copy() + append_images = list(encoderinfo.get("append_images", [])) + + # If total frame count is 1, then save using the legacy API, which + # will preserve non-alpha modes + total = 0 + for ims in [im] + append_images: + total += getattr(ims, "n_frames", 1) + if total == 1: + _save(im, fp, filename) + return + + background: int | tuple[int, ...] = (0, 0, 0, 0) + if "background" in encoderinfo: + background = encoderinfo["background"] + elif "background" in im.info: + background = im.info["background"] + if isinstance(background, int): + # GifImagePlugin stores a global color table index in + # info["background"]. So it must be converted to an RGBA value + palette = im.getpalette() + if palette: + r, g, b = palette[background * 3 : (background + 1) * 3] + background = (r, g, b, 255) + else: + background = (background, background, background, 255) + + duration = im.encoderinfo.get("duration", im.info.get("duration", 0)) + loop = im.encoderinfo.get("loop", 0) + minimize_size = im.encoderinfo.get("minimize_size", False) + kmin = im.encoderinfo.get("kmin", None) + kmax = im.encoderinfo.get("kmax", None) + allow_mixed = im.encoderinfo.get("allow_mixed", False) + verbose = False + lossless = im.encoderinfo.get("lossless", False) + quality = im.encoderinfo.get("quality", 80) + alpha_quality = im.encoderinfo.get("alpha_quality", 100) + method = im.encoderinfo.get("method", 0) + icc_profile = im.encoderinfo.get("icc_profile") or "" + exif = im.encoderinfo.get("exif", "") + if isinstance(exif, Image.Exif): + exif = exif.tobytes() + xmp = im.encoderinfo.get("xmp", "") + if allow_mixed: + lossless = False + + # Sensible keyframe defaults are from gif2webp.c script + if kmin is None: + kmin = 9 if lossless else 3 + if kmax is None: + kmax = 17 if lossless else 5 + + # Validate background color + if ( + not isinstance(background, (list, tuple)) + or len(background) != 4 + or not all(0 <= v < 256 for v in background) + ): + msg = f"Background color is not an RGBA tuple clamped to (0-255): {background}" + raise OSError(msg) + + # Convert to packed uint + bg_r, bg_g, bg_b, bg_a = background + background = (bg_a << 24) | (bg_r << 16) | (bg_g << 8) | (bg_b << 0) + + # Setup the WebP animation encoder + enc = _webp.WebPAnimEncoder( + im.size, + background, + loop, + minimize_size, + kmin, + kmax, + allow_mixed, + verbose, + ) + + # Add each frame + frame_idx = 0 + timestamp = 0 + cur_idx = im.tell() + try: + for ims in [im] + append_images: + # Get number of frames in this image + nfr = getattr(ims, "n_frames", 1) + + for idx in range(nfr): + ims.seek(idx) + + frame = _convert_frame(ims) + + # Append the frame to the animation encoder + enc.add( + 
frame.getim(), + round(timestamp), + lossless, + quality, + alpha_quality, + method, + ) + + # Update timestamp and frame index + if isinstance(duration, (list, tuple)): + timestamp += duration[frame_idx] + else: + timestamp += duration + frame_idx += 1 + + finally: + im.seek(cur_idx) + + # Force encoder to flush frames + enc.add(None, round(timestamp), lossless, quality, alpha_quality, 0) + + # Get the final output from the encoder + data = enc.assemble(icc_profile, exif, xmp) + if data is None: + msg = "cannot write file as WebP (encoder returned None)" + raise OSError(msg) + + fp.write(data) + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + lossless = im.encoderinfo.get("lossless", False) + quality = im.encoderinfo.get("quality", 80) + alpha_quality = im.encoderinfo.get("alpha_quality", 100) + icc_profile = im.encoderinfo.get("icc_profile") or "" + exif = im.encoderinfo.get("exif", b"") + if isinstance(exif, Image.Exif): + exif = exif.tobytes() + if exif.startswith(b"Exif\x00\x00"): + exif = exif[6:] + xmp = im.encoderinfo.get("xmp", "") + method = im.encoderinfo.get("method", 4) + exact = 1 if im.encoderinfo.get("exact") else 0 + + im = _convert_frame(im) + + data = _webp.WebPEncode( + im.getim(), + lossless, + float(quality), + float(alpha_quality), + icc_profile, + method, + exact, + exif, + xmp, + ) + if data is None: + msg = "cannot write file as WebP (encoder returned None)" + raise OSError(msg) + + fp.write(data) + + +Image.register_open(WebPImageFile.format, WebPImageFile, _accept) +if SUPPORTED: + Image.register_save(WebPImageFile.format, _save) + Image.register_save_all(WebPImageFile.format, _save_all) + Image.register_extension(WebPImageFile.format, ".webp") + Image.register_mime(WebPImageFile.format, "image/webp") diff --git a/py311/lib/python3.11/site-packages/PIL/WmfImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/WmfImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..d569cb4b819db35966c67fbe75518c719973ef59 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/WmfImagePlugin.py @@ -0,0 +1,186 @@ +# +# The Python Imaging Library +# $Id$ +# +# WMF stub codec +# +# history: +# 1996-12-14 fl Created +# 2004-02-22 fl Turned into a stub driver +# 2004-02-23 fl Added EMF support +# +# Copyright (c) Secret Labs AB 1997-2004. All rights reserved. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# +# WMF/EMF reference documentation: +# https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf +# http://wvware.sourceforge.net/caolan/index.html +# http://wvware.sourceforge.net/caolan/ora-wmf.html +from __future__ import annotations + +from typing import IO + +from . import Image, ImageFile +from ._binary import i16le as word +from ._binary import si16le as short +from ._binary import si32le as _long + +_handler = None + + +def register_handler(handler: ImageFile.StubHandler | None) -> None: + """ + Install application-specific WMF image handler. + + :param handler: Handler object. 
+ """ + global _handler + _handler = handler + + +if hasattr(Image.core, "drawwmf"): + # install default handler (windows only) + + class WmfHandler(ImageFile.StubHandler): + def open(self, im: ImageFile.StubImageFile) -> None: + im._mode = "RGB" + self.bbox = im.info["wmf_bbox"] + + def load(self, im: ImageFile.StubImageFile) -> Image.Image: + im.fp.seek(0) # rewind + return Image.frombytes( + "RGB", + im.size, + Image.core.drawwmf(im.fp.read(), im.size, self.bbox), + "raw", + "BGR", + (im.size[0] * 3 + 3) & -4, + -1, + ) + + register_handler(WmfHandler()) + +# +# -------------------------------------------------------------------- +# Read WMF file + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith((b"\xd7\xcd\xc6\x9a\x00\x00", b"\x01\x00\x00\x00")) + + +## +# Image plugin for Windows metafiles. + + +class WmfStubImageFile(ImageFile.StubImageFile): + format = "WMF" + format_description = "Windows Metafile" + + def _open(self) -> None: + # check placable header + s = self.fp.read(44) + + if s.startswith(b"\xd7\xcd\xc6\x9a\x00\x00"): + # placeable windows metafile + + # get units per inch + inch = word(s, 14) + if inch == 0: + msg = "Invalid inch" + raise ValueError(msg) + self._inch: tuple[float, float] = inch, inch + + # get bounding box + x0 = short(s, 6) + y0 = short(s, 8) + x1 = short(s, 10) + y1 = short(s, 12) + + # normalize size to 72 dots per inch + self.info["dpi"] = 72 + size = ( + (x1 - x0) * self.info["dpi"] // inch, + (y1 - y0) * self.info["dpi"] // inch, + ) + + self.info["wmf_bbox"] = x0, y0, x1, y1 + + # sanity check (standard metafile header) + if s[22:26] != b"\x01\x00\t\x00": + msg = "Unsupported WMF file format" + raise SyntaxError(msg) + + elif s.startswith(b"\x01\x00\x00\x00") and s[40:44] == b" EMF": + # enhanced metafile + + # get bounding box + x0 = _long(s, 8) + y0 = _long(s, 12) + x1 = _long(s, 16) + y1 = _long(s, 20) + + # get frame (in 0.01 millimeter units) + frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36) + + size = x1 - x0, y1 - y0 + + # calculate dots per inch from bbox and frame + xdpi = 2540.0 * (x1 - x0) / (frame[2] - frame[0]) + ydpi = 2540.0 * (y1 - y0) / (frame[3] - frame[1]) + + self.info["wmf_bbox"] = x0, y0, x1, y1 + + if xdpi == ydpi: + self.info["dpi"] = xdpi + else: + self.info["dpi"] = xdpi, ydpi + self._inch = xdpi, ydpi + + else: + msg = "Unsupported file format" + raise SyntaxError(msg) + + self._mode = "RGB" + self._size = size + + loader = self._load() + if loader: + loader.open(self) + + def _load(self) -> ImageFile.StubHandler | None: + return _handler + + def load( + self, dpi: float | tuple[float, float] | None = None + ) -> Image.core.PixelAccess | None: + if dpi is not None: + self.info["dpi"] = dpi + x0, y0, x1, y1 = self.info["wmf_bbox"] + if not isinstance(dpi, tuple): + dpi = dpi, dpi + self._size = ( + int((x1 - x0) * dpi[0] / self._inch[0]), + int((y1 - y0) * dpi[1] / self._inch[1]), + ) + return super().load() + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + if _handler is None or not hasattr(_handler, "save"): + msg = "WMF save handler not installed" + raise OSError(msg) + _handler.save(im, fp, filename) + + +# +# -------------------------------------------------------------------- +# Registry stuff + + +Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept) +Image.register_save(WmfStubImageFile.format, _save) + +Image.register_extensions(WmfStubImageFile.format, [".wmf", ".emf"]) diff --git 
a/py311/lib/python3.11/site-packages/PIL/XVThumbImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/XVThumbImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..cde28388ff0535262770dd0336ee2b48db2178ba --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/XVThumbImagePlugin.py @@ -0,0 +1,83 @@ +# +# The Python Imaging Library. +# $Id$ +# +# XV Thumbnail file handler by Charles E. "Gene" Cash +# (gcash@magicnet.net) +# +# see xvcolor.c and xvbrowse.c in the sources to John Bradley's XV, +# available from ftp://ftp.cis.upenn.edu/pub/xv/ +# +# history: +# 98-08-15 cec created (b/w only) +# 98-12-09 cec added color palette +# 98-12-28 fl added to PIL (with only a few very minor modifications) +# +# To do: +# FIXME: make save work (this requires quantization support) +# +from __future__ import annotations + +from . import Image, ImageFile, ImagePalette +from ._binary import o8 + +_MAGIC = b"P7 332" + +# standard color palette for thumbnails (RGB332) +PALETTE = b"" +for r in range(8): + for g in range(8): + for b in range(4): + PALETTE = PALETTE + ( + o8((r * 255) // 7) + o8((g * 255) // 7) + o8((b * 255) // 3) + ) + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(_MAGIC) + + +## +# Image plugin for XV thumbnail images. + + +class XVThumbImageFile(ImageFile.ImageFile): + format = "XVThumb" + format_description = "XV thumbnail image" + + def _open(self) -> None: + # check magic + assert self.fp is not None + + if not _accept(self.fp.read(6)): + msg = "not an XV thumbnail file" + raise SyntaxError(msg) + + # Skip to beginning of next line + self.fp.readline() + + # skip info comments + while True: + s = self.fp.readline() + if not s: + msg = "Unexpected EOF reading XV thumbnail file" + raise SyntaxError(msg) + if s[0] != 35: # ie. when not a comment: '#' + break + + # parse header line (already read) + s = s.strip().split() + + self._mode = "P" + self._size = int(s[0]), int(s[1]) + + self.palette = ImagePalette.raw("RGB", PALETTE) + + self.tile = [ + ImageFile._Tile("raw", (0, 0) + self.size, self.fp.tell(), self.mode) + ] + + +# -------------------------------------------------------------------- + +Image.register_open(XVThumbImageFile.format, XVThumbImageFile, _accept) diff --git a/py311/lib/python3.11/site-packages/PIL/XbmImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/XbmImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..1e57aa162ea4f8618dac66cf042352f73d2199c8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/XbmImagePlugin.py @@ -0,0 +1,98 @@ +# +# The Python Imaging Library. +# $Id$ +# +# XBM File handling +# +# History: +# 1995-09-08 fl Created +# 1996-11-01 fl Added save support +# 1997-07-07 fl Made header parser more tolerant +# 1997-07-22 fl Fixed yet another parser bug +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4) +# 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog) +# 2004-02-24 fl Allow some whitespace before first #define +# +# Copyright (c) 1997-2004 by Secret Labs AB +# Copyright (c) 1996-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import re +from typing import IO + +from . 
import Image, ImageFile + +# XBM header +xbm_head = re.compile( + rb"\s*#define[ \t]+.*_width[ \t]+(?P<width>[0-9]+)[\r\n]+" + b"#define[ \t]+.*_height[ \t]+(?P<height>[0-9]+)[\r\n]+" + b"(?P<hotspot>" + b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+" + b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+" + b")?" + rb"[\000-\377]*_bits\[]" +) + + +def _accept(prefix: bytes) -> bool: + return prefix.lstrip().startswith(b"#define") + + +## +# Image plugin for X11 bitmaps. + + +class XbmImageFile(ImageFile.ImageFile): + format = "XBM" + format_description = "X11 Bitmap" + + def _open(self) -> None: + assert self.fp is not None + + m = xbm_head.match(self.fp.read(512)) + + if not m: + msg = "not a XBM file" + raise SyntaxError(msg) + + xsize = int(m.group("width")) + ysize = int(m.group("height")) + + if m.group("hotspot"): + self.info["hotspot"] = (int(m.group("xhot")), int(m.group("yhot"))) + + self._mode = "1" + self._size = xsize, ysize + + self.tile = [ImageFile._Tile("xbm", (0, 0) + self.size, m.end())] + + +def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: + if im.mode != "1": + msg = f"cannot write mode {im.mode} as XBM" + raise OSError(msg) + + fp.write(f"#define im_width {im.size[0]}\n".encode("ascii")) + fp.write(f"#define im_height {im.size[1]}\n".encode("ascii")) + + hotspot = im.encoderinfo.get("hotspot") + if hotspot: + fp.write(f"#define im_x_hot {hotspot[0]}\n".encode("ascii")) + fp.write(f"#define im_y_hot {hotspot[1]}\n".encode("ascii")) + + fp.write(b"static char im_bits[] = {\n") + + ImageFile._save(im, fp, [ImageFile._Tile("xbm", (0, 0) + im.size)]) + + fp.write(b"};\n") + + +Image.register_open(XbmImageFile.format, XbmImageFile, _accept) +Image.register_save(XbmImageFile.format, _save) + +Image.register_extension(XbmImageFile.format, ".xbm") + +Image.register_mime(XbmImageFile.format, "image/xbm") diff --git a/py311/lib/python3.11/site-packages/PIL/XpmImagePlugin.py b/py311/lib/python3.11/site-packages/PIL/XpmImagePlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..3be240fbc1aeb7660de46fbd4f99f309ce9915dd --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/XpmImagePlugin.py @@ -0,0 +1,157 @@ +# +# The Python Imaging Library. +# $Id$ +# +# XPM File handling +# +# History: +# 1996-12-29 fl Created +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7) +# +# Copyright (c) Secret Labs AB 1997-2001. +# Copyright (c) Fredrik Lundh 1996-2001. +# +# See the README file for information on usage and redistribution. +# +from __future__ import annotations + +import re + +from . import Image, ImageFile, ImagePalette +from ._binary import o8 + +# XPM header +xpm_head = re.compile(b'"([0-9]*) ([0-9]*) ([0-9]*) ([0-9]*)') + + +def _accept(prefix: bytes) -> bool: + return prefix.startswith(b"/* XPM */") + + +## +# Image plugin for X11 pixel maps.
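A minimal sketch of what the xpm_head pattern above captures, before the plugin class that uses it: the four groups of an XPM "values" line are width, height, number of palette entries, and characters per pixel, which _open unpacks below. The sample values line is illustrative only, not taken from the vendored file.

import re

xpm_head = re.compile(b'"([0-9]*) ([0-9]*) ([0-9]*) ([0-9]*)')

# A typical XPM "values" line: 16x16 pixels, 2 palette entries, 1 char per pixel
m = xpm_head.match(b'"16 16 2 1",')
assert m is not None
width, height, palette_length, bpp = (int(g) for g in m.groups())
assert (width, height, palette_length, bpp) == (16, 16, 2, 1)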
+ + +class XpmImageFile(ImageFile.ImageFile): + format = "XPM" + format_description = "X11 Pixel Map" + + def _open(self) -> None: + assert self.fp is not None + if not _accept(self.fp.read(9)): + msg = "not an XPM file" + raise SyntaxError(msg) + + # skip forward to next string + while True: + line = self.fp.readline() + if not line: + msg = "broken XPM file" + raise SyntaxError(msg) + m = xpm_head.match(line) + if m: + break + + self._size = int(m.group(1)), int(m.group(2)) + + palette_length = int(m.group(3)) + bpp = int(m.group(4)) + + # + # load palette description + + palette = {} + + for _ in range(palette_length): + line = self.fp.readline().rstrip() + + c = line[1 : bpp + 1] + s = line[bpp + 1 : -2].split() + + for i in range(0, len(s), 2): + if s[i] == b"c": + # process colour key + rgb = s[i + 1] + if rgb == b"None": + self.info["transparency"] = c + elif rgb.startswith(b"#"): + rgb_int = int(rgb[1:], 16) + palette[c] = ( + o8((rgb_int >> 16) & 255) + + o8((rgb_int >> 8) & 255) + + o8(rgb_int & 255) + ) + else: + # unknown colour + msg = "cannot read this XPM file" + raise ValueError(msg) + break + + else: + # missing colour key + msg = "cannot read this XPM file" + raise ValueError(msg) + + args: tuple[int, dict[bytes, bytes] | tuple[bytes, ...]] + if palette_length > 256: + self._mode = "RGB" + args = (bpp, palette) + else: + self._mode = "P" + self.palette = ImagePalette.raw("RGB", b"".join(palette.values())) + args = (bpp, tuple(palette.keys())) + + self.tile = [ImageFile._Tile("xpm", (0, 0) + self.size, self.fp.tell(), args)] + + def load_read(self, read_bytes: int) -> bytes: + # + # load all image data in one chunk + + xsize, ysize = self.size + + assert self.fp is not None + s = [self.fp.readline()[1 : xsize + 1].ljust(xsize) for i in range(ysize)] + + return b"".join(s) + + +class XpmDecoder(ImageFile.PyDecoder): + _pulls_fd = True + + def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]: + assert self.fd is not None + + data = bytearray() + bpp, palette = self.args + dest_length = self.state.xsize * self.state.ysize + if self.mode == "RGB": + dest_length *= 3 + pixel_header = False + while len(data) < dest_length: + line = self.fd.readline() + if not line: + break + if line.rstrip() == b"/* pixels */" and not pixel_header: + pixel_header = True + continue + line = b'"'.join(line.split(b'"')[1:-1]) + for i in range(0, len(line), bpp): + key = line[i : i + bpp] + if self.mode == "RGB": + data += palette[key] + else: + data += o8(palette.index(key)) + self.set_as_raw(bytes(data)) + return -1, 0 + + +# +# Registry + + +Image.register_open(XpmImageFile.format, XpmImageFile, _accept) +Image.register_decoder("xpm", XpmDecoder) + +Image.register_extension(XpmImageFile.format, ".xpm") + +Image.register_mime(XpmImageFile.format, "image/xpm") diff --git a/py311/lib/python3.11/site-packages/PIL/__init__.py b/py311/lib/python3.11/site-packages/PIL/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6e4c23f897f83ef72fc10070bd22e9dc70614cf9 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/__init__.py @@ -0,0 +1,87 @@ +"""Pillow (Fork of the Python Imaging Library) + +Pillow is the friendly PIL fork by Jeffrey A. Clark and contributors. + https://github.com/python-pillow/Pillow/ + +Pillow is forked from PIL 1.1.7. + +PIL is the Python Imaging Library by Fredrik Lundh and contributors. +Copyright (c) 1999 by Secret Labs AB. + +Use PIL.__version__ for this Pillow version. 
+ +;-) +""" + +from __future__ import annotations + +from . import _version + +# VERSION was removed in Pillow 6.0.0. +# PILLOW_VERSION was removed in Pillow 9.0.0. +# Use __version__ instead. +__version__ = _version.__version__ +del _version + + +_plugins = [ + "AvifImagePlugin", + "BlpImagePlugin", + "BmpImagePlugin", + "BufrStubImagePlugin", + "CurImagePlugin", + "DcxImagePlugin", + "DdsImagePlugin", + "EpsImagePlugin", + "FitsImagePlugin", + "FliImagePlugin", + "FpxImagePlugin", + "FtexImagePlugin", + "GbrImagePlugin", + "GifImagePlugin", + "GribStubImagePlugin", + "Hdf5StubImagePlugin", + "IcnsImagePlugin", + "IcoImagePlugin", + "ImImagePlugin", + "ImtImagePlugin", + "IptcImagePlugin", + "JpegImagePlugin", + "Jpeg2KImagePlugin", + "McIdasImagePlugin", + "MicImagePlugin", + "MpegImagePlugin", + "MpoImagePlugin", + "MspImagePlugin", + "PalmImagePlugin", + "PcdImagePlugin", + "PcxImagePlugin", + "PdfImagePlugin", + "PixarImagePlugin", + "PngImagePlugin", + "PpmImagePlugin", + "PsdImagePlugin", + "QoiImagePlugin", + "SgiImagePlugin", + "SpiderImagePlugin", + "SunImagePlugin", + "TgaImagePlugin", + "TiffImagePlugin", + "WebPImagePlugin", + "WmfImagePlugin", + "XbmImagePlugin", + "XpmImagePlugin", + "XVThumbImagePlugin", +] + + +class UnidentifiedImageError(OSError): + """ + Raised in :py:meth:`PIL.Image.open` if an image cannot be opened and identified. + + If a PNG image raises this error, setting :data:`.ImageFile.LOAD_TRUNCATED_IMAGES` + to true may allow the image to be opened after all. The setting will ignore missing + data and checksum failures. + """ + + pass diff --git a/py311/lib/python3.11/site-packages/PIL/__main__.py b/py311/lib/python3.11/site-packages/PIL/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..043156e892dadc4fb1222b33f5eda33251cd15aa --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/__main__.py @@ -0,0 +1,7 @@ +from __future__ import annotations + +import sys + +from .features import pilinfo + +pilinfo(supported_formats="--report" not in sys.argv) diff --git a/py311/lib/python3.11/site-packages/PIL/_avif.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/PIL/_avif.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3dd71fc6e22bde2f179734c1c53ba7d6b5411dd9 Binary files /dev/null and b/py311/lib/python3.11/site-packages/PIL/_avif.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/PIL/_avif.pyi b/py311/lib/python3.11/site-packages/PIL/_avif.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e27843e5338213713e26973127c738c14313ff98 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_avif.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name: str) -> Any: ... diff --git a/py311/lib/python3.11/site-packages/PIL/_binary.py b/py311/lib/python3.11/site-packages/PIL/_binary.py new file mode 100644 index 0000000000000000000000000000000000000000..4594ccce361168cf77e630cb88ffb09bb4362831 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_binary.py @@ -0,0 +1,112 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Binary input/output support routines. +# +# Copyright (c) 1997-2003 by Secret Labs AB +# Copyright (c) 1995-2003 by Fredrik Lundh +# Copyright (c) 2012 by Brian Crowell +# +# See the README file for information on usage and redistribution. 
+# + + +"""Binary input/output support routines.""" +from __future__ import annotations + +from struct import pack, unpack_from + + +def i8(c: bytes) -> int: + return c[0] + + +def o8(i: int) -> bytes: + return bytes((i & 255,)) + + +# Input, le = little endian, be = big endian +def i16le(c: bytes, o: int = 0) -> int: + """ + Converts a 2-bytes (16 bits) string to an unsigned integer. + + :param c: string containing bytes to convert + :param o: offset of bytes to convert in string + """ + return unpack_from(" int: + """ + Converts a 2-bytes (16 bits) string to a signed integer. + + :param c: string containing bytes to convert + :param o: offset of bytes to convert in string + """ + return unpack_from(" int: + """ + Converts a 2-bytes (16 bits) string to a signed integer, big endian. + + :param c: string containing bytes to convert + :param o: offset of bytes to convert in string + """ + return unpack_from(">h", c, o)[0] + + +def i32le(c: bytes, o: int = 0) -> int: + """ + Converts a 4-bytes (32 bits) string to an unsigned integer. + + :param c: string containing bytes to convert + :param o: offset of bytes to convert in string + """ + return unpack_from(" int: + """ + Converts a 4-bytes (32 bits) string to a signed integer. + + :param c: string containing bytes to convert + :param o: offset of bytes to convert in string + """ + return unpack_from(" int: + """ + Converts a 4-bytes (32 bits) string to a signed integer, big endian. + + :param c: string containing bytes to convert + :param o: offset of bytes to convert in string + """ + return unpack_from(">i", c, o)[0] + + +def i16be(c: bytes, o: int = 0) -> int: + return unpack_from(">H", c, o)[0] + + +def i32be(c: bytes, o: int = 0) -> int: + return unpack_from(">I", c, o)[0] + + +# Output, le = little endian, be = big endian +def o16le(i: int) -> bytes: + return pack(" bytes: + return pack(" bytes: + return pack(">H", i) + + +def o32be(i: int) -> bytes: + return pack(">I", i) diff --git a/py311/lib/python3.11/site-packages/PIL/_deprecate.py b/py311/lib/python3.11/site-packages/PIL/_deprecate.py new file mode 100644 index 0000000000000000000000000000000000000000..170d444904996cac753bf33deedcd13e442c9027 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_deprecate.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +import warnings + +from . import __version__ + + +def deprecate( + deprecated: str, + when: int | None, + replacement: str | None = None, + *, + action: str | None = None, + plural: bool = False, + stacklevel: int = 3, +) -> None: + """ + Deprecations helper. + + :param deprecated: Name of thing to be deprecated. + :param when: Pillow major version to be removed in. + :param replacement: Name of replacement. + :param action: Instead of "replacement", give a custom call to action + e.g. "Upgrade to new thing". + :param plural: if the deprecated thing is plural, needing "are" instead of "is". + + Usually of the form: + + "[deprecated] is deprecated and will be removed in Pillow [when] (yyyy-mm-dd). + Use [replacement] instead." + + You can leave out the replacement sentence: + + "[deprecated] is deprecated and will be removed in Pillow [when] (yyyy-mm-dd)" + + Or with another call to action: + + "[deprecated] is deprecated and will be removed in Pillow [when] (yyyy-mm-dd). + [action]." + """ + + is_ = "are" if plural else "is" + + if when is None: + removed = "a future version" + elif when <= int(__version__.split(".")[0]): + msg = f"{deprecated} {is_} deprecated and should be removed." 
+ raise RuntimeError(msg) + elif when == 12: + removed = "Pillow 12 (2025-10-15)" + elif when == 13: + removed = "Pillow 13 (2026-10-15)" + else: + msg = f"Unknown removal version: {when}. Update {__name__}?" + raise ValueError(msg) + + if replacement and action: + msg = "Use only one of 'replacement' and 'action'" + raise ValueError(msg) + + if replacement: + action = f". Use {replacement} instead." + elif action: + action = f". {action.rstrip('.')}." + else: + action = "" + + warnings.warn( + f"{deprecated} {is_} deprecated and will be removed in {removed}{action}", + DeprecationWarning, + stacklevel=stacklevel, + ) diff --git a/py311/lib/python3.11/site-packages/PIL/_imaging.pyi b/py311/lib/python3.11/site-packages/PIL/_imaging.pyi new file mode 100644 index 0000000000000000000000000000000000000000..998bc52eb8a73b5ee5868cd2c8e5c87c4e6d3037 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_imaging.pyi @@ -0,0 +1,31 @@ +from typing import Any + +class ImagingCore: + def __getitem__(self, index: int) -> float: ... + def __getattr__(self, name: str) -> Any: ... + +class ImagingFont: + def __getattr__(self, name: str) -> Any: ... + +class ImagingDraw: + def __getattr__(self, name: str) -> Any: ... + +class PixelAccess: + def __getitem__(self, xy: tuple[int, int]) -> float | tuple[int, ...]: ... + def __setitem__( + self, xy: tuple[int, int], color: float | tuple[int, ...] + ) -> None: ... + +class ImagingDecoder: + def __getattr__(self, name: str) -> Any: ... + +class ImagingEncoder: + def __getattr__(self, name: str) -> Any: ... + +class _Outline: + def close(self) -> None: ... + def __getattr__(self, name: str) -> Any: ... + +def font(image: ImagingCore, glyphdata: bytes) -> ImagingFont: ... +def outline() -> _Outline: ... +def __getattr__(name: str) -> Any: ... diff --git a/py311/lib/python3.11/site-packages/PIL/_imagingcms.pyi b/py311/lib/python3.11/site-packages/PIL/_imagingcms.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ddcf93ab1ebd51947f900ad2dba1aee9392338bb --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_imagingcms.pyi @@ -0,0 +1,143 @@ +import datetime +import sys +from typing import Literal, SupportsFloat, TypedDict + +from ._typing import CapsuleType + +littlecms_version: str | None + +_Tuple3f = tuple[float, float, float] +_Tuple2x3f = tuple[_Tuple3f, _Tuple3f] +_Tuple3x3f = tuple[_Tuple3f, _Tuple3f, _Tuple3f] + +class _IccMeasurementCondition(TypedDict): + observer: int + backing: _Tuple3f + geo: str + flare: float + illuminant_type: str + +class _IccViewingCondition(TypedDict): + illuminant: _Tuple3f + surround: _Tuple3f + illuminant_type: str + +class CmsProfile: + @property + def rendering_intent(self) -> int: ... + @property + def creation_date(self) -> datetime.datetime | None: ... + @property + def copyright(self) -> str | None: ... + @property + def target(self) -> str | None: ... + @property + def manufacturer(self) -> str | None: ... + @property + def model(self) -> str | None: ... + @property + def profile_description(self) -> str | None: ... + @property + def screening_description(self) -> str | None: ... + @property + def viewing_condition(self) -> str | None: ... + @property + def version(self) -> float: ... + @property + def icc_version(self) -> int: ... + @property + def attributes(self) -> int: ... + @property + def header_flags(self) -> int: ... + @property + def header_manufacturer(self) -> str: ... + @property + def header_model(self) -> str: ... + @property + def device_class(self) -> str: ... 
+ @property + def connection_space(self) -> str: ... + @property + def xcolor_space(self) -> str: ... + @property + def profile_id(self) -> bytes: ... + @property + def is_matrix_shaper(self) -> bool: ... + @property + def technology(self) -> str | None: ... + @property + def colorimetric_intent(self) -> str | None: ... + @property + def perceptual_rendering_intent_gamut(self) -> str | None: ... + @property + def saturation_rendering_intent_gamut(self) -> str | None: ... + @property + def red_colorant(self) -> _Tuple2x3f | None: ... + @property + def green_colorant(self) -> _Tuple2x3f | None: ... + @property + def blue_colorant(self) -> _Tuple2x3f | None: ... + @property + def red_primary(self) -> _Tuple2x3f | None: ... + @property + def green_primary(self) -> _Tuple2x3f | None: ... + @property + def blue_primary(self) -> _Tuple2x3f | None: ... + @property + def media_white_point_temperature(self) -> float | None: ... + @property + def media_white_point(self) -> _Tuple2x3f | None: ... + @property + def media_black_point(self) -> _Tuple2x3f | None: ... + @property + def luminance(self) -> _Tuple2x3f | None: ... + @property + def chromatic_adaptation(self) -> tuple[_Tuple3x3f, _Tuple3x3f] | None: ... + @property + def chromaticity(self) -> _Tuple3x3f | None: ... + @property + def colorant_table(self) -> list[str] | None: ... + @property + def colorant_table_out(self) -> list[str] | None: ... + @property + def intent_supported(self) -> dict[int, tuple[bool, bool, bool]] | None: ... + @property + def clut(self) -> dict[int, tuple[bool, bool, bool]] | None: ... + @property + def icc_measurement_condition(self) -> _IccMeasurementCondition | None: ... + @property + def icc_viewing_condition(self) -> _IccViewingCondition | None: ... + def is_intent_supported(self, intent: int, direction: int, /) -> int: ... + +class CmsTransform: + def apply(self, id_in: CapsuleType, id_out: CapsuleType) -> int: ... + +def profile_open(profile: str, /) -> CmsProfile: ... +def profile_frombytes(profile: bytes, /) -> CmsProfile: ... +def profile_tobytes(profile: CmsProfile, /) -> bytes: ... +def buildTransform( + input_profile: CmsProfile, + output_profile: CmsProfile, + in_mode: str, + out_mode: str, + rendering_intent: int = 0, + cms_flags: int = 0, + /, +) -> CmsTransform: ... +def buildProofTransform( + input_profile: CmsProfile, + output_profile: CmsProfile, + proof_profile: CmsProfile, + in_mode: str, + out_mode: str, + rendering_intent: int = 0, + proof_intent: int = 0, + cms_flags: int = 0, + /, +) -> CmsTransform: ... +def createProfile( + color_space: Literal["LAB", "XYZ", "sRGB"], color_temp: SupportsFloat = 0.0, / +) -> CmsProfile: ... + +if sys.platform == "win32": + def get_display_profile_win32(handle: int = 0, is_dc: int = 0, /) -> str | None: ... diff --git a/py311/lib/python3.11/site-packages/PIL/_imagingft.pyi b/py311/lib/python3.11/site-packages/PIL/_imagingft.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1cb1429d6cf6f29432fbd7f56d3aa0e45f0722f3 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_imagingft.pyi @@ -0,0 +1,69 @@ +from typing import Any, Callable + +from . import ImageFont, _imaging + +class Font: + @property + def family(self) -> str | None: ... + @property + def style(self) -> str | None: ... + @property + def ascent(self) -> int: ... + @property + def descent(self) -> int: ... + @property + def height(self) -> int: ... + @property + def x_ppem(self) -> int: ... + @property + def y_ppem(self) -> int: ... + @property + def glyphs(self) -> int: ... 
+ def render( + self, + string: str | bytes, + fill: Callable[[int, int], _imaging.ImagingCore], + mode: str, + dir: str | None, + features: list[str] | None, + lang: str | None, + stroke_width: float, + stroke_filled: bool, + anchor: str | None, + foreground_ink_long: int, + start: tuple[float, float], + /, + ) -> tuple[_imaging.ImagingCore, tuple[int, int]]: ... + def getsize( + self, + string: str | bytes | bytearray, + mode: str, + dir: str | None, + features: list[str] | None, + lang: str | None, + anchor: str | None, + /, + ) -> tuple[tuple[int, int], tuple[int, int]]: ... + def getlength( + self, + string: str | bytes, + mode: str, + dir: str | None, + features: list[str] | None, + lang: str | None, + /, + ) -> float: ... + def getvarnames(self) -> list[bytes]: ... + def getvaraxes(self) -> list[ImageFont.Axis]: ... + def setvarname(self, instance_index: int, /) -> None: ... + def setvaraxes(self, axes: list[float], /) -> None: ... + +def getfont( + filename: str | bytes, + size: float, + index: int, + encoding: str, + font_bytes: bytes, + layout_engine: int, +) -> Font: ... +def __getattr__(name: str) -> Any: ... diff --git a/py311/lib/python3.11/site-packages/PIL/_imagingmath.pyi b/py311/lib/python3.11/site-packages/PIL/_imagingmath.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e27843e5338213713e26973127c738c14313ff98 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_imagingmath.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name: str) -> Any: ... diff --git a/py311/lib/python3.11/site-packages/PIL/_imagingmorph.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/PIL/_imagingmorph.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..62468175deea82a45f123622e77227628748adb0 Binary files /dev/null and b/py311/lib/python3.11/site-packages/PIL/_imagingmorph.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/PIL/_imagingmorph.pyi b/py311/lib/python3.11/site-packages/PIL/_imagingmorph.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e27843e5338213713e26973127c738c14313ff98 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_imagingmorph.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name: str) -> Any: ... diff --git a/py311/lib/python3.11/site-packages/PIL/_imagingtk.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/PIL/_imagingtk.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..36df39362e08233d254f9e2859f08aabd36113ae Binary files /dev/null and b/py311/lib/python3.11/site-packages/PIL/_imagingtk.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/PIL/_imagingtk.pyi b/py311/lib/python3.11/site-packages/PIL/_imagingtk.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e27843e5338213713e26973127c738c14313ff98 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_imagingtk.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name: str) -> Any: ... 
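The Font stub above types the FreeType extension object that backs PIL.ImageFont.FreeTypeFont. A minimal usage sketch, assuming Pillow was built with FreeType support; the font path "DejaVuSans.ttf" is illustrative, and any TrueType file would do.

from PIL import ImageFont

# ImageFont.truetype() ends up in _imagingft.getfont and wraps the C Font object
font = ImageFont.truetype("DejaVuSans.ttf", size=24)
core = font.font  # the _imagingft.Font instance described by the stub above
print(core.family, core.style)    # family/style properties from the stub
print(core.ascent, core.descent)  # vertical metrics in pixels
print(font.getlength("Hello"))    # routed through Font.getlength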
diff --git a/py311/lib/python3.11/site-packages/PIL/_tkinter_finder.py b/py311/lib/python3.11/site-packages/PIL/_tkinter_finder.py new file mode 100644 index 0000000000000000000000000000000000000000..9c0143003a7320dd475cfcd168168b82e4f64964 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_tkinter_finder.py @@ -0,0 +1,20 @@ +"""Find compiled module linking to Tcl / Tk libraries""" + +from __future__ import annotations + +import sys +import tkinter + +tk = getattr(tkinter, "_tkinter") + +try: + if hasattr(sys, "pypy_find_executable"): + TKINTER_LIB = tk.tklib_cffi.__file__ + else: + TKINTER_LIB = tk.__file__ +except AttributeError: + # _tkinter may be compiled directly into Python, in which case __file__ is + # not available. load_tkinter_funcs will check the binary first in any case. + TKINTER_LIB = None + +tk_version = str(tkinter.TkVersion) diff --git a/py311/lib/python3.11/site-packages/PIL/_typing.py b/py311/lib/python3.11/site-packages/PIL/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..373938e71e0331ed5d9ce2517dcf5f5098ffca72 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_typing.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +import os +import sys +from collections.abc import Sequence +from typing import Any, Protocol, TypeVar, Union + +TYPE_CHECKING = False +if TYPE_CHECKING: + from numbers import _IntegralLike as IntegralLike + + try: + import numpy.typing as npt + + NumpyArray = npt.NDArray[Any] # requires numpy>=1.21 + except (ImportError, AttributeError): + pass + +if sys.version_info >= (3, 13): + from types import CapsuleType +else: + CapsuleType = object + +if sys.version_info >= (3, 12): + from collections.abc import Buffer +else: + Buffer = Any + +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + try: + from typing_extensions import TypeGuard + except ImportError: + + class TypeGuard: # type: ignore[no-redef] + def __class_getitem__(cls, item: Any) -> type[bool]: + return bool + + +Coords = Union[Sequence[float], Sequence[Sequence[float]]] + + +_T_co = TypeVar("_T_co", covariant=True) + + +class SupportsRead(Protocol[_T_co]): + def read(self, length: int = ..., /) -> _T_co: ... + + +StrOrBytesPath = Union[str, bytes, os.PathLike[str], os.PathLike[bytes]] + + +__all__ = ["Buffer", "IntegralLike", "StrOrBytesPath", "SupportsRead", "TypeGuard"] diff --git a/py311/lib/python3.11/site-packages/PIL/_util.py b/py311/lib/python3.11/site-packages/PIL/_util.py new file mode 100644 index 0000000000000000000000000000000000000000..8ef0d36f7545a85e57829e036818bc4fa2eb72c7 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_util.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +import os +from typing import Any, NoReturn + +from ._typing import StrOrBytesPath, TypeGuard + + +def is_path(f: Any) -> TypeGuard[StrOrBytesPath]: + return isinstance(f, (bytes, str, os.PathLike)) + + +class DeferredError: + def __init__(self, ex: BaseException): + self.ex = ex + + def __getattr__(self, elt: str) -> NoReturn: + raise self.ex + + @staticmethod + def new(ex: BaseException) -> Any: + """ + Creates an object that raises the wrapped exception ``ex`` when used, + and casts it to :py:obj:`~typing.Any` type. 
+ """ + return DeferredError(ex) diff --git a/py311/lib/python3.11/site-packages/PIL/_version.py b/py311/lib/python3.11/site-packages/PIL/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..74e63356c95519ff405d0fdd29ce997d3dd5c8f5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_version.py @@ -0,0 +1,4 @@ +# Master version for Pillow +from __future__ import annotations + +__version__ = "11.3.0" diff --git a/py311/lib/python3.11/site-packages/PIL/_webp.cpython-311-x86_64-linux-gnu.so b/py311/lib/python3.11/site-packages/PIL/_webp.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..5a0d1518fcb089c70ead4bb20afe03db439ecfa6 Binary files /dev/null and b/py311/lib/python3.11/site-packages/PIL/_webp.cpython-311-x86_64-linux-gnu.so differ diff --git a/py311/lib/python3.11/site-packages/PIL/_webp.pyi b/py311/lib/python3.11/site-packages/PIL/_webp.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e27843e5338213713e26973127c738c14313ff98 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/_webp.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name: str) -> Any: ... diff --git a/py311/lib/python3.11/site-packages/PIL/features.py b/py311/lib/python3.11/site-packages/PIL/features.py new file mode 100644 index 0000000000000000000000000000000000000000..573f1d41256012ccf1a021055222c1f5c3c23339 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/features.py @@ -0,0 +1,361 @@ +from __future__ import annotations + +import collections +import os +import sys +import warnings +from typing import IO + +import PIL + +from . import Image +from ._deprecate import deprecate + +modules = { + "pil": ("PIL._imaging", "PILLOW_VERSION"), + "tkinter": ("PIL._tkinter_finder", "tk_version"), + "freetype2": ("PIL._imagingft", "freetype2_version"), + "littlecms2": ("PIL._imagingcms", "littlecms_version"), + "webp": ("PIL._webp", "webpdecoder_version"), + "avif": ("PIL._avif", "libavif_version"), +} + + +def check_module(feature: str) -> bool: + """ + Checks if a module is available. + + :param feature: The module to check for. + :returns: ``True`` if available, ``False`` otherwise. + :raises ValueError: If the module is not defined in this version of Pillow. + """ + if feature not in modules: + msg = f"Unknown module {feature}" + raise ValueError(msg) + + module, ver = modules[feature] + + try: + __import__(module) + return True + except ModuleNotFoundError: + return False + except ImportError as ex: + warnings.warn(str(ex)) + return False + + +def version_module(feature: str) -> str | None: + """ + :param feature: The module to check for. + :returns: + The loaded version number as a string, or ``None`` if unknown or not available. + :raises ValueError: If the module is not defined in this version of Pillow. + """ + if not check_module(feature): + return None + + module, ver = modules[feature] + + return getattr(__import__(module, fromlist=[ver]), ver) + + +def get_supported_modules() -> list[str]: + """ + :returns: A list of all supported modules. + """ + return [f for f in modules if check_module(f)] + + +codecs = { + "jpg": ("jpeg", "jpeglib"), + "jpg_2000": ("jpeg2k", "jp2klib"), + "zlib": ("zip", "zlib"), + "libtiff": ("libtiff", "libtiff"), +} + + +def check_codec(feature: str) -> bool: + """ + Checks if a codec is available. + + :param feature: The codec to check for. + :returns: ``True`` if available, ``False`` otherwise. 
+ :raises ValueError: If the codec is not defined in this version of Pillow. + """ + if feature not in codecs: + msg = f"Unknown codec {feature}" + raise ValueError(msg) + + codec, lib = codecs[feature] + + return f"{codec}_encoder" in dir(Image.core) + + +def version_codec(feature: str) -> str | None: + """ + :param feature: The codec to check for. + :returns: + The version number as a string, or ``None`` if not available. + Checked at compile time for ``jpg``, run-time otherwise. + :raises ValueError: If the codec is not defined in this version of Pillow. + """ + if not check_codec(feature): + return None + + codec, lib = codecs[feature] + + version = getattr(Image.core, f"{lib}_version") + + if feature == "libtiff": + return version.split("\n")[0].split("Version ")[1] + + return version + + +def get_supported_codecs() -> list[str]: + """ + :returns: A list of all supported codecs. + """ + return [f for f in codecs if check_codec(f)] + + +features: dict[str, tuple[str, str | bool, str | None]] = { + "webp_anim": ("PIL._webp", True, None), + "webp_mux": ("PIL._webp", True, None), + "transp_webp": ("PIL._webp", True, None), + "raqm": ("PIL._imagingft", "HAVE_RAQM", "raqm_version"), + "fribidi": ("PIL._imagingft", "HAVE_FRIBIDI", "fribidi_version"), + "harfbuzz": ("PIL._imagingft", "HAVE_HARFBUZZ", "harfbuzz_version"), + "libjpeg_turbo": ("PIL._imaging", "HAVE_LIBJPEGTURBO", "libjpeg_turbo_version"), + "mozjpeg": ("PIL._imaging", "HAVE_MOZJPEG", "libjpeg_turbo_version"), + "zlib_ng": ("PIL._imaging", "HAVE_ZLIBNG", "zlib_ng_version"), + "libimagequant": ("PIL._imaging", "HAVE_LIBIMAGEQUANT", "imagequant_version"), + "xcb": ("PIL._imaging", "HAVE_XCB", None), +} + + +def check_feature(feature: str) -> bool | None: + """ + Checks if a feature is available. + + :param feature: The feature to check for. + :returns: ``True`` if available, ``False`` if unavailable, ``None`` if unknown. + :raises ValueError: If the feature is not defined in this version of Pillow. + """ + if feature not in features: + msg = f"Unknown feature {feature}" + raise ValueError(msg) + + module, flag, ver = features[feature] + + if isinstance(flag, bool): + deprecate(f'check_feature("{feature}")', 12) + try: + imported_module = __import__(module, fromlist=["PIL"]) + if isinstance(flag, bool): + return flag + return getattr(imported_module, flag) + except ModuleNotFoundError: + return None + except ImportError as ex: + warnings.warn(str(ex)) + return None + + +def version_feature(feature: str) -> str | None: + """ + :param feature: The feature to check for. + :returns: The version number as a string, or ``None`` if not available. + :raises ValueError: If the feature is not defined in this version of Pillow. + """ + if not check_feature(feature): + return None + + module, flag, ver = features[feature] + + if ver is None: + return None + + return getattr(__import__(module, fromlist=[ver]), ver) + + +def get_supported_features() -> list[str]: + """ + :returns: A list of all supported features. + """ + supported_features = [] + for f, (module, flag, _) in features.items(): + if flag is True: + for feature, (feature_module, _) in modules.items(): + if feature_module == module: + if check_module(feature): + supported_features.append(f) + break + elif check_feature(f): + supported_features.append(f) + return supported_features + + +def check(feature: str) -> bool | None: + """ + :param feature: A module, codec, or feature name. 
+ :returns: + ``True`` if the module, codec, or feature is available, + ``False`` or ``None`` otherwise. + """ + + if feature in modules: + return check_module(feature) + if feature in codecs: + return check_codec(feature) + if feature in features: + return check_feature(feature) + warnings.warn(f"Unknown feature '{feature}'.", stacklevel=2) + return False + + +def version(feature: str) -> str | None: + """ + :param feature: + The module, codec, or feature to check for. + :returns: + The version number as a string, or ``None`` if unknown or not available. + """ + if feature in modules: + return version_module(feature) + if feature in codecs: + return version_codec(feature) + if feature in features: + return version_feature(feature) + return None + + +def get_supported() -> list[str]: + """ + :returns: A list of all supported modules, features, and codecs. + """ + + ret = get_supported_modules() + ret.extend(get_supported_features()) + ret.extend(get_supported_codecs()) + return ret + + +def pilinfo(out: IO[str] | None = None, supported_formats: bool = True) -> None: + """ + Prints information about this installation of Pillow. + This function can be called with ``python3 -m PIL``. + It can also be called with ``python3 -m PIL.report`` or ``python3 -m PIL --report`` + to have "supported_formats" set to ``False``, omitting the list of all supported + image file formats. + + :param out: + The output stream to print to. Defaults to ``sys.stdout`` if ``None``. + :param supported_formats: + If ``True``, a list of all supported image file formats will be printed. + """ + + if out is None: + out = sys.stdout + + Image.init() + + print("-" * 68, file=out) + print(f"Pillow {PIL.__version__}", file=out) + py_version_lines = sys.version.splitlines() + print(f"Python {py_version_lines[0].strip()}", file=out) + for py_version in py_version_lines[1:]: + print(f" {py_version.strip()}", file=out) + print("-" * 68, file=out) + print(f"Python executable is {sys.executable or 'unknown'}", file=out) + if sys.prefix != sys.base_prefix: + print(f"Environment Python files loaded from {sys.prefix}", file=out) + print(f"System Python files loaded from {sys.base_prefix}", file=out) + print("-" * 68, file=out) + print( + f"Python Pillow modules loaded from {os.path.dirname(Image.__file__)}", + file=out, + ) + print( + f"Binary Pillow modules loaded from {os.path.dirname(Image.core.__file__)}", + file=out, + ) + print("-" * 68, file=out) + + for name, feature in [ + ("pil", "PIL CORE"), + ("tkinter", "TKINTER"), + ("freetype2", "FREETYPE2"), + ("littlecms2", "LITTLECMS2"), + ("webp", "WEBP"), + ("avif", "AVIF"), + ("jpg", "JPEG"), + ("jpg_2000", "OPENJPEG (JPEG2000)"), + ("zlib", "ZLIB (PNG/ZIP)"), + ("libtiff", "LIBTIFF"), + ("raqm", "RAQM (Bidirectional Text)"), + ("libimagequant", "LIBIMAGEQUANT (Quantization method)"), + ("xcb", "XCB (X protocol)"), + ]: + if check(name): + v: str | None = None + if name == "jpg": + libjpeg_turbo_version = version_feature("libjpeg_turbo") + if libjpeg_turbo_version is not None: + v = "mozjpeg" if check_feature("mozjpeg") else "libjpeg-turbo" + v += " " + libjpeg_turbo_version + if v is None: + v = version(name) + if v is not None: + version_static = name in ("pil", "jpg") + if name == "littlecms2": + # this check is also in src/_imagingcms.c:setup_module() + version_static = tuple(int(x) for x in v.split(".")) < (2, 7) + t = "compiled for" if version_static else "loaded" + if name == "zlib": + zlib_ng_version = version_feature("zlib_ng") + if zlib_ng_version is not None: + v += ", 
compiled for zlib-ng " + zlib_ng_version + elif name == "raqm": + for f in ("fribidi", "harfbuzz"): + v2 = version_feature(f) + if v2 is not None: + v += f", {f} {v2}" + print("---", feature, "support ok,", t, v, file=out) + else: + print("---", feature, "support ok", file=out) + else: + print("***", feature, "support not installed", file=out) + print("-" * 68, file=out) + + if supported_formats: + extensions = collections.defaultdict(list) + for ext, i in Image.EXTENSION.items(): + extensions[i].append(ext) + + for i in sorted(Image.ID): + line = f"{i}" + if i in Image.MIME: + line = f"{line} {Image.MIME[i]}" + print(line, file=out) + + if i in extensions: + print( + "Extensions: {}".format(", ".join(sorted(extensions[i]))), file=out + ) + + features = [] + if i in Image.OPEN: + features.append("open") + if i in Image.SAVE: + features.append("save") + if i in Image.SAVE_ALL: + features.append("save_all") + if i in Image.DECODERS: + features.append("decode") + if i in Image.ENCODERS: + features.append("encode") + + print("Features: {}".format(", ".join(features)), file=out) + print("-" * 68, file=out) diff --git a/py311/lib/python3.11/site-packages/PIL/py.typed b/py311/lib/python3.11/site-packages/PIL/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/PIL/report.py b/py311/lib/python3.11/site-packages/PIL/report.py new file mode 100644 index 0000000000000000000000000000000000000000..d2815e8455e2ead803de4417314987ce7e9b7598 --- /dev/null +++ b/py311/lib/python3.11/site-packages/PIL/report.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from .features import pilinfo + +pilinfo(supported_formats=False) diff --git a/py311/lib/python3.11/site-packages/aiohttp/__init__.py b/py311/lib/python3.11/site-packages/aiohttp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..357baf019de77a08af52b18334cffcc9f078390a --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/__init__.py @@ -0,0 +1,278 @@ +__version__ = "3.13.3" + +from typing import TYPE_CHECKING, Tuple + +from . 
import hdrs as hdrs +from .client import ( + BaseConnector, + ClientConnectionError, + ClientConnectionResetError, + ClientConnectorCertificateError, + ClientConnectorDNSError, + ClientConnectorError, + ClientConnectorSSLError, + ClientError, + ClientHttpProxyError, + ClientOSError, + ClientPayloadError, + ClientProxyConnectionError, + ClientRequest, + ClientResponse, + ClientResponseError, + ClientSession, + ClientSSLError, + ClientTimeout, + ClientWebSocketResponse, + ClientWSTimeout, + ConnectionTimeoutError, + ContentTypeError, + Fingerprint, + InvalidURL, + InvalidUrlClientError, + InvalidUrlRedirectClientError, + NamedPipeConnector, + NonHttpUrlClientError, + NonHttpUrlRedirectClientError, + RedirectClientError, + RequestInfo, + ServerConnectionError, + ServerDisconnectedError, + ServerFingerprintMismatch, + ServerTimeoutError, + SocketTimeoutError, + TCPConnector, + TooManyRedirects, + UnixConnector, + WSMessageTypeError, + WSServerHandshakeError, + request, +) +from .client_middleware_digest_auth import DigestAuthMiddleware +from .client_middlewares import ClientHandlerType, ClientMiddlewareType +from .compression_utils import set_zlib_backend +from .connector import ( + AddrInfoType as AddrInfoType, + SocketFactoryType as SocketFactoryType, +) +from .cookiejar import CookieJar as CookieJar, DummyCookieJar as DummyCookieJar +from .formdata import FormData as FormData +from .helpers import BasicAuth, ChainMapProxy, ETag +from .http import ( + HttpVersion as HttpVersion, + HttpVersion10 as HttpVersion10, + HttpVersion11 as HttpVersion11, + WebSocketError as WebSocketError, + WSCloseCode as WSCloseCode, + WSMessage as WSMessage, + WSMsgType as WSMsgType, +) +from .multipart import ( + BadContentDispositionHeader as BadContentDispositionHeader, + BadContentDispositionParam as BadContentDispositionParam, + BodyPartReader as BodyPartReader, + MultipartReader as MultipartReader, + MultipartWriter as MultipartWriter, + content_disposition_filename as content_disposition_filename, + parse_content_disposition as parse_content_disposition, +) +from .payload import ( + PAYLOAD_REGISTRY as PAYLOAD_REGISTRY, + AsyncIterablePayload as AsyncIterablePayload, + BufferedReaderPayload as BufferedReaderPayload, + BytesIOPayload as BytesIOPayload, + BytesPayload as BytesPayload, + IOBasePayload as IOBasePayload, + JsonPayload as JsonPayload, + Payload as Payload, + StringIOPayload as StringIOPayload, + StringPayload as StringPayload, + TextIOPayload as TextIOPayload, + get_payload as get_payload, + payload_type as payload_type, +) +from .payload_streamer import streamer as streamer +from .resolver import ( + AsyncResolver as AsyncResolver, + DefaultResolver as DefaultResolver, + ThreadedResolver as ThreadedResolver, +) +from .streams import ( + EMPTY_PAYLOAD as EMPTY_PAYLOAD, + DataQueue as DataQueue, + EofStream as EofStream, + FlowControlDataQueue as FlowControlDataQueue, + StreamReader as StreamReader, +) +from .tracing import ( + TraceConfig as TraceConfig, + TraceConnectionCreateEndParams as TraceConnectionCreateEndParams, + TraceConnectionCreateStartParams as TraceConnectionCreateStartParams, + TraceConnectionQueuedEndParams as TraceConnectionQueuedEndParams, + TraceConnectionQueuedStartParams as TraceConnectionQueuedStartParams, + TraceConnectionReuseconnParams as TraceConnectionReuseconnParams, + TraceDnsCacheHitParams as TraceDnsCacheHitParams, + TraceDnsCacheMissParams as TraceDnsCacheMissParams, + TraceDnsResolveHostEndParams as TraceDnsResolveHostEndParams, + TraceDnsResolveHostStartParams 
as TraceDnsResolveHostStartParams, + TraceRequestChunkSentParams as TraceRequestChunkSentParams, + TraceRequestEndParams as TraceRequestEndParams, + TraceRequestExceptionParams as TraceRequestExceptionParams, + TraceRequestHeadersSentParams as TraceRequestHeadersSentParams, + TraceRequestRedirectParams as TraceRequestRedirectParams, + TraceRequestStartParams as TraceRequestStartParams, + TraceResponseChunkReceivedParams as TraceResponseChunkReceivedParams, +) + +if TYPE_CHECKING: + # At runtime these are lazy-loaded at the bottom of the file. + from .worker import ( + GunicornUVLoopWebWorker as GunicornUVLoopWebWorker, + GunicornWebWorker as GunicornWebWorker, + ) + +__all__: Tuple[str, ...] = ( + "hdrs", + # client + "AddrInfoType", + "BaseConnector", + "ClientConnectionError", + "ClientConnectionResetError", + "ClientConnectorCertificateError", + "ClientConnectorDNSError", + "ClientConnectorError", + "ClientConnectorSSLError", + "ClientError", + "ClientHttpProxyError", + "ClientOSError", + "ClientPayloadError", + "ClientProxyConnectionError", + "ClientResponse", + "ClientRequest", + "ClientResponseError", + "ClientSSLError", + "ClientSession", + "ClientTimeout", + "ClientWebSocketResponse", + "ClientWSTimeout", + "ConnectionTimeoutError", + "ContentTypeError", + "Fingerprint", + "FlowControlDataQueue", + "InvalidURL", + "InvalidUrlClientError", + "InvalidUrlRedirectClientError", + "NonHttpUrlClientError", + "NonHttpUrlRedirectClientError", + "RedirectClientError", + "RequestInfo", + "ServerConnectionError", + "ServerDisconnectedError", + "ServerFingerprintMismatch", + "ServerTimeoutError", + "SocketFactoryType", + "SocketTimeoutError", + "TCPConnector", + "TooManyRedirects", + "UnixConnector", + "NamedPipeConnector", + "WSServerHandshakeError", + "request", + # client_middleware + "ClientMiddlewareType", + "ClientHandlerType", + # cookiejar + "CookieJar", + "DummyCookieJar", + # formdata + "FormData", + # helpers + "BasicAuth", + "ChainMapProxy", + "DigestAuthMiddleware", + "ETag", + "set_zlib_backend", + # http + "HttpVersion", + "HttpVersion10", + "HttpVersion11", + "WSMsgType", + "WSCloseCode", + "WSMessage", + "WebSocketError", + # multipart + "BadContentDispositionHeader", + "BadContentDispositionParam", + "BodyPartReader", + "MultipartReader", + "MultipartWriter", + "content_disposition_filename", + "parse_content_disposition", + # payload + "AsyncIterablePayload", + "BufferedReaderPayload", + "BytesIOPayload", + "BytesPayload", + "IOBasePayload", + "JsonPayload", + "PAYLOAD_REGISTRY", + "Payload", + "StringIOPayload", + "StringPayload", + "TextIOPayload", + "get_payload", + "payload_type", + # payload_streamer + "streamer", + # resolver + "AsyncResolver", + "DefaultResolver", + "ThreadedResolver", + # streams + "DataQueue", + "EMPTY_PAYLOAD", + "EofStream", + "StreamReader", + # tracing + "TraceConfig", + "TraceConnectionCreateEndParams", + "TraceConnectionCreateStartParams", + "TraceConnectionQueuedEndParams", + "TraceConnectionQueuedStartParams", + "TraceConnectionReuseconnParams", + "TraceDnsCacheHitParams", + "TraceDnsCacheMissParams", + "TraceDnsResolveHostEndParams", + "TraceDnsResolveHostStartParams", + "TraceRequestChunkSentParams", + "TraceRequestEndParams", + "TraceRequestExceptionParams", + "TraceRequestHeadersSentParams", + "TraceRequestRedirectParams", + "TraceRequestStartParams", + "TraceResponseChunkReceivedParams", + # workers (imported lazily with __getattr__) + "GunicornUVLoopWebWorker", + "GunicornWebWorker", + "WSMessageTypeError", +) + + +def __dir__() -> 
Tuple[str, ...]:
+    return __all__ + ("__doc__",)
+
+
+def __getattr__(name: str) -> object:
+    global GunicornUVLoopWebWorker, GunicornWebWorker
+
+    # Importing gunicorn takes a long time (>100ms), so only import if actually needed.
+    if name in ("GunicornUVLoopWebWorker", "GunicornWebWorker"):
+        try:
+            from .worker import GunicornUVLoopWebWorker as guv, GunicornWebWorker as gw
+        except ImportError:
+            return None
+
+        GunicornUVLoopWebWorker = guv  # type: ignore[misc]
+        GunicornWebWorker = gw  # type: ignore[misc]
+        return guv if name == "GunicornUVLoopWebWorker" else gw
+
+    raise AttributeError(f"module {__name__} has no attribute {name}")
diff --git a/py311/lib/python3.11/site-packages/aiohttp/_cookie_helpers.py b/py311/lib/python3.11/site-packages/aiohttp/_cookie_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..10e2e0eff9de9d28ecbfe511a1227a1fec1d83cd
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/aiohttp/_cookie_helpers.py
@@ -0,0 +1,338 @@
+"""
+Internal cookie handling helpers.
+
+This module contains internal utilities for cookie parsing and manipulation.
+These are not part of the public API and may change without notice.
+"""
+
+import re
+from http.cookies import Morsel
+from typing import List, Optional, Sequence, Tuple, cast
+
+from .log import internal_logger
+
+__all__ = (
+    "parse_set_cookie_headers",
+    "parse_cookie_header",
+    "preserve_morsel_with_coded_value",
+)
+
+# Cookie parsing constants
+# Allow more characters in cookie names to handle real-world cookies
+# that don't strictly follow RFC standards (fixes #2683)
+# RFC 6265 defines cookie-name token as per RFC 2616 Section 2.2,
+# but many servers send cookies with characters like {} [] () etc.
+# This makes the cookie parser more tolerant of real-world cookies
+# while still providing some validation to catch obviously malformed names.
+_COOKIE_NAME_RE = re.compile(r"^[!#$%&\'()*+\-./0-9:<=>?@A-Z\[\]^_`a-z{|}~]+$")
+_COOKIE_KNOWN_ATTRS = frozenset(  # AKA Morsel._reserved
+    (
+        "path",
+        "domain",
+        "max-age",
+        "expires",
+        "secure",
+        "httponly",
+        "samesite",
+        "partitioned",
+        "version",
+        "comment",
+    )
+)
+_COOKIE_BOOL_ATTRS = frozenset(  # AKA Morsel._flags
+    ("secure", "httponly", "partitioned")
+)
+
+# SimpleCookie's pattern for parsing cookies with relaxed validation
+# Based on http.cookies pattern but extended to allow more characters in cookie names
+# to handle real-world cookies (fixes #2683)
+_COOKIE_PATTERN = re.compile(
+    r"""
+    \s*                            # Optional whitespace at start of cookie
+    (?P<key>                       # Start of group 'key'
+    # aiohttp has extended to include [] for compatibility with real-world cookies
+    [\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\[\]]+  # Any word of at least one letter
+    )                              # End of group 'key'
+    (                              # Optional group: there may not be a value.
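+    # A name may also appear with no value at all (e.g. a bare `secure` or
+    # `HttpOnly` flag in a Set-Cookie header), which is why this whole
+    # value group is optional.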
+    \s*=\s*                        # Equal Sign
+    (?P<val>                       # Start of group 'val'
+    "(?:[^\\"]|\\.)*"              # Any double-quoted string (properly closed)
+    |                              # or
+    "[^";]*                        # Unmatched opening quote (differs from SimpleCookie - issue #7993)
+    |                              # or
+    # Special case for "expires" attr - RFC 822, RFC 850, RFC 1036, RFC 1123
+    (\w{3,6}day|\w{3}),\s          # Day of the week or abbreviated day (with comma)
+    [\w\d\s-]{9,11}\s[\d:]{8}\s    # Date and time in specific format
+    (GMT|[+-]\d{4})                # Timezone: GMT or RFC 2822 offset like -0000, +0100
+                                   # NOTE: RFC 2822 timezone support is an aiohttp extension
+                                   # for issue #4493 - SimpleCookie does NOT support this
+    |                              # or
+    # ANSI C asctime() format: "Wed Jun  9 10:18:14 2021"
+    # NOTE: This is an aiohttp extension for issue #4327 - SimpleCookie does NOT support this format
+    \w{3}\s+\w{3}\s+[\s\d]\d\s+\d{2}:\d{2}:\d{2}\s+\d{4}
+    |                              # or
+    [\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=\[\]]*  # Any word or empty string
+    )                              # End of group 'val'
+    )?                             # End of optional value group
+    \s*                            # Any number of spaces.
+    (\s+|;|$)                      # Ending either at space, semicolon, or EOS.
+    """,
+    re.VERBOSE | re.ASCII,
+)
+
+
+def preserve_morsel_with_coded_value(cookie: Morsel[str]) -> Morsel[str]:
+    """
+    Preserve a Morsel's coded_value exactly as received from the server.
+
+    This function ensures that cookie encoding is preserved exactly as sent by
+    the server, which is critical for compatibility with old servers that have
+    strict requirements about cookie formats.
+
+    This addresses the issue described in https://github.com/aio-libs/aiohttp/pull/1453
+    where Python's SimpleCookie would re-encode cookies, breaking authentication
+    with certain servers.
+
+    Args:
+        cookie: A Morsel object from SimpleCookie
+
+    Returns:
+        A Morsel object with preserved coded_value
+
+    """
+    mrsl_val = cast("Morsel[str]", cookie.get(cookie.key, Morsel()))
+    # We use __setstate__ instead of the public set() API because it allows us to
+    # bypass validation and set already validated state. This is more stable than
+    # setting protected attributes directly and unlikely to change since it would
+    # break pickling.
+    mrsl_val.__setstate__(  # type: ignore[attr-defined]
+        {"key": cookie.key, "value": cookie.value, "coded_value": cookie.coded_value}
+    )
+    return mrsl_val
+
+
+_unquote_sub = re.compile(r"\\(?:([0-3][0-7][0-7])|(.))").sub
+
+
+def _unquote_replace(m: re.Match[str]) -> str:
+    """
+    Replace function for _unquote_sub regex substitution.
+
+    Handles escaped characters in cookie values:
+    - Octal sequences are converted to their character representation
+    - Other escaped characters are unescaped by removing the backslash
+    """
+    if m[1]:
+        return chr(int(m[1], 8))
+    return m[2]
+
+
+def _unquote(value: str) -> str:
+    """
+    Unquote a cookie value.
+
+    Vendored from http.cookies._unquote to ensure compatibility.
+
+    Note: The original implementation checked for None, but we've removed
+    that check since all callers already ensure the value is not None.
+    """
+    # If there aren't any doublequotes,
+    # then there can't be any special characters.  See RFC 2109.
+    if len(value) < 2:
+        return value
+    if value[0] != '"' or value[-1] != '"':
+        return value
+
+    # We have to assume that we must decode this string.
+    # Down to work.
+
+    # Remove the "s
+    value = value[1:-1]
+
+    # Check for special sequences.  Examples:
+    #     \012 --> \n
+    #     \"   --> "
+    #
+    return _unquote_sub(_unquote_replace, value)
+
+
+def parse_cookie_header(header: str) -> List[Tuple[str, Morsel[str]]]:
+    """
+    Parse a Cookie header according to RFC 6265 Section 5.4.
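+
+    For example, ``parse_cookie_header('a=1; path=/; b="2;3"')`` returns
+    morsels for ``a``, for a cookie literally named ``path``, and for ``b``,
+    whose quoted value keeps the embedded semicolon intact.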
+ + Cookie headers contain only name-value pairs separated by semicolons. + There are no attributes in Cookie headers - even names that match + attribute names (like 'path' or 'secure') should be treated as cookies. + + This parser uses the same regex-based approach as parse_set_cookie_headers + to properly handle quoted values that may contain semicolons. When the + regex fails to match a malformed cookie, it falls back to simple parsing + to ensure subsequent cookies are not lost + https://github.com/aio-libs/aiohttp/issues/11632 + + Args: + header: The Cookie header value to parse + + Returns: + List of (name, Morsel) tuples for compatibility with SimpleCookie.update() + """ + if not header: + return [] + + cookies: List[Tuple[str, Morsel[str]]] = [] + morsel: Morsel[str] + i = 0 + n = len(header) + + invalid_names = [] + while i < n: + # Use the same pattern as parse_set_cookie_headers to find cookies + match = _COOKIE_PATTERN.match(header, i) + if not match: + # Fallback for malformed cookies https://github.com/aio-libs/aiohttp/issues/11632 + # Find next semicolon to skip or attempt simple key=value parsing + next_semi = header.find(";", i) + eq_pos = header.find("=", i) + + # Try to extract key=value if '=' comes before ';' + if eq_pos != -1 and (next_semi == -1 or eq_pos < next_semi): + end_pos = next_semi if next_semi != -1 else n + key = header[i:eq_pos].strip() + value = header[eq_pos + 1 : end_pos].strip() + + # Validate the name (same as regex path) + if not _COOKIE_NAME_RE.match(key): + invalid_names.append(key) + else: + morsel = Morsel() + morsel.__setstate__( # type: ignore[attr-defined] + {"key": key, "value": _unquote(value), "coded_value": value} + ) + cookies.append((key, morsel)) + + # Move to next cookie or end + i = next_semi + 1 if next_semi != -1 else n + continue + + key = match.group("key") + value = match.group("val") or "" + i = match.end(0) + + # Validate the name + if not key or not _COOKIE_NAME_RE.match(key): + invalid_names.append(key) + continue + + # Create new morsel + morsel = Morsel() + # Preserve the original value as coded_value (with quotes if present) + # We use __setstate__ instead of the public set() API because it allows us to + # bypass validation and set already validated state. This is more stable than + # setting protected attributes directly and unlikely to change since it would + # break pickling. + morsel.__setstate__( # type: ignore[attr-defined] + {"key": key, "value": _unquote(value), "coded_value": value} + ) + + cookies.append((key, morsel)) + + if invalid_names: + internal_logger.debug( + "Cannot load cookie. Illegal cookie names: %r", invalid_names + ) + + return cookies + + +def parse_set_cookie_headers(headers: Sequence[str]) -> List[Tuple[str, Morsel[str]]]: + """ + Parse cookie headers using a vendored version of SimpleCookie parsing. + + This implementation is based on SimpleCookie.__parse_string to ensure + compatibility with how SimpleCookie parses cookies, including handling + of malformed cookies with missing semicolons. + + This function is used for both Cookie and Set-Cookie headers in order to be + forgiving. Ideally we would have followed RFC 6265 Section 5.2 (for Cookie + headers) and RFC 6265 Section 4.2.1 (for Set-Cookie headers), but the + real world data makes it impossible since we need to be a bit more forgiving. + + NOTE: This implementation differs from SimpleCookie in handling unmatched quotes. 
+ SimpleCookie will stop parsing when it encounters a cookie value with an unmatched + quote (e.g., 'cookie="value'), causing subsequent cookies to be silently dropped. + This implementation handles unmatched quotes more gracefully to prevent cookie loss. + See https://github.com/aio-libs/aiohttp/issues/7993 + """ + parsed_cookies: List[Tuple[str, Morsel[str]]] = [] + + for header in headers: + if not header: + continue + + # Parse cookie string using SimpleCookie's algorithm + i = 0 + n = len(header) + current_morsel: Optional[Morsel[str]] = None + morsel_seen = False + + while 0 <= i < n: + # Start looking for a cookie + match = _COOKIE_PATTERN.match(header, i) + if not match: + # No more cookies + break + + key, value = match.group("key"), match.group("val") + i = match.end(0) + lower_key = key.lower() + + if key[0] == "$": + if not morsel_seen: + # We ignore attributes which pertain to the cookie + # mechanism as a whole, such as "$Version". + continue + # Process as attribute + if current_morsel is not None: + attr_lower_key = lower_key[1:] + if attr_lower_key in _COOKIE_KNOWN_ATTRS: + current_morsel[attr_lower_key] = value or "" + elif lower_key in _COOKIE_KNOWN_ATTRS: + if not morsel_seen: + # Invalid cookie string - attribute before cookie + break + if lower_key in _COOKIE_BOOL_ATTRS: + # Boolean attribute with any value should be True + if current_morsel is not None and current_morsel.isReservedKey(key): + current_morsel[lower_key] = True + elif value is None: + # Invalid cookie string - non-boolean attribute without value + break + elif current_morsel is not None: + # Regular attribute with value + current_morsel[lower_key] = _unquote(value) + elif value is not None: + # This is a cookie name=value pair + # Validate the name + if key in _COOKIE_KNOWN_ATTRS or not _COOKIE_NAME_RE.match(key): + internal_logger.warning( + "Can not load cookies: Illegal cookie name %r", key + ) + current_morsel = None + else: + # Create new morsel + current_morsel = Morsel() + # Preserve the original value as coded_value (with quotes if present) + # We use __setstate__ instead of the public set() API because it allows us to + # bypass validation and set already validated state. This is more stable than + # setting protected attributes directly and unlikely to change since it would + # break pickling. 
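+                    # For instance, given 'sid="a b"; Path=/', the morsel's
+                    # value becomes 'a b' while coded_value keeps '"a b"'
+                    # exactly as the server sent it.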
+ current_morsel.__setstate__( # type: ignore[attr-defined] + {"key": key, "value": _unquote(value), "coded_value": value} + ) + parsed_cookies.append((key, current_morsel)) + morsel_seen = True + else: + # Invalid cookie string - no value for non-attribute + break + + return parsed_cookies diff --git a/py311/lib/python3.11/site-packages/aiohttp/_cparser.pxd b/py311/lib/python3.11/site-packages/aiohttp/_cparser.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1b3be6d4efb682bca9397da34f8e727b381bc84f --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/_cparser.pxd @@ -0,0 +1,158 @@ +from libc.stdint cimport int32_t, uint8_t, uint16_t, uint64_t + + +cdef extern from "llhttp.h": + + struct llhttp__internal_s: + int32_t _index + void* _span_pos0 + void* _span_cb0 + int32_t error + const char* reason + const char* error_pos + void* data + void* _current + uint64_t content_length + uint8_t type + uint8_t method + uint8_t http_major + uint8_t http_minor + uint8_t header_state + uint8_t lenient_flags + uint8_t upgrade + uint8_t finish + uint16_t flags + uint16_t status_code + void* settings + + ctypedef llhttp__internal_s llhttp__internal_t + ctypedef llhttp__internal_t llhttp_t + + ctypedef int (*llhttp_data_cb)(llhttp_t*, const char *at, size_t length) except -1 + ctypedef int (*llhttp_cb)(llhttp_t*) except -1 + + struct llhttp_settings_s: + llhttp_cb on_message_begin + llhttp_data_cb on_url + llhttp_data_cb on_status + llhttp_data_cb on_header_field + llhttp_data_cb on_header_value + llhttp_cb on_headers_complete + llhttp_data_cb on_body + llhttp_cb on_message_complete + llhttp_cb on_chunk_header + llhttp_cb on_chunk_complete + + llhttp_cb on_url_complete + llhttp_cb on_status_complete + llhttp_cb on_header_field_complete + llhttp_cb on_header_value_complete + + ctypedef llhttp_settings_s llhttp_settings_t + + enum llhttp_errno: + HPE_OK, + HPE_INTERNAL, + HPE_STRICT, + HPE_LF_EXPECTED, + HPE_UNEXPECTED_CONTENT_LENGTH, + HPE_CLOSED_CONNECTION, + HPE_INVALID_METHOD, + HPE_INVALID_URL, + HPE_INVALID_CONSTANT, + HPE_INVALID_VERSION, + HPE_INVALID_HEADER_TOKEN, + HPE_INVALID_CONTENT_LENGTH, + HPE_INVALID_CHUNK_SIZE, + HPE_INVALID_STATUS, + HPE_INVALID_EOF_STATE, + HPE_INVALID_TRANSFER_ENCODING, + HPE_CB_MESSAGE_BEGIN, + HPE_CB_HEADERS_COMPLETE, + HPE_CB_MESSAGE_COMPLETE, + HPE_CB_CHUNK_HEADER, + HPE_CB_CHUNK_COMPLETE, + HPE_PAUSED, + HPE_PAUSED_UPGRADE, + HPE_USER + + ctypedef llhttp_errno llhttp_errno_t + + enum llhttp_flags: + F_CHUNKED, + F_CONTENT_LENGTH + + enum llhttp_type: + HTTP_REQUEST, + HTTP_RESPONSE, + HTTP_BOTH + + enum llhttp_method: + HTTP_DELETE, + HTTP_GET, + HTTP_HEAD, + HTTP_POST, + HTTP_PUT, + HTTP_CONNECT, + HTTP_OPTIONS, + HTTP_TRACE, + HTTP_COPY, + HTTP_LOCK, + HTTP_MKCOL, + HTTP_MOVE, + HTTP_PROPFIND, + HTTP_PROPPATCH, + HTTP_SEARCH, + HTTP_UNLOCK, + HTTP_BIND, + HTTP_REBIND, + HTTP_UNBIND, + HTTP_ACL, + HTTP_REPORT, + HTTP_MKACTIVITY, + HTTP_CHECKOUT, + HTTP_MERGE, + HTTP_MSEARCH, + HTTP_NOTIFY, + HTTP_SUBSCRIBE, + HTTP_UNSUBSCRIBE, + HTTP_PATCH, + HTTP_PURGE, + HTTP_MKCALENDAR, + HTTP_LINK, + HTTP_UNLINK, + HTTP_SOURCE, + HTTP_PRI, + HTTP_DESCRIBE, + HTTP_ANNOUNCE, + HTTP_SETUP, + HTTP_PLAY, + HTTP_PAUSE, + HTTP_TEARDOWN, + HTTP_GET_PARAMETER, + HTTP_SET_PARAMETER, + HTTP_REDIRECT, + HTTP_RECORD, + HTTP_FLUSH + + ctypedef llhttp_method llhttp_method_t; + + void llhttp_settings_init(llhttp_settings_t* settings) + void llhttp_init(llhttp_t* parser, llhttp_type type, + const llhttp_settings_t* settings) + + llhttp_errno_t 
llhttp_execute(llhttp_t* parser, const char* data, size_t len) + + int llhttp_should_keep_alive(const llhttp_t* parser) + + void llhttp_resume_after_upgrade(llhttp_t* parser) + + llhttp_errno_t llhttp_get_errno(const llhttp_t* parser) + const char* llhttp_get_error_reason(const llhttp_t* parser) + const char* llhttp_get_error_pos(const llhttp_t* parser) + + const char* llhttp_method_name(llhttp_method_t method) + + void llhttp_set_lenient_headers(llhttp_t* parser, int enabled) + void llhttp_set_lenient_optional_cr_before_lf(llhttp_t* parser, int enabled) + void llhttp_set_lenient_spaces_after_chunk_size(llhttp_t* parser, int enabled) diff --git a/py311/lib/python3.11/site-packages/aiohttp/_find_header.pxd b/py311/lib/python3.11/site-packages/aiohttp/_find_header.pxd new file mode 100644 index 0000000000000000000000000000000000000000..37a6c37268ee30b182fd77d109688d35d5577c7f --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/_find_header.pxd @@ -0,0 +1,2 @@ +cdef extern from "_find_header.h": + int find_header(char *, int) diff --git a/py311/lib/python3.11/site-packages/aiohttp/_headers.pxi b/py311/lib/python3.11/site-packages/aiohttp/_headers.pxi new file mode 100644 index 0000000000000000000000000000000000000000..3744721d4786a6c79b90aa349c8d02fa66204ecc --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/_headers.pxi @@ -0,0 +1,83 @@ +# The file is autogenerated from aiohttp/hdrs.py +# Run ./tools/gen.py to update it after the origin changing. + +from . import hdrs +cdef tuple headers = ( + hdrs.ACCEPT, + hdrs.ACCEPT_CHARSET, + hdrs.ACCEPT_ENCODING, + hdrs.ACCEPT_LANGUAGE, + hdrs.ACCEPT_RANGES, + hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS, + hdrs.ACCESS_CONTROL_ALLOW_HEADERS, + hdrs.ACCESS_CONTROL_ALLOW_METHODS, + hdrs.ACCESS_CONTROL_ALLOW_ORIGIN, + hdrs.ACCESS_CONTROL_EXPOSE_HEADERS, + hdrs.ACCESS_CONTROL_MAX_AGE, + hdrs.ACCESS_CONTROL_REQUEST_HEADERS, + hdrs.ACCESS_CONTROL_REQUEST_METHOD, + hdrs.AGE, + hdrs.ALLOW, + hdrs.AUTHORIZATION, + hdrs.CACHE_CONTROL, + hdrs.CONNECTION, + hdrs.CONTENT_DISPOSITION, + hdrs.CONTENT_ENCODING, + hdrs.CONTENT_LANGUAGE, + hdrs.CONTENT_LENGTH, + hdrs.CONTENT_LOCATION, + hdrs.CONTENT_MD5, + hdrs.CONTENT_RANGE, + hdrs.CONTENT_TRANSFER_ENCODING, + hdrs.CONTENT_TYPE, + hdrs.COOKIE, + hdrs.DATE, + hdrs.DESTINATION, + hdrs.DIGEST, + hdrs.ETAG, + hdrs.EXPECT, + hdrs.EXPIRES, + hdrs.FORWARDED, + hdrs.FROM, + hdrs.HOST, + hdrs.IF_MATCH, + hdrs.IF_MODIFIED_SINCE, + hdrs.IF_NONE_MATCH, + hdrs.IF_RANGE, + hdrs.IF_UNMODIFIED_SINCE, + hdrs.KEEP_ALIVE, + hdrs.LAST_EVENT_ID, + hdrs.LAST_MODIFIED, + hdrs.LINK, + hdrs.LOCATION, + hdrs.MAX_FORWARDS, + hdrs.ORIGIN, + hdrs.PRAGMA, + hdrs.PROXY_AUTHENTICATE, + hdrs.PROXY_AUTHORIZATION, + hdrs.RANGE, + hdrs.REFERER, + hdrs.RETRY_AFTER, + hdrs.SEC_WEBSOCKET_ACCEPT, + hdrs.SEC_WEBSOCKET_EXTENSIONS, + hdrs.SEC_WEBSOCKET_KEY, + hdrs.SEC_WEBSOCKET_KEY1, + hdrs.SEC_WEBSOCKET_PROTOCOL, + hdrs.SEC_WEBSOCKET_VERSION, + hdrs.SERVER, + hdrs.SET_COOKIE, + hdrs.TE, + hdrs.TRAILER, + hdrs.TRANSFER_ENCODING, + hdrs.URI, + hdrs.UPGRADE, + hdrs.USER_AGENT, + hdrs.VARY, + hdrs.VIA, + hdrs.WWW_AUTHENTICATE, + hdrs.WANT_DIGEST, + hdrs.WARNING, + hdrs.X_FORWARDED_FOR, + hdrs.X_FORWARDED_HOST, + hdrs.X_FORWARDED_PROTO, +) diff --git a/py311/lib/python3.11/site-packages/aiohttp/_http_parser.pyx b/py311/lib/python3.11/site-packages/aiohttp/_http_parser.pyx new file mode 100644 index 0000000000000000000000000000000000000000..4a7101edbcb1a7f938f8356775bc73ceca8fadf3 --- /dev/null +++ 
b/py311/lib/python3.11/site-packages/aiohttp/_http_parser.pyx
@@ -0,0 +1,835 @@
+# Based on https://github.com/MagicStack/httptools
+#
+
+from cpython cimport (
+    Py_buffer,
+    PyBUF_SIMPLE,
+    PyBuffer_Release,
+    PyBytes_AsString,
+    PyBytes_AsStringAndSize,
+    PyObject_GetBuffer,
+)
+from cpython.mem cimport PyMem_Free, PyMem_Malloc
+from libc.limits cimport ULLONG_MAX
+from libc.string cimport memcpy
+
+from multidict import CIMultiDict as _CIMultiDict, CIMultiDictProxy as _CIMultiDictProxy
+from yarl import URL as _URL
+
+from aiohttp import hdrs
+from aiohttp.helpers import DEBUG, set_exception
+
+from .http_exceptions import (
+    BadHttpMessage,
+    BadHttpMethod,
+    BadStatusLine,
+    ContentLengthError,
+    InvalidHeader,
+    InvalidURLError,
+    LineTooLong,
+    PayloadEncodingError,
+    TransferEncodingError,
+)
+from .http_parser import DeflateBuffer as _DeflateBuffer
+from .http_writer import (
+    HttpVersion as _HttpVersion,
+    HttpVersion10 as _HttpVersion10,
+    HttpVersion11 as _HttpVersion11,
+)
+from .streams import EMPTY_PAYLOAD as _EMPTY_PAYLOAD, StreamReader as _StreamReader
+
+cimport cython
+
+from aiohttp cimport _cparser as cparser
+
+include "_headers.pxi"
+
+from aiohttp cimport _find_header
+
+ALLOWED_UPGRADES = frozenset({"websocket"})
+DEF DEFAULT_FREELIST_SIZE = 250
+
+cdef extern from "Python.h":
+    int PyByteArray_Resize(object, Py_ssize_t) except -1
+    Py_ssize_t PyByteArray_Size(object) except -1
+    char* PyByteArray_AsString(object)
+
+__all__ = ('HttpRequestParser', 'HttpResponseParser',
+           'RawRequestMessage', 'RawResponseMessage')
+
+cdef object URL = _URL
+cdef object URL_build = URL.build
+cdef object CIMultiDict = _CIMultiDict
+cdef object CIMultiDictProxy = _CIMultiDictProxy
+cdef object HttpVersion = _HttpVersion
+cdef object HttpVersion10 = _HttpVersion10
+cdef object HttpVersion11 = _HttpVersion11
+cdef object SEC_WEBSOCKET_KEY1 = hdrs.SEC_WEBSOCKET_KEY1
+cdef object CONTENT_ENCODING = hdrs.CONTENT_ENCODING
+cdef object EMPTY_PAYLOAD = _EMPTY_PAYLOAD
+cdef object StreamReader = _StreamReader
+cdef object DeflateBuffer = _DeflateBuffer
+cdef bytes EMPTY_BYTES = b""
+
+cdef inline object extend(object buf, const char* at, size_t length):
+    cdef Py_ssize_t s
+    cdef char* ptr
+    s = PyByteArray_Size(buf)
+    PyByteArray_Resize(buf, s + length)
+    ptr = PyByteArray_AsString(buf)
+    memcpy(ptr + s, at, length)
+
+
+DEF METHODS_COUNT = 46;
+
+cdef list _http_method = []
+
+for i in range(METHODS_COUNT):
+    _http_method.append(
+        cparser.llhttp_method_name(<cparser.llhttp_method_t> i).decode('ascii'))
+
+
+cdef inline str http_method_str(int i):
+    if i < METHODS_COUNT:
+        return _http_method[i]
+    else:
+        return "<unknown>"
+
+cdef inline object find_header(bytes raw_header):
+    cdef Py_ssize_t size
+    cdef char *buf
+    cdef int idx
+    PyBytes_AsStringAndSize(raw_header, &buf, &size)
+    idx = _find_header.find_header(buf, size)
+    if idx == -1:
+        return raw_header.decode('utf-8', 'surrogateescape')
+    return headers[idx]
+
+
+@cython.freelist(DEFAULT_FREELIST_SIZE)
+cdef class RawRequestMessage:
+    cdef readonly str method
+    cdef readonly str path
+    cdef readonly object version  # HttpVersion
+    cdef readonly object headers  # CIMultiDict
+    cdef readonly object raw_headers  # tuple
+    cdef readonly object should_close
+    cdef readonly object compression
+    cdef readonly object upgrade
+    cdef readonly object chunked
+    cdef readonly object url  # yarl.URL
+
+    def __init__(self, method, path, version, headers, raw_headers,
+                 should_close, compression, upgrade, chunked, url):
+        self.method = method
+        self.path = path
+        self.version = version
+        self.headers = headers
+        self.raw_headers = raw_headers
+        self.should_close = should_close
+        self.compression = compression
+        self.upgrade = upgrade
+        self.chunked = chunked
+        self.url = url
+
+    def __repr__(self):
+        info = []
+        info.append(("method", self.method))
+        info.append(("path", self.path))
+        info.append(("version", self.version))
+        info.append(("headers", self.headers))
+        info.append(("raw_headers", self.raw_headers))
+        info.append(("should_close", self.should_close))
+        info.append(("compression", self.compression))
+        info.append(("upgrade", self.upgrade))
+        info.append(("chunked", self.chunked))
+        info.append(("url", self.url))
+        sinfo = ', '.join(name + '=' + repr(val) for name, val in info)
+        return '<RawRequestMessage(' + sinfo + ')>'
+
+    def _replace(self, **dct):
+        cdef RawRequestMessage ret
+        ret = _new_request_message(self.method,
+                                   self.path,
+                                   self.version,
+                                   self.headers,
+                                   self.raw_headers,
+                                   self.should_close,
+                                   self.compression,
+                                   self.upgrade,
+                                   self.chunked,
+                                   self.url)
+        if "method" in dct:
+            ret.method = dct["method"]
+        if "path" in dct:
+            ret.path = dct["path"]
+        if "version" in dct:
+            ret.version = dct["version"]
+        if "headers" in dct:
+            ret.headers = dct["headers"]
+        if "raw_headers" in dct:
+            ret.raw_headers = dct["raw_headers"]
+        if "should_close" in dct:
+            ret.should_close = dct["should_close"]
+        if "compression" in dct:
+            ret.compression = dct["compression"]
+        if "upgrade" in dct:
+            ret.upgrade = dct["upgrade"]
+        if "chunked" in dct:
+            ret.chunked = dct["chunked"]
+        if "url" in dct:
+            ret.url = dct["url"]
+        return ret
+
+cdef _new_request_message(str method,
+                          str path,
+                          object version,
+                          object headers,
+                          object raw_headers,
+                          bint should_close,
+                          object compression,
+                          bint upgrade,
+                          bint chunked,
+                          object url):
+    cdef RawRequestMessage ret
+    ret = RawRequestMessage.__new__(RawRequestMessage)
+    ret.method = method
+    ret.path = path
+    ret.version = version
+    ret.headers = headers
+    ret.raw_headers = raw_headers
+    ret.should_close = should_close
+    ret.compression = compression
+    ret.upgrade = upgrade
+    ret.chunked = chunked
+    ret.url = url
+    return ret
+
+
+@cython.freelist(DEFAULT_FREELIST_SIZE)
+cdef class RawResponseMessage:
+    cdef readonly object version  # HttpVersion
+    cdef readonly int code
+    cdef readonly str reason
+    cdef readonly object headers  # CIMultiDict
+    cdef readonly object raw_headers  # tuple
+    cdef readonly object should_close
+    cdef readonly object compression
+    cdef readonly object upgrade
+    cdef readonly object chunked
+
+    def __init__(self, version, code, reason, headers, raw_headers,
+                 should_close, compression, upgrade, chunked):
+        self.version = version
+        self.code = code
+        self.reason = reason
+        self.headers = headers
+        self.raw_headers = raw_headers
+        self.should_close = should_close
+        self.compression = compression
+        self.upgrade = upgrade
+        self.chunked = chunked
+
+    def __repr__(self):
+        info = []
+        info.append(("version", self.version))
+        info.append(("code", self.code))
+        info.append(("reason", self.reason))
+        info.append(("headers", self.headers))
+        info.append(("raw_headers", self.raw_headers))
+        info.append(("should_close", self.should_close))
+        info.append(("compression", self.compression))
+        info.append(("upgrade", self.upgrade))
+        info.append(("chunked", self.chunked))
+        sinfo = ', '.join(name + '=' + repr(val) for name, val in info)
+        return '<RawResponseMessage(' + sinfo + ')>'
+
+
+cdef _new_response_message(object version,
+                           int code,
+                           str reason,
+                           object headers,
+                           object raw_headers,
+                           bint should_close,
+                           object compression,
+                           bint upgrade,
+                           bint chunked):
+    cdef RawResponseMessage ret
+    ret = RawResponseMessage.__new__(RawResponseMessage)
+    ret.version = version
+    ret.code = code
+    ret.reason = reason
+    ret.headers = headers
+    ret.raw_headers = raw_headers
+    ret.should_close = should_close
+    ret.compression = compression
+    ret.upgrade = upgrade
+    ret.chunked = chunked
+    return ret
+
+
+@cython.internal
+cdef class HttpParser:
+
+    cdef:
+        cparser.llhttp_t* _cparser
+        cparser.llhttp_settings_t* _csettings
+
+        bytes _raw_name
+        object _name
+        bytes _raw_value
+        bint _has_value
+
+        object _protocol
+        object _loop
+        object _timer
+
+        size_t _max_line_size
+        size_t _max_field_size
+        size_t _max_headers
+        bint _response_with_body
+        bint _read_until_eof
+
+        bint _started
+        object _url
+        bytearray _buf
+        str _path
+        str _reason
+        list _headers
+        list _raw_headers
+        bint _upgraded
+        list _messages
+        object _payload
+        bint _payload_error
+        object _payload_exception
+        object _last_error
+        bint _auto_decompress
+        int _limit
+
+        str _content_encoding
+
+        Py_buffer py_buf
+
+    def __cinit__(self):
+        self._cparser = <cparser.llhttp_t*> \
+            PyMem_Malloc(sizeof(cparser.llhttp_t))
+        if self._cparser is NULL:
+            raise MemoryError()
+
+        self._csettings = <cparser.llhttp_settings_t*> \
+            PyMem_Malloc(sizeof(cparser.llhttp_settings_t))
+        if self._csettings is NULL:
+            raise MemoryError()
+
+    def __dealloc__(self):
+        PyMem_Free(self._cparser)
+        PyMem_Free(self._csettings)
+
+    cdef _init(
+        self, cparser.llhttp_type mode,
+        object protocol, object loop, int limit,
+        object timer=None,
+        size_t max_line_size=8190, size_t max_headers=32768,
+        size_t max_field_size=8190, payload_exception=None,
+        bint response_with_body=True, bint read_until_eof=False,
+        bint auto_decompress=True,
+    ):
+        cparser.llhttp_settings_init(self._csettings)
+        cparser.llhttp_init(self._cparser, mode, self._csettings)
+        self._cparser.data = <void*>self
+        self._cparser.content_length = 0
+
+        self._protocol = protocol
+        self._loop = loop
+        self._timer = timer
+
+        self._buf = bytearray()
+        self._payload = None
+        self._payload_error = 0
+        self._payload_exception = payload_exception
+        self._messages = []
+
+        self._raw_name = EMPTY_BYTES
+        self._raw_value = EMPTY_BYTES
+        self._has_value = False
+
+        self._max_line_size = max_line_size
+        self._max_headers = max_headers
+        self._max_field_size = max_field_size
+        self._response_with_body = response_with_body
+        self._read_until_eof = read_until_eof
+        self._upgraded = False
+        self._auto_decompress = auto_decompress
+        self._content_encoding = None
+
+        self._csettings.on_url = cb_on_url
+        self._csettings.on_status = cb_on_status
+        self._csettings.on_header_field = cb_on_header_field
+        self._csettings.on_header_value = cb_on_header_value
+        self._csettings.on_headers_complete = cb_on_headers_complete
+        self._csettings.on_body = cb_on_body
+        self._csettings.on_message_begin = cb_on_message_begin
+        self._csettings.on_message_complete = cb_on_message_complete
+        self._csettings.on_chunk_header = cb_on_chunk_header
+        self._csettings.on_chunk_complete = cb_on_chunk_complete
+
+        self._last_error = None
+        self._limit = limit
+
+    cdef _process_header(self):
+        cdef str value
+        if self._raw_name is not EMPTY_BYTES:
+            name = find_header(self._raw_name)
+            value = self._raw_value.decode('utf-8', 'surrogateescape')
+
+            self._headers.append((name, value))
+
+            if name is CONTENT_ENCODING:
+                self._content_encoding = value
+
+            self._has_value = False
+            self._raw_headers.append((self._raw_name, self._raw_value))
+            self._raw_name = EMPTY_BYTES
+            self._raw_value = EMPTY_BYTES
+
+    cdef _on_header_field(self, char* at, size_t length):
+        if
self._has_value: + self._process_header() + + if self._raw_name is EMPTY_BYTES: + self._raw_name = at[:length] + else: + self._raw_name += at[:length] + + cdef _on_header_value(self, char* at, size_t length): + if self._raw_value is EMPTY_BYTES: + self._raw_value = at[:length] + else: + self._raw_value += at[:length] + self._has_value = True + + cdef _on_headers_complete(self): + self._process_header() + + should_close = not cparser.llhttp_should_keep_alive(self._cparser) + upgrade = self._cparser.upgrade + chunked = self._cparser.flags & cparser.F_CHUNKED + + raw_headers = tuple(self._raw_headers) + headers = CIMultiDictProxy(CIMultiDict(self._headers)) + + if self._cparser.type == cparser.HTTP_REQUEST: + h_upg = headers.get("upgrade", "") + allowed = upgrade and h_upg.isascii() and h_upg.lower() in ALLOWED_UPGRADES + if allowed or self._cparser.method == cparser.HTTP_CONNECT: + self._upgraded = True + else: + if upgrade and self._cparser.status_code == 101: + self._upgraded = True + + # do not support old websocket spec + if SEC_WEBSOCKET_KEY1 in headers: + raise InvalidHeader(SEC_WEBSOCKET_KEY1) + + encoding = None + enc = self._content_encoding + if enc is not None: + self._content_encoding = None + if enc.isascii() and enc.lower() in {"gzip", "deflate", "br", "zstd"}: + encoding = enc + + if self._cparser.type == cparser.HTTP_REQUEST: + method = http_method_str(self._cparser.method) + msg = _new_request_message( + method, self._path, + self.http_version(), headers, raw_headers, + should_close, encoding, upgrade, chunked, self._url) + else: + msg = _new_response_message( + self.http_version(), self._cparser.status_code, self._reason, + headers, raw_headers, should_close, encoding, + upgrade, chunked) + + if ( + ULLONG_MAX > self._cparser.content_length > 0 or chunked or + self._cparser.method == cparser.HTTP_CONNECT or + (self._cparser.status_code >= 199 and + self._cparser.content_length == 0 and + self._read_until_eof) + ): + payload = StreamReader( + self._protocol, timer=self._timer, loop=self._loop, + limit=self._limit) + else: + payload = EMPTY_PAYLOAD + + self._payload = payload + if encoding is not None and self._auto_decompress: + self._payload = DeflateBuffer(payload, encoding) + + if not self._response_with_body: + payload = EMPTY_PAYLOAD + + self._messages.append((msg, payload)) + + cdef _on_message_complete(self): + self._payload.feed_eof() + self._payload = None + + cdef _on_chunk_header(self): + self._payload.begin_http_chunk_receiving() + + cdef _on_chunk_complete(self): + self._payload.end_http_chunk_receiving() + + cdef object _on_status_complete(self): + pass + + cdef inline http_version(self): + cdef cparser.llhttp_t* parser = self._cparser + + if parser.http_major == 1: + if parser.http_minor == 0: + return HttpVersion10 + elif parser.http_minor == 1: + return HttpVersion11 + + return HttpVersion(parser.http_major, parser.http_minor) + + ### Public API ### + + def feed_eof(self): + cdef bytes desc + + if self._payload is not None: + if self._cparser.flags & cparser.F_CHUNKED: + raise TransferEncodingError( + "Not enough data to satisfy transfer length header.") + elif self._cparser.flags & cparser.F_CONTENT_LENGTH: + raise ContentLengthError( + "Not enough data to satisfy content length header.") + elif cparser.llhttp_get_errno(self._cparser) != cparser.HPE_OK: + desc = cparser.llhttp_get_error_reason(self._cparser) + raise PayloadEncodingError(desc.decode('latin-1')) + else: + self._payload.feed_eof() + elif self._started: + self._on_headers_complete() + if 
self._messages:
+                return self._messages[-1][0]
+
+    def feed_data(self, data):
+        cdef:
+            size_t data_len
+            size_t nb
+        cdef cparser.llhttp_errno_t errno
+
+        PyObject_GetBuffer(data, &self.py_buf, PyBUF_SIMPLE)
+        data_len = <size_t>self.py_buf.len
+
+        errno = cparser.llhttp_execute(
+            self._cparser,
+            <char*>self.py_buf.buf,
+            data_len)
+
+        if errno is cparser.HPE_PAUSED_UPGRADE:
+            cparser.llhttp_resume_after_upgrade(self._cparser)
+
+            nb = cparser.llhttp_get_error_pos(self._cparser) - <char*>self.py_buf.buf
+
+        PyBuffer_Release(&self.py_buf)
+
+        if errno not in (cparser.HPE_OK, cparser.HPE_PAUSED_UPGRADE):
+            if self._payload_error == 0:
+                if self._last_error is not None:
+                    ex = self._last_error
+                    self._last_error = None
+                else:
+                    after = cparser.llhttp_get_error_pos(self._cparser)
+                    before = data[:after - <char*>self.py_buf.buf]
+                    after_b = after.split(b"\r\n", 1)[0]
+                    before = before.rsplit(b"\r\n", 1)[-1]
+                    data = before + after_b
+                    pointer = " " * (len(repr(before))-1) + "^"
+                    ex = parser_error_from_errno(self._cparser, data, pointer)
+                self._payload = None
+                raise ex
+
+        if self._messages:
+            messages = self._messages
+            self._messages = []
+        else:
+            messages = ()
+
+        if self._upgraded:
+            return messages, True, data[nb:]
+        else:
+            return messages, False, b""
+
+    def set_upgraded(self, val):
+        self._upgraded = val
+
+
+cdef class HttpRequestParser(HttpParser):
+
+    def __init__(
+        self, protocol, loop, int limit, timer=None,
+        size_t max_line_size=8190, size_t max_headers=32768,
+        size_t max_field_size=8190, payload_exception=None,
+        bint response_with_body=True, bint read_until_eof=False,
+        bint auto_decompress=True,
+    ):
+        self._init(cparser.HTTP_REQUEST, protocol, loop, limit, timer,
+                   max_line_size, max_headers, max_field_size,
+                   payload_exception, response_with_body, read_until_eof,
+                   auto_decompress)
+
+    cdef object _on_status_complete(self):
+        cdef int idx1, idx2
+        if not self._buf:
+            return
+        self._path = self._buf.decode('utf-8', 'surrogateescape')
+        try:
+            idx3 = len(self._path)
+            if self._cparser.method == cparser.HTTP_CONNECT:
+                # authority-form,
+                # https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.3
+                self._url = URL.build(authority=self._path, encoded=True)
+            elif idx3 > 1 and self._path[0] == '/':
+                # origin-form,
+                # https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.1
+                idx1 = self._path.find("?")
+                if idx1 == -1:
+                    query = ""
+                    idx2 = self._path.find("#")
+                    if idx2 == -1:
+                        path = self._path
+                        fragment = ""
+                    else:
+                        path = self._path[0: idx2]
+                        fragment = self._path[idx2+1:]
+
+                else:
+                    path = self._path[0:idx1]
+                    idx1 += 1
+                    idx2 = self._path.find("#", idx1+1)
+                    if idx2 == -1:
+                        query = self._path[idx1:]
+                        fragment = ""
+                    else:
+                        query = self._path[idx1: idx2]
+                        fragment = self._path[idx2+1:]
+
+                self._url = URL.build(
+                    path=path,
+                    query_string=query,
+                    fragment=fragment,
+                    encoded=True,
+                )
+            else:
+                # absolute-form for proxy maybe,
+                # https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.2
+                self._url = URL(self._path, encoded=True)
+        finally:
+            PyByteArray_Resize(self._buf, 0)
+
+
+cdef class HttpResponseParser(HttpParser):
+
+    def __init__(
+        self, protocol, loop, int limit, timer=None,
+        size_t max_line_size=8190, size_t max_headers=32768,
+        size_t max_field_size=8190, payload_exception=None,
+        bint response_with_body=True, bint read_until_eof=False,
+        bint auto_decompress=True
+    ):
+        self._init(cparser.HTTP_RESPONSE, protocol, loop, limit, timer,
+                   max_line_size, max_headers, max_field_size,
+                   payload_exception, response_with_body, read_until_eof,
+                   auto_decompress)
+        # Use strict parsing on dev mode, so users are warned about broken servers.
+        if not DEBUG:
+            cparser.llhttp_set_lenient_headers(self._cparser, 1)
+            cparser.llhttp_set_lenient_optional_cr_before_lf(self._cparser, 1)
+            cparser.llhttp_set_lenient_spaces_after_chunk_size(self._cparser, 1)
+
+    cdef object _on_status_complete(self):
+        if self._buf:
+            self._reason = self._buf.decode('utf-8', 'surrogateescape')
+            PyByteArray_Resize(self._buf, 0)
+        else:
+            self._reason = self._reason or ''
+
+cdef int cb_on_message_begin(cparser.llhttp_t* parser) except -1:
+    cdef HttpParser pyparser = <HttpParser>parser.data
+
+    pyparser._started = True
+    pyparser._headers = []
+    pyparser._raw_headers = []
+    PyByteArray_Resize(pyparser._buf, 0)
+    pyparser._path = None
+    pyparser._reason = None
+    return 0
+
+
+cdef int cb_on_url(cparser.llhttp_t* parser,
+                   const char *at, size_t length) except -1:
+    cdef HttpParser pyparser = <HttpParser>parser.data
+    try:
+        if length > pyparser._max_line_size:
+            raise LineTooLong(
+                'Status line is too long', pyparser._max_line_size, length)
+        extend(pyparser._buf, at, length)
+    except BaseException as ex:
+        pyparser._last_error = ex
+        return -1
+    else:
+        return 0
+
+
+cdef int cb_on_status(cparser.llhttp_t* parser,
+                      const char *at, size_t length) except -1:
+    cdef HttpParser pyparser = <HttpParser>parser.data
+    cdef str reason
+    try:
+        if length > pyparser._max_line_size:
+            raise LineTooLong(
+                'Status line is too long', pyparser._max_line_size, length)
+        extend(pyparser._buf, at, length)
+    except BaseException as ex:
+        pyparser._last_error = ex
+        return -1
+    else:
+        return 0
+
+
+cdef int cb_on_header_field(cparser.llhttp_t* parser,
+                            const char *at, size_t length) except -1:
+    cdef HttpParser pyparser = <HttpParser>parser.data
+    cdef Py_ssize_t size
+    try:
+        pyparser._on_status_complete()
+        size = len(pyparser._raw_name) + length
+        if size > pyparser._max_field_size:
+            raise LineTooLong(
+                'Header name is too long', pyparser._max_field_size, size)
+        pyparser._on_header_field(at, length)
+    except BaseException as ex:
+        pyparser._last_error = ex
+        return -1
+    else:
+        return 0
+
+
+cdef int cb_on_header_value(cparser.llhttp_t* parser,
+                            const char *at, size_t length) except -1:
+    cdef HttpParser pyparser = <HttpParser>parser.data
+    cdef Py_ssize_t size
+    try:
+        size = len(pyparser._raw_value) + length
+        if size > pyparser._max_field_size:
+            raise LineTooLong(
+                'Header value is too long', pyparser._max_field_size, size)
+        pyparser._on_header_value(at, length)
+    except BaseException as ex:
+        pyparser._last_error = ex
+        return -1
+    else:
+        return 0
+
+
+cdef int cb_on_headers_complete(cparser.llhttp_t* parser) except -1:
+    cdef HttpParser pyparser = <HttpParser>parser.data
+    try:
+        pyparser._on_status_complete()
+        pyparser._on_headers_complete()
+    except BaseException as exc:
+        pyparser._last_error = exc
+        return -1
+    else:
+        if pyparser._upgraded or pyparser._cparser.method == cparser.HTTP_CONNECT:
+            return 2
+        else:
+            return 0
+
+
+cdef int cb_on_body(cparser.llhttp_t* parser,
+                    const char *at, size_t length) except -1:
+    cdef HttpParser pyparser = <HttpParser>parser.data
+    cdef bytes body = at[:length]
+    try:
+        pyparser._payload.feed_data(body, length)
+    except BaseException as underlying_exc:
+        reraised_exc = underlying_exc
+        if pyparser._payload_exception is not None:
+            reraised_exc = pyparser._payload_exception(str(underlying_exc))
+
+        set_exception(pyparser._payload, reraised_exc, underlying_exc)
+
+        pyparser._payload_error = 1
+        return -1
+    else:
+        return 0
+
+
+cdef int cb_on_message_complete(cparser.llhttp_t* parser) except -1:
+    cdef HttpParser pyparser = <HttpParser>parser.data
+    try:
+        pyparser._started = False
+        pyparser._on_message_complete()
+    except BaseException as exc:
+        pyparser._last_error = exc
+        return -1
+    else:
+        return 0
+
+
+cdef int cb_on_chunk_header(cparser.llhttp_t* parser) except -1:
+    cdef HttpParser pyparser = <HttpParser>parser.data
+    try:
+        pyparser._on_chunk_header()
+    except BaseException as exc:
+        pyparser._last_error = exc
+        return -1
+    else:
+        return 0
+
+
+cdef int cb_on_chunk_complete(cparser.llhttp_t* parser) except -1:
+    cdef HttpParser pyparser = <HttpParser>parser.data
+    try:
+        pyparser._on_chunk_complete()
+    except BaseException as exc:
+        pyparser._last_error = exc
+        return -1
+    else:
+        return 0
+
+
+cdef parser_error_from_errno(cparser.llhttp_t* parser, data, pointer):
+    cdef cparser.llhttp_errno_t errno = cparser.llhttp_get_errno(parser)
+    cdef bytes desc = cparser.llhttp_get_error_reason(parser)
+
+    err_msg = "{}:\n\n  {!r}\n  {}".format(desc.decode("latin-1"), data, pointer)
+
+    if errno in {cparser.HPE_CB_MESSAGE_BEGIN,
+                 cparser.HPE_CB_HEADERS_COMPLETE,
+                 cparser.HPE_CB_MESSAGE_COMPLETE,
+                 cparser.HPE_CB_CHUNK_HEADER,
+                 cparser.HPE_CB_CHUNK_COMPLETE,
+                 cparser.HPE_INVALID_CONSTANT,
+                 cparser.HPE_INVALID_HEADER_TOKEN,
+                 cparser.HPE_INVALID_CONTENT_LENGTH,
+                 cparser.HPE_INVALID_CHUNK_SIZE,
+                 cparser.HPE_INVALID_EOF_STATE,
+                 cparser.HPE_INVALID_TRANSFER_ENCODING}:
+        return BadHttpMessage(err_msg)
+    elif errno == cparser.HPE_INVALID_METHOD:
+        return BadHttpMethod(error=err_msg)
+    elif errno in {cparser.HPE_INVALID_STATUS,
+                   cparser.HPE_INVALID_VERSION}:
+        return BadStatusLine(error=err_msg)
+    elif errno == cparser.HPE_INVALID_URL:
+        return InvalidURLError(err_msg)
+
+    return BadHttpMessage(err_msg)
diff --git a/py311/lib/python3.11/site-packages/aiohttp/_http_writer.pyx b/py311/lib/python3.11/site-packages/aiohttp/_http_writer.pyx
new file mode 100644
index 0000000000000000000000000000000000000000..7989c186c89296e7fa9e336bdad447d27e6f3416
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/aiohttp/_http_writer.pyx
@@ -0,0 +1,162 @@
+from cpython.bytes cimport PyBytes_FromStringAndSize
+from cpython.exc cimport PyErr_NoMemory
+from cpython.mem cimport PyMem_Free, PyMem_Malloc, PyMem_Realloc
+from cpython.object cimport PyObject_Str
+from libc.stdint cimport uint8_t, uint64_t
+from libc.string cimport memcpy
+
+from multidict import istr
+
+DEF BUF_SIZE = 16 * 1024  # 16KiB
+
+cdef object _istr = istr
+
+
+# ----------------- writer ---------------------------
+
+cdef struct Writer:
+    char *buf
+    Py_ssize_t size
+    Py_ssize_t pos
+    bint heap_allocated
+
+cdef inline void _init_writer(Writer* writer, char *buf):
+    writer.buf = buf
+    writer.size = BUF_SIZE
+    writer.pos = 0
+    writer.heap_allocated = 0
+
+
+cdef inline void _release_writer(Writer* writer):
+    if writer.heap_allocated:
+        PyMem_Free(writer.buf)
+
+
+cdef inline int _write_byte(Writer* writer, uint8_t ch):
+    cdef char * buf
+    cdef Py_ssize_t size
+
+    if writer.pos == writer.size:
+        # reallocate
+        size = writer.size + BUF_SIZE
+        if not writer.heap_allocated:
+            buf = <char*>PyMem_Malloc(size)
+            if buf == NULL:
+                PyErr_NoMemory()
+                return -1
+            memcpy(buf, writer.buf, writer.size)
+        else:
+            buf = <char*>PyMem_Realloc(writer.buf, size)
+            if buf == NULL:
+                PyErr_NoMemory()
+                return -1
+        writer.buf = buf
+        writer.size = size
+        writer.heap_allocated = 1
+    writer.buf[writer.pos] = ch
+    writer.pos += 1
+    return 0
+
+
+cdef inline int _write_utf8(Writer* writer, Py_UCS4 symbol):
+    cdef uint64_t utf = <uint64_t>symbol
+
+    if utf < 0x80:
+        return _write_byte(writer, <uint8_t>utf)
+    elif utf < 0x800:
+        if _write_byte(writer, <uint8_t>(0xc0 | (utf >> 6))) < 0:
+            return -1
+        return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+    elif 0xD800 <= utf <= 0xDFFF:
+        # surrogate pair, ignored
+        return 0
+    elif utf < 0x10000:
+        if _write_byte(writer, <uint8_t>(0xe0 | (utf >> 12))) < 0:
+            return -1
+        if _write_byte(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+            return -1
+        return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+    elif utf > 0x10FFFF:
+        # symbol is too large
+        return 0
+    else:
+        if _write_byte(writer, <uint8_t>(0xf0 | (utf >> 18))) < 0:
+            return -1
+        if _write_byte(writer,
+                       <uint8_t>(0x80 | ((utf >> 12) & 0x3f))) < 0:
+            return -1
+        if _write_byte(writer,
+                       <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+            return -1
+        return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+
+
+cdef inline int _write_str(Writer* writer, str s):
+    cdef Py_UCS4 ch
+    for ch in s:
+        if _write_utf8(writer, ch) < 0:
+            return -1
+
+
+cdef inline int _write_str_raise_on_nlcr(Writer* writer, object s):
+    cdef Py_UCS4 ch
+    cdef str out_str
+    if type(s) is str:
+        out_str = <str>s
+    elif type(s) is _istr:
+        out_str = PyObject_Str(s)
+    elif not isinstance(s, str):
+        raise TypeError("Cannot serialize non-str key {!r}".format(s))
+    else:
+        out_str = str(s)
+
+    for ch in out_str:
+        if ch == 0x0D or ch == 0x0A:
+            raise ValueError(
+                "Newline or carriage return detected in headers. "
+                "Potential header injection attack."
+            )
+        if _write_utf8(writer, ch) < 0:
+            return -1
+
+
+# --------------- _serialize_headers ----------------------
+
+def _serialize_headers(str status_line, headers):
+    cdef Writer writer
+    cdef object key
+    cdef object val
+    cdef char buf[BUF_SIZE]
+
+    _init_writer(&writer, buf)
+
+    try:
+        if _write_str(&writer, status_line) < 0:
+            raise
+        if _write_byte(&writer, b'\r') < 0:
+            raise
+        if _write_byte(&writer, b'\n') < 0:
+            raise
+
+        for key, val in headers.items():
+            if _write_str_raise_on_nlcr(&writer, key) < 0:
+                raise
+            if _write_byte(&writer, b':') < 0:
+                raise
+            if _write_byte(&writer, b' ') < 0:
+                raise
+            if _write_str_raise_on_nlcr(&writer, val) < 0:
+                raise
+            if _write_byte(&writer, b'\r') < 0:
+                raise
+            if _write_byte(&writer, b'\n') < 0:
+                raise
+
+        if _write_byte(&writer, b'\r') < 0:
+            raise
+        if _write_byte(&writer, b'\n') < 0:
+            raise
+
+        return PyBytes_FromStringAndSize(writer.buf, writer.pos)
+    finally:
+        _release_writer(&writer)
diff --git a/py311/lib/python3.11/site-packages/aiohttp/abc.py b/py311/lib/python3.11/site-packages/aiohttp/abc.py
new file mode 100644
index 0000000000000000000000000000000000000000..faf09575afbb2d6842ad639dada0fdebc2e28945
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/aiohttp/abc.py
@@ -0,0 +1,268 @@
+import asyncio
+import logging
+import socket
+from abc import ABC, abstractmethod
+from collections.abc import Sized
+from http.cookies import BaseCookie, Morsel
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Awaitable,
+    Callable,
+    Dict,
+    Generator,
+    Iterable,
+    List,
+    Optional,
+    Sequence,
+    Tuple,
+    TypedDict,
+    Union,
+)
+
+from multidict import CIMultiDict
+from yarl import URL
+
+from ._cookie_helpers import parse_set_cookie_headers
+from .typedefs import LooseCookies
+
+if TYPE_CHECKING:
+    from .web_app import Application
+    from .web_exceptions import HTTPException
+    from .web_request import BaseRequest, Request
+    from .web_response import StreamResponse
+else:
+    BaseRequest = Request = Application = StreamResponse = None
+    HTTPException = None
+
+
+class AbstractRouter(ABC):
+    def __init__(self) -> None:
+        self._frozen = False
+
+    def post_init(self, app: Application) -> None:
+        """Post init stage.
+ + Not an abstract method for sake of backward compatibility, + but if the router wants to be aware of the application + it can override this. + """ + + @property + def frozen(self) -> bool: + return self._frozen + + def freeze(self) -> None: + """Freeze router.""" + self._frozen = True + + @abstractmethod + async def resolve(self, request: Request) -> "AbstractMatchInfo": + """Return MATCH_INFO for given request""" + + +class AbstractMatchInfo(ABC): + + __slots__ = () + + @property # pragma: no branch + @abstractmethod + def handler(self) -> Callable[[Request], Awaitable[StreamResponse]]: + """Execute matched request handler""" + + @property + @abstractmethod + def expect_handler( + self, + ) -> Callable[[Request], Awaitable[Optional[StreamResponse]]]: + """Expect handler for 100-continue processing""" + + @property # pragma: no branch + @abstractmethod + def http_exception(self) -> Optional[HTTPException]: + """HTTPException instance raised on router's resolving, or None""" + + @abstractmethod # pragma: no branch + def get_info(self) -> Dict[str, Any]: + """Return a dict with additional info useful for introspection""" + + @property # pragma: no branch + @abstractmethod + def apps(self) -> Tuple[Application, ...]: + """Stack of nested applications. + + Top level application is left-most element. + + """ + + @abstractmethod + def add_app(self, app: Application) -> None: + """Add application to the nested apps stack.""" + + @abstractmethod + def freeze(self) -> None: + """Freeze the match info. + + The method is called after route resolution. + + After the call .add_app() is forbidden. + + """ + + +class AbstractView(ABC): + """Abstract class based view.""" + + def __init__(self, request: Request) -> None: + self._request = request + + @property + def request(self) -> Request: + """Request instance.""" + return self._request + + @abstractmethod + def __await__(self) -> Generator[None, None, StreamResponse]: + """Execute the view handler.""" + + +class ResolveResult(TypedDict): + """Resolve result. + + This is the result returned from an AbstractResolver's + resolve method. + + :param hostname: The hostname that was provided. + :param host: The IP address that was resolved. + :param port: The port that was resolved. + :param family: The address family that was resolved. + :param proto: The protocol that was resolved. + :param flags: The flags that were resolved. 
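+
+    A typical IPv4 entry might look like this (the concrete values are
+    illustrative only)::
+
+        {"hostname": "example.com", "host": "93.184.216.34", "port": 443,
+         "family": socket.AF_INET, "proto": socket.IPPROTO_TCP,
+         "flags": socket.AI_NUMERICHOST}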
+ """ + + hostname: str + host: str + port: int + family: int + proto: int + flags: int + + +class AbstractResolver(ABC): + """Abstract DNS resolver.""" + + @abstractmethod + async def resolve( + self, host: str, port: int = 0, family: socket.AddressFamily = socket.AF_INET + ) -> List[ResolveResult]: + """Return IP address for given hostname""" + + @abstractmethod + async def close(self) -> None: + """Release resolver""" + + +if TYPE_CHECKING: + IterableBase = Iterable[Morsel[str]] +else: + IterableBase = Iterable + + +ClearCookiePredicate = Callable[["Morsel[str]"], bool] + + +class AbstractCookieJar(Sized, IterableBase): + """Abstract Cookie Jar.""" + + def __init__(self, *, loop: Optional[asyncio.AbstractEventLoop] = None) -> None: + self._loop = loop or asyncio.get_running_loop() + + @property + @abstractmethod + def quote_cookie(self) -> bool: + """Return True if cookies should be quoted.""" + + @abstractmethod + def clear(self, predicate: Optional[ClearCookiePredicate] = None) -> None: + """Clear all cookies if no predicate is passed.""" + + @abstractmethod + def clear_domain(self, domain: str) -> None: + """Clear all cookies for domain and all subdomains.""" + + @abstractmethod + def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None: + """Update cookies.""" + + def update_cookies_from_headers( + self, headers: Sequence[str], response_url: URL + ) -> None: + """Update cookies from raw Set-Cookie headers.""" + if headers and (cookies_to_update := parse_set_cookie_headers(headers)): + self.update_cookies(cookies_to_update, response_url) + + @abstractmethod + def filter_cookies(self, request_url: URL) -> "BaseCookie[str]": + """Return the jar's cookies filtered by their attributes.""" + + +class AbstractStreamWriter(ABC): + """Abstract stream writer.""" + + buffer_size: int = 0 + output_size: int = 0 + length: Optional[int] = 0 + + @abstractmethod + async def write(self, chunk: Union[bytes, bytearray, memoryview]) -> None: + """Write chunk into stream.""" + + @abstractmethod + async def write_eof(self, chunk: bytes = b"") -> None: + """Write last chunk.""" + + @abstractmethod + async def drain(self) -> None: + """Flush the write buffer.""" + + @abstractmethod + def enable_compression( + self, encoding: str = "deflate", strategy: Optional[int] = None + ) -> None: + """Enable HTTP body compression""" + + @abstractmethod + def enable_chunking(self) -> None: + """Enable HTTP chunked mode""" + + @abstractmethod + async def write_headers( + self, status_line: str, headers: "CIMultiDict[str]" + ) -> None: + """Write HTTP headers""" + + def send_headers(self) -> None: + """Force sending buffered headers if not already sent. + + Required only if write_headers() buffers headers instead of sending immediately. + For backwards compatibility, this method does nothing by default. 
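+
+        A writer that buffers headers could override it along these lines
+        (``_pending_headers`` and ``_write_raw`` are hypothetical names, not
+        part of this ABC)::
+
+            def send_headers(self) -> None:
+                # _pending_headers: hypothetical buffer set by write_headers()
+                if self._pending_headers is not None:
+                    self._write_raw(self._pending_headers)
+                    self._pending_headers = None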
+ """ + + +class AbstractAccessLogger(ABC): + """Abstract writer to access log.""" + + __slots__ = ("logger", "log_format") + + def __init__(self, logger: logging.Logger, log_format: str) -> None: + self.logger = logger + self.log_format = log_format + + @abstractmethod + def log(self, request: BaseRequest, response: StreamResponse, time: float) -> None: + """Emit log to logger.""" + + @property + def enabled(self) -> bool: + """Check if logger is enabled.""" + return True diff --git a/py311/lib/python3.11/site-packages/aiohttp/base_protocol.py b/py311/lib/python3.11/site-packages/aiohttp/base_protocol.py new file mode 100644 index 0000000000000000000000000000000000000000..b0a67ed6ff68ca5bc48be9ac472ee755369b2720 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/base_protocol.py @@ -0,0 +1,100 @@ +import asyncio +from typing import Optional, cast + +from .client_exceptions import ClientConnectionResetError +from .helpers import set_exception +from .tcp_helpers import tcp_nodelay + + +class BaseProtocol(asyncio.Protocol): + __slots__ = ( + "_loop", + "_paused", + "_drain_waiter", + "_connection_lost", + "_reading_paused", + "transport", + ) + + def __init__(self, loop: asyncio.AbstractEventLoop) -> None: + self._loop: asyncio.AbstractEventLoop = loop + self._paused = False + self._drain_waiter: Optional[asyncio.Future[None]] = None + self._reading_paused = False + + self.transport: Optional[asyncio.Transport] = None + + @property + def connected(self) -> bool: + """Return True if the connection is open.""" + return self.transport is not None + + @property + def writing_paused(self) -> bool: + return self._paused + + def pause_writing(self) -> None: + assert not self._paused + self._paused = True + + def resume_writing(self) -> None: + assert self._paused + self._paused = False + + waiter = self._drain_waiter + if waiter is not None: + self._drain_waiter = None + if not waiter.done(): + waiter.set_result(None) + + def pause_reading(self) -> None: + if not self._reading_paused and self.transport is not None: + try: + self.transport.pause_reading() + except (AttributeError, NotImplementedError, RuntimeError): + pass + self._reading_paused = True + + def resume_reading(self) -> None: + if self._reading_paused and self.transport is not None: + try: + self.transport.resume_reading() + except (AttributeError, NotImplementedError, RuntimeError): + pass + self._reading_paused = False + + def connection_made(self, transport: asyncio.BaseTransport) -> None: + tr = cast(asyncio.Transport, transport) + tcp_nodelay(tr, True) + self.transport = tr + + def connection_lost(self, exc: Optional[BaseException]) -> None: + # Wake up the writer if currently paused. 
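+        # The waiter below, if present, was created by _drain_helper();
+        # completing (or failing) it here unblocks any coroutine awaiting it.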
+ self.transport = None + if not self._paused: + return + waiter = self._drain_waiter + if waiter is None: + return + self._drain_waiter = None + if waiter.done(): + return + if exc is None: + waiter.set_result(None) + else: + set_exception( + waiter, + ConnectionError("Connection lost"), + exc, + ) + + async def _drain_helper(self) -> None: + if self.transport is None: + raise ClientConnectionResetError("Connection lost") + if not self._paused: + return + waiter = self._drain_waiter + if waiter is None: + waiter = self._loop.create_future() + self._drain_waiter = waiter + await asyncio.shield(waiter) diff --git a/py311/lib/python3.11/site-packages/aiohttp/client.py b/py311/lib/python3.11/site-packages/aiohttp/client.py new file mode 100644 index 0000000000000000000000000000000000000000..bc4ee17caf088e6148b72a5daa5e854a269732eb --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/client.py @@ -0,0 +1,1635 @@ +"""HTTP Client for asyncio.""" + +import asyncio +import base64 +import hashlib +import json +import os +import sys +import traceback +import warnings +from contextlib import suppress +from types import TracebackType +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Coroutine, + Final, + FrozenSet, + Generator, + Generic, + Iterable, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, + Type, + TypedDict, + TypeVar, + Union, +) + +import attr +from multidict import CIMultiDict, MultiDict, MultiDictProxy, istr +from yarl import URL + +from . import hdrs, http, payload +from ._websocket.reader import WebSocketDataQueue +from .abc import AbstractCookieJar +from .client_exceptions import ( + ClientConnectionError, + ClientConnectionResetError, + ClientConnectorCertificateError, + ClientConnectorDNSError, + ClientConnectorError, + ClientConnectorSSLError, + ClientError, + ClientHttpProxyError, + ClientOSError, + ClientPayloadError, + ClientProxyConnectionError, + ClientResponseError, + ClientSSLError, + ConnectionTimeoutError, + ContentTypeError, + InvalidURL, + InvalidUrlClientError, + InvalidUrlRedirectClientError, + NonHttpUrlClientError, + NonHttpUrlRedirectClientError, + RedirectClientError, + ServerConnectionError, + ServerDisconnectedError, + ServerFingerprintMismatch, + ServerTimeoutError, + SocketTimeoutError, + TooManyRedirects, + WSMessageTypeError, + WSServerHandshakeError, +) +from .client_middlewares import ClientMiddlewareType, build_client_middlewares +from .client_reqrep import ( + ClientRequest as ClientRequest, + ClientResponse as ClientResponse, + Fingerprint as Fingerprint, + RequestInfo as RequestInfo, + _merge_ssl_params, +) +from .client_ws import ( + DEFAULT_WS_CLIENT_TIMEOUT, + ClientWebSocketResponse as ClientWebSocketResponse, + ClientWSTimeout as ClientWSTimeout, +) +from .connector import ( + HTTP_AND_EMPTY_SCHEMA_SET, + BaseConnector as BaseConnector, + NamedPipeConnector as NamedPipeConnector, + TCPConnector as TCPConnector, + UnixConnector as UnixConnector, +) +from .cookiejar import CookieJar +from .helpers import ( + _SENTINEL, + DEBUG, + EMPTY_BODY_METHODS, + BasicAuth, + TimeoutHandle, + basicauth_from_netrc, + get_env_proxy_for_url, + netrc_from_env, + sentinel, + strip_auth_from_url, +) +from .http import WS_KEY, HttpVersion, WebSocketReader, WebSocketWriter +from .http_websocket import WSHandshakeError, ws_ext_gen, ws_ext_parse +from .tracing import Trace, TraceConfig +from .typedefs import JSONEncoder, LooseCookies, LooseHeaders, Query, StrOrURL + +__all__ = ( + # client_exceptions + "ClientConnectionError", + 
"ClientConnectionResetError", + "ClientConnectorCertificateError", + "ClientConnectorDNSError", + "ClientConnectorError", + "ClientConnectorSSLError", + "ClientError", + "ClientHttpProxyError", + "ClientOSError", + "ClientPayloadError", + "ClientProxyConnectionError", + "ClientResponseError", + "ClientSSLError", + "ConnectionTimeoutError", + "ContentTypeError", + "InvalidURL", + "InvalidUrlClientError", + "RedirectClientError", + "NonHttpUrlClientError", + "InvalidUrlRedirectClientError", + "NonHttpUrlRedirectClientError", + "ServerConnectionError", + "ServerDisconnectedError", + "ServerFingerprintMismatch", + "ServerTimeoutError", + "SocketTimeoutError", + "TooManyRedirects", + "WSServerHandshakeError", + # client_reqrep + "ClientRequest", + "ClientResponse", + "Fingerprint", + "RequestInfo", + # connector + "BaseConnector", + "TCPConnector", + "UnixConnector", + "NamedPipeConnector", + # client_ws + "ClientWebSocketResponse", + # client + "ClientSession", + "ClientTimeout", + "ClientWSTimeout", + "request", + "WSMessageTypeError", +) + + +if TYPE_CHECKING: + from ssl import SSLContext +else: + SSLContext = None + +if sys.version_info >= (3, 11) and TYPE_CHECKING: + from typing import Unpack + + +class _RequestOptions(TypedDict, total=False): + params: Query + data: Any + json: Any + cookies: Union[LooseCookies, None] + headers: Union[LooseHeaders, None] + skip_auto_headers: Union[Iterable[str], None] + auth: Union[BasicAuth, None] + allow_redirects: bool + max_redirects: int + compress: Union[str, bool, None] + chunked: Union[bool, None] + expect100: bool + raise_for_status: Union[None, bool, Callable[[ClientResponse], Awaitable[None]]] + read_until_eof: bool + proxy: Union[StrOrURL, None] + proxy_auth: Union[BasicAuth, None] + timeout: "Union[ClientTimeout, _SENTINEL, None]" + ssl: Union[SSLContext, bool, Fingerprint] + server_hostname: Union[str, None] + proxy_headers: Union[LooseHeaders, None] + trace_request_ctx: Union[Mapping[str, Any], None] + read_bufsize: Union[int, None] + auto_decompress: Union[bool, None] + max_line_size: Union[int, None] + max_field_size: Union[int, None] + middlewares: Optional[Sequence[ClientMiddlewareType]] + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class ClientTimeout: + total: Optional[float] = None + connect: Optional[float] = None + sock_read: Optional[float] = None + sock_connect: Optional[float] = None + ceil_threshold: float = 5 + + # pool_queue_timeout: Optional[float] = None + # dns_resolution_timeout: Optional[float] = None + # socket_connect_timeout: Optional[float] = None + # connection_acquiring_timeout: Optional[float] = None + # new_connection_timeout: Optional[float] = None + # http_header_timeout: Optional[float] = None + # response_body_timeout: Optional[float] = None + + # to create a timeout specific for a single request, either + # - create a completely new one to overwrite the default + # - or use http://www.attrs.org/en/stable/api.html#attr.evolve + # to overwrite the defaults + + +# 5 Minute default read timeout +DEFAULT_TIMEOUT: Final[ClientTimeout] = ClientTimeout(total=5 * 60, sock_connect=30) + +# https://www.rfc-editor.org/rfc/rfc9110#section-9.2.2 +IDEMPOTENT_METHODS = frozenset({"GET", "HEAD", "OPTIONS", "TRACE", "PUT", "DELETE"}) + +_RetType = TypeVar("_RetType", ClientResponse, ClientWebSocketResponse) +_CharsetResolver = Callable[[ClientResponse, bytes], str] + + +class ClientSession: + """First-class interface for making HTTP requests.""" + + ATTRS = frozenset( + [ + "_base_url", + "_base_url_origin", + 
"_source_traceback", + "_connector", + "_loop", + "_cookie_jar", + "_connector_owner", + "_default_auth", + "_version", + "_json_serialize", + "_requote_redirect_url", + "_timeout", + "_raise_for_status", + "_auto_decompress", + "_trust_env", + "_default_headers", + "_skip_auto_headers", + "_request_class", + "_response_class", + "_ws_response_class", + "_trace_configs", + "_read_bufsize", + "_max_line_size", + "_max_field_size", + "_resolve_charset", + "_default_proxy", + "_default_proxy_auth", + "_retry_connection", + "_middlewares", + "requote_redirect_url", + ] + ) + + _source_traceback: Optional[traceback.StackSummary] = None + _connector: Optional[BaseConnector] = None + + def __init__( + self, + base_url: Optional[StrOrURL] = None, + *, + connector: Optional[BaseConnector] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, + cookies: Optional[LooseCookies] = None, + headers: Optional[LooseHeaders] = None, + proxy: Optional[StrOrURL] = None, + proxy_auth: Optional[BasicAuth] = None, + skip_auto_headers: Optional[Iterable[str]] = None, + auth: Optional[BasicAuth] = None, + json_serialize: JSONEncoder = json.dumps, + request_class: Type[ClientRequest] = ClientRequest, + response_class: Type[ClientResponse] = ClientResponse, + ws_response_class: Type[ClientWebSocketResponse] = ClientWebSocketResponse, + version: HttpVersion = http.HttpVersion11, + cookie_jar: Optional[AbstractCookieJar] = None, + connector_owner: bool = True, + raise_for_status: Union[ + bool, Callable[[ClientResponse], Awaitable[None]] + ] = False, + read_timeout: Union[float, _SENTINEL] = sentinel, + conn_timeout: Optional[float] = None, + timeout: Union[object, ClientTimeout] = sentinel, + auto_decompress: bool = True, + trust_env: bool = False, + requote_redirect_url: bool = True, + trace_configs: Optional[List[TraceConfig]] = None, + read_bufsize: int = 2**16, + max_line_size: int = 8190, + max_field_size: int = 8190, + fallback_charset_resolver: _CharsetResolver = lambda r, b: "utf-8", + middlewares: Sequence[ClientMiddlewareType] = (), + ssl_shutdown_timeout: Union[_SENTINEL, None, float] = sentinel, + ) -> None: + # We initialise _connector to None immediately, as it's referenced in __del__() + # and could cause issues if an exception occurs during initialisation. 
+ self._connector: Optional[BaseConnector] = None + + if loop is None: + if connector is not None: + loop = connector._loop + + loop = loop or asyncio.get_running_loop() + + if base_url is None or isinstance(base_url, URL): + self._base_url: Optional[URL] = base_url + self._base_url_origin = None if base_url is None else base_url.origin() + else: + self._base_url = URL(base_url) + self._base_url_origin = self._base_url.origin() + assert self._base_url.absolute, "Only absolute URLs are supported" + if self._base_url is not None and not self._base_url.path.endswith("/"): + raise ValueError("base_url must have a trailing '/'") + + if timeout is sentinel or timeout is None: + self._timeout = DEFAULT_TIMEOUT + if read_timeout is not sentinel: + warnings.warn( + "read_timeout is deprecated, use timeout argument instead", + DeprecationWarning, + stacklevel=2, + ) + self._timeout = attr.evolve(self._timeout, total=read_timeout) + if conn_timeout is not None: + self._timeout = attr.evolve(self._timeout, connect=conn_timeout) + warnings.warn( + "conn_timeout is deprecated, use timeout argument instead", + DeprecationWarning, + stacklevel=2, + ) + else: + if not isinstance(timeout, ClientTimeout): + raise ValueError( + f"timeout parameter cannot be of {type(timeout)} type, " + "please use 'timeout=ClientTimeout(...)'", + ) + self._timeout = timeout + if read_timeout is not sentinel: + raise ValueError( + "read_timeout and timeout parameters " + "conflict, please setup " + "timeout.read" + ) + if conn_timeout is not None: + raise ValueError( + "conn_timeout and timeout parameters " + "conflict, please setup " + "timeout.connect" + ) + + if ssl_shutdown_timeout is not sentinel: + warnings.warn( + "The ssl_shutdown_timeout parameter is deprecated and will be removed in aiohttp 4.0", + DeprecationWarning, + stacklevel=2, + ) + + if connector is None: + connector = TCPConnector( + loop=loop, ssl_shutdown_timeout=ssl_shutdown_timeout + ) + + if connector._loop is not loop: + raise RuntimeError("Session and connector has to use same event loop") + + self._loop = loop + + if loop.get_debug(): + self._source_traceback = traceback.extract_stack(sys._getframe(1)) + + if cookie_jar is None: + cookie_jar = CookieJar(loop=loop) + self._cookie_jar = cookie_jar + + if cookies: + self._cookie_jar.update_cookies(cookies) + + self._connector = connector + self._connector_owner = connector_owner + self._default_auth = auth + self._version = version + self._json_serialize = json_serialize + self._raise_for_status = raise_for_status + self._auto_decompress = auto_decompress + self._trust_env = trust_env + self._requote_redirect_url = requote_redirect_url + self._read_bufsize = read_bufsize + self._max_line_size = max_line_size + self._max_field_size = max_field_size + + # Convert to list of tuples + if headers: + real_headers: CIMultiDict[str] = CIMultiDict(headers) + else: + real_headers = CIMultiDict() + self._default_headers: CIMultiDict[str] = real_headers + if skip_auto_headers is not None: + self._skip_auto_headers = frozenset(istr(i) for i in skip_auto_headers) + else: + self._skip_auto_headers = frozenset() + + self._request_class = request_class + self._response_class = response_class + self._ws_response_class = ws_response_class + + self._trace_configs = trace_configs or [] + for trace_config in self._trace_configs: + trace_config.freeze() + + self._resolve_charset = fallback_charset_resolver + + self._default_proxy = proxy + self._default_proxy_auth = proxy_auth + self._retry_connection: bool = True + 
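# Each client middleware is expected to be an async callable taking + # (request, handler) and returning a ClientResponse; build_client_middlewares + # (used in _request below) wraps the connection handler with them in order. + # Illustrative sketch only, not part of this module: + # + # async def log_middleware(req, handler): + # resp = await handler(req) + # print(req.method, req.url, resp.status) + # return resp +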
self._middlewares = middlewares + + def __init_subclass__(cls: Type["ClientSession"]) -> None: + warnings.warn( + "Inheritance class {} from ClientSession " + "is discouraged".format(cls.__name__), + DeprecationWarning, + stacklevel=2, + ) + + if DEBUG: + + def __setattr__(self, name: str, val: Any) -> None: + if name not in self.ATTRS: + warnings.warn( + "Setting custom ClientSession.{} attribute " + "is discouraged".format(name), + DeprecationWarning, + stacklevel=2, + ) + super().__setattr__(name, val) + + def __del__(self, _warnings: Any = warnings) -> None: + if not self.closed: + kwargs = {"source": self} + _warnings.warn( + f"Unclosed client session {self!r}", ResourceWarning, **kwargs + ) + context = {"client_session": self, "message": "Unclosed client session"} + if self._source_traceback is not None: + context["source_traceback"] = self._source_traceback + self._loop.call_exception_handler(context) + + if sys.version_info >= (3, 11) and TYPE_CHECKING: + + def request( + self, + method: str, + url: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> "_RequestContextManager": ... + + else: + + def request( + self, method: str, url: StrOrURL, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP request.""" + return _RequestContextManager(self._request(method, url, **kwargs)) + + def _build_url(self, str_or_url: StrOrURL) -> URL: + url = URL(str_or_url) + if self._base_url and not url.absolute: + return self._base_url.join(url) + return url + + async def _request( + self, + method: str, + str_or_url: StrOrURL, + *, + params: Query = None, + data: Any = None, + json: Any = None, + cookies: Optional[LooseCookies] = None, + headers: Optional[LooseHeaders] = None, + skip_auto_headers: Optional[Iterable[str]] = None, + auth: Optional[BasicAuth] = None, + allow_redirects: bool = True, + max_redirects: int = 10, + compress: Union[str, bool, None] = None, + chunked: Optional[bool] = None, + expect100: bool = False, + raise_for_status: Union[ + None, bool, Callable[[ClientResponse], Awaitable[None]] + ] = None, + read_until_eof: bool = True, + proxy: Optional[StrOrURL] = None, + proxy_auth: Optional[BasicAuth] = None, + timeout: Union[ClientTimeout, _SENTINEL] = sentinel, + verify_ssl: Optional[bool] = None, + fingerprint: Optional[bytes] = None, + ssl_context: Optional[SSLContext] = None, + ssl: Union[SSLContext, bool, Fingerprint] = True, + server_hostname: Optional[str] = None, + proxy_headers: Optional[LooseHeaders] = None, + trace_request_ctx: Optional[Mapping[str, Any]] = None, + read_bufsize: Optional[int] = None, + auto_decompress: Optional[bool] = None, + max_line_size: Optional[int] = None, + max_field_size: Optional[int] = None, + middlewares: Optional[Sequence[ClientMiddlewareType]] = None, + ) -> ClientResponse: + + # NOTE: timeout clamps existing connect and read timeouts. We cannot + # set the default to None because we need to detect if the user wants + # to use the existing timeouts by setting timeout to None. 
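+ # For example (sketch): session.get(url, timeout=ClientTimeout(total=10)) + # replaces the session default for that single call, while + # attr.evolve(session.timeout, sock_read=5) derives a per-request timeout + # from the session default.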
+ + if self.closed: + raise RuntimeError("Session is closed") + + ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint) + + if data is not None and json is not None: + raise ValueError( + "data and json parameters can not be used at the same time" + ) + elif json is not None: + data = payload.JsonPayload(json, dumps=self._json_serialize) + + if not isinstance(chunked, bool) and chunked is not None: + warnings.warn("Chunk size is deprecated #1615", DeprecationWarning) + + redirects = 0 + history: List[ClientResponse] = [] + version = self._version + params = params or {} + + # Merge with default headers and transform to CIMultiDict + headers = self._prepare_headers(headers) + + try: + url = self._build_url(str_or_url) + except ValueError as e: + raise InvalidUrlClientError(str_or_url) from e + + assert self._connector is not None + if url.scheme not in self._connector.allowed_protocol_schema_set: + raise NonHttpUrlClientError(url) + + skip_headers: Optional[Iterable[istr]] + if skip_auto_headers is not None: + skip_headers = { + istr(i) for i in skip_auto_headers + } | self._skip_auto_headers + elif self._skip_auto_headers: + skip_headers = self._skip_auto_headers + else: + skip_headers = None + + if proxy is None: + proxy = self._default_proxy + if proxy_auth is None: + proxy_auth = self._default_proxy_auth + + if proxy is None: + proxy_headers = None + else: + proxy_headers = self._prepare_headers(proxy_headers) + try: + proxy = URL(proxy) + except ValueError as e: + raise InvalidURL(proxy) from e + + if timeout is sentinel: + real_timeout: ClientTimeout = self._timeout + else: + if not isinstance(timeout, ClientTimeout): + real_timeout = ClientTimeout(total=timeout) + else: + real_timeout = timeout + # timeout is cumulative for all request operations + # (request, redirects, responses, data consuming) + tm = TimeoutHandle( + self._loop, real_timeout.total, ceil_threshold=real_timeout.ceil_threshold + ) + handle = tm.start() + + if read_bufsize is None: + read_bufsize = self._read_bufsize + + if auto_decompress is None: + auto_decompress = self._auto_decompress + + if max_line_size is None: + max_line_size = self._max_line_size + + if max_field_size is None: + max_field_size = self._max_field_size + + traces = [ + Trace( + self, + trace_config, + trace_config.trace_config_ctx(trace_request_ctx=trace_request_ctx), + ) + for trace_config in self._trace_configs + ] + + for trace in traces: + await trace.send_request_start(method, url.update_query(params), headers) + + timer = tm.timer() + try: + with timer: + # https://www.rfc-editor.org/rfc/rfc9112.html#name-retrying-requests + retry_persistent_connection = ( + self._retry_connection and method in IDEMPOTENT_METHODS + ) + while True: + url, auth_from_url = strip_auth_from_url(url) + if not url.raw_host: + # NOTE: Bail early, otherwise, causes `InvalidURL` through + # NOTE: `self._request_class()` below. 
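+ # NOTE: For example, a URL such as "http://" has no host: on the + # NOTE: initial request this raises InvalidUrlClientError, and on a + # NOTE: redirect InvalidUrlRedirectClientError (selected just below).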
+ err_exc_cls = ( + InvalidUrlRedirectClientError + if redirects + else InvalidUrlClientError + ) + raise err_exc_cls(url) + # If `auth` was passed for an already authenticated URL, + # disallow only if this is the initial URL; this is to avoid issues + # with sketchy redirects that are not the caller's responsibility + if not history and (auth and auth_from_url): + raise ValueError( + "Cannot combine AUTH argument with " + "credentials encoded in URL" + ) + + # Override the auth with the one from the URL only if we + # have no auth, or if we got an auth from a redirect URL + if auth is None or (history and auth_from_url is not None): + auth = auth_from_url + + if ( + auth is None + and self._default_auth + and ( + not self._base_url or self._base_url_origin == url.origin() + ) + ): + auth = self._default_auth + + # Try netrc if auth is still None and trust_env is enabled. + if auth is None and self._trust_env and url.host is not None: + auth = await self._loop.run_in_executor( + None, self._get_netrc_auth, url.host + ) + + # It would be confusing if we support explicit + # Authorization header with auth argument + if ( + headers is not None + and auth is not None + and hdrs.AUTHORIZATION in headers + ): + raise ValueError( + "Cannot combine AUTHORIZATION header " + "with AUTH argument or credentials " + "encoded in URL" + ) + + all_cookies = self._cookie_jar.filter_cookies(url) + + if cookies is not None: + tmp_cookie_jar = CookieJar( + quote_cookie=self._cookie_jar.quote_cookie + ) + tmp_cookie_jar.update_cookies(cookies) + req_cookies = tmp_cookie_jar.filter_cookies(url) + if req_cookies: + all_cookies.load(req_cookies) + + proxy_: Optional[URL] = None + if proxy is not None: + proxy_ = URL(proxy) + elif self._trust_env: + with suppress(LookupError): + proxy_, proxy_auth = await asyncio.to_thread( + get_env_proxy_for_url, url + ) + + req = self._request_class( + method, + url, + params=params, + headers=headers, + skip_auto_headers=skip_headers, + data=data, + cookies=all_cookies, + auth=auth, + version=version, + compress=compress, + chunked=chunked, + expect100=expect100, + loop=self._loop, + response_class=self._response_class, + proxy=proxy_, + proxy_auth=proxy_auth, + timer=timer, + session=self, + ssl=ssl if ssl is not None else True, + server_hostname=server_hostname, + proxy_headers=proxy_headers, + traces=traces, + trust_env=self.trust_env, + ) + + async def _connect_and_send_request( + req: ClientRequest, + ) -> ClientResponse: + # connection timeout + assert self._connector is not None + try: + conn = await self._connector.connect( + req, traces=traces, timeout=real_timeout + ) + except asyncio.TimeoutError as exc: + raise ConnectionTimeoutError( + f"Connection timeout to host {req.url}" + ) from exc + + assert conn.protocol is not None + conn.protocol.set_response_params( + timer=timer, + skip_payload=req.method in EMPTY_BODY_METHODS, + read_until_eof=read_until_eof, + auto_decompress=auto_decompress, + read_timeout=real_timeout.sock_read, + read_bufsize=read_bufsize, + timeout_ceil_threshold=self._connector._timeout_ceil_threshold, + max_line_size=max_line_size, + max_field_size=max_field_size, + ) + try: + resp = await req.send(conn) + try: + await resp.start(conn) + except BaseException: + resp.close() + raise + except BaseException: + conn.close() + raise + return resp + + # Apply middleware (if any) - per-request middleware overrides session middleware + effective_middlewares = ( + self._middlewares if middlewares is None else middlewares + ) + + if 
effective_middlewares: + handler = build_client_middlewares( + _connect_and_send_request, effective_middlewares + ) + else: + handler = _connect_and_send_request + + try: + resp = await handler(req) + # Client connector errors should not be retried + except ( + ConnectionTimeoutError, + ClientConnectorError, + ClientConnectorCertificateError, + ClientConnectorSSLError, + ): + raise + except (ClientOSError, ServerDisconnectedError): + if retry_persistent_connection: + retry_persistent_connection = False + continue + raise + except ClientError: + raise + except OSError as exc: + if exc.errno is None and isinstance(exc, asyncio.TimeoutError): + raise + raise ClientOSError(*exc.args) from exc + + # Update cookies from raw headers to preserve duplicates + if resp._raw_cookie_headers: + self._cookie_jar.update_cookies_from_headers( + resp._raw_cookie_headers, resp.url + ) + + # redirects + if resp.status in (301, 302, 303, 307, 308) and allow_redirects: + + for trace in traces: + await trace.send_request_redirect( + method, url.update_query(params), headers, resp + ) + + redirects += 1 + history.append(resp) + if max_redirects and redirects >= max_redirects: + if req._body is not None: + await req._body.close() + resp.close() + raise TooManyRedirects( + history[0].request_info, tuple(history) + ) + + # For 301 and 302, mimic IE, now changed in RFC + # https://github.com/kennethreitz/requests/pull/269 + if (resp.status == 303 and resp.method != hdrs.METH_HEAD) or ( + resp.status in (301, 302) and resp.method == hdrs.METH_POST + ): + method = hdrs.METH_GET + data = None + if headers.get(hdrs.CONTENT_LENGTH): + headers.pop(hdrs.CONTENT_LENGTH) + else: + # For 307/308, always preserve the request body + # For 301/302 with non-POST methods, preserve the request body + # https://www.rfc-editor.org/rfc/rfc9110#section-15.4.3-3.1 + # Use the existing payload to avoid recreating it from a potentially consumed file + data = req._body + + r_url = resp.headers.get(hdrs.LOCATION) or resp.headers.get( + hdrs.URI + ) + if r_url is None: + # see github.com/aio-libs/aiohttp/issues/2022 + break + else: + # reading from correct redirection + # response is forbidden + resp.release() + + try: + parsed_redirect_url = URL( + r_url, encoded=not self._requote_redirect_url + ) + except ValueError as e: + if req._body is not None: + await req._body.close() + resp.close() + raise InvalidUrlRedirectClientError( + r_url, + "Server attempted redirecting to a location that does not look like a URL", + ) from e + + scheme = parsed_redirect_url.scheme + if scheme not in HTTP_AND_EMPTY_SCHEMA_SET: + if req._body is not None: + await req._body.close() + resp.close() + raise NonHttpUrlRedirectClientError(r_url) + elif not scheme: + parsed_redirect_url = url.join(parsed_redirect_url) + + try: + redirect_origin = parsed_redirect_url.origin() + except ValueError as origin_val_err: + if req._body is not None: + await req._body.close() + resp.close() + raise InvalidUrlRedirectClientError( + parsed_redirect_url, + "Invalid redirect URL origin", + ) from origin_val_err + + if url.origin() != redirect_origin: + auth = None + headers.pop(hdrs.AUTHORIZATION, None) + + url = parsed_redirect_url + params = {} + resp.release() + continue + + break + + if req._body is not None: + await req._body.close() + # check response status + if raise_for_status is None: + raise_for_status = self._raise_for_status + + if raise_for_status is None: + pass + elif callable(raise_for_status): + await raise_for_status(resp) + elif raise_for_status: + 
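# A truthy bool selects the default behaviour: resp.raise_for_status() + # raises ClientResponseError for 4xx/5xx statuses. +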
resp.raise_for_status() + + # register connection + if handle is not None: + if resp.connection is not None: + resp.connection.add_callback(handle.cancel) + else: + handle.cancel() + + resp._history = tuple(history) + + for trace in traces: + await trace.send_request_end( + method, url.update_query(params), headers, resp + ) + return resp + + except BaseException as e: + # cleanup timer + tm.close() + if handle: + handle.cancel() + handle = None + + for trace in traces: + await trace.send_request_exception( + method, url.update_query(params), headers, e + ) + raise + + def ws_connect( + self, + url: StrOrURL, + *, + method: str = hdrs.METH_GET, + protocols: Iterable[str] = (), + timeout: Union[ClientWSTimeout, _SENTINEL] = sentinel, + receive_timeout: Optional[float] = None, + autoclose: bool = True, + autoping: bool = True, + heartbeat: Optional[float] = None, + auth: Optional[BasicAuth] = None, + origin: Optional[str] = None, + params: Query = None, + headers: Optional[LooseHeaders] = None, + proxy: Optional[StrOrURL] = None, + proxy_auth: Optional[BasicAuth] = None, + ssl: Union[SSLContext, bool, Fingerprint] = True, + verify_ssl: Optional[bool] = None, + fingerprint: Optional[bytes] = None, + ssl_context: Optional[SSLContext] = None, + server_hostname: Optional[str] = None, + proxy_headers: Optional[LooseHeaders] = None, + compress: int = 0, + max_msg_size: int = 4 * 1024 * 1024, + ) -> "_WSRequestContextManager": + """Initiate websocket connection.""" + return _WSRequestContextManager( + self._ws_connect( + url, + method=method, + protocols=protocols, + timeout=timeout, + receive_timeout=receive_timeout, + autoclose=autoclose, + autoping=autoping, + heartbeat=heartbeat, + auth=auth, + origin=origin, + params=params, + headers=headers, + proxy=proxy, + proxy_auth=proxy_auth, + ssl=ssl, + verify_ssl=verify_ssl, + fingerprint=fingerprint, + ssl_context=ssl_context, + server_hostname=server_hostname, + proxy_headers=proxy_headers, + compress=compress, + max_msg_size=max_msg_size, + ) + ) + + async def _ws_connect( + self, + url: StrOrURL, + *, + method: str = hdrs.METH_GET, + protocols: Iterable[str] = (), + timeout: Union[ClientWSTimeout, _SENTINEL] = sentinel, + receive_timeout: Optional[float] = None, + autoclose: bool = True, + autoping: bool = True, + heartbeat: Optional[float] = None, + auth: Optional[BasicAuth] = None, + origin: Optional[str] = None, + params: Query = None, + headers: Optional[LooseHeaders] = None, + proxy: Optional[StrOrURL] = None, + proxy_auth: Optional[BasicAuth] = None, + ssl: Union[SSLContext, bool, Fingerprint] = True, + verify_ssl: Optional[bool] = None, + fingerprint: Optional[bytes] = None, + ssl_context: Optional[SSLContext] = None, + server_hostname: Optional[str] = None, + proxy_headers: Optional[LooseHeaders] = None, + compress: int = 0, + max_msg_size: int = 4 * 1024 * 1024, + ) -> ClientWebSocketResponse: + if timeout is not sentinel: + if isinstance(timeout, ClientWSTimeout): + ws_timeout = timeout + else: + warnings.warn( + "parameter 'timeout' of type 'float' " + "is deprecated, please use " + "'timeout=ClientWSTimeout(ws_close=...)'", + DeprecationWarning, + stacklevel=2, + ) + ws_timeout = ClientWSTimeout(ws_close=timeout) + else: + ws_timeout = DEFAULT_WS_CLIENT_TIMEOUT + if receive_timeout is not None: + warnings.warn( + "float parameter 'receive_timeout' " + "is deprecated, please use parameter " + "'timeout=ClientWSTimeout(ws_receive=...)'", + DeprecationWarning, + stacklevel=2, + ) + ws_timeout = attr.evolve(ws_timeout, 
ws_receive=receive_timeout) + + if headers is None: + real_headers: CIMultiDict[str] = CIMultiDict() + else: + real_headers = CIMultiDict(headers) + + default_headers = { + hdrs.UPGRADE: "websocket", + hdrs.CONNECTION: "Upgrade", + hdrs.SEC_WEBSOCKET_VERSION: "13", + } + + for key, value in default_headers.items(): + real_headers.setdefault(key, value) + + sec_key = base64.b64encode(os.urandom(16)) + real_headers[hdrs.SEC_WEBSOCKET_KEY] = sec_key.decode() + + if protocols: + real_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = ",".join(protocols) + if origin is not None: + real_headers[hdrs.ORIGIN] = origin + if compress: + extstr = ws_ext_gen(compress=compress) + real_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = extstr + + # For the sake of backward compatibility, if user passes in None, convert it to True + if ssl is None: + warnings.warn( + "ssl=None is deprecated, please use ssl=True", + DeprecationWarning, + stacklevel=2, + ) + ssl = True + ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint) + + # send request + resp = await self.request( + method, + url, + params=params, + headers=real_headers, + read_until_eof=False, + auth=auth, + proxy=proxy, + proxy_auth=proxy_auth, + ssl=ssl, + server_hostname=server_hostname, + proxy_headers=proxy_headers, + ) + + try: + # check handshake + if resp.status != 101: + raise WSServerHandshakeError( + resp.request_info, + resp.history, + message="Invalid response status", + status=resp.status, + headers=resp.headers, + ) + + if resp.headers.get(hdrs.UPGRADE, "").lower() != "websocket": + raise WSServerHandshakeError( + resp.request_info, + resp.history, + message="Invalid upgrade header", + status=resp.status, + headers=resp.headers, + ) + + if resp.headers.get(hdrs.CONNECTION, "").lower() != "upgrade": + raise WSServerHandshakeError( + resp.request_info, + resp.history, + message="Invalid connection header", + status=resp.status, + headers=resp.headers, + ) + + # key calculation + r_key = resp.headers.get(hdrs.SEC_WEBSOCKET_ACCEPT, "") + match = base64.b64encode(hashlib.sha1(sec_key + WS_KEY).digest()).decode() + if r_key != match: + raise WSServerHandshakeError( + resp.request_info, + resp.history, + message="Invalid challenge response", + status=resp.status, + headers=resp.headers, + ) + + # websocket protocol + protocol = None + if protocols and hdrs.SEC_WEBSOCKET_PROTOCOL in resp.headers: + resp_protocols = [ + proto.strip() + for proto in resp.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",") + ] + + for proto in resp_protocols: + if proto in protocols: + protocol = proto + break + + # websocket compress + notakeover = False + if compress: + compress_hdrs = resp.headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS) + if compress_hdrs: + try: + compress, notakeover = ws_ext_parse(compress_hdrs) + except WSHandshakeError as exc: + raise WSServerHandshakeError( + resp.request_info, + resp.history, + message=exc.args[0], + status=resp.status, + headers=resp.headers, + ) from exc + else: + compress = 0 + notakeover = False + + conn = resp.connection + assert conn is not None + conn_proto = conn.protocol + assert conn_proto is not None + + # For WS connection the read_timeout must be either receive_timeout or greater + # None == no timeout, i.e. 
infinite timeout, so None is the max timeout possible + if ws_timeout.ws_receive is None: + # Reset regardless + conn_proto.read_timeout = None + elif conn_proto.read_timeout is not None: + conn_proto.read_timeout = max( + ws_timeout.ws_receive, conn_proto.read_timeout + ) + + transport = conn.transport + assert transport is not None + reader = WebSocketDataQueue(conn_proto, 2**16, loop=self._loop) + conn_proto.set_parser(WebSocketReader(reader, max_msg_size), reader) + writer = WebSocketWriter( + conn_proto, + transport, + use_mask=True, + compress=compress, + notakeover=notakeover, + ) + except BaseException: + resp.close() + raise + else: + return self._ws_response_class( + reader, + writer, + protocol, + resp, + ws_timeout, + autoclose, + autoping, + self._loop, + heartbeat=heartbeat, + compress=compress, + client_notakeover=notakeover, + ) + + def _prepare_headers(self, headers: Optional[LooseHeaders]) -> "CIMultiDict[str]": + """Add default headers and transform it to CIMultiDict""" + # Convert headers to MultiDict + result = CIMultiDict(self._default_headers) + if headers: + if not isinstance(headers, (MultiDictProxy, MultiDict)): + headers = CIMultiDict(headers) + added_names: Set[str] = set() + for key, value in headers.items(): + if key in added_names: + result.add(key, value) + else: + result[key] = value + added_names.add(key) + return result + + def _get_netrc_auth(self, host: str) -> Optional[BasicAuth]: + """ + Get auth from netrc for the given host. + + This method is designed to be called in an executor to avoid + blocking I/O in the event loop. + """ + netrc_obj = netrc_from_env() + try: + return basicauth_from_netrc(netrc_obj, host) + except LookupError: + return None + + if sys.version_info >= (3, 11) and TYPE_CHECKING: + + def get( + self, + url: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> "_RequestContextManager": ... + + def options( + self, + url: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> "_RequestContextManager": ... + + def head( + self, + url: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> "_RequestContextManager": ... + + def post( + self, + url: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> "_RequestContextManager": ... + + def put( + self, + url: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> "_RequestContextManager": ... + + def patch( + self, + url: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> "_RequestContextManager": ... + + def delete( + self, + url: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> "_RequestContextManager": ... 
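+ # The stubs above exist only for static type checkers: Unpack is imported + # under sys.version_info >= (3, 11) and TYPE_CHECKING, so the runtime + # definitions live in the else branch that follows.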
+ + else: + + def get( + self, url: StrOrURL, *, allow_redirects: bool = True, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP GET request.""" + return _RequestContextManager( + self._request( + hdrs.METH_GET, url, allow_redirects=allow_redirects, **kwargs + ) + ) + + def options( + self, url: StrOrURL, *, allow_redirects: bool = True, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP OPTIONS request.""" + return _RequestContextManager( + self._request( + hdrs.METH_OPTIONS, url, allow_redirects=allow_redirects, **kwargs + ) + ) + + def head( + self, url: StrOrURL, *, allow_redirects: bool = False, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP HEAD request.""" + return _RequestContextManager( + self._request( + hdrs.METH_HEAD, url, allow_redirects=allow_redirects, **kwargs + ) + ) + + def post( + self, url: StrOrURL, *, data: Any = None, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP POST request.""" + return _RequestContextManager( + self._request(hdrs.METH_POST, url, data=data, **kwargs) + ) + + def put( + self, url: StrOrURL, *, data: Any = None, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP PUT request.""" + return _RequestContextManager( + self._request(hdrs.METH_PUT, url, data=data, **kwargs) + ) + + def patch( + self, url: StrOrURL, *, data: Any = None, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP PATCH request.""" + return _RequestContextManager( + self._request(hdrs.METH_PATCH, url, data=data, **kwargs) + ) + + def delete(self, url: StrOrURL, **kwargs: Any) -> "_RequestContextManager": + """Perform HTTP DELETE request.""" + return _RequestContextManager( + self._request(hdrs.METH_DELETE, url, **kwargs) + ) + + async def close(self) -> None: + """Close underlying connector. + + Release all acquired resources. + """ + if not self.closed: + if self._connector is not None and self._connector_owner: + await self._connector.close() + self._connector = None + + @property + def closed(self) -> bool: + """Is client session closed. + + A readonly property. 
+ """ + return self._connector is None or self._connector.closed + + @property + def connector(self) -> Optional[BaseConnector]: + """Connector instance used for the session.""" + return self._connector + + @property + def cookie_jar(self) -> AbstractCookieJar: + """The session cookies.""" + return self._cookie_jar + + @property + def version(self) -> Tuple[int, int]: + """The session HTTP protocol version.""" + return self._version + + @property + def requote_redirect_url(self) -> bool: + """Do URL requoting on redirection handling.""" + return self._requote_redirect_url + + @requote_redirect_url.setter + def requote_redirect_url(self, val: bool) -> None: + """Do URL requoting on redirection handling.""" + warnings.warn( + "session.requote_redirect_url modification is deprecated #2778", + DeprecationWarning, + stacklevel=2, + ) + self._requote_redirect_url = val + + @property + def loop(self) -> asyncio.AbstractEventLoop: + """Session's loop.""" + warnings.warn( + "client.loop property is deprecated", DeprecationWarning, stacklevel=2 + ) + return self._loop + + @property + def timeout(self) -> ClientTimeout: + """Timeout for the session.""" + return self._timeout + + @property + def headers(self) -> "CIMultiDict[str]": + """The default headers of the client session.""" + return self._default_headers + + @property + def skip_auto_headers(self) -> FrozenSet[istr]: + """Headers for which autogeneration should be skipped""" + return self._skip_auto_headers + + @property + def auth(self) -> Optional[BasicAuth]: + """An object that represents HTTP Basic Authorization""" + return self._default_auth + + @property + def json_serialize(self) -> JSONEncoder: + """Json serializer callable""" + return self._json_serialize + + @property + def connector_owner(self) -> bool: + """Should connector be closed on session closing""" + return self._connector_owner + + @property + def raise_for_status( + self, + ) -> Union[bool, Callable[[ClientResponse], Awaitable[None]]]: + """Should `ClientResponse.raise_for_status()` be called for each response.""" + return self._raise_for_status + + @property + def auto_decompress(self) -> bool: + """Should the body response be automatically decompressed.""" + return self._auto_decompress + + @property + def trust_env(self) -> bool: + """ + Should proxies information from environment or netrc be trusted. + + Information is from HTTP_PROXY / HTTPS_PROXY environment variables + or ~/.netrc file if present. + """ + return self._trust_env + + @property + def trace_configs(self) -> List[TraceConfig]: + """A list of TraceConfig instances used for client tracing""" + return self._trace_configs + + def detach(self) -> None: + """Detach connector from session without closing the former. + + Session is switched to closed state anyway. 
+ """ + self._connector = None + + def __enter__(self) -> None: + raise TypeError("Use async with instead") + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + # __exit__ should exist in pair with __enter__ but never executed + pass # pragma: no cover + + async def __aenter__(self) -> "ClientSession": + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + await self.close() + + +class _BaseRequestContextManager(Coroutine[Any, Any, _RetType], Generic[_RetType]): + + __slots__ = ("_coro", "_resp") + + def __init__(self, coro: Coroutine["asyncio.Future[Any]", None, _RetType]) -> None: + self._coro: Coroutine["asyncio.Future[Any]", None, _RetType] = coro + + def send(self, arg: None) -> "asyncio.Future[Any]": + return self._coro.send(arg) + + def throw(self, *args: Any, **kwargs: Any) -> "asyncio.Future[Any]": + return self._coro.throw(*args, **kwargs) + + def close(self) -> None: + return self._coro.close() + + def __await__(self) -> Generator[Any, None, _RetType]: + ret = self._coro.__await__() + return ret + + def __iter__(self) -> Generator[Any, None, _RetType]: + return self.__await__() + + async def __aenter__(self) -> _RetType: + self._resp: _RetType = await self._coro + return await self._resp.__aenter__() + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc: Optional[BaseException], + tb: Optional[TracebackType], + ) -> None: + await self._resp.__aexit__(exc_type, exc, tb) + + +_RequestContextManager = _BaseRequestContextManager[ClientResponse] +_WSRequestContextManager = _BaseRequestContextManager[ClientWebSocketResponse] + + +class _SessionRequestContextManager: + + __slots__ = ("_coro", "_resp", "_session") + + def __init__( + self, + coro: Coroutine["asyncio.Future[Any]", None, ClientResponse], + session: ClientSession, + ) -> None: + self._coro = coro + self._resp: Optional[ClientResponse] = None + self._session = session + + async def __aenter__(self) -> ClientResponse: + try: + self._resp = await self._coro + except BaseException: + await self._session.close() + raise + else: + return self._resp + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc: Optional[BaseException], + tb: Optional[TracebackType], + ) -> None: + assert self._resp is not None + self._resp.close() + await self._session.close() + + +if sys.version_info >= (3, 11) and TYPE_CHECKING: + + def request( + method: str, + url: StrOrURL, + *, + version: HttpVersion = http.HttpVersion11, + connector: Optional[BaseConnector] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, + **kwargs: Unpack[_RequestOptions], + ) -> _SessionRequestContextManager: ... + +else: + + def request( + method: str, + url: StrOrURL, + *, + version: HttpVersion = http.HttpVersion11, + connector: Optional[BaseConnector] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, + **kwargs: Any, + ) -> _SessionRequestContextManager: + """Constructs and sends a request. + + Returns response object. 
method - HTTP method + url - request url + params - (optional) Dictionary or bytes to be sent in the query + string of the new request + data - (optional) Dictionary, bytes, or file-like object to + send in the body of the request + json - (optional) Any json compatible python object + headers - (optional) Dictionary of HTTP Headers to send with + the request + cookies - (optional) Dict object to send with the request + auth - (optional) aiohttp.helpers.BasicAuth named tuple + representing HTTP Basic Auth + allow_redirects - (optional) If set to False, do not follow + redirects + version - Request HTTP version. + compress - Set to True if request has to be compressed + with deflate encoding. + chunked - Set to chunk size for chunked transfer encoding. + expect100 - Expect 100-continue response from server. + connector - BaseConnector sub-class instance to support + connection pooling. + read_until_eof - Read response until eof if response + does not have Content-Length header. + loop - Optional event loop. + timeout - Optional ClientTimeout settings structure, 5min + total timeout by default. + Usage:: + >>> import aiohttp + >>> async with aiohttp.request('GET', 'http://python.org/') as resp: + ... print(resp) + ... data = await resp.read() + + """ + connector_owner = False + if connector is None: + connector_owner = True + connector = TCPConnector(loop=loop, force_close=True) + + session = ClientSession( + loop=loop, + cookies=kwargs.pop("cookies", None), + version=version, + timeout=kwargs.pop("timeout", sentinel), + connector=connector, + connector_owner=connector_owner, + ) + + return _SessionRequestContextManager( + session._request(method, url, **kwargs), + session, + ) diff --git a/py311/lib/python3.11/site-packages/aiohttp/client_exceptions.py b/py311/lib/python3.11/site-packages/aiohttp/client_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..1d298e9a8cf663cdc8a85d3b7d1f9264ff5e03c9 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/client_exceptions.py @@ -0,0 +1,421 @@ +"""HTTP related errors.""" + +import asyncio +import warnings +from typing import TYPE_CHECKING, Optional, Tuple, Union + +from multidict import MultiMapping + +from .typedefs import StrOrURL + +if TYPE_CHECKING: + import ssl + + SSLContext = ssl.SSLContext +else: + try: + import ssl + + SSLContext = ssl.SSLContext + except ImportError: # pragma: no cover + ssl = SSLContext = None # type: ignore[assignment] + +if TYPE_CHECKING: + from .client_reqrep import ClientResponse, ConnectionKey, Fingerprint, RequestInfo + from .http_parser import RawResponseMessage +else: + RequestInfo = ClientResponse = ConnectionKey = RawResponseMessage = None + +__all__ = ( + "ClientError", + "ClientConnectionError", + "ClientConnectionResetError", + "ClientOSError", + "ClientConnectorError", + "ClientProxyConnectionError", + "ClientSSLError", + "ClientConnectorDNSError", + "ClientConnectorSSLError", + "ClientConnectorCertificateError", + "ConnectionTimeoutError", + "SocketTimeoutError", + "ServerConnectionError", + "ServerTimeoutError", + "ServerDisconnectedError", + "ServerFingerprintMismatch", + "ClientResponseError", + "ClientHttpProxyError", + "WSServerHandshakeError", + "ContentTypeError", + "ClientPayloadError", + "InvalidURL", + "InvalidUrlClientError", + "RedirectClientError", + "NonHttpUrlClientError", + "InvalidUrlRedirectClientError", + "NonHttpUrlRedirectClientError", + "WSMessageTypeError", +) + + +class ClientError(Exception): + """Base class for client
connection errors.""" + + +class ClientResponseError(ClientError): + """Base class for exceptions that occur after getting a response. + + request_info: An instance of RequestInfo. + history: A sequence of responses, if redirects occurred. + status: HTTP status code. + message: Error message. + headers: Response headers. + """ + + def __init__( + self, + request_info: RequestInfo, + history: Tuple[ClientResponse, ...], + *, + code: Optional[int] = None, + status: Optional[int] = None, + message: str = "", + headers: Optional[MultiMapping[str]] = None, + ) -> None: + self.request_info = request_info + if code is not None: + if status is not None: + raise ValueError( + "Both code and status arguments are provided; " + "code is deprecated, use status instead" + ) + warnings.warn( + "code argument is deprecated, use status instead", + DeprecationWarning, + stacklevel=2, + ) + if status is not None: + self.status = status + elif code is not None: + self.status = code + else: + self.status = 0 + self.message = message + self.headers = headers + self.history = history + self.args = (request_info, history) + + def __str__(self) -> str: + return "{}, message={!r}, url={!r}".format( + self.status, + self.message, + str(self.request_info.real_url), + ) + + def __repr__(self) -> str: + args = f"{self.request_info!r}, {self.history!r}" + if self.status != 0: + args += f", status={self.status!r}" + if self.message != "": + args += f", message={self.message!r}" + if self.headers is not None: + args += f", headers={self.headers!r}" + return f"{type(self).__name__}({args})" + + @property + def code(self) -> int: + warnings.warn( + "code property is deprecated, use status instead", + DeprecationWarning, + stacklevel=2, + ) + return self.status + + @code.setter + def code(self, value: int) -> None: + warnings.warn( + "code property is deprecated, use status instead", + DeprecationWarning, + stacklevel=2, + ) + self.status = value + + +class ContentTypeError(ClientResponseError): + """ContentType found is not valid.""" + + +class WSServerHandshakeError(ClientResponseError): + """websocket server handshake error.""" + + +class ClientHttpProxyError(ClientResponseError): + """HTTP proxy error. + + Raised in :class:`aiohttp.connector.TCPConnector` if + proxy responds with status other than ``200 OK`` + on ``CONNECT`` request. + """ + + +class TooManyRedirects(ClientResponseError): + """Client was redirected too many times.""" + + +class ClientConnectionError(ClientError): + """Base class for client socket errors.""" + + +class ClientConnectionResetError(ClientConnectionError, ConnectionResetError): + """ConnectionResetError""" + + +class ClientOSError(ClientConnectionError, OSError): + """OSError error.""" + + +class ClientConnectorError(ClientOSError): + """Client connector error. + + Raised in :class:`aiohttp.connector.TCPConnector` if + a connection can not be established. 
+ """ + + def __init__(self, connection_key: ConnectionKey, os_error: OSError) -> None: + self._conn_key = connection_key + self._os_error = os_error + super().__init__(os_error.errno, os_error.strerror) + self.args = (connection_key, os_error) + + @property + def os_error(self) -> OSError: + return self._os_error + + @property + def host(self) -> str: + return self._conn_key.host + + @property + def port(self) -> Optional[int]: + return self._conn_key.port + + @property + def ssl(self) -> Union[SSLContext, bool, "Fingerprint"]: + return self._conn_key.ssl + + def __str__(self) -> str: + return "Cannot connect to host {0.host}:{0.port} ssl:{1} [{2}]".format( + self, "default" if self.ssl is True else self.ssl, self.strerror + ) + + # OSError.__reduce__ does too much black magick + __reduce__ = BaseException.__reduce__ + + +class ClientConnectorDNSError(ClientConnectorError): + """DNS resolution failed during client connection. + + Raised in :class:`aiohttp.connector.TCPConnector` if + DNS resolution fails. + """ + + +class ClientProxyConnectionError(ClientConnectorError): + """Proxy connection error. + + Raised in :class:`aiohttp.connector.TCPConnector` if + connection to proxy can not be established. + """ + + +class UnixClientConnectorError(ClientConnectorError): + """Unix connector error. + + Raised in :py:class:`aiohttp.connector.UnixConnector` + if connection to unix socket can not be established. + """ + + def __init__( + self, path: str, connection_key: ConnectionKey, os_error: OSError + ) -> None: + self._path = path + super().__init__(connection_key, os_error) + + @property + def path(self) -> str: + return self._path + + def __str__(self) -> str: + return "Cannot connect to unix socket {0.path} ssl:{1} [{2}]".format( + self, "default" if self.ssl is True else self.ssl, self.strerror + ) + + +class ServerConnectionError(ClientConnectionError): + """Server connection errors.""" + + +class ServerDisconnectedError(ServerConnectionError): + """Server disconnected.""" + + def __init__(self, message: Union[RawResponseMessage, str, None] = None) -> None: + if message is None: + message = "Server disconnected" + + self.args = (message,) + self.message = message + + +class ServerTimeoutError(ServerConnectionError, asyncio.TimeoutError): + """Server timeout error.""" + + +class ConnectionTimeoutError(ServerTimeoutError): + """Connection timeout error.""" + + +class SocketTimeoutError(ServerTimeoutError): + """Socket timeout error.""" + + +class ServerFingerprintMismatch(ServerConnectionError): + """SSL certificate does not match expected fingerprint.""" + + def __init__(self, expected: bytes, got: bytes, host: str, port: int) -> None: + self.expected = expected + self.got = got + self.host = host + self.port = port + self.args = (expected, got, host, port) + + def __repr__(self) -> str: + return "<{} expected={!r} got={!r} host={!r} port={!r}>".format( + self.__class__.__name__, self.expected, self.got, self.host, self.port + ) + + +class ClientPayloadError(ClientError): + """Response payload error.""" + + +class InvalidURL(ClientError, ValueError): + """Invalid URL. + + URL used for fetching is malformed, e.g. it doesn't contains host + part. 
+ """ + + # Derive from ValueError for backward compatibility + + def __init__(self, url: StrOrURL, description: Union[str, None] = None) -> None: + # The type of url is not yarl.URL because the exception can be raised + # on URL(url) call + self._url = url + self._description = description + + if description: + super().__init__(url, description) + else: + super().__init__(url) + + @property + def url(self) -> StrOrURL: + return self._url + + @property + def description(self) -> "str | None": + return self._description + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self}>" + + def __str__(self) -> str: + if self._description: + return f"{self._url} - {self._description}" + return str(self._url) + + +class InvalidUrlClientError(InvalidURL): + """Invalid URL client error.""" + + +class RedirectClientError(ClientError): + """Client redirect error.""" + + +class NonHttpUrlClientError(ClientError): + """Non http URL client error.""" + + +class InvalidUrlRedirectClientError(InvalidUrlClientError, RedirectClientError): + """Invalid URL redirect client error.""" + + +class NonHttpUrlRedirectClientError(NonHttpUrlClientError, RedirectClientError): + """Non http URL redirect client error.""" + + +class ClientSSLError(ClientConnectorError): + """Base error for ssl.*Errors.""" + + +if ssl is not None: + cert_errors = (ssl.CertificateError,) + cert_errors_bases = ( + ClientSSLError, + ssl.CertificateError, + ) + + ssl_errors = (ssl.SSLError,) + ssl_error_bases = (ClientSSLError, ssl.SSLError) +else: # pragma: no cover + cert_errors = tuple() + cert_errors_bases = ( + ClientSSLError, + ValueError, + ) + + ssl_errors = tuple() + ssl_error_bases = (ClientSSLError,) + + +class ClientConnectorSSLError(*ssl_error_bases): # type: ignore[misc] + """Response ssl error.""" + + +class ClientConnectorCertificateError(*cert_errors_bases): # type: ignore[misc] + """Response certificate error.""" + + def __init__( + self, connection_key: ConnectionKey, certificate_error: Exception + ) -> None: + self._conn_key = connection_key + self._certificate_error = certificate_error + self.args = (connection_key, certificate_error) + + @property + def certificate_error(self) -> Exception: + return self._certificate_error + + @property + def host(self) -> str: + return self._conn_key.host + + @property + def port(self) -> Optional[int]: + return self._conn_key.port + + @property + def ssl(self) -> bool: + return self._conn_key.is_ssl + + def __str__(self) -> str: + return ( + "Cannot connect to host {0.host}:{0.port} ssl:{0.ssl} " + "[{0.certificate_error.__class__.__name__}: " + "{0.certificate_error.args}]".format(self) + ) + + +class WSMessageTypeError(TypeError): + """WebSocket message type is not valid.""" diff --git a/py311/lib/python3.11/site-packages/aiohttp/client_middleware_digest_auth.py b/py311/lib/python3.11/site-packages/aiohttp/client_middleware_digest_auth.py new file mode 100644 index 0000000000000000000000000000000000000000..5aab5acb85acbda5136fe0bef84df7ff4f492405 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/client_middleware_digest_auth.py @@ -0,0 +1,480 @@ +""" +Digest authentication middleware for aiohttp client. + +This middleware implements HTTP Digest Authentication according to RFC 7616, +providing a more secure alternative to Basic Authentication. It supports all +standard hash algorithms including MD5, SHA, SHA-256, SHA-512 and their session +variants, as well as both 'auth' and 'auth-int' quality of protection (qop) options. 
+""" + +import hashlib +import os +import re +import sys +import time +from typing import ( + Callable, + Dict, + Final, + FrozenSet, + List, + Literal, + Tuple, + TypedDict, + Union, +) + +from yarl import URL + +from . import hdrs +from .client_exceptions import ClientError +from .client_middlewares import ClientHandlerType +from .client_reqrep import ClientRequest, ClientResponse +from .payload import Payload + + +class DigestAuthChallenge(TypedDict, total=False): + realm: str + nonce: str + qop: str + algorithm: str + opaque: str + domain: str + stale: str + + +DigestFunctions: Dict[str, Callable[[bytes], "hashlib._Hash"]] = { + "MD5": hashlib.md5, + "MD5-SESS": hashlib.md5, + "SHA": hashlib.sha1, + "SHA-SESS": hashlib.sha1, + "SHA256": hashlib.sha256, + "SHA256-SESS": hashlib.sha256, + "SHA-256": hashlib.sha256, + "SHA-256-SESS": hashlib.sha256, + "SHA512": hashlib.sha512, + "SHA512-SESS": hashlib.sha512, + "SHA-512": hashlib.sha512, + "SHA-512-SESS": hashlib.sha512, +} + + +# Compile the regex pattern once at module level for performance +_HEADER_PAIRS_PATTERN = re.compile( + r'(?:^|\s|,\s*)(\w+)\s*=\s*(?:"((?:[^"\\]|\\.)*)"|([^\s,]+))' + if sys.version_info < (3, 11) + else r'(?:^|\s|,\s*)((?>\w+))\s*=\s*(?:"((?:[^"\\]|\\.)*)"|([^\s,]+))' + # +------------|--------|--|-|-|--|----|------|----|--||-----|-> Match valid start/sep + # +--------|--|-|-|--|----|------|----|--||-----|-> alphanumeric key (atomic + # | | | | | | | | || | group reduces backtracking) + # +--|-|-|--|----|------|----|--||-----|-> maybe whitespace + # | | | | | | | || | + # +-|-|--|----|------|----|--||-----|-> = (delimiter) + # +-|--|----|------|----|--||-----|-> maybe whitespace + # | | | | | || | + # +--|----|------|----|--||-----|-> group quoted or unquoted + # | | | | || | + # +----|------|----|--||-----|-> if quoted... + # +------|----|--||-----|-> anything but " or \ + # +----|--||-----|-> escaped characters allowed + # +--||-----|-> or can be empty string + # || | + # +|-----|-> if unquoted... + # +-----|-> anything but , or + # +-> at least one char req'd +) + + +# RFC 7616: Challenge parameters to extract +CHALLENGE_FIELDS: Final[ + Tuple[ + Literal["realm", "nonce", "qop", "algorithm", "opaque", "domain", "stale"], ... + ] +] = ( + "realm", + "nonce", + "qop", + "algorithm", + "opaque", + "domain", + "stale", +) + +# Supported digest authentication algorithms +# Use a tuple of sorted keys for predictable documentation and error messages +SUPPORTED_ALGORITHMS: Final[Tuple[str, ...]] = tuple(sorted(DigestFunctions.keys())) + +# RFC 7616: Fields that require quoting in the Digest auth header +# These fields must be enclosed in double quotes in the Authorization header. +# Algorithm, qop, and nc are never quoted per RFC specifications. +# This frozen set is used by the template-based header construction to +# automatically determine which fields need quotes. +QUOTED_AUTH_FIELDS: Final[FrozenSet[str]] = frozenset( + {"username", "realm", "nonce", "uri", "response", "opaque", "cnonce"} +) + + +def escape_quotes(value: str) -> str: + """Escape double quotes for HTTP header values.""" + return value.replace('"', '\\"') + + +def unescape_quotes(value: str) -> str: + """Unescape double quotes in HTTP header values.""" + return value.replace('\\"', '"') + + +def parse_header_pairs(header: str) -> Dict[str, str]: + """ + Parse key-value pairs from WWW-Authenticate or similar HTTP headers. 
+ + This function handles the complex format of WWW-Authenticate header values, + supporting both quoted and unquoted values, proper handling of commas in + quoted values, and whitespace variations per RFC 7616. + + Examples of supported formats: + - key1="value1", key2=value2 + - key1 = "value1" , key2="value, with, commas" + - key1=value1,key2="value2" + - realm="example.com", nonce="12345", qop="auth" + + Args: + header: The header value string to parse + + Returns: + Dictionary mapping parameter names to their values + """ + return { + stripped_key: unescape_quotes(quoted_val) if quoted_val else unquoted_val + for key, quoted_val, unquoted_val in _HEADER_PAIRS_PATTERN.findall(header) + if (stripped_key := key.strip()) + } + + +class DigestAuthMiddleware: + """ + HTTP digest authentication middleware for aiohttp client. + + This middleware intercepts 401 Unauthorized responses containing a Digest + authentication challenge, calculates the appropriate digest credentials, + and automatically retries the request with the proper Authorization header. + + Features: + - Handles all aspects of Digest authentication handshake automatically + - Supports all standard hash algorithms: + - MD5, MD5-SESS + - SHA, SHA-SESS + - SHA256, SHA256-SESS, SHA-256, SHA-256-SESS + - SHA512, SHA512-SESS, SHA-512, SHA-512-SESS + - Supports 'auth' and 'auth-int' quality of protection modes + - Properly handles quoted strings and parameter parsing + - Includes replay attack protection with client nonce count tracking + - Supports preemptive authentication per RFC 7616 Section 3.6 + + Standards compliance: + - RFC 7616: HTTP Digest Access Authentication (primary reference) + - RFC 2617: HTTP Authentication (deprecated by RFC 7616) + - RFC 1945: Section 11.1 (username restrictions) + + Implementation notes: + The core digest calculation is inspired by the implementation in + https://github.com/requests/requests/blob/v2.18.4/requests/auth.py + with added support for modern digest auth features and error handling. + """ + + def __init__( + self, + login: str, + password: str, + preemptive: bool = True, + ) -> None: + if login is None: + raise ValueError("None is not allowed as login value") + + if password is None: + raise ValueError("None is not allowed as password value") + + if ":" in login: + raise ValueError('A ":" is not allowed in username (RFC 1945#section-11.1)') + + self._login_str: Final[str] = login + self._login_bytes: Final[bytes] = login.encode("utf-8") + self._password_bytes: Final[bytes] = password.encode("utf-8") + + self._last_nonce_bytes = b"" + self._nonce_count = 0 + self._challenge: DigestAuthChallenge = {} + self._preemptive: bool = preemptive + # Set of URLs defining the protection space + self._protection_space: List[str] = [] + + async def _encode( + self, method: str, url: URL, body: Union[Payload, Literal[b""]] + ) -> str: + """ + Build digest authorization header for the current challenge. + + Args: + method: The HTTP method (GET, POST, etc.) 
+ url: The request URL + body: The request body (used for qop=auth-int) + + Returns: + A fully formatted Digest authorization header string + + Raises: + ClientError: If the challenge is missing required parameters or + contains unsupported values + + """ + challenge = self._challenge + if "realm" not in challenge: + raise ClientError( + "Malformed Digest auth challenge: Missing 'realm' parameter" + ) + + if "nonce" not in challenge: + raise ClientError( + "Malformed Digest auth challenge: Missing 'nonce' parameter" + ) + + # Empty realm values are allowed per RFC 7616 (SHOULD, not MUST, contain host name) + realm = challenge["realm"] + nonce = challenge["nonce"] + + # Empty nonce values are not allowed as they are security-critical for replay protection + if not nonce: + raise ClientError( + "Security issue: Digest auth challenge contains empty 'nonce' value" + ) + + qop_raw = challenge.get("qop", "") + # Preserve original algorithm case for response while using uppercase for processing + algorithm_original = challenge.get("algorithm", "MD5") + algorithm = algorithm_original.upper() + opaque = challenge.get("opaque", "") + + # Convert string values to bytes once + nonce_bytes = nonce.encode("utf-8") + realm_bytes = realm.encode("utf-8") + path = URL(url).path_qs + + # Process QoP + qop = "" + qop_bytes = b"" + if qop_raw: + valid_qops = {"auth", "auth-int"}.intersection( + {q.strip() for q in qop_raw.split(",") if q.strip()} + ) + if not valid_qops: + raise ClientError( + f"Digest auth error: Unsupported Quality of Protection (qop) value(s): {qop_raw}" + ) + + qop = "auth-int" if "auth-int" in valid_qops else "auth" + qop_bytes = qop.encode("utf-8") + + if algorithm not in DigestFunctions: + raise ClientError( + f"Digest auth error: Unsupported hash algorithm: {algorithm}. 
" + f"Supported algorithms: {', '.join(SUPPORTED_ALGORITHMS)}" + ) + hash_fn: Final = DigestFunctions[algorithm] + + def H(x: bytes) -> bytes: + """RFC 7616 Section 3: Hash function H(data) = hex(hash(data)).""" + return hash_fn(x).hexdigest().encode() + + def KD(s: bytes, d: bytes) -> bytes: + """RFC 7616 Section 3: KD(secret, data) = H(concat(secret, ":", data)).""" + return H(b":".join((s, d))) + + # Calculate A1 and A2 + A1 = b":".join((self._login_bytes, realm_bytes, self._password_bytes)) + A2 = f"{method.upper()}:{path}".encode() + if qop == "auth-int": + if isinstance(body, Payload): # will always be empty bytes unless Payload + entity_bytes = await body.as_bytes() # Get bytes from Payload + else: + entity_bytes = body + entity_hash = H(entity_bytes) + A2 = b":".join((A2, entity_hash)) + + HA1 = H(A1) + HA2 = H(A2) + + # Nonce count handling + if nonce_bytes == self._last_nonce_bytes: + self._nonce_count += 1 + else: + self._nonce_count = 1 + + self._last_nonce_bytes = nonce_bytes + ncvalue = f"{self._nonce_count:08x}" + ncvalue_bytes = ncvalue.encode("utf-8") + + # Generate client nonce + cnonce = hashlib.sha1( + b"".join( + [ + str(self._nonce_count).encode("utf-8"), + nonce_bytes, + time.ctime().encode("utf-8"), + os.urandom(8), + ] + ) + ).hexdigest()[:16] + cnonce_bytes = cnonce.encode("utf-8") + + # Special handling for session-based algorithms + if algorithm.upper().endswith("-SESS"): + HA1 = H(b":".join((HA1, nonce_bytes, cnonce_bytes))) + + # Calculate the response digest + if qop: + noncebit = b":".join( + (nonce_bytes, ncvalue_bytes, cnonce_bytes, qop_bytes, HA2) + ) + response_digest = KD(HA1, noncebit) + else: + response_digest = KD(HA1, b":".join((nonce_bytes, HA2))) + + # Define a dict mapping of header fields to their values + # Group fields into always-present, optional, and qop-dependent + header_fields = { + # Always present fields + "username": escape_quotes(self._login_str), + "realm": escape_quotes(realm), + "nonce": escape_quotes(nonce), + "uri": path, + "response": response_digest.decode(), + "algorithm": algorithm_original, + } + + # Optional fields + if opaque: + header_fields["opaque"] = escape_quotes(opaque) + + # QoP-dependent fields + if qop: + header_fields["qop"] = qop + header_fields["nc"] = ncvalue + header_fields["cnonce"] = cnonce + + # Build header using templates for each field type + pairs: List[str] = [] + for field, value in header_fields.items(): + if field in QUOTED_AUTH_FIELDS: + pairs.append(f'{field}="{value}"') + else: + pairs.append(f"{field}={value}") + + return f"Digest {', '.join(pairs)}" + + def _in_protection_space(self, url: URL) -> bool: + """ + Check if the given URL is within the current protection space. + + According to RFC 7616, a URI is in the protection space if any URI + in the protection space is a prefix of it (after both have been made absolute). + """ + request_str = str(url) + for space_str in self._protection_space: + # Check if request starts with space URL + if not request_str.startswith(space_str): + continue + # Exact match or space ends with / (proper directory prefix) + if len(request_str) == len(space_str) or space_str[-1] == "/": + return True + # Check next char is / to ensure proper path boundary + if request_str[len(space_str)] == "/": + return True + return False + + def _authenticate(self, response: ClientResponse) -> bool: + """ + Takes the given response and tries digest-auth, if needed. + + Returns true if the original request must be resent. 
+ """ + if response.status != 401: + return False + + auth_header = response.headers.get("www-authenticate", "") + if not auth_header: + return False # No authentication header present + + method, sep, headers = auth_header.partition(" ") + if not sep: + # No space found in www-authenticate header + return False # Malformed auth header, missing scheme separator + + if method.lower() != "digest": + # Not a digest auth challenge (could be Basic, Bearer, etc.) + return False + + if not headers: + # We have a digest scheme but no parameters + return False # Malformed digest header, missing parameters + + # We have a digest auth header with content + if not (header_pairs := parse_header_pairs(headers)): + # Failed to parse any key-value pairs + return False # Malformed digest header, no valid parameters + + # Extract challenge parameters + self._challenge = {} + for field in CHALLENGE_FIELDS: + if value := header_pairs.get(field): + self._challenge[field] = value + + # Update protection space based on domain parameter or default to origin + origin = response.url.origin() + + if domain := self._challenge.get("domain"): + # Parse space-separated list of URIs + self._protection_space = [] + for uri in domain.split(): + # Remove quotes if present + uri = uri.strip('"') + if uri.startswith("/"): + # Path-absolute, relative to origin + self._protection_space.append(str(origin.join(URL(uri)))) + else: + # Absolute URI + self._protection_space.append(str(URL(uri))) + else: + # No domain specified, protection space is entire origin + self._protection_space = [str(origin)] + + # Return True only if we found at least one challenge parameter + return bool(self._challenge) + + async def __call__( + self, request: ClientRequest, handler: ClientHandlerType + ) -> ClientResponse: + """Run the digest auth middleware.""" + response = None + for retry_count in range(2): + # Apply authorization header if: + # 1. This is a retry after 401 (retry_count > 0), OR + # 2. 
Preemptive auth is enabled AND we have a challenge AND the URL is in protection space + if retry_count > 0 or ( + self._preemptive + and self._challenge + and self._in_protection_space(request.url) + ): + request.headers[hdrs.AUTHORIZATION] = await self._encode( + request.method, request.url, request.body + ) + + # Send the request + response = await handler(request) + + # Check if we need to authenticate + if not self._authenticate(response): + break + + # At this point, response is guaranteed to be defined + assert response is not None + return response diff --git a/py311/lib/python3.11/site-packages/aiohttp/client_middlewares.py b/py311/lib/python3.11/site-packages/aiohttp/client_middlewares.py new file mode 100644 index 0000000000000000000000000000000000000000..3ca2cb202ad93963369f2a10fd1d118a194c4405 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/client_middlewares.py @@ -0,0 +1,55 @@ +"""Client middleware support.""" + +from collections.abc import Awaitable, Callable, Sequence + +from .client_reqrep import ClientRequest, ClientResponse + +__all__ = ("ClientMiddlewareType", "ClientHandlerType", "build_client_middlewares") + +# Type alias for client request handlers - functions that process requests and return responses +ClientHandlerType = Callable[[ClientRequest], Awaitable[ClientResponse]] + +# Type for client middleware - similar to server but uses ClientRequest/ClientResponse +ClientMiddlewareType = Callable[ + [ClientRequest, ClientHandlerType], Awaitable[ClientResponse] +] + + +def build_client_middlewares( + handler: ClientHandlerType, + middlewares: Sequence[ClientMiddlewareType], +) -> ClientHandlerType: + """ + Apply middlewares to request handler. + + The middlewares are applied in reverse order, so the first middleware + in the list wraps all subsequent middlewares and the handler. + + This implementation avoids using partial/update_wrapper to minimize overhead + and doesn't cache to avoid holding references to stateful middleware. 
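
+
+    Example (illustrative only; the middleware and handler names here are
+    made up):
+
+        async def add_trace_header(req, handler):
+            req.headers["X-Trace"] = "1"
+            return await handler(req)
+
+        wrapped = build_client_middlewares(send_request, [add_trace_header])
+        response = await wrapped(request)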
+ """ + # Optimize for single middleware case + if len(middlewares) == 1: + middleware = middlewares[0] + + async def single_middleware_handler(req: ClientRequest) -> ClientResponse: + return await middleware(req, handler) + + return single_middleware_handler + + # Build the chain for multiple middlewares + current_handler = handler + + for middleware in reversed(middlewares): + # Create a new closure that captures the current state + def make_wrapper( + mw: ClientMiddlewareType, next_h: ClientHandlerType + ) -> ClientHandlerType: + async def wrapped(req: ClientRequest) -> ClientResponse: + return await mw(req, next_h) + + return wrapped + + current_handler = make_wrapper(middleware, current_handler) + + return current_handler diff --git a/py311/lib/python3.11/site-packages/aiohttp/client_proto.py b/py311/lib/python3.11/site-packages/aiohttp/client_proto.py new file mode 100644 index 0000000000000000000000000000000000000000..e2fb1ce64cb6a39f5a72aecfd5840536defba519 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/client_proto.py @@ -0,0 +1,359 @@ +import asyncio +from contextlib import suppress +from typing import Any, Optional, Tuple, Union + +from .base_protocol import BaseProtocol +from .client_exceptions import ( + ClientConnectionError, + ClientOSError, + ClientPayloadError, + ServerDisconnectedError, + SocketTimeoutError, +) +from .helpers import ( + _EXC_SENTINEL, + EMPTY_BODY_STATUS_CODES, + BaseTimerContext, + set_exception, + set_result, +) +from .http import HttpResponseParser, RawResponseMessage +from .http_exceptions import HttpProcessingError +from .streams import EMPTY_PAYLOAD, DataQueue, StreamReader + + +class ResponseHandler(BaseProtocol, DataQueue[Tuple[RawResponseMessage, StreamReader]]): + """Helper class to adapt between Protocol and StreamReader.""" + + def __init__(self, loop: asyncio.AbstractEventLoop) -> None: + BaseProtocol.__init__(self, loop=loop) + DataQueue.__init__(self, loop) + + self._should_close = False + + self._payload: Optional[StreamReader] = None + self._skip_payload = False + self._payload_parser = None + + self._timer = None + + self._tail = b"" + self._upgraded = False + self._parser: Optional[HttpResponseParser] = None + + self._read_timeout: Optional[float] = None + self._read_timeout_handle: Optional[asyncio.TimerHandle] = None + + self._timeout_ceil_threshold: Optional[float] = 5 + + self._closed: Union[None, asyncio.Future[None]] = None + self._connection_lost_called = False + + @property + def closed(self) -> Union[None, asyncio.Future[None]]: + """Future that is set when the connection is closed. + + This property returns a Future that will be completed when the connection + is closed. The Future is created lazily on first access to avoid creating + futures that will never be awaited. 
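
+        Callers may ``await`` this future (when it is not ``None``) to be
+        woken up once the connection is lost.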
+ + Returns: + - A Future[None] if the connection is still open or was closed after + this property was accessed + - None if connection_lost() was already called before this property + was ever accessed (indicating no one is waiting for the closure) + """ + if self._closed is None and not self._connection_lost_called: + self._closed = self._loop.create_future() + return self._closed + + @property + def upgraded(self) -> bool: + return self._upgraded + + @property + def should_close(self) -> bool: + return bool( + self._should_close + or (self._payload is not None and not self._payload.is_eof()) + or self._upgraded + or self._exception is not None + or self._payload_parser is not None + or self._buffer + or self._tail + ) + + def force_close(self) -> None: + self._should_close = True + + def close(self) -> None: + self._exception = None # Break cyclic references + transport = self.transport + if transport is not None: + transport.close() + self.transport = None + self._payload = None + self._drop_timeout() + + def abort(self) -> None: + self._exception = None # Break cyclic references + transport = self.transport + if transport is not None: + transport.abort() + self.transport = None + self._payload = None + self._drop_timeout() + + def is_connected(self) -> bool: + return self.transport is not None and not self.transport.is_closing() + + def connection_lost(self, exc: Optional[BaseException]) -> None: + self._connection_lost_called = True + self._drop_timeout() + + original_connection_error = exc + reraised_exc = original_connection_error + + connection_closed_cleanly = original_connection_error is None + + if self._closed is not None: + # If someone is waiting for the closed future, + # we should set it to None or an exception. If + # self._closed is None, it means that + # connection_lost() was called already + # or nobody is waiting for it. + if connection_closed_cleanly: + set_result(self._closed, None) + else: + assert original_connection_error is not None + set_exception( + self._closed, + ClientConnectionError( + f"Connection lost: {original_connection_error !s}", + ), + original_connection_error, + ) + + if self._payload_parser is not None: + with suppress(Exception): # FIXME: log this somehow? + self._payload_parser.feed_eof() + + uncompleted = None + if self._parser is not None: + try: + uncompleted = self._parser.feed_eof() + except Exception as underlying_exc: + if self._payload is not None: + client_payload_exc_msg = ( + f"Response payload is not completed: {underlying_exc !r}" + ) + if not connection_closed_cleanly: + client_payload_exc_msg = ( + f"{client_payload_exc_msg !s}. 
" + f"{original_connection_error !r}" + ) + set_exception( + self._payload, + ClientPayloadError(client_payload_exc_msg), + underlying_exc, + ) + + if not self.is_eof(): + if isinstance(original_connection_error, OSError): + reraised_exc = ClientOSError(*original_connection_error.args) + if connection_closed_cleanly: + reraised_exc = ServerDisconnectedError(uncompleted) + # assigns self._should_close to True as side effect, + # we do it anyway below + underlying_non_eof_exc = ( + _EXC_SENTINEL + if connection_closed_cleanly + else original_connection_error + ) + assert underlying_non_eof_exc is not None + assert reraised_exc is not None + self.set_exception(reraised_exc, underlying_non_eof_exc) + + self._should_close = True + self._parser = None + self._payload = None + self._payload_parser = None + self._reading_paused = False + + super().connection_lost(reraised_exc) + + def eof_received(self) -> None: + # should call parser.feed_eof() most likely + self._drop_timeout() + + def pause_reading(self) -> None: + super().pause_reading() + self._drop_timeout() + + def resume_reading(self) -> None: + super().resume_reading() + self._reschedule_timeout() + + def set_exception( + self, + exc: BaseException, + exc_cause: BaseException = _EXC_SENTINEL, + ) -> None: + self._should_close = True + self._drop_timeout() + super().set_exception(exc, exc_cause) + + def set_parser(self, parser: Any, payload: Any) -> None: + # TODO: actual types are: + # parser: WebSocketReader + # payload: WebSocketDataQueue + # but they are not generi enough + # Need an ABC for both types + self._payload = payload + self._payload_parser = parser + + self._drop_timeout() + + if self._tail: + data, self._tail = self._tail, b"" + self.data_received(data) + + def set_response_params( + self, + *, + timer: Optional[BaseTimerContext] = None, + skip_payload: bool = False, + read_until_eof: bool = False, + auto_decompress: bool = True, + read_timeout: Optional[float] = None, + read_bufsize: int = 2**16, + timeout_ceil_threshold: float = 5, + max_line_size: int = 8190, + max_field_size: int = 8190, + ) -> None: + self._skip_payload = skip_payload + + self._read_timeout = read_timeout + + self._timeout_ceil_threshold = timeout_ceil_threshold + + self._parser = HttpResponseParser( + self, + self._loop, + read_bufsize, + timer=timer, + payload_exception=ClientPayloadError, + response_with_body=not skip_payload, + read_until_eof=read_until_eof, + auto_decompress=auto_decompress, + max_line_size=max_line_size, + max_field_size=max_field_size, + ) + + if self._tail: + data, self._tail = self._tail, b"" + self.data_received(data) + + def _drop_timeout(self) -> None: + if self._read_timeout_handle is not None: + self._read_timeout_handle.cancel() + self._read_timeout_handle = None + + def _reschedule_timeout(self) -> None: + timeout = self._read_timeout + if self._read_timeout_handle is not None: + self._read_timeout_handle.cancel() + + if timeout: + self._read_timeout_handle = self._loop.call_later( + timeout, self._on_read_timeout + ) + else: + self._read_timeout_handle = None + + def start_timeout(self) -> None: + self._reschedule_timeout() + + @property + def read_timeout(self) -> Optional[float]: + return self._read_timeout + + @read_timeout.setter + def read_timeout(self, read_timeout: Optional[float]) -> None: + self._read_timeout = read_timeout + + def _on_read_timeout(self) -> None: + exc = SocketTimeoutError("Timeout on reading data from socket") + self.set_exception(exc) + if self._payload is not None: + 
set_exception(self._payload, exc) + + def data_received(self, data: bytes) -> None: + self._reschedule_timeout() + + if not data: + return + + # custom payload parser - currently always WebSocketReader + if self._payload_parser is not None: + eof, tail = self._payload_parser.feed_data(data) + if eof: + self._payload = None + self._payload_parser = None + + if tail: + self.data_received(tail) + return + + if self._upgraded or self._parser is None: + # i.e. websocket connection, websocket parser is not set yet + self._tail += data + return + + # parse http messages + try: + messages, upgraded, tail = self._parser.feed_data(data) + except BaseException as underlying_exc: + if self.transport is not None: + # connection.release() could be called BEFORE + # data_received(), the transport is already + # closed in this case + self.transport.close() + # should_close is True after the call + if isinstance(underlying_exc, HttpProcessingError): + exc = HttpProcessingError( + code=underlying_exc.code, + message=underlying_exc.message, + headers=underlying_exc.headers, + ) + else: + exc = HttpProcessingError() + self.set_exception(exc, underlying_exc) + return + + self._upgraded = upgraded + + payload: Optional[StreamReader] = None + for message, payload in messages: + if message.should_close: + self._should_close = True + + self._payload = payload + + if self._skip_payload or message.code in EMPTY_BODY_STATUS_CODES: + self.feed_data((message, EMPTY_PAYLOAD), 0) + else: + self.feed_data((message, payload), 0) + + if payload is not None: + # new message(s) was processed + # register timeout handler unsubscribing + # either on end-of-stream or immediately for + # EMPTY_PAYLOAD + if payload is not EMPTY_PAYLOAD: + payload.on_eof(self._drop_timeout) + else: + self._drop_timeout() + + if upgraded and tail: + self.data_received(tail) diff --git a/py311/lib/python3.11/site-packages/aiohttp/client_reqrep.py b/py311/lib/python3.11/site-packages/aiohttp/client_reqrep.py new file mode 100644 index 0000000000000000000000000000000000000000..a9e0795893d6a56595f6f7b04b62cc4486d48a0f --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/client_reqrep.py @@ -0,0 +1,1536 @@ +import asyncio +import codecs +import contextlib +import functools +import io +import re +import sys +import traceback +import warnings +from collections.abc import Mapping +from hashlib import md5, sha1, sha256 +from http.cookies import Morsel, SimpleCookie +from types import MappingProxyType, TracebackType +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Literal, + NamedTuple, + Optional, + Tuple, + Type, + Union, +) + +import attr +from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy +from yarl import URL + +from . 
import hdrs, helpers, http, multipart, payload +from ._cookie_helpers import ( + parse_cookie_header, + parse_set_cookie_headers, + preserve_morsel_with_coded_value, +) +from .abc import AbstractStreamWriter +from .client_exceptions import ( + ClientConnectionError, + ClientOSError, + ClientResponseError, + ContentTypeError, + InvalidURL, + ServerFingerprintMismatch, +) +from .compression_utils import HAS_BROTLI, HAS_ZSTD +from .formdata import FormData +from .helpers import ( + _SENTINEL, + BaseTimerContext, + BasicAuth, + HeadersMixin, + TimerNoop, + noop, + reify, + sentinel, + set_exception, + set_result, +) +from .http import ( + SERVER_SOFTWARE, + HttpVersion, + HttpVersion10, + HttpVersion11, + StreamWriter, +) +from .streams import StreamReader +from .typedefs import ( + DEFAULT_JSON_DECODER, + JSONDecoder, + LooseCookies, + LooseHeaders, + Query, + RawHeaders, +) + +if TYPE_CHECKING: + import ssl + from ssl import SSLContext +else: + try: + import ssl + from ssl import SSLContext + except ImportError: # pragma: no cover + ssl = None # type: ignore[assignment] + SSLContext = object # type: ignore[misc,assignment] + + +__all__ = ("ClientRequest", "ClientResponse", "RequestInfo", "Fingerprint") + + +if TYPE_CHECKING: + from .client import ClientSession + from .connector import Connection + from .tracing import Trace + + +_CONNECTION_CLOSED_EXCEPTION = ClientConnectionError("Connection closed") +_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]") +json_re = re.compile(r"^application/(?:[\w.+-]+?\+)?json") + + +def _gen_default_accept_encoding() -> str: + encodings = [ + "gzip", + "deflate", + ] + if HAS_BROTLI: + encodings.append("br") + if HAS_ZSTD: + encodings.append("zstd") + return ", ".join(encodings) + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class ContentDisposition: + type: Optional[str] + parameters: "MappingProxyType[str, str]" + filename: Optional[str] + + +class _RequestInfo(NamedTuple): + url: URL + method: str + headers: "CIMultiDictProxy[str]" + real_url: URL + + +class RequestInfo(_RequestInfo): + + def __new__( + cls, + url: URL, + method: str, + headers: "CIMultiDictProxy[str]", + real_url: Union[URL, _SENTINEL] = sentinel, + ) -> "RequestInfo": + """Create a new RequestInfo instance. + + For backwards compatibility, the real_url parameter is optional. + """ + return tuple.__new__( + cls, (url, method, headers, url if real_url is sentinel else real_url) + ) + + +class Fingerprint: + HASHFUNC_BY_DIGESTLEN = { + 16: md5, + 20: sha1, + 32: sha256, + } + + def __init__(self, fingerprint: bytes) -> None: + digestlen = len(fingerprint) + hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen) + if not hashfunc: + raise ValueError("fingerprint has invalid length") + elif hashfunc is md5 or hashfunc is sha1: + raise ValueError("md5 and sha1 are insecure and not supported. 
Use sha256.") + self._hashfunc = hashfunc + self._fingerprint = fingerprint + + @property + def fingerprint(self) -> bytes: + return self._fingerprint + + def check(self, transport: asyncio.Transport) -> None: + if not transport.get_extra_info("sslcontext"): + return + sslobj = transport.get_extra_info("ssl_object") + cert = sslobj.getpeercert(binary_form=True) + got = self._hashfunc(cert).digest() + if got != self._fingerprint: + host, port, *_ = transport.get_extra_info("peername") + raise ServerFingerprintMismatch(self._fingerprint, got, host, port) + + +if ssl is not None: + SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None)) +else: # pragma: no cover + SSL_ALLOWED_TYPES = (bool, type(None)) + + +def _merge_ssl_params( + ssl: Union["SSLContext", bool, Fingerprint], + verify_ssl: Optional[bool], + ssl_context: Optional["SSLContext"], + fingerprint: Optional[bytes], +) -> Union["SSLContext", bool, Fingerprint]: + if ssl is None: + ssl = True # Double check for backwards compatibility + if verify_ssl is not None and not verify_ssl: + warnings.warn( + "verify_ssl is deprecated, use ssl=False instead", + DeprecationWarning, + stacklevel=3, + ) + if ssl is not True: + raise ValueError( + "verify_ssl, ssl_context, fingerprint and ssl " + "parameters are mutually exclusive" + ) + else: + ssl = False + if ssl_context is not None: + warnings.warn( + "ssl_context is deprecated, use ssl=context instead", + DeprecationWarning, + stacklevel=3, + ) + if ssl is not True: + raise ValueError( + "verify_ssl, ssl_context, fingerprint and ssl " + "parameters are mutually exclusive" + ) + else: + ssl = ssl_context + if fingerprint is not None: + warnings.warn( + "fingerprint is deprecated, use ssl=Fingerprint(fingerprint) instead", + DeprecationWarning, + stacklevel=3, + ) + if ssl is not True: + raise ValueError( + "verify_ssl, ssl_context, fingerprint and ssl " + "parameters are mutually exclusive" + ) + else: + ssl = Fingerprint(fingerprint) + if not isinstance(ssl, SSL_ALLOWED_TYPES): + raise TypeError( + "ssl should be SSLContext, bool, Fingerprint or None, " + "got {!r} instead.".format(ssl) + ) + return ssl + + +_SSL_SCHEMES = frozenset(("https", "wss")) + + +# ConnectionKey is a NamedTuple because it is used as a key in a dict +# and a set in the connector. Since a NamedTuple is a tuple it uses +# the fast native tuple __hash__ and __eq__ implementation in CPython. +class ConnectionKey(NamedTuple): + # the key should contain an information about used proxy / TLS + # to prevent reusing wrong connections from a pool + host: str + port: Optional[int] + is_ssl: bool + ssl: Union[SSLContext, bool, Fingerprint] + proxy: Optional[URL] + proxy_auth: Optional[BasicAuth] + proxy_headers_hash: Optional[int] # hash(CIMultiDict) + + +def _is_expected_content_type( + response_content_type: str, expected_content_type: str +) -> bool: + if expected_content_type == "application/json": + return json_re.match(response_content_type) is not None + return expected_content_type in response_content_type + + +def _warn_if_unclosed_payload(payload: payload.Payload, stacklevel: int = 2) -> None: + """Warn if the payload is not closed. + + Callers must check that the body is a Payload before calling this method. + + Args: + payload: The payload to check + stacklevel: Stack level for the warning (default 2 for direct callers) + """ + if not payload.autoclose and not payload.consumed: + warnings.warn( + "The previous request body contains unclosed resources. 
" + "Use await request.update_body() instead of setting request.body " + "directly to properly close resources and avoid leaks.", + ResourceWarning, + stacklevel=stacklevel, + ) + + +class ClientResponse(HeadersMixin): + + # Some of these attributes are None when created, + # but will be set by the start() method. + # As the end user will likely never see the None values, we cheat the types below. + # from the Status-Line of the response + version: Optional[HttpVersion] = None # HTTP-Version + status: int = None # type: ignore[assignment] # Status-Code + reason: Optional[str] = None # Reason-Phrase + + content: StreamReader = None # type: ignore[assignment] # Payload stream + _body: Optional[bytes] = None + _headers: CIMultiDictProxy[str] = None # type: ignore[assignment] + _history: Tuple["ClientResponse", ...] = () + _raw_headers: RawHeaders = None # type: ignore[assignment] + + _connection: Optional["Connection"] = None # current connection + _cookies: Optional[SimpleCookie] = None + _raw_cookie_headers: Optional[Tuple[str, ...]] = None + _continue: Optional["asyncio.Future[bool]"] = None + _source_traceback: Optional[traceback.StackSummary] = None + _session: Optional["ClientSession"] = None + # set up by ClientRequest after ClientResponse object creation + # post-init stage allows to not change ctor signature + _closed = True # to allow __del__ for non-initialized properly response + _released = False + _in_context = False + + _resolve_charset: Callable[["ClientResponse", bytes], str] = lambda *_: "utf-8" + + __writer: Optional["asyncio.Task[None]"] = None + + def __init__( + self, + method: str, + url: URL, + *, + writer: "Optional[asyncio.Task[None]]", + continue100: Optional["asyncio.Future[bool]"], + timer: BaseTimerContext, + request_info: RequestInfo, + traces: List["Trace"], + loop: asyncio.AbstractEventLoop, + session: "ClientSession", + ) -> None: + # URL forbids subclasses, so a simple type check is enough. + assert type(url) is URL + + self.method = method + + self._real_url = url + self._url = url.with_fragment(None) if url.raw_fragment else url + if writer is not None: + self._writer = writer + if continue100 is not None: + self._continue = continue100 + self._request_info = request_info + self._timer = timer if timer is not None else TimerNoop() + self._cache: Dict[str, Any] = {} + self._traces = traces + self._loop = loop + # Save reference to _resolve_charset, so that get_encoding() will still + # work after the response has finished reading the body. + # TODO: Fix session=None in tests (see ClientRequest.__init__). + if session is not None: + # store a reference to session #1985 + self._session = session + self._resolve_charset = session._resolve_charset + if loop.get_debug(): + self._source_traceback = traceback.extract_stack(sys._getframe(1)) + + def __reset_writer(self, _: object = None) -> None: + self.__writer = None + + @property + def _writer(self) -> Optional["asyncio.Task[None]"]: + """The writer task for streaming data. + + _writer is only provided for backwards compatibility + for subclasses that may need to access it. + """ + return self.__writer + + @_writer.setter + def _writer(self, writer: Optional["asyncio.Task[None]"]) -> None: + """Set the writer task for streaming data.""" + if self.__writer is not None: + self.__writer.remove_done_callback(self.__reset_writer) + self.__writer = writer + if writer is None: + return + if writer.done(): + # The writer is already done, so we can clear it immediately. 

+                self.__writer = None
+            else:
+                writer.add_done_callback(self.__reset_writer)
+
+    @property
+    def cookies(self) -> SimpleCookie:
+        if self._cookies is None:
+            if self._raw_cookie_headers is not None:
+                # Parse cookies for response.cookies (SimpleCookie for backward compatibility)
+                cookies = SimpleCookie()
+                # Use parse_set_cookie_headers for more lenient parsing that handles
+                # malformed cookies better than SimpleCookie.load
+                cookies.update(parse_set_cookie_headers(self._raw_cookie_headers))
+                self._cookies = cookies
+            else:
+                self._cookies = SimpleCookie()
+        return self._cookies
+
+    @cookies.setter
+    def cookies(self, cookies: SimpleCookie) -> None:
+        self._cookies = cookies
+        # Generate raw cookie headers from the SimpleCookie
+        if cookies:
+            self._raw_cookie_headers = tuple(
+                morsel.OutputString() for morsel in cookies.values()
+            )
+        else:
+            self._raw_cookie_headers = None
+
+    @reify
+    def url(self) -> URL:
+        return self._url
+
+    @reify
+    def url_obj(self) -> URL:
+        warnings.warn("Deprecated, use .url #1654", DeprecationWarning, stacklevel=2)
+        return self._url
+
+    @reify
+    def real_url(self) -> URL:
+        return self._real_url
+
+    @reify
+    def host(self) -> str:
+        assert self._url.host is not None
+        return self._url.host
+
+    @reify
+    def headers(self) -> "CIMultiDictProxy[str]":
+        return self._headers
+
+    @reify
+    def raw_headers(self) -> RawHeaders:
+        return self._raw_headers
+
+    @reify
+    def request_info(self) -> RequestInfo:
+        return self._request_info
+
+    @reify
+    def content_disposition(self) -> Optional[ContentDisposition]:
+        raw = self._headers.get(hdrs.CONTENT_DISPOSITION)
+        if raw is None:
+            return None
+        disposition_type, params_dct = multipart.parse_content_disposition(raw)
+        params = MappingProxyType(params_dct)
+        filename = multipart.content_disposition_filename(params)
+        return ContentDisposition(disposition_type, params, filename)
+
+    def __del__(self, _warnings: Any = warnings) -> None:
+        if self._closed:
+            return
+
+        if self._connection is not None:
+            self._connection.release()
+            self._cleanup_writer()
+
+            if self._loop.get_debug():
+                kwargs = {"source": self}
+                _warnings.warn(f"Unclosed response {self!r}", ResourceWarning, **kwargs)
+                context = {"client_response": self, "message": "Unclosed response"}
+                if self._source_traceback:
+                    context["source_traceback"] = self._source_traceback
+                self._loop.call_exception_handler(context)
+
+    def __repr__(self) -> str:
+        out = io.StringIO()
+        ascii_encodable_url = str(self.url)
+        if self.reason:
+            ascii_encodable_reason = self.reason.encode(
+                "ascii", "backslashreplace"
+            ).decode("ascii")
+        else:
+            ascii_encodable_reason = "None"
+        print(
+            "<ClientResponse({}) [{} {}]>".format(
+                ascii_encodable_url, self.status, ascii_encodable_reason
+            ),
+            file=out,
+        )
+        print(self.headers, file=out)
+        return out.getvalue()
+
+    @property
+    def connection(self) -> Optional["Connection"]:
+        return self._connection
+
+    @reify
+    def history(self) -> Tuple["ClientResponse", ...]:
+        """A sequence of responses, if redirects occurred."""
+        return self._history
+
+    @reify
+    def links(self) -> "MultiDictProxy[MultiDictProxy[Union[str, URL]]]":
+        links_str = ", ".join(self.headers.getall("link", []))
+
+        if not links_str:
+            return MultiDictProxy(MultiDict())
+
+        links: MultiDict[MultiDictProxy[Union[str, URL]]] = MultiDict()
+
+        for val in re.split(r",(?=\s*<)", links_str):
+            match = re.match(r"\s*<(.*)>(.*)", val)
+            if match is None:  # pragma: no cover
+                # the check exists to suppress mypy error
+                continue
+            url, params_str = match.groups()
+            params = 
params_str.split(";")[1:] + + link: MultiDict[Union[str, URL]] = MultiDict() + + for param in params: + match = re.match(r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$", param, re.M) + if match is None: # pragma: no cover + # the check exists to suppress mypy error + continue + key, _, value, _ = match.groups() + + link.add(key, value) + + key = link.get("rel", url) + + link.add("url", self.url.join(URL(url))) + + links.add(str(key), MultiDictProxy(link)) + + return MultiDictProxy(links) + + async def start(self, connection: "Connection") -> "ClientResponse": + """Start response processing.""" + self._closed = False + self._protocol = connection.protocol + self._connection = connection + + with self._timer: + while True: + # read response + try: + protocol = self._protocol + message, payload = await protocol.read() # type: ignore[union-attr] + except http.HttpProcessingError as exc: + raise ClientResponseError( + self.request_info, + self.history, + status=exc.code, + message=exc.message, + headers=exc.headers, + ) from exc + + if message.code < 100 or message.code > 199 or message.code == 101: + break + + if self._continue is not None: + set_result(self._continue, True) + self._continue = None + + # payload eof handler + payload.on_eof(self._response_eof) + + # response status + self.version = message.version + self.status = message.code + self.reason = message.reason + + # headers + self._headers = message.headers # type is CIMultiDictProxy + self._raw_headers = message.raw_headers # type is Tuple[bytes, bytes] + + # payload + self.content = payload + + # cookies + if cookie_hdrs := self.headers.getall(hdrs.SET_COOKIE, ()): + # Store raw cookie headers for CookieJar + self._raw_cookie_headers = tuple(cookie_hdrs) + return self + + def _response_eof(self) -> None: + if self._closed: + return + + # protocol could be None because connection could be detached + protocol = self._connection and self._connection.protocol + if protocol is not None and protocol.upgraded: + return + + self._closed = True + self._cleanup_writer() + self._release_connection() + + @property + def closed(self) -> bool: + return self._closed + + def close(self) -> None: + if not self._released: + self._notify_content() + + self._closed = True + if self._loop is None or self._loop.is_closed(): + return + + self._cleanup_writer() + if self._connection is not None: + self._connection.close() + self._connection = None + + def release(self) -> Any: + if not self._released: + self._notify_content() + + self._closed = True + + self._cleanup_writer() + self._release_connection() + return noop() + + @property + def ok(self) -> bool: + """Returns ``True`` if ``status`` is less than ``400``, ``False`` if not. + + This is **not** a check for ``200 OK`` but a check that the response + status is under 400. + """ + return 400 > self.status + + def raise_for_status(self) -> None: + if not self.ok: + # reason should always be not None for a started response + assert self.reason is not None + + # If we're in a context we can rely on __aexit__() to release as the + # exception propagates. 
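
+            # Outside of a context manager nothing else will release the
+            # connection, so release it here before raising.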
+ if not self._in_context: + self.release() + + raise ClientResponseError( + self.request_info, + self.history, + status=self.status, + message=self.reason, + headers=self.headers, + ) + + def _release_connection(self) -> None: + if self._connection is not None: + if self.__writer is None: + self._connection.release() + self._connection = None + else: + self.__writer.add_done_callback(lambda f: self._release_connection()) + + async def _wait_released(self) -> None: + if self.__writer is not None: + try: + await self.__writer + except asyncio.CancelledError: + if ( + sys.version_info >= (3, 11) + and (task := asyncio.current_task()) + and task.cancelling() + ): + raise + self._release_connection() + + def _cleanup_writer(self) -> None: + if self.__writer is not None: + self.__writer.cancel() + self._session = None + + def _notify_content(self) -> None: + content = self.content + if content and content.exception() is None: + set_exception(content, _CONNECTION_CLOSED_EXCEPTION) + self._released = True + + async def wait_for_close(self) -> None: + if self.__writer is not None: + try: + await self.__writer + except asyncio.CancelledError: + if ( + sys.version_info >= (3, 11) + and (task := asyncio.current_task()) + and task.cancelling() + ): + raise + self.release() + + async def read(self) -> bytes: + """Read response payload.""" + if self._body is None: + try: + self._body = await self.content.read() + for trace in self._traces: + await trace.send_response_chunk_received( + self.method, self.url, self._body + ) + except BaseException: + self.close() + raise + elif self._released: # Response explicitly released + raise ClientConnectionError("Connection closed") + + protocol = self._connection and self._connection.protocol + if protocol is None or not protocol.upgraded: + await self._wait_released() # Underlying connection released + return self._body + + def get_encoding(self) -> str: + ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower() + mimetype = helpers.parse_mimetype(ctype) + + encoding = mimetype.parameters.get("charset") + if encoding: + with contextlib.suppress(LookupError, ValueError): + return codecs.lookup(encoding).name + + if mimetype.type == "application" and ( + mimetype.subtype == "json" or mimetype.subtype == "rdap" + ): + # RFC 7159 states that the default encoding is UTF-8. 

+            # RFC 7483 defines application/rdap+json
+            return "utf-8"
+
+        if self._body is None:
+            raise RuntimeError(
+                "Cannot compute fallback encoding of a not yet read body"
+            )
+
+        return self._resolve_charset(self, self._body)
+
+    async def text(self, encoding: Optional[str] = None, errors: str = "strict") -> str:
+        """Read response payload and decode."""
+        if self._body is None:
+            await self.read()
+
+        if encoding is None:
+            encoding = self.get_encoding()
+
+        return self._body.decode(encoding, errors=errors)  # type: ignore[union-attr]
+
+    async def json(
+        self,
+        *,
+        encoding: Optional[str] = None,
+        loads: JSONDecoder = DEFAULT_JSON_DECODER,
+        content_type: Optional[str] = "application/json",
+    ) -> Any:
+        """Read and decode JSON response."""
+        if self._body is None:
+            await self.read()
+
+        if content_type:
+            ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower()
+            if not _is_expected_content_type(ctype, content_type):
+                raise ContentTypeError(
+                    self.request_info,
+                    self.history,
+                    status=self.status,
+                    message=(
+                        "Attempt to decode JSON with unexpected mimetype: %s" % ctype
+                    ),
+                    headers=self.headers,
+                )
+
+        stripped = self._body.strip()  # type: ignore[union-attr]
+        if not stripped:
+            return None
+
+        if encoding is None:
+            encoding = self.get_encoding()
+
+        return loads(stripped.decode(encoding))
+
+    async def __aenter__(self) -> "ClientResponse":
+        self._in_context = True
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        self._in_context = False
+        # similar to _RequestContextManager, we do not need to check
+        # for exceptions, response object can close connection
+        # if state is broken
+        self.release()
+        await self.wait_for_close()
+
+
+class ClientRequest:
+    GET_METHODS = {
+        hdrs.METH_GET,
+        hdrs.METH_HEAD,
+        hdrs.METH_OPTIONS,
+        hdrs.METH_TRACE,
+    }
+    POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
+    ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE})
+
+    DEFAULT_HEADERS = {
+        hdrs.ACCEPT: "*/*",
+        hdrs.ACCEPT_ENCODING: _gen_default_accept_encoding(),
+    }
+
+    # Type of body depends on PAYLOAD_REGISTRY, which is dynamic.
+    _body: Union[None, payload.Payload] = None
+    auth = None
+    response = None
+
+    __writer: Optional["asyncio.Task[None]"] = None  # async task for streaming data
+
+    # These class defaults help create_autospec() work correctly.
+    # If autospec is improved in future, maybe these can be removed.
+    url = URL()
+    method = "GET"
+
+    _continue = None  # waiter future for '100 Continue' response
+
+    _skip_auto_headers: Optional["CIMultiDict[None]"] = None
+
+    # N.B.
+    # Adding a __del__ method that closes self._writer doesn't make sense
+    # because _writer is an instance method and thus keeps a reference to
+    # self. Until the writer has finished, the finalizer will not be called.
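
+    # (Concretely, the running writer task holds a bound-method done
+    # callback referencing self, keeping the instance reachable until the
+    # task completes.)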
+ + def __init__( + self, + method: str, + url: URL, + *, + params: Query = None, + headers: Optional[LooseHeaders] = None, + skip_auto_headers: Optional[Iterable[str]] = None, + data: Any = None, + cookies: Optional[LooseCookies] = None, + auth: Optional[BasicAuth] = None, + version: http.HttpVersion = http.HttpVersion11, + compress: Union[str, bool, None] = None, + chunked: Optional[bool] = None, + expect100: bool = False, + loop: Optional[asyncio.AbstractEventLoop] = None, + response_class: Optional[Type["ClientResponse"]] = None, + proxy: Optional[URL] = None, + proxy_auth: Optional[BasicAuth] = None, + timer: Optional[BaseTimerContext] = None, + session: Optional["ClientSession"] = None, + ssl: Union[SSLContext, bool, Fingerprint] = True, + proxy_headers: Optional[LooseHeaders] = None, + traces: Optional[List["Trace"]] = None, + trust_env: bool = False, + server_hostname: Optional[str] = None, + ): + if loop is None: + loop = asyncio.get_event_loop() + if match := _CONTAINS_CONTROL_CHAR_RE.search(method): + raise ValueError( + f"Method cannot contain non-token characters {method!r} " + f"(found at least {match.group()!r})" + ) + # URL forbids subclasses, so a simple type check is enough. + assert type(url) is URL, url + if proxy is not None: + assert type(proxy) is URL, proxy + # FIXME: session is None in tests only, need to fix tests + # assert session is not None + if TYPE_CHECKING: + assert session is not None + self._session = session + if params: + url = url.extend_query(params) + self.original_url = url + self.url = url.with_fragment(None) if url.raw_fragment else url + self.method = method.upper() + self.chunked = chunked + self.compress = compress + self.loop = loop + self.length = None + if response_class is None: + real_response_class = ClientResponse + else: + real_response_class = response_class + self.response_class: Type[ClientResponse] = real_response_class + self._timer = timer if timer is not None else TimerNoop() + self._ssl = ssl if ssl is not None else True + self.server_hostname = server_hostname + + if loop.get_debug(): + self._source_traceback = traceback.extract_stack(sys._getframe(1)) + + self.update_version(version) + self.update_host(url) + self.update_headers(headers) + self.update_auto_headers(skip_auto_headers) + self.update_cookies(cookies) + self.update_content_encoding(data) + self.update_auth(auth, trust_env) + self.update_proxy(proxy, proxy_auth, proxy_headers) + + self.update_body_from_data(data) + if data is not None or self.method not in self.GET_METHODS: + self.update_transfer_encoding() + self.update_expect_continue(expect100) + self._traces = [] if traces is None else traces + + def __reset_writer(self, _: object = None) -> None: + self.__writer = None + + def _get_content_length(self) -> Optional[int]: + """Extract and validate Content-Length header value. + + Returns parsed Content-Length value or None if not set. + Raises ValueError if header exists but cannot be parsed as an integer. 
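
+
+        For example, ``Content-Length: 42`` yields ``42``, while
+        ``Content-Length: abc`` raises ``ValueError``.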
+ """ + if hdrs.CONTENT_LENGTH not in self.headers: + return None + + content_length_hdr = self.headers[hdrs.CONTENT_LENGTH] + try: + return int(content_length_hdr) + except ValueError: + raise ValueError( + f"Invalid Content-Length header: {content_length_hdr}" + ) from None + + @property + def skip_auto_headers(self) -> CIMultiDict[None]: + return self._skip_auto_headers or CIMultiDict() + + @property + def _writer(self) -> Optional["asyncio.Task[None]"]: + return self.__writer + + @_writer.setter + def _writer(self, writer: "asyncio.Task[None]") -> None: + if self.__writer is not None: + self.__writer.remove_done_callback(self.__reset_writer) + self.__writer = writer + writer.add_done_callback(self.__reset_writer) + + def is_ssl(self) -> bool: + return self.url.scheme in _SSL_SCHEMES + + @property + def ssl(self) -> Union["SSLContext", bool, Fingerprint]: + return self._ssl + + @property + def connection_key(self) -> ConnectionKey: + if proxy_headers := self.proxy_headers: + h: Optional[int] = hash(tuple(proxy_headers.items())) + else: + h = None + url = self.url + return tuple.__new__( + ConnectionKey, + ( + url.raw_host or "", + url.port, + url.scheme in _SSL_SCHEMES, + self._ssl, + self.proxy, + self.proxy_auth, + h, + ), + ) + + @property + def host(self) -> str: + ret = self.url.raw_host + assert ret is not None + return ret + + @property + def port(self) -> Optional[int]: + return self.url.port + + @property + def body(self) -> Union[payload.Payload, Literal[b""]]: + """Request body.""" + # empty body is represented as bytes for backwards compatibility + return self._body or b"" + + @body.setter + def body(self, value: Any) -> None: + """Set request body with warning for non-autoclose payloads. + + WARNING: This setter must be called from within an event loop and is not + thread-safe. Setting body outside of an event loop may raise RuntimeError + when closing file-based payloads. + + DEPRECATED: Direct assignment to body is deprecated and will be removed + in a future version. Use await update_body() instead for proper resource + management. + """ + # Close existing payload if present + if self._body is not None: + # Warn if the payload needs manual closing + # stacklevel=3: user code -> body setter -> _warn_if_unclosed_payload + _warn_if_unclosed_payload(self._body, stacklevel=3) + # NOTE: In the future, when we remove sync close support, + # this setter will need to be removed and only the async + # update_body() method will be available. For now, we call + # _close() for backwards compatibility. + self._body._close() + self._update_body(value) + + @property + def request_info(self) -> RequestInfo: + headers: CIMultiDictProxy[str] = CIMultiDictProxy(self.headers) + # These are created on every request, so we use a NamedTuple + # for performance reasons. We don't use the RequestInfo.__new__ + # method because it has a different signature which is provided + # for backwards compatibility only. + return tuple.__new__( + RequestInfo, (self.url, self.method, headers, self.original_url) + ) + + @property + def session(self) -> "ClientSession": + """Return the ClientSession instance. + + This property provides access to the ClientSession that initiated + this request, allowing middleware to make additional requests + using the same session. 
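
+
+        For example (illustrative), a retry middleware could call
+        ``await request.session.get(status_url)`` to probe a hypothetical
+        status endpoint before retrying.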

+        """
+        return self._session
+
+    def update_host(self, url: URL) -> None:
+        """Update destination host, port and connection type (ssl)."""
+        # get host/port
+        if not url.raw_host:
+            raise InvalidURL(url)
+
+        # basic auth info
+        if url.raw_user or url.raw_password:
+            self.auth = helpers.BasicAuth(url.user or "", url.password or "")
+
+    def update_version(self, version: Union[http.HttpVersion, str]) -> None:
+        """Convert request version to a two-element tuple.
+
+        Parses an HTTP version string, e.g. '1.1' => (1, 1).
+        """
+        if isinstance(version, str):
+            v = [part.strip() for part in version.split(".", 1)]
+            try:
+                version = http.HttpVersion(int(v[0]), int(v[1]))
+            except ValueError:
+                raise ValueError(
+                    f"Can not parse http version number: {version}"
+                ) from None
+        self.version = version
+
+    def update_headers(self, headers: Optional[LooseHeaders]) -> None:
+        """Update request headers."""
+        self.headers: CIMultiDict[str] = CIMultiDict()
+
+        # Build the host header
+        host = self.url.host_port_subcomponent
+
+        # host_port_subcomponent is None when the URL is a relative URL,
+        # but we know we do not have a relative URL here.
+        assert host is not None
+        self.headers[hdrs.HOST] = host
+
+        if not headers:
+            return
+
+        if isinstance(headers, (dict, MultiDictProxy, MultiDict)):
+            headers = headers.items()
+
+        for key, value in headers:  # type: ignore[misc]
+            # A special case for Host header
+            if key in hdrs.HOST_ALL:
+                self.headers[key] = value
+            else:
+                self.headers.add(key, value)
+
+    def update_auto_headers(self, skip_auto_headers: Optional[Iterable[str]]) -> None:
+        if skip_auto_headers is not None:
+            self._skip_auto_headers = CIMultiDict(
+                (hdr, None) for hdr in sorted(skip_auto_headers)
+            )
+            used_headers = self.headers.copy()
+            used_headers.extend(self._skip_auto_headers)  # type: ignore[arg-type]
+        else:
+            # Fast path when there are no headers to skip
+            # which is the most common case.
+            used_headers = self.headers
+
+        for hdr, val in self.DEFAULT_HEADERS.items():
+            if hdr not in used_headers:
+                self.headers[hdr] = val
+
+        if hdrs.USER_AGENT not in used_headers:
+            self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE
+
+    def update_cookies(self, cookies: Optional[LooseCookies]) -> None:
+        """Update request cookies header."""
+        if not cookies:
+            return
+
+        c = SimpleCookie()
+        if hdrs.COOKIE in self.headers:
+            # parse_cookie_header for RFC 6265 compliant Cookie header parsing
+            c.update(parse_cookie_header(self.headers.get(hdrs.COOKIE, "")))
+            del self.headers[hdrs.COOKIE]
+
+        if isinstance(cookies, Mapping):
+            iter_cookies = cookies.items()
+        else:
+            iter_cookies = cookies  # type: ignore[assignment]
+        for name, value in iter_cookies:
+            if isinstance(value, Morsel):
+                # Use helper to preserve coded_value exactly as sent by server
+                c[name] = preserve_morsel_with_coded_value(value)
+            else:
+                c[name] = value  # type: ignore[assignment]
+
+        self.headers[hdrs.COOKIE] = c.output(header="", sep=";").strip()
+
+    def update_content_encoding(self, data: Any) -> None:
+        """Set request content encoding."""
+        if not data:
+            # Don't compress an empty body.
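
+            # (compress is cleared as well, so no Content-Encoding header
+            # is emitted below.)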
+ self.compress = None + return + + if self.headers.get(hdrs.CONTENT_ENCODING): + if self.compress: + raise ValueError( + "compress can not be set if Content-Encoding header is set" + ) + elif self.compress: + if not isinstance(self.compress, str): + self.compress = "deflate" + self.headers[hdrs.CONTENT_ENCODING] = self.compress + self.chunked = True # enable chunked, no need to deal with length + + def update_transfer_encoding(self) -> None: + """Analyze transfer-encoding header.""" + te = self.headers.get(hdrs.TRANSFER_ENCODING, "").lower() + + if "chunked" in te: + if self.chunked: + raise ValueError( + "chunked can not be set " + 'if "Transfer-Encoding: chunked" header is set' + ) + + elif self.chunked: + if hdrs.CONTENT_LENGTH in self.headers: + raise ValueError( + "chunked can not be set if Content-Length header is set" + ) + + self.headers[hdrs.TRANSFER_ENCODING] = "chunked" + + def update_auth(self, auth: Optional[BasicAuth], trust_env: bool = False) -> None: + """Set basic auth.""" + if auth is None: + auth = self.auth + if auth is None: + return + + if not isinstance(auth, helpers.BasicAuth): + raise TypeError("BasicAuth() tuple is required instead") + + self.headers[hdrs.AUTHORIZATION] = auth.encode() + + def update_body_from_data(self, body: Any, _stacklevel: int = 3) -> None: + """Update request body from data.""" + if self._body is not None: + _warn_if_unclosed_payload(self._body, stacklevel=_stacklevel) + + if body is None: + self._body = None + # Set Content-Length to 0 when body is None for methods that expect a body + if ( + self.method not in self.GET_METHODS + and not self.chunked + and hdrs.CONTENT_LENGTH not in self.headers + ): + self.headers[hdrs.CONTENT_LENGTH] = "0" + return + + # FormData + maybe_payload = body() if isinstance(body, FormData) else body + + try: + body_payload = payload.PAYLOAD_REGISTRY.get(maybe_payload, disposition=None) + except payload.LookupError: + body_payload = FormData(maybe_payload)() # type: ignore[arg-type] + + self._body = body_payload + # enable chunked encoding if needed + if not self.chunked and hdrs.CONTENT_LENGTH not in self.headers: + if (size := body_payload.size) is not None: + self.headers[hdrs.CONTENT_LENGTH] = str(size) + else: + self.chunked = True + + # copy payload headers + assert body_payload.headers + headers = self.headers + skip_headers = self._skip_auto_headers + for key, value in body_payload.headers.items(): + if key in headers or (skip_headers is not None and key in skip_headers): + continue + headers[key] = value + + def _update_body(self, body: Any) -> None: + """Update request body after its already been set.""" + # Remove existing Content-Length header since body is changing + if hdrs.CONTENT_LENGTH in self.headers: + del self.headers[hdrs.CONTENT_LENGTH] + + # Remove existing Transfer-Encoding header to avoid conflicts + if self.chunked and hdrs.TRANSFER_ENCODING in self.headers: + del self.headers[hdrs.TRANSFER_ENCODING] + + # Now update the body using the existing method + # Called from _update_body, add 1 to stacklevel from caller + self.update_body_from_data(body, _stacklevel=4) + + # Update transfer encoding headers if needed (same logic as __init__) + if body is not None or self.method not in self.GET_METHODS: + self.update_transfer_encoding() + + async def update_body(self, body: Any) -> None: + """ + Update request body and close previous payload if needed. + + This method safely updates the request body by first closing any existing + payload to prevent resource leaks, then setting the new body. 
+ + IMPORTANT: Always use this method instead of setting request.body directly. + Direct assignment to request.body will leak resources if the previous body + contains file handles, streams, or other resources that need cleanup. + + Args: + body: The new body content. Can be: + - bytes/bytearray: Raw binary data + - str: Text data (will be encoded using charset from Content-Type) + - FormData: Form data that will be encoded as multipart/form-data + - Payload: A pre-configured payload object + - AsyncIterable: An async iterable of bytes chunks + - File-like object: Will be read and sent as binary data + - None: Clears the body + + Usage: + # CORRECT: Use update_body + await request.update_body(b"new request data") + + # WRONG: Don't set body directly + # request.body = b"new request data" # This will leak resources! + + # Update with form data + form_data = FormData() + form_data.add_field('field', 'value') + await request.update_body(form_data) + + # Clear body + await request.update_body(None) + + Note: + This method is async because it may need to close file handles or + other resources associated with the previous payload. Always await + this method to ensure proper cleanup. + + Warning: + Setting request.body directly is highly discouraged and can lead to: + - Resource leaks (unclosed file handles, streams) + - Memory leaks (unreleased buffers) + - Unexpected behavior with streaming payloads + + It is not recommended to change the payload type in middleware. If the + body was already set (e.g., as bytes), it's best to keep the same type + rather than converting it (e.g., to str) as this may result in unexpected + behavior. + + See Also: + - update_body_from_data: Synchronous body update without cleanup + - body property: Direct body access (STRONGLY DISCOURAGED) + + """ + # Close existing payload if it exists and needs closing + if self._body is not None: + await self._body.close() + self._update_body(body) + + def update_expect_continue(self, expect: bool = False) -> None: + if expect: + self.headers[hdrs.EXPECT] = "100-continue" + elif ( + hdrs.EXPECT in self.headers + and self.headers[hdrs.EXPECT].lower() == "100-continue" + ): + expect = True + + if expect: + self._continue = self.loop.create_future() + + def update_proxy( + self, + proxy: Optional[URL], + proxy_auth: Optional[BasicAuth], + proxy_headers: Optional[LooseHeaders], + ) -> None: + self.proxy = proxy + if proxy is None: + self.proxy_auth = None + self.proxy_headers = None + return + + if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth): + raise ValueError("proxy_auth must be None or BasicAuth() tuple") + self.proxy_auth = proxy_auth + + if proxy_headers is not None and not isinstance( + proxy_headers, (MultiDict, MultiDictProxy) + ): + proxy_headers = CIMultiDict(proxy_headers) + self.proxy_headers = proxy_headers + + async def write_bytes( + self, + writer: AbstractStreamWriter, + conn: "Connection", + content_length: Optional[int] = None, + ) -> None: + """ + Write the request body to the connection stream. + + This method handles writing different types of request bodies: + 1. Payload objects (using their specialized write_with_length method) + 2. Bytes/bytearray objects + 3. 
Iterable body content + + Args: + writer: The stream writer to write the body to + conn: The connection being used for this request + content_length: Optional maximum number of bytes to write from the body + (None means write the entire body) + + The method properly handles: + - Waiting for 100-Continue responses if required + - Content length constraints for chunked encoding + - Error handling for network issues, cancellation, and other exceptions + - Signaling EOF and timeout management + + Raises: + ClientOSError: When there's an OS-level error writing the body + ClientConnectionError: When there's a general connection error + asyncio.CancelledError: When the operation is cancelled + + """ + # 100-continue handling + if self._continue is not None: + # Force headers to be sent before waiting for 100-continue + writer.send_headers() + await writer.drain() + await self._continue + + protocol = conn.protocol + assert protocol is not None + try: + # This should be a rare case but the + # self._body can be set to None while + # the task is being started or while we wait above + # for the 100-continue response. + # The more likely case is we have an empty + # payload, but 100-continue is still expected. + if self._body is not None: + await self._body.write_with_length(writer, content_length) + except OSError as underlying_exc: + reraised_exc = underlying_exc + + # Distinguish between timeout and other OS errors for better error reporting + exc_is_not_timeout = underlying_exc.errno is not None or not isinstance( + underlying_exc, asyncio.TimeoutError + ) + if exc_is_not_timeout: + reraised_exc = ClientOSError( + underlying_exc.errno, + f"Can not write request body for {self.url !s}", + ) + + set_exception(protocol, reraised_exc, underlying_exc) + except asyncio.CancelledError: + # Body hasn't been fully sent, so connection can't be reused + conn.close() + raise + except Exception as underlying_exc: + set_exception( + protocol, + ClientConnectionError( + "Failed to send bytes into the underlying connection " + f"{conn !s}: {underlying_exc!r}", + ), + underlying_exc, + ) + else: + # Successfully wrote the body, signal EOF and start response timeout + await writer.write_eof() + protocol.start_timeout() + + async def send(self, conn: "Connection") -> "ClientResponse": + # Specify request target: + # - a CONNECT request must send authority form URI + # - a non-CONNECT request via a proxy must send absolute form URI + # - most common is origin form URI + if self.method == hdrs.METH_CONNECT: + connect_host = self.url.host_subcomponent + assert connect_host is not None + path = f"{connect_host}:{self.url.port}" + elif self.proxy and not self.is_ssl(): + path = str(self.url) + else: + path = self.url.raw_path_qs + + protocol = conn.protocol + assert protocol is not None + writer = StreamWriter( + protocol, + self.loop, + on_chunk_sent=( + functools.partial(self._on_chunk_request_sent, self.method, self.url) + if self._traces + else None + ), + on_headers_sent=( + functools.partial(self._on_headers_request_sent, self.method, self.url) + if self._traces + else None + ), + ) + + if self.compress: + writer.enable_compression(self.compress)  # type: ignore[arg-type] + + if self.chunked is not None: + writer.enable_chunking() + + # set default content-type + if ( + self.method in self.POST_METHODS + and ( + self._skip_auto_headers is None + or hdrs.CONTENT_TYPE not in self._skip_auto_headers + ) + and hdrs.CONTENT_TYPE not in self.headers + ): + self.headers[hdrs.CONTENT_TYPE] = "application/octet-stream" + + v = self.version + if 
hdrs.CONNECTION not in self.headers: + if conn._connector.force_close: + if v == HttpVersion11: + self.headers[hdrs.CONNECTION] = "close" + elif v == HttpVersion10: + self.headers[hdrs.CONNECTION] = "keep-alive" + + # status + headers + status_line = f"{self.method} {path} HTTP/{v.major}.{v.minor}" + + # Buffer headers for potential coalescing with body + await writer.write_headers(status_line, self.headers) + + task: Optional["asyncio.Task[None]"] + if self._body or self._continue is not None or protocol.writing_paused: + coro = self.write_bytes(writer, conn, self._get_content_length()) + if sys.version_info >= (3, 12): + # Optimization for Python 3.12, try to write + # bytes immediately to avoid having to schedule + # the task on the event loop. + task = asyncio.Task(coro, loop=self.loop, eager_start=True) + else: + task = self.loop.create_task(coro) + if task.done(): + task = None + else: + self._writer = task + else: + # We have nothing to write because + # - there is no body + # - the protocol does not have writing paused + # - we are not waiting for a 100-continue response + protocol.start_timeout() + writer.set_eof() + task = None + response_class = self.response_class + assert response_class is not None + self.response = response_class( + self.method, + self.original_url, + writer=task, + continue100=self._continue, + timer=self._timer, + request_info=self.request_info, + traces=self._traces, + loop=self.loop, + session=self._session, + ) + return self.response + + async def close(self) -> None: + if self.__writer is not None: + try: + await self.__writer + except asyncio.CancelledError: + if ( + sys.version_info >= (3, 11) + and (task := asyncio.current_task()) + and task.cancelling() + ): + raise + + def terminate(self) -> None: + if self.__writer is not None: + if not self.loop.is_closed(): + self.__writer.cancel() + self.__writer.remove_done_callback(self.__reset_writer) + self.__writer = None + + async def _on_chunk_request_sent(self, method: str, url: URL, chunk: bytes) -> None: + for trace in self._traces: + await trace.send_request_chunk_sent(method, url, chunk) + + async def _on_headers_request_sent( + self, method: str, url: URL, headers: "CIMultiDict[str]" + ) -> None: + for trace in self._traces: + await trace.send_request_headers(method, url, headers) diff --git a/py311/lib/python3.11/site-packages/aiohttp/client_ws.py b/py311/lib/python3.11/site-packages/aiohttp/client_ws.py new file mode 100644 index 0000000000000000000000000000000000000000..daa57d1930b1b0cfc03594aaae62bd93da5da165 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/client_ws.py @@ -0,0 +1,428 @@ +"""WebSocket client for asyncio.""" + +import asyncio +import sys +from types import TracebackType +from typing import Any, Optional, Type, cast + +import attr + +from ._websocket.reader import WebSocketDataQueue +from .client_exceptions import ClientError, ServerTimeoutError, WSMessageTypeError +from .client_reqrep import ClientResponse +from .helpers import calculate_timeout_when, set_result +from .http import ( + WS_CLOSED_MESSAGE, + WS_CLOSING_MESSAGE, + WebSocketError, + WSCloseCode, + WSMessage, + WSMsgType, +) +from .http_websocket import _INTERNAL_RECEIVE_TYPES, WebSocketWriter +from .streams import EofStream +from .typedefs import ( + DEFAULT_JSON_DECODER, + DEFAULT_JSON_ENCODER, + JSONDecoder, + JSONEncoder, +) + +if sys.version_info >= (3, 11): + import asyncio as async_timeout +else: + import async_timeout + + +@attr.s(frozen=True, slots=True) +class ClientWSTimeout: + ws_receive = 
attr.ib(type=Optional[float], default=None) + ws_close = attr.ib(type=Optional[float], default=None) + + +DEFAULT_WS_CLIENT_TIMEOUT = ClientWSTimeout(ws_receive=None, ws_close=10.0) + + +class ClientWebSocketResponse: + def __init__( + self, + reader: WebSocketDataQueue, + writer: WebSocketWriter, + protocol: Optional[str], + response: ClientResponse, + timeout: ClientWSTimeout, + autoclose: bool, + autoping: bool, + loop: asyncio.AbstractEventLoop, + *, + heartbeat: Optional[float] = None, + compress: int = 0, + client_notakeover: bool = False, + ) -> None: + self._response = response + self._conn = response.connection + + self._writer = writer + self._reader = reader + self._protocol = protocol + self._closed = False + self._closing = False + self._close_code: Optional[int] = None + self._timeout = timeout + self._autoclose = autoclose + self._autoping = autoping + self._heartbeat = heartbeat + self._heartbeat_cb: Optional[asyncio.TimerHandle] = None + self._heartbeat_when: float = 0.0 + if heartbeat is not None: + self._pong_heartbeat = heartbeat / 2.0 + self._pong_response_cb: Optional[asyncio.TimerHandle] = None + self._loop = loop + self._waiting: bool = False + self._close_wait: Optional[asyncio.Future[None]] = None + self._exception: Optional[BaseException] = None + self._compress = compress + self._client_notakeover = client_notakeover + self._ping_task: Optional[asyncio.Task[None]] = None + + self._reset_heartbeat() + + def _cancel_heartbeat(self) -> None: + self._cancel_pong_response_cb() + if self._heartbeat_cb is not None: + self._heartbeat_cb.cancel() + self._heartbeat_cb = None + if self._ping_task is not None: + self._ping_task.cancel() + self._ping_task = None + + def _cancel_pong_response_cb(self) -> None: + if self._pong_response_cb is not None: + self._pong_response_cb.cancel() + self._pong_response_cb = None + + def _reset_heartbeat(self) -> None: + if self._heartbeat is None: + return + self._cancel_pong_response_cb() + loop = self._loop + assert loop is not None + conn = self._conn + timeout_ceil_threshold = ( + conn._connector._timeout_ceil_threshold if conn is not None else 5 + ) + now = loop.time() + when = calculate_timeout_when(now, self._heartbeat, timeout_ceil_threshold) + self._heartbeat_when = when + if self._heartbeat_cb is None: + # We do not cancel the previous heartbeat_cb here because + # it generates a significant amount of TimerHandle churn + # which causes asyncio to rebuild the heap frequently. + # Instead _send_heartbeat() will reschedule the next + # heartbeat if it fires too early. + self._heartbeat_cb = loop.call_at(when, self._send_heartbeat) + + def _send_heartbeat(self) -> None: + self._heartbeat_cb = None + loop = self._loop + now = loop.time() + if now < self._heartbeat_when: + # Heartbeat fired too early, reschedule + self._heartbeat_cb = loop.call_at( + self._heartbeat_when, self._send_heartbeat + ) + return + + conn = self._conn + timeout_ceil_threshold = ( + conn._connector._timeout_ceil_threshold if conn is not None else 5 + ) + when = calculate_timeout_when(now, self._pong_heartbeat, timeout_ceil_threshold) + self._cancel_pong_response_cb() + self._pong_response_cb = loop.call_at(when, self._pong_not_received) + + coro = self._writer.send_frame(b"", WSMsgType.PING) + if sys.version_info >= (3, 12): + # Optimization for Python 3.12, try to send the ping + # immediately to avoid having to schedule + # the task on the event loop. 
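+ # With eager_start=True the ping coroutine runs synchronously up to its + # first suspension point during Task construction; if it completes without + # suspending, ping_task.done() is already True below and the done-callback + # path is taken directly.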
+ ping_task = asyncio.Task(coro, loop=loop, eager_start=True) + else: + ping_task = loop.create_task(coro) + + if not ping_task.done(): + self._ping_task = ping_task + ping_task.add_done_callback(self._ping_task_done) + else: + self._ping_task_done(ping_task) + + def _ping_task_done(self, task: "asyncio.Task[None]") -> None: + """Callback for when the ping task completes.""" + if not task.cancelled() and (exc := task.exception()): + self._handle_ping_pong_exception(exc) + self._ping_task = None + + def _pong_not_received(self) -> None: + self._handle_ping_pong_exception( + ServerTimeoutError(f"No PONG received after {self._pong_heartbeat} seconds") + ) + + def _handle_ping_pong_exception(self, exc: BaseException) -> None: + """Handle exceptions raised during ping/pong processing.""" + if self._closed: + return + self._set_closed() + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._exception = exc + self._response.close() + if self._waiting and not self._closing: + self._reader.feed_data(WSMessage(WSMsgType.ERROR, exc, None), 0) + + def _set_closed(self) -> None: + """Set the connection to closed. + + Cancel any heartbeat timers and set the closed flag. + """ + self._closed = True + self._cancel_heartbeat() + + def _set_closing(self) -> None: + """Set the connection to closing. + + Cancel any heartbeat timers and set the closing flag. + """ + self._closing = True + self._cancel_heartbeat() + + @property + def closed(self) -> bool: + return self._closed + + @property + def close_code(self) -> Optional[int]: + return self._close_code + + @property + def protocol(self) -> Optional[str]: + return self._protocol + + @property + def compress(self) -> int: + return self._compress + + @property + def client_notakeover(self) -> bool: + return self._client_notakeover + + def get_extra_info(self, name: str, default: Any = None) -> Any: + """Extra info from the connection transport.""" + conn = self._response.connection + if conn is None: + return default + transport = conn.transport + if transport is None: + return default + return transport.get_extra_info(name, default) + + def exception(self) -> Optional[BaseException]: + return self._exception + + async def ping(self, message: bytes = b"") -> None: + await self._writer.send_frame(message, WSMsgType.PING) + + async def pong(self, message: bytes = b"") -> None: + await self._writer.send_frame(message, WSMsgType.PONG) + + async def send_frame( + self, message: bytes, opcode: WSMsgType, compress: Optional[int] = None + ) -> None: + """Send a frame over the websocket.""" + await self._writer.send_frame(message, opcode, compress) + + async def send_str(self, data: str, compress: Optional[int] = None) -> None: + if not isinstance(data, str): + raise TypeError("data argument must be str (%r)" % type(data)) + await self._writer.send_frame( + data.encode("utf-8"), WSMsgType.TEXT, compress=compress + ) + + async def send_bytes(self, data: bytes, compress: Optional[int] = None) -> None: + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError("data argument must be byte-ish (%r)" % type(data)) + await self._writer.send_frame(data, WSMsgType.BINARY, compress=compress) + + async def send_json( + self, + data: Any, + compress: Optional[int] = None, + *, + dumps: JSONEncoder = DEFAULT_JSON_ENCODER, + ) -> None: + await self.send_str(dumps(data), compress=compress) + + async def close(self, *, code: int = WSCloseCode.OK, message: bytes = b"") -> bool: + # we need to break the `receive()` cycle first, as + # `close()` may be called from a different task 
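+ # Illustrative sequence, assuming another task is blocked in receive(): + # task B: close() feeds WS_CLOSING_MESSAGE and awaits _close_wait + # task A: receive() wakes, and its finally block resolves _close_wait + # task B: resumes below and sends the actual CLOSE frame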
+ if self._waiting and not self._closing: + assert self._loop is not None + self._close_wait = self._loop.create_future() + self._set_closing() + self._reader.feed_data(WS_CLOSING_MESSAGE, 0) + await self._close_wait + + if self._closed: + return False + + self._set_closed() + try: + await self._writer.close(code, message) + except asyncio.CancelledError: + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._response.close() + raise + except Exception as exc: + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._exception = exc + self._response.close() + return True + + if self._close_code: + self._response.close() + return True + + while True: + try: + async with async_timeout.timeout(self._timeout.ws_close): + msg = await self._reader.read() + except asyncio.CancelledError: + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._response.close() + raise + except Exception as exc: + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._exception = exc + self._response.close() + return True + + if msg.type is WSMsgType.CLOSE: + self._close_code = msg.data + self._response.close() + return True + + async def receive(self, timeout: Optional[float] = None) -> WSMessage: + receive_timeout = timeout or self._timeout.ws_receive + + while True: + if self._waiting: + raise RuntimeError("Concurrent call to receive() is not allowed") + + if self._closed: + return WS_CLOSED_MESSAGE + elif self._closing: + await self.close() + return WS_CLOSED_MESSAGE + + try: + self._waiting = True + try: + if receive_timeout: + # Entering the context manager and creating + # a Timeout() object can take almost 50% of the + # run time in this loop so we avoid it if + # there is no read timeout. + async with async_timeout.timeout(receive_timeout): + msg = await self._reader.read() + else: + msg = await self._reader.read() + self._reset_heartbeat() + finally: + self._waiting = False + if self._close_wait: + set_result(self._close_wait, None) + except (asyncio.CancelledError, asyncio.TimeoutError): + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + raise + except EofStream: + self._close_code = WSCloseCode.OK + await self.close() + return WSMessage(WSMsgType.CLOSED, None, None) + except ClientError: + # Likely ServerDisconnectedError when connection is lost + self._set_closed() + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + return WS_CLOSED_MESSAGE + except WebSocketError as exc: + self._close_code = exc.code + await self.close(code=exc.code) + return WSMessage(WSMsgType.ERROR, exc, None) + except Exception as exc: + self._exception = exc + self._set_closing() + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + await self.close() + return WSMessage(WSMsgType.ERROR, exc, None) + + if msg.type not in _INTERNAL_RECEIVE_TYPES: + # If it's not a close/closing/ping/pong message + # we can return it immediately + return msg + + if msg.type is WSMsgType.CLOSE: + self._set_closing() + self._close_code = msg.data + if not self._closed and self._autoclose: + await self.close() + elif msg.type is WSMsgType.CLOSING: + self._set_closing() + elif msg.type is WSMsgType.PING and self._autoping: + await self.pong(msg.data) + continue + elif msg.type is WSMsgType.PONG and self._autoping: + continue + + return msg + + async def receive_str(self, *, timeout: Optional[float] = None) -> str: + msg = await self.receive(timeout) + if msg.type is not WSMsgType.TEXT: + raise WSMessageTypeError( + f"Received message {msg.type}:{msg.data!r} is not WSMsgType.TEXT" + ) + return cast(str, msg.data) + + async def receive_bytes(self, *, 
timeout: Optional[float] = None) -> bytes: + msg = await self.receive(timeout) + if msg.type is not WSMsgType.BINARY: + raise WSMessageTypeError( + f"Received message {msg.type}:{msg.data!r} is not WSMsgType.BINARY" + ) + return cast(bytes, msg.data) + + async def receive_json( + self, + *, + loads: JSONDecoder = DEFAULT_JSON_DECODER, + timeout: Optional[float] = None, + ) -> Any: + data = await self.receive_str(timeout=timeout) + return loads(data) + + def __aiter__(self) -> "ClientWebSocketResponse": + return self + + async def __anext__(self) -> WSMessage: + msg = await self.receive() + if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED): + raise StopAsyncIteration + return msg + + async def __aenter__(self) -> "ClientWebSocketResponse": + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + await self.close() diff --git a/py311/lib/python3.11/site-packages/aiohttp/compression_utils.py b/py311/lib/python3.11/site-packages/aiohttp/compression_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e478d24c3d70105e6796007808462c4159f8c833 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/compression_utils.py @@ -0,0 +1,348 @@ +import asyncio +import sys +import zlib +from abc import ABC, abstractmethod +from concurrent.futures import Executor +from typing import Any, Final, Optional, Protocol, TypedDict, cast + +if sys.version_info >= (3, 12): + from collections.abc import Buffer +else: + from typing import Union + + Buffer = Union[bytes, bytearray, "memoryview[int]", "memoryview[bytes]"] + +try: + try: + import brotlicffi as brotli + except ImportError: + import brotli + + HAS_BROTLI = True +except ImportError: # pragma: no cover + HAS_BROTLI = False + +try: + if sys.version_info >= (3, 14): + from compression.zstd import ZstdDecompressor # noqa: I900 + else: # TODO(PY314): Remove mentions of backports.zstd across codebase + from backports.zstd import ZstdDecompressor + + HAS_ZSTD = True +except ImportError: + HAS_ZSTD = False + + +MAX_SYNC_CHUNK_SIZE = 4096 +DEFAULT_MAX_DECOMPRESS_SIZE = 2**25 # 32MiB + +# Unlimited decompression constants - different libraries use different conventions +ZLIB_MAX_LENGTH_UNLIMITED = 0 # zlib uses 0 to mean unlimited +ZSTD_MAX_LENGTH_UNLIMITED = -1 # zstd uses -1 to mean unlimited + + +class ZLibCompressObjProtocol(Protocol): + def compress(self, data: Buffer) -> bytes: ... + def flush(self, mode: int = ..., /) -> bytes: ... + + +class ZLibDecompressObjProtocol(Protocol): + def decompress(self, data: Buffer, max_length: int = ...) -> bytes: ... + def flush(self, length: int = ..., /) -> bytes: ... + + @property + def eof(self) -> bool: ... + + +class ZLibBackendProtocol(Protocol): + MAX_WBITS: int + Z_FULL_FLUSH: int + Z_SYNC_FLUSH: int + Z_BEST_SPEED: int + Z_FINISH: int + + def compressobj( + self, + level: int = ..., + method: int = ..., + wbits: int = ..., + memLevel: int = ..., + strategy: int = ..., + zdict: Optional[Buffer] = ..., + ) -> ZLibCompressObjProtocol: ... + def decompressobj( + self, wbits: int = ..., zdict: Buffer = ... + ) -> ZLibDecompressObjProtocol: ... + + def compress( + self, data: Buffer, /, level: int = ..., wbits: int = ... + ) -> bytes: ... + def decompress( + self, data: Buffer, /, wbits: int = ..., bufsize: int = ... + ) -> bytes: ... 
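+ # Any zlib-compatible module that satisfies ZLibBackendProtocol can be + # swapped in via set_zlib_backend() below; a minimal sketch, assuming the + # zlib-ng bindings are installed: + # + #     from zlib_ng import zlib_ng + #     set_zlib_backend(zlib_ng)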
+ + +class CompressObjArgs(TypedDict, total=False): + wbits: int + strategy: int + level: int + + +class ZLibBackendWrapper: + def __init__(self, _zlib_backend: ZLibBackendProtocol): + self._zlib_backend: ZLibBackendProtocol = _zlib_backend + + @property + def name(self) -> str: + return getattr(self._zlib_backend, "__name__", "undefined") + + @property + def MAX_WBITS(self) -> int: + return self._zlib_backend.MAX_WBITS + + @property + def Z_FULL_FLUSH(self) -> int: + return self._zlib_backend.Z_FULL_FLUSH + + @property + def Z_SYNC_FLUSH(self) -> int: + return self._zlib_backend.Z_SYNC_FLUSH + + @property + def Z_BEST_SPEED(self) -> int: + return self._zlib_backend.Z_BEST_SPEED + + @property + def Z_FINISH(self) -> int: + return self._zlib_backend.Z_FINISH + + def compressobj(self, *args: Any, **kwargs: Any) -> ZLibCompressObjProtocol: + return self._zlib_backend.compressobj(*args, **kwargs) + + def decompressobj(self, *args: Any, **kwargs: Any) -> ZLibDecompressObjProtocol: + return self._zlib_backend.decompressobj(*args, **kwargs) + + def compress(self, data: Buffer, *args: Any, **kwargs: Any) -> bytes: + return self._zlib_backend.compress(data, *args, **kwargs) + + def decompress(self, data: Buffer, *args: Any, **kwargs: Any) -> bytes: + return self._zlib_backend.decompress(data, *args, **kwargs) + + # Everything not explicitly listed in the Protocol we just pass through + def __getattr__(self, attrname: str) -> Any: + return getattr(self._zlib_backend, attrname) + + +ZLibBackend: ZLibBackendWrapper = ZLibBackendWrapper(zlib) + + +def set_zlib_backend(new_zlib_backend: ZLibBackendProtocol) -> None: + ZLibBackend._zlib_backend = new_zlib_backend + + +def encoding_to_mode( + encoding: Optional[str] = None, + suppress_deflate_header: bool = False, +) -> int: + if encoding == "gzip": + return 16 + ZLibBackend.MAX_WBITS + + return -ZLibBackend.MAX_WBITS if suppress_deflate_header else ZLibBackend.MAX_WBITS + + +class DecompressionBaseHandler(ABC): + def __init__( + self, + executor: Optional[Executor] = None, + max_sync_chunk_size: Optional[int] = MAX_SYNC_CHUNK_SIZE, + ): + """Base class for decompression handlers.""" + self._executor = executor + self._max_sync_chunk_size = max_sync_chunk_size + + @abstractmethod + def decompress_sync( + self, data: bytes, max_length: int = ZLIB_MAX_LENGTH_UNLIMITED + ) -> bytes: + """Decompress the given data.""" + + async def decompress( + self, data: bytes, max_length: int = ZLIB_MAX_LENGTH_UNLIMITED + ) -> bytes: + """Decompress the given data.""" + if ( + self._max_sync_chunk_size is not None + and len(data) > self._max_sync_chunk_size + ): + return await asyncio.get_event_loop().run_in_executor( + self._executor, self.decompress_sync, data, max_length + ) + return self.decompress_sync(data, max_length) + + +class ZLibCompressor: + def __init__( + self, + encoding: Optional[str] = None, + suppress_deflate_header: bool = False, + level: Optional[int] = None, + wbits: Optional[int] = None, + strategy: Optional[int] = None, + executor: Optional[Executor] = None, + max_sync_chunk_size: Optional[int] = MAX_SYNC_CHUNK_SIZE, + ): + self._executor = executor + self._max_sync_chunk_size = max_sync_chunk_size + self._mode = ( + encoding_to_mode(encoding, suppress_deflate_header) + if wbits is None + else wbits + ) + self._zlib_backend: Final = ZLibBackendWrapper(ZLibBackend._zlib_backend) + + kwargs: CompressObjArgs = {} + kwargs["wbits"] = self._mode + if strategy is not None: + kwargs["strategy"] = strategy + if level is not None: + kwargs["level"] = level + 
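# The wbits value chosen above selects the container format: MAX_WBITS + # gives a zlib-wrapped deflate stream, -MAX_WBITS a raw deflate stream + # (no header), and 16 + MAX_WBITS a gzip stream (see encoding_to_mode). +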
self._compressor = self._zlib_backend.compressobj(**kwargs) + + def compress_sync(self, data: bytes) -> bytes: + return self._compressor.compress(data) + + async def compress(self, data: bytes) -> bytes: + """Compress the data and return the compressed bytes. + + Note that flush() must be called after the last call to compress(). + + If the data size is larger than the max_sync_chunk_size, the compression + will be done in the executor. Otherwise, the compression will be done + in the event loop. + + **WARNING: This method is NOT cancellation-safe when used with flush().** + If this operation is cancelled, the compressor state may be corrupted. + The connection MUST be closed after cancellation to avoid data corruption + in subsequent compress operations. + + For cancellation-safe compression (e.g., WebSocket), the caller MUST wrap + compress() + flush() + send operations in a shield and lock to ensure atomicity. + """ + # For large payloads, offload compression to the executor to avoid blocking the event loop + should_use_executor = ( + self._max_sync_chunk_size is not None + and len(data) > self._max_sync_chunk_size + ) + if should_use_executor: + return await asyncio.get_running_loop().run_in_executor( + self._executor, self._compressor.compress, data + ) + return self.compress_sync(data) + + def flush(self, mode: Optional[int] = None) -> bytes: + """Flush the compressor synchronously. + + **WARNING: This method is NOT cancellation-safe when called after compress().** + The flush() operation accesses shared compressor state. If compress() was + cancelled, calling flush() may result in corrupted data. The connection MUST + be closed after compress() cancellation. + + For cancellation-safe compression (e.g., WebSocket), the caller MUST wrap + compress() + flush() + send operations in a shield and lock to ensure atomicity. + """ + return self._compressor.flush( + mode if mode is not None else self._zlib_backend.Z_FINISH + ) + + + class ZLibDecompressor(DecompressionBaseHandler): + def __init__( + self, + encoding: Optional[str] = None, + suppress_deflate_header: bool = False, + executor: Optional[Executor] = None, + max_sync_chunk_size: Optional[int] = MAX_SYNC_CHUNK_SIZE, + ): + super().__init__(executor=executor, max_sync_chunk_size=max_sync_chunk_size) + self._mode = encoding_to_mode(encoding, suppress_deflate_header) + self._zlib_backend: Final = ZLibBackendWrapper(ZLibBackend._zlib_backend) + self._decompressor = self._zlib_backend.decompressobj(wbits=self._mode) + + def decompress_sync( + self, data: Buffer, max_length: int = ZLIB_MAX_LENGTH_UNLIMITED + ) -> bytes: + return self._decompressor.decompress(data, max_length) + + def flush(self, length: int = 0) -> bytes: + return ( + self._decompressor.flush(length) + if length > 0 + else self._decompressor.flush() + ) + + @property + def eof(self) -> bool: + return self._decompressor.eof + + + class BrotliDecompressor(DecompressionBaseHandler): + # Supports both 'brotlipy' and 'Brotli' packages + # since they share an import name. The top branches + # are for 'brotlipy' and bottom branches for 'Brotli' + def __init__( + self, + executor: Optional[Executor] = None, + max_sync_chunk_size: Optional[int] = MAX_SYNC_CHUNK_SIZE, + ) -> None: + """Decompress data using the Brotli library.""" + if not HAS_BROTLI: + raise RuntimeError( + "The brotli decompression is not available. 
" + "Please install `Brotli` module" + ) + self._obj = brotli.Decompressor() + super().__init__(executor=executor, max_sync_chunk_size=max_sync_chunk_size) + + def decompress_sync( + self, data: Buffer, max_length: int = ZLIB_MAX_LENGTH_UNLIMITED + ) -> bytes: + """Decompress the given data.""" + if hasattr(self._obj, "decompress"): + return cast(bytes, self._obj.decompress(data, max_length)) + return cast(bytes, self._obj.process(data, max_length)) + + def flush(self) -> bytes: + """Flush the decompressor.""" + if hasattr(self._obj, "flush"): + return cast(bytes, self._obj.flush()) + return b"" + + +class ZSTDDecompressor(DecompressionBaseHandler): + def __init__( + self, + executor: Optional[Executor] = None, + max_sync_chunk_size: Optional[int] = MAX_SYNC_CHUNK_SIZE, + ) -> None: + if not HAS_ZSTD: + raise RuntimeError( + "The zstd decompression is not available. " + "Please install `backports.zstd` module" + ) + self._obj = ZstdDecompressor() + super().__init__(executor=executor, max_sync_chunk_size=max_sync_chunk_size) + + def decompress_sync( + self, data: bytes, max_length: int = ZLIB_MAX_LENGTH_UNLIMITED + ) -> bytes: + # zstd uses -1 for unlimited, while zlib uses 0 for unlimited + # Convert the zlib convention (0=unlimited) to zstd convention (-1=unlimited) + zstd_max_length = ( + ZSTD_MAX_LENGTH_UNLIMITED + if max_length == ZLIB_MAX_LENGTH_UNLIMITED + else max_length + ) + return self._obj.decompress(data, zstd_max_length) + + def flush(self) -> bytes: + return b"" diff --git a/py311/lib/python3.11/site-packages/aiohttp/connector.py b/py311/lib/python3.11/site-packages/aiohttp/connector.py new file mode 100644 index 0000000000000000000000000000000000000000..290a42400f9af5257e1ca1511c3db7bd0570e9bd --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/connector.py @@ -0,0 +1,1842 @@ +import asyncio +import functools +import random +import socket +import sys +import traceback +import warnings +from collections import OrderedDict, defaultdict, deque +from contextlib import suppress +from http import HTTPStatus +from itertools import chain, cycle, islice +from time import monotonic +from types import TracebackType +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + DefaultDict, + Deque, + Dict, + Iterator, + List, + Literal, + Optional, + Sequence, + Set, + Tuple, + Type, + Union, + cast, +) + +import aiohappyeyeballs +from aiohappyeyeballs import AddrInfoType, SocketFactoryType + +from . 
import hdrs, helpers +from .abc import AbstractResolver, ResolveResult +from .client_exceptions import ( + ClientConnectionError, + ClientConnectorCertificateError, + ClientConnectorDNSError, + ClientConnectorError, + ClientConnectorSSLError, + ClientHttpProxyError, + ClientProxyConnectionError, + ServerFingerprintMismatch, + UnixClientConnectorError, + cert_errors, + ssl_errors, +) +from .client_proto import ResponseHandler +from .client_reqrep import ClientRequest, Fingerprint, _merge_ssl_params +from .helpers import ( + _SENTINEL, + ceil_timeout, + is_ip_address, + noop, + sentinel, + set_exception, + set_result, +) +from .log import client_logger +from .resolver import DefaultResolver + +if sys.version_info >= (3, 12): + from collections.abc import Buffer +else: + Buffer = Union[bytes, bytearray, "memoryview[int]", "memoryview[bytes]"] + +if TYPE_CHECKING: + import ssl + + SSLContext = ssl.SSLContext +else: + try: + import ssl + + SSLContext = ssl.SSLContext + except ImportError: # pragma: no cover + ssl = None # type: ignore[assignment] + SSLContext = object # type: ignore[misc,assignment] + +EMPTY_SCHEMA_SET = frozenset({""}) +HTTP_SCHEMA_SET = frozenset({"http", "https"}) +WS_SCHEMA_SET = frozenset({"ws", "wss"}) + +HTTP_AND_EMPTY_SCHEMA_SET = HTTP_SCHEMA_SET | EMPTY_SCHEMA_SET +HIGH_LEVEL_SCHEMA_SET = HTTP_AND_EMPTY_SCHEMA_SET | WS_SCHEMA_SET + +NEEDS_CLEANUP_CLOSED = (3, 13, 0) <= sys.version_info < ( + 3, + 13, + 1, +) or sys.version_info < (3, 12, 7) +# Cleanup closed is no longer needed after https://github.com/python/cpython/pull/118960 +# which first appeared in Python 3.12.7 and 3.13.1 + + +__all__ = ( + "BaseConnector", + "TCPConnector", + "UnixConnector", + "NamedPipeConnector", + "AddrInfoType", + "SocketFactoryType", +) + + +if TYPE_CHECKING: + from .client import ClientTimeout + from .client_reqrep import ConnectionKey + from .tracing import Trace + + +class _DeprecationWaiter: + __slots__ = ("_awaitable", "_awaited") + + def __init__(self, awaitable: Awaitable[Any]) -> None: + self._awaitable = awaitable + self._awaited = False + + def __await__(self) -> Any: + self._awaited = True + return self._awaitable.__await__() + + def __del__(self) -> None: + if not self._awaited: + warnings.warn( + "Connector.close() is a coroutine, " + "please use await connector.close()", + DeprecationWarning, + ) + + +async def _wait_for_close(waiters: List[Awaitable[object]]) -> None: + """Wait for all waiters to finish closing.""" + results = await asyncio.gather(*waiters, return_exceptions=True) + for res in results: + if isinstance(res, Exception): + client_logger.debug("Error while closing connector: %r", res) + + +class Connection: + + _source_traceback = None + + def __init__( + self, + connector: "BaseConnector", + key: "ConnectionKey", + protocol: ResponseHandler, + loop: asyncio.AbstractEventLoop, + ) -> None: + self._key = key + self._connector = connector + self._loop = loop + self._protocol: Optional[ResponseHandler] = protocol + self._callbacks: List[Callable[[], None]] = [] + + if loop.get_debug(): + self._source_traceback = traceback.extract_stack(sys._getframe(1)) + + def __repr__(self) -> str: + return f"Connection<{self._key}>" + + def __del__(self, _warnings: Any = warnings) -> None: + if self._protocol is not None: + kwargs = {"source": self} + _warnings.warn(f"Unclosed connection {self!r}", ResourceWarning, **kwargs) + if self._loop.is_closed(): + return + + self._connector._release(self._key, self._protocol, should_close=True) + + context = {"client_connection": self, 
"message": "Unclosed connection"} + if self._source_traceback is not None: + context["source_traceback"] = self._source_traceback + self._loop.call_exception_handler(context) + + def __bool__(self) -> Literal[True]: + """Force subclasses to not be falsy, to make checks simpler.""" + return True + + @property + def loop(self) -> asyncio.AbstractEventLoop: + warnings.warn( + "connector.loop property is deprecated", DeprecationWarning, stacklevel=2 + ) + return self._loop + + @property + def transport(self) -> Optional[asyncio.Transport]: + if self._protocol is None: + return None + return self._protocol.transport + + @property + def protocol(self) -> Optional[ResponseHandler]: + return self._protocol + + def add_callback(self, callback: Callable[[], None]) -> None: + if callback is not None: + self._callbacks.append(callback) + + def _notify_release(self) -> None: + callbacks, self._callbacks = self._callbacks[:], [] + + for cb in callbacks: + with suppress(Exception): + cb() + + def close(self) -> None: + self._notify_release() + + if self._protocol is not None: + self._connector._release(self._key, self._protocol, should_close=True) + self._protocol = None + + def release(self) -> None: + self._notify_release() + + if self._protocol is not None: + self._connector._release(self._key, self._protocol) + self._protocol = None + + @property + def closed(self) -> bool: + return self._protocol is None or not self._protocol.is_connected() + + +class _ConnectTunnelConnection(Connection): + """Special connection wrapper for CONNECT tunnels that must never be pooled. + + This connection wraps the proxy connection that will be upgraded with TLS. + It must never be released to the pool because: + 1. Its 'closed' future will never complete, causing session.close() to hang + 2. It represents an intermediate state, not a reusable connection + 3. The real connection (with TLS) will be created separately + """ + + def release(self) -> None: + """Do nothing - don't pool or close the connection. + + These connections are an intermediate state during the CONNECT tunnel + setup and will be cleaned up naturally after the TLS upgrade. If they + were to be pooled, they would never be properly closed, causing + session.close() to wait forever for their 'closed' future. + """ + + +class _TransportPlaceholder: + """placeholder for BaseConnector.connect function""" + + __slots__ = ("closed", "transport") + + def __init__(self, closed_future: asyncio.Future[Optional[Exception]]) -> None: + """Initialize a placeholder for a transport.""" + self.closed = closed_future + self.transport = None + + def close(self) -> None: + """Close the placeholder.""" + + def abort(self) -> None: + """Abort the placeholder (does nothing).""" + + +class BaseConnector: + """Base connector class. + + keepalive_timeout - (optional) Keep-alive timeout. + force_close - Set to True to force close and do reconnect + after each request (and between redirects). + limit - The total number of simultaneous connections. + limit_per_host - Number of simultaneous connections to one host. + enable_cleanup_closed - Enables clean-up closed ssl transports. + Disabled by default. + timeout_ceil_threshold - Trigger ceiling of timeout values when + it's above timeout_ceil_threshold. + loop - Optional event loop. 
+ """ + + _closed = True # prevent AttributeError in __del__ if ctor was failed + _source_traceback = None + + # abort transport after 2 seconds (cleanup broken connections) + _cleanup_closed_period = 2.0 + + allowed_protocol_schema_set = HIGH_LEVEL_SCHEMA_SET + + def __init__( + self, + *, + keepalive_timeout: Union[object, None, float] = sentinel, + force_close: bool = False, + limit: int = 100, + limit_per_host: int = 0, + enable_cleanup_closed: bool = False, + loop: Optional[asyncio.AbstractEventLoop] = None, + timeout_ceil_threshold: float = 5, + ) -> None: + + if force_close: + if keepalive_timeout is not None and keepalive_timeout is not sentinel: + raise ValueError( + "keepalive_timeout cannot be set if force_close is True" + ) + else: + if keepalive_timeout is sentinel: + keepalive_timeout = 15.0 + + loop = loop or asyncio.get_running_loop() + self._timeout_ceil_threshold = timeout_ceil_threshold + + self._closed = False + if loop.get_debug(): + self._source_traceback = traceback.extract_stack(sys._getframe(1)) + + # Connection pool of reusable connections. + # We use a deque to store connections because it has O(1) popleft() + # and O(1) append() operations to implement a FIFO queue. + self._conns: DefaultDict[ + ConnectionKey, Deque[Tuple[ResponseHandler, float]] + ] = defaultdict(deque) + self._limit = limit + self._limit_per_host = limit_per_host + self._acquired: Set[ResponseHandler] = set() + self._acquired_per_host: DefaultDict[ConnectionKey, Set[ResponseHandler]] = ( + defaultdict(set) + ) + self._keepalive_timeout = cast(float, keepalive_timeout) + self._force_close = force_close + + # {host_key: FIFO list of waiters} + # The FIFO is implemented with an OrderedDict with None keys because + # python does not have an ordered set. + self._waiters: DefaultDict[ + ConnectionKey, OrderedDict[asyncio.Future[None], None] + ] = defaultdict(OrderedDict) + + self._loop = loop + self._factory = functools.partial(ResponseHandler, loop=loop) + + # start keep-alive connection cleanup task + self._cleanup_handle: Optional[asyncio.TimerHandle] = None + + # start cleanup closed transports task + self._cleanup_closed_handle: Optional[asyncio.TimerHandle] = None + + if enable_cleanup_closed and not NEEDS_CLEANUP_CLOSED: + warnings.warn( + "enable_cleanup_closed ignored because " + "https://github.com/python/cpython/pull/118960 is fixed " + f"in Python version {sys.version_info}", + DeprecationWarning, + stacklevel=2, + ) + enable_cleanup_closed = False + + self._cleanup_closed_disabled = not enable_cleanup_closed + self._cleanup_closed_transports: List[Optional[asyncio.Transport]] = [] + self._placeholder_future: asyncio.Future[Optional[Exception]] = ( + loop.create_future() + ) + self._placeholder_future.set_result(None) + self._cleanup_closed() + + def __del__(self, _warnings: Any = warnings) -> None: + if self._closed: + return + if not self._conns: + return + + conns = [repr(c) for c in self._conns.values()] + + self._close() + + kwargs = {"source": self} + _warnings.warn(f"Unclosed connector {self!r}", ResourceWarning, **kwargs) + context = { + "connector": self, + "connections": conns, + "message": "Unclosed connector", + } + if self._source_traceback is not None: + context["source_traceback"] = self._source_traceback + self._loop.call_exception_handler(context) + + def __enter__(self) -> "BaseConnector": + warnings.warn( + '"with Connector():" is deprecated, ' + 'use "async with Connector():" instead', + DeprecationWarning, + ) + return self + + def __exit__(self, *exc: Any) -> None: 
+ self._close() + + async def __aenter__(self) -> "BaseConnector": + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + exc_traceback: Optional[TracebackType] = None, + ) -> None: + await self.close() + + @property + def force_close(self) -> bool: + """Ultimately close connection on releasing if True.""" + return self._force_close + + @property + def limit(self) -> int: + """The total number of simultaneous connections. + + If limit is 0 the connector has no limit. + The default limit size is 100. + """ + return self._limit + + @property + def limit_per_host(self) -> int: + """The limit for simultaneous connections to the same endpoint. + + Endpoints are the same if they have an equal + (host, port, is_ssl) triple. + """ + return self._limit_per_host + + def _cleanup(self) -> None: + """Cleanup unused transports.""" + if self._cleanup_handle: + self._cleanup_handle.cancel() + # _cleanup_handle should be unset, otherwise _release() will not + # recreate it ever! + self._cleanup_handle = None + + now = monotonic() + timeout = self._keepalive_timeout + + if self._conns: + connections = defaultdict(deque) + deadline = now - timeout + for key, conns in self._conns.items(): + alive: Deque[Tuple[ResponseHandler, float]] = deque() + for proto, use_time in conns: + if proto.is_connected() and use_time - deadline >= 0: + alive.append((proto, use_time)) + continue + transport = proto.transport + proto.close() + if not self._cleanup_closed_disabled and key.is_ssl: + self._cleanup_closed_transports.append(transport) + + if alive: + connections[key] = alive + + self._conns = connections + + if self._conns: + self._cleanup_handle = helpers.weakref_handle( + self, + "_cleanup", + timeout, + self._loop, + timeout_ceil_threshold=self._timeout_ceil_threshold, + ) + + def _cleanup_closed(self) -> None: + """Double confirmation for transport close. + + Some broken ssl servers may leave the socket open without a proper close. + """ + if self._cleanup_closed_handle: + self._cleanup_closed_handle.cancel() + + for transport in self._cleanup_closed_transports: + if transport is not None: + transport.abort() + + self._cleanup_closed_transports = [] + + if not self._cleanup_closed_disabled: + self._cleanup_closed_handle = helpers.weakref_handle( + self, + "_cleanup_closed", + self._cleanup_closed_period, + self._loop, + timeout_ceil_threshold=self._timeout_ceil_threshold, + ) + + def close(self, *, abort_ssl: bool = False) -> Awaitable[None]: + """Close all opened transports. + + :param abort_ssl: If True, SSL connections will be aborted immediately + without performing the shutdown handshake. This provides + faster cleanup at the cost of less graceful disconnection. + """ + if not (waiters := self._close(abort_ssl=abort_ssl)): + # If there are no connections to close, we can return a noop + # awaitable to avoid scheduling a task on the event loop. + return _DeprecationWaiter(noop()) + coro = _wait_for_close(waiters) + if sys.version_info >= (3, 12): + # Optimization for Python 3.12, try to close connections + # immediately to avoid having to schedule the task on the event loop. 
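+ # The _DeprecationWaiter wrapper keeps legacy non-awaited + # connector.close() calls working while emitting a DeprecationWarning + # from its __del__ if the result is never awaited.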
+ task = asyncio.Task(coro, loop=self._loop, eager_start=True) + else: + task = self._loop.create_task(coro) + return _DeprecationWaiter(task) + + def _close(self, *, abort_ssl: bool = False) -> List[Awaitable[object]]: + waiters: List[Awaitable[object]] = [] + + if self._closed: + return waiters + + self._closed = True + + try: + if self._loop.is_closed(): + return waiters + + # cancel cleanup task + if self._cleanup_handle: + self._cleanup_handle.cancel() + + # cancel cleanup close task + if self._cleanup_closed_handle: + self._cleanup_closed_handle.cancel() + + for data in self._conns.values(): + for proto, _ in data: + if ( + abort_ssl + and proto.transport + and proto.transport.get_extra_info("sslcontext") is not None + ): + proto.abort() + else: + proto.close() + if closed := proto.closed: + waiters.append(closed) + + for proto in self._acquired: + if ( + abort_ssl + and proto.transport + and proto.transport.get_extra_info("sslcontext") is not None + ): + proto.abort() + else: + proto.close() + if closed := proto.closed: + waiters.append(closed) + + for transport in self._cleanup_closed_transports: + if transport is not None: + transport.abort() + + return waiters + + finally: + self._conns.clear() + self._acquired.clear() + for keyed_waiters in self._waiters.values(): + for keyed_waiter in keyed_waiters: + keyed_waiter.cancel() + self._waiters.clear() + self._cleanup_handle = None + self._cleanup_closed_transports.clear() + self._cleanup_closed_handle = None + + @property + def closed(self) -> bool: + """Is connector closed. + + A readonly property. + """ + return self._closed + + def _available_connections(self, key: "ConnectionKey") -> int: + """ + Return number of available connections. + + The limit, limit_per_host and the connection key are taken into account. + + If it returns a value less than 1, there are no connections + available.
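+ + Example (illustrative): with limit=100, limit_per_host=2 and one + connection already acquired for a key, this returns 1 for that key; + the per-host limit binds before the total limit does.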
+ """ + # check total available connections + # If there are no limits, this will always return 1 + total_remain = 1 + + if self._limit and (total_remain := self._limit - len(self._acquired)) <= 0: + return total_remain + + # check limit per host + if host_remain := self._limit_per_host: + if acquired := self._acquired_per_host.get(key): + host_remain -= len(acquired) + if total_remain > host_remain: + return host_remain + + return total_remain + + def _update_proxy_auth_header_and_build_proxy_req( + self, req: ClientRequest + ) -> ClientRequest: + """Set Proxy-Authorization header for non-SSL proxy requests and builds the proxy request for SSL proxy requests.""" + url = req.proxy + assert url is not None + headers: Dict[str, str] = {} + if req.proxy_headers is not None: + headers = req.proxy_headers # type: ignore[assignment] + headers[hdrs.HOST] = req.headers[hdrs.HOST] + proxy_req = ClientRequest( + hdrs.METH_GET, + url, + headers=headers, + auth=req.proxy_auth, + loop=self._loop, + ssl=req.ssl, + ) + auth = proxy_req.headers.pop(hdrs.AUTHORIZATION, None) + if auth is not None: + if not req.is_ssl(): + req.headers[hdrs.PROXY_AUTHORIZATION] = auth + else: + proxy_req.headers[hdrs.PROXY_AUTHORIZATION] = auth + return proxy_req + + async def connect( + self, req: ClientRequest, traces: List["Trace"], timeout: "ClientTimeout" + ) -> Connection: + """Get from pool or create new connection.""" + key = req.connection_key + if (conn := await self._get(key, traces)) is not None: + # If we do not have to wait and we can get a connection from the pool + # we can avoid the timeout ceil logic and directly return the connection + if req.proxy: + self._update_proxy_auth_header_and_build_proxy_req(req) + return conn + + async with ceil_timeout(timeout.connect, timeout.ceil_threshold): + if self._available_connections(key) <= 0: + await self._wait_for_available_connection(key, traces) + if (conn := await self._get(key, traces)) is not None: + if req.proxy: + self._update_proxy_auth_header_and_build_proxy_req(req) + return conn + + placeholder = cast( + ResponseHandler, _TransportPlaceholder(self._placeholder_future) + ) + self._acquired.add(placeholder) + if self._limit_per_host: + self._acquired_per_host[key].add(placeholder) + + try: + # Traces are done inside the try block to ensure that the + # that the placeholder is still cleaned up if an exception + # is raised. + if traces: + for trace in traces: + await trace.send_connection_create_start() + proto = await self._create_connection(req, traces, timeout) + if traces: + for trace in traces: + await trace.send_connection_create_end() + except BaseException: + self._release_acquired(key, placeholder) + raise + else: + if self._closed: + proto.close() + raise ClientConnectionError("Connector is closed.") + + # The connection was successfully created, drop the placeholder + # and add the real connection to the acquired set. There should + # be no awaits after the proto is added to the acquired set + # to ensure that the connection is not left in the acquired set + # on cancellation. 
+ self._acquired.remove(placeholder) + self._acquired.add(proto) + if self._limit_per_host: + acquired_per_host = self._acquired_per_host[key] + acquired_per_host.remove(placeholder) + acquired_per_host.add(proto) + return Connection(self, key, proto, self._loop) + + async def _wait_for_available_connection( + self, key: "ConnectionKey", traces: List["Trace"] + ) -> None: + """Wait for an available connection slot.""" + # We loop here because there is a race between + # the connection limit check and the connection + # being acquired. If the connection is acquired + # between the check and the await statement, we + # need to loop again to check if the connection + # slot is still available. + attempts = 0 + while True: + fut: asyncio.Future[None] = self._loop.create_future() + keyed_waiters = self._waiters[key] + keyed_waiters[fut] = None + if attempts: + # If we have waited before, we need to move the waiter + # to the front of the queue as otherwise we might get + # starved and hit the timeout. + keyed_waiters.move_to_end(fut, last=False) + + try: + # Traces happen in the try block to ensure that + # the waiter is still cleaned up if an exception is raised. + if traces: + for trace in traces: + await trace.send_connection_queued_start() + await fut + if traces: + for trace in traces: + await trace.send_connection_queued_end() + finally: + # pop the waiter from the queue if it's still + # there and not already removed by _release_waiter + keyed_waiters.pop(fut, None) + if not self._waiters.get(key, True): + del self._waiters[key] + + if self._available_connections(key) > 0: + break + attempts += 1 + + async def _get( + self, key: "ConnectionKey", traces: List["Trace"] + ) -> Optional[Connection]: + """Get next reusable connection for the key or None. + + The connection will be marked as acquired. + """ + if (conns := self._conns.get(key)) is None: + return None + + t1 = monotonic() + while conns: + proto, t0 = conns.popleft() + # We will reuse the connection if it's connected and + # the keepalive timeout has not been exceeded + if proto.is_connected() and t1 - t0 <= self._keepalive_timeout: + if not conns: + # The very last connection was reclaimed: drop the key + del self._conns[key] + self._acquired.add(proto) + if self._limit_per_host: + self._acquired_per_host[key].add(proto) + if traces: + for trace in traces: + try: + await trace.send_connection_reuseconn() + except BaseException: + self._release_acquired(key, proto) + raise + return Connection(self, key, proto, self._loop) + + # Connection cannot be reused, close it + transport = proto.transport + proto.close() + # only for SSL transports + if not self._cleanup_closed_disabled and key.is_ssl: + self._cleanup_closed_transports.append(transport) + + # No more connections: drop the key + del self._conns[key] + return None + + def _release_waiter(self) -> None: + """ + Iterate over all waiters until one to be released is found. + + The one to be released is not finished and + belongs to a host that has available connections. + """ + if not self._waiters: + return + + # Shuffle the keys so we do not iterate over them + # in the same order on every call. 
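+ # e.g. with waiters queued for two hosts and a single free slot, + # shuffling gives each host an equal chance of being woken first.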
+ queues = list(self._waiters) + random.shuffle(queues) + + for key in queues: + if self._available_connections(key) < 1: + continue + + waiters = self._waiters[key] + while waiters: + waiter, _ = waiters.popitem(last=False) + if not waiter.done(): + waiter.set_result(None) + return + + def _release_acquired(self, key: "ConnectionKey", proto: ResponseHandler) -> None: + """Release acquired connection.""" + if self._closed: + # acquired connection is already released on connector closing + return + + self._acquired.discard(proto) + if self._limit_per_host and (conns := self._acquired_per_host.get(key)): + conns.discard(proto) + if not conns: + del self._acquired_per_host[key] + self._release_waiter() + + def _release( + self, + key: "ConnectionKey", + protocol: ResponseHandler, + *, + should_close: bool = False, + ) -> None: + if self._closed: + # acquired connection is already released on connector closing + return + + self._release_acquired(key, protocol) + + if self._force_close or should_close or protocol.should_close: + transport = protocol.transport + protocol.close() + + if key.is_ssl and not self._cleanup_closed_disabled: + self._cleanup_closed_transports.append(transport) + return + + self._conns[key].append((protocol, monotonic())) + + if self._cleanup_handle is None: + self._cleanup_handle = helpers.weakref_handle( + self, + "_cleanup", + self._keepalive_timeout, + self._loop, + timeout_ceil_threshold=self._timeout_ceil_threshold, + ) + + async def _create_connection( + self, req: ClientRequest, traces: List["Trace"], timeout: "ClientTimeout" + ) -> ResponseHandler: + raise NotImplementedError() + + +class _DNSCacheTable: + def __init__(self, ttl: Optional[float] = None) -> None: + self._addrs_rr: Dict[Tuple[str, int], Tuple[Iterator[ResolveResult], int]] = {} + self._timestamps: Dict[Tuple[str, int], float] = {} + self._ttl = ttl + + def __contains__(self, host: object) -> bool: + return host in self._addrs_rr + + def add(self, key: Tuple[str, int], addrs: List[ResolveResult]) -> None: + self._addrs_rr[key] = (cycle(addrs), len(addrs)) + + if self._ttl is not None: + self._timestamps[key] = monotonic() + + def remove(self, key: Tuple[str, int]) -> None: + self._addrs_rr.pop(key, None) + + if self._ttl is not None: + self._timestamps.pop(key, None) + + def clear(self) -> None: + self._addrs_rr.clear() + self._timestamps.clear() + + def next_addrs(self, key: Tuple[str, int]) -> List[ResolveResult]: + loop, length = self._addrs_rr[key] + addrs = list(islice(loop, length)) + # Consume one more element to shift internal state of `cycle` + next(loop) + return addrs + + def expired(self, key: Tuple[str, int]) -> bool: + if self._ttl is None: + return False + + return self._timestamps[key] + self._ttl < monotonic() + + +def _make_ssl_context(verified: bool) -> SSLContext: + """Create SSL context. + + This method is not async-friendly and should be called from a thread + because it will load certificates from disk and do other blocking I/O. 
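+ + A call site on a running loop would use something like (illustrative): + + sslcontext = await loop.run_in_executor(None, _make_ssl_context, True)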
+ """ + if ssl is None: + # No ssl support + return None + if verified: + sslcontext = ssl.create_default_context() + else: + sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) + sslcontext.options |= ssl.OP_NO_SSLv2 + sslcontext.options |= ssl.OP_NO_SSLv3 + sslcontext.check_hostname = False + sslcontext.verify_mode = ssl.CERT_NONE + sslcontext.options |= ssl.OP_NO_COMPRESSION + sslcontext.set_default_verify_paths() + sslcontext.set_alpn_protocols(("http/1.1",)) + return sslcontext + + +# The default SSLContext objects are created at import time +# since they do blocking I/O to load certificates from disk, +# and imports should always be done before the event loop starts +# or in a thread. +_SSL_CONTEXT_VERIFIED = _make_ssl_context(True) +_SSL_CONTEXT_UNVERIFIED = _make_ssl_context(False) + + +class TCPConnector(BaseConnector): + """TCP connector. + + verify_ssl - Set to True to check ssl certifications. + fingerprint - Pass the binary sha256 + digest of the expected certificate in DER format to verify + that the certificate the server presents matches. See also + https://en.wikipedia.org/wiki/HTTP_Public_Key_Pinning + resolver - Enable DNS lookups and use this + resolver + use_dns_cache - Use memory cache for DNS lookups. + ttl_dns_cache - Max seconds having cached a DNS entry, None forever. + family - socket address family + local_addr - local tuple of (host, port) to bind socket to + + keepalive_timeout - (optional) Keep-alive timeout. + force_close - Set to True to force close and do reconnect + after each request (and between redirects). + limit - The total number of simultaneous connections. + limit_per_host - Number of simultaneous connections to one host. + enable_cleanup_closed - Enables clean-up closed ssl transports. + Disabled by default. + happy_eyeballs_delay - This is the “Connection Attempt Delay” + as defined in RFC 8305. To disable + the happy eyeballs algorithm, set to None. + interleave - “First Address Family Count” as defined in RFC 8305 + loop - Optional event loop. + socket_factory - A SocketFactoryType function that, if supplied, + will be used to create sockets given an + AddrInfoType. + ssl_shutdown_timeout - DEPRECATED. Will be removed in aiohttp 4.0. + Grace period for SSL shutdown handshake on TLS + connections. Default is 0 seconds (immediate abort). + This parameter allowed for a clean SSL shutdown by + notifying the remote peer of connection closure, + while avoiding excessive delays during connector cleanup. + Note: Only takes effect on Python 3.11+. 
+ """ + + allowed_protocol_schema_set = HIGH_LEVEL_SCHEMA_SET | frozenset({"tcp"}) + + def __init__( + self, + *, + verify_ssl: bool = True, + fingerprint: Optional[bytes] = None, + use_dns_cache: bool = True, + ttl_dns_cache: Optional[int] = 10, + family: socket.AddressFamily = socket.AddressFamily.AF_UNSPEC, + ssl_context: Optional[SSLContext] = None, + ssl: Union[bool, Fingerprint, SSLContext] = True, + local_addr: Optional[Tuple[str, int]] = None, + resolver: Optional[AbstractResolver] = None, + keepalive_timeout: Union[None, float, object] = sentinel, + force_close: bool = False, + limit: int = 100, + limit_per_host: int = 0, + enable_cleanup_closed: bool = False, + loop: Optional[asyncio.AbstractEventLoop] = None, + timeout_ceil_threshold: float = 5, + happy_eyeballs_delay: Optional[float] = 0.25, + interleave: Optional[int] = None, + socket_factory: Optional[SocketFactoryType] = None, + ssl_shutdown_timeout: Union[_SENTINEL, None, float] = sentinel, + ): + super().__init__( + keepalive_timeout=keepalive_timeout, + force_close=force_close, + limit=limit, + limit_per_host=limit_per_host, + enable_cleanup_closed=enable_cleanup_closed, + loop=loop, + timeout_ceil_threshold=timeout_ceil_threshold, + ) + + self._ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint) + + self._resolver: AbstractResolver + if resolver is None: + self._resolver = DefaultResolver(loop=self._loop) + self._resolver_owner = True + else: + self._resolver = resolver + self._resolver_owner = False + + self._use_dns_cache = use_dns_cache + self._cached_hosts = _DNSCacheTable(ttl=ttl_dns_cache) + self._throttle_dns_futures: Dict[ + Tuple[str, int], Set["asyncio.Future[None]"] + ] = {} + self._family = family + self._local_addr_infos = aiohappyeyeballs.addr_to_addr_infos(local_addr) + self._happy_eyeballs_delay = happy_eyeballs_delay + self._interleave = interleave + self._resolve_host_tasks: Set["asyncio.Task[List[ResolveResult]]"] = set() + self._socket_factory = socket_factory + self._ssl_shutdown_timeout: Optional[float] + # Handle ssl_shutdown_timeout with warning for Python < 3.11 + if ssl_shutdown_timeout is sentinel: + self._ssl_shutdown_timeout = 0 + else: + # Deprecation warning for ssl_shutdown_timeout parameter + warnings.warn( + "The ssl_shutdown_timeout parameter is deprecated and will be removed in aiohttp 4.0", + DeprecationWarning, + stacklevel=2, + ) + if ( + sys.version_info < (3, 11) + and ssl_shutdown_timeout is not None + and ssl_shutdown_timeout != 0 + ): + warnings.warn( + f"ssl_shutdown_timeout={ssl_shutdown_timeout} is ignored on Python < 3.11; " + "only ssl_shutdown_timeout=0 is supported. The timeout will be ignored.", + RuntimeWarning, + stacklevel=2, + ) + self._ssl_shutdown_timeout = ssl_shutdown_timeout + + def _close(self, *, abort_ssl: bool = False) -> List[Awaitable[object]]: + """Close all ongoing DNS calls.""" + for fut in chain.from_iterable(self._throttle_dns_futures.values()): + fut.cancel() + + waiters = super()._close(abort_ssl=abort_ssl) + + for t in self._resolve_host_tasks: + t.cancel() + waiters.append(t) + + return waiters + + async def close(self, *, abort_ssl: bool = False) -> None: + """ + Close all opened transports. + + :param abort_ssl: If True, SSL connections will be aborted immediately + without performing the shutdown handshake. 
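# --- Usage sketch for the constructor parameters documented above, using
# --- only public aiohttp API (fetch/url are illustrative names):
import aiohttp

async def fetch(url: str) -> str:
    connector = aiohttp.TCPConnector(
        limit=100,          # total simultaneous connections
        limit_per_host=10,  # cap per destination host
        ttl_dns_cache=10,   # seconds to keep resolved addresses cached
        force_close=False,  # keep-alive stays enabled
    )
    async with aiohttp.ClientSession(connector=connector) as session:
        async with session.get(url) as resp:
            return await resp.text()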
If False (default), + the behavior is determined by ssl_shutdown_timeout: + - If ssl_shutdown_timeout=0: connections are aborted + - If ssl_shutdown_timeout>0: graceful shutdown is performed + """ + if self._resolver_owner: + await self._resolver.close() + # Use abort_ssl param if explicitly set, otherwise use ssl_shutdown_timeout default + await super().close(abort_ssl=abort_ssl or self._ssl_shutdown_timeout == 0) + + @property + def family(self) -> int: + """Socket family like AF_INET.""" + return self._family + + @property + def use_dns_cache(self) -> bool: + """True if local DNS caching is enabled.""" + return self._use_dns_cache + + def clear_dns_cache( + self, host: Optional[str] = None, port: Optional[int] = None + ) -> None: + """Remove specified host/port or clear all dns local cache.""" + if host is not None and port is not None: + self._cached_hosts.remove((host, port)) + elif host is not None or port is not None: + raise ValueError("either both host and port or none of them are allowed") + else: + self._cached_hosts.clear() + + async def _resolve_host( + self, host: str, port: int, traces: Optional[Sequence["Trace"]] = None + ) -> List[ResolveResult]: + """Resolve host and return list of addresses.""" + if is_ip_address(host): + return [ + { + "hostname": host, + "host": host, + "port": port, + "family": self._family, + "proto": 0, + "flags": 0, + } + ] + + if not self._use_dns_cache: + + if traces: + for trace in traces: + await trace.send_dns_resolvehost_start(host) + + res = await self._resolver.resolve(host, port, family=self._family) + + if traces: + for trace in traces: + await trace.send_dns_resolvehost_end(host) + + return res + + key = (host, port) + if key in self._cached_hosts and not self._cached_hosts.expired(key): + # get result early, before any await (#4014) + result = self._cached_hosts.next_addrs(key) + + if traces: + for trace in traces: + await trace.send_dns_cache_hit(host) + return result + + futures: Set["asyncio.Future[None]"] + # + # If multiple connectors are resolving the same host, we wait + # for the first one to resolve and then use the result for all of them. + # We use a throttle to ensure that we only resolve the host once + # and then use the result for all the waiters. + # + if key in self._throttle_dns_futures: + # get futures early, before any await (#4014) + futures = self._throttle_dns_futures[key] + future: asyncio.Future[None] = self._loop.create_future() + futures.add(future) + if traces: + for trace in traces: + await trace.send_dns_cache_hit(host) + try: + await future + finally: + futures.discard(future) + return self._cached_hosts.next_addrs(key) + + # update dict early, before any await (#4014) + self._throttle_dns_futures[key] = futures = set() + # In this case we need to create a task to ensure that we can shield + # the task from cancellation as cancelling this lookup should not cancel + # the underlying lookup or else the cancel event will get broadcast to + # all the waiters across all connections. 
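# --- Minimal sketch (hypothetical names) of the DNS-throttle pattern in
# --- _resolve_host above: the first caller for a key resolves; concurrent
# --- callers park on a Future and then reuse the cached result, or re-raise
# --- whatever exception the resolving caller recorded.
import asyncio

_inflight: dict = {}  # host -> set of waiter Futures
_cache: dict = {}     # host -> resolved addresses

async def fake_resolve(host: str) -> list:
    await asyncio.sleep(0.01)  # stand-in for a real getaddrinfo() call
    return ["192.0.2.1"]

async def lookup(host: str) -> list:
    if host in _cache:
        return _cache[host]
    if host in _inflight:  # somebody is already resolving this key
        fut = asyncio.get_running_loop().create_future()
        _inflight[host].add(fut)
        await fut          # raises if the resolver recorded an exception
        return _cache[host]
    _inflight[host] = waiters = set()
    try:
        addrs = await fake_resolve(host)
    except BaseException as exc:
        for fut in waiters:
            if not fut.done():
                fut.set_exception(exc)
        raise
    finally:
        del _inflight[host]
    _cache[host] = addrs
    for fut in waiters:
        if not fut.done():
            fut.set_result(None)
    return addrs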
+ # + coro = self._resolve_host_with_throttle(key, host, port, futures, traces) + loop = asyncio.get_running_loop() + if sys.version_info >= (3, 12): + # Optimization for Python 3.12, try to send immediately + resolved_host_task = asyncio.Task(coro, loop=loop, eager_start=True) + else: + resolved_host_task = loop.create_task(coro) + + if not resolved_host_task.done(): + self._resolve_host_tasks.add(resolved_host_task) + resolved_host_task.add_done_callback(self._resolve_host_tasks.discard) + + try: + return await asyncio.shield(resolved_host_task) + except asyncio.CancelledError: + + def drop_exception(fut: "asyncio.Future[List[ResolveResult]]") -> None: + with suppress(Exception, asyncio.CancelledError): + fut.result() + + resolved_host_task.add_done_callback(drop_exception) + raise + + async def _resolve_host_with_throttle( + self, + key: Tuple[str, int], + host: str, + port: int, + futures: Set["asyncio.Future[None]"], + traces: Optional[Sequence["Trace"]], + ) -> List[ResolveResult]: + """Resolve host and set result for all waiters. + + This method must be run in a task and shielded from cancellation + to avoid cancelling the underlying lookup. + """ + try: + if traces: + for trace in traces: + await trace.send_dns_cache_miss(host) + + for trace in traces: + await trace.send_dns_resolvehost_start(host) + + addrs = await self._resolver.resolve(host, port, family=self._family) + if traces: + for trace in traces: + await trace.send_dns_resolvehost_end(host) + + self._cached_hosts.add(key, addrs) + for fut in futures: + set_result(fut, None) + except BaseException as e: + # any DNS exception is set for the waiters to raise the same exception. + # This coro is always run in task that is shielded from cancellation so + # we should never be propagating cancellation here. + for fut in futures: + set_exception(fut, e) + raise + finally: + self._throttle_dns_futures.pop(key) + + return self._cached_hosts.next_addrs(key) + + async def _create_connection( + self, req: ClientRequest, traces: List["Trace"], timeout: "ClientTimeout" + ) -> ResponseHandler: + """Create connection. + + Has same keyword arguments as BaseEventLoop.create_connection. + """ + if req.proxy: + _, proto = await self._create_proxy_connection(req, traces, timeout) + else: + _, proto = await self._create_direct_connection(req, traces, timeout) + + return proto + + def _get_ssl_context(self, req: ClientRequest) -> Optional[SSLContext]: + """Logic to get the correct SSL context + + 0. if req.ssl is false, return None + + 1. if ssl_context is specified in req, use it + 2. if _ssl_context is specified in self, use it + 3. otherwise: + 1. if verify_ssl is not specified in req, use self.ssl_context + (will generate a default context according to self.verify_ssl) + 2. if verify_ssl is True in req, generate a default SSL context + 3. 
if verify_ssl is False in req, generate a SSL context that + won't verify + """ + if not req.is_ssl(): + return None + + if ssl is None: # pragma: no cover + raise RuntimeError("SSL is not supported.") + sslcontext = req.ssl + if isinstance(sslcontext, ssl.SSLContext): + return sslcontext + if sslcontext is not True: + # not verified or fingerprinted + return _SSL_CONTEXT_UNVERIFIED + sslcontext = self._ssl + if isinstance(sslcontext, ssl.SSLContext): + return sslcontext + if sslcontext is not True: + # not verified or fingerprinted + return _SSL_CONTEXT_UNVERIFIED + return _SSL_CONTEXT_VERIFIED + + def _get_fingerprint(self, req: ClientRequest) -> Optional["Fingerprint"]: + ret = req.ssl + if isinstance(ret, Fingerprint): + return ret + ret = self._ssl + if isinstance(ret, Fingerprint): + return ret + return None + + async def _wrap_create_connection( + self, + *args: Any, + addr_infos: List[AddrInfoType], + req: ClientRequest, + timeout: "ClientTimeout", + client_error: Type[Exception] = ClientConnectorError, + **kwargs: Any, + ) -> Tuple[asyncio.Transport, ResponseHandler]: + try: + async with ceil_timeout( + timeout.sock_connect, ceil_threshold=timeout.ceil_threshold + ): + sock = await aiohappyeyeballs.start_connection( + addr_infos=addr_infos, + local_addr_infos=self._local_addr_infos, + happy_eyeballs_delay=self._happy_eyeballs_delay, + interleave=self._interleave, + loop=self._loop, + socket_factory=self._socket_factory, + ) + # Add ssl_shutdown_timeout for Python 3.11+ when SSL is used + if ( + kwargs.get("ssl") + and self._ssl_shutdown_timeout + and sys.version_info >= (3, 11) + ): + kwargs["ssl_shutdown_timeout"] = self._ssl_shutdown_timeout + return await self._loop.create_connection(*args, **kwargs, sock=sock) + except cert_errors as exc: + raise ClientConnectorCertificateError(req.connection_key, exc) from exc + except ssl_errors as exc: + raise ClientConnectorSSLError(req.connection_key, exc) from exc + except OSError as exc: + if exc.errno is None and isinstance(exc, asyncio.TimeoutError): + raise + raise client_error(req.connection_key, exc) from exc + + async def _wrap_existing_connection( + self, + *args: Any, + req: ClientRequest, + timeout: "ClientTimeout", + client_error: Type[Exception] = ClientConnectorError, + **kwargs: Any, + ) -> Tuple[asyncio.Transport, ResponseHandler]: + try: + async with ceil_timeout( + timeout.sock_connect, ceil_threshold=timeout.ceil_threshold + ): + return await self._loop.create_connection(*args, **kwargs) + except cert_errors as exc: + raise ClientConnectorCertificateError(req.connection_key, exc) from exc + except ssl_errors as exc: + raise ClientConnectorSSLError(req.connection_key, exc) from exc + except OSError as exc: + if exc.errno is None and isinstance(exc, asyncio.TimeoutError): + raise + raise client_error(req.connection_key, exc) from exc + + def _fail_on_no_start_tls(self, req: "ClientRequest") -> None: + """Raise a :py:exc:`RuntimeError` on missing ``start_tls()``. + + It is necessary for TLS-in-TLS so that it is possible to + send HTTPS queries through HTTPS proxies. + + This doesn't affect regular HTTP requests, though. + """ + if not req.is_ssl(): + return + + proxy_url = req.proxy + assert proxy_url is not None + if proxy_url.scheme != "https": + return + + self._check_loop_for_start_tls() + + def _check_loop_for_start_tls(self) -> None: + try: + self._loop.start_tls + except AttributeError as attr_exc: + raise RuntimeError( + "An HTTPS request is being sent through an HTTPS proxy. 
" + "This needs support for TLS in TLS but it is not implemented " + "in your runtime for the stdlib asyncio.\n\n" + "Please upgrade to Python 3.11 or higher. For more details, " + "please see:\n" + "* https://bugs.python.org/issue37179\n" + "* https://github.com/python/cpython/pull/28073\n" + "* https://docs.aiohttp.org/en/stable/" + "client_advanced.html#proxy-support\n" + "* https://github.com/aio-libs/aiohttp/discussions/6044\n", + ) from attr_exc + + def _loop_supports_start_tls(self) -> bool: + try: + self._check_loop_for_start_tls() + except RuntimeError: + return False + else: + return True + + def _warn_about_tls_in_tls( + self, + underlying_transport: asyncio.Transport, + req: ClientRequest, + ) -> None: + """Issue a warning if the requested URL has HTTPS scheme.""" + if req.request_info.url.scheme != "https": + return + + # Check if uvloop is being used, which supports TLS in TLS, + # otherwise assume that asyncio's native transport is being used. + if type(underlying_transport).__module__.startswith("uvloop"): + return + + # Support in asyncio was added in Python 3.11 (bpo-44011) + asyncio_supports_tls_in_tls = sys.version_info >= (3, 11) or getattr( + underlying_transport, + "_start_tls_compatible", + False, + ) + + if asyncio_supports_tls_in_tls: + return + + warnings.warn( + "An HTTPS request is being sent through an HTTPS proxy. " + "This support for TLS in TLS is known to be disabled " + "in the stdlib asyncio (Python <3.11). This is why you'll probably see " + "an error in the log below.\n\n" + "It is possible to enable it via monkeypatching. " + "For more details, see:\n" + "* https://bugs.python.org/issue37179\n" + "* https://github.com/python/cpython/pull/28073\n\n" + "You can temporarily patch this as follows:\n" + "* https://docs.aiohttp.org/en/stable/client_advanced.html#proxy-support\n" + "* https://github.com/aio-libs/aiohttp/discussions/6044\n", + RuntimeWarning, + source=self, + # Why `4`? At least 3 of the calls in the stack originate + # from the methods in this class. + stacklevel=3, + ) + + async def _start_tls_connection( + self, + underlying_transport: asyncio.Transport, + req: ClientRequest, + timeout: "ClientTimeout", + client_error: Type[Exception] = ClientConnectorError, + ) -> Tuple[asyncio.BaseTransport, ResponseHandler]: + """Wrap the raw TCP transport with TLS.""" + tls_proto = self._factory() # Create a brand new proto for TLS + sslcontext = self._get_ssl_context(req) + if TYPE_CHECKING: + # _start_tls_connection is unreachable in the current code path + # if sslcontext is None. 
+ assert sslcontext is not None + + try: + async with ceil_timeout( + timeout.sock_connect, ceil_threshold=timeout.ceil_threshold + ): + try: + # ssl_shutdown_timeout is only available in Python 3.11+ + if sys.version_info >= (3, 11) and self._ssl_shutdown_timeout: + tls_transport = await self._loop.start_tls( + underlying_transport, + tls_proto, + sslcontext, + server_hostname=req.server_hostname or req.host, + ssl_handshake_timeout=timeout.total, + ssl_shutdown_timeout=self._ssl_shutdown_timeout, + ) + else: + tls_transport = await self._loop.start_tls( + underlying_transport, + tls_proto, + sslcontext, + server_hostname=req.server_hostname or req.host, + ssl_handshake_timeout=timeout.total, + ) + except BaseException: + # We need to close the underlying transport since + # `start_tls()` probably failed before it had a + # chance to do this: + if self._ssl_shutdown_timeout == 0: + underlying_transport.abort() + else: + underlying_transport.close() + raise + if isinstance(tls_transport, asyncio.Transport): + fingerprint = self._get_fingerprint(req) + if fingerprint: + try: + fingerprint.check(tls_transport) + except ServerFingerprintMismatch: + tls_transport.close() + if not self._cleanup_closed_disabled: + self._cleanup_closed_transports.append(tls_transport) + raise + except cert_errors as exc: + raise ClientConnectorCertificateError(req.connection_key, exc) from exc + except ssl_errors as exc: + raise ClientConnectorSSLError(req.connection_key, exc) from exc + except OSError as exc: + if exc.errno is None and isinstance(exc, asyncio.TimeoutError): + raise + raise client_error(req.connection_key, exc) from exc + except TypeError as type_err: + # Example cause looks like this: + # TypeError: transport is not supported by start_tls() + + raise ClientConnectionError( + "Cannot initialize a TLS-in-TLS connection to host " + f"{req.host!s}:{req.port:d} through an underlying connection " + f"to an HTTPS proxy {req.proxy!s} ssl:{req.ssl or 'default'} " + f"[{type_err!s}]" + ) from type_err + else: + if tls_transport is None: + msg = "Failed to start TLS (possibly caused by closing transport)" + raise client_error(req.connection_key, OSError(msg)) + tls_proto.connection_made( + tls_transport + ) # Kick the state machine of the new TLS protocol + + return tls_transport, tls_proto + + def _convert_hosts_to_addr_infos( + self, hosts: List[ResolveResult] + ) -> List[AddrInfoType]: + """Converts the list of hosts to a list of addr_infos. + + The list of hosts is the result of a DNS lookup. The list of + addr_infos is the result of a call to `socket.getaddrinfo()`. + """ + addr_infos: List[AddrInfoType] = [] + for hinfo in hosts: + host = hinfo["host"] + is_ipv6 = ":" in host + family = socket.AF_INET6 if is_ipv6 else socket.AF_INET + if self._family and self._family != family: + continue + addr = (host, hinfo["port"], 0, 0) if is_ipv6 else (host, hinfo["port"]) + addr_infos.append( + (family, socket.SOCK_STREAM, socket.IPPROTO_TCP, "", addr) + ) + return addr_infos + + async def _create_direct_connection( + self, + req: ClientRequest, + traces: List["Trace"], + timeout: "ClientTimeout", + *, + client_error: Type[Exception] = ClientConnectorError, + ) -> Tuple[asyncio.Transport, ResponseHandler]: + sslcontext = self._get_ssl_context(req) + fingerprint = self._get_fingerprint(req) + + host = req.url.raw_host + assert host is not None + # Replace multiple trailing dots with a single one. + # A trailing dot is only present for fully-qualified domain names. 
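# --- Simplified capability probe (sketch, not the library's exact logic):
# --- TLS-in-TLS needs a working loop.start_tls() over an already-encrypted
# --- transport, which stdlib asyncio gained in Python 3.11; uvloop supports
# --- it independently, and the real code also honors a transport's
# --- _start_tls_compatible flag.
import asyncio
import sys

def supports_tls_in_tls(loop: asyncio.AbstractEventLoop) -> bool:
    if not hasattr(loop, "start_tls"):
        return False
    return sys.version_info >= (3, 11) or type(loop).__module__.startswith("uvloop")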
+ # See https://github.com/aio-libs/aiohttp/pull/7364. + if host.endswith(".."): + host = host.rstrip(".") + "." + port = req.port + assert port is not None + try: + # Cancelling this lookup should not cancel the underlying lookup + # or else the cancel event will get broadcast to all the waiters + # across all connections. + hosts = await self._resolve_host(host, port, traces=traces) + except OSError as exc: + if exc.errno is None and isinstance(exc, asyncio.TimeoutError): + raise + # in case of proxy it is not ClientProxyConnectionError + # it is problem of resolving proxy ip itself + raise ClientConnectorDNSError(req.connection_key, exc) from exc + + last_exc: Optional[Exception] = None + addr_infos = self._convert_hosts_to_addr_infos(hosts) + while addr_infos: + # Strip trailing dots, certificates contain FQDN without dots. + # See https://github.com/aio-libs/aiohttp/issues/3636 + server_hostname = ( + (req.server_hostname or host).rstrip(".") if sslcontext else None + ) + + try: + transp, proto = await self._wrap_create_connection( + self._factory, + timeout=timeout, + ssl=sslcontext, + addr_infos=addr_infos, + server_hostname=server_hostname, + req=req, + client_error=client_error, + ) + except (ClientConnectorError, asyncio.TimeoutError) as exc: + last_exc = exc + aiohappyeyeballs.pop_addr_infos_interleave(addr_infos, self._interleave) + continue + + if req.is_ssl() and fingerprint: + try: + fingerprint.check(transp) + except ServerFingerprintMismatch as exc: + transp.close() + if not self._cleanup_closed_disabled: + self._cleanup_closed_transports.append(transp) + last_exc = exc + # Remove the bad peer from the list of addr_infos + sock: socket.socket = transp.get_extra_info("socket") + bad_peer = sock.getpeername() + aiohappyeyeballs.remove_addr_infos(addr_infos, bad_peer) + continue + + return transp, proto + else: + assert last_exc is not None + raise last_exc + + async def _create_proxy_connection( + self, req: ClientRequest, traces: List["Trace"], timeout: "ClientTimeout" + ) -> Tuple[asyncio.BaseTransport, ResponseHandler]: + self._fail_on_no_start_tls(req) + runtime_has_start_tls = self._loop_supports_start_tls() + proxy_req = self._update_proxy_auth_header_and_build_proxy_req(req) + + # create connection to proxy server + transport, proto = await self._create_direct_connection( + proxy_req, [], timeout, client_error=ClientProxyConnectionError + ) + + if req.is_ssl(): + if runtime_has_start_tls: + self._warn_about_tls_in_tls(transport, req) + + # For HTTPS requests over HTTP proxy + # we must notify proxy to tunnel connection + # so we send CONNECT command: + # CONNECT www.python.org:443 HTTP/1.1 + # Host: www.python.org + # + # next we must do TLS handshake and so on + # to do this we must wrap raw socket into secure one + # asyncio handles this perfectly + proxy_req.method = hdrs.METH_CONNECT + proxy_req.url = req.url + key = req.connection_key._replace( + proxy=None, proxy_auth=None, proxy_headers_hash=None + ) + conn = _ConnectTunnelConnection(self, key, proto, self._loop) + proxy_resp = await proxy_req.send(conn) + try: + protocol = conn._protocol + assert protocol is not None + + # read_until_eof=True will ensure the connection isn't closed + # once the response is received and processed allowing + # START_TLS to work on the connection below. 
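# --- Sketch of the SHA-256 certificate pinning performed by the
# --- fingerprint.check() calls above (the Fingerprint class itself is
# --- defined elsewhere in aiohttp; this is a standalone restatement):
import hashlib

def check_fingerprint(transport, expected_sha256: bytes) -> None:
    ssl_object = transport.get_extra_info("ssl_object")
    der_cert = ssl_object.getpeercert(binary_form=True)  # DER-encoded bytes
    got = hashlib.sha256(der_cert).digest()
    if got != expected_sha256:
        raise ConnectionError(f"fingerprint mismatch: got {got.hex()}")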
+ protocol.set_response_params( + read_until_eof=runtime_has_start_tls, + timeout_ceil_threshold=self._timeout_ceil_threshold, + ) + resp = await proxy_resp.start(conn) + except BaseException: + proxy_resp.close() + conn.close() + raise + else: + conn._protocol = None + try: + if resp.status != 200: + message = resp.reason + if message is None: + message = HTTPStatus(resp.status).phrase + raise ClientHttpProxyError( + proxy_resp.request_info, + resp.history, + status=resp.status, + message=message, + headers=resp.headers, + ) + if not runtime_has_start_tls: + rawsock = transport.get_extra_info("socket", default=None) + if rawsock is None: + raise RuntimeError( + "Transport does not expose socket instance" + ) + # Duplicate the socket, so now we can close proxy transport + rawsock = rawsock.dup() + except BaseException: + # It shouldn't be closed in `finally` because it's fed to + # `loop.start_tls()` and the docs say not to touch it after + # passing there. + transport.close() + raise + finally: + if not runtime_has_start_tls: + transport.close() + + if not runtime_has_start_tls: + # HTTP proxy with support for upgrade to HTTPS + sslcontext = self._get_ssl_context(req) + return await self._wrap_existing_connection( + self._factory, + timeout=timeout, + ssl=sslcontext, + sock=rawsock, + server_hostname=req.host, + req=req, + ) + + return await self._start_tls_connection( + # Access the old transport for the last time before it's + # closed and forgotten forever: + transport, + req=req, + timeout=timeout, + ) + finally: + proxy_resp.close() + + return transport, proto + + +class UnixConnector(BaseConnector): + """Unix socket connector. + + path - Unix socket path. + keepalive_timeout - (optional) Keep-alive timeout. + force_close - Set to True to force close and do reconnect + after each request (and between redirects). + limit - The total number of simultaneous connections. + limit_per_host - Number of simultaneous connections to one host. + loop - Optional event loop. + """ + + allowed_protocol_schema_set = HIGH_LEVEL_SCHEMA_SET | frozenset({"unix"}) + + def __init__( + self, + path: str, + force_close: bool = False, + keepalive_timeout: Union[object, float, None] = sentinel, + limit: int = 100, + limit_per_host: int = 0, + loop: Optional[asyncio.AbstractEventLoop] = None, + ) -> None: + super().__init__( + force_close=force_close, + keepalive_timeout=keepalive_timeout, + limit=limit, + limit_per_host=limit_per_host, + loop=loop, + ) + self._path = path + + @property + def path(self) -> str: + """Path to unix socket.""" + return self._path + + async def _create_connection( + self, req: ClientRequest, traces: List["Trace"], timeout: "ClientTimeout" + ) -> ResponseHandler: + try: + async with ceil_timeout( + timeout.sock_connect, ceil_threshold=timeout.ceil_threshold + ): + _, proto = await self._loop.create_unix_connection( + self._factory, self._path + ) + except OSError as exc: + if exc.errno is None and isinstance(exc, asyncio.TimeoutError): + raise + raise UnixClientConnectorError(self.path, req.connection_key, exc) from exc + + return proto + + +class NamedPipeConnector(BaseConnector): + """Named pipe connector. + + Only supported by the proactor event loop. + See also: https://docs.python.org/3/library/asyncio-eventloop.html + + path - Windows named pipe path. + keepalive_timeout - (optional) Keep-alive timeout. + force_close - Set to True to force close and do reconnect + after each request (and between redirects). + limit - The total number of simultaneous connections. 
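# --- The CONNECT handshake described above, as a standalone asyncio-streams
# --- sketch (hypothetical helper; full header parsing elided):
import asyncio

async def open_tunnel(proxy_host: str, proxy_port: int, target: str):
    reader, writer = await asyncio.open_connection(proxy_host, proxy_port)
    writer.write(f"CONNECT {target} HTTP/1.1\r\nHost: {target}\r\n\r\n".encode())
    await writer.drain()
    status_line = await reader.readline()
    if b" 200 " not in status_line:
        raise ConnectionError(status_line.decode(errors="replace"))
    while (await reader.readline()) not in (b"\r\n", b""):
        pass                   # drain the remaining response headers
    return reader, writer      # TLS can now be started over this socket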
+ limit_per_host - Number of simultaneous connections to one host. + loop - Optional event loop. + """ + + allowed_protocol_schema_set = HIGH_LEVEL_SCHEMA_SET | frozenset({"npipe"}) + + def __init__( + self, + path: str, + force_close: bool = False, + keepalive_timeout: Union[object, float, None] = sentinel, + limit: int = 100, + limit_per_host: int = 0, + loop: Optional[asyncio.AbstractEventLoop] = None, + ) -> None: + super().__init__( + force_close=force_close, + keepalive_timeout=keepalive_timeout, + limit=limit, + limit_per_host=limit_per_host, + loop=loop, + ) + if not isinstance( + self._loop, + asyncio.ProactorEventLoop, # type: ignore[attr-defined] + ): + raise RuntimeError( + "Named Pipes only available in proactor loop under windows" + ) + self._path = path + + @property + def path(self) -> str: + """Path to the named pipe.""" + return self._path + + async def _create_connection( + self, req: ClientRequest, traces: List["Trace"], timeout: "ClientTimeout" + ) -> ResponseHandler: + try: + async with ceil_timeout( + timeout.sock_connect, ceil_threshold=timeout.ceil_threshold + ): + _, proto = await self._loop.create_pipe_connection( # type: ignore[attr-defined] + self._factory, self._path + ) + # the drain is required so that the connection_made is called + # and transport is set otherwise it is not set before the + # `assert conn.transport is not None` + # in client.py's _request method + await asyncio.sleep(0) + # other option is to manually set transport like + # `proto.transport = trans` + except OSError as exc: + if exc.errno is None and isinstance(exc, asyncio.TimeoutError): + raise + raise ClientConnectorError(req.connection_key, exc) from exc + + return cast(ResponseHandler, proto) diff --git a/py311/lib/python3.11/site-packages/aiohttp/cookiejar.py b/py311/lib/python3.11/site-packages/aiohttp/cookiejar.py new file mode 100644 index 0000000000000000000000000000000000000000..193648d4309fc9252332c0c1241ab6aafbd2fe05 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/cookiejar.py @@ -0,0 +1,522 @@ +import asyncio +import calendar +import contextlib +import datetime +import heapq +import itertools +import os # noqa +import pathlib +import pickle +import re +import time +import warnings +from collections import defaultdict +from collections.abc import Mapping +from http.cookies import BaseCookie, Morsel, SimpleCookie +from typing import ( + DefaultDict, + Dict, + Iterable, + Iterator, + List, + Optional, + Set, + Tuple, + Union, +) + +from yarl import URL + +from ._cookie_helpers import preserve_morsel_with_coded_value +from .abc import AbstractCookieJar, ClearCookiePredicate +from .helpers import is_ip_address +from .typedefs import LooseCookies, PathLike, StrOrURL + +__all__ = ("CookieJar", "DummyCookieJar") + + +CookieItem = Union[str, "Morsel[str]"] + +# We cache these string methods here as their use is in performance critical code. +_FORMAT_PATH = "{}/{}".format +_FORMAT_DOMAIN_REVERSED = "{1}.{0}".format + +# The minimum number of scheduled cookie expirations before we start cleaning up +# the expiration heap. This is a performance optimization to avoid cleaning up the +# heap too often when there are only a few scheduled expirations. 
+_MIN_SCHEDULED_COOKIE_EXPIRATION = 100 +_SIMPLE_COOKIE = SimpleCookie() + + +class CookieJar(AbstractCookieJar): + """Implements cookie storage adhering to RFC 6265.""" + + DATE_TOKENS_RE = re.compile( + r"[\x09\x20-\x2F\x3B-\x40\x5B-\x60\x7B-\x7E]*" + r"(?P[\x00-\x08\x0A-\x1F\d:a-zA-Z\x7F-\xFF]+)" + ) + + DATE_HMS_TIME_RE = re.compile(r"(\d{1,2}):(\d{1,2}):(\d{1,2})") + + DATE_DAY_OF_MONTH_RE = re.compile(r"(\d{1,2})") + + DATE_MONTH_RE = re.compile( + "(jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|(aug)|(sep)|(oct)|(nov)|(dec)", + re.I, + ) + + DATE_YEAR_RE = re.compile(r"(\d{2,4})") + + # calendar.timegm() fails for timestamps after datetime.datetime.max + # Minus one as a loss of precision occurs when timestamp() is called. + MAX_TIME = ( + int(datetime.datetime.max.replace(tzinfo=datetime.timezone.utc).timestamp()) - 1 + ) + try: + calendar.timegm(time.gmtime(MAX_TIME)) + except (OSError, ValueError): + # Hit the maximum representable time on Windows + # https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/localtime-localtime32-localtime64 + # Throws ValueError on PyPy 3.9, OSError elsewhere + MAX_TIME = calendar.timegm((3000, 12, 31, 23, 59, 59, -1, -1, -1)) + except OverflowError: + # #4515: datetime.max may not be representable on 32-bit platforms + MAX_TIME = 2**31 - 1 + # Avoid minuses in the future, 3x faster + SUB_MAX_TIME = MAX_TIME - 1 + + def __init__( + self, + *, + unsafe: bool = False, + quote_cookie: bool = True, + treat_as_secure_origin: Union[StrOrURL, List[StrOrURL], None] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, + ) -> None: + super().__init__(loop=loop) + self._cookies: DefaultDict[Tuple[str, str], SimpleCookie] = defaultdict( + SimpleCookie + ) + self._morsel_cache: DefaultDict[Tuple[str, str], Dict[str, Morsel[str]]] = ( + defaultdict(dict) + ) + self._host_only_cookies: Set[Tuple[str, str]] = set() + self._unsafe = unsafe + self._quote_cookie = quote_cookie + if treat_as_secure_origin is None: + treat_as_secure_origin = [] + elif isinstance(treat_as_secure_origin, URL): + treat_as_secure_origin = [treat_as_secure_origin.origin()] + elif isinstance(treat_as_secure_origin, str): + treat_as_secure_origin = [URL(treat_as_secure_origin).origin()] + else: + treat_as_secure_origin = [ + URL(url).origin() if isinstance(url, str) else url.origin() + for url in treat_as_secure_origin + ] + self._treat_as_secure_origin = treat_as_secure_origin + self._expire_heap: List[Tuple[float, Tuple[str, str, str]]] = [] + self._expirations: Dict[Tuple[str, str, str], float] = {} + + @property + def quote_cookie(self) -> bool: + return self._quote_cookie + + def save(self, file_path: PathLike) -> None: + file_path = pathlib.Path(file_path) + with file_path.open(mode="wb") as f: + pickle.dump(self._cookies, f, pickle.HIGHEST_PROTOCOL) + + def load(self, file_path: PathLike) -> None: + file_path = pathlib.Path(file_path) + with file_path.open(mode="rb") as f: + self._cookies = pickle.load(f) + + def clear(self, predicate: Optional[ClearCookiePredicate] = None) -> None: + if predicate is None: + self._expire_heap.clear() + self._cookies.clear() + self._morsel_cache.clear() + self._host_only_cookies.clear() + self._expirations.clear() + return + + now = time.time() + to_del = [ + key + for (domain, path), cookie in self._cookies.items() + for name, morsel in cookie.items() + if ( + (key := (domain, path, name)) in self._expirations + and self._expirations[key] <= now + ) + or predicate(morsel) + ] + if to_del: + self._delete_cookies(to_del) + + def 
clear_domain(self, domain: str) -> None: + self.clear(lambda x: self._is_domain_match(domain, x["domain"])) + + def __iter__(self) -> "Iterator[Morsel[str]]": + self._do_expiration() + for val in self._cookies.values(): + yield from val.values() + + def __len__(self) -> int: + """Return number of cookies. + + This function does not iterate self to avoid unnecessary expiration + checks. + """ + return sum(len(cookie.values()) for cookie in self._cookies.values()) + + def _do_expiration(self) -> None: + """Remove expired cookies.""" + if not (expire_heap_len := len(self._expire_heap)): + return + + # If the expiration heap grows larger than the number expirations + # times two, we clean it up to avoid keeping expired entries in + # the heap and consuming memory. We guard this with a minimum + # threshold to avoid cleaning up the heap too often when there are + # only a few scheduled expirations. + if ( + expire_heap_len > _MIN_SCHEDULED_COOKIE_EXPIRATION + and expire_heap_len > len(self._expirations) * 2 + ): + # Remove any expired entries from the expiration heap + # that do not match the expiration time in the expirations + # as it means the cookie has been re-added to the heap + # with a different expiration time. + self._expire_heap = [ + entry + for entry in self._expire_heap + if self._expirations.get(entry[1]) == entry[0] + ] + heapq.heapify(self._expire_heap) + + now = time.time() + to_del: List[Tuple[str, str, str]] = [] + # Find any expired cookies and add them to the to-delete list + while self._expire_heap: + when, cookie_key = self._expire_heap[0] + if when > now: + break + heapq.heappop(self._expire_heap) + # Check if the cookie hasn't been re-added to the heap + # with a different expiration time as it will be removed + # later when it reaches the top of the heap and its + # expiration time is met. 
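# --- Lazy deletion in miniature (sketch): rescheduling leaves stale heap
# --- entries behind; only an entry whose timestamp still matches the
# --- authoritative mapping (here `latest`, `_expirations` in the code
# --- above) triggers deletion.
import heapq

heap: list = []
latest: dict = {}

def schedule(name: str, when: float) -> None:
    if latest.get(name) != when:  # avoid pushing exact duplicates
        heapq.heappush(heap, (when, name))
        latest[name] = when

def expire(now: float) -> list:
    deleted = []
    while heap and heap[0][0] <= now:
        when, name = heapq.heappop(heap)
        if latest.get(name) == when:  # stale entries are dropped silently
            deleted.append(name)
            del latest[name]
    return deleted

schedule("session", 5.0)
schedule("session", 10.0)      # reschedule: (5.0, "session") is now stale
assert expire(6.0) == []       # stale entry popped, nothing deleted
assert expire(11.0) == ["session"]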
+ if self._expirations.get(cookie_key) == when: + to_del.append(cookie_key) + + if to_del: + self._delete_cookies(to_del) + + def _delete_cookies(self, to_del: List[Tuple[str, str, str]]) -> None: + for domain, path, name in to_del: + self._host_only_cookies.discard((domain, name)) + self._cookies[(domain, path)].pop(name, None) + self._morsel_cache[(domain, path)].pop(name, None) + self._expirations.pop((domain, path, name), None) + + def _expire_cookie(self, when: float, domain: str, path: str, name: str) -> None: + cookie_key = (domain, path, name) + if self._expirations.get(cookie_key) == when: + # Avoid adding duplicates to the heap + return + heapq.heappush(self._expire_heap, (when, cookie_key)) + self._expirations[cookie_key] = when + + def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None: + """Update cookies.""" + hostname = response_url.raw_host + + if not self._unsafe and is_ip_address(hostname): + # Don't accept cookies from IPs + return + + if isinstance(cookies, Mapping): + cookies = cookies.items() + + for name, cookie in cookies: + if not isinstance(cookie, Morsel): + tmp = SimpleCookie() + tmp[name] = cookie # type: ignore[assignment] + cookie = tmp[name] + + domain = cookie["domain"] + + # ignore domains with trailing dots + if domain and domain[-1] == ".": + domain = "" + del cookie["domain"] + + if not domain and hostname is not None: + # Set the cookie's domain to the response hostname + # and set its host-only-flag + self._host_only_cookies.add((hostname, name)) + domain = cookie["domain"] = hostname + + if domain and domain[0] == ".": + # Remove leading dot + domain = domain[1:] + cookie["domain"] = domain + + if hostname and not self._is_domain_match(domain, hostname): + # Setting cookies for different domains is not allowed + continue + + path = cookie["path"] + if not path or path[0] != "/": + # Set the cookie's path to the response path + path = response_url.path + if not path.startswith("/"): + path = "/" + else: + # Cut everything from the last slash to the end + path = "/" + path[1 : path.rfind("/")] + cookie["path"] = path + path = path.rstrip("/") + + if max_age := cookie["max-age"]: + try: + delta_seconds = int(max_age) + max_age_expiration = min(time.time() + delta_seconds, self.MAX_TIME) + self._expire_cookie(max_age_expiration, domain, path, name) + except ValueError: + cookie["max-age"] = "" + + elif expires := cookie["expires"]: + if expire_time := self._parse_date(expires): + self._expire_cookie(expire_time, domain, path, name) + else: + cookie["expires"] = "" + + key = (domain, path) + if self._cookies[key].get(name) != cookie: + # Don't blow away the cache if the same + # cookie gets set again + self._cookies[key][name] = cookie + self._morsel_cache[key].pop(name, None) + + self._do_expiration() + + def filter_cookies(self, request_url: URL = URL()) -> "BaseCookie[str]": + """Returns this jar's cookies filtered by their attributes.""" + # We always use BaseCookie now since all + # cookies set on on filtered are fully constructed + # Morsels, not just names and values. + filtered: BaseCookie[str] = BaseCookie() + if not self._cookies: + # Skip do_expiration() if there are no cookies. + return filtered + self._do_expiration() + if not self._cookies: + # Skip rest of function if no non-expired cookies. 
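# --- End-to-end usage sketch with the public API (hypothetical values):
import asyncio
from http.cookies import SimpleCookie
from yarl import URL
from aiohttp import CookieJar

async def main() -> None:
    jar = CookieJar()
    sc = SimpleCookie("session=abc123; Domain=example.com; Max-Age=3600; Path=/")
    jar.update_cookies(sc, response_url=URL("http://example.com/login"))
    sent = jar.filter_cookies(URL("http://example.com/app"))
    assert sent["session"].value == "abc123"

asyncio.run(main())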
+ return filtered + if type(request_url) is not URL: + warnings.warn( + "filter_cookies expects yarl.URL instances only," + f"and will stop working in 4.x, got {type(request_url)}", + DeprecationWarning, + stacklevel=2, + ) + request_url = URL(request_url) + hostname = request_url.raw_host or "" + + is_not_secure = request_url.scheme not in ("https", "wss") + if is_not_secure and self._treat_as_secure_origin: + request_origin = URL() + with contextlib.suppress(ValueError): + request_origin = request_url.origin() + is_not_secure = request_origin not in self._treat_as_secure_origin + + # Send shared cookie + key = ("", "") + for c in self._cookies[key].values(): + # Check cache first + if c.key in self._morsel_cache[key]: + filtered[c.key] = self._morsel_cache[key][c.key] + continue + + # Build and cache the morsel + mrsl_val = self._build_morsel(c) + self._morsel_cache[key][c.key] = mrsl_val + filtered[c.key] = mrsl_val + + if is_ip_address(hostname): + if not self._unsafe: + return filtered + domains: Iterable[str] = (hostname,) + else: + # Get all the subdomains that might match a cookie (e.g. "foo.bar.com", "bar.com", "com") + domains = itertools.accumulate( + reversed(hostname.split(".")), _FORMAT_DOMAIN_REVERSED + ) + + # Get all the path prefixes that might match a cookie (e.g. "", "/foo", "/foo/bar") + paths = itertools.accumulate(request_url.path.split("/"), _FORMAT_PATH) + # Create every combination of (domain, path) pairs. + pairs = itertools.product(domains, paths) + + path_len = len(request_url.path) + # Point 2: https://www.rfc-editor.org/rfc/rfc6265.html#section-5.4 + for p in pairs: + if p not in self._cookies: + continue + for name, cookie in self._cookies[p].items(): + domain = cookie["domain"] + + if (domain, name) in self._host_only_cookies and domain != hostname: + continue + + # Skip edge case when the cookie has a trailing slash but request doesn't. + if len(cookie["path"]) > path_len: + continue + + if is_not_secure and cookie["secure"]: + continue + + # We already built the Morsel so reuse it here + if name in self._morsel_cache[p]: + filtered[name] = self._morsel_cache[p][name] + continue + + # Build and cache the morsel + mrsl_val = self._build_morsel(cookie) + self._morsel_cache[p][name] = mrsl_val + filtered[name] = mrsl_val + + return filtered + + def _build_morsel(self, cookie: Morsel[str]) -> Morsel[str]: + """Build a morsel for sending, respecting quote_cookie setting.""" + if self._quote_cookie and cookie.coded_value and cookie.coded_value[0] == '"': + return preserve_morsel_with_coded_value(cookie) + morsel: Morsel[str] = Morsel() + if self._quote_cookie: + value, coded_value = _SIMPLE_COOKIE.value_encode(cookie.value) + else: + coded_value = value = cookie.value + # We use __setstate__ instead of the public set() API because it allows us to + # bypass validation and set already validated state. This is more stable than + # setting protected attributes directly and unlikely to change since it would + # break pickling. 
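# --- The candidate (domain, path) generation above, in isolation: every
# --- domain suffix of the host is paired with every path prefix.
import itertools

hostname = "foo.bar.example.com"
domains = list(itertools.accumulate(reversed(hostname.split(".")), "{1}.{0}".format))
assert domains == ["com", "example.com", "bar.example.com", "foo.bar.example.com"]

paths = list(itertools.accumulate("/a/b/c".split("/"), "{}/{}".format))
assert paths == ["", "/a", "/a/b", "/a/b/c"]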
+ morsel.__setstate__({"key": cookie.key, "value": value, "coded_value": coded_value}) # type: ignore[attr-defined] + return morsel + + @staticmethod + def _is_domain_match(domain: str, hostname: str) -> bool: + """Implements domain matching adhering to RFC 6265.""" + if hostname == domain: + return True + + if not hostname.endswith(domain): + return False + + non_matching = hostname[: -len(domain)] + + if not non_matching.endswith("."): + return False + + return not is_ip_address(hostname) + + @classmethod + def _parse_date(cls, date_str: str) -> Optional[int]: + """Implements date string parsing adhering to RFC 6265.""" + if not date_str: + return None + + found_time = False + found_day = False + found_month = False + found_year = False + + hour = minute = second = 0 + day = 0 + month = 0 + year = 0 + + for token_match in cls.DATE_TOKENS_RE.finditer(date_str): + + token = token_match.group("token") + + if not found_time: + time_match = cls.DATE_HMS_TIME_RE.match(token) + if time_match: + found_time = True + hour, minute, second = (int(s) for s in time_match.groups()) + continue + + if not found_day: + day_match = cls.DATE_DAY_OF_MONTH_RE.match(token) + if day_match: + found_day = True + day = int(day_match.group()) + continue + + if not found_month: + month_match = cls.DATE_MONTH_RE.match(token) + if month_match: + found_month = True + assert month_match.lastindex is not None + month = month_match.lastindex + continue + + if not found_year: + year_match = cls.DATE_YEAR_RE.match(token) + if year_match: + found_year = True + year = int(year_match.group()) + + if 70 <= year <= 99: + year += 1900 + elif 0 <= year <= 69: + year += 2000 + + if False in (found_day, found_month, found_year, found_time): + return None + + if not 1 <= day <= 31: + return None + + if year < 1601 or hour > 23 or minute > 59 or second > 59: + return None + + return calendar.timegm((year, month, day, hour, minute, second, -1, -1, -1)) + + +class DummyCookieJar(AbstractCookieJar): + """Implements a dummy cookie storage. + + It can be used with the ClientSession when no cookie processing is needed. + + """ + + def __init__(self, *, loop: Optional[asyncio.AbstractEventLoop] = None) -> None: + super().__init__(loop=loop) + + def __iter__(self) -> "Iterator[Morsel[str]]": + while False: + yield None + + def __len__(self) -> int: + return 0 + + @property + def quote_cookie(self) -> bool: + return True + + def clear(self, predicate: Optional[ClearCookiePredicate] = None) -> None: + pass + + def clear_domain(self, domain: str) -> None: + pass + + def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None: + pass + + def filter_cookies(self, request_url: URL) -> "BaseCookie[str]": + return SimpleCookie() diff --git a/py311/lib/python3.11/site-packages/aiohttp/formdata.py b/py311/lib/python3.11/site-packages/aiohttp/formdata.py new file mode 100644 index 0000000000000000000000000000000000000000..a5a4f603e190cdc66fe34e95a47983377f94666f --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/formdata.py @@ -0,0 +1,179 @@ +import io +import warnings +from typing import Any, Iterable, List, Optional +from urllib.parse import urlencode + +from multidict import MultiDict, MultiDictProxy + +from . import hdrs, multipart, payload +from .helpers import guess_filename +from .payload import Payload + +__all__ = ("FormData",) + + +class FormData: + """Helper class for form body generation. + + Supports multipart/form-data and application/x-www-form-urlencoded. 
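# --- Standalone restatement (sketch) of the RFC 6265 domain matching in
# --- _is_domain_match above, minus the IP-address guard:
def domain_match(domain: str, hostname: str) -> bool:
    if hostname == domain:
        return True
    if not hostname.endswith(domain):
        return False
    return hostname[: -len(domain)].endswith(".")

assert domain_match("example.com", "foo.example.com")
assert not domain_match("ample.com", "example.com")  # suffix, but no dot boundary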
+ """ + + def __init__( + self, + fields: Iterable[Any] = (), + quote_fields: bool = True, + charset: Optional[str] = None, + *, + default_to_multipart: bool = False, + ) -> None: + self._writer = multipart.MultipartWriter("form-data") + self._fields: List[Any] = [] + self._is_multipart = default_to_multipart + self._quote_fields = quote_fields + self._charset = charset + + if isinstance(fields, dict): + fields = list(fields.items()) + elif not isinstance(fields, (list, tuple)): + fields = (fields,) + self.add_fields(*fields) + + @property + def is_multipart(self) -> bool: + return self._is_multipart + + def add_field( + self, + name: str, + value: Any, + *, + content_type: Optional[str] = None, + filename: Optional[str] = None, + content_transfer_encoding: Optional[str] = None, + ) -> None: + + if isinstance(value, io.IOBase): + self._is_multipart = True + elif isinstance(value, (bytes, bytearray, memoryview)): + msg = ( + "In v4, passing bytes will no longer create a file field. " + "Please explicitly use the filename parameter or pass a BytesIO object." + ) + if filename is None and content_transfer_encoding is None: + warnings.warn(msg, DeprecationWarning) + filename = name + + type_options: MultiDict[str] = MultiDict({"name": name}) + if filename is not None and not isinstance(filename, str): + raise TypeError("filename must be an instance of str. Got: %s" % filename) + if filename is None and isinstance(value, io.IOBase): + filename = guess_filename(value, name) + if filename is not None: + type_options["filename"] = filename + self._is_multipart = True + + headers = {} + if content_type is not None: + if not isinstance(content_type, str): + raise TypeError( + "content_type must be an instance of str. Got: %s" % content_type + ) + headers[hdrs.CONTENT_TYPE] = content_type + self._is_multipart = True + if content_transfer_encoding is not None: + if not isinstance(content_transfer_encoding, str): + raise TypeError( + "content_transfer_encoding must be an instance" + " of str. Got: %s" % content_transfer_encoding + ) + msg = ( + "content_transfer_encoding is deprecated. " + "To maintain compatibility with v4 please pass a BytesPayload." 
+ ) + warnings.warn(msg, DeprecationWarning) + self._is_multipart = True + + self._fields.append((type_options, headers, value)) + + def add_fields(self, *fields: Any) -> None: + to_add = list(fields) + + while to_add: + rec = to_add.pop(0) + + if isinstance(rec, io.IOBase): + k = guess_filename(rec, "unknown") + self.add_field(k, rec) # type: ignore[arg-type] + + elif isinstance(rec, (MultiDictProxy, MultiDict)): + to_add.extend(rec.items()) + + elif isinstance(rec, (list, tuple)) and len(rec) == 2: + k, fp = rec + self.add_field(k, fp) + + else: + raise TypeError( + "Only io.IOBase, multidict and (name, file) " + "pairs allowed, use .add_field() for passing " + "more complex parameters, got {!r}".format(rec) + ) + + def _gen_form_urlencoded(self) -> payload.BytesPayload: + # form data (x-www-form-urlencoded) + data = [] + for type_options, _, value in self._fields: + data.append((type_options["name"], value)) + + charset = self._charset if self._charset is not None else "utf-8" + + if charset == "utf-8": + content_type = "application/x-www-form-urlencoded" + else: + content_type = "application/x-www-form-urlencoded; charset=%s" % charset + + return payload.BytesPayload( + urlencode(data, doseq=True, encoding=charset).encode(), + content_type=content_type, + ) + + def _gen_form_data(self) -> multipart.MultipartWriter: + """Encode a list of fields using the multipart/form-data MIME format""" + for dispparams, headers, value in self._fields: + try: + if hdrs.CONTENT_TYPE in headers: + part = payload.get_payload( + value, + content_type=headers[hdrs.CONTENT_TYPE], + headers=headers, + encoding=self._charset, + ) + else: + part = payload.get_payload( + value, headers=headers, encoding=self._charset + ) + except Exception as exc: + raise TypeError( + "Can not serialize value type: %r\n " + "headers: %r\n value: %r" % (type(value), headers, value) + ) from exc + + if dispparams: + part.set_content_disposition( + "form-data", quote_fields=self._quote_fields, **dispparams + ) + # FIXME cgi.FieldStorage doesn't likes body parts with + # Content-Length which were sent via chunked transfer encoding + assert part.headers is not None + part.headers.popall(hdrs.CONTENT_LENGTH, None) + + self._writer.append_payload(part) + + self._fields.clear() + return self._writer + + def __call__(self) -> Payload: + if self._is_multipart: + return self._gen_form_data() + else: + return self._gen_form_urlencoded() diff --git a/py311/lib/python3.11/site-packages/aiohttp/hdrs.py b/py311/lib/python3.11/site-packages/aiohttp/hdrs.py new file mode 100644 index 0000000000000000000000000000000000000000..c8d6b35f33ae4be537d7776ce4085982618d1305 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/hdrs.py @@ -0,0 +1,121 @@ +"""HTTP Headers constants.""" + +# After changing the file content call ./tools/gen.py +# to regenerate the headers parser +import itertools +from typing import Final, Set + +from multidict import istr + +METH_ANY: Final[str] = "*" +METH_CONNECT: Final[str] = "CONNECT" +METH_HEAD: Final[str] = "HEAD" +METH_GET: Final[str] = "GET" +METH_DELETE: Final[str] = "DELETE" +METH_OPTIONS: Final[str] = "OPTIONS" +METH_PATCH: Final[str] = "PATCH" +METH_POST: Final[str] = "POST" +METH_PUT: Final[str] = "PUT" +METH_TRACE: Final[str] = "TRACE" + +METH_ALL: Final[Set[str]] = { + METH_CONNECT, + METH_HEAD, + METH_GET, + METH_DELETE, + METH_OPTIONS, + METH_PATCH, + METH_POST, + METH_PUT, + METH_TRACE, +} + +ACCEPT: Final[istr] = istr("Accept") +ACCEPT_CHARSET: Final[istr] = istr("Accept-Charset") 
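# --- Usage sketch for FormData above (public aiohttp API): the form
# --- switches to multipart as soon as a field carries a filename,
# --- content_type, or file object.
import io
from aiohttp import FormData

form = FormData()
form.add_field("token", "abc123")          # still x-www-form-urlencoded
form.add_field(
    "avatar",
    io.BytesIO(b"\x89PNG\r\n"),
    filename="avatar.png",
    content_type="image/png",
)                                          # now multipart/form-data
assert form.is_multipart
writer = form()  # a MultipartWriter suitable as a request body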
+ACCEPT_ENCODING: Final[istr] = istr("Accept-Encoding") +ACCEPT_LANGUAGE: Final[istr] = istr("Accept-Language") +ACCEPT_RANGES: Final[istr] = istr("Accept-Ranges") +ACCESS_CONTROL_MAX_AGE: Final[istr] = istr("Access-Control-Max-Age") +ACCESS_CONTROL_ALLOW_CREDENTIALS: Final[istr] = istr("Access-Control-Allow-Credentials") +ACCESS_CONTROL_ALLOW_HEADERS: Final[istr] = istr("Access-Control-Allow-Headers") +ACCESS_CONTROL_ALLOW_METHODS: Final[istr] = istr("Access-Control-Allow-Methods") +ACCESS_CONTROL_ALLOW_ORIGIN: Final[istr] = istr("Access-Control-Allow-Origin") +ACCESS_CONTROL_EXPOSE_HEADERS: Final[istr] = istr("Access-Control-Expose-Headers") +ACCESS_CONTROL_REQUEST_HEADERS: Final[istr] = istr("Access-Control-Request-Headers") +ACCESS_CONTROL_REQUEST_METHOD: Final[istr] = istr("Access-Control-Request-Method") +AGE: Final[istr] = istr("Age") +ALLOW: Final[istr] = istr("Allow") +AUTHORIZATION: Final[istr] = istr("Authorization") +CACHE_CONTROL: Final[istr] = istr("Cache-Control") +CONNECTION: Final[istr] = istr("Connection") +CONTENT_DISPOSITION: Final[istr] = istr("Content-Disposition") +CONTENT_ENCODING: Final[istr] = istr("Content-Encoding") +CONTENT_LANGUAGE: Final[istr] = istr("Content-Language") +CONTENT_LENGTH: Final[istr] = istr("Content-Length") +CONTENT_LOCATION: Final[istr] = istr("Content-Location") +CONTENT_MD5: Final[istr] = istr("Content-MD5") +CONTENT_RANGE: Final[istr] = istr("Content-Range") +CONTENT_TRANSFER_ENCODING: Final[istr] = istr("Content-Transfer-Encoding") +CONTENT_TYPE: Final[istr] = istr("Content-Type") +COOKIE: Final[istr] = istr("Cookie") +DATE: Final[istr] = istr("Date") +DESTINATION: Final[istr] = istr("Destination") +DIGEST: Final[istr] = istr("Digest") +ETAG: Final[istr] = istr("Etag") +EXPECT: Final[istr] = istr("Expect") +EXPIRES: Final[istr] = istr("Expires") +FORWARDED: Final[istr] = istr("Forwarded") +FROM: Final[istr] = istr("From") +HOST: Final[istr] = istr("Host") +IF_MATCH: Final[istr] = istr("If-Match") +IF_MODIFIED_SINCE: Final[istr] = istr("If-Modified-Since") +IF_NONE_MATCH: Final[istr] = istr("If-None-Match") +IF_RANGE: Final[istr] = istr("If-Range") +IF_UNMODIFIED_SINCE: Final[istr] = istr("If-Unmodified-Since") +KEEP_ALIVE: Final[istr] = istr("Keep-Alive") +LAST_EVENT_ID: Final[istr] = istr("Last-Event-ID") +LAST_MODIFIED: Final[istr] = istr("Last-Modified") +LINK: Final[istr] = istr("Link") +LOCATION: Final[istr] = istr("Location") +MAX_FORWARDS: Final[istr] = istr("Max-Forwards") +ORIGIN: Final[istr] = istr("Origin") +PRAGMA: Final[istr] = istr("Pragma") +PROXY_AUTHENTICATE: Final[istr] = istr("Proxy-Authenticate") +PROXY_AUTHORIZATION: Final[istr] = istr("Proxy-Authorization") +RANGE: Final[istr] = istr("Range") +REFERER: Final[istr] = istr("Referer") +RETRY_AFTER: Final[istr] = istr("Retry-After") +SEC_WEBSOCKET_ACCEPT: Final[istr] = istr("Sec-WebSocket-Accept") +SEC_WEBSOCKET_VERSION: Final[istr] = istr("Sec-WebSocket-Version") +SEC_WEBSOCKET_PROTOCOL: Final[istr] = istr("Sec-WebSocket-Protocol") +SEC_WEBSOCKET_EXTENSIONS: Final[istr] = istr("Sec-WebSocket-Extensions") +SEC_WEBSOCKET_KEY: Final[istr] = istr("Sec-WebSocket-Key") +SEC_WEBSOCKET_KEY1: Final[istr] = istr("Sec-WebSocket-Key1") +SERVER: Final[istr] = istr("Server") +SET_COOKIE: Final[istr] = istr("Set-Cookie") +TE: Final[istr] = istr("TE") +TRAILER: Final[istr] = istr("Trailer") +TRANSFER_ENCODING: Final[istr] = istr("Transfer-Encoding") +UPGRADE: Final[istr] = istr("Upgrade") +URI: Final[istr] = istr("URI") +USER_AGENT: Final[istr] = istr("User-Agent") +VARY: Final[istr] = 
istr("Vary") +VIA: Final[istr] = istr("Via") +WANT_DIGEST: Final[istr] = istr("Want-Digest") +WARNING: Final[istr] = istr("Warning") +WWW_AUTHENTICATE: Final[istr] = istr("WWW-Authenticate") +X_FORWARDED_FOR: Final[istr] = istr("X-Forwarded-For") +X_FORWARDED_HOST: Final[istr] = istr("X-Forwarded-Host") +X_FORWARDED_PROTO: Final[istr] = istr("X-Forwarded-Proto") + +# These are the upper/lower case variants of the headers/methods +# Example: {'hOst', 'host', 'HoST', 'HOSt', 'hOsT', 'HosT', 'hoSt', ...} +METH_HEAD_ALL: Final = frozenset( + map("".join, itertools.product(*zip(METH_HEAD.upper(), METH_HEAD.lower()))) +) +METH_CONNECT_ALL: Final = frozenset( + map("".join, itertools.product(*zip(METH_CONNECT.upper(), METH_CONNECT.lower()))) +) +HOST_ALL: Final = frozenset( + map("".join, itertools.product(*zip(HOST.upper(), HOST.lower()))) +) diff --git a/py311/lib/python3.11/site-packages/aiohttp/helpers.py b/py311/lib/python3.11/site-packages/aiohttp/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..dfab9877d398a9c8807ad2614397b3e54316ed1f --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/helpers.py @@ -0,0 +1,986 @@ +"""Various helper functions""" + +import asyncio +import base64 +import binascii +import contextlib +import datetime +import enum +import functools +import inspect +import netrc +import os +import platform +import re +import sys +import time +import weakref +from collections import namedtuple +from contextlib import suppress +from email.message import EmailMessage +from email.parser import HeaderParser +from email.policy import HTTP +from email.utils import parsedate +from math import ceil +from pathlib import Path +from types import MappingProxyType, TracebackType +from typing import ( + Any, + Callable, + ContextManager, + Dict, + Generator, + Generic, + Iterable, + Iterator, + List, + Mapping, + Optional, + Protocol, + Tuple, + Type, + TypeVar, + Union, + get_args, + overload, +) +from urllib.parse import quote +from urllib.request import getproxies, proxy_bypass + +import attr +from multidict import MultiDict, MultiDictProxy, MultiMapping +from propcache.api import under_cached_property as reify +from yarl import URL + +from . 
import hdrs +from .log import client_logger + +if sys.version_info >= (3, 11): + import asyncio as async_timeout +else: + import async_timeout + +__all__ = ("BasicAuth", "ChainMapProxy", "ETag", "reify") + +IS_MACOS = platform.system() == "Darwin" +IS_WINDOWS = platform.system() == "Windows" + +PY_310 = sys.version_info >= (3, 10) +PY_311 = sys.version_info >= (3, 11) + + +_T = TypeVar("_T") +_S = TypeVar("_S") + +_SENTINEL = enum.Enum("_SENTINEL", "sentinel") +sentinel = _SENTINEL.sentinel + +NO_EXTENSIONS = bool(os.environ.get("AIOHTTP_NO_EXTENSIONS")) + +# https://datatracker.ietf.org/doc/html/rfc9112#section-6.3-2.1 +EMPTY_BODY_STATUS_CODES = frozenset((204, 304, *range(100, 200))) +# https://datatracker.ietf.org/doc/html/rfc9112#section-6.3-2.1 +# https://datatracker.ietf.org/doc/html/rfc9112#section-6.3-2.2 +EMPTY_BODY_METHODS = hdrs.METH_HEAD_ALL + +DEBUG = sys.flags.dev_mode or ( + not sys.flags.ignore_environment and bool(os.environ.get("PYTHONASYNCIODEBUG")) +) + + +CHAR = {chr(i) for i in range(0, 128)} +CTL = {chr(i) for i in range(0, 32)} | { + chr(127), +} +SEPARATORS = { + "(", + ")", + "<", + ">", + "@", + ",", + ";", + ":", + "\\", + '"', + "/", + "[", + "]", + "?", + "=", + "{", + "}", + " ", + chr(9), +} +TOKEN = CHAR ^ CTL ^ SEPARATORS + + +class noop: + def __await__(self) -> Generator[None, None, None]: + yield + + +class BasicAuth(namedtuple("BasicAuth", ["login", "password", "encoding"])): + """Http basic authentication helper.""" + + def __new__( + cls, login: str, password: str = "", encoding: str = "latin1" + ) -> "BasicAuth": + if login is None: + raise ValueError("None is not allowed as login value") + + if password is None: + raise ValueError("None is not allowed as password value") + + if ":" in login: + raise ValueError('A ":" is not allowed in login (RFC 1945#section-11.1)') + + return super().__new__(cls, login, password, encoding) + + @classmethod + def decode(cls, auth_header: str, encoding: str = "latin1") -> "BasicAuth": + """Create a BasicAuth object from an Authorization HTTP header.""" + try: + auth_type, encoded_credentials = auth_header.split(" ", 1) + except ValueError: + raise ValueError("Could not parse authorization header.") + + if auth_type.lower() != "basic": + raise ValueError("Unknown authorization method %s" % auth_type) + + try: + decoded = base64.b64decode( + encoded_credentials.encode("ascii"), validate=True + ).decode(encoding) + except binascii.Error: + raise ValueError("Invalid base64 encoding.") + + try: + # RFC 2617 HTTP Authentication + # https://www.ietf.org/rfc/rfc2617.txt + # the colon must be present, but the username and password may be + # otherwise blank. + username, password = decoded.split(":", 1) + except ValueError: + raise ValueError("Invalid credentials.") + + return cls(username, password, encoding=encoding) + + @classmethod + def from_url(cls, url: URL, *, encoding: str = "latin1") -> Optional["BasicAuth"]: + """Create BasicAuth from url.""" + if not isinstance(url, URL): + raise TypeError("url should be yarl.URL instance") + # Check raw_user and raw_password first as yarl is likely + # to already have these values parsed from the netloc in the cache. 
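# --- Round-trip sketch for the BasicAuth helper above (public API):
from aiohttp import BasicAuth

auth = BasicAuth("user", "pass")
header_value = auth.encode()               # 'Basic dXNlcjpwYXNz'
assert BasicAuth.decode(header_value) == BasicAuth("user", "pass")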
+ if url.raw_user is None and url.raw_password is None: + return None + return cls(url.user or "", url.password or "", encoding=encoding) + + def encode(self) -> str: + """Encode credentials.""" + creds = (f"{self.login}:{self.password}").encode(self.encoding) + return "Basic %s" % base64.b64encode(creds).decode(self.encoding) + + +def strip_auth_from_url(url: URL) -> Tuple[URL, Optional[BasicAuth]]: + """Remove user and password from URL if present and return BasicAuth object.""" + # Check raw_user and raw_password first as yarl is likely + # to already have these values parsed from the netloc in the cache. + if url.raw_user is None and url.raw_password is None: + return url, None + return url.with_user(None), BasicAuth(url.user or "", url.password or "") + + +def netrc_from_env() -> Optional[netrc.netrc]: + """Load netrc from file. + + Attempt to load it from the path specified by the env-var + NETRC or in the default location in the user's home directory. + + Returns None if it couldn't be found or fails to parse. + """ + netrc_env = os.environ.get("NETRC") + + if netrc_env is not None: + netrc_path = Path(netrc_env) + else: + try: + home_dir = Path.home() + except RuntimeError as e: # pragma: no cover + # if pathlib can't resolve home, it may raise a RuntimeError + client_logger.debug( + "Could not resolve home directory when " + "trying to look for .netrc file: %s", + e, + ) + return None + + netrc_path = home_dir / ("_netrc" if IS_WINDOWS else ".netrc") + + try: + return netrc.netrc(str(netrc_path)) + except netrc.NetrcParseError as e: + client_logger.warning("Could not parse .netrc file: %s", e) + except OSError as e: + netrc_exists = False + with contextlib.suppress(OSError): + netrc_exists = netrc_path.is_file() + # we couldn't read the file (doesn't exist, permissions, etc.) + if netrc_env or netrc_exists: + # only warn if the environment wanted us to load it, + # or it appears like the default file does actually exist + client_logger.warning("Could not read .netrc file: %s", e) + + return None + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class ProxyInfo: + proxy: URL + proxy_auth: Optional[BasicAuth] + + +def basicauth_from_netrc(netrc_obj: Optional[netrc.netrc], host: str) -> BasicAuth: + """ + Return :py:class:`~aiohttp.BasicAuth` credentials for ``host`` from ``netrc_obj``. + + :raises LookupError: if ``netrc_obj`` is :py:data:`None` or if no + entry is found for the ``host``. + """ + if netrc_obj is None: + raise LookupError("No .netrc file found") + auth_from_netrc = netrc_obj.authenticators(host) + + if auth_from_netrc is None: + raise LookupError(f"No entry for {host!s} found in the `.netrc` file.") + login, account, password = auth_from_netrc + + # TODO(PY311): username = login or account + # Up to python 3.10, account could be None if not specified, + # and login will be empty string if not specified. From 3.11, + # login and account will be empty string if not specified. 
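
The two netrc helpers compose as in the sketch below; the `NETRC` path and host name are placeholder values:

```python
import os

from aiohttp.helpers import basicauth_from_netrc, netrc_from_env

os.environ["NETRC"] = "/tmp/example-netrc"  # placeholder path
netrc_obj = netrc_from_env()                # None if missing or unparsable
try:
    auth = basicauth_from_netrc(netrc_obj, "example.com")  # placeholder host
except LookupError:                         # no file, or no entry for host
    auth = None
```
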
+ username = login if (login or account is None) else account + + # TODO(PY311): Remove this, as password will be empty string + # if not specified + if password is None: + password = "" + + return BasicAuth(username, password) + + +def proxies_from_env() -> Dict[str, ProxyInfo]: + proxy_urls = { + k: URL(v) + for k, v in getproxies().items() + if k in ("http", "https", "ws", "wss") + } + netrc_obj = netrc_from_env() + stripped = {k: strip_auth_from_url(v) for k, v in proxy_urls.items()} + ret = {} + for proto, val in stripped.items(): + proxy, auth = val + if proxy.scheme in ("https", "wss"): + client_logger.warning( + "%s proxies %s are not supported, ignoring", proxy.scheme.upper(), proxy + ) + continue + if netrc_obj and auth is None: + if proxy.host is not None: + try: + auth = basicauth_from_netrc(netrc_obj, proxy.host) + except LookupError: + auth = None + ret[proto] = ProxyInfo(proxy, auth) + return ret + + +def get_env_proxy_for_url(url: URL) -> Tuple[URL, Optional[BasicAuth]]: + """Get a permitted proxy for the given URL from the env.""" + if url.host is not None and proxy_bypass(url.host): + raise LookupError(f"Proxying is disallowed for `{url.host!r}`") + + proxies_in_env = proxies_from_env() + try: + proxy_info = proxies_in_env[url.scheme] + except KeyError: + raise LookupError(f"No proxies found for `{url!s}` in the env") + else: + return proxy_info.proxy, proxy_info.proxy_auth + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class MimeType: + type: str + subtype: str + suffix: str + parameters: "MultiDictProxy[str]" + + +@functools.lru_cache(maxsize=56) +def parse_mimetype(mimetype: str) -> MimeType: + """Parses a MIME type into its components. + + mimetype is a MIME type string. + + Returns a MimeType object. + + Example: + + >>> parse_mimetype('text/html; charset=utf-8') + MimeType(type='text', subtype='html', suffix='', + parameters={'charset': 'utf-8'}) + + """ + if not mimetype: + return MimeType( + type="", subtype="", suffix="", parameters=MultiDictProxy(MultiDict()) + ) + + parts = mimetype.split(";") + params: MultiDict[str] = MultiDict() + for item in parts[1:]: + if not item: + continue + key, _, value = item.partition("=") + params.add(key.lower().strip(), value.strip(' "')) + + fulltype = parts[0].strip().lower() + if fulltype == "*": + fulltype = "*/*" + + mtype, _, stype = fulltype.partition("/") + stype, _, suffix = stype.partition("+") + + return MimeType( + type=mtype, subtype=stype, suffix=suffix, parameters=MultiDictProxy(params) + ) + + +class EnsureOctetStream(EmailMessage): + def __init__(self) -> None: + super().__init__() + # https://www.rfc-editor.org/rfc/rfc9110#section-8.3-5 + self.set_default_type("application/octet-stream") + + def get_content_type(self) -> str: + """Re-implementation from Message + + Returns application/octet-stream in place of plain/text when + value is wrong. + + The way this class is used guarantees that content-type will + be present so simplify the checks wrt to the base implementation. + """ + value = self.get("content-type", "").lower() + + # Based on the implementation of _splitparam in the standard library + ctype, _, _ = value.partition(";") + ctype = ctype.strip() + if ctype.count("/") != 1: + return self.get_default_type() + return ctype + + +@functools.lru_cache(maxsize=56) +def parse_content_type(raw: str) -> Tuple[str, MappingProxyType[str, str]]: + """Parse Content-Type header. + + Returns a tuple of the parsed content type and a + MappingProxyType of parameters. 
The default returned value + is `application/octet-stream` + """ + msg = HeaderParser(EnsureOctetStream, policy=HTTP).parsestr(f"Content-Type: {raw}") + content_type = msg.get_content_type() + params = msg.get_params(()) + content_dict = dict(params[1:]) # First element is content type again + return content_type, MappingProxyType(content_dict) + + +def guess_filename(obj: Any, default: Optional[str] = None) -> Optional[str]: + name = getattr(obj, "name", None) + if name and isinstance(name, str) and name[0] != "<" and name[-1] != ">": + return Path(name).name + return default + + +not_qtext_re = re.compile(r"[^\041\043-\133\135-\176]") +QCONTENT = {chr(i) for i in range(0x20, 0x7F)} | {"\t"} + + +def quoted_string(content: str) -> str: + """Return 7-bit content as quoted-string. + + Format content into a quoted-string as defined in RFC5322 for + Internet Message Format. Notice that this is not the 8-bit HTTP + format, but the 7-bit email format. Content must be in usascii or + a ValueError is raised. + """ + if not (QCONTENT > set(content)): + raise ValueError(f"bad content for quoted-string {content!r}") + return not_qtext_re.sub(lambda x: "\\" + x.group(0), content) + + +def content_disposition_header( + disptype: str, quote_fields: bool = True, _charset: str = "utf-8", **params: str +) -> str: + """Sets ``Content-Disposition`` header for MIME. + + This is the MIME payload Content-Disposition header from RFC 2183 + and RFC 7579 section 4.2, not the HTTP Content-Disposition from + RFC 6266. + + disptype is a disposition type: inline, attachment, form-data. + Should be valid extension token (see RFC 2183) + + quote_fields performs value quoting to 7-bit MIME headers + according to RFC 7578. Set to quote_fields to False if recipient + can take 8-bit file names and field values. + + _charset specifies the charset to use when quote_fields is True. + + params is a dict with disposition params. + """ + if not disptype or not (TOKEN > set(disptype)): + raise ValueError(f"bad content disposition type {disptype!r}") + + value = disptype + if params: + lparams = [] + for key, val in params.items(): + if not key or not (TOKEN > set(key)): + raise ValueError(f"bad content disposition parameter {key!r}={val!r}") + if quote_fields: + if key.lower() == "filename": + qval = quote(val, "", encoding=_charset) + lparams.append((key, '"%s"' % qval)) + else: + try: + qval = quoted_string(val) + except ValueError: + qval = "".join( + (_charset, "''", quote(val, "", encoding=_charset)) + ) + lparams.append((key + "*", qval)) + else: + lparams.append((key, '"%s"' % qval)) + else: + qval = val.replace("\\", "\\\\").replace('"', '\\"') + lparams.append((key, '"%s"' % qval)) + sparams = "; ".join("=".join(pair) for pair in lparams) + value = "; ".join((value, sparams)) + return value + + +def is_ip_address(host: Optional[str]) -> bool: + """Check if host looks like an IP Address. + + This check is only meant as a heuristic to ensure that + a host is not a domain name. + """ + if not host: + return False + # For a host to be an ipv4 address, it must be all numeric. + # The host must contain a colon to be an IPv6 address. + return ":" in host or host.replace(".", "").isdigit() + + +_cached_current_datetime: Optional[int] = None +_cached_formatted_datetime = "" + + +def rfc822_formatted_time() -> str: + global _cached_current_datetime + global _cached_formatted_datetime + + now = int(time.time()) + if now != _cached_current_datetime: + # Weekday and month names for HTTP date/time formatting; + # always English! 
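
To make the MIME helpers above concrete, here is a short sketch of `parse_mimetype` and `content_disposition_header` (the example values are illustrative):

```python
from aiohttp.helpers import content_disposition_header, parse_mimetype

mt = parse_mimetype("text/html; charset=utf-8")
assert (mt.type, mt.subtype, mt.parameters["charset"]) == ("text", "html", "utf-8")

# With quote_fields=True (the default), non-ASCII filenames are percent-encoded:
content_disposition_header("attachment", filename="résumé.pdf")
# -> 'attachment; filename="r%C3%A9sum%C3%A9.pdf"'
```
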
+ # Tuples are constants stored in codeobject! + _weekdayname = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun") + _monthname = ( + "", # Dummy so we can use 1-based month numbers + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", + ) + + year, month, day, hh, mm, ss, wd, *tail = time.gmtime(now) + _cached_formatted_datetime = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( + _weekdayname[wd], + day, + _monthname[month], + year, + hh, + mm, + ss, + ) + _cached_current_datetime = now + return _cached_formatted_datetime + + +def _weakref_handle(info: "Tuple[weakref.ref[object], str]") -> None: + ref, name = info + ob = ref() + if ob is not None: + with suppress(Exception): + getattr(ob, name)() + + +def weakref_handle( + ob: object, + name: str, + timeout: float, + loop: asyncio.AbstractEventLoop, + timeout_ceil_threshold: float = 5, +) -> Optional[asyncio.TimerHandle]: + if timeout is not None and timeout > 0: + when = loop.time() + timeout + if timeout >= timeout_ceil_threshold: + when = ceil(when) + + return loop.call_at(when, _weakref_handle, (weakref.ref(ob), name)) + return None + + +def call_later( + cb: Callable[[], Any], + timeout: float, + loop: asyncio.AbstractEventLoop, + timeout_ceil_threshold: float = 5, +) -> Optional[asyncio.TimerHandle]: + if timeout is None or timeout <= 0: + return None + now = loop.time() + when = calculate_timeout_when(now, timeout, timeout_ceil_threshold) + return loop.call_at(when, cb) + + +def calculate_timeout_when( + loop_time: float, + timeout: float, + timeout_ceiling_threshold: float, +) -> float: + """Calculate when to execute a timeout.""" + when = loop_time + timeout + if timeout > timeout_ceiling_threshold: + return ceil(when) + return when + + +class TimeoutHandle: + """Timeout handle""" + + __slots__ = ("_timeout", "_loop", "_ceil_threshold", "_callbacks") + + def __init__( + self, + loop: asyncio.AbstractEventLoop, + timeout: Optional[float], + ceil_threshold: float = 5, + ) -> None: + self._timeout = timeout + self._loop = loop + self._ceil_threshold = ceil_threshold + self._callbacks: List[ + Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]] + ] = [] + + def register( + self, callback: Callable[..., None], *args: Any, **kwargs: Any + ) -> None: + self._callbacks.append((callback, args, kwargs)) + + def close(self) -> None: + self._callbacks.clear() + + def start(self) -> Optional[asyncio.TimerHandle]: + timeout = self._timeout + if timeout is not None and timeout > 0: + when = self._loop.time() + timeout + if timeout >= self._ceil_threshold: + when = ceil(when) + return self._loop.call_at(when, self.__call__) + else: + return None + + def timer(self) -> "BaseTimerContext": + if self._timeout is not None and self._timeout > 0: + timer = TimerContext(self._loop) + self.register(timer.timeout) + return timer + else: + return TimerNoop() + + def __call__(self) -> None: + for cb, args, kwargs in self._callbacks: + with suppress(Exception): + cb(*args, **kwargs) + + self._callbacks.clear() + + +class BaseTimerContext(ContextManager["BaseTimerContext"]): + + __slots__ = () + + def assert_timeout(self) -> None: + """Raise TimeoutError if timeout has been exceeded.""" + + +class TimerNoop(BaseTimerContext): + + __slots__ = () + + def __enter__(self) -> BaseTimerContext: + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + return + + +class TimerContext(BaseTimerContext): 
+ """Low resolution timeout context manager""" + + __slots__ = ("_loop", "_tasks", "_cancelled", "_cancelling") + + def __init__(self, loop: asyncio.AbstractEventLoop) -> None: + self._loop = loop + self._tasks: List[asyncio.Task[Any]] = [] + self._cancelled = False + self._cancelling = 0 + + def assert_timeout(self) -> None: + """Raise TimeoutError if timer has already been cancelled.""" + if self._cancelled: + raise asyncio.TimeoutError from None + + def __enter__(self) -> BaseTimerContext: + task = asyncio.current_task(loop=self._loop) + if task is None: + raise RuntimeError("Timeout context manager should be used inside a task") + + if sys.version_info >= (3, 11): + # Remember if the task was already cancelling + # so when we __exit__ we can decide if we should + # raise asyncio.TimeoutError or let the cancellation propagate + self._cancelling = task.cancelling() + + if self._cancelled: + raise asyncio.TimeoutError from None + + self._tasks.append(task) + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + enter_task: Optional[asyncio.Task[Any]] = None + if self._tasks: + enter_task = self._tasks.pop() + + if exc_type is asyncio.CancelledError and self._cancelled: + assert enter_task is not None + # The timeout was hit, and the task was cancelled + # so we need to uncancel the last task that entered the context manager + # since the cancellation should not leak out of the context manager + if sys.version_info >= (3, 11): + # If the task was already cancelling don't raise + # asyncio.TimeoutError and instead return None + # to allow the cancellation to propagate + if enter_task.uncancel() > self._cancelling: + return None + raise asyncio.TimeoutError from exc_val + return None + + def timeout(self) -> None: + if not self._cancelled: + for task in set(self._tasks): + task.cancel() + + self._cancelled = True + + +def ceil_timeout( + delay: Optional[float], ceil_threshold: float = 5 +) -> async_timeout.Timeout: + if delay is None or delay <= 0: + return async_timeout.timeout(None) + + loop = asyncio.get_running_loop() + now = loop.time() + when = now + delay + if delay > ceil_threshold: + when = ceil(when) + return async_timeout.timeout_at(when) + + +class HeadersMixin: + """Mixin for handling headers.""" + + ATTRS = frozenset(["_content_type", "_content_dict", "_stored_content_type"]) + + _headers: MultiMapping[str] + _content_type: Optional[str] = None + _content_dict: Optional[Dict[str, str]] = None + _stored_content_type: Union[str, None, _SENTINEL] = sentinel + + def _parse_content_type(self, raw: Optional[str]) -> None: + self._stored_content_type = raw + if raw is None: + # default value according to RFC 2616 + self._content_type = "application/octet-stream" + self._content_dict = {} + else: + content_type, content_mapping_proxy = parse_content_type(raw) + self._content_type = content_type + # _content_dict needs to be mutable so we can update it + self._content_dict = content_mapping_proxy.copy() + + @property + def content_type(self) -> str: + """The value of content part for Content-Type HTTP header.""" + raw = self._headers.get(hdrs.CONTENT_TYPE) + if self._stored_content_type != raw: + self._parse_content_type(raw) + assert self._content_type is not None + return self._content_type + + @property + def charset(self) -> Optional[str]: + """The value of charset part for Content-Type HTTP header.""" + raw = self._headers.get(hdrs.CONTENT_TYPE) + if 
self._stored_content_type != raw:
+            self._parse_content_type(raw)
+        assert self._content_dict is not None
+        return self._content_dict.get("charset")
+
+    @property
+    def content_length(self) -> Optional[int]:
+        """The value of Content-Length HTTP header."""
+        content_length = self._headers.get(hdrs.CONTENT_LENGTH)
+        return None if content_length is None else int(content_length)
+
+
+def set_result(fut: "asyncio.Future[_T]", result: _T) -> None:
+    if not fut.done():
+        fut.set_result(result)
+
+
+_EXC_SENTINEL = BaseException()
+
+
+class ErrorableProtocol(Protocol):
+    def set_exception(
+        self,
+        exc: BaseException,
+        exc_cause: BaseException = ...,
+    ) -> None: ...  # pragma: no cover
+
+
+def set_exception(
+    fut: "asyncio.Future[_T] | ErrorableProtocol",
+    exc: BaseException,
+    exc_cause: BaseException = _EXC_SENTINEL,
+) -> None:
+    """Set future exception.
+
+    If the future is marked as complete, this function is a no-op.
+
+    :param exc_cause: An exception that is a direct cause of ``exc``.
+                      Only set if provided.
+    """
+    if asyncio.isfuture(fut) and fut.done():
+        return
+
+    exc_is_sentinel = exc_cause is _EXC_SENTINEL
+    exc_causes_itself = exc is exc_cause
+    if not exc_is_sentinel and not exc_causes_itself:
+        exc.__cause__ = exc_cause
+
+    fut.set_exception(exc)
+
+
+@functools.total_ordering
+class AppKey(Generic[_T]):
+    """Keys for static typing support in Application."""
+
+    __slots__ = ("_name", "_t", "__orig_class__")
+
+    # This may be set by Python when instantiating with a generic type. We need to
+    # support this, in order to support types that are not concrete classes,
+    # like Iterable, which can't be passed as the second parameter to __init__.
+    __orig_class__: Type[object]
+
+    def __init__(self, name: str, t: Optional[Type[_T]] = None):
+        # Prefix with module name to help deduplicate key names.
+        frame = inspect.currentframe()
+        while frame:
+            if frame.f_code.co_name == "<module>":
+                module: str = frame.f_globals["__name__"]
+                break
+            frame = frame.f_back
+
+        self._name = module + "." + name
+        self._t = t
+
+    def __lt__(self, other: object) -> bool:
+        if isinstance(other, AppKey):
+            return self._name < other._name
+        return True  # Order AppKey above other types.
+
+    def __repr__(self) -> str:
+        t = self._t
+        if t is None:
+            with suppress(AttributeError):
+                # Set to type arg.
+                t = get_args(self.__orig_class__)[0]
+
+        if t is None:
+            t_repr = "<<Unknown>>"
+        elif isinstance(t, type):
+            if t.__module__ == "builtins":
+                t_repr = t.__qualname__
+            else:
+                t_repr = f"{t.__module__}.{t.__qualname__}"
+        else:
+            t_repr = repr(t)
+        return f"<AppKey({self._name} type={t_repr})>"
+
+
+class ChainMapProxy(Mapping[Union[str, AppKey[Any]], Any]):
+    __slots__ = ("_maps",)
+
+    def __init__(self, maps: Iterable[Mapping[Union[str, AppKey[Any]], Any]]) -> None:
+        self._maps = tuple(maps)
+
+    def __init_subclass__(cls) -> None:
+        raise TypeError(
+            "Inheritance class {} from ChainMapProxy "
+            "is forbidden".format(cls.__name__)
+        )
+
+    @overload  # type: ignore[override]
+    def __getitem__(self, key: AppKey[_T]) -> _T: ...
+
+    @overload
+    def __getitem__(self, key: str) -> Any: ...
+
+    def __getitem__(self, key: Union[str, AppKey[_T]]) -> Any:
+        for mapping in self._maps:
+            try:
+                return mapping[key]
+            except KeyError:
+                pass
+        raise KeyError(key)
+
+    @overload  # type: ignore[override]
+    def get(self, key: AppKey[_T], default: _S) -> Union[_T, _S]: ...
+
+    @overload
+    def get(self, key: AppKey[_T], default: None = ...) -> Optional[_T]: ...
+
+    @overload
+    def get(self, key: str, default: Any = ...) -> Any: ...
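
Assuming the usual top-level re-export (`web.AppKey`), typed application storage built on the `AppKey`/`ChainMapProxy` machinery above looks like this sketch:

```python
from aiohttp import web

config_key = web.AppKey("config_key", dict)  # an AppKey[dict]

app = web.Application()
app[config_key] = {"debug": True}
value = app[config_key]  # type checkers narrow this lookup to dict
```
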
+ + def get(self, key: Union[str, AppKey[_T]], default: Any = None) -> Any: + try: + return self[key] + except KeyError: + return default + + def __len__(self) -> int: + # reuses stored hash values if possible + return len(set().union(*self._maps)) + + def __iter__(self) -> Iterator[Union[str, AppKey[Any]]]: + d: Dict[Union[str, AppKey[Any]], Any] = {} + for mapping in reversed(self._maps): + # reuses stored hash values if possible + d.update(mapping) + return iter(d) + + def __contains__(self, key: object) -> bool: + return any(key in m for m in self._maps) + + def __bool__(self) -> bool: + return any(self._maps) + + def __repr__(self) -> str: + content = ", ".join(map(repr, self._maps)) + return f"ChainMapProxy({content})" + + +# https://tools.ietf.org/html/rfc7232#section-2.3 +_ETAGC = r"[!\x23-\x7E\x80-\xff]+" +_ETAGC_RE = re.compile(_ETAGC) +_QUOTED_ETAG = rf'(W/)?"({_ETAGC})"' +QUOTED_ETAG_RE = re.compile(_QUOTED_ETAG) +LIST_QUOTED_ETAG_RE = re.compile(rf"({_QUOTED_ETAG})(?:\s*,\s*|$)|(.)") + +ETAG_ANY = "*" + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class ETag: + value: str + is_weak: bool = False + + +def validate_etag_value(value: str) -> None: + if value != ETAG_ANY and not _ETAGC_RE.fullmatch(value): + raise ValueError( + f"Value {value!r} is not a valid etag. Maybe it contains '\"'?" + ) + + +def parse_http_date(date_str: Optional[str]) -> Optional[datetime.datetime]: + """Process a date string, return a datetime object""" + if date_str is not None: + timetuple = parsedate(date_str) + if timetuple is not None: + with suppress(ValueError): + return datetime.datetime(*timetuple[:6], tzinfo=datetime.timezone.utc) + return None + + +@functools.lru_cache +def must_be_empty_body(method: str, code: int) -> bool: + """Check if a request must return an empty body.""" + return ( + code in EMPTY_BODY_STATUS_CODES + or method in EMPTY_BODY_METHODS + or (200 <= code < 300 and method in hdrs.METH_CONNECT_ALL) + ) + + +def should_remove_content_length(method: str, code: int) -> bool: + """Check if a Content-Length header should be removed. + + This should always be a subset of must_be_empty_body + """ + # https://www.rfc-editor.org/rfc/rfc9110.html#section-8.6-8 + # https://www.rfc-editor.org/rfc/rfc9110.html#section-15.4.5-4 + return code in EMPTY_BODY_STATUS_CODES or ( + 200 <= code < 300 and method in hdrs.METH_CONNECT_ALL + ) diff --git a/py311/lib/python3.11/site-packages/aiohttp/http.py b/py311/lib/python3.11/site-packages/aiohttp/http.py new file mode 100644 index 0000000000000000000000000000000000000000..a1feae2d9b8fe631d539a15dbf8e5ea2914d70d5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/http.py @@ -0,0 +1,72 @@ +import sys +from http import HTTPStatus +from typing import Mapping, Tuple + +from . 
import __version__ +from .http_exceptions import HttpProcessingError as HttpProcessingError +from .http_parser import ( + HeadersParser as HeadersParser, + HttpParser as HttpParser, + HttpRequestParser as HttpRequestParser, + HttpResponseParser as HttpResponseParser, + RawRequestMessage as RawRequestMessage, + RawResponseMessage as RawResponseMessage, +) +from .http_websocket import ( + WS_CLOSED_MESSAGE as WS_CLOSED_MESSAGE, + WS_CLOSING_MESSAGE as WS_CLOSING_MESSAGE, + WS_KEY as WS_KEY, + WebSocketError as WebSocketError, + WebSocketReader as WebSocketReader, + WebSocketWriter as WebSocketWriter, + WSCloseCode as WSCloseCode, + WSMessage as WSMessage, + WSMsgType as WSMsgType, + ws_ext_gen as ws_ext_gen, + ws_ext_parse as ws_ext_parse, +) +from .http_writer import ( + HttpVersion as HttpVersion, + HttpVersion10 as HttpVersion10, + HttpVersion11 as HttpVersion11, + StreamWriter as StreamWriter, +) + +__all__ = ( + "HttpProcessingError", + "RESPONSES", + "SERVER_SOFTWARE", + # .http_writer + "StreamWriter", + "HttpVersion", + "HttpVersion10", + "HttpVersion11", + # .http_parser + "HeadersParser", + "HttpParser", + "HttpRequestParser", + "HttpResponseParser", + "RawRequestMessage", + "RawResponseMessage", + # .http_websocket + "WS_CLOSED_MESSAGE", + "WS_CLOSING_MESSAGE", + "WS_KEY", + "WebSocketReader", + "WebSocketWriter", + "ws_ext_gen", + "ws_ext_parse", + "WSMessage", + "WebSocketError", + "WSMsgType", + "WSCloseCode", +) + + +SERVER_SOFTWARE: str = "Python/{0[0]}.{0[1]} aiohttp/{1}".format( + sys.version_info, __version__ +) + +RESPONSES: Mapping[int, Tuple[str, str]] = { + v: (v.phrase, v.description) for v in HTTPStatus.__members__.values() +} diff --git a/py311/lib/python3.11/site-packages/aiohttp/http_exceptions.py b/py311/lib/python3.11/site-packages/aiohttp/http_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..0b5867c7861ec0ba2558784be068476a129954cd --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/http_exceptions.py @@ -0,0 +1,116 @@ +"""Low-level http related exceptions.""" + +from textwrap import indent +from typing import Optional, Union + +from .typedefs import _CIMultiDict + +__all__ = ("HttpProcessingError",) + + +class HttpProcessingError(Exception): + """HTTP error. + + Shortcut for raising HTTP errors with custom code, message and headers. + + code: HTTP Error code. + message: (optional) Error message. 
+ headers: (optional) Headers to be sent in response, a list of pairs + """ + + code = 0 + message = "" + headers = None + + def __init__( + self, + *, + code: Optional[int] = None, + message: str = "", + headers: Optional[_CIMultiDict] = None, + ) -> None: + if code is not None: + self.code = code + self.headers = headers + self.message = message + + def __str__(self) -> str: + msg = indent(self.message, " ") + return f"{self.code}, message:\n{msg}" + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}: {self.code}, message={self.message!r}>" + + +class BadHttpMessage(HttpProcessingError): + + code = 400 + message = "Bad Request" + + def __init__(self, message: str, *, headers: Optional[_CIMultiDict] = None) -> None: + super().__init__(message=message, headers=headers) + self.args = (message,) + + +class HttpBadRequest(BadHttpMessage): + + code = 400 + message = "Bad Request" + + +class PayloadEncodingError(BadHttpMessage): + """Base class for payload errors""" + + +class ContentEncodingError(PayloadEncodingError): + """Content encoding error.""" + + +class TransferEncodingError(PayloadEncodingError): + """transfer encoding error.""" + + +class ContentLengthError(PayloadEncodingError): + """Not enough data to satisfy content length header.""" + + +class DecompressSizeError(PayloadEncodingError): + """Decompressed size exceeds the configured limit.""" + + +class LineTooLong(BadHttpMessage): + def __init__( + self, line: str, limit: str = "Unknown", actual_size: str = "Unknown" + ) -> None: + super().__init__( + f"Got more than {limit} bytes ({actual_size}) when reading {line}." + ) + self.args = (line, limit, actual_size) + + +class InvalidHeader(BadHttpMessage): + def __init__(self, hdr: Union[bytes, str]) -> None: + hdr_s = hdr.decode(errors="backslashreplace") if isinstance(hdr, bytes) else hdr + super().__init__(f"Invalid HTTP header: {hdr!r}") + self.hdr = hdr_s + self.args = (hdr,) + + +class BadStatusLine(BadHttpMessage): + def __init__(self, line: str = "", error: Optional[str] = None) -> None: + if not isinstance(line, str): + line = repr(line) + super().__init__(error or f"Bad status line {line!r}") + self.args = (line,) + self.line = line + + +class BadHttpMethod(BadStatusLine): + """Invalid HTTP method in status line.""" + + def __init__(self, line: str = "", error: Optional[str] = None) -> None: + super().__init__(line, error or f"Bad HTTP method in status line {line!r}") + + +class InvalidURLError(BadHttpMessage): + pass diff --git a/py311/lib/python3.11/site-packages/aiohttp/http_parser.py b/py311/lib/python3.11/site-packages/aiohttp/http_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..393e76a15865f3f536a4483ab7a3b1cfcd0702e5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/http_parser.py @@ -0,0 +1,1086 @@ +import abc +import asyncio +import re +import string +from contextlib import suppress +from enum import IntEnum +from typing import ( + Any, + ClassVar, + Final, + Generic, + List, + Literal, + NamedTuple, + Optional, + Pattern, + Set, + Tuple, + Type, + TypeVar, + Union, +) + +from multidict import CIMultiDict, CIMultiDictProxy, istr +from yarl import URL + +from . 
import hdrs +from .base_protocol import BaseProtocol +from .compression_utils import ( + DEFAULT_MAX_DECOMPRESS_SIZE, + HAS_BROTLI, + HAS_ZSTD, + BrotliDecompressor, + ZLibDecompressor, + ZSTDDecompressor, +) +from .helpers import ( + _EXC_SENTINEL, + DEBUG, + EMPTY_BODY_METHODS, + EMPTY_BODY_STATUS_CODES, + NO_EXTENSIONS, + BaseTimerContext, + set_exception, +) +from .http_exceptions import ( + BadHttpMessage, + BadHttpMethod, + BadStatusLine, + ContentEncodingError, + ContentLengthError, + DecompressSizeError, + InvalidHeader, + InvalidURLError, + LineTooLong, + TransferEncodingError, +) +from .http_writer import HttpVersion, HttpVersion10 +from .streams import EMPTY_PAYLOAD, StreamReader +from .typedefs import RawHeaders + +__all__ = ( + "HeadersParser", + "HttpParser", + "HttpRequestParser", + "HttpResponseParser", + "RawRequestMessage", + "RawResponseMessage", +) + +_SEP = Literal[b"\r\n", b"\n"] + +ASCIISET: Final[Set[str]] = set(string.printable) + +# See https://www.rfc-editor.org/rfc/rfc9110.html#name-overview +# and https://www.rfc-editor.org/rfc/rfc9110.html#name-tokens +# +# method = token +# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +# "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +# token = 1*tchar +_TCHAR_SPECIALS: Final[str] = re.escape("!#$%&'*+-.^_`|~") +TOKENRE: Final[Pattern[str]] = re.compile(f"[0-9A-Za-z{_TCHAR_SPECIALS}]+") +VERSRE: Final[Pattern[str]] = re.compile(r"HTTP/(\d)\.(\d)", re.ASCII) +DIGITS: Final[Pattern[str]] = re.compile(r"\d+", re.ASCII) +HEXDIGITS: Final[Pattern[bytes]] = re.compile(rb"[0-9a-fA-F]+") + + +class RawRequestMessage(NamedTuple): + method: str + path: str + version: HttpVersion + headers: "CIMultiDictProxy[str]" + raw_headers: RawHeaders + should_close: bool + compression: Optional[str] + upgrade: bool + chunked: bool + url: URL + + +class RawResponseMessage(NamedTuple): + version: HttpVersion + code: int + reason: str + headers: CIMultiDictProxy[str] + raw_headers: RawHeaders + should_close: bool + compression: Optional[str] + upgrade: bool + chunked: bool + + +_MsgT = TypeVar("_MsgT", RawRequestMessage, RawResponseMessage) + + +class ParseState(IntEnum): + + PARSE_NONE = 0 + PARSE_LENGTH = 1 + PARSE_CHUNKED = 2 + PARSE_UNTIL_EOF = 3 + + +class ChunkState(IntEnum): + PARSE_CHUNKED_SIZE = 0 + PARSE_CHUNKED_CHUNK = 1 + PARSE_CHUNKED_CHUNK_EOF = 2 + PARSE_MAYBE_TRAILERS = 3 + PARSE_TRAILERS = 4 + + +class HeadersParser: + def __init__( + self, + max_line_size: int = 8190, + max_headers: int = 32768, + max_field_size: int = 8190, + lax: bool = False, + ) -> None: + self.max_line_size = max_line_size + self.max_headers = max_headers + self.max_field_size = max_field_size + self._lax = lax + + def parse_headers( + self, lines: List[bytes] + ) -> Tuple["CIMultiDictProxy[str]", RawHeaders]: + headers: CIMultiDict[str] = CIMultiDict() + # note: "raw" does not mean inclusion of OWS before/after the field value + raw_headers = [] + + lines_idx = 0 + line = lines[lines_idx] + line_count = len(lines) + + while line: + # Parse initial header name : value pair. 
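
The `TOKENRE` pattern defined above encodes the RFC 9110 `token` grammar used to validate method and header-field names; a quick sketch of what it accepts:

```python
import re

_TCHAR_SPECIALS = re.escape("!#$%&'*+-.^_`|~")
TOKENRE = re.compile(f"[0-9A-Za-z{_TCHAR_SPECIALS}]+")

assert TOKENRE.fullmatch("Content-Type")        # '-' is a valid tchar
assert TOKENRE.fullmatch("X_Custom.Header")     # so are '_' and '.'
assert TOKENRE.fullmatch("Bad Header") is None  # spaces are not
assert TOKENRE.fullmatch("naïve") is None       # neither is non-ASCII
```
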
+ try: + bname, bvalue = line.split(b":", 1) + except ValueError: + raise InvalidHeader(line) from None + + if len(bname) == 0: + raise InvalidHeader(bname) + + # https://www.rfc-editor.org/rfc/rfc9112.html#section-5.1-2 + if {bname[0], bname[-1]} & {32, 9}: # {" ", "\t"} + raise InvalidHeader(line) + + bvalue = bvalue.lstrip(b" \t") + if len(bname) > self.max_field_size: + raise LineTooLong( + "request header name {}".format( + bname.decode("utf8", "backslashreplace") + ), + str(self.max_field_size), + str(len(bname)), + ) + name = bname.decode("utf-8", "surrogateescape") + if not TOKENRE.fullmatch(name): + raise InvalidHeader(bname) + + header_length = len(bvalue) + + # next line + lines_idx += 1 + line = lines[lines_idx] + + # consume continuation lines + continuation = self._lax and line and line[0] in (32, 9) # (' ', '\t') + + # Deprecated: https://www.rfc-editor.org/rfc/rfc9112.html#name-obsolete-line-folding + if continuation: + bvalue_lst = [bvalue] + while continuation: + header_length += len(line) + if header_length > self.max_field_size: + raise LineTooLong( + "request header field {}".format( + bname.decode("utf8", "backslashreplace") + ), + str(self.max_field_size), + str(header_length), + ) + bvalue_lst.append(line) + + # next line + lines_idx += 1 + if lines_idx < line_count: + line = lines[lines_idx] + if line: + continuation = line[0] in (32, 9) # (' ', '\t') + else: + line = b"" + break + bvalue = b"".join(bvalue_lst) + else: + if header_length > self.max_field_size: + raise LineTooLong( + "request header field {}".format( + bname.decode("utf8", "backslashreplace") + ), + str(self.max_field_size), + str(header_length), + ) + + bvalue = bvalue.strip(b" \t") + value = bvalue.decode("utf-8", "surrogateescape") + + # https://www.rfc-editor.org/rfc/rfc9110.html#section-5.5-5 + if "\n" in value or "\r" in value or "\x00" in value: + raise InvalidHeader(bvalue) + + headers.add(name, value) + raw_headers.append((bname, bvalue)) + + return (CIMultiDictProxy(headers), tuple(raw_headers)) + + +def _is_supported_upgrade(headers: CIMultiDictProxy[str]) -> bool: + """Check if the upgrade header is supported.""" + u = headers.get(hdrs.UPGRADE, "") + # .lower() can transform non-ascii characters. 
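
Driving `HeadersParser` directly looks like the sketch below; it expects the header block as a list of bytes lines terminated by an empty line (header values are illustrative):

```python
from aiohttp.http_parser import HeadersParser

parser = HeadersParser()
headers, raw_headers = parser.parse_headers(
    [b"Host: example.com", b"Accept: text/html", b""]
)
assert headers["Host"] == "example.com"  # case-insensitive multidict
assert raw_headers[0] == (b"Host", b"example.com")
```
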
+ return u.isascii() and u.lower() in {"tcp", "websocket"} + + +class HttpParser(abc.ABC, Generic[_MsgT]): + lax: ClassVar[bool] = False + + def __init__( + self, + protocol: Optional[BaseProtocol] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, + limit: int = 2**16, + max_line_size: int = 8190, + max_headers: int = 32768, + max_field_size: int = 8190, + timer: Optional[BaseTimerContext] = None, + code: Optional[int] = None, + method: Optional[str] = None, + payload_exception: Optional[Type[BaseException]] = None, + response_with_body: bool = True, + read_until_eof: bool = False, + auto_decompress: bool = True, + ) -> None: + self.protocol = protocol + self.loop = loop + self.max_line_size = max_line_size + self.max_headers = max_headers + self.max_field_size = max_field_size + self.timer = timer + self.code = code + self.method = method + self.payload_exception = payload_exception + self.response_with_body = response_with_body + self.read_until_eof = read_until_eof + + self._lines: List[bytes] = [] + self._tail = b"" + self._upgraded = False + self._payload = None + self._payload_parser: Optional[HttpPayloadParser] = None + self._auto_decompress = auto_decompress + self._limit = limit + self._headers_parser = HeadersParser( + max_line_size, max_headers, max_field_size, self.lax + ) + + @abc.abstractmethod + def parse_message(self, lines: List[bytes]) -> _MsgT: ... + + @abc.abstractmethod + def _is_chunked_te(self, te: str) -> bool: ... + + def feed_eof(self) -> Optional[_MsgT]: + if self._payload_parser is not None: + self._payload_parser.feed_eof() + self._payload_parser = None + else: + # try to extract partial message + if self._tail: + self._lines.append(self._tail) + + if self._lines: + if self._lines[-1] != "\r\n": + self._lines.append(b"") + with suppress(Exception): + return self.parse_message(self._lines) + return None + + def feed_data( + self, + data: bytes, + SEP: _SEP = b"\r\n", + EMPTY: bytes = b"", + CONTENT_LENGTH: istr = hdrs.CONTENT_LENGTH, + METH_CONNECT: str = hdrs.METH_CONNECT, + SEC_WEBSOCKET_KEY1: istr = hdrs.SEC_WEBSOCKET_KEY1, + ) -> Tuple[List[Tuple[_MsgT, StreamReader]], bool, bytes]: + + messages = [] + + if self._tail: + data, self._tail = self._tail + data, b"" + + data_len = len(data) + start_pos = 0 + loop = self.loop + + should_close = False + while start_pos < data_len: + + # read HTTP message (request/response line + headers), \r\n\r\n + # and split by lines + if self._payload_parser is None and not self._upgraded: + pos = data.find(SEP, start_pos) + # consume \r\n + if pos == start_pos and not self._lines: + start_pos = pos + len(SEP) + continue + + if pos >= start_pos: + if should_close: + raise BadHttpMessage("Data after `Connection: close`") + + # line found + line = data[start_pos:pos] + if SEP == b"\n": # For lax response parsing + line = line.rstrip(b"\r") + self._lines.append(line) + start_pos = pos + len(SEP) + + # \r\n\r\n found + if self._lines[-1] == EMPTY: + try: + msg: _MsgT = self.parse_message(self._lines) + finally: + self._lines.clear() + + def get_content_length() -> Optional[int]: + # payload length + length_hdr = msg.headers.get(CONTENT_LENGTH) + if length_hdr is None: + return None + + # Shouldn't allow +/- or other number formats. 
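
The strict digit check just below matters because Python's `int()` is far more permissive than the RFC 9110 ABNF; a quick illustration:

```python
import re

DIGITS = re.compile(r"\d+", re.ASCII)

assert int("+5") == 5 and int(" 42 ") == 42  # int() tolerates sign/whitespace
assert DIGITS.fullmatch("+5") is None        # rejected: a sign is not a DIGIT
assert DIGITS.fullmatch("42") is not None    # plain ASCII digits only
```
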
+ # https://www.rfc-editor.org/rfc/rfc9110#section-8.6-2 + # msg.headers is already stripped of leading/trailing wsp + if not DIGITS.fullmatch(length_hdr): + raise InvalidHeader(CONTENT_LENGTH) + + return int(length_hdr) + + length = get_content_length() + # do not support old websocket spec + if SEC_WEBSOCKET_KEY1 in msg.headers: + raise InvalidHeader(SEC_WEBSOCKET_KEY1) + + self._upgraded = msg.upgrade and _is_supported_upgrade( + msg.headers + ) + + method = getattr(msg, "method", self.method) + # code is only present on responses + code = getattr(msg, "code", 0) + + assert self.protocol is not None + # calculate payload + empty_body = code in EMPTY_BODY_STATUS_CODES or bool( + method and method in EMPTY_BODY_METHODS + ) + if not empty_body and ( + ((length is not None and length > 0) or msg.chunked) + and not self._upgraded + ): + payload = StreamReader( + self.protocol, + timer=self.timer, + loop=loop, + limit=self._limit, + ) + payload_parser = HttpPayloadParser( + payload, + length=length, + chunked=msg.chunked, + method=method, + compression=msg.compression, + code=self.code, + response_with_body=self.response_with_body, + auto_decompress=self._auto_decompress, + lax=self.lax, + headers_parser=self._headers_parser, + ) + if not payload_parser.done: + self._payload_parser = payload_parser + elif method == METH_CONNECT: + assert isinstance(msg, RawRequestMessage) + payload = StreamReader( + self.protocol, + timer=self.timer, + loop=loop, + limit=self._limit, + ) + self._upgraded = True + self._payload_parser = HttpPayloadParser( + payload, + method=msg.method, + compression=msg.compression, + auto_decompress=self._auto_decompress, + lax=self.lax, + headers_parser=self._headers_parser, + ) + elif not empty_body and length is None and self.read_until_eof: + payload = StreamReader( + self.protocol, + timer=self.timer, + loop=loop, + limit=self._limit, + ) + payload_parser = HttpPayloadParser( + payload, + length=length, + chunked=msg.chunked, + method=method, + compression=msg.compression, + code=self.code, + response_with_body=self.response_with_body, + auto_decompress=self._auto_decompress, + lax=self.lax, + headers_parser=self._headers_parser, + ) + if not payload_parser.done: + self._payload_parser = payload_parser + else: + payload = EMPTY_PAYLOAD + + messages.append((msg, payload)) + should_close = msg.should_close + else: + self._tail = data[start_pos:] + data = EMPTY + break + + # no parser, just store + elif self._payload_parser is None and self._upgraded: + assert not self._lines + break + + # feed payload + elif data and start_pos < data_len: + assert not self._lines + assert self._payload_parser is not None + try: + eof, data = self._payload_parser.feed_data(data[start_pos:], SEP) + except BaseException as underlying_exc: + reraised_exc = underlying_exc + if self.payload_exception is not None: + reraised_exc = self.payload_exception(str(underlying_exc)) + + set_exception( + self._payload_parser.payload, + reraised_exc, + underlying_exc, + ) + + eof = True + data = b"" + if isinstance( + underlying_exc, (InvalidHeader, TransferEncodingError) + ): + raise + + if eof: + start_pos = 0 + data_len = len(data) + self._payload_parser = None + continue + else: + break + + if data and start_pos < data_len: + data = data[start_pos:] + else: + data = EMPTY + + return messages, self._upgraded, data + + def parse_headers( + self, lines: List[bytes] + ) -> Tuple[ + "CIMultiDictProxy[str]", RawHeaders, Optional[bool], Optional[str], bool, bool + ]: + """Parses RFC 5322 headers from a 
stream. + + Line continuations are supported. Returns list of header name + and value pairs. Header name is in upper case. + """ + headers, raw_headers = self._headers_parser.parse_headers(lines) + close_conn = None + encoding = None + upgrade = False + chunked = False + + # https://www.rfc-editor.org/rfc/rfc9110.html#section-5.5-6 + # https://www.rfc-editor.org/rfc/rfc9110.html#name-collected-abnf + singletons = ( + hdrs.CONTENT_LENGTH, + hdrs.CONTENT_LOCATION, + hdrs.CONTENT_RANGE, + hdrs.CONTENT_TYPE, + hdrs.ETAG, + hdrs.HOST, + hdrs.MAX_FORWARDS, + hdrs.SERVER, + hdrs.TRANSFER_ENCODING, + hdrs.USER_AGENT, + ) + bad_hdr = next((h for h in singletons if len(headers.getall(h, ())) > 1), None) + if bad_hdr is not None: + raise BadHttpMessage(f"Duplicate '{bad_hdr}' header found.") + + # keep-alive + conn = headers.get(hdrs.CONNECTION) + if conn: + v = conn.lower() + if v == "close": + close_conn = True + elif v == "keep-alive": + close_conn = False + # https://www.rfc-editor.org/rfc/rfc9110.html#name-101-switching-protocols + elif v == "upgrade" and headers.get(hdrs.UPGRADE): + upgrade = True + + # encoding + enc = headers.get(hdrs.CONTENT_ENCODING, "") + if enc.isascii() and enc.lower() in {"gzip", "deflate", "br", "zstd"}: + encoding = enc + + # chunking + te = headers.get(hdrs.TRANSFER_ENCODING) + if te is not None: + if self._is_chunked_te(te): + chunked = True + + if hdrs.CONTENT_LENGTH in headers: + raise BadHttpMessage( + "Transfer-Encoding can't be present with Content-Length", + ) + + return (headers, raw_headers, close_conn, encoding, upgrade, chunked) + + def set_upgraded(self, val: bool) -> None: + """Set connection upgraded (to websocket) mode. + + :param bool val: new state. + """ + self._upgraded = val + + +class HttpRequestParser(HttpParser[RawRequestMessage]): + """Read request status line. + + Exception .http_exceptions.BadStatusLine + could be raised in case of any errors in status line. + Returns RawRequestMessage. 
+ """ + + def parse_message(self, lines: List[bytes]) -> RawRequestMessage: + # request line + line = lines[0].decode("utf-8", "surrogateescape") + try: + method, path, version = line.split(" ", maxsplit=2) + except ValueError: + raise BadHttpMethod(line) from None + + if len(path) > self.max_line_size: + raise LineTooLong( + "Status line is too long", str(self.max_line_size), str(len(path)) + ) + + # method + if not TOKENRE.fullmatch(method): + raise BadHttpMethod(method) + + # version + match = VERSRE.fullmatch(version) + if match is None: + raise BadStatusLine(line) + version_o = HttpVersion(int(match.group(1)), int(match.group(2))) + + if method == "CONNECT": + # authority-form, + # https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.3 + url = URL.build(authority=path, encoded=True) + elif path.startswith("/"): + # origin-form, + # https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.1 + path_part, _hash_separator, url_fragment = path.partition("#") + path_part, _question_mark_separator, qs_part = path_part.partition("?") + + # NOTE: `yarl.URL.build()` is used to mimic what the Cython-based + # NOTE: parser does, otherwise it results into the same + # NOTE: HTTP Request-Line input producing different + # NOTE: `yarl.URL()` objects + url = URL.build( + path=path_part, + query_string=qs_part, + fragment=url_fragment, + encoded=True, + ) + elif path == "*" and method == "OPTIONS": + # asterisk-form, + url = URL(path, encoded=True) + else: + # absolute-form for proxy maybe, + # https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.2 + url = URL(path, encoded=True) + if url.scheme == "": + # not absolute-form + raise InvalidURLError( + path.encode(errors="surrogateescape").decode("latin1") + ) + + # read headers + ( + headers, + raw_headers, + close, + compression, + upgrade, + chunked, + ) = self.parse_headers(lines[1:]) + + if close is None: # then the headers weren't set in the request + if version_o <= HttpVersion10: # HTTP 1.0 must asks to not close + close = True + else: # HTTP 1.1 must ask to close. + close = False + + return RawRequestMessage( + method, + path, + version_o, + headers, + raw_headers, + close, + compression, + upgrade, + chunked, + url, + ) + + def _is_chunked_te(self, te: str) -> bool: + te = te.rsplit(",", maxsplit=1)[-1].strip(" \t") + # .lower() transforms some non-ascii chars, so must check first. + if te.isascii() and te.lower() == "chunked": + return True + # https://www.rfc-editor.org/rfc/rfc9112#section-6.3-2.4.3 + raise BadHttpMessage("Request has invalid `Transfer-Encoding`") + + +class HttpResponseParser(HttpParser[RawResponseMessage]): + """Read response status line and headers. + + BadStatusLine could be raised in case of any errors in status line. + Returns RawResponseMessage. + """ + + # Lax mode should only be enabled on response parser. 
+ lax = not DEBUG + + def feed_data( + self, + data: bytes, + SEP: Optional[_SEP] = None, + *args: Any, + **kwargs: Any, + ) -> Tuple[List[Tuple[RawResponseMessage, StreamReader]], bool, bytes]: + if SEP is None: + SEP = b"\r\n" if DEBUG else b"\n" + return super().feed_data(data, SEP, *args, **kwargs) + + def parse_message(self, lines: List[bytes]) -> RawResponseMessage: + line = lines[0].decode("utf-8", "surrogateescape") + try: + version, status = line.split(maxsplit=1) + except ValueError: + raise BadStatusLine(line) from None + + try: + status, reason = status.split(maxsplit=1) + except ValueError: + status = status.strip() + reason = "" + + if len(reason) > self.max_line_size: + raise LineTooLong( + "Status line is too long", str(self.max_line_size), str(len(reason)) + ) + + # version + match = VERSRE.fullmatch(version) + if match is None: + raise BadStatusLine(line) + version_o = HttpVersion(int(match.group(1)), int(match.group(2))) + + # The status code is a three-digit ASCII number, no padding + if len(status) != 3 or not DIGITS.fullmatch(status): + raise BadStatusLine(line) + status_i = int(status) + + # read headers + ( + headers, + raw_headers, + close, + compression, + upgrade, + chunked, + ) = self.parse_headers(lines[1:]) + + if close is None: + if version_o <= HttpVersion10: + close = True + # https://www.rfc-editor.org/rfc/rfc9112.html#name-message-body-length + elif 100 <= status_i < 200 or status_i in {204, 304}: + close = False + elif hdrs.CONTENT_LENGTH in headers or hdrs.TRANSFER_ENCODING in headers: + close = False + else: + # https://www.rfc-editor.org/rfc/rfc9112.html#section-6.3-2.8 + close = True + + return RawResponseMessage( + version_o, + status_i, + reason.strip(), + headers, + raw_headers, + close, + compression, + upgrade, + chunked, + ) + + def _is_chunked_te(self, te: str) -> bool: + # https://www.rfc-editor.org/rfc/rfc9112#section-6.3-2.4.2 + return te.rsplit(",", maxsplit=1)[-1].strip(" \t").lower() == "chunked" + + +class HttpPayloadParser: + def __init__( + self, + payload: StreamReader, + length: Optional[int] = None, + chunked: bool = False, + compression: Optional[str] = None, + code: Optional[int] = None, + method: Optional[str] = None, + response_with_body: bool = True, + auto_decompress: bool = True, + lax: bool = False, + *, + headers_parser: HeadersParser, + ) -> None: + self._length = 0 + self._type = ParseState.PARSE_UNTIL_EOF + self._chunk = ChunkState.PARSE_CHUNKED_SIZE + self._chunk_size = 0 + self._chunk_tail = b"" + self._auto_decompress = auto_decompress + self._lax = lax + self._headers_parser = headers_parser + self._trailer_lines: list[bytes] = [] + self.done = False + + # payload decompression wrapper + if response_with_body and compression and self._auto_decompress: + real_payload: Union[StreamReader, DeflateBuffer] = DeflateBuffer( + payload, compression + ) + else: + real_payload = payload + + # payload parser + if not response_with_body: + # don't parse payload if it's not expected to be received + self._type = ParseState.PARSE_NONE + real_payload.feed_eof() + self.done = True + elif chunked: + self._type = ParseState.PARSE_CHUNKED + elif length is not None: + self._type = ParseState.PARSE_LENGTH + self._length = length + if self._length == 0: + real_payload.feed_eof() + self.done = True + + self.payload = real_payload + + def feed_eof(self) -> None: + if self._type == ParseState.PARSE_UNTIL_EOF: + self.payload.feed_eof() + elif self._type == ParseState.PARSE_LENGTH: + raise ContentLengthError( + "Not enough data to satisfy 
content length header." + ) + elif self._type == ParseState.PARSE_CHUNKED: + raise TransferEncodingError( + "Not enough data to satisfy transfer length header." + ) + + def feed_data( + self, chunk: bytes, SEP: _SEP = b"\r\n", CHUNK_EXT: bytes = b";" + ) -> Tuple[bool, bytes]: + # Read specified amount of bytes + if self._type == ParseState.PARSE_LENGTH: + required = self._length + chunk_len = len(chunk) + + if required >= chunk_len: + self._length = required - chunk_len + self.payload.feed_data(chunk, chunk_len) + if self._length == 0: + self.payload.feed_eof() + return True, b"" + else: + self._length = 0 + self.payload.feed_data(chunk[:required], required) + self.payload.feed_eof() + return True, chunk[required:] + + # Chunked transfer encoding parser + elif self._type == ParseState.PARSE_CHUNKED: + if self._chunk_tail: + chunk = self._chunk_tail + chunk + self._chunk_tail = b"" + + while chunk: + + # read next chunk size + if self._chunk == ChunkState.PARSE_CHUNKED_SIZE: + pos = chunk.find(SEP) + if pos >= 0: + i = chunk.find(CHUNK_EXT, 0, pos) + if i >= 0: + size_b = chunk[:i] # strip chunk-extensions + # Verify no LF in the chunk-extension + if b"\n" in (ext := chunk[i:pos]): + exc = TransferEncodingError( + f"Unexpected LF in chunk-extension: {ext!r}" + ) + set_exception(self.payload, exc) + raise exc + else: + size_b = chunk[:pos] + + if self._lax: # Allow whitespace in lax mode. + size_b = size_b.strip() + + if not re.fullmatch(HEXDIGITS, size_b): + exc = TransferEncodingError( + chunk[:pos].decode("ascii", "surrogateescape") + ) + set_exception(self.payload, exc) + raise exc + size = int(bytes(size_b), 16) + + chunk = chunk[pos + len(SEP) :] + if size == 0: # eof marker + self._chunk = ChunkState.PARSE_TRAILERS + if self._lax and chunk.startswith(b"\r"): + chunk = chunk[1:] + else: + self._chunk = ChunkState.PARSE_CHUNKED_CHUNK + self._chunk_size = size + self.payload.begin_http_chunk_receiving() + else: + self._chunk_tail = chunk + return False, b"" + + # read chunk and feed buffer + if self._chunk == ChunkState.PARSE_CHUNKED_CHUNK: + required = self._chunk_size + chunk_len = len(chunk) + + if required > chunk_len: + self._chunk_size = required - chunk_len + self.payload.feed_data(chunk, chunk_len) + return False, b"" + else: + self._chunk_size = 0 + self.payload.feed_data(chunk[:required], required) + chunk = chunk[required:] + self._chunk = ChunkState.PARSE_CHUNKED_CHUNK_EOF + self.payload.end_http_chunk_receiving() + + # toss the CRLF at the end of the chunk + if self._chunk == ChunkState.PARSE_CHUNKED_CHUNK_EOF: + if self._lax and chunk.startswith(b"\r"): + chunk = chunk[1:] + if chunk[: len(SEP)] == SEP: + chunk = chunk[len(SEP) :] + self._chunk = ChunkState.PARSE_CHUNKED_SIZE + else: + self._chunk_tail = chunk + return False, b"" + + if self._chunk == ChunkState.PARSE_TRAILERS: + pos = chunk.find(SEP) + if pos < 0: # No line found + self._chunk_tail = chunk + return False, b"" + + line = chunk[:pos] + chunk = chunk[pos + len(SEP) :] + if SEP == b"\n": # For lax response parsing + line = line.rstrip(b"\r") + self._trailer_lines.append(line) + + # \r\n\r\n found, end of stream + if self._trailer_lines[-1] == b"": + # Headers and trailers are defined the same way, + # so we reuse the HeadersParser here. 
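
For orientation, the chunked framing this state machine walks looks as follows; a hand-written wire sample, not parser output:

```python
# A chunked body as it appears on the wire; chunk sizes are hexadecimal.
wire = (
    b"4\r\n"           # PARSE_CHUNKED_SIZE: a 4-byte chunk follows
    b"Wiki\r\n"        # PARSE_CHUNKED_CHUNK, then CHUNK_EOF consumes the CRLF
    b"6\r\n"
    b"pedia \r\n"
    b"0\r\n"           # size 0 is the EOF marker -> PARSE_TRAILERS
    b"Expires: 0\r\n"  # optional trailer fields, parsed by HeadersParser
    b"\r\n"            # a blank line ends the trailer section
)
```
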
+ try: + trailers, raw_trailers = self._headers_parser.parse_headers( + self._trailer_lines + ) + finally: + self._trailer_lines.clear() + self.payload.feed_eof() + return True, chunk + + # Read all bytes until eof + elif self._type == ParseState.PARSE_UNTIL_EOF: + self.payload.feed_data(chunk, len(chunk)) + + return False, b"" + + +class DeflateBuffer: + """DeflateStream decompress stream and feed data into specified stream.""" + + decompressor: Any + + def __init__( + self, + out: StreamReader, + encoding: Optional[str], + max_decompress_size: int = DEFAULT_MAX_DECOMPRESS_SIZE, + ) -> None: + self.out = out + self.size = 0 + out.total_compressed_bytes = self.size + self.encoding = encoding + self._started_decoding = False + + self.decompressor: Union[BrotliDecompressor, ZLibDecompressor, ZSTDDecompressor] + if encoding == "br": + if not HAS_BROTLI: # pragma: no cover + raise ContentEncodingError( + "Can not decode content-encoding: brotli (br). " + "Please install `Brotli`" + ) + self.decompressor = BrotliDecompressor() + elif encoding == "zstd": + if not HAS_ZSTD: + raise ContentEncodingError( + "Can not decode content-encoding: zstandard (zstd). " + "Please install `backports.zstd`" + ) + self.decompressor = ZSTDDecompressor() + else: + self.decompressor = ZLibDecompressor(encoding=encoding) + + self._max_decompress_size = max_decompress_size + + def set_exception( + self, + exc: BaseException, + exc_cause: BaseException = _EXC_SENTINEL, + ) -> None: + set_exception(self.out, exc, exc_cause) + + def feed_data(self, chunk: bytes, size: int) -> None: + if not size: + return + + self.size += size + self.out.total_compressed_bytes = self.size + + # RFC1950 + # bits 0..3 = CM = 0b1000 = 8 = "deflate" + # bits 4..7 = CINFO = 1..7 = windows size. + if ( + not self._started_decoding + and self.encoding == "deflate" + and chunk[0] & 0xF != 8 + ): + # Change the decoder to decompress incorrectly compressed data + # Actually we should issue a warning about non-RFC-compliant data. 
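
The nibble check above distinguishes an RFC 1950 zlib wrapper from a bare deflate stream; a small sketch (the first byte of a raw stream varies, but its low nibble is not reliably 8):

```python
import zlib

wrapped = zlib.compress(b"hello")  # RFC 1950: zlib header + deflate data
assert wrapped[0] & 0xF == 8       # CM field == 8 means "deflate"

co = zlib.compressobj(wbits=-15)   # negative wbits -> raw deflate, no header
bare = co.compress(b"hello") + co.flush()
# A first nibble other than 8 triggers the suppress_deflate_header fallback.
```
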
+ self.decompressor = ZLibDecompressor( + encoding=self.encoding, suppress_deflate_header=True + ) + + try: + # Decompress with limit + 1 so we can detect if output exceeds limit + chunk = self.decompressor.decompress_sync( + chunk, max_length=self._max_decompress_size + 1 + ) + except Exception: + raise ContentEncodingError( + "Can not decode content-encoding: %s" % self.encoding + ) + + self._started_decoding = True + + # Check if decompression limit was exceeded + if len(chunk) > self._max_decompress_size: + raise DecompressSizeError( + "Decompressed data exceeds the configured limit of %d bytes" + % self._max_decompress_size + ) + + if chunk: + self.out.feed_data(chunk, len(chunk)) + + def feed_eof(self) -> None: + chunk = self.decompressor.flush() + + if chunk or self.size > 0: + self.out.feed_data(chunk, len(chunk)) + if self.encoding == "deflate" and not self.decompressor.eof: + raise ContentEncodingError("deflate") + + self.out.feed_eof() + + def begin_http_chunk_receiving(self) -> None: + self.out.begin_http_chunk_receiving() + + def end_http_chunk_receiving(self) -> None: + self.out.end_http_chunk_receiving() + + +HttpRequestParserPy = HttpRequestParser +HttpResponseParserPy = HttpResponseParser +RawRequestMessagePy = RawRequestMessage +RawResponseMessagePy = RawResponseMessage + +try: + if not NO_EXTENSIONS: + from ._http_parser import ( # type: ignore[import-not-found,no-redef] + HttpRequestParser, + HttpResponseParser, + RawRequestMessage, + RawResponseMessage, + ) + + HttpRequestParserC = HttpRequestParser + HttpResponseParserC = HttpResponseParser + RawRequestMessageC = RawRequestMessage + RawResponseMessageC = RawResponseMessage +except ImportError: # pragma: no cover + pass diff --git a/py311/lib/python3.11/site-packages/aiohttp/http_websocket.py b/py311/lib/python3.11/site-packages/aiohttp/http_websocket.py new file mode 100644 index 0000000000000000000000000000000000000000..6b4b30e02b247e30e0c84d3eb118b749bbe52079 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/http_websocket.py @@ -0,0 +1,36 @@ +"""WebSocket protocol versions 13 and 8.""" + +from ._websocket.helpers import WS_KEY, ws_ext_gen, ws_ext_parse +from ._websocket.models import ( + WS_CLOSED_MESSAGE, + WS_CLOSING_MESSAGE, + WebSocketError, + WSCloseCode, + WSHandshakeError, + WSMessage, + WSMsgType, +) +from ._websocket.reader import WebSocketReader +from ._websocket.writer import WebSocketWriter + +# Messages that the WebSocketResponse.receive needs to handle internally +_INTERNAL_RECEIVE_TYPES = frozenset( + (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.PING, WSMsgType.PONG) +) + + +__all__ = ( + "WS_CLOSED_MESSAGE", + "WS_CLOSING_MESSAGE", + "WS_KEY", + "WebSocketReader", + "WebSocketWriter", + "WSMessage", + "WebSocketError", + "WSMsgType", + "WSCloseCode", + "ws_ext_gen", + "ws_ext_parse", + "WSHandshakeError", + "WSMessage", +) diff --git a/py311/lib/python3.11/site-packages/aiohttp/http_writer.py b/py311/lib/python3.11/site-packages/aiohttp/http_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..a140b218b25fedccb49451c547d4d01326367cfb --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/http_writer.py @@ -0,0 +1,378 @@ +"""Http related parsers and protocol.""" + +import asyncio +import sys +from typing import ( # noqa + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Iterable, + List, + NamedTuple, + Optional, + Union, +) + +from multidict import CIMultiDict + +from .abc import AbstractStreamWriter +from .base_protocol import BaseProtocol +from 
.client_exceptions import ClientConnectionResetError +from .compression_utils import ZLibCompressor +from .helpers import NO_EXTENSIONS + +__all__ = ("StreamWriter", "HttpVersion", "HttpVersion10", "HttpVersion11") + + +MIN_PAYLOAD_FOR_WRITELINES = 2048 +IS_PY313_BEFORE_313_2 = (3, 13, 0) <= sys.version_info < (3, 13, 2) +IS_PY_BEFORE_312_9 = sys.version_info < (3, 12, 9) +SKIP_WRITELINES = IS_PY313_BEFORE_313_2 or IS_PY_BEFORE_312_9 +# writelines is not safe for use +# on Python 3.12+ until 3.12.9 +# on Python 3.13+ until 3.13.2 +# and on older versions it not any faster than write +# CVE-2024-12254: https://github.com/python/cpython/pull/127656 + + +class HttpVersion(NamedTuple): + major: int + minor: int + + +HttpVersion10 = HttpVersion(1, 0) +HttpVersion11 = HttpVersion(1, 1) + + +_T_OnChunkSent = Optional[Callable[[bytes], Awaitable[None]]] +_T_OnHeadersSent = Optional[Callable[["CIMultiDict[str]"], Awaitable[None]]] + + +class StreamWriter(AbstractStreamWriter): + + length: Optional[int] = None + chunked: bool = False + _eof: bool = False + _compress: Optional[ZLibCompressor] = None + + def __init__( + self, + protocol: BaseProtocol, + loop: asyncio.AbstractEventLoop, + on_chunk_sent: _T_OnChunkSent = None, + on_headers_sent: _T_OnHeadersSent = None, + ) -> None: + self._protocol = protocol + self.loop = loop + self._on_chunk_sent: _T_OnChunkSent = on_chunk_sent + self._on_headers_sent: _T_OnHeadersSent = on_headers_sent + self._headers_buf: Optional[bytes] = None + self._headers_written: bool = False + + @property + def transport(self) -> Optional[asyncio.Transport]: + return self._protocol.transport + + @property + def protocol(self) -> BaseProtocol: + return self._protocol + + def enable_chunking(self) -> None: + self.chunked = True + + def enable_compression( + self, encoding: str = "deflate", strategy: Optional[int] = None + ) -> None: + self._compress = ZLibCompressor(encoding=encoding, strategy=strategy) + + def _write(self, chunk: Union[bytes, bytearray, memoryview]) -> None: + size = len(chunk) + self.buffer_size += size + self.output_size += size + transport = self._protocol.transport + if transport is None or transport.is_closing(): + raise ClientConnectionResetError("Cannot write to closing transport") + transport.write(chunk) + + def _writelines(self, chunks: Iterable[bytes]) -> None: + size = 0 + for chunk in chunks: + size += len(chunk) + self.buffer_size += size + self.output_size += size + transport = self._protocol.transport + if transport is None or transport.is_closing(): + raise ClientConnectionResetError("Cannot write to closing transport") + if SKIP_WRITELINES or size < MIN_PAYLOAD_FOR_WRITELINES: + transport.write(b"".join(chunks)) + else: + transport.writelines(chunks) + + def _write_chunked_payload( + self, chunk: Union[bytes, bytearray, "memoryview[int]", "memoryview[bytes]"] + ) -> None: + """Write a chunk with proper chunked encoding.""" + chunk_len_pre = f"{len(chunk):x}\r\n".encode("ascii") + self._writelines((chunk_len_pre, chunk, b"\r\n")) + + def _send_headers_with_payload( + self, + chunk: Union[bytes, bytearray, "memoryview[int]", "memoryview[bytes]"], + is_eof: bool, + ) -> None: + """Send buffered headers with payload, coalescing into single write.""" + # Mark headers as written + self._headers_written = True + headers_buf = self._headers_buf + self._headers_buf = None + + if TYPE_CHECKING: + # Safe because callers (write() and write_eof()) only invoke this method + # after checking that self._headers_buf is truthy + assert headers_buf is not 
None + + if not self.chunked: + # Non-chunked: coalesce headers with body + if chunk: + self._writelines((headers_buf, chunk)) + else: + self._write(headers_buf) + return + + # Coalesce headers with chunked data + if chunk: + chunk_len_pre = f"{len(chunk):x}\r\n".encode("ascii") + if is_eof: + self._writelines((headers_buf, chunk_len_pre, chunk, b"\r\n0\r\n\r\n")) + else: + self._writelines((headers_buf, chunk_len_pre, chunk, b"\r\n")) + elif is_eof: + self._writelines((headers_buf, b"0\r\n\r\n")) + else: + self._write(headers_buf) + + async def write( + self, + chunk: Union[bytes, bytearray, memoryview], + *, + drain: bool = True, + LIMIT: int = 0x10000, + ) -> None: + """ + Writes chunk of data to a stream. + + write_eof() indicates end of stream. + writer can't be used after write_eof() method being called. + write() return drain future. + """ + if self._on_chunk_sent is not None: + await self._on_chunk_sent(chunk) + + if isinstance(chunk, memoryview): + if chunk.nbytes != len(chunk): + # just reshape it + chunk = chunk.cast("c") + + if self._compress is not None: + chunk = await self._compress.compress(chunk) + if not chunk: + return + + if self.length is not None: + chunk_len = len(chunk) + if self.length >= chunk_len: + self.length = self.length - chunk_len + else: + chunk = chunk[: self.length] + self.length = 0 + if not chunk: + return + + # Handle buffered headers for small payload optimization + if self._headers_buf and not self._headers_written: + self._send_headers_with_payload(chunk, False) + if drain and self.buffer_size > LIMIT: + self.buffer_size = 0 + await self.drain() + return + + if chunk: + if self.chunked: + self._write_chunked_payload(chunk) + else: + self._write(chunk) + + if drain and self.buffer_size > LIMIT: + self.buffer_size = 0 + await self.drain() + + async def write_headers( + self, status_line: str, headers: "CIMultiDict[str]" + ) -> None: + """Write headers to the stream.""" + if self._on_headers_sent is not None: + await self._on_headers_sent(headers) + # status + headers + buf = _serialize_headers(status_line, headers) + self._headers_written = False + self._headers_buf = buf + + def send_headers(self) -> None: + """Force sending buffered headers if not already sent.""" + if not self._headers_buf or self._headers_written: + return + + self._headers_written = True + headers_buf = self._headers_buf + self._headers_buf = None + + if TYPE_CHECKING: + # Safe because we only enter this block when self._headers_buf is truthy + assert headers_buf is not None + + self._write(headers_buf) + + def set_eof(self) -> None: + """Indicate that the message is complete.""" + if self._eof: + return + + # If headers haven't been sent yet, send them now + # This handles the case where there's no body at all + if self._headers_buf and not self._headers_written: + self._headers_written = True + headers_buf = self._headers_buf + self._headers_buf = None + + if TYPE_CHECKING: + # Safe because we only enter this block when self._headers_buf is truthy + assert headers_buf is not None + + # Combine headers and chunked EOF marker in a single write + if self.chunked: + self._writelines((headers_buf, b"0\r\n\r\n")) + else: + self._write(headers_buf) + elif self.chunked and self._headers_written: + # Headers already sent, just send the final chunk marker + self._write(b"0\r\n\r\n") + + self._eof = True + + async def write_eof(self, chunk: bytes = b"") -> None: + if self._eof: + return + + if chunk and self._on_chunk_sent is not None: + await self._on_chunk_sent(chunk) + + # Handle 
body/compression + if self._compress: + chunks: List[bytes] = [] + chunks_len = 0 + if chunk and (compressed_chunk := await self._compress.compress(chunk)): + chunks_len = len(compressed_chunk) + chunks.append(compressed_chunk) + + flush_chunk = self._compress.flush() + chunks_len += len(flush_chunk) + chunks.append(flush_chunk) + assert chunks_len + + # Send buffered headers with compressed data if not yet sent + if self._headers_buf and not self._headers_written: + self._headers_written = True + headers_buf = self._headers_buf + self._headers_buf = None + + if self.chunked: + # Coalesce headers with compressed chunked data + chunk_len_pre = f"{chunks_len:x}\r\n".encode("ascii") + self._writelines( + (headers_buf, chunk_len_pre, *chunks, b"\r\n0\r\n\r\n") + ) + else: + # Coalesce headers with compressed data + self._writelines((headers_buf, *chunks)) + await self.drain() + self._eof = True + return + + # Headers already sent, just write compressed data + if self.chunked: + chunk_len_pre = f"{chunks_len:x}\r\n".encode("ascii") + self._writelines((chunk_len_pre, *chunks, b"\r\n0\r\n\r\n")) + elif len(chunks) > 1: + self._writelines(chunks) + else: + self._write(chunks[0]) + await self.drain() + self._eof = True + return + + # No compression - send buffered headers if not yet sent + if self._headers_buf and not self._headers_written: + # Use helper to send headers with payload + self._send_headers_with_payload(chunk, True) + await self.drain() + self._eof = True + return + + # Handle remaining body + if self.chunked: + if chunk: + # Write final chunk with EOF marker + self._writelines( + (f"{len(chunk):x}\r\n".encode("ascii"), chunk, b"\r\n0\r\n\r\n") + ) + else: + self._write(b"0\r\n\r\n") + await self.drain() + self._eof = True + return + + if chunk: + self._write(chunk) + await self.drain() + + self._eof = True + + async def drain(self) -> None: + """Flush the write buffer. + + The intended use is to write + + await w.write(data) + await w.drain() + """ + protocol = self._protocol + if protocol.transport is not None and protocol._paused: + await protocol._drain_helper() + + +def _safe_header(string: str) -> str: + if "\r" in string or "\n" in string: + raise ValueError( + "Newline or carriage return detected in headers. " + "Potential header injection attack." 
+ ) + return string + + +def _py_serialize_headers(status_line: str, headers: "CIMultiDict[str]") -> bytes: + headers_gen = (_safe_header(k) + ": " + _safe_header(v) for k, v in headers.items()) + line = status_line + "\r\n" + "\r\n".join(headers_gen) + "\r\n\r\n" + return line.encode("utf-8") + + +_serialize_headers = _py_serialize_headers + +try: + import aiohttp._http_writer as _http_writer # type: ignore[import-not-found] + + _c_serialize_headers = _http_writer._serialize_headers + if not NO_EXTENSIONS: + _serialize_headers = _c_serialize_headers +except ImportError: + pass diff --git a/py311/lib/python3.11/site-packages/aiohttp/log.py b/py311/lib/python3.11/site-packages/aiohttp/log.py new file mode 100644 index 0000000000000000000000000000000000000000..3cecea2bac185df741bccd0a32a5fef9cfe23299 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/log.py @@ -0,0 +1,8 @@ +import logging + +access_logger = logging.getLogger("aiohttp.access") +client_logger = logging.getLogger("aiohttp.client") +internal_logger = logging.getLogger("aiohttp.internal") +server_logger = logging.getLogger("aiohttp.server") +web_logger = logging.getLogger("aiohttp.web") +ws_logger = logging.getLogger("aiohttp.websocket") diff --git a/py311/lib/python3.11/site-packages/aiohttp/multipart.py b/py311/lib/python3.11/site-packages/aiohttp/multipart.py new file mode 100644 index 0000000000000000000000000000000000000000..9c37f0bb71699d9001f4baf0fcd64bbeb94410ef --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/multipart.py @@ -0,0 +1,1152 @@ +import base64 +import binascii +import json +import re +import sys +import uuid +import warnings +from collections import deque +from collections.abc import Mapping, Sequence +from types import TracebackType +from typing import ( + TYPE_CHECKING, + Any, + Deque, + Dict, + Iterator, + List, + Optional, + Tuple, + Type, + Union, + cast, +) +from urllib.parse import parse_qsl, unquote, urlencode + +from multidict import CIMultiDict, CIMultiDictProxy + +from .abc import AbstractStreamWriter +from .compression_utils import ( + DEFAULT_MAX_DECOMPRESS_SIZE, + ZLibCompressor, + ZLibDecompressor, +) +from .hdrs import ( + CONTENT_DISPOSITION, + CONTENT_ENCODING, + CONTENT_LENGTH, + CONTENT_TRANSFER_ENCODING, + CONTENT_TYPE, +) +from .helpers import CHAR, TOKEN, parse_mimetype, reify +from .http import HeadersParser +from .log import internal_logger +from .payload import ( + JsonPayload, + LookupError, + Order, + Payload, + StringPayload, + get_payload, + payload_type, +) +from .streams import StreamReader + +if sys.version_info >= (3, 11): + from typing import Self +else: + from typing import TypeVar + + Self = TypeVar("Self", bound="BodyPartReader") + +__all__ = ( + "MultipartReader", + "MultipartWriter", + "BodyPartReader", + "BadContentDispositionHeader", + "BadContentDispositionParam", + "parse_content_disposition", + "content_disposition_filename", +) + + +if TYPE_CHECKING: + from .client_reqrep import ClientResponse + + +class BadContentDispositionHeader(RuntimeWarning): + pass + + +class BadContentDispositionParam(RuntimeWarning): + pass + + +def parse_content_disposition( + header: Optional[str], +) -> Tuple[Optional[str], Dict[str, str]]: + def is_token(string: str) -> bool: + return bool(string) and TOKEN >= set(string) + + def is_quoted(string: str) -> bool: + return string[0] == string[-1] == '"' + + def is_rfc5987(string: str) -> bool: + return is_token(string) and string.count("'") == 2 + + def is_extended_param(string: str) -> bool: + return 
string.endswith("*") + + def is_continuous_param(string: str) -> bool: + pos = string.find("*") + 1 + if not pos: + return False + substring = string[pos:-1] if string.endswith("*") else string[pos:] + return substring.isdigit() + + def unescape(text: str, *, chars: str = "".join(map(re.escape, CHAR))) -> str: + return re.sub(f"\\\\([{chars}])", "\\1", text) + + if not header: + return None, {} + + disptype, *parts = header.split(";") + if not is_token(disptype): + warnings.warn(BadContentDispositionHeader(header)) + return None, {} + + params: Dict[str, str] = {} + while parts: + item = parts.pop(0) + + if not item: # To handle trailing semicolons + warnings.warn(BadContentDispositionHeader(header)) + continue + + if "=" not in item: + warnings.warn(BadContentDispositionHeader(header)) + return None, {} + + key, value = item.split("=", 1) + key = key.lower().strip() + value = value.lstrip() + + if key in params: + warnings.warn(BadContentDispositionHeader(header)) + return None, {} + + if not is_token(key): + warnings.warn(BadContentDispositionParam(item)) + continue + + elif is_continuous_param(key): + if is_quoted(value): + value = unescape(value[1:-1]) + elif not is_token(value): + warnings.warn(BadContentDispositionParam(item)) + continue + + elif is_extended_param(key): + if is_rfc5987(value): + encoding, _, value = value.split("'", 2) + encoding = encoding or "utf-8" + else: + warnings.warn(BadContentDispositionParam(item)) + continue + + try: + value = unquote(value, encoding, "strict") + except UnicodeDecodeError: # pragma: nocover + warnings.warn(BadContentDispositionParam(item)) + continue + + else: + failed = True + if is_quoted(value): + failed = False + value = unescape(value[1:-1].lstrip("\\/")) + elif is_token(value): + failed = False + elif parts: + # maybe just ; in filename, in any case this is just + # one case fix, for proper fix we need to redesign parser + _value = f"{value};{parts[0]}" + if is_quoted(_value): + parts.pop(0) + value = unescape(_value[1:-1].lstrip("\\/")) + failed = False + + if failed: + warnings.warn(BadContentDispositionHeader(header)) + return None, {} + + params[key] = value + + return disptype.lower(), params + + +def content_disposition_filename( + params: Mapping[str, str], name: str = "filename" +) -> Optional[str]: + name_suf = "%s*" % name + if not params: + return None + elif name_suf in params: + return params[name_suf] + elif name in params: + return params[name] + else: + parts = [] + fnparams = sorted( + (key, value) for key, value in params.items() if key.startswith(name_suf) + ) + for num, (key, value) in enumerate(fnparams): + _, tail = key.split("*", 1) + if tail.endswith("*"): + tail = tail[:-1] + if tail == str(num): + parts.append(value) + else: + break + if not parts: + return None + value = "".join(parts) + if "'" in value: + encoding, _, value = value.split("'", 2) + encoding = encoding or "utf-8" + return unquote(value, encoding, "strict") + return value + + +class MultipartResponseWrapper: + """Wrapper around the MultipartReader. + + It takes care about + underlying connection and close it when it needs in. 
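+
+    Illustrative usage (a sketch; assumes ``resp`` is an aiohttp client
+    response whose body is multipart)::
+
+        wrapper = MultipartReader.from_response(resp)
+        async for part in wrapper:
+            ...  # each part is a BodyPartReader or a nested MultipartReader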
+    """
+
+    def __init__(
+        self,
+        resp: "ClientResponse",
+        stream: "MultipartReader",
+    ) -> None:
+        self.resp = resp
+        self.stream = stream
+
+    def __aiter__(self) -> "MultipartResponseWrapper":
+        return self
+
+    async def __anext__(
+        self,
+    ) -> Union["MultipartReader", "BodyPartReader"]:
+        part = await self.next()
+        if part is None:
+            raise StopAsyncIteration
+        return part
+
+    def at_eof(self) -> bool:
+        """Returns True when all response data has been read."""
+        return self.resp.content.at_eof()
+
+    async def next(
+        self,
+    ) -> Optional[Union["MultipartReader", "BodyPartReader"]]:
+        """Emits the next multipart reader object."""
+        item = await self.stream.next()
+        if self.stream.at_eof():
+            await self.release()
+        return item
+
+    async def release(self) -> None:
+        """Release the connection gracefully.
+
+        All remaining content is read to the void.
+        """
+        await self.resp.release()
+
+
+class BodyPartReader:
+    """Multipart reader for a single body part."""
+
+    chunk_size = 8192
+
+    def __init__(
+        self,
+        boundary: bytes,
+        headers: "CIMultiDictProxy[str]",
+        content: StreamReader,
+        *,
+        subtype: str = "mixed",
+        default_charset: Optional[str] = None,
+        max_decompress_size: int = DEFAULT_MAX_DECOMPRESS_SIZE,
+    ) -> None:
+        self.headers = headers
+        self._boundary = boundary
+        self._boundary_len = len(boundary) + 2  # Boundary + \r\n
+        self._content = content
+        self._default_charset = default_charset
+        self._at_eof = False
+        self._is_form_data = subtype == "form-data"
+        # https://datatracker.ietf.org/doc/html/rfc7578#section-4.8
+        length = None if self._is_form_data else self.headers.get(CONTENT_LENGTH, None)
+        self._length = int(length) if length is not None else None
+        self._read_bytes = 0
+        self._unread: Deque[bytes] = deque()
+        self._prev_chunk: Optional[bytes] = None
+        self._content_eof = 0
+        self._cache: Dict[str, Any] = {}
+        self._max_decompress_size = max_decompress_size
+
+    def __aiter__(self: Self) -> Self:
+        return self
+
+    async def __anext__(self) -> bytes:
+        part = await self.next()
+        if part is None:
+            raise StopAsyncIteration
+        return part
+
+    async def next(self) -> Optional[bytes]:
+        item = await self.read()
+        if not item:
+            return None
+        return item
+
+    async def read(self, *, decode: bool = False) -> bytes:
+        """Reads body part data.
+
+        decode: If True, decodes the data according to the encoding method
+        from the Content-Encoding header. If the header is missing, the
+        data remains untouched.
+        """
+        if self._at_eof:
+            return b""
+        data = bytearray()
+        while not self._at_eof:
+            data.extend(await self.read_chunk(self.chunk_size))
+        if decode:
+            return await self.decode(data)
+        return data
+
+    async def read_chunk(self, size: int = chunk_size) -> bytes:
+        """Reads body part content chunk of the specified size.
+
+        size: chunk size
+        """
+        if self._at_eof:
+            return b""
+        if self._length:
+            chunk = await self._read_chunk_from_length(size)
+        else:
+            chunk = await self._read_chunk_from_stream(size)
+
+        # For base64 data we must read fragments whose stripped length
+        # (ignoring \n and \r characters) is a multiple of 4, so each
+        # fragment can be decoded on its own.
+        encoding = self.headers.get(CONTENT_TRANSFER_ENCODING)
+        if encoding and encoding.lower() == "base64":
+            stripped_chunk = b"".join(chunk.split())
+            remainder = len(stripped_chunk) % 4
+
+            while remainder != 0 and not self.at_eof():
+                over_chunk_size = 4 - remainder
+                over_chunk = b""
+
+                if self._prev_chunk:
+                    over_chunk = self._prev_chunk[:over_chunk_size]
+                    self._prev_chunk = self._prev_chunk[len(over_chunk) :]
+
+                if len(over_chunk) != over_chunk_size:
+                    over_chunk += await self._content.read(4 - len(over_chunk))
+
+                if not over_chunk:
+                    self._at_eof = True
+
+                stripped_chunk += b"".join(over_chunk.split())
+                chunk += over_chunk
+                remainder = len(stripped_chunk) % 4
+
+        self._read_bytes += len(chunk)
+        if self._read_bytes == self._length:
+            self._at_eof = True
+        if self._at_eof and await self._content.readline() != b"\r\n":
+            raise ValueError("Reader did not read all the data or it is malformed")
+        return chunk
+
+    async def _read_chunk_from_length(self, size: int) -> bytes:
+        # Reads a body part content chunk of the specified size.
+        # The body part must have a Content-Length header with a proper value.
+        assert self._length is not None, "Content-Length required for chunked read"
+        chunk_size = min(size, self._length - self._read_bytes)
+        chunk = await self._content.read(chunk_size)
+        if self._content.at_eof():
+            self._at_eof = True
+        return chunk
+
+    async def _read_chunk_from_stream(self, size: int) -> bytes:
+        # Reads a content chunk of a body part with unknown length.
+        # The Content-Length header for the body part is not necessary.
+        assert (
+            size >= self._boundary_len
+        ), "Chunk size must be greater than or equal to boundary length + 2"
+        first_chunk = self._prev_chunk is None
+        if first_chunk:
+            # We need to re-add the CRLF that got removed from headers parsing.
+            self._prev_chunk = b"\r\n" + await self._content.read(size)
+
+        chunk = b""
+        # content.read() may return less than size, so we need to loop to ensure
+        # we have enough data to detect the boundary.
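+        # Illustrative sketch (values assumed, not part of the reader): with
+        # boundary b"--frontier", _boundary_len is len(b"--frontier") + 2,
+        # and keeping the previous chunk lets a boundary that is split
+        # across two reads still be found in the combined window searched
+        # below:
+        #
+        #   window = b"body bytes\r\n--fro" + b"ntier\r\n..."
+        #   window.find(b"\r\n--frontier")  # -> 10, despite the split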
+        while len(chunk) < self._boundary_len:
+            chunk += await self._content.read(size)
+            self._content_eof += int(self._content.at_eof())
+            if self._content_eof > 2:
+                raise ValueError("Reading after EOF")
+            if self._content_eof:
+                break
+        if len(chunk) > size:
+            self._content.unread_data(chunk[size:])
+            chunk = chunk[:size]
+
+        assert self._prev_chunk is not None
+        window = self._prev_chunk + chunk
+        sub = b"\r\n" + self._boundary
+        if first_chunk:
+            idx = window.find(sub)
+        else:
+            idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub)))
+        if idx >= 0:
+            # pushing boundary back to content
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", category=DeprecationWarning)
+                self._content.unread_data(window[idx:])
+            self._prev_chunk = self._prev_chunk[:idx]
+            chunk = window[len(self._prev_chunk) : idx]
+            if not chunk:
+                self._at_eof = True
+        result = self._prev_chunk[2 if first_chunk else 0 :]  # Strip initial CRLF
+        self._prev_chunk = chunk
+        return result
+
+    async def readline(self) -> bytes:
+        """Reads the body part line by line."""
+        if self._at_eof:
+            return b""
+
+        if self._unread:
+            line = self._unread.popleft()
+        else:
+            line = await self._content.readline()
+
+        if line.startswith(self._boundary):
+            # The very last boundary may not come with \r\n,
+            # so apply a single rule to every boundary line.
+            sline = line.rstrip(b"\r\n")
+            boundary = self._boundary
+            last_boundary = self._boundary + b"--"
+            # Ensure we matched exactly the boundary, not a line that merely
+            # starts with it.
+            if sline == boundary or sline == last_boundary:
+                self._at_eof = True
+                self._unread.append(line)
+                return b""
+        else:
+            next_line = await self._content.readline()
+            if next_line.startswith(self._boundary):
+                line = line[:-2]  # strip CRLF but only once
+                self._unread.append(next_line)
+
+        return line
+
+    async def release(self) -> None:
+        """Like read(), but reads all the data to the void."""
+        if self._at_eof:
+            return
+        while not self._at_eof:
+            await self.read_chunk(self.chunk_size)
+
+    async def text(self, *, encoding: Optional[str] = None) -> str:
+        """Like read(), but assumes that the body part contains text data."""
+        data = await self.read(decode=True)
+        # see https://www.w3.org/TR/html5/forms.html#multipart/form-data-encoding-algorithm
+        # and https://dvcs.w3.org/hg/xhr/raw-file/tip/Overview.html#dom-xmlhttprequest-send
+        encoding = encoding or self.get_charset(default="utf-8")
+        return data.decode(encoding)
+
+    async def json(self, *, encoding: Optional[str] = None) -> Optional[Dict[str, Any]]:
+        """Like read(), but assumes that the body part contains JSON data."""
+        data = await self.read(decode=True)
+        if not data:
+            return None
+        encoding = encoding or self.get_charset(default="utf-8")
+        return cast(Dict[str, Any], json.loads(data.decode(encoding)))
+
+    async def form(self, *, encoding: Optional[str] = None) -> List[Tuple[str, str]]:
+        """Like read(), but assumes that the body part contains form-urlencoded data."""
+        data = await self.read(decode=True)
+        if not data:
+            return []
+        if encoding is not None:
+            real_encoding = encoding
+        else:
+            real_encoding = self.get_charset(default="utf-8")
+        try:
+            decoded_data = data.rstrip().decode(real_encoding)
+        except UnicodeDecodeError:
+            raise ValueError("data cannot be decoded with %s encoding" % real_encoding)
+
+        return parse_qsl(
+            decoded_data,
+            keep_blank_values=True,
+            encoding=real_encoding,
+        )
+
+    def at_eof(self) -> bool:
+        """Returns True if the boundary was reached or False otherwise."""
+        return self._at_eof
+
+    async def decode(self, data: bytes) -> bytes:
+        
"""Decodes data. + + Decoding is done according the specified Content-Encoding + or Content-Transfer-Encoding headers value. + """ + if CONTENT_TRANSFER_ENCODING in self.headers: + data = self._decode_content_transfer(data) + # https://datatracker.ietf.org/doc/html/rfc7578#section-4.8 + if not self._is_form_data and CONTENT_ENCODING in self.headers: + return await self._decode_content(data) + return data + + async def _decode_content(self, data: bytes) -> bytes: + encoding = self.headers.get(CONTENT_ENCODING, "").lower() + if encoding == "identity": + return data + if encoding in {"deflate", "gzip"}: + return await ZLibDecompressor( + encoding=encoding, + suppress_deflate_header=True, + ).decompress(data, max_length=self._max_decompress_size) + + raise RuntimeError(f"unknown content encoding: {encoding}") + + def _decode_content_transfer(self, data: bytes) -> bytes: + encoding = self.headers.get(CONTENT_TRANSFER_ENCODING, "").lower() + + if encoding == "base64": + return base64.b64decode(data) + elif encoding == "quoted-printable": + return binascii.a2b_qp(data) + elif encoding in ("binary", "8bit", "7bit"): + return data + else: + raise RuntimeError(f"unknown content transfer encoding: {encoding}") + + def get_charset(self, default: str) -> str: + """Returns charset parameter from Content-Type header or default.""" + ctype = self.headers.get(CONTENT_TYPE, "") + mimetype = parse_mimetype(ctype) + return mimetype.parameters.get("charset", self._default_charset or default) + + @reify + def name(self) -> Optional[str]: + """Returns name specified in Content-Disposition header. + + If the header is missing or malformed, returns None. + """ + _, params = parse_content_disposition(self.headers.get(CONTENT_DISPOSITION)) + return content_disposition_filename(params, "name") + + @reify + def filename(self) -> Optional[str]: + """Returns filename specified in Content-Disposition header. + + Returns None if the header is missing or malformed. + """ + _, params = parse_content_disposition(self.headers.get(CONTENT_DISPOSITION)) + return content_disposition_filename(params, "filename") + + +@payload_type(BodyPartReader, order=Order.try_first) +class BodyPartReaderPayload(Payload): + _value: BodyPartReader + # _autoclose = False (inherited) - Streaming reader that may have resources + + def __init__(self, value: BodyPartReader, *args: Any, **kwargs: Any) -> None: + super().__init__(value, *args, **kwargs) + + params: Dict[str, str] = {} + if value.name is not None: + params["name"] = value.name + if value.filename is not None: + params["filename"] = value.filename + + if params: + self.set_content_disposition("attachment", True, **params) + + def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str: + raise TypeError("Unable to decode.") + + async def as_bytes(self, encoding: str = "utf-8", errors: str = "strict") -> bytes: + """Raises TypeError as body parts should be consumed via write(). + + This is intentional: BodyPartReader payloads are designed for streaming + large data (potentially gigabytes) and must be consumed only once via + the write() method to avoid memory exhaustion. They cannot be buffered + in memory for reuse. + """ + raise TypeError("Unable to read body part as bytes. 
Use write() to consume.") + + async def write(self, writer: AbstractStreamWriter) -> None: + field = self._value + chunk = await field.read_chunk(size=2**16) + while chunk: + await writer.write(await field.decode(chunk)) + chunk = await field.read_chunk(size=2**16) + + +class MultipartReader: + """Multipart body reader.""" + + #: Response wrapper, used when multipart readers constructs from response. + response_wrapper_cls = MultipartResponseWrapper + #: Multipart reader class, used to handle multipart/* body parts. + #: None points to type(self) + multipart_reader_cls: Optional[Type["MultipartReader"]] = None + #: Body part reader class for non multipart/* content types. + part_reader_cls = BodyPartReader + + def __init__(self, headers: Mapping[str, str], content: StreamReader) -> None: + self._mimetype = parse_mimetype(headers[CONTENT_TYPE]) + assert self._mimetype.type == "multipart", "multipart/* content type expected" + if "boundary" not in self._mimetype.parameters: + raise ValueError( + "boundary missed for Content-Type: %s" % headers[CONTENT_TYPE] + ) + + self.headers = headers + self._boundary = ("--" + self._get_boundary()).encode() + self._content = content + self._default_charset: Optional[str] = None + self._last_part: Optional[Union["MultipartReader", BodyPartReader]] = None + self._at_eof = False + self._at_bof = True + self._unread: List[bytes] = [] + + def __aiter__(self: Self) -> Self: + return self + + async def __anext__( + self, + ) -> Optional[Union["MultipartReader", BodyPartReader]]: + part = await self.next() + if part is None: + raise StopAsyncIteration + return part + + @classmethod + def from_response( + cls, + response: "ClientResponse", + ) -> MultipartResponseWrapper: + """Constructs reader instance from HTTP response. + + :param response: :class:`~aiohttp.client.ClientResponse` instance + """ + obj = cls.response_wrapper_cls( + response, cls(response.headers, response.content) + ) + return obj + + def at_eof(self) -> bool: + """Returns True if the final boundary was reached, false otherwise.""" + return self._at_eof + + async def next( + self, + ) -> Optional[Union["MultipartReader", BodyPartReader]]: + """Emits the next multipart body part.""" + # So, if we're at BOF, we need to skip till the boundary. + if self._at_eof: + return None + await self._maybe_release_last_part() + if self._at_bof: + await self._read_until_first_boundary() + self._at_bof = False + else: + await self._read_boundary() + if self._at_eof: # we just read the last boundary, nothing to do there + return None + + part = await self.fetch_next_part() + # https://datatracker.ietf.org/doc/html/rfc7578#section-4.6 + if ( + self._last_part is None + and self._mimetype.subtype == "form-data" + and isinstance(part, BodyPartReader) + ): + _, params = parse_content_disposition(part.headers.get(CONTENT_DISPOSITION)) + if params.get("name") == "_charset_": + # Longest encoding in https://encoding.spec.whatwg.org/encodings.json + # is 19 characters, so 32 should be more than enough for any valid encoding. 
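+                # For illustration only (boundary name assumed), such a
+                # form-data body is shaped like:
+                #
+                #   --boundary
+                #   Content-Disposition: form-data; name="_charset_"
+                #
+                #   iso-8859-1
+                #   --boundary
+                #   ...
+                #
+                # so the first 32 bytes of the part are enough to hold any
+                # registered encoding label.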
+ charset = await part.read_chunk(32) + if len(charset) > 31: + raise RuntimeError("Invalid default charset") + self._default_charset = charset.strip().decode() + part = await self.fetch_next_part() + self._last_part = part + return self._last_part + + async def release(self) -> None: + """Reads all the body parts to the void till the final boundary.""" + while not self._at_eof: + item = await self.next() + if item is None: + break + await item.release() + + async def fetch_next_part( + self, + ) -> Union["MultipartReader", BodyPartReader]: + """Returns the next body part reader.""" + headers = await self._read_headers() + return self._get_part_reader(headers) + + def _get_part_reader( + self, + headers: "CIMultiDictProxy[str]", + ) -> Union["MultipartReader", BodyPartReader]: + """Dispatches the response by the `Content-Type` header. + + Returns a suitable reader instance. + + :param dict headers: Response headers + """ + ctype = headers.get(CONTENT_TYPE, "") + mimetype = parse_mimetype(ctype) + + if mimetype.type == "multipart": + if self.multipart_reader_cls is None: + return type(self)(headers, self._content) + return self.multipart_reader_cls(headers, self._content) + else: + return self.part_reader_cls( + self._boundary, + headers, + self._content, + subtype=self._mimetype.subtype, + default_charset=self._default_charset, + ) + + def _get_boundary(self) -> str: + boundary = self._mimetype.parameters["boundary"] + if len(boundary) > 70: + raise ValueError("boundary %r is too long (70 chars max)" % boundary) + + return boundary + + async def _readline(self) -> bytes: + if self._unread: + return self._unread.pop() + return await self._content.readline() + + async def _read_until_first_boundary(self) -> None: + while True: + chunk = await self._readline() + if chunk == b"": + raise ValueError( + "Could not find starting boundary %r" % (self._boundary) + ) + chunk = chunk.rstrip() + if chunk == self._boundary: + return + elif chunk == self._boundary + b"--": + self._at_eof = True + return + + async def _read_boundary(self) -> None: + chunk = (await self._readline()).rstrip() + if chunk == self._boundary: + pass + elif chunk == self._boundary + b"--": + self._at_eof = True + epilogue = await self._readline() + next_line = await self._readline() + + # the epilogue is expected and then either the end of input or the + # parent multipart boundary, if the parent boundary is found then + # it should be marked as unread and handed to the parent for + # processing + if next_line[:2] == b"--": + self._unread.append(next_line) + # otherwise the request is likely missing an epilogue and both + # lines should be passed to the parent for processing + # (this handles the old behavior gracefully) + else: + self._unread.extend([next_line, epilogue]) + else: + raise ValueError(f"Invalid boundary {chunk!r}, expected {self._boundary!r}") + + async def _read_headers(self) -> "CIMultiDictProxy[str]": + lines = [] + while True: + chunk = await self._content.readline() + chunk = chunk.rstrip(b"\r\n") + lines.append(chunk) + if not chunk: + break + parser = HeadersParser() + headers, raw_headers = parser.parse_headers(lines) + return headers + + async def _maybe_release_last_part(self) -> None: + """Ensures that the last read body part is read completely.""" + if self._last_part is not None: + if not self._last_part.at_eof(): + await self._last_part.release() + self._unread.extend(self._last_part._unread) + self._last_part = None + + +_Part = Tuple[Payload, str, str] + + +class MultipartWriter(Payload): + 
"""Multipart body writer.""" + + _value: None + # _consumed = False (inherited) - Can be encoded multiple times + _autoclose = True # No file handles, just collects parts in memory + + def __init__(self, subtype: str = "mixed", boundary: Optional[str] = None) -> None: + boundary = boundary if boundary is not None else uuid.uuid4().hex + # The underlying Payload API demands a str (utf-8), not bytes, + # so we need to ensure we don't lose anything during conversion. + # As a result, require the boundary to be ASCII only. + # In both situations. + + try: + self._boundary = boundary.encode("ascii") + except UnicodeEncodeError: + raise ValueError("boundary should contain ASCII only chars") from None + ctype = f"multipart/{subtype}; boundary={self._boundary_value}" + + super().__init__(None, content_type=ctype) + + self._parts: List[_Part] = [] + self._is_form_data = subtype == "form-data" + + def __enter__(self) -> "MultipartWriter": + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + pass + + def __iter__(self) -> Iterator[_Part]: + return iter(self._parts) + + def __len__(self) -> int: + return len(self._parts) + + def __bool__(self) -> bool: + return True + + _valid_tchar_regex = re.compile(rb"\A[!#$%&'*+\-.^_`|~\w]+\Z") + _invalid_qdtext_char_regex = re.compile(rb"[\x00-\x08\x0A-\x1F\x7F]") + + @property + def _boundary_value(self) -> str: + """Wrap boundary parameter value in quotes, if necessary. + + Reads self.boundary and returns a unicode string. + """ + # Refer to RFCs 7231, 7230, 5234. + # + # parameter = token "=" ( token / quoted-string ) + # token = 1*tchar + # quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE + # qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text + # obs-text = %x80-FF + # quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) + # tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" + # / "+" / "-" / "." 
/ "^" / "_" / "`" / "|" / "~" + # / DIGIT / ALPHA + # ; any VCHAR, except delimiters + # VCHAR = %x21-7E + value = self._boundary + if re.match(self._valid_tchar_regex, value): + return value.decode("ascii") # cannot fail + + if re.search(self._invalid_qdtext_char_regex, value): + raise ValueError("boundary value contains invalid characters") + + # escape %x5C and %x22 + quoted_value_content = value.replace(b"\\", b"\\\\") + quoted_value_content = quoted_value_content.replace(b'"', b'\\"') + + return '"' + quoted_value_content.decode("ascii") + '"' + + @property + def boundary(self) -> str: + return self._boundary.decode("ascii") + + def append(self, obj: Any, headers: Optional[Mapping[str, str]] = None) -> Payload: + if headers is None: + headers = CIMultiDict() + + if isinstance(obj, Payload): + obj.headers.update(headers) + return self.append_payload(obj) + else: + try: + payload = get_payload(obj, headers=headers) + except LookupError: + raise TypeError("Cannot create payload from %r" % obj) + else: + return self.append_payload(payload) + + def append_payload(self, payload: Payload) -> Payload: + """Adds a new body part to multipart writer.""" + encoding: Optional[str] = None + te_encoding: Optional[str] = None + if self._is_form_data: + # https://datatracker.ietf.org/doc/html/rfc7578#section-4.7 + # https://datatracker.ietf.org/doc/html/rfc7578#section-4.8 + assert ( + not {CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_TRANSFER_ENCODING} + & payload.headers.keys() + ) + # Set default Content-Disposition in case user doesn't create one + if CONTENT_DISPOSITION not in payload.headers: + name = f"section-{len(self._parts)}" + payload.set_content_disposition("form-data", name=name) + else: + # compression + encoding = payload.headers.get(CONTENT_ENCODING, "").lower() + if encoding and encoding not in ("deflate", "gzip", "identity"): + raise RuntimeError(f"unknown content encoding: {encoding}") + if encoding == "identity": + encoding = None + + # te encoding + te_encoding = payload.headers.get(CONTENT_TRANSFER_ENCODING, "").lower() + if te_encoding not in ("", "base64", "quoted-printable", "binary"): + raise RuntimeError(f"unknown content transfer encoding: {te_encoding}") + if te_encoding == "binary": + te_encoding = None + + # size + size = payload.size + if size is not None and not (encoding or te_encoding): + payload.headers[CONTENT_LENGTH] = str(size) + + self._parts.append((payload, encoding, te_encoding)) # type: ignore[arg-type] + return payload + + def append_json( + self, obj: Any, headers: Optional[Mapping[str, str]] = None + ) -> Payload: + """Helper to append JSON part.""" + if headers is None: + headers = CIMultiDict() + + return self.append_payload(JsonPayload(obj, headers=headers)) + + def append_form( + self, + obj: Union[Sequence[Tuple[str, str]], Mapping[str, str]], + headers: Optional[Mapping[str, str]] = None, + ) -> Payload: + """Helper to append form urlencoded part.""" + assert isinstance(obj, (Sequence, Mapping)) + + if headers is None: + headers = CIMultiDict() + + if isinstance(obj, Mapping): + obj = list(obj.items()) + data = urlencode(obj, doseq=True) + + return self.append_payload( + StringPayload( + data, headers=headers, content_type="application/x-www-form-urlencoded" + ) + ) + + @property + def size(self) -> Optional[int]: + """Size of the payload.""" + total = 0 + for part, encoding, te_encoding in self._parts: + part_size = part.size + if encoding or te_encoding or part_size is None: + return None + + total += int( + 2 + + len(self._boundary) + + 2 + + 
part_size # b'--'+self._boundary+b'\r\n' + + len(part._binary_headers) + + 2 # b'\r\n' + ) + + total += 2 + len(self._boundary) + 4 # b'--'+self._boundary+b'--\r\n' + return total + + def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str: + """Return string representation of the multipart data. + + WARNING: This method may do blocking I/O if parts contain file payloads. + It should not be called in the event loop. Use as_bytes().decode() instead. + """ + return "".join( + "--" + + self.boundary + + "\r\n" + + part._binary_headers.decode(encoding, errors) + + part.decode() + for part, _e, _te in self._parts + ) + + async def as_bytes(self, encoding: str = "utf-8", errors: str = "strict") -> bytes: + """Return bytes representation of the multipart data. + + This method is async-safe and calls as_bytes on underlying payloads. + """ + parts: List[bytes] = [] + + # Process each part + for part, _e, _te in self._parts: + # Add boundary + parts.append(b"--" + self._boundary + b"\r\n") + + # Add headers + parts.append(part._binary_headers) + + # Add payload content using as_bytes for async safety + part_bytes = await part.as_bytes(encoding, errors) + parts.append(part_bytes) + + # Add trailing CRLF + parts.append(b"\r\n") + + # Add closing boundary + parts.append(b"--" + self._boundary + b"--\r\n") + + return b"".join(parts) + + async def write( + self, writer: AbstractStreamWriter, close_boundary: bool = True + ) -> None: + """Write body.""" + for part, encoding, te_encoding in self._parts: + if self._is_form_data: + # https://datatracker.ietf.org/doc/html/rfc7578#section-4.2 + assert CONTENT_DISPOSITION in part.headers + assert "name=" in part.headers[CONTENT_DISPOSITION] + + await writer.write(b"--" + self._boundary + b"\r\n") + await writer.write(part._binary_headers) + + if encoding or te_encoding: + w = MultipartPayloadWriter(writer) + if encoding: + w.enable_compression(encoding) + if te_encoding: + w.enable_encoding(te_encoding) + await part.write(w) # type: ignore[arg-type] + await w.write_eof() + else: + await part.write(writer) + + await writer.write(b"\r\n") + + if close_boundary: + await writer.write(b"--" + self._boundary + b"--\r\n") + + async def close(self) -> None: + """ + Close all part payloads that need explicit closing. + + IMPORTANT: This method must not await anything that might not finish + immediately, as it may be called during cleanup/cancellation. Schedule + any long-running operations without awaiting them. 
+ """ + if self._consumed: + return + self._consumed = True + + # Close all parts that need explicit closing + # We catch and log exceptions to ensure all parts get a chance to close + # we do not use asyncio.gather() here because we are not allowed + # to suspend given we may be called during cleanup + for idx, (part, _, _) in enumerate(self._parts): + if not part.autoclose and not part.consumed: + try: + await part.close() + except Exception as exc: + internal_logger.error( + "Failed to close multipart part %d: %s", idx, exc, exc_info=True + ) + + +class MultipartPayloadWriter: + def __init__(self, writer: AbstractStreamWriter) -> None: + self._writer = writer + self._encoding: Optional[str] = None + self._compress: Optional[ZLibCompressor] = None + self._encoding_buffer: Optional[bytearray] = None + + def enable_encoding(self, encoding: str) -> None: + if encoding == "base64": + self._encoding = encoding + self._encoding_buffer = bytearray() + elif encoding == "quoted-printable": + self._encoding = "quoted-printable" + + def enable_compression( + self, encoding: str = "deflate", strategy: Optional[int] = None + ) -> None: + self._compress = ZLibCompressor( + encoding=encoding, + suppress_deflate_header=True, + strategy=strategy, + ) + + async def write_eof(self) -> None: + if self._compress is not None: + chunk = self._compress.flush() + if chunk: + self._compress = None + await self.write(chunk) + + if self._encoding == "base64": + if self._encoding_buffer: + await self._writer.write(base64.b64encode(self._encoding_buffer)) + + async def write(self, chunk: bytes) -> None: + if self._compress is not None: + if chunk: + chunk = await self._compress.compress(chunk) + if not chunk: + return + + if self._encoding == "base64": + buf = self._encoding_buffer + assert buf is not None + buf.extend(chunk) + + if buf: + div, mod = divmod(len(buf), 3) + enc_chunk, self._encoding_buffer = (buf[: div * 3], buf[div * 3 :]) + if enc_chunk: + b64chunk = base64.b64encode(enc_chunk) + await self._writer.write(b64chunk) + elif self._encoding == "quoted-printable": + await self._writer.write(binascii.b2a_qp(chunk)) + else: + await self._writer.write(chunk) diff --git a/py311/lib/python3.11/site-packages/aiohttp/payload.py b/py311/lib/python3.11/site-packages/aiohttp/payload.py new file mode 100644 index 0000000000000000000000000000000000000000..5b88fa094050fcef280050b54429f6bd43b166d3 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/payload.py @@ -0,0 +1,1120 @@ +import asyncio +import enum +import io +import json +import mimetypes +import os +import sys +import warnings +from abc import ABC, abstractmethod +from collections.abc import Iterable +from itertools import chain +from typing import ( + IO, + TYPE_CHECKING, + Any, + Dict, + Final, + List, + Optional, + Set, + TextIO, + Tuple, + Type, + Union, +) + +from multidict import CIMultiDict + +from . 
import hdrs +from .abc import AbstractStreamWriter +from .helpers import ( + _SENTINEL, + content_disposition_header, + guess_filename, + parse_mimetype, + sentinel, +) +from .streams import StreamReader +from .typedefs import JSONEncoder, _CIMultiDict + +__all__ = ( + "PAYLOAD_REGISTRY", + "get_payload", + "payload_type", + "Payload", + "BytesPayload", + "StringPayload", + "IOBasePayload", + "BytesIOPayload", + "BufferedReaderPayload", + "TextIOPayload", + "StringIOPayload", + "JsonPayload", + "AsyncIterablePayload", +) + +TOO_LARGE_BYTES_BODY: Final[int] = 2**20 # 1 MB +READ_SIZE: Final[int] = 2**16 # 64 KB +_CLOSE_FUTURES: Set[asyncio.Future[None]] = set() + + +class LookupError(Exception): + """Raised when no payload factory is found for the given data type.""" + + +class Order(str, enum.Enum): + normal = "normal" + try_first = "try_first" + try_last = "try_last" + + +def get_payload(data: Any, *args: Any, **kwargs: Any) -> "Payload": + return PAYLOAD_REGISTRY.get(data, *args, **kwargs) + + +def register_payload( + factory: Type["Payload"], type: Any, *, order: Order = Order.normal +) -> None: + PAYLOAD_REGISTRY.register(factory, type, order=order) + + +class payload_type: + def __init__(self, type: Any, *, order: Order = Order.normal) -> None: + self.type = type + self.order = order + + def __call__(self, factory: Type["Payload"]) -> Type["Payload"]: + register_payload(factory, self.type, order=self.order) + return factory + + +PayloadType = Type["Payload"] +_PayloadRegistryItem = Tuple[PayloadType, Any] + + +class PayloadRegistry: + """Payload registry. + + note: we need zope.interface for more efficient adapter search + """ + + __slots__ = ("_first", "_normal", "_last", "_normal_lookup") + + def __init__(self) -> None: + self._first: List[_PayloadRegistryItem] = [] + self._normal: List[_PayloadRegistryItem] = [] + self._last: List[_PayloadRegistryItem] = [] + self._normal_lookup: Dict[Any, PayloadType] = {} + + def get( + self, + data: Any, + *args: Any, + _CHAIN: "Type[chain[_PayloadRegistryItem]]" = chain, + **kwargs: Any, + ) -> "Payload": + if self._first: + for factory, type_ in self._first: + if isinstance(data, type_): + return factory(data, *args, **kwargs) + # Try the fast lookup first + if lookup_factory := self._normal_lookup.get(type(data)): + return lookup_factory(data, *args, **kwargs) + # Bail early if its already a Payload + if isinstance(data, Payload): + return data + # Fallback to the slower linear search + for factory, type_ in _CHAIN(self._normal, self._last): + if isinstance(data, type_): + return factory(data, *args, **kwargs) + raise LookupError() + + def register( + self, factory: PayloadType, type: Any, *, order: Order = Order.normal + ) -> None: + if order is Order.try_first: + self._first.append((factory, type)) + elif order is Order.normal: + self._normal.append((factory, type)) + if isinstance(type, Iterable): + for t in type: + self._normal_lookup[t] = factory + else: + self._normal_lookup[type] = factory + elif order is Order.try_last: + self._last.append((factory, type)) + else: + raise ValueError(f"Unsupported order {order!r}") + + +class Payload(ABC): + + _default_content_type: str = "application/octet-stream" + _size: Optional[int] = None + _consumed: bool = False # Default: payload has not been consumed yet + _autoclose: bool = False # Default: assume resource needs explicit closing + + def __init__( + self, + value: Any, + headers: Optional[ + Union[_CIMultiDict, Dict[str, str], Iterable[Tuple[str, str]]] + ] = None, + content_type: Union[str, 
None, _SENTINEL] = sentinel, + filename: Optional[str] = None, + encoding: Optional[str] = None, + **kwargs: Any, + ) -> None: + self._encoding = encoding + self._filename = filename + self._headers: _CIMultiDict = CIMultiDict() + self._value = value + if content_type is not sentinel and content_type is not None: + self._headers[hdrs.CONTENT_TYPE] = content_type + elif self._filename is not None: + if sys.version_info >= (3, 13): + guesser = mimetypes.guess_file_type + else: + guesser = mimetypes.guess_type + content_type = guesser(self._filename)[0] + if content_type is None: + content_type = self._default_content_type + self._headers[hdrs.CONTENT_TYPE] = content_type + else: + self._headers[hdrs.CONTENT_TYPE] = self._default_content_type + if headers: + self._headers.update(headers) + + @property + def size(self) -> Optional[int]: + """Size of the payload in bytes. + + Returns the number of bytes that will be transmitted when the payload + is written. For string payloads, this is the size after encoding to bytes, + not the length of the string. + """ + return self._size + + @property + def filename(self) -> Optional[str]: + """Filename of the payload.""" + return self._filename + + @property + def headers(self) -> _CIMultiDict: + """Custom item headers""" + return self._headers + + @property + def _binary_headers(self) -> bytes: + return ( + "".join([k + ": " + v + "\r\n" for k, v in self.headers.items()]).encode( + "utf-8" + ) + + b"\r\n" + ) + + @property + def encoding(self) -> Optional[str]: + """Payload encoding""" + return self._encoding + + @property + def content_type(self) -> str: + """Content type""" + return self._headers[hdrs.CONTENT_TYPE] + + @property + def consumed(self) -> bool: + """Whether the payload has been consumed and cannot be reused.""" + return self._consumed + + @property + def autoclose(self) -> bool: + """ + Whether the payload can close itself automatically. + + Returns True if the payload has no file handles or resources that need + explicit closing. If False, callers must await close() to release resources. + """ + return self._autoclose + + def set_content_disposition( + self, + disptype: str, + quote_fields: bool = True, + _charset: str = "utf-8", + **params: Any, + ) -> None: + """Sets ``Content-Disposition`` header.""" + self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header( + disptype, quote_fields=quote_fields, _charset=_charset, **params + ) + + @abstractmethod + def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str: + """ + Return string representation of the value. + + This is named decode() to allow compatibility with bytes objects. + """ + + @abstractmethod + async def write(self, writer: AbstractStreamWriter) -> None: + """ + Write payload to the writer stream. + + Args: + writer: An AbstractStreamWriter instance that handles the actual writing + + This is a legacy method that writes the entire payload without length constraints. + + Important: + For new implementations, use write_with_length() instead of this method. + This method is maintained for backwards compatibility and will eventually + delegate to write_with_length(writer, None) in all implementations. + + All payload subclasses must override this method for backwards compatibility, + but new code should use write_with_length for more flexibility and control. 
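+
+        Example (an illustrative sketch of a minimal subclass with assumed
+        names, not a recipe shipped by this module)::
+
+            class UpperBytesPayload(Payload):
+                """Payload holding bytes that are upper-cased on the wire."""
+
+                def decode(self, encoding: str = "utf-8",
+                           errors: str = "strict") -> str:
+                    return self._value.decode(encoding, errors).upper()
+
+                async def write(self, writer: AbstractStreamWriter) -> None:
+                    await writer.write(self._value.upper())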
+ + """ + + # write_with_length is new in aiohttp 3.12 + # it should be overridden by subclasses + async def write_with_length( + self, writer: AbstractStreamWriter, content_length: Optional[int] + ) -> None: + """ + Write payload with a specific content length constraint. + + Args: + writer: An AbstractStreamWriter instance that handles the actual writing + content_length: Maximum number of bytes to write (None for unlimited) + + This method allows writing payload content with a specific length constraint, + which is particularly useful for HTTP responses with Content-Length header. + + Note: + This is the base implementation that provides backwards compatibility + for subclasses that don't override this method. Specific payload types + should override this method to implement proper length-constrained writing. + + """ + # Backwards compatibility for subclasses that don't override this method + # and for the default implementation + await self.write(writer) + + async def as_bytes(self, encoding: str = "utf-8", errors: str = "strict") -> bytes: + """ + Return bytes representation of the value. + + This is a convenience method that calls decode() and encodes the result + to bytes using the specified encoding. + """ + # Use instance encoding if available, otherwise use parameter + actual_encoding = self._encoding or encoding + return self.decode(actual_encoding, errors).encode(actual_encoding) + + def _close(self) -> None: + """ + Async safe synchronous close operations for backwards compatibility. + + This method exists only for backwards compatibility with code that + needs to clean up payloads synchronously. In the future, we will + drop this method and only support the async close() method. + + WARNING: This method must be safe to call from within the event loop + without blocking. Subclasses should not perform any blocking I/O here. + + WARNING: This method must be called from within an event loop for + certain payload types (e.g., IOBasePayload). Calling it outside an + event loop may raise RuntimeError. + """ + # This is a no-op by default, but subclasses can override it + # for non-blocking cleanup operations. + + async def close(self) -> None: + """ + Close the payload if it holds any resources. + + IMPORTANT: This method must not await anything that might not finish + immediately, as it may be called during cleanup/cancellation. Schedule + any long-running operations without awaiting them. + + In the future, this will be the only close method supported. + """ + self._close() + + +class BytesPayload(Payload): + _value: bytes + # _consumed = False (inherited) - Bytes are immutable and can be reused + _autoclose = True # No file handle, just bytes in memory + + def __init__( + self, value: Union[bytes, bytearray, memoryview], *args: Any, **kwargs: Any + ) -> None: + if "content_type" not in kwargs: + kwargs["content_type"] = "application/octet-stream" + + super().__init__(value, *args, **kwargs) + + if isinstance(value, memoryview): + self._size = value.nbytes + elif isinstance(value, (bytes, bytearray)): + self._size = len(value) + else: + raise TypeError(f"value argument must be byte-ish, not {type(value)!r}") + + if self._size > TOO_LARGE_BYTES_BODY: + kwargs = {"source": self} + warnings.warn( + "Sending a large body directly with raw bytes might" + " lock the event loop. 
You should probably pass an " + "io.BytesIO object instead", + ResourceWarning, + **kwargs, + ) + + def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str: + return self._value.decode(encoding, errors) + + async def as_bytes(self, encoding: str = "utf-8", errors: str = "strict") -> bytes: + """ + Return bytes representation of the value. + + This method returns the raw bytes content of the payload. + It is equivalent to accessing the _value attribute directly. + """ + return self._value + + async def write(self, writer: AbstractStreamWriter) -> None: + """ + Write the entire bytes payload to the writer stream. + + Args: + writer: An AbstractStreamWriter instance that handles the actual writing + + This method writes the entire bytes content without any length constraint. + + Note: + For new implementations that need length control, use write_with_length(). + This method is maintained for backwards compatibility and is equivalent + to write_with_length(writer, None). + + """ + await writer.write(self._value) + + async def write_with_length( + self, writer: AbstractStreamWriter, content_length: Optional[int] + ) -> None: + """ + Write bytes payload with a specific content length constraint. + + Args: + writer: An AbstractStreamWriter instance that handles the actual writing + content_length: Maximum number of bytes to write (None for unlimited) + + This method writes either the entire byte sequence or a slice of it + up to the specified content_length. For BytesPayload, this operation + is performed efficiently using array slicing. + + """ + if content_length is not None: + await writer.write(self._value[:content_length]) + else: + await writer.write(self._value) + + +class StringPayload(BytesPayload): + def __init__( + self, + value: str, + *args: Any, + encoding: Optional[str] = None, + content_type: Optional[str] = None, + **kwargs: Any, + ) -> None: + + if encoding is None: + if content_type is None: + real_encoding = "utf-8" + content_type = "text/plain; charset=utf-8" + else: + mimetype = parse_mimetype(content_type) + real_encoding = mimetype.parameters.get("charset", "utf-8") + else: + if content_type is None: + content_type = "text/plain; charset=%s" % encoding + real_encoding = encoding + + super().__init__( + value.encode(real_encoding), + encoding=real_encoding, + content_type=content_type, + *args, + **kwargs, + ) + + +class StringIOPayload(StringPayload): + def __init__(self, value: IO[str], *args: Any, **kwargs: Any) -> None: + super().__init__(value.read(), *args, **kwargs) + + +class IOBasePayload(Payload): + _value: io.IOBase + # _consumed = False (inherited) - File can be re-read from the same position + _start_position: Optional[int] = None + # _autoclose = False (inherited) - Has file handle that needs explicit closing + + def __init__( + self, value: IO[Any], disposition: str = "attachment", *args: Any, **kwargs: Any + ) -> None: + if "filename" not in kwargs: + kwargs["filename"] = guess_filename(value) + + super().__init__(value, *args, **kwargs) + + if self._filename is not None and disposition is not None: + if hdrs.CONTENT_DISPOSITION not in self.headers: + self.set_content_disposition(disposition, filename=self._filename) + + def _set_or_restore_start_position(self) -> None: + """Set or restore the start position of the file-like object.""" + if self._start_position is None: + try: + self._start_position = self._value.tell() + except (OSError, AttributeError): + self._consumed = True # Cannot seek, mark as consumed + return + try: + 
self._value.seek(self._start_position) + except (OSError, AttributeError): + # Failed to seek back - mark as consumed since we've already read + self._consumed = True + + def _read_and_available_len( + self, remaining_content_len: Optional[int] + ) -> Tuple[Optional[int], bytes]: + """ + Read the file-like object and return both its total size and the first chunk. + + Args: + remaining_content_len: Optional limit on how many bytes to read in this operation. + If None, READ_SIZE will be used as the default chunk size. + + Returns: + A tuple containing: + - The total size of the remaining unread content (None if size cannot be determined) + - The first chunk of bytes read from the file object + + This method is optimized to perform both size calculation and initial read + in a single operation, which is executed in a single executor job to minimize + context switches and file operations when streaming content. + + """ + self._set_or_restore_start_position() + size = self.size # Call size only once since it does I/O + return size, self._value.read( + min(READ_SIZE, size or READ_SIZE, remaining_content_len or READ_SIZE) + ) + + def _read(self, remaining_content_len: Optional[int]) -> bytes: + """ + Read a chunk of data from the file-like object. + + Args: + remaining_content_len: Optional maximum number of bytes to read. + If None, READ_SIZE will be used as the default chunk size. + + Returns: + A chunk of bytes read from the file object, respecting the + remaining_content_len limit if specified. + + This method is used for subsequent reads during streaming after + the initial _read_and_available_len call has been made. + + """ + return self._value.read(remaining_content_len or READ_SIZE) # type: ignore[no-any-return] + + @property + def size(self) -> Optional[int]: + """ + Size of the payload in bytes. + + Returns the total size of the payload content from the initial position. + This ensures consistent Content-Length for requests, including 307/308 redirects + where the same payload instance is reused. + + Returns None if the size cannot be determined (e.g., for unseekable streams). + """ + try: + # Store the start position on first access. + # This is critical when the same payload instance is reused (e.g., 307/308 + # redirects). Without storing the initial position, after the payload is + # read once, the file position would be at EOF, which would cause the + # size calculation to return 0 (file_size - EOF position). + # By storing the start position, we ensure the size calculation always + # returns the correct total size for any subsequent use. + if self._start_position is None: + self._start_position = self._value.tell() + + # Return the total size from the start position + # This ensures Content-Length is correct even after reading + return os.fstat(self._value.fileno()).st_size - self._start_position + except (AttributeError, OSError): + return None + + async def write(self, writer: AbstractStreamWriter) -> None: + """ + Write the entire file-like payload to the writer stream. + + Args: + writer: An AbstractStreamWriter instance that handles the actual writing + + This method writes the entire file content without any length constraint. + It delegates to write_with_length() with no length limit for implementation + consistency. + + Note: + For new implementations that need length control, use write_with_length() directly. + This method is maintained for backwards compatibility with existing code. 
+ + """ + await self.write_with_length(writer, None) + + async def write_with_length( + self, writer: AbstractStreamWriter, content_length: Optional[int] + ) -> None: + """ + Write file-like payload with a specific content length constraint. + + Args: + writer: An AbstractStreamWriter instance that handles the actual writing + content_length: Maximum number of bytes to write (None for unlimited) + + This method implements optimized streaming of file content with length constraints: + + 1. File reading is performed in a thread pool to avoid blocking the event loop + 2. Content is read and written in chunks to maintain memory efficiency + 3. Writing stops when either: + - All available file content has been written (when size is known) + - The specified content_length has been reached + 4. File resources are properly closed even if the operation is cancelled + + The implementation carefully handles both known-size and unknown-size payloads, + as well as constrained and unconstrained content lengths. + + """ + loop = asyncio.get_running_loop() + total_written_len = 0 + remaining_content_len = content_length + + # Get initial data and available length + available_len, chunk = await loop.run_in_executor( + None, self._read_and_available_len, remaining_content_len + ) + # Process data chunks until done + while chunk: + chunk_len = len(chunk) + + # Write data with or without length constraint + if remaining_content_len is None: + await writer.write(chunk) + else: + await writer.write(chunk[:remaining_content_len]) + remaining_content_len -= chunk_len + + total_written_len += chunk_len + + # Check if we're done writing + if self._should_stop_writing( + available_len, total_written_len, remaining_content_len + ): + return + + # Read next chunk + chunk = await loop.run_in_executor( + None, + self._read, + ( + min(READ_SIZE, remaining_content_len) + if remaining_content_len is not None + else READ_SIZE + ), + ) + + def _should_stop_writing( + self, + available_len: Optional[int], + total_written_len: int, + remaining_content_len: Optional[int], + ) -> bool: + """ + Determine if we should stop writing data. + + Args: + available_len: Known size of the payload if available (None if unknown) + total_written_len: Number of bytes already written + remaining_content_len: Remaining bytes to be written for content-length limited responses + + Returns: + True if we should stop writing data, based on either: + - Having written all available data (when size is known) + - Having written all requested content (when content-length is specified) + + """ + return (available_len is not None and total_written_len >= available_len) or ( + remaining_content_len is not None and remaining_content_len <= 0 + ) + + def _close(self) -> None: + """ + Async safe synchronous close operations for backwards compatibility. + + This method exists only for backwards + compatibility. Use the async close() method instead. + + WARNING: This method MUST be called from within an event loop. + Calling it outside an event loop will raise RuntimeError. + """ + # Skip if already consumed + if self._consumed: + return + self._consumed = True # Mark as consumed to prevent further writes + # Schedule file closing without awaiting to prevent cancellation issues + loop = asyncio.get_running_loop() + close_future = loop.run_in_executor(None, self._value.close) + # Hold a strong reference to the future to prevent it from being + # garbage collected before it completes. 
+ _CLOSE_FUTURES.add(close_future) + close_future.add_done_callback(_CLOSE_FUTURES.remove) + + async def close(self) -> None: + """ + Close the payload if it holds any resources. + + IMPORTANT: This method must not await anything that might not finish + immediately, as it may be called during cleanup/cancellation. Schedule + any long-running operations without awaiting them. + """ + self._close() + + def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str: + """ + Return string representation of the value. + + WARNING: This method does blocking I/O and should not be called in the event loop. + """ + return self._read_all().decode(encoding, errors) + + def _read_all(self) -> bytes: + """Read the entire file-like object and return its content as bytes.""" + self._set_or_restore_start_position() + # Use readlines() to ensure we get all content + return b"".join(self._value.readlines()) + + async def as_bytes(self, encoding: str = "utf-8", errors: str = "strict") -> bytes: + """ + Return bytes representation of the value. + + This method reads the entire file content and returns it as bytes. + It is equivalent to reading the file-like object directly. + The file reading is performed in an executor to avoid blocking the event loop. + """ + loop = asyncio.get_running_loop() + return await loop.run_in_executor(None, self._read_all) + + +class TextIOPayload(IOBasePayload): + _value: io.TextIOBase + # _autoclose = False (inherited) - Has text file handle that needs explicit closing + + def __init__( + self, + value: TextIO, + *args: Any, + encoding: Optional[str] = None, + content_type: Optional[str] = None, + **kwargs: Any, + ) -> None: + + if encoding is None: + if content_type is None: + encoding = "utf-8" + content_type = "text/plain; charset=utf-8" + else: + mimetype = parse_mimetype(content_type) + encoding = mimetype.parameters.get("charset", "utf-8") + else: + if content_type is None: + content_type = "text/plain; charset=%s" % encoding + + super().__init__( + value, + content_type=content_type, + encoding=encoding, + *args, + **kwargs, + ) + + def _read_and_available_len( + self, remaining_content_len: Optional[int] + ) -> Tuple[Optional[int], bytes]: + """ + Read the text file-like object and return both its total size and the first chunk. + + Args: + remaining_content_len: Optional limit on how many bytes to read in this operation. + If None, READ_SIZE will be used as the default chunk size. + + Returns: + A tuple containing: + - The total size of the remaining unread content (None if size cannot be determined) + - The first chunk of bytes read from the file object, encoded using the payload's encoding + + This method is optimized to perform both size calculation and initial read + in a single operation, which is executed in a single executor job to minimize + context switches and file operations when streaming content. + + Note: + TextIOPayload handles encoding of the text content before writing it + to the stream. If no encoding is specified, UTF-8 is used as the default. + + """ + self._set_or_restore_start_position() + size = self.size + chunk = self._value.read( + min(READ_SIZE, size or READ_SIZE, remaining_content_len or READ_SIZE) + ) + return size, chunk.encode(self._encoding) if self._encoding else chunk.encode() + + def _read(self, remaining_content_len: Optional[int]) -> bytes: + """ + Read a chunk of data from the text file-like object. + + Args: + remaining_content_len: Optional maximum number of bytes to read. 
+                If None, READ_SIZE will be used as the default chunk size.
+
+        Returns:
+            A chunk of bytes read from the file object and encoded using the payload's
+            encoding. The data is automatically converted from text to bytes.
+
+        This method is used for subsequent reads during streaming after
+        the initial _read_and_available_len call has been made. It properly
+        handles text encoding, converting the text content to bytes using
+        the specified encoding (or UTF-8 if none was provided).
+
+        """
+        chunk = self._value.read(remaining_content_len or READ_SIZE)
+        return chunk.encode(self._encoding) if self._encoding else chunk.encode()
+
+    def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str:
+        """
+        Return string representation of the value.
+
+        WARNING: This method does blocking I/O and should not be called in the event loop.
+        """
+        self._set_or_restore_start_position()
+        return self._value.read()
+
+    async def as_bytes(self, encoding: str = "utf-8", errors: str = "strict") -> bytes:
+        """
+        Return bytes representation of the value.
+
+        This method reads the entire text file content and returns it as bytes.
+        It encodes the text content using the specified encoding.
+        The file reading is performed in an executor to avoid blocking the event loop.
+        """
+        loop = asyncio.get_running_loop()
+
+        # Use instance encoding if available, otherwise use parameter
+        actual_encoding = self._encoding or encoding
+
+        def _read_and_encode() -> bytes:
+            self._set_or_restore_start_position()
+            # TextIO read() always returns the full content
+            return self._value.read().encode(actual_encoding, errors)
+
+        return await loop.run_in_executor(None, _read_and_encode)
+
+
+class BytesIOPayload(IOBasePayload):
+    _value: io.BytesIO
+    _size: int  # Always initialized in __init__
+    _autoclose = True  # BytesIO is in-memory, safe to auto-close
+
+    def __init__(self, value: io.BytesIO, *args: Any, **kwargs: Any) -> None:
+        super().__init__(value, *args, **kwargs)
+        # Calculate size once during initialization
+        self._size = len(self._value.getbuffer()) - self._value.tell()
+
+    @property
+    def size(self) -> int:
+        """Size of the payload in bytes.
+
+        Returns the number of bytes in the BytesIO buffer that will be transmitted.
+        This is calculated once during initialization for efficiency.
+        """
+        return self._size
+
+    def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str:
+        self._set_or_restore_start_position()
+        return self._value.read().decode(encoding, errors)
+
+    async def write(self, writer: AbstractStreamWriter) -> None:
+        return await self.write_with_length(writer, None)
+
+    async def write_with_length(
+        self, writer: AbstractStreamWriter, content_length: Optional[int]
+    ) -> None:
+        """
+        Write BytesIO payload with a specific content length constraint.
+
+        Args:
+            writer: An AbstractStreamWriter instance that handles the actual writing
+            content_length: Maximum number of bytes to write (None for unlimited)
+
+        This implementation is specifically optimized for BytesIO objects:
+
+        1. Reads content in chunks to maintain memory efficiency
+        2. Yields control back to the event loop periodically to prevent blocking
+           when dealing with large BytesIO objects
+        3. Respects content_length constraints when specified
+        4. Requires no explicit cleanup afterwards: the buffer is in-memory and
+           close() is a no-op for this payload type
+
+        The periodic yielding to the event loop is important for maintaining
+        responsiveness when processing large in-memory buffers.
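+
+        Example (an illustrative sketch, not from the upstream docs)::
+
+            # `writer` is assumed to be an AbstractStreamWriter instance
+            payload = BytesIOPayload(io.BytesIO(b"x" * 10_000_000))
+            await payload.write_with_length(writer, 1024)  # writes the first 1 KiB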
+ + """ + self._set_or_restore_start_position() + loop_count = 0 + remaining_bytes = content_length + while chunk := self._value.read(READ_SIZE): + if loop_count > 0: + # Avoid blocking the event loop + # if they pass a large BytesIO object + # and we are not in the first iteration + # of the loop + await asyncio.sleep(0) + if remaining_bytes is None: + await writer.write(chunk) + else: + await writer.write(chunk[:remaining_bytes]) + remaining_bytes -= len(chunk) + if remaining_bytes <= 0: + return + loop_count += 1 + + async def as_bytes(self, encoding: str = "utf-8", errors: str = "strict") -> bytes: + """ + Return bytes representation of the value. + + This method reads the entire BytesIO content and returns it as bytes. + It is equivalent to accessing the _value attribute directly. + """ + self._set_or_restore_start_position() + return self._value.read() + + async def close(self) -> None: + """ + Close the BytesIO payload. + + This does nothing since BytesIO is in-memory and does not require explicit closing. + """ + + +class BufferedReaderPayload(IOBasePayload): + _value: io.BufferedIOBase + # _autoclose = False (inherited) - Has buffered file handle that needs explicit closing + + def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str: + self._set_or_restore_start_position() + return self._value.read().decode(encoding, errors) + + +class JsonPayload(BytesPayload): + def __init__( + self, + value: Any, + encoding: str = "utf-8", + content_type: str = "application/json", + dumps: JSONEncoder = json.dumps, + *args: Any, + **kwargs: Any, + ) -> None: + + super().__init__( + dumps(value).encode(encoding), + content_type=content_type, + encoding=encoding, + *args, + **kwargs, + ) + + +if TYPE_CHECKING: + from typing import AsyncIterable, AsyncIterator + + _AsyncIterator = AsyncIterator[bytes] + _AsyncIterable = AsyncIterable[bytes] +else: + from collections.abc import AsyncIterable, AsyncIterator + + _AsyncIterator = AsyncIterator + _AsyncIterable = AsyncIterable + + +class AsyncIterablePayload(Payload): + + _iter: Optional[_AsyncIterator] = None + _value: _AsyncIterable + _cached_chunks: Optional[List[bytes]] = None + # _consumed stays False to allow reuse with cached content + _autoclose = True # Iterator doesn't need explicit closing + + def __init__(self, value: _AsyncIterable, *args: Any, **kwargs: Any) -> None: + if not isinstance(value, AsyncIterable): + raise TypeError( + "value argument must support " + "collections.abc.AsyncIterable interface, " + "got {!r}".format(type(value)) + ) + + if "content_type" not in kwargs: + kwargs["content_type"] = "application/octet-stream" + + super().__init__(value, *args, **kwargs) + + self._iter = value.__aiter__() + + async def write(self, writer: AbstractStreamWriter) -> None: + """ + Write the entire async iterable payload to the writer stream. + + Args: + writer: An AbstractStreamWriter instance that handles the actual writing + + This method iterates through the async iterable and writes each chunk + to the writer without any length constraint. + + Note: + For new implementations that need length control, use write_with_length() directly. + This method is maintained for backwards compatibility with existing code. + + """ + await self.write_with_length(writer, None) + + async def write_with_length( + self, writer: AbstractStreamWriter, content_length: Optional[int] + ) -> None: + """ + Write async iterable payload with a specific content length constraint. 
+ + Args: + writer: An AbstractStreamWriter instance that handles the actual writing + content_length: Maximum number of bytes to write (None for unlimited) + + This implementation handles streaming of async iterable content with length constraints: + + 1. If cached chunks are available, writes from them + 2. Otherwise iterates through the async iterable one chunk at a time + 3. Respects content_length constraints when specified + 4. Does NOT generate cache - that's done by as_bytes() + + """ + # If we have cached chunks, use them + if self._cached_chunks is not None: + remaining_bytes = content_length + for chunk in self._cached_chunks: + if remaining_bytes is None: + await writer.write(chunk) + elif remaining_bytes > 0: + await writer.write(chunk[:remaining_bytes]) + remaining_bytes -= len(chunk) + else: + break + return + + # If iterator is exhausted and we don't have cached chunks, nothing to write + if self._iter is None: + return + + # Stream from the iterator + remaining_bytes = content_length + + try: + while True: + if sys.version_info >= (3, 10): + chunk = await anext(self._iter) + else: + chunk = await self._iter.__anext__() + if remaining_bytes is None: + await writer.write(chunk) + # If we have a content length limit + elif remaining_bytes > 0: + await writer.write(chunk[:remaining_bytes]) + remaining_bytes -= len(chunk) + # We still want to exhaust the iterator even + # if we have reached the content length limit + # since the file handle may not get closed by + # the iterator if we don't do this + except StopAsyncIteration: + # Iterator is exhausted + self._iter = None + self._consumed = True # Mark as consumed when streamed without caching + + def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str: + """Decode the payload content as a string if cached chunks are available.""" + if self._cached_chunks is not None: + return b"".join(self._cached_chunks).decode(encoding, errors) + raise TypeError("Unable to decode - content not cached. Call as_bytes() first.") + + async def as_bytes(self, encoding: str = "utf-8", errors: str = "strict") -> bytes: + """ + Return bytes representation of the value. + + This method reads the entire async iterable content and returns it as bytes. + It generates and caches the chunks for future reuse. 
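+
+        Example (an illustrative sketch, not from the upstream docs)::
+
+            async def gen() -> AsyncIterator[bytes]:
+                yield b"hello "
+                yield b"world"
+
+            payload = AsyncIterablePayload(gen())
+            data = await payload.as_bytes()   # b"hello world", chunks now cached
+            again = await payload.as_bytes()  # served from the cache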
+
+        """
+        # If we have cached chunks, return them joined
+        if self._cached_chunks is not None:
+            return b"".join(self._cached_chunks)
+
+        # If iterator is exhausted and no cache, return empty
+        if self._iter is None:
+            return b""
+
+        # Read all chunks and cache them
+        chunks: List[bytes] = []
+        async for chunk in self._iter:
+            chunks.append(chunk)
+
+        # Iterator is exhausted, cache the chunks
+        self._iter = None
+        self._cached_chunks = chunks
+        # Keep _consumed as False to allow reuse with cached chunks
+
+        return b"".join(chunks)
+
+
+class StreamReaderPayload(AsyncIterablePayload):
+    def __init__(self, value: StreamReader, *args: Any, **kwargs: Any) -> None:
+        super().__init__(value.iter_any(), *args, **kwargs)
+
+
+PAYLOAD_REGISTRY = PayloadRegistry()
+PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, memoryview))
+PAYLOAD_REGISTRY.register(StringPayload, str)
+PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO)
+PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase)
+PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO)
+PAYLOAD_REGISTRY.register(BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom))
+PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase)
+PAYLOAD_REGISTRY.register(StreamReaderPayload, StreamReader)
+# try_last gives more specialized async iterables, such as
+# multipart.BodyPartReaderPayload, a chance to override the default
+PAYLOAD_REGISTRY.register(AsyncIterablePayload, AsyncIterable, order=Order.try_last)
diff --git a/py311/lib/python3.11/site-packages/aiohttp/payload_streamer.py b/py311/lib/python3.11/site-packages/aiohttp/payload_streamer.py
new file mode 100644
index 0000000000000000000000000000000000000000..831fdc0a77f302acaf9a000be408fe7c9a9035aa
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/aiohttp/payload_streamer.py
@@ -0,0 +1,78 @@
+"""
+Payload implementation for coroutines as a data provider.
+
+As a simple case, you can upload data from a file::
+
+    @aiohttp.streamer
+    async def file_sender(writer, file_name=None):
+        with open(file_name, 'rb') as f:
+            chunk = f.read(2**16)
+            while chunk:
+                await writer.write(chunk)
+
+                chunk = f.read(2**16)
+
+Then you can use `file_sender` like this::
+
+    async with session.post('http://httpbin.org/post',
+                            data=file_sender(file_name='huge_file')) as resp:
+        print(await resp.text())
+
+.. note:: The coroutine must accept `writer` as its first argument
+
+"""
+
+import types
+import warnings
+from typing import Any, Awaitable, Callable, Dict, Tuple
+
+from .abc import AbstractStreamWriter
+from .payload import Payload, payload_type
+
+__all__ = ("streamer",)
+
+
+class _stream_wrapper:
+    def __init__(
+        self,
+        coro: Callable[..., Awaitable[None]],
+        args: Tuple[Any, ...],
+        kwargs: Dict[str, Any],
+    ) -> None:
+        self.coro = types.coroutine(coro)
+        self.args = args
+        self.kwargs = kwargs
+
+    async def __call__(self, writer: AbstractStreamWriter) -> None:
+        await self.coro(writer, *self.args, **self.kwargs)
+
+
+class streamer:
+    def __init__(self, coro: Callable[..., Awaitable[None]]) -> None:
+        warnings.warn(
+            "@streamer is deprecated, use async generators instead",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        self.coro = coro
+
+    def __call__(self, *args: Any, **kwargs: Any) -> _stream_wrapper:
+        return _stream_wrapper(self.coro, args, kwargs)
+
+
+@payload_type(_stream_wrapper)
+class StreamWrapperPayload(Payload):
+    async def write(self, writer: AbstractStreamWriter) -> None:
+        await self._value(writer)
+
+    def decode(self, encoding: str = "utf-8", errors: str = "strict") -> str:
+        raise TypeError("Unable to decode.")
+
+
+@payload_type(streamer)
+class StreamPayload(StreamWrapperPayload):
+    def __init__(self, value: Any, *args: Any, **kwargs: Any) -> None:
+        super().__init__(value(), *args, **kwargs)
+
+    async def write(self, writer: AbstractStreamWriter) -> None:
+        await self._value(writer)
diff --git a/py311/lib/python3.11/site-packages/aiohttp/py.typed b/py311/lib/python3.11/site-packages/aiohttp/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..f5642f79f21d872f010979dcf6f0c4a415acc19d
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/aiohttp/py.typed
@@ -0,0 +1 @@
+Marker
diff --git a/py311/lib/python3.11/site-packages/aiohttp/pytest_plugin.py b/py311/lib/python3.11/site-packages/aiohttp/pytest_plugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d59fe820d697632c9a3311ff96841ebc0ee735b
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/aiohttp/pytest_plugin.py
@@ -0,0 +1,444 @@
+import asyncio
+import contextlib
+import inspect
+import warnings
+from typing import (
+    Any,
+    Awaitable,
+    Callable,
+    Dict,
+    Iterator,
+    Optional,
+    Protocol,
+    Union,
+    overload,
+)
+
+import pytest
+
+from .test_utils import (
+    BaseTestServer,
+    RawTestServer,
+    TestClient,
+    TestServer,
+    loop_context,
+    setup_test_loop,
+    teardown_test_loop,
+    unused_port as _unused_port,
+)
+from .web import Application, BaseRequest, Request
+from .web_protocol import _RequestHandler
+
+try:
+    import uvloop
+except ImportError:  # pragma: no cover
+    uvloop = None  # type: ignore[assignment]
+
+
+class AiohttpClient(Protocol):
+    @overload
+    async def __call__(
+        self,
+        __param: Application,
+        *,
+        server_kwargs: Optional[Dict[str, Any]] = None,
+        **kwargs: Any,
+    ) -> TestClient[Request, Application]: ...
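+    # An illustrative sketch (not part of the upstream module): the overloads
+    # of this Protocol mirror the two argument types the aiohttp_client
+    # fixture accepts. Assuming a trivial app, the Application overload is
+    # exercised like this:
+    #
+    #     from aiohttp import web
+    #
+    #     async def test_hello(aiohttp_client: "AiohttpClient") -> None:
+    #         async def hello(request: web.Request) -> web.Response:
+    #             return web.Response(text="ok")
+    #
+    #         app = web.Application()
+    #         app.router.add_get("/", hello)
+    #         client = await aiohttp_client(app)
+    #         resp = await client.get("/")
+    #         assert resp.status == 200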
+    @overload
+    async def __call__(
+        self,
+        __param: BaseTestServer,
+        *,
+        server_kwargs: Optional[Dict[str, Any]] = None,
+        **kwargs: Any,
+    ) -> TestClient[BaseRequest, None]: ...
+
+
+class AiohttpServer(Protocol):
+    def __call__(
+        self, app: Application, *, port: Optional[int] = None, **kwargs: Any
+    ) -> Awaitable[TestServer]: ...
+
+
+class AiohttpRawServer(Protocol):
+    def __call__(
+        self, handler: _RequestHandler, *, port: Optional[int] = None, **kwargs: Any
+    ) -> Awaitable[RawTestServer]: ...
+
+
+def pytest_addoption(parser):  # type: ignore[no-untyped-def]
+    parser.addoption(
+        "--aiohttp-fast",
+        action="store_true",
+        default=False,
+        help="run tests faster by disabling extra checks",
+    )
+    parser.addoption(
+        "--aiohttp-loop",
+        action="store",
+        default="pyloop",
+        help="run tests with specific loop: pyloop, uvloop or all",
+    )
+    parser.addoption(
+        "--aiohttp-enable-loop-debug",
+        action="store_true",
+        default=False,
+        help="enable event loop debug mode",
+    )
+
+
+def pytest_fixture_setup(fixturedef):  # type: ignore[no-untyped-def]
+    """Set up pytest fixture.
+
+    Allow fixtures to be coroutines. Run coroutine fixtures in an event loop.
+    """
+    func = fixturedef.func
+
+    if inspect.isasyncgenfunction(func):
+        # async generator fixture
+        is_async_gen = True
+    elif inspect.iscoroutinefunction(func):
+        # regular async fixture
+        is_async_gen = False
+    else:
+        # not an async fixture, nothing to do
+        return
+
+    strip_request = False
+    if "request" not in fixturedef.argnames:
+        fixturedef.argnames += ("request",)
+        strip_request = True
+
+    def wrapper(*args, **kwargs):  # type: ignore[no-untyped-def]
+        request = kwargs["request"]
+        if strip_request:
+            del kwargs["request"]
+
+        # if neither the fixture nor the test use the 'loop' fixture,
+        # 'getfixturevalue' will fail because the test is not parameterized
+        # (this can be removed someday if 'loop' is no longer parameterized)
+        if "loop" not in request.fixturenames:
+            raise Exception(
+                "Asynchronous fixtures must depend on the 'loop' fixture or "
+                "be used in tests depending on it."
+            )
+
+        _loop = request.getfixturevalue("loop")
+
+        if is_async_gen:
+            # for async generators, we need to advance the generator once,
+            # then advance it again in a finalizer
+            gen = func(*args, **kwargs)
+
+            def finalizer():  # type: ignore[no-untyped-def]
+                try:
+                    return _loop.run_until_complete(gen.__anext__())
+                except StopAsyncIteration:
+                    pass
+
+            request.addfinalizer(finalizer)
+            return _loop.run_until_complete(gen.__anext__())
+        else:
+            return _loop.run_until_complete(func(*args, **kwargs))
+
+    fixturedef.func = wrapper
+
+
+@pytest.fixture
+def fast(request):  # type: ignore[no-untyped-def]
+    """--fast config option"""
+    return request.config.getoption("--aiohttp-fast")
+
+
+@pytest.fixture
+def loop_debug(request):  # type: ignore[no-untyped-def]
+    """--enable-loop-debug config option"""
+    return request.config.getoption("--aiohttp-enable-loop-debug")
+
+
+@contextlib.contextmanager
+def _runtime_warning_context():  # type: ignore[no-untyped-def]
+    """Context manager which checks for RuntimeWarnings.
+
+    This exists specifically to avoid "coroutine 'X' was never awaited"
+    warnings being missed.
+
+    If RuntimeWarnings occur in the context, a RuntimeError is raised.
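+
+    Illustrative sketch (not from the upstream docs)::
+
+        with _runtime_warning_context():
+            loop.run_until_complete(test_coro())
+        # any RuntimeWarning recorded above is re-raised as a RuntimeError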
+    """
+    with warnings.catch_warnings(record=True) as _warnings:
+        yield
+        rw = [
+            "{w.filename}:{w.lineno}:{w.message}".format(w=w)
+            for w in _warnings
+            if w.category == RuntimeWarning
+        ]
+        if rw:
+            raise RuntimeError(
+                "{} Runtime Warning{},\n{}".format(
+                    len(rw), "" if len(rw) == 1 else "s", "\n".join(rw)
+                )
+            )
+
+
+@contextlib.contextmanager
+def _passthrough_loop_context(loop, fast=False):  # type: ignore[no-untyped-def]
+    """Passthrough loop context.
+
+    Sets up and tears down a loop unless one is passed in via the loop
+    argument, in which case it is passed straight through.
+    """
+    if loop:
+        # loop already exists, pass it straight through
+        yield loop
+    else:
+        # this shadows loop_context's standard behavior
+        loop = setup_test_loop()
+        yield loop
+        teardown_test_loop(loop, fast=fast)
+
+
+def pytest_pycollect_makeitem(collector, name, obj):  # type: ignore[no-untyped-def]
+    """Fix pytest collecting for coroutines."""
+    if collector.funcnamefilter(name) and inspect.iscoroutinefunction(obj):
+        return list(collector._genfunctions(name, obj))
+
+
+def pytest_pyfunc_call(pyfuncitem):  # type: ignore[no-untyped-def]
+    """Run coroutines in an event loop instead of a normal function call."""
+    fast = pyfuncitem.config.getoption("--aiohttp-fast")
+    if inspect.iscoroutinefunction(pyfuncitem.function):
+        existing_loop = (
+            pyfuncitem.funcargs.get("proactor_loop")
+            or pyfuncitem.funcargs.get("selector_loop")
+            or pyfuncitem.funcargs.get("uvloop_loop")
+            or pyfuncitem.funcargs.get("loop", None)
+        )
+
+        with _runtime_warning_context():
+            with _passthrough_loop_context(existing_loop, fast=fast) as _loop:
+                testargs = {
+                    arg: pyfuncitem.funcargs[arg]
+                    for arg in pyfuncitem._fixtureinfo.argnames
+                }
+                _loop.run_until_complete(pyfuncitem.obj(**testargs))
+
+        return True
+
+
+def pytest_generate_tests(metafunc):  # type: ignore[no-untyped-def]
+    if "loop_factory" not in metafunc.fixturenames:
+        return
+
+    loops = metafunc.config.option.aiohttp_loop
+    avail_factories: dict[str, Callable[[], asyncio.AbstractEventLoop]]
+    avail_factories = {"pyloop": asyncio.new_event_loop}
+
+    if uvloop is not None:  # pragma: no cover
+        avail_factories["uvloop"] = uvloop.new_event_loop
+
+    if loops == "all":
+        loops = "pyloop,uvloop?"
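+    # An illustrative note (not part of the upstream module): the trailing
+    # "?" marks a factory as optional, so "--aiohttp-loop=all" still collects
+    # on hosts without uvloop. A hypothetical test requesting the fixture is
+    # parametrized once per available factory:
+    #
+    #     async def test_loops(loop_factory) -> None:
+    #         ...  # collected as test_loops[pyloop] and, when uvloop is
+    #              # installed, test_loops[uvloop]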
+
+    factories = {}  # type: ignore[var-annotated]
+    for name in loops.split(","):
+        required = not name.endswith("?")
+        name = name.strip(" ?")
+        if name not in avail_factories:  # pragma: no cover
+            if required:
+                raise ValueError(
+                    "Unknown loop '%s', available loops: %s"
+                    % (name, list(avail_factories.keys()))
+                )
+            else:
+                continue
+        factories[name] = avail_factories[name]
+    metafunc.parametrize(
+        "loop_factory", list(factories.values()), ids=list(factories.keys())
+    )
+
+
+@pytest.fixture
+def loop(
+    loop_factory: Callable[[], asyncio.AbstractEventLoop],
+    fast: bool,
+    loop_debug: bool,
+) -> Iterator[asyncio.AbstractEventLoop]:
+    """Return an instance of the event loop."""
+    with loop_context(loop_factory, fast=fast) as _loop:
+        if loop_debug:
+            _loop.set_debug(True)  # pragma: no cover
+        asyncio.set_event_loop(_loop)
+        yield _loop
+
+
+@pytest.fixture
+def proactor_loop() -> Iterator[asyncio.AbstractEventLoop]:
+    factory = asyncio.ProactorEventLoop  # type: ignore[attr-defined]
+
+    with loop_context(factory) as _loop:
+        asyncio.set_event_loop(_loop)
+        yield _loop
+
+
+@pytest.fixture
+def unused_port(aiohttp_unused_port: Callable[[], int]) -> Callable[[], int]:
+    warnings.warn(
+        "Deprecated, use aiohttp_unused_port fixture instead",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return aiohttp_unused_port
+
+
+@pytest.fixture
+def aiohttp_unused_port() -> Callable[[], int]:
+    """Return a port that is unused on the current host."""
+    return _unused_port
+
+
+@pytest.fixture
+def aiohttp_server(loop: asyncio.AbstractEventLoop) -> Iterator[AiohttpServer]:
+    """Factory to create a TestServer instance, given an app.
+
+    aiohttp_server(app, **kwargs)
+    """
+    servers = []
+
+    async def go(
+        app: Application,
+        *,
+        host: str = "127.0.0.1",
+        port: Optional[int] = None,
+        **kwargs: Any,
+    ) -> TestServer:
+        server = TestServer(app, host=host, port=port)
+        await server.start_server(loop=loop, **kwargs)
+        servers.append(server)
+        return server
+
+    yield go
+
+    async def finalize() -> None:
+        while servers:
+            await servers.pop().close()
+
+    loop.run_until_complete(finalize())
+
+
+@pytest.fixture
+def test_server(aiohttp_server):  # type: ignore[no-untyped-def]  # pragma: no cover
+    warnings.warn(
+        "Deprecated, use aiohttp_server fixture instead",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return aiohttp_server
+
+
+@pytest.fixture
+def aiohttp_raw_server(loop: asyncio.AbstractEventLoop) -> Iterator[AiohttpRawServer]:
+    """Factory to create a RawTestServer instance, given a web handler.
+
+    aiohttp_raw_server(handler, **kwargs)
+    """
+    servers = []
+
+    async def go(
+        handler: _RequestHandler, *, port: Optional[int] = None, **kwargs: Any
+    ) -> RawTestServer:
+        server = RawTestServer(handler, port=port)
+        await server.start_server(loop=loop, **kwargs)
+        servers.append(server)
+        return server
+
+    yield go
+
+    async def finalize() -> None:
+        while servers:
+            await servers.pop().close()
+
+    loop.run_until_complete(finalize())
+
+
+@pytest.fixture
+def raw_test_server(  # type: ignore[no-untyped-def]  # pragma: no cover
+    aiohttp_raw_server,
+):
+    warnings.warn(
+        "Deprecated, use aiohttp_raw_server fixture instead",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return aiohttp_raw_server
+
+
+@pytest.fixture
+def aiohttp_client(loop: asyncio.AbstractEventLoop) -> Iterator[AiohttpClient]:
+    """Factory to create a TestClient instance.
+ + aiohttp_client(app, **kwargs) + aiohttp_client(server, **kwargs) + aiohttp_client(raw_server, **kwargs) + """ + clients = [] + + @overload + async def go( + __param: Application, + *, + server_kwargs: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> TestClient[Request, Application]: ... + + @overload + async def go( + __param: BaseTestServer, + *, + server_kwargs: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> TestClient[BaseRequest, None]: ... + + async def go( + __param: Union[Application, BaseTestServer], + *args: Any, + server_kwargs: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> TestClient[Any, Any]: + if isinstance(__param, Callable) and not isinstance( # type: ignore[arg-type] + __param, (Application, BaseTestServer) + ): + __param = __param(loop, *args, **kwargs) + kwargs = {} + else: + assert not args, "args should be empty" + + if isinstance(__param, Application): + server_kwargs = server_kwargs or {} + server = TestServer(__param, loop=loop, **server_kwargs) + client = TestClient(server, loop=loop, **kwargs) + elif isinstance(__param, BaseTestServer): + client = TestClient(__param, loop=loop, **kwargs) + else: + raise ValueError("Unknown argument type: %r" % type(__param)) + + await client.start_server() + clients.append(client) + return client + + yield go + + async def finalize() -> None: + while clients: + await clients.pop().close() + + loop.run_until_complete(finalize()) + + +@pytest.fixture +def test_client(aiohttp_client): # type: ignore[no-untyped-def] # pragma: no cover + warnings.warn( + "Deprecated, use aiohttp_client fixture instead", + DeprecationWarning, + stacklevel=2, + ) + return aiohttp_client diff --git a/py311/lib/python3.11/site-packages/aiohttp/resolver.py b/py311/lib/python3.11/site-packages/aiohttp/resolver.py new file mode 100644 index 0000000000000000000000000000000000000000..b20e5672ce51a1aadd7768301a3a1c8eb6007bf4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/resolver.py @@ -0,0 +1,274 @@ +import asyncio +import socket +import weakref +from typing import Any, Dict, Final, List, Optional, Tuple, Type, Union + +from .abc import AbstractResolver, ResolveResult + +__all__ = ("ThreadedResolver", "AsyncResolver", "DefaultResolver") + + +try: + import aiodns + + aiodns_default = hasattr(aiodns.DNSResolver, "getaddrinfo") +except ImportError: # pragma: no cover + aiodns = None # type: ignore[assignment] + aiodns_default = False + + +_NUMERIC_SOCKET_FLAGS = socket.AI_NUMERICHOST | socket.AI_NUMERICSERV +_NAME_SOCKET_FLAGS = socket.NI_NUMERICHOST | socket.NI_NUMERICSERV +_AI_ADDRCONFIG = socket.AI_ADDRCONFIG +if hasattr(socket, "AI_MASK"): + _AI_ADDRCONFIG &= socket.AI_MASK + + +class ThreadedResolver(AbstractResolver): + """Threaded resolver. + + Uses an Executor for synchronous getaddrinfo() calls. + concurrent.futures.ThreadPoolExecutor is used by default. 
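+
+    Illustrative sketch (not from the upstream docs)::
+
+        resolver = ThreadedResolver()  # requires a running event loop
+        results = await resolver.resolve("example.org", 443)
+        # each ResolveResult carries host, port, family, proto and flags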
+    """
+
+    def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
+        self._loop = loop or asyncio.get_running_loop()
+
+    async def resolve(
+        self, host: str, port: int = 0, family: socket.AddressFamily = socket.AF_INET
+    ) -> List[ResolveResult]:
+        infos = await self._loop.getaddrinfo(
+            host,
+            port,
+            type=socket.SOCK_STREAM,
+            family=family,
+            flags=_AI_ADDRCONFIG,
+        )
+
+        hosts: List[ResolveResult] = []
+        for family, _, proto, _, address in infos:
+            if family == socket.AF_INET6:
+                if len(address) < 3:
+                    # IPv6 is not supported by Python build,
+                    # or IPv6 is not enabled in the host
+                    continue
+                if address[3]:
+                    # This is essential for link-local IPv6 addresses.
+                    # LL IPv6 is a VERY rare case. Strictly speaking, we should use
+                    # getnameinfo() unconditionally, but we restrict it to this
+                    # rare case for performance reasons.
+                    resolved_host, _port = await self._loop.getnameinfo(
+                        address, _NAME_SOCKET_FLAGS
+                    )
+                    port = int(_port)
+                else:
+                    resolved_host, port = address[:2]
+            else:  # IPv4
+                assert family == socket.AF_INET
+                resolved_host, port = address  # type: ignore[misc]
+            hosts.append(
+                ResolveResult(
+                    hostname=host,
+                    host=resolved_host,
+                    port=port,
+                    family=family,
+                    proto=proto,
+                    flags=_NUMERIC_SOCKET_FLAGS,
+                )
+            )
+
+        return hosts
+
+    async def close(self) -> None:
+        pass
+
+
+class AsyncResolver(AbstractResolver):
+    """Use the `aiodns` package to make asynchronous DNS lookups"""
+
+    def __init__(
+        self,
+        loop: Optional[asyncio.AbstractEventLoop] = None,
+        *args: Any,
+        **kwargs: Any,
+    ) -> None:
+        if aiodns is None:
+            raise RuntimeError("Resolver requires aiodns library")
+
+        self._loop = loop or asyncio.get_running_loop()
+        self._manager: Optional[_DNSResolverManager] = None
+        # If custom args are provided, create a dedicated resolver instance
+        # This means each AsyncResolver with custom args gets its own
+        # aiodns.DNSResolver instance
+        if args or kwargs:
+            self._resolver = aiodns.DNSResolver(*args, **kwargs)
+            return
+        # Use the shared resolver from the manager for default arguments
+        self._manager = _DNSResolverManager()
+        self._resolver = self._manager.get_resolver(self, self._loop)
+
+        if not hasattr(self._resolver, "gethostbyname"):
+            # aiodns 1.1 is not available, fall back to DNSResolver.query
+            self.resolve = self._resolve_with_query  # type: ignore
+
+    async def resolve(
+        self, host: str, port: int = 0, family: socket.AddressFamily = socket.AF_INET
+    ) -> List[ResolveResult]:
+        try:
+            resp = await self._resolver.getaddrinfo(
+                host,
+                port=port,
+                type=socket.SOCK_STREAM,
+                family=family,
+                flags=_AI_ADDRCONFIG,
+            )
+        except aiodns.error.DNSError as exc:
+            msg = exc.args[1] if len(exc.args) >= 2 else "DNS lookup failed"
+            raise OSError(None, msg) from exc
+        hosts: List[ResolveResult] = []
+        for node in resp.nodes:
+            address: Union[Tuple[bytes, int], Tuple[bytes, int, int, int]] = node.addr
+            family = node.family
+            if family == socket.AF_INET6:
+                if len(address) > 3 and address[3]:
+                    # This is essential for link-local IPv6 addresses.
+                    # LL IPv6 is a VERY rare case. Strictly speaking, we should use
+                    # getnameinfo() unconditionally, but we restrict it to this
+                    # rare case for performance reasons.
+                    result = await self._resolver.getnameinfo(
+                        (address[0].decode("ascii"), *address[1:]),
+                        _NAME_SOCKET_FLAGS,
+                    )
+                    resolved_host = result.node
+                else:
+                    resolved_host = address[0].decode("ascii")
+                    port = address[1]
+            else:  # IPv4
+                assert family == socket.AF_INET
+                resolved_host = address[0].decode("ascii")
+                port = address[1]
+            hosts.append(
+                ResolveResult(
+                    hostname=host,
+                    host=resolved_host,
+                    port=port,
+                    family=family,
+                    proto=0,
+                    flags=_NUMERIC_SOCKET_FLAGS,
+                )
+            )
+
+        if not hosts:
+            raise OSError(None, "DNS lookup failed")
+
+        return hosts
+
+    async def _resolve_with_query(
+        self, host: str, port: int = 0, family: int = socket.AF_INET
+    ) -> List[Dict[str, Any]]:
+        qtype: Final = "AAAA" if family == socket.AF_INET6 else "A"
+
+        try:
+            resp = await self._resolver.query(host, qtype)
+        except aiodns.error.DNSError as exc:
+            msg = exc.args[1] if len(exc.args) >= 2 else "DNS lookup failed"
+            raise OSError(None, msg) from exc
+
+        hosts = []
+        for rr in resp:
+            hosts.append(
+                {
+                    "hostname": host,
+                    "host": rr.host,
+                    "port": port,
+                    "family": family,
+                    "proto": 0,
+                    "flags": socket.AI_NUMERICHOST,
+                }
+            )
+
+        if not hosts:
+            raise OSError(None, "DNS lookup failed")
+
+        return hosts
+
+    async def close(self) -> None:
+        if self._manager:
+            # Release the resolver from the manager if using the shared resolver
+            self._manager.release_resolver(self, self._loop)
+            self._manager = None  # Clear reference to manager
+            self._resolver = None  # type: ignore[assignment]  # Clear reference to resolver
+            return
+        # Otherwise cancel our dedicated resolver
+        if self._resolver is not None:
+            self._resolver.cancel()
+            self._resolver = None  # type: ignore[assignment]  # Clear reference
+
+
+class _DNSResolverManager:
+    """Manager for aiodns.DNSResolver objects.
+
+    This class manages shared aiodns.DNSResolver instances
+    with no custom arguments across different event loops.
+    """
+
+    _instance: Optional["_DNSResolverManager"] = None
+
+    def __new__(cls) -> "_DNSResolverManager":
+        if cls._instance is None:
+            cls._instance = super().__new__(cls)
+            cls._instance._init()
+        return cls._instance
+
+    def _init(self) -> None:
+        # Use WeakKeyDictionary to allow event loops to be garbage collected
+        self._loop_data: weakref.WeakKeyDictionary[
+            asyncio.AbstractEventLoop,
+            tuple["aiodns.DNSResolver", weakref.WeakSet["AsyncResolver"]],
+        ] = weakref.WeakKeyDictionary()
+
+    def get_resolver(
+        self, client: "AsyncResolver", loop: asyncio.AbstractEventLoop
+    ) -> "aiodns.DNSResolver":
+        """Get or create the shared aiodns.DNSResolver instance for a specific event loop.
+
+        Args:
+            client: The AsyncResolver instance requesting the resolver.
+                This is required to track resolver usage.
+            loop: The event loop to use for the resolver.
+        """
+        # Create a new resolver and client set for this loop if it doesn't exist
+        if loop not in self._loop_data:
+            resolver = aiodns.DNSResolver(loop=loop)
+            client_set: weakref.WeakSet["AsyncResolver"] = weakref.WeakSet()
+            self._loop_data[loop] = (resolver, client_set)
+        else:
+            # Get the existing resolver and client set
+            resolver, client_set = self._loop_data[loop]
+
+        # Register this client with the loop
+        client_set.add(client)
+        return resolver
+
+    def release_resolver(
+        self, client: "AsyncResolver", loop: asyncio.AbstractEventLoop
+    ) -> None:
+        """Release the resolver for an AsyncResolver client when it's closed.
+
+        Args:
+            client: The AsyncResolver instance to release.
+            loop: The event loop the resolver was using.
+ """ + # Remove client from its loop's tracking + current_loop_data = self._loop_data.get(loop) + if current_loop_data is None: + return + resolver, client_set = current_loop_data + client_set.discard(client) + # If no more clients for this loop, cancel and remove its resolver + if not client_set: + if resolver is not None: + resolver.cancel() + del self._loop_data[loop] + + +_DefaultType = Type[Union[AsyncResolver, ThreadedResolver]] +DefaultResolver: _DefaultType = AsyncResolver if aiodns_default else ThreadedResolver diff --git a/py311/lib/python3.11/site-packages/aiohttp/streams.py b/py311/lib/python3.11/site-packages/aiohttp/streams.py new file mode 100644 index 0000000000000000000000000000000000000000..6cc74fc9cbd5bcbc7e213b79cadd8d42b701a812 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/streams.py @@ -0,0 +1,758 @@ +import asyncio +import collections +import warnings +from typing import ( + Awaitable, + Callable, + Deque, + Final, + Generic, + List, + Optional, + Tuple, + TypeVar, +) + +from .base_protocol import BaseProtocol +from .helpers import ( + _EXC_SENTINEL, + BaseTimerContext, + TimerNoop, + set_exception, + set_result, +) +from .log import internal_logger + +__all__ = ( + "EMPTY_PAYLOAD", + "EofStream", + "StreamReader", + "DataQueue", +) + +_T = TypeVar("_T") + + +class EofStream(Exception): + """eof stream indication.""" + + +class AsyncStreamIterator(Generic[_T]): + + __slots__ = ("read_func",) + + def __init__(self, read_func: Callable[[], Awaitable[_T]]) -> None: + self.read_func = read_func + + def __aiter__(self) -> "AsyncStreamIterator[_T]": + return self + + async def __anext__(self) -> _T: + try: + rv = await self.read_func() + except EofStream: + raise StopAsyncIteration + if rv == b"": + raise StopAsyncIteration + return rv + + +class ChunkTupleAsyncStreamIterator: + + __slots__ = ("_stream",) + + def __init__(self, stream: "StreamReader") -> None: + self._stream = stream + + def __aiter__(self) -> "ChunkTupleAsyncStreamIterator": + return self + + async def __anext__(self) -> Tuple[bytes, bool]: + rv = await self._stream.readchunk() + if rv == (b"", False): + raise StopAsyncIteration + return rv + + +class AsyncStreamReaderMixin: + + __slots__ = () + + def __aiter__(self) -> AsyncStreamIterator[bytes]: + return AsyncStreamIterator(self.readline) # type: ignore[attr-defined] + + def iter_chunked(self, n: int) -> AsyncStreamIterator[bytes]: + """Returns an asynchronous iterator that yields chunks of size n.""" + return AsyncStreamIterator(lambda: self.read(n)) # type: ignore[attr-defined] + + def iter_any(self) -> AsyncStreamIterator[bytes]: + """Yield all available data as soon as it is received.""" + return AsyncStreamIterator(self.readany) # type: ignore[attr-defined] + + def iter_chunks(self) -> ChunkTupleAsyncStreamIterator: + """Yield chunks of data as they are received by the server. + + The yielded objects are tuples + of (bytes, bool) as returned by the StreamReader.readchunk method. + """ + return ChunkTupleAsyncStreamIterator(self) # type: ignore[arg-type] + + +class StreamReader(AsyncStreamReaderMixin): + """An enhancement of asyncio.StreamReader. + + Supports asynchronous iteration by line, chunk or as available:: + + async for line in reader: + ... + async for chunk in reader.iter_chunked(1024): + ... + async for slice in reader.iter_any(): + ... 
+ + """ + + __slots__ = ( + "_protocol", + "_low_water", + "_high_water", + "_low_water_chunks", + "_high_water_chunks", + "_loop", + "_size", + "_cursor", + "_http_chunk_splits", + "_buffer", + "_buffer_offset", + "_eof", + "_waiter", + "_eof_waiter", + "_exception", + "_timer", + "_eof_callbacks", + "_eof_counter", + "total_bytes", + "total_compressed_bytes", + ) + + def __init__( + self, + protocol: BaseProtocol, + limit: int, + *, + timer: Optional[BaseTimerContext] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, + ) -> None: + self._protocol = protocol + self._low_water = limit + self._high_water = limit * 2 + if loop is None: + loop = asyncio.get_event_loop() + # Ensure high_water_chunks >= 3 so it's always > low_water_chunks. + self._high_water_chunks = max(3, limit // 4) + # Use max(2, ...) because there's always at least 1 chunk split remaining + # (the current position), so we need low_water >= 2 to allow resume. + self._low_water_chunks = max(2, self._high_water_chunks // 2) + self._loop = loop + self._size = 0 + self._cursor = 0 + self._http_chunk_splits: Optional[Deque[int]] = None + self._buffer: Deque[bytes] = collections.deque() + self._buffer_offset = 0 + self._eof = False + self._waiter: Optional[asyncio.Future[None]] = None + self._eof_waiter: Optional[asyncio.Future[None]] = None + self._exception: Optional[BaseException] = None + self._timer = TimerNoop() if timer is None else timer + self._eof_callbacks: List[Callable[[], None]] = [] + self._eof_counter = 0 + self.total_bytes = 0 + self.total_compressed_bytes: Optional[int] = None + + def __repr__(self) -> str: + info = [self.__class__.__name__] + if self._size: + info.append("%d bytes" % self._size) + if self._eof: + info.append("eof") + if self._low_water != 2**16: # default limit + info.append("low=%d high=%d" % (self._low_water, self._high_water)) + if self._waiter: + info.append("w=%r" % self._waiter) + if self._exception: + info.append("e=%r" % self._exception) + return "<%s>" % " ".join(info) + + def get_read_buffer_limits(self) -> Tuple[int, int]: + return (self._low_water, self._high_water) + + def exception(self) -> Optional[BaseException]: + return self._exception + + def set_exception( + self, + exc: BaseException, + exc_cause: BaseException = _EXC_SENTINEL, + ) -> None: + self._exception = exc + self._eof_callbacks.clear() + + waiter = self._waiter + if waiter is not None: + self._waiter = None + set_exception(waiter, exc, exc_cause) + + waiter = self._eof_waiter + if waiter is not None: + self._eof_waiter = None + set_exception(waiter, exc, exc_cause) + + def on_eof(self, callback: Callable[[], None]) -> None: + if self._eof: + try: + callback() + except Exception: + internal_logger.exception("Exception in eof callback") + else: + self._eof_callbacks.append(callback) + + def feed_eof(self) -> None: + self._eof = True + + waiter = self._waiter + if waiter is not None: + self._waiter = None + set_result(waiter, None) + + waiter = self._eof_waiter + if waiter is not None: + self._eof_waiter = None + set_result(waiter, None) + + if self._protocol._reading_paused: + self._protocol.resume_reading() + + for cb in self._eof_callbacks: + try: + cb() + except Exception: + internal_logger.exception("Exception in eof callback") + + self._eof_callbacks.clear() + + def is_eof(self) -> bool: + """Return True if 'feed_eof' was called.""" + return self._eof + + def at_eof(self) -> bool: + """Return True if the buffer is empty and 'feed_eof' was called.""" + return self._eof and not self._buffer + + async def 
wait_eof(self) -> None:
+        if self._eof:
+            return
+
+        assert self._eof_waiter is None
+        self._eof_waiter = self._loop.create_future()
+        try:
+            await self._eof_waiter
+        finally:
+            self._eof_waiter = None
+
+    @property
+    def total_raw_bytes(self) -> int:
+        if self.total_compressed_bytes is None:
+            return self.total_bytes
+        return self.total_compressed_bytes
+
+    def unread_data(self, data: bytes) -> None:
+        """Roll back some read data, inserting it back at the head of the buffer."""
+        warnings.warn(
+            "unread_data() is deprecated "
+            "and will be removed in future releases (#3260)",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        if not data:
+            return
+
+        if self._buffer_offset:
+            self._buffer[0] = self._buffer[0][self._buffer_offset :]
+            self._buffer_offset = 0
+        self._size += len(data)
+        self._cursor -= len(data)
+        self._buffer.appendleft(data)
+        self._eof_counter = 0
+
+    # TODO: size is ignored, remove the param later
+    def feed_data(self, data: bytes, size: int = 0) -> None:
+        assert not self._eof, "feed_data after feed_eof"
+
+        if not data:
+            return
+
+        data_len = len(data)
+        self._size += data_len
+        self._buffer.append(data)
+        self.total_bytes += data_len
+
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            set_result(waiter, None)
+
+        if self._size > self._high_water and not self._protocol._reading_paused:
+            self._protocol.pause_reading()
+
+    def begin_http_chunk_receiving(self) -> None:
+        if self._http_chunk_splits is None:
+            if self.total_bytes:
+                raise RuntimeError(
+                    "Called begin_http_chunk_receiving when some data was already fed"
+                )
+            self._http_chunk_splits = collections.deque()
+
+    def end_http_chunk_receiving(self) -> None:
+        if self._http_chunk_splits is None:
+            raise RuntimeError(
+                "Called end_http_chunk_receiving without calling "
+                "begin_http_chunk_receiving first"
+            )
+
+        # self._http_chunk_splits contains logical byte offsets from start of
+        # the body transfer. Each offset is the offset of the end of a chunk.
+        # "Logical" means bytes that are accessible to the user.
+        # If no chunks containing logical data were received, current position
+        # is definitely zero.
+        pos = self._http_chunk_splits[-1] if self._http_chunk_splits else 0
+
+        if self.total_bytes == pos:
+            # We should not add empty chunks here. So we check for that.
+            # Note, when chunked + gzip is used, we can receive a chunk
+            # of compressed data, but that data may not be enough for gzip FSM
+            # to yield any uncompressed data. That's why current position may
+            # not change after receiving a chunk.
+            return
+
+        self._http_chunk_splits.append(self.total_bytes)
+
+        # If we get too many small chunks before self._high_water is reached, then any
+        # .read() call becomes computationally expensive, and could block the event loop
+        # for too long, hence an additional self._high_water_chunks here.
+        if (
+            len(self._http_chunk_splits) > self._high_water_chunks
+            and not self._protocol._reading_paused
+        ):
+            self._protocol.pause_reading()
+
+        # wake up readchunk when end of http chunk received
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            set_result(waiter, None)
+
+    async def _wait(self, func_name: str) -> None:
+        if not self._protocol.connected:
+            raise RuntimeError("Connection closed.")
+
+        # StreamReader uses a future to link the protocol feed_data() method
+        # to a read coroutine. Running two read coroutines at the same time
+        # would have unexpected behaviour: it would not be possible to know
+        # which coroutine would get the next data.
+        if self._waiter is not None:
+            raise RuntimeError(
+                "%s() called while another coroutine is "
+                "already waiting for incoming data" % func_name
+            )
+
+        waiter = self._waiter = self._loop.create_future()
+        try:
+            with self._timer:
+                await waiter
+        finally:
+            self._waiter = None
+
+    async def readline(self) -> bytes:
+        return await self.readuntil()
+
+    async def readuntil(self, separator: bytes = b"\n") -> bytes:
+        seplen = len(separator)
+        if seplen == 0:
+            raise ValueError("Separator should be at least one byte long")
+
+        if self._exception is not None:
+            raise self._exception
+
+        chunk = b""
+        chunk_size = 0
+        not_enough = True
+
+        while not_enough:
+            while self._buffer and not_enough:
+                offset = self._buffer_offset
+                ichar = self._buffer[0].find(separator, offset) + 1
+                # Read from current offset to found separator or to the end.
+                data = self._read_nowait_chunk(
+                    ichar - offset + seplen - 1 if ichar else -1
+                )
+                chunk += data
+                chunk_size += len(data)
+                if ichar:
+                    not_enough = False
+
+                if chunk_size > self._high_water:
+                    raise ValueError("Chunk too big")
+
+            if self._eof:
+                break
+
+            if not_enough:
+                await self._wait("readuntil")
+
+        return chunk
+
+    async def read(self, n: int = -1) -> bytes:
+        if self._exception is not None:
+            raise self._exception
+
+        # Migration note: with DataQueue you have to catch the EofStream
+        # exception, so the common pattern is to call payload.read() inside
+        # an infinite loop. With StreamReader that pattern can become a real
+        # infinite loop, so let's keep this guard for one more major release.
+        if __debug__:
+            if self._eof and not self._buffer:
+                self._eof_counter = getattr(self, "_eof_counter", 0) + 1
+                if self._eof_counter > 5:
+                    internal_logger.warning(
+                        "Multiple access to StreamReader in eof state, "
+                        "might be infinite loop.",
+                        stack_info=True,
+                    )
+
+        if not n:
+            return b""
+
+        if n < 0:
+            # This used to just loop creating a new waiter hoping to
+            # collect everything in self._buffer, but that would
+            # deadlock if the subprocess sends more than self.limit
+            # bytes. So just call self.readany() until EOF.
+            blocks = []
+            while True:
+                block = await self.readany()
+                if not block:
+                    break
+                blocks.append(block)
+            return b"".join(blocks)
+
+        # TODO: should be `if` instead of `while`,
+        # because the waiter may be triggered on a chunk end
+        # without any data being fed
+        while not self._buffer and not self._eof:
+            await self._wait("read")
+
+        return self._read_nowait(n)
+
+    async def readany(self) -> bytes:
+        if self._exception is not None:
+            raise self._exception
+
+        # TODO: should be `if` instead of `while`,
+        # because the waiter may be triggered on a chunk end
+        # without any data being fed
+        while not self._buffer and not self._eof:
+            await self._wait("readany")
+
+        return self._read_nowait(-1)
+
+    async def readchunk(self) -> Tuple[bytes, bool]:
+        """Returns a tuple of (data, end_of_http_chunk).
+
+        When chunked transfer encoding is used, end_of_http_chunk is a
+        boolean indicating whether the end of the data corresponds to the
+        end of an HTTP chunk; otherwise it is always False.
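+
+        Example (an illustrative sketch, not from the upstream docs)::
+
+            data, end_of_http_chunk = await reader.readchunk()
+            if data == b"" and not end_of_http_chunk:
+                ...  # EOF reached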
+ """ + while True: + if self._exception is not None: + raise self._exception + + while self._http_chunk_splits: + pos = self._http_chunk_splits.popleft() + if pos == self._cursor: + return (b"", True) + if pos > self._cursor: + return (self._read_nowait(pos - self._cursor), True) + internal_logger.warning( + "Skipping HTTP chunk end due to data " + "consumption beyond chunk boundary" + ) + + if self._buffer: + return (self._read_nowait_chunk(-1), False) + # return (self._read_nowait(-1), False) + + if self._eof: + # Special case for signifying EOF. + # (b'', True) is not a final return value actually. + return (b"", False) + + await self._wait("readchunk") + + async def readexactly(self, n: int) -> bytes: + if self._exception is not None: + raise self._exception + + blocks: List[bytes] = [] + while n > 0: + block = await self.read(n) + if not block: + partial = b"".join(blocks) + raise asyncio.IncompleteReadError(partial, len(partial) + n) + blocks.append(block) + n -= len(block) + + return b"".join(blocks) + + def read_nowait(self, n: int = -1) -> bytes: + # default was changed to be consistent with .read(-1) + # + # I believe the most users don't know about the method and + # they are not affected. + if self._exception is not None: + raise self._exception + + if self._waiter and not self._waiter.done(): + raise RuntimeError( + "Called while some coroutine is waiting for incoming data." + ) + + return self._read_nowait(n) + + def _read_nowait_chunk(self, n: int) -> bytes: + first_buffer = self._buffer[0] + offset = self._buffer_offset + if n != -1 and len(first_buffer) - offset > n: + data = first_buffer[offset : offset + n] + self._buffer_offset += n + + elif offset: + self._buffer.popleft() + data = first_buffer[offset:] + self._buffer_offset = 0 + + else: + data = self._buffer.popleft() + + data_len = len(data) + self._size -= data_len + self._cursor += data_len + + chunk_splits = self._http_chunk_splits + # Prevent memory leak: drop useless chunk splits + while chunk_splits and chunk_splits[0] < self._cursor: + chunk_splits.popleft() + + if ( + self._protocol._reading_paused + and self._size < self._low_water + and ( + self._http_chunk_splits is None + or len(self._http_chunk_splits) < self._low_water_chunks + ) + ): + self._protocol.resume_reading() + return data + + def _read_nowait(self, n: int) -> bytes: + """Read not more than n bytes, or whole buffer if n == -1""" + self._timer.assert_timeout() + + chunks = [] + while self._buffer: + chunk = self._read_nowait_chunk(n) + chunks.append(chunk) + if n != -1: + n -= len(chunk) + if n == 0: + break + + return b"".join(chunks) if chunks else b"" + + +class EmptyStreamReader(StreamReader): # lgtm [py/missing-call-to-init] + + __slots__ = ("_read_eof_chunk",) + + def __init__(self) -> None: + self._read_eof_chunk = False + self.total_bytes = 0 + + def __repr__(self) -> str: + return "<%s>" % self.__class__.__name__ + + def exception(self) -> Optional[BaseException]: + return None + + def set_exception( + self, + exc: BaseException, + exc_cause: BaseException = _EXC_SENTINEL, + ) -> None: + pass + + def on_eof(self, callback: Callable[[], None]) -> None: + try: + callback() + except Exception: + internal_logger.exception("Exception in eof callback") + + def feed_eof(self) -> None: + pass + + def is_eof(self) -> bool: + return True + + def at_eof(self) -> bool: + return True + + async def wait_eof(self) -> None: + return + + def feed_data(self, data: bytes, n: int = 0) -> None: + pass + + async def readline(self) -> bytes: + return b"" + 
+ async def read(self, n: int = -1) -> bytes: + return b"" + + # TODO add async def readuntil + + async def readany(self) -> bytes: + return b"" + + async def readchunk(self) -> Tuple[bytes, bool]: + if not self._read_eof_chunk: + self._read_eof_chunk = True + return (b"", False) + + return (b"", True) + + async def readexactly(self, n: int) -> bytes: + raise asyncio.IncompleteReadError(b"", n) + + def read_nowait(self, n: int = -1) -> bytes: + return b"" + + +EMPTY_PAYLOAD: Final[StreamReader] = EmptyStreamReader() + + +class DataQueue(Generic[_T]): + """DataQueue is a general-purpose blocking queue with one reader.""" + + def __init__(self, loop: asyncio.AbstractEventLoop) -> None: + self._loop = loop + self._eof = False + self._waiter: Optional[asyncio.Future[None]] = None + self._exception: Optional[BaseException] = None + self._buffer: Deque[Tuple[_T, int]] = collections.deque() + + def __len__(self) -> int: + return len(self._buffer) + + def is_eof(self) -> bool: + return self._eof + + def at_eof(self) -> bool: + return self._eof and not self._buffer + + def exception(self) -> Optional[BaseException]: + return self._exception + + def set_exception( + self, + exc: BaseException, + exc_cause: BaseException = _EXC_SENTINEL, + ) -> None: + self._eof = True + self._exception = exc + if (waiter := self._waiter) is not None: + self._waiter = None + set_exception(waiter, exc, exc_cause) + + def feed_data(self, data: _T, size: int = 0) -> None: + self._buffer.append((data, size)) + if (waiter := self._waiter) is not None: + self._waiter = None + set_result(waiter, None) + + def feed_eof(self) -> None: + self._eof = True + if (waiter := self._waiter) is not None: + self._waiter = None + set_result(waiter, None) + + async def read(self) -> _T: + if not self._buffer and not self._eof: + assert not self._waiter + self._waiter = self._loop.create_future() + try: + await self._waiter + except (asyncio.CancelledError, asyncio.TimeoutError): + self._waiter = None + raise + if self._buffer: + data, _ = self._buffer.popleft() + return data + if self._exception is not None: + raise self._exception + raise EofStream + + def __aiter__(self) -> AsyncStreamIterator[_T]: + return AsyncStreamIterator(self.read) + + +class FlowControlDataQueue(DataQueue[_T]): + """FlowControlDataQueue resumes and pauses an underlying stream. + + It is a destination for parsed data. + + This class is deprecated and will be removed in version 4.0. 
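+
+    A rough usage sketch (illustrative; ``protocol``, ``loop`` and ``data``
+    are assumed to exist)::
+
+        queue = FlowControlDataQueue(protocol, limit=2 ** 16, loop=loop)
+        queue.feed_data(data, size=len(data))  # may pause reading
+        data = await queue.read()              # may resume reading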
+ """ + + def __init__( + self, protocol: BaseProtocol, limit: int, *, loop: asyncio.AbstractEventLoop + ) -> None: + super().__init__(loop=loop) + self._size = 0 + self._protocol = protocol + self._limit = limit * 2 + + def feed_data(self, data: _T, size: int = 0) -> None: + super().feed_data(data, size) + self._size += size + + if self._size > self._limit and not self._protocol._reading_paused: + self._protocol.pause_reading() + + async def read(self) -> _T: + if not self._buffer and not self._eof: + assert not self._waiter + self._waiter = self._loop.create_future() + try: + await self._waiter + except (asyncio.CancelledError, asyncio.TimeoutError): + self._waiter = None + raise + if self._buffer: + data, size = self._buffer.popleft() + self._size -= size + if self._size < self._limit and self._protocol._reading_paused: + self._protocol.resume_reading() + return data + if self._exception is not None: + raise self._exception + raise EofStream diff --git a/py311/lib/python3.11/site-packages/aiohttp/tcp_helpers.py b/py311/lib/python3.11/site-packages/aiohttp/tcp_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..88b244223741ad2decb6cb612eae644fae88b2b2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/tcp_helpers.py @@ -0,0 +1,37 @@ +"""Helper methods to tune a TCP connection""" + +import asyncio +import socket +from contextlib import suppress +from typing import Optional # noqa + +__all__ = ("tcp_keepalive", "tcp_nodelay") + + +if hasattr(socket, "SO_KEEPALIVE"): + + def tcp_keepalive(transport: asyncio.Transport) -> None: + sock = transport.get_extra_info("socket") + if sock is not None: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + +else: + + def tcp_keepalive(transport: asyncio.Transport) -> None: # pragma: no cover + pass + + +def tcp_nodelay(transport: asyncio.Transport, value: bool) -> None: + sock = transport.get_extra_info("socket") + + if sock is None: + return + + if sock.family not in (socket.AF_INET, socket.AF_INET6): + return + + value = bool(value) + + # socket may be closed already, on windows OSError get raised + with suppress(OSError): + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, value) diff --git a/py311/lib/python3.11/site-packages/aiohttp/test_utils.py b/py311/lib/python3.11/site-packages/aiohttp/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..87c31427867f90244c11c8440a5e1f47fc5a079f --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/test_utils.py @@ -0,0 +1,774 @@ +"""Utilities shared by tests.""" + +import asyncio +import contextlib +import gc +import inspect +import ipaddress +import os +import socket +import sys +import warnings +from abc import ABC, abstractmethod +from types import TracebackType +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Generic, + Iterator, + List, + Optional, + Type, + TypeVar, + cast, + overload, +) +from unittest import IsolatedAsyncioTestCase, mock + +from aiosignal import Signal +from multidict import CIMultiDict, CIMultiDictProxy +from yarl import URL + +import aiohttp +from aiohttp.client import ( + _RequestContextManager, + _RequestOptions, + _WSRequestContextManager, +) + +from . 
import ClientSession, hdrs +from .abc import AbstractCookieJar +from .client_reqrep import ClientResponse +from .client_ws import ClientWebSocketResponse +from .helpers import sentinel +from .http import HttpVersion, RawRequestMessage +from .streams import EMPTY_PAYLOAD, StreamReader +from .typedefs import StrOrURL +from .web import ( + Application, + AppRunner, + BaseRequest, + BaseRunner, + Request, + Server, + ServerRunner, + SockSite, + UrlMappingMatchInfo, +) +from .web_protocol import _RequestHandler + +if TYPE_CHECKING: + from ssl import SSLContext +else: + SSLContext = None + +if sys.version_info >= (3, 11) and TYPE_CHECKING: + from typing import Unpack + +if sys.version_info >= (3, 11): + from typing import Self +else: + Self = Any + +_ApplicationNone = TypeVar("_ApplicationNone", Application, None) +_Request = TypeVar("_Request", bound=BaseRequest) + +REUSE_ADDRESS = os.name == "posix" and sys.platform != "cygwin" + + +def get_unused_port_socket( + host: str, family: socket.AddressFamily = socket.AF_INET +) -> socket.socket: + return get_port_socket(host, 0, family) + + +def get_port_socket( + host: str, port: int, family: socket.AddressFamily +) -> socket.socket: + s = socket.socket(family, socket.SOCK_STREAM) + if REUSE_ADDRESS: + # Windows has different semantics for SO_REUSEADDR, + # so don't set it. Ref: + # https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + s.bind((host, port)) + return s + + +def unused_port() -> int: + """Return a port that is unused on the current host.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("127.0.0.1", 0)) + return cast(int, s.getsockname()[1]) + + +class BaseTestServer(ABC): + __test__ = False + + def __init__( + self, + *, + scheme: str = "", + loop: Optional[asyncio.AbstractEventLoop] = None, + host: str = "127.0.0.1", + port: Optional[int] = None, + skip_url_asserts: bool = False, + socket_factory: Callable[ + [str, int, socket.AddressFamily], socket.socket + ] = get_port_socket, + **kwargs: Any, + ) -> None: + self._loop = loop + self.runner: Optional[BaseRunner] = None + self._root: Optional[URL] = None + self.host = host + self.port = port + self._closed = False + self.scheme = scheme + self.skip_url_asserts = skip_url_asserts + self.socket_factory = socket_factory + + async def start_server( + self, loop: Optional[asyncio.AbstractEventLoop] = None, **kwargs: Any + ) -> None: + if self.runner: + return + self._loop = loop + self._ssl = kwargs.pop("ssl", None) + self.runner = await self._make_runner(handler_cancellation=True, **kwargs) + await self.runner.setup() + if not self.port: + self.port = 0 + absolute_host = self.host + try: + version = ipaddress.ip_address(self.host).version + except ValueError: + version = 4 + if version == 6: + absolute_host = f"[{self.host}]" + family = socket.AF_INET6 if version == 6 else socket.AF_INET + _sock = self.socket_factory(self.host, self.port, family) + self.host, self.port = _sock.getsockname()[:2] + site = SockSite(self.runner, sock=_sock, ssl_context=self._ssl) + await site.start() + server = site._server + assert server is not None + sockets = server.sockets # type: ignore[attr-defined] + assert sockets is not None + self.port = sockets[0].getsockname()[1] + if not self.scheme: + self.scheme = "https" if self._ssl else "http" + self._root = URL(f"{self.scheme}://{absolute_host}:{self.port}") + + @abstractmethod # pragma: no cover + async def _make_runner(self, 
**kwargs: Any) -> BaseRunner: + pass + + def make_url(self, path: StrOrURL) -> URL: + assert self._root is not None + url = URL(path) + if not self.skip_url_asserts: + assert not url.absolute + return self._root.join(url) + else: + return URL(str(self._root) + str(path)) + + @property + def started(self) -> bool: + return self.runner is not None + + @property + def closed(self) -> bool: + return self._closed + + @property + def handler(self) -> Server: + # for backward compatibility + # web.Server instance + runner = self.runner + assert runner is not None + assert runner.server is not None + return runner.server + + async def close(self) -> None: + """Close all fixtures created by the test client. + + After that point, the TestClient is no longer usable. + + This is an idempotent function: running close multiple times + will not have any additional effects. + + close is also run when the object is garbage collected, and on + exit when used as a context manager. + + """ + if self.started and not self.closed: + assert self.runner is not None + await self.runner.cleanup() + self._root = None + self.port = None + self._closed = True + + def __enter__(self) -> None: + raise TypeError("Use async with instead") + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + # __exit__ should exist in pair with __enter__ but never executed + pass # pragma: no cover + + async def __aenter__(self) -> "BaseTestServer": + await self.start_server(loop=self._loop) + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + await self.close() + + +class TestServer(BaseTestServer): + def __init__( + self, + app: Application, + *, + scheme: str = "", + host: str = "127.0.0.1", + port: Optional[int] = None, + **kwargs: Any, + ): + self.app = app + super().__init__(scheme=scheme, host=host, port=port, **kwargs) + + async def _make_runner(self, **kwargs: Any) -> BaseRunner: + return AppRunner(self.app, **kwargs) + + +class RawTestServer(BaseTestServer): + def __init__( + self, + handler: _RequestHandler, + *, + scheme: str = "", + host: str = "127.0.0.1", + port: Optional[int] = None, + **kwargs: Any, + ) -> None: + self._handler = handler + super().__init__(scheme=scheme, host=host, port=port, **kwargs) + + async def _make_runner(self, debug: bool = True, **kwargs: Any) -> ServerRunner: + srv = Server(self._handler, loop=self._loop, debug=debug, **kwargs) + return ServerRunner(srv, debug=debug, **kwargs) + + +class TestClient(Generic[_Request, _ApplicationNone]): + """ + A test client implementation. + + To write functional tests for aiohttp based servers. + + """ + + __test__ = False + + @overload + def __init__( + self: "TestClient[Request, Application]", + server: TestServer, + *, + cookie_jar: Optional[AbstractCookieJar] = None, + **kwargs: Any, + ) -> None: ... + @overload + def __init__( + self: "TestClient[_Request, None]", + server: BaseTestServer, + *, + cookie_jar: Optional[AbstractCookieJar] = None, + **kwargs: Any, + ) -> None: ... 
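+    # Illustrative effect of the overloads above: constructing the client
+    # from a TestServer narrows the generic parameters, e.g.
+    #
+    #     client = TestClient(TestServer(app))  # TestClient[Request, Application]
+    #     raw_client = TestClient(raw_server)   # TestClient[_Request, None]
+    #
+    # so ``client.app`` is an Application while ``raw_client.app`` is None.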
+ def __init__( + self, + server: BaseTestServer, + *, + cookie_jar: Optional[AbstractCookieJar] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, + **kwargs: Any, + ) -> None: + if not isinstance(server, BaseTestServer): + raise TypeError( + "server must be TestServer instance, found type: %r" % type(server) + ) + self._server = server + self._loop = loop + if cookie_jar is None: + cookie_jar = aiohttp.CookieJar(unsafe=True, loop=loop) + self._session = ClientSession(loop=loop, cookie_jar=cookie_jar, **kwargs) + self._session._retry_connection = False + self._closed = False + self._responses: List[ClientResponse] = [] + self._websockets: List[ClientWebSocketResponse] = [] + + async def start_server(self) -> None: + await self._server.start_server(loop=self._loop) + + @property + def host(self) -> str: + return self._server.host + + @property + def port(self) -> Optional[int]: + return self._server.port + + @property + def server(self) -> BaseTestServer: + return self._server + + @property + def app(self) -> _ApplicationNone: + return getattr(self._server, "app", None) # type: ignore[return-value] + + @property + def session(self) -> ClientSession: + """An internal aiohttp.ClientSession. + + Unlike the methods on the TestClient, client session requests + do not automatically include the host in the url queried, and + will require an absolute path to the resource. + + """ + return self._session + + def make_url(self, path: StrOrURL) -> URL: + return self._server.make_url(path) + + async def _request( + self, method: str, path: StrOrURL, **kwargs: Any + ) -> ClientResponse: + resp = await self._session.request(method, self.make_url(path), **kwargs) + # save it to close later + self._responses.append(resp) + return resp + + if sys.version_info >= (3, 11) and TYPE_CHECKING: + + def request( + self, method: str, path: StrOrURL, **kwargs: Unpack[_RequestOptions] + ) -> _RequestContextManager: ... + + def get( + self, + path: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> _RequestContextManager: ... + + def options( + self, + path: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> _RequestContextManager: ... + + def head( + self, + path: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> _RequestContextManager: ... + + def post( + self, + path: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> _RequestContextManager: ... + + def put( + self, + path: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> _RequestContextManager: ... + + def patch( + self, + path: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> _RequestContextManager: ... + + def delete( + self, + path: StrOrURL, + **kwargs: Unpack[_RequestOptions], + ) -> _RequestContextManager: ... + + else: + + def request( + self, method: str, path: StrOrURL, **kwargs: Any + ) -> _RequestContextManager: + """Routes a request to tested http server. + + The interface is identical to aiohttp.ClientSession.request, + except the loop kwarg is overridden by the instance used by the + test server. 
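+
+            A typical call inside a test looks like (illustrative)::
+
+                resp = await client.request("GET", "/")
+                assert resp.status == 200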
+ + """ + return _RequestContextManager(self._request(method, path, **kwargs)) + + def get(self, path: StrOrURL, **kwargs: Any) -> _RequestContextManager: + """Perform an HTTP GET request.""" + return _RequestContextManager(self._request(hdrs.METH_GET, path, **kwargs)) + + def post(self, path: StrOrURL, **kwargs: Any) -> _RequestContextManager: + """Perform an HTTP POST request.""" + return _RequestContextManager(self._request(hdrs.METH_POST, path, **kwargs)) + + def options(self, path: StrOrURL, **kwargs: Any) -> _RequestContextManager: + """Perform an HTTP OPTIONS request.""" + return _RequestContextManager( + self._request(hdrs.METH_OPTIONS, path, **kwargs) + ) + + def head(self, path: StrOrURL, **kwargs: Any) -> _RequestContextManager: + """Perform an HTTP HEAD request.""" + return _RequestContextManager(self._request(hdrs.METH_HEAD, path, **kwargs)) + + def put(self, path: StrOrURL, **kwargs: Any) -> _RequestContextManager: + """Perform an HTTP PUT request.""" + return _RequestContextManager(self._request(hdrs.METH_PUT, path, **kwargs)) + + def patch(self, path: StrOrURL, **kwargs: Any) -> _RequestContextManager: + """Perform an HTTP PATCH request.""" + return _RequestContextManager( + self._request(hdrs.METH_PATCH, path, **kwargs) + ) + + def delete(self, path: StrOrURL, **kwargs: Any) -> _RequestContextManager: + """Perform an HTTP PATCH request.""" + return _RequestContextManager( + self._request(hdrs.METH_DELETE, path, **kwargs) + ) + + def ws_connect(self, path: StrOrURL, **kwargs: Any) -> _WSRequestContextManager: + """Initiate websocket connection. + + The api corresponds to aiohttp.ClientSession.ws_connect. + + """ + return _WSRequestContextManager(self._ws_connect(path, **kwargs)) + + async def _ws_connect( + self, path: StrOrURL, **kwargs: Any + ) -> ClientWebSocketResponse: + ws = await self._session.ws_connect(self.make_url(path), **kwargs) + self._websockets.append(ws) + return ws + + async def close(self) -> None: + """Close all fixtures created by the test client. + + After that point, the TestClient is no longer usable. + + This is an idempotent function: running close multiple times + will not have any additional effects. + + close is also run on exit when used as a(n) (asynchronous) + context manager. + + """ + if not self._closed: + for resp in self._responses: + resp.close() + for ws in self._websockets: + await ws.close() + await self._session.close() + await self._server.close() + self._closed = True + + def __enter__(self) -> None: + raise TypeError("Use async with instead") + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc: Optional[BaseException], + tb: Optional[TracebackType], + ) -> None: + # __exit__ should exist in pair with __enter__ but never executed + pass # pragma: no cover + + async def __aenter__(self) -> Self: + await self.start_server() + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc: Optional[BaseException], + tb: Optional[TracebackType], + ) -> None: + await self.close() + + +class AioHTTPTestCase(IsolatedAsyncioTestCase): + """A base class to allow for unittest web applications using aiohttp. + + Provides the following: + + * self.client (aiohttp.test_utils.TestClient): an aiohttp test client. + * self.loop (asyncio.BaseEventLoop): the event loop in which the + application and server are running. 
+    * self.app (aiohttp.web.Application): the application returned by
+      self.get_application()
+
+    Note that the TestClient's methods are asynchronous: you have to
+    execute functions on the test client using asynchronous methods.
+    """
+
+    async def get_application(self) -> Application:
+        """Get application.
+
+        This method should be overridden
+        to return the aiohttp.web.Application
+        object to test.
+        """
+        return self.get_app()
+
+    def get_app(self) -> Application:
+        """Obsolete method used to construct a web application.
+
+        Use the .get_application() coroutine instead.
+        """
+        raise RuntimeError("Did you forget to define get_application()?")
+
+    async def asyncSetUp(self) -> None:
+        self.loop = asyncio.get_running_loop()
+        return await self.setUpAsync()
+
+    async def setUpAsync(self) -> None:
+        self.app = await self.get_application()
+        self.server = await self.get_server(self.app)
+        self.client = await self.get_client(self.server)
+
+        await self.client.start_server()
+
+    async def asyncTearDown(self) -> None:
+        return await self.tearDownAsync()
+
+    async def tearDownAsync(self) -> None:
+        await self.client.close()
+
+    async def get_server(self, app: Application) -> TestServer:
+        """Return a TestServer instance."""
+        return TestServer(app, loop=self.loop)
+
+    async def get_client(self, server: TestServer) -> TestClient[Request, Application]:
+        """Return a TestClient instance."""
+        return TestClient(server, loop=self.loop)
+
+
+def unittest_run_loop(func: Any, *args: Any, **kwargs: Any) -> Any:
+    """
+    A decorator for use with asynchronous AioHTTPTestCase test methods.
+
+    In 3.8+, this does nothing.
+    """
+    warnings.warn(
+        "Decorator `@unittest_run_loop` is no longer needed in aiohttp 3.8+",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return func
+
+
+_LOOP_FACTORY = Callable[[], asyncio.AbstractEventLoop]
+
+
+@contextlib.contextmanager
+def loop_context(
+    loop_factory: _LOOP_FACTORY = asyncio.new_event_loop, fast: bool = False
+) -> Iterator[asyncio.AbstractEventLoop]:
+    """A contextmanager that creates an event loop for test purposes.
+
+    Handles the creation and cleanup of a test loop.
+    """
+    loop = setup_test_loop(loop_factory)
+    yield loop
+    teardown_test_loop(loop, fast=fast)
+
+
+def setup_test_loop(
+    loop_factory: _LOOP_FACTORY = asyncio.new_event_loop,
+) -> asyncio.AbstractEventLoop:
+    """Create and return an asyncio.BaseEventLoop instance.
+
+    The caller should also call teardown_test_loop
+    once they are done with the loop.
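+
+    Rough usage (illustrative)::
+
+        loop = setup_test_loop()
+        try:
+            loop.run_until_complete(some_coroutine())
+        finally:
+            teardown_test_loop(loop)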
+ """ + loop = loop_factory() + asyncio.set_event_loop(loop) + return loop + + +def teardown_test_loop(loop: asyncio.AbstractEventLoop, fast: bool = False) -> None: + """Teardown and cleanup an event_loop created by setup_test_loop.""" + closed = loop.is_closed() + if not closed: + loop.call_soon(loop.stop) + loop.run_forever() + loop.close() + + if not fast: + gc.collect() + + asyncio.set_event_loop(None) + + +def _create_app_mock() -> mock.MagicMock: + def get_dict(app: Any, key: str) -> Any: + return app.__app_dict[key] + + def set_dict(app: Any, key: str, value: Any) -> None: + app.__app_dict[key] = value + + app = mock.MagicMock(spec=Application) + app.__app_dict = {} + app.__getitem__ = get_dict + app.__setitem__ = set_dict + + app._debug = False + app.on_response_prepare = Signal(app) + app.on_response_prepare.freeze() + return app + + +def _create_transport(sslcontext: Optional[SSLContext] = None) -> mock.Mock: + transport = mock.Mock() + + def get_extra_info(key: str) -> Optional[SSLContext]: + if key == "sslcontext": + return sslcontext + else: + return None + + transport.get_extra_info.side_effect = get_extra_info + return transport + + +def make_mocked_request( + method: str, + path: str, + headers: Any = None, + *, + match_info: Any = sentinel, + version: HttpVersion = HttpVersion(1, 1), + closing: bool = False, + app: Any = None, + writer: Any = sentinel, + protocol: Any = sentinel, + transport: Any = sentinel, + payload: StreamReader = EMPTY_PAYLOAD, + sslcontext: Optional[SSLContext] = None, + client_max_size: int = 1024**2, + loop: Any = ..., +) -> Request: + """Creates mocked web.Request testing purposes. + + Useful in unit tests, when spinning full web server is overkill or + specific conditions and errors are hard to trigger. + """ + task = mock.Mock() + if loop is ...: + # no loop passed, try to get the current one if + # its is running as we need a real loop to create + # executor jobs to be able to do testing + # with a real executor + try: + loop = asyncio.get_running_loop() + except RuntimeError: + loop = mock.Mock() + loop.create_future.return_value = () + + if version < HttpVersion(1, 1): + closing = True + + if headers: + headers = CIMultiDictProxy(CIMultiDict(headers)) + raw_hdrs = tuple( + (k.encode("utf-8"), v.encode("utf-8")) for k, v in headers.items() + ) + else: + headers = CIMultiDictProxy(CIMultiDict()) + raw_hdrs = () + + chunked = "chunked" in headers.get(hdrs.TRANSFER_ENCODING, "").lower() + + message = RawRequestMessage( + method, + path, + version, + headers, + raw_hdrs, + closing, + None, + False, + chunked, + URL(path), + ) + if app is None: + app = _create_app_mock() + + if transport is sentinel: + transport = _create_transport(sslcontext) + + if protocol is sentinel: + protocol = mock.Mock() + protocol.transport = transport + type(protocol).peername = mock.PropertyMock( + return_value=transport.get_extra_info("peername") + ) + type(protocol).ssl_context = mock.PropertyMock(return_value=sslcontext) + + if writer is sentinel: + writer = mock.Mock() + writer.write_headers = make_mocked_coro(None) + writer.write = make_mocked_coro(None) + writer.write_eof = make_mocked_coro(None) + writer.drain = make_mocked_coro(None) + writer.transport = transport + + protocol.transport = transport + protocol.writer = writer + + req = Request( + message, payload, protocol, writer, task, loop, client_max_size=client_max_size + ) + + match_info = UrlMappingMatchInfo( + {} if match_info is sentinel else match_info, mock.Mock() + ) + match_info.add_app(app) + 
req._match_info = match_info + + return req + + +def make_mocked_coro( + return_value: Any = sentinel, raise_exception: Any = sentinel +) -> Any: + """Creates a coroutine mock.""" + + async def mock_coro(*args: Any, **kwargs: Any) -> Any: + if raise_exception is not sentinel: + raise raise_exception + if not inspect.isawaitable(return_value): + return return_value + await return_value + + return mock.Mock(wraps=mock_coro) diff --git a/py311/lib/python3.11/site-packages/aiohttp/tracing.py b/py311/lib/python3.11/site-packages/aiohttp/tracing.py new file mode 100644 index 0000000000000000000000000000000000000000..568fa7f9e38090e0c0a4738db4d51656ce31b99a --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/tracing.py @@ -0,0 +1,455 @@ +from types import SimpleNamespace +from typing import TYPE_CHECKING, Mapping, Optional, Type, TypeVar + +import attr +from aiosignal import Signal +from multidict import CIMultiDict +from yarl import URL + +from .client_reqrep import ClientResponse + +if TYPE_CHECKING: + from .client import ClientSession + + _ParamT_contra = TypeVar("_ParamT_contra", contravariant=True) + _TracingSignal = Signal[ClientSession, SimpleNamespace, _ParamT_contra] + + +__all__ = ( + "TraceConfig", + "TraceRequestStartParams", + "TraceRequestEndParams", + "TraceRequestExceptionParams", + "TraceConnectionQueuedStartParams", + "TraceConnectionQueuedEndParams", + "TraceConnectionCreateStartParams", + "TraceConnectionCreateEndParams", + "TraceConnectionReuseconnParams", + "TraceDnsResolveHostStartParams", + "TraceDnsResolveHostEndParams", + "TraceDnsCacheHitParams", + "TraceDnsCacheMissParams", + "TraceRequestRedirectParams", + "TraceRequestChunkSentParams", + "TraceResponseChunkReceivedParams", + "TraceRequestHeadersSentParams", +) + + +class TraceConfig: + """First-class used to trace requests launched via ClientSession objects.""" + + def __init__( + self, trace_config_ctx_factory: Type[SimpleNamespace] = SimpleNamespace + ) -> None: + self._on_request_start: _TracingSignal[TraceRequestStartParams] = Signal(self) + self._on_request_chunk_sent: _TracingSignal[TraceRequestChunkSentParams] = ( + Signal(self) + ) + self._on_response_chunk_received: _TracingSignal[ + TraceResponseChunkReceivedParams + ] = Signal(self) + self._on_request_end: _TracingSignal[TraceRequestEndParams] = Signal(self) + self._on_request_exception: _TracingSignal[TraceRequestExceptionParams] = ( + Signal(self) + ) + self._on_request_redirect: _TracingSignal[TraceRequestRedirectParams] = Signal( + self + ) + self._on_connection_queued_start: _TracingSignal[ + TraceConnectionQueuedStartParams + ] = Signal(self) + self._on_connection_queued_end: _TracingSignal[ + TraceConnectionQueuedEndParams + ] = Signal(self) + self._on_connection_create_start: _TracingSignal[ + TraceConnectionCreateStartParams + ] = Signal(self) + self._on_connection_create_end: _TracingSignal[ + TraceConnectionCreateEndParams + ] = Signal(self) + self._on_connection_reuseconn: _TracingSignal[ + TraceConnectionReuseconnParams + ] = Signal(self) + self._on_dns_resolvehost_start: _TracingSignal[ + TraceDnsResolveHostStartParams + ] = Signal(self) + self._on_dns_resolvehost_end: _TracingSignal[TraceDnsResolveHostEndParams] = ( + Signal(self) + ) + self._on_dns_cache_hit: _TracingSignal[TraceDnsCacheHitParams] = Signal(self) + self._on_dns_cache_miss: _TracingSignal[TraceDnsCacheMissParams] = Signal(self) + self._on_request_headers_sent: _TracingSignal[TraceRequestHeadersSentParams] = ( + Signal(self) + ) + + self._trace_config_ctx_factory = 
trace_config_ctx_factory + + def trace_config_ctx( + self, trace_request_ctx: Optional[Mapping[str, str]] = None + ) -> SimpleNamespace: + """Return a new trace_config_ctx instance""" + return self._trace_config_ctx_factory(trace_request_ctx=trace_request_ctx) + + def freeze(self) -> None: + self._on_request_start.freeze() + self._on_request_chunk_sent.freeze() + self._on_response_chunk_received.freeze() + self._on_request_end.freeze() + self._on_request_exception.freeze() + self._on_request_redirect.freeze() + self._on_connection_queued_start.freeze() + self._on_connection_queued_end.freeze() + self._on_connection_create_start.freeze() + self._on_connection_create_end.freeze() + self._on_connection_reuseconn.freeze() + self._on_dns_resolvehost_start.freeze() + self._on_dns_resolvehost_end.freeze() + self._on_dns_cache_hit.freeze() + self._on_dns_cache_miss.freeze() + self._on_request_headers_sent.freeze() + + @property + def on_request_start(self) -> "_TracingSignal[TraceRequestStartParams]": + return self._on_request_start + + @property + def on_request_chunk_sent( + self, + ) -> "_TracingSignal[TraceRequestChunkSentParams]": + return self._on_request_chunk_sent + + @property + def on_response_chunk_received( + self, + ) -> "_TracingSignal[TraceResponseChunkReceivedParams]": + return self._on_response_chunk_received + + @property + def on_request_end(self) -> "_TracingSignal[TraceRequestEndParams]": + return self._on_request_end + + @property + def on_request_exception( + self, + ) -> "_TracingSignal[TraceRequestExceptionParams]": + return self._on_request_exception + + @property + def on_request_redirect( + self, + ) -> "_TracingSignal[TraceRequestRedirectParams]": + return self._on_request_redirect + + @property + def on_connection_queued_start( + self, + ) -> "_TracingSignal[TraceConnectionQueuedStartParams]": + return self._on_connection_queued_start + + @property + def on_connection_queued_end( + self, + ) -> "_TracingSignal[TraceConnectionQueuedEndParams]": + return self._on_connection_queued_end + + @property + def on_connection_create_start( + self, + ) -> "_TracingSignal[TraceConnectionCreateStartParams]": + return self._on_connection_create_start + + @property + def on_connection_create_end( + self, + ) -> "_TracingSignal[TraceConnectionCreateEndParams]": + return self._on_connection_create_end + + @property + def on_connection_reuseconn( + self, + ) -> "_TracingSignal[TraceConnectionReuseconnParams]": + return self._on_connection_reuseconn + + @property + def on_dns_resolvehost_start( + self, + ) -> "_TracingSignal[TraceDnsResolveHostStartParams]": + return self._on_dns_resolvehost_start + + @property + def on_dns_resolvehost_end( + self, + ) -> "_TracingSignal[TraceDnsResolveHostEndParams]": + return self._on_dns_resolvehost_end + + @property + def on_dns_cache_hit(self) -> "_TracingSignal[TraceDnsCacheHitParams]": + return self._on_dns_cache_hit + + @property + def on_dns_cache_miss(self) -> "_TracingSignal[TraceDnsCacheMissParams]": + return self._on_dns_cache_miss + + @property + def on_request_headers_sent( + self, + ) -> "_TracingSignal[TraceRequestHeadersSentParams]": + return self._on_request_headers_sent + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceRequestStartParams: + """Parameters sent by the `on_request_start` signal""" + + method: str + url: URL + headers: "CIMultiDict[str]" + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceRequestChunkSentParams: + """Parameters sent by the `on_request_chunk_sent` signal""" + + method: 
str
+    url: URL
+    chunk: bytes
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceResponseChunkReceivedParams:
+    """Parameters sent by the `on_response_chunk_received` signal"""
+
+    method: str
+    url: URL
+    chunk: bytes
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceRequestEndParams:
+    """Parameters sent by the `on_request_end` signal"""
+
+    method: str
+    url: URL
+    headers: "CIMultiDict[str]"
+    response: ClientResponse
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceRequestExceptionParams:
+    """Parameters sent by the `on_request_exception` signal"""
+
+    method: str
+    url: URL
+    headers: "CIMultiDict[str]"
+    exception: BaseException
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceRequestRedirectParams:
+    """Parameters sent by the `on_request_redirect` signal"""
+
+    method: str
+    url: URL
+    headers: "CIMultiDict[str]"
+    response: ClientResponse
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceConnectionQueuedStartParams:
+    """Parameters sent by the `on_connection_queued_start` signal"""
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceConnectionQueuedEndParams:
+    """Parameters sent by the `on_connection_queued_end` signal"""
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceConnectionCreateStartParams:
+    """Parameters sent by the `on_connection_create_start` signal"""
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceConnectionCreateEndParams:
+    """Parameters sent by the `on_connection_create_end` signal"""
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceConnectionReuseconnParams:
+    """Parameters sent by the `on_connection_reuseconn` signal"""
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceDnsResolveHostStartParams:
+    """Parameters sent by the `on_dns_resolvehost_start` signal"""
+
+    host: str
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceDnsResolveHostEndParams:
+    """Parameters sent by the `on_dns_resolvehost_end` signal"""
+
+    host: str
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceDnsCacheHitParams:
+    """Parameters sent by the `on_dns_cache_hit` signal"""
+
+    host: str
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceDnsCacheMissParams:
+    """Parameters sent by the `on_dns_cache_miss` signal"""
+
+    host: str
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceRequestHeadersSentParams:
+    """Parameters sent by the `on_request_headers_sent` signal"""
+
+    method: str
+    url: URL
+    headers: "CIMultiDict[str]"
+
+
+class Trace:
+    """Internal dependency holder class.
+
+    Used to keep together the main dependencies used
+    at the moment of sending a signal.
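+
+    The signals it forwards are typically wired up like this (illustrative
+    sketch)::
+
+        async def on_request_start(session, ctx, params):
+            print("Starting %s %s" % (params.method, params.url))
+
+        trace_config = TraceConfig()
+        trace_config.on_request_start.append(on_request_start)
+        session = ClientSession(trace_configs=[trace_config])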
+ """ + + def __init__( + self, + session: "ClientSession", + trace_config: TraceConfig, + trace_config_ctx: SimpleNamespace, + ) -> None: + self._trace_config = trace_config + self._trace_config_ctx = trace_config_ctx + self._session = session + + async def send_request_start( + self, method: str, url: URL, headers: "CIMultiDict[str]" + ) -> None: + return await self._trace_config.on_request_start.send( + self._session, + self._trace_config_ctx, + TraceRequestStartParams(method, url, headers), + ) + + async def send_request_chunk_sent( + self, method: str, url: URL, chunk: bytes + ) -> None: + return await self._trace_config.on_request_chunk_sent.send( + self._session, + self._trace_config_ctx, + TraceRequestChunkSentParams(method, url, chunk), + ) + + async def send_response_chunk_received( + self, method: str, url: URL, chunk: bytes + ) -> None: + return await self._trace_config.on_response_chunk_received.send( + self._session, + self._trace_config_ctx, + TraceResponseChunkReceivedParams(method, url, chunk), + ) + + async def send_request_end( + self, + method: str, + url: URL, + headers: "CIMultiDict[str]", + response: ClientResponse, + ) -> None: + return await self._trace_config.on_request_end.send( + self._session, + self._trace_config_ctx, + TraceRequestEndParams(method, url, headers, response), + ) + + async def send_request_exception( + self, + method: str, + url: URL, + headers: "CIMultiDict[str]", + exception: BaseException, + ) -> None: + return await self._trace_config.on_request_exception.send( + self._session, + self._trace_config_ctx, + TraceRequestExceptionParams(method, url, headers, exception), + ) + + async def send_request_redirect( + self, + method: str, + url: URL, + headers: "CIMultiDict[str]", + response: ClientResponse, + ) -> None: + return await self._trace_config._on_request_redirect.send( + self._session, + self._trace_config_ctx, + TraceRequestRedirectParams(method, url, headers, response), + ) + + async def send_connection_queued_start(self) -> None: + return await self._trace_config.on_connection_queued_start.send( + self._session, self._trace_config_ctx, TraceConnectionQueuedStartParams() + ) + + async def send_connection_queued_end(self) -> None: + return await self._trace_config.on_connection_queued_end.send( + self._session, self._trace_config_ctx, TraceConnectionQueuedEndParams() + ) + + async def send_connection_create_start(self) -> None: + return await self._trace_config.on_connection_create_start.send( + self._session, self._trace_config_ctx, TraceConnectionCreateStartParams() + ) + + async def send_connection_create_end(self) -> None: + return await self._trace_config.on_connection_create_end.send( + self._session, self._trace_config_ctx, TraceConnectionCreateEndParams() + ) + + async def send_connection_reuseconn(self) -> None: + return await self._trace_config.on_connection_reuseconn.send( + self._session, self._trace_config_ctx, TraceConnectionReuseconnParams() + ) + + async def send_dns_resolvehost_start(self, host: str) -> None: + return await self._trace_config.on_dns_resolvehost_start.send( + self._session, self._trace_config_ctx, TraceDnsResolveHostStartParams(host) + ) + + async def send_dns_resolvehost_end(self, host: str) -> None: + return await self._trace_config.on_dns_resolvehost_end.send( + self._session, self._trace_config_ctx, TraceDnsResolveHostEndParams(host) + ) + + async def send_dns_cache_hit(self, host: str) -> None: + return await self._trace_config.on_dns_cache_hit.send( + self._session, self._trace_config_ctx, 
TraceDnsCacheHitParams(host) + ) + + async def send_dns_cache_miss(self, host: str) -> None: + return await self._trace_config.on_dns_cache_miss.send( + self._session, self._trace_config_ctx, TraceDnsCacheMissParams(host) + ) + + async def send_request_headers( + self, method: str, url: URL, headers: "CIMultiDict[str]" + ) -> None: + return await self._trace_config._on_request_headers_sent.send( + self._session, + self._trace_config_ctx, + TraceRequestHeadersSentParams(method, url, headers), + ) diff --git a/py311/lib/python3.11/site-packages/aiohttp/typedefs.py b/py311/lib/python3.11/site-packages/aiohttp/typedefs.py new file mode 100644 index 0000000000000000000000000000000000000000..cc8c0825b4e522f7d1b6cf0058564f322fb5a905 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/typedefs.py @@ -0,0 +1,69 @@ +import json +import os +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Iterable, + Mapping, + Protocol, + Tuple, + Union, +) + +from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy, istr +from yarl import URL, Query as _Query + +Query = _Query + +DEFAULT_JSON_ENCODER = json.dumps +DEFAULT_JSON_DECODER = json.loads + +if TYPE_CHECKING: + _CIMultiDict = CIMultiDict[str] + _CIMultiDictProxy = CIMultiDictProxy[str] + _MultiDict = MultiDict[str] + _MultiDictProxy = MultiDictProxy[str] + from http.cookies import BaseCookie, Morsel + + from .web import Request, StreamResponse +else: + _CIMultiDict = CIMultiDict + _CIMultiDictProxy = CIMultiDictProxy + _MultiDict = MultiDict + _MultiDictProxy = MultiDictProxy + +Byteish = Union[bytes, bytearray, memoryview] +JSONEncoder = Callable[[Any], str] +JSONDecoder = Callable[[str], Any] +LooseHeaders = Union[ + Mapping[str, str], + Mapping[istr, str], + _CIMultiDict, + _CIMultiDictProxy, + Iterable[Tuple[Union[str, istr], str]], +] +RawHeaders = Tuple[Tuple[bytes, bytes], ...] +StrOrURL = Union[str, URL] + +LooseCookiesMappings = Mapping[str, Union[str, "BaseCookie[str]", "Morsel[Any]"]] +LooseCookiesIterables = Iterable[ + Tuple[str, Union[str, "BaseCookie[str]", "Morsel[Any]"]] +] +LooseCookies = Union[ + LooseCookiesMappings, + LooseCookiesIterables, + "BaseCookie[str]", +] + +Handler = Callable[["Request"], Awaitable["StreamResponse"]] + + +class Middleware(Protocol): + def __call__( + self, request: "Request", handler: Handler + ) -> Awaitable["StreamResponse"]: ... 
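+
+
+# A function satisfying the Middleware protocol looks roughly like this
+# (illustrative sketch):
+#
+#     async def example_middleware(
+#         request: "Request", handler: Handler
+#     ) -> "StreamResponse":
+#         return await handler(request)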
+ + +PathLike = Union[str, "os.PathLike[str]"] diff --git a/py311/lib/python3.11/site-packages/aiohttp/web.py b/py311/lib/python3.11/site-packages/aiohttp/web.py new file mode 100644 index 0000000000000000000000000000000000000000..5a1fc964172be24b03ffaf3982ba6fccea7b6515 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/web.py @@ -0,0 +1,592 @@ +import asyncio +import logging +import os +import socket +import sys +import warnings +from argparse import ArgumentParser +from collections.abc import Iterable +from contextlib import suppress +from importlib import import_module +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Iterable as TypingIterable, + List, + Optional, + Set, + Type, + Union, + cast, +) + +from .abc import AbstractAccessLogger +from .helpers import AppKey as AppKey +from .log import access_logger +from .typedefs import PathLike +from .web_app import Application as Application, CleanupError as CleanupError +from .web_exceptions import ( + HTTPAccepted as HTTPAccepted, + HTTPBadGateway as HTTPBadGateway, + HTTPBadRequest as HTTPBadRequest, + HTTPClientError as HTTPClientError, + HTTPConflict as HTTPConflict, + HTTPCreated as HTTPCreated, + HTTPError as HTTPError, + HTTPException as HTTPException, + HTTPExpectationFailed as HTTPExpectationFailed, + HTTPFailedDependency as HTTPFailedDependency, + HTTPForbidden as HTTPForbidden, + HTTPFound as HTTPFound, + HTTPGatewayTimeout as HTTPGatewayTimeout, + HTTPGone as HTTPGone, + HTTPInsufficientStorage as HTTPInsufficientStorage, + HTTPInternalServerError as HTTPInternalServerError, + HTTPLengthRequired as HTTPLengthRequired, + HTTPMethodNotAllowed as HTTPMethodNotAllowed, + HTTPMisdirectedRequest as HTTPMisdirectedRequest, + HTTPMove as HTTPMove, + HTTPMovedPermanently as HTTPMovedPermanently, + HTTPMultipleChoices as HTTPMultipleChoices, + HTTPNetworkAuthenticationRequired as HTTPNetworkAuthenticationRequired, + HTTPNoContent as HTTPNoContent, + HTTPNonAuthoritativeInformation as HTTPNonAuthoritativeInformation, + HTTPNotAcceptable as HTTPNotAcceptable, + HTTPNotExtended as HTTPNotExtended, + HTTPNotFound as HTTPNotFound, + HTTPNotImplemented as HTTPNotImplemented, + HTTPNotModified as HTTPNotModified, + HTTPOk as HTTPOk, + HTTPPartialContent as HTTPPartialContent, + HTTPPaymentRequired as HTTPPaymentRequired, + HTTPPermanentRedirect as HTTPPermanentRedirect, + HTTPPreconditionFailed as HTTPPreconditionFailed, + HTTPPreconditionRequired as HTTPPreconditionRequired, + HTTPProxyAuthenticationRequired as HTTPProxyAuthenticationRequired, + HTTPRedirection as HTTPRedirection, + HTTPRequestEntityTooLarge as HTTPRequestEntityTooLarge, + HTTPRequestHeaderFieldsTooLarge as HTTPRequestHeaderFieldsTooLarge, + HTTPRequestRangeNotSatisfiable as HTTPRequestRangeNotSatisfiable, + HTTPRequestTimeout as HTTPRequestTimeout, + HTTPRequestURITooLong as HTTPRequestURITooLong, + HTTPResetContent as HTTPResetContent, + HTTPSeeOther as HTTPSeeOther, + HTTPServerError as HTTPServerError, + HTTPServiceUnavailable as HTTPServiceUnavailable, + HTTPSuccessful as HTTPSuccessful, + HTTPTemporaryRedirect as HTTPTemporaryRedirect, + HTTPTooManyRequests as HTTPTooManyRequests, + HTTPUnauthorized as HTTPUnauthorized, + HTTPUnavailableForLegalReasons as HTTPUnavailableForLegalReasons, + HTTPUnprocessableEntity as HTTPUnprocessableEntity, + HTTPUnsupportedMediaType as HTTPUnsupportedMediaType, + HTTPUpgradeRequired as HTTPUpgradeRequired, + HTTPUseProxy as HTTPUseProxy, + HTTPVariantAlsoNegotiates as HTTPVariantAlsoNegotiates, + 
HTTPVersionNotSupported as HTTPVersionNotSupported, + NotAppKeyWarning as NotAppKeyWarning, +) +from .web_fileresponse import FileResponse as FileResponse +from .web_log import AccessLogger +from .web_middlewares import ( + middleware as middleware, + normalize_path_middleware as normalize_path_middleware, +) +from .web_protocol import ( + PayloadAccessError as PayloadAccessError, + RequestHandler as RequestHandler, + RequestPayloadError as RequestPayloadError, +) +from .web_request import ( + BaseRequest as BaseRequest, + FileField as FileField, + Request as Request, +) +from .web_response import ( + ContentCoding as ContentCoding, + Response as Response, + StreamResponse as StreamResponse, + json_response as json_response, +) +from .web_routedef import ( + AbstractRouteDef as AbstractRouteDef, + RouteDef as RouteDef, + RouteTableDef as RouteTableDef, + StaticDef as StaticDef, + delete as delete, + get as get, + head as head, + options as options, + patch as patch, + post as post, + put as put, + route as route, + static as static, + view as view, +) +from .web_runner import ( + AppRunner as AppRunner, + BaseRunner as BaseRunner, + BaseSite as BaseSite, + GracefulExit as GracefulExit, + NamedPipeSite as NamedPipeSite, + ServerRunner as ServerRunner, + SockSite as SockSite, + TCPSite as TCPSite, + UnixSite as UnixSite, +) +from .web_server import Server as Server +from .web_urldispatcher import ( + AbstractResource as AbstractResource, + AbstractRoute as AbstractRoute, + DynamicResource as DynamicResource, + PlainResource as PlainResource, + PrefixedSubAppResource as PrefixedSubAppResource, + Resource as Resource, + ResourceRoute as ResourceRoute, + StaticResource as StaticResource, + UrlDispatcher as UrlDispatcher, + UrlMappingMatchInfo as UrlMappingMatchInfo, + View as View, +) +from .web_ws import ( + WebSocketReady as WebSocketReady, + WebSocketResponse as WebSocketResponse, + WSMsgType as WSMsgType, +) + +__all__ = ( + # web_app + "AppKey", + "Application", + "CleanupError", + # web_exceptions + "NotAppKeyWarning", + "HTTPAccepted", + "HTTPBadGateway", + "HTTPBadRequest", + "HTTPClientError", + "HTTPConflict", + "HTTPCreated", + "HTTPError", + "HTTPException", + "HTTPExpectationFailed", + "HTTPFailedDependency", + "HTTPForbidden", + "HTTPFound", + "HTTPGatewayTimeout", + "HTTPGone", + "HTTPInsufficientStorage", + "HTTPInternalServerError", + "HTTPLengthRequired", + "HTTPMethodNotAllowed", + "HTTPMisdirectedRequest", + "HTTPMove", + "HTTPMovedPermanently", + "HTTPMultipleChoices", + "HTTPNetworkAuthenticationRequired", + "HTTPNoContent", + "HTTPNonAuthoritativeInformation", + "HTTPNotAcceptable", + "HTTPNotExtended", + "HTTPNotFound", + "HTTPNotImplemented", + "HTTPNotModified", + "HTTPOk", + "HTTPPartialContent", + "HTTPPaymentRequired", + "HTTPPermanentRedirect", + "HTTPPreconditionFailed", + "HTTPPreconditionRequired", + "HTTPProxyAuthenticationRequired", + "HTTPRedirection", + "HTTPRequestEntityTooLarge", + "HTTPRequestHeaderFieldsTooLarge", + "HTTPRequestRangeNotSatisfiable", + "HTTPRequestTimeout", + "HTTPRequestURITooLong", + "HTTPResetContent", + "HTTPSeeOther", + "HTTPServerError", + "HTTPServiceUnavailable", + "HTTPSuccessful", + "HTTPTemporaryRedirect", + "HTTPTooManyRequests", + "HTTPUnauthorized", + "HTTPUnavailableForLegalReasons", + "HTTPUnprocessableEntity", + "HTTPUnsupportedMediaType", + "HTTPUpgradeRequired", + "HTTPUseProxy", + "HTTPVariantAlsoNegotiates", + "HTTPVersionNotSupported", + # web_fileresponse + "FileResponse", + # web_middlewares + "middleware", + 
"normalize_path_middleware", + # web_protocol + "PayloadAccessError", + "RequestHandler", + "RequestPayloadError", + # web_request + "BaseRequest", + "FileField", + "Request", + # web_response + "ContentCoding", + "Response", + "StreamResponse", + "json_response", + # web_routedef + "AbstractRouteDef", + "RouteDef", + "RouteTableDef", + "StaticDef", + "delete", + "get", + "head", + "options", + "patch", + "post", + "put", + "route", + "static", + "view", + # web_runner + "AppRunner", + "BaseRunner", + "BaseSite", + "GracefulExit", + "ServerRunner", + "SockSite", + "TCPSite", + "UnixSite", + "NamedPipeSite", + # web_server + "Server", + # web_urldispatcher + "AbstractResource", + "AbstractRoute", + "DynamicResource", + "PlainResource", + "PrefixedSubAppResource", + "Resource", + "ResourceRoute", + "StaticResource", + "UrlDispatcher", + "UrlMappingMatchInfo", + "View", + # web_ws + "WebSocketReady", + "WebSocketResponse", + "WSMsgType", + # web + "run_app", +) + + +if TYPE_CHECKING: + from ssl import SSLContext +else: + try: + from ssl import SSLContext + except ImportError: # pragma: no cover + SSLContext = object # type: ignore[misc,assignment] + +# Only display warning when using -Wdefault, -We, -X dev or similar. +warnings.filterwarnings("ignore", category=NotAppKeyWarning, append=True) + +HostSequence = TypingIterable[str] + + +async def _run_app( + app: Union[Application, Awaitable[Application]], + *, + host: Optional[Union[str, HostSequence]] = None, + port: Optional[int] = None, + path: Union[PathLike, TypingIterable[PathLike], None] = None, + sock: Optional[Union[socket.socket, TypingIterable[socket.socket]]] = None, + ssl_context: Optional[SSLContext] = None, + print: Optional[Callable[..., None]] = print, + backlog: int = 128, + reuse_address: Optional[bool] = None, + reuse_port: Optional[bool] = None, + **kwargs: Any, # TODO(PY311): Use Unpack +) -> None: + # An internal function to actually do all dirty job for application running + if asyncio.iscoroutine(app): + app = await app + + app = cast(Application, app) + + runner = AppRunner(app, **kwargs) + + await runner.setup() + + sites: List[BaseSite] = [] + + try: + if host is not None: + if isinstance(host, str): + sites.append( + TCPSite( + runner, + host, + port, + ssl_context=ssl_context, + backlog=backlog, + reuse_address=reuse_address, + reuse_port=reuse_port, + ) + ) + else: + for h in host: + sites.append( + TCPSite( + runner, + h, + port, + ssl_context=ssl_context, + backlog=backlog, + reuse_address=reuse_address, + reuse_port=reuse_port, + ) + ) + elif path is None and sock is None or port is not None: + sites.append( + TCPSite( + runner, + port=port, + ssl_context=ssl_context, + backlog=backlog, + reuse_address=reuse_address, + reuse_port=reuse_port, + ) + ) + + if path is not None: + if isinstance(path, (str, os.PathLike)): + sites.append( + UnixSite( + runner, + path, + ssl_context=ssl_context, + backlog=backlog, + ) + ) + else: + for p in path: + sites.append( + UnixSite( + runner, + p, + ssl_context=ssl_context, + backlog=backlog, + ) + ) + + if sock is not None: + if not isinstance(sock, Iterable): + sites.append( + SockSite( + runner, + sock, + ssl_context=ssl_context, + backlog=backlog, + ) + ) + else: + for s in sock: + sites.append( + SockSite( + runner, + s, + ssl_context=ssl_context, + backlog=backlog, + ) + ) + for site in sites: + await site.start() + + if print: # pragma: no branch + names = sorted(str(s.name) for s in runner.sites) + print( + "======== Running on {} ========\n" + "(Press CTRL+C to 
quit)".format(", ".join(names)) + ) + + # sleep forever by 1 hour intervals, + while True: + await asyncio.sleep(3600) + finally: + await runner.cleanup() + + +def _cancel_tasks( + to_cancel: Set["asyncio.Task[Any]"], loop: asyncio.AbstractEventLoop +) -> None: + if not to_cancel: + return + + for task in to_cancel: + task.cancel() + + loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True)) + + for task in to_cancel: + if task.cancelled(): + continue + if task.exception() is not None: + loop.call_exception_handler( + { + "message": "unhandled exception during asyncio.run() shutdown", + "exception": task.exception(), + "task": task, + } + ) + + +def run_app( + app: Union[Application, Awaitable[Application]], + *, + host: Optional[Union[str, HostSequence]] = None, + port: Optional[int] = None, + path: Union[PathLike, TypingIterable[PathLike], None] = None, + sock: Optional[Union[socket.socket, TypingIterable[socket.socket]]] = None, + shutdown_timeout: float = 60.0, + keepalive_timeout: float = 75.0, + ssl_context: Optional[SSLContext] = None, + print: Optional[Callable[..., None]] = print, + backlog: int = 128, + access_log_class: Type[AbstractAccessLogger] = AccessLogger, + access_log_format: str = AccessLogger.LOG_FORMAT, + access_log: Optional[logging.Logger] = access_logger, + handle_signals: bool = True, + reuse_address: Optional[bool] = None, + reuse_port: Optional[bool] = None, + handler_cancellation: bool = False, + loop: Optional[asyncio.AbstractEventLoop] = None, + **kwargs: Any, +) -> None: + """Run an app locally""" + if loop is None: + loop = asyncio.new_event_loop() + + # Configure if and only if in debugging mode and using the default logger + if loop.get_debug() and access_log and access_log.name == "aiohttp.access": + if access_log.level == logging.NOTSET: + access_log.setLevel(logging.DEBUG) + if not access_log.hasHandlers(): + access_log.addHandler(logging.StreamHandler()) + + main_task = loop.create_task( + _run_app( + app, + host=host, + port=port, + path=path, + sock=sock, + shutdown_timeout=shutdown_timeout, + keepalive_timeout=keepalive_timeout, + ssl_context=ssl_context, + print=print, + backlog=backlog, + access_log_class=access_log_class, + access_log_format=access_log_format, + access_log=access_log, + handle_signals=handle_signals, + reuse_address=reuse_address, + reuse_port=reuse_port, + handler_cancellation=handler_cancellation, + **kwargs, + ) + ) + + try: + asyncio.set_event_loop(loop) + loop.run_until_complete(main_task) + except (GracefulExit, KeyboardInterrupt): # pragma: no cover + pass + finally: + try: + main_task.cancel() + with suppress(asyncio.CancelledError): + loop.run_until_complete(main_task) + finally: + _cancel_tasks(asyncio.all_tasks(loop), loop) + loop.run_until_complete(loop.shutdown_asyncgens()) + loop.close() + + +def main(argv: List[str]) -> None: + arg_parser = ArgumentParser( + description="aiohttp.web Application server", prog="aiohttp.web" + ) + arg_parser.add_argument( + "entry_func", + help=( + "Callable returning the `aiohttp.web.Application` instance to " + "run. Should be specified in the 'module:function' syntax." + ), + metavar="entry-func", + ) + arg_parser.add_argument( + "-H", + "--hostname", + help="TCP/IP hostname to serve on (default: localhost)", + default=None, + ) + arg_parser.add_argument( + "-P", + "--port", + help="TCP/IP port to serve on (default: %(default)r)", + type=int, + default=8080, + ) + arg_parser.add_argument( + "-U", + "--path", + help="Unix file system path to serve on. 
Can be combined with hostname " + "to serve on both Unix and TCP.", + ) + args, extra_argv = arg_parser.parse_known_args(argv) + + # Import logic + mod_str, _, func_str = args.entry_func.partition(":") + if not func_str or not mod_str: + arg_parser.error("'entry-func' not in 'module:function' syntax") + if mod_str.startswith("."): + arg_parser.error("relative module names not supported") + try: + module = import_module(mod_str) + except ImportError as ex: + arg_parser.error(f"unable to import {mod_str}: {ex}") + try: + func = getattr(module, func_str) + except AttributeError: + arg_parser.error(f"module {mod_str!r} has no attribute {func_str!r}") + + # Compatibility logic + if args.path is not None and not hasattr(socket, "AF_UNIX"): + arg_parser.error( + "file system paths not supported by your operating environment" + ) + + logging.basicConfig(level=logging.DEBUG) + + if args.path and args.hostname is None: + host = port = None + else: + host = args.hostname or "localhost" + port = args.port + + app = func(extra_argv) + run_app(app, host=host, port=port, path=args.path) + arg_parser.exit(message="Stopped\n") + + +if __name__ == "__main__": # pragma: no branch + main(sys.argv[1:]) # pragma: no cover diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_app.py b/py311/lib/python3.11/site-packages/aiohttp/web_app.py new file mode 100644 index 0000000000000000000000000000000000000000..619c0085da1985b97f7e222d30c1870cb010a128 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/web_app.py @@ -0,0 +1,620 @@ +import asyncio +import logging +import warnings +from functools import lru_cache, partial, update_wrapper +from typing import ( + TYPE_CHECKING, + Any, + AsyncIterator, + Awaitable, + Callable, + Dict, + Iterable, + Iterator, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, + cast, + overload, +) + +from aiosignal import Signal +from frozenlist import FrozenList + +from . import hdrs +from .abc import ( + AbstractAccessLogger, + AbstractMatchInfo, + AbstractRouter, + AbstractStreamWriter, +) +from .helpers import DEBUG, AppKey +from .http_parser import RawRequestMessage +from .log import web_logger +from .streams import StreamReader +from .typedefs import Handler, Middleware +from .web_exceptions import NotAppKeyWarning +from .web_log import AccessLogger +from .web_middlewares import _fix_request_current_app +from .web_protocol import RequestHandler +from .web_request import Request +from .web_response import StreamResponse +from .web_routedef import AbstractRouteDef +from .web_server import Server +from .web_urldispatcher import ( + AbstractResource, + AbstractRoute, + Domain, + MaskDomain, + MatchedSubAppResource, + PrefixedSubAppResource, + SystemRoute, + UrlDispatcher, +) + +__all__ = ("Application", "CleanupError") + + +if TYPE_CHECKING: + _AppSignal = Signal["Application"] + _RespPrepareSignal = Signal[Request, StreamResponse] + _Middlewares = FrozenList[Middleware] + _MiddlewaresHandlers = Optional[Sequence[Tuple[Middleware, bool]]] + _Subapps = List["Application"] +else: + # No type checker mode, skip types + _AppSignal = Signal + _RespPrepareSignal = Signal + _Middlewares = FrozenList + _MiddlewaresHandlers = Optional[Sequence] + _Subapps = List + +_T = TypeVar("_T") +_U = TypeVar("_U") +_Resource = TypeVar("_Resource", bound=AbstractResource) + + +def _build_middlewares( + handler: Handler, apps: Tuple["Application", ...] 
+) -> Callable[[Request], Awaitable[StreamResponse]]: + """Apply middlewares to handler.""" + for app in apps[::-1]: + for m, _ in app._middlewares_handlers: # type: ignore[union-attr] + handler = update_wrapper(partial(m, handler=handler), handler) + return handler + + +_cached_build_middleware = lru_cache(maxsize=1024)(_build_middlewares) + + +class Application(MutableMapping[Union[str, AppKey[Any]], Any]): + ATTRS = frozenset( + [ + "logger", + "_debug", + "_router", + "_loop", + "_handler_args", + "_middlewares", + "_middlewares_handlers", + "_has_legacy_middlewares", + "_run_middlewares", + "_state", + "_frozen", + "_pre_frozen", + "_subapps", + "_on_response_prepare", + "_on_startup", + "_on_shutdown", + "_on_cleanup", + "_client_max_size", + "_cleanup_ctx", + ] + ) + + def __init__( + self, + *, + logger: logging.Logger = web_logger, + router: Optional[UrlDispatcher] = None, + middlewares: Iterable[Middleware] = (), + handler_args: Optional[Mapping[str, Any]] = None, + client_max_size: int = 1024**2, + loop: Optional[asyncio.AbstractEventLoop] = None, + debug: Any = ..., # mypy doesn't support ellipsis + ) -> None: + if router is None: + router = UrlDispatcher() + else: + warnings.warn( + "router argument is deprecated", DeprecationWarning, stacklevel=2 + ) + assert isinstance(router, AbstractRouter), router + + if loop is not None: + warnings.warn( + "loop argument is deprecated", DeprecationWarning, stacklevel=2 + ) + + if debug is not ...: + warnings.warn( + "debug argument is deprecated", DeprecationWarning, stacklevel=2 + ) + self._debug = debug + self._router: UrlDispatcher = router + self._loop = loop + self._handler_args = handler_args + self.logger = logger + + self._middlewares: _Middlewares = FrozenList(middlewares) + + # initialized on freezing + self._middlewares_handlers: _MiddlewaresHandlers = None + # initialized on freezing + self._run_middlewares: Optional[bool] = None + self._has_legacy_middlewares: bool = True + + self._state: Dict[Union[AppKey[Any], str], object] = {} + self._frozen = False + self._pre_frozen = False + self._subapps: _Subapps = [] + + self._on_response_prepare: _RespPrepareSignal = Signal(self) + self._on_startup: _AppSignal = Signal(self) + self._on_shutdown: _AppSignal = Signal(self) + self._on_cleanup: _AppSignal = Signal(self) + self._cleanup_ctx = CleanupContext() + self._on_startup.append(self._cleanup_ctx._on_startup) + self._on_cleanup.append(self._cleanup_ctx._on_cleanup) + self._client_max_size = client_max_size + + def __init_subclass__(cls: Type["Application"]) -> None: + warnings.warn( + "Inheritance class {} from web.Application " + "is discouraged".format(cls.__name__), + DeprecationWarning, + stacklevel=3, + ) + + if DEBUG: # pragma: no cover + + def __setattr__(self, name: str, val: Any) -> None: + if name not in self.ATTRS: + warnings.warn( + "Setting custom web.Application.{} attribute " + "is discouraged".format(name), + DeprecationWarning, + stacklevel=2, + ) + super().__setattr__(name, val) + + # MutableMapping API + + def __eq__(self, other: object) -> bool: + return self is other + + @overload # type: ignore[override] + def __getitem__(self, key: AppKey[_T]) -> _T: ... + + @overload + def __getitem__(self, key: str) -> Any: ... 
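A minimal construction sketch to accompany the `Application.__init__` shown below; the size value is an illustrative assumption, not taken from this diff:

```python
from aiohttp import web

# client_max_size bounds the request body size (the default above is 1 MiB);
# the 2 MiB value here is illustrative.
app = web.Application(client_max_size=2 * 1024**2)
```

Passing `router`, `loop`, or `debug` still works but emits `DeprecationWarning`, as the constructor shows.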
+ + def __getitem__(self, key: Union[str, AppKey[_T]]) -> Any: + return self._state[key] + + def _check_frozen(self) -> None: + if self._frozen: + warnings.warn( + "Changing state of started or joined application is deprecated", + DeprecationWarning, + stacklevel=3, + ) + + @overload # type: ignore[override] + def __setitem__(self, key: AppKey[_T], value: _T) -> None: ... + + @overload + def __setitem__(self, key: str, value: Any) -> None: ... + + def __setitem__(self, key: Union[str, AppKey[_T]], value: Any) -> None: + self._check_frozen() + if not isinstance(key, AppKey): + warnings.warn( + "It is recommended to use web.AppKey instances for keys.\n" + + "https://docs.aiohttp.org/en/stable/web_advanced.html" + + "#application-s-config", + category=NotAppKeyWarning, + stacklevel=2, + ) + self._state[key] = value + + def __delitem__(self, key: Union[str, AppKey[_T]]) -> None: + self._check_frozen() + del self._state[key] + + def __len__(self) -> int: + return len(self._state) + + def __iter__(self) -> Iterator[Union[str, AppKey[Any]]]: + return iter(self._state) + + def __hash__(self) -> int: + return id(self) + + @overload # type: ignore[override] + def get(self, key: AppKey[_T], default: None = ...) -> Optional[_T]: ... + + @overload + def get(self, key: AppKey[_T], default: _U) -> Union[_T, _U]: ... + + @overload + def get(self, key: str, default: Any = ...) -> Any: ... + + def get(self, key: Union[str, AppKey[_T]], default: Any = None) -> Any: + return self._state.get(key, default) + + ######## + @property + def loop(self) -> asyncio.AbstractEventLoop: + # Technically the loop can be None + # but we mask it by explicit type cast + # to provide more convenient type annotation + warnings.warn("loop property is deprecated", DeprecationWarning, stacklevel=2) + return cast(asyncio.AbstractEventLoop, self._loop) + + def _set_loop(self, loop: Optional[asyncio.AbstractEventLoop]) -> None: + if loop is None: + loop = asyncio.get_event_loop() + if self._loop is not None and self._loop is not loop: + raise RuntimeError( + "web.Application instance initialized with different loop" + ) + + self._loop = loop + + # set loop debug + if self._debug is ...: + self._debug = loop.get_debug() + + # set loop to sub applications + for subapp in self._subapps: + subapp._set_loop(loop) + + @property + def pre_frozen(self) -> bool: + return self._pre_frozen + + def pre_freeze(self) -> None: + if self._pre_frozen: + return + + self._pre_frozen = True + self._middlewares.freeze() + self._router.freeze() + self._on_response_prepare.freeze() + self._cleanup_ctx.freeze() + self._on_startup.freeze() + self._on_shutdown.freeze() + self._on_cleanup.freeze() + self._middlewares_handlers = tuple(self._prepare_middleware()) + self._has_legacy_middlewares = any( + not new_style for _, new_style in self._middlewares_handlers + ) + + # If current app and any subapp do not have middlewares avoid run all + # of the code footprint that it implies, which have a middleware + # hardcoded per app that sets up the current_app attribute. If no + # middlewares are configured the handler will receive the proper + # current_app without needing all of this code. 
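A short sketch of the `web.AppKey` pattern that the `NotAppKeyWarning` in `__setitem__` above points to; the key name and stored values are illustrative assumptions:

```python
from aiohttp import web

# A typed key avoids NotAppKeyWarning and lets type checkers infer the value type.
database_url = web.AppKey("database_url", str)

app = web.Application()
app[database_url] = "postgres://localhost/demo"  # illustrative value
app["legacy"] = 42  # plain str keys still work but emit NotAppKeyWarning
```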
+ self._run_middlewares = True if self.middlewares else False + + for subapp in self._subapps: + subapp.pre_freeze() + self._run_middlewares = self._run_middlewares or subapp._run_middlewares + + @property + def frozen(self) -> bool: + return self._frozen + + def freeze(self) -> None: + if self._frozen: + return + + self.pre_freeze() + self._frozen = True + for subapp in self._subapps: + subapp.freeze() + + @property + def debug(self) -> bool: + warnings.warn("debug property is deprecated", DeprecationWarning, stacklevel=2) + return self._debug # type: ignore[no-any-return] + + def _reg_subapp_signals(self, subapp: "Application") -> None: + def reg_handler(signame: str) -> None: + subsig = getattr(subapp, signame) + + async def handler(app: "Application") -> None: + await subsig.send(subapp) + + appsig = getattr(self, signame) + appsig.append(handler) + + reg_handler("on_startup") + reg_handler("on_shutdown") + reg_handler("on_cleanup") + + def add_subapp(self, prefix: str, subapp: "Application") -> PrefixedSubAppResource: + if not isinstance(prefix, str): + raise TypeError("Prefix must be str") + prefix = prefix.rstrip("/") + if not prefix: + raise ValueError("Prefix cannot be empty") + factory = partial(PrefixedSubAppResource, prefix, subapp) + return self._add_subapp(factory, subapp) + + def _add_subapp( + self, resource_factory: Callable[[], _Resource], subapp: "Application" + ) -> _Resource: + if self.frozen: + raise RuntimeError("Cannot add sub application to frozen application") + if subapp.frozen: + raise RuntimeError("Cannot add frozen application") + resource = resource_factory() + self.router.register_resource(resource) + self._reg_subapp_signals(subapp) + self._subapps.append(subapp) + subapp.pre_freeze() + if self._loop is not None: + subapp._set_loop(self._loop) + return resource + + def add_domain(self, domain: str, subapp: "Application") -> MatchedSubAppResource: + if not isinstance(domain, str): + raise TypeError("Domain must be str") + elif "*" in domain: + rule: Domain = MaskDomain(domain) + else: + rule = Domain(domain) + factory = partial(MatchedSubAppResource, rule, subapp) + return self._add_subapp(factory, subapp) + + def add_routes(self, routes: Iterable[AbstractRouteDef]) -> List[AbstractRoute]: + return self.router.add_routes(routes) + + @property + def on_response_prepare(self) -> _RespPrepareSignal: + return self._on_response_prepare + + @property + def on_startup(self) -> _AppSignal: + return self._on_startup + + @property + def on_shutdown(self) -> _AppSignal: + return self._on_shutdown + + @property + def on_cleanup(self) -> _AppSignal: + return self._on_cleanup + + @property + def cleanup_ctx(self) -> "CleanupContext": + return self._cleanup_ctx + + @property + def router(self) -> UrlDispatcher: + return self._router + + @property + def middlewares(self) -> _Middlewares: + return self._middlewares + + def _make_handler( + self, + *, + loop: Optional[asyncio.AbstractEventLoop] = None, + access_log_class: Type[AbstractAccessLogger] = AccessLogger, + **kwargs: Any, + ) -> Server: + + if not issubclass(access_log_class, AbstractAccessLogger): + raise TypeError( + "access_log_class must be subclass of " + "aiohttp.abc.AbstractAccessLogger, got {}".format(access_log_class) + ) + + self._set_loop(loop) + self.freeze() + + kwargs["debug"] = self._debug + kwargs["access_log_class"] = access_log_class + if self._handler_args: + for k, v in self._handler_args.items(): + kwargs[k] = v + + return Server( + self._handle, # type: ignore[arg-type] + 
request_factory=self._make_request, + loop=self._loop, + **kwargs, + ) + + def make_handler( + self, + *, + loop: Optional[asyncio.AbstractEventLoop] = None, + access_log_class: Type[AbstractAccessLogger] = AccessLogger, + **kwargs: Any, + ) -> Server: + + warnings.warn( + "Application.make_handler(...) is deprecated, use AppRunner API instead", + DeprecationWarning, + stacklevel=2, + ) + + return self._make_handler( + loop=loop, access_log_class=access_log_class, **kwargs + ) + + async def startup(self) -> None: + """Causes on_startup signal + + Should be called in the event loop along with the request handler. + """ + await self.on_startup.send(self) + + async def shutdown(self) -> None: + """Causes on_shutdown signal + + Should be called before cleanup() + """ + await self.on_shutdown.send(self) + + async def cleanup(self) -> None: + """Causes on_cleanup signal + + Should be called after shutdown() + """ + if self.on_cleanup.frozen: + await self.on_cleanup.send(self) + else: + # If an exception occurs in startup, ensure cleanup contexts are completed. + await self._cleanup_ctx._on_cleanup(self) + + def _make_request( + self, + message: RawRequestMessage, + payload: StreamReader, + protocol: RequestHandler, + writer: AbstractStreamWriter, + task: "asyncio.Task[None]", + _cls: Type[Request] = Request, + ) -> Request: + if TYPE_CHECKING: + assert self._loop is not None + return _cls( + message, + payload, + protocol, + writer, + task, + self._loop, + client_max_size=self._client_max_size, + ) + + def _prepare_middleware(self) -> Iterator[Tuple[Middleware, bool]]: + for m in reversed(self._middlewares): + if getattr(m, "__middleware_version__", None) == 1: + yield m, True + else: + warnings.warn( + f'old-style middleware "{m!r}" deprecated, see #2252', + DeprecationWarning, + stacklevel=2, + ) + yield m, False + + yield _fix_request_current_app(self), True + + async def _handle(self, request: Request) -> StreamResponse: + loop = asyncio.get_event_loop() + debug = loop.get_debug() + match_info = await self._router.resolve(request) + if debug: # pragma: no cover + if not isinstance(match_info, AbstractMatchInfo): + raise TypeError( + "match_info should be AbstractMatchInfo " + "instance, not {!r}".format(match_info) + ) + match_info.add_app(self) + + match_info.freeze() + + request._match_info = match_info + + if request.headers.get(hdrs.EXPECT): + resp = await match_info.expect_handler(request) + await request.writer.drain() + if resp is not None: + return resp + + handler = match_info.handler + + if self._run_middlewares: + # If its a SystemRoute, don't cache building the middlewares since + # they are constructed for every MatchInfoError as a new handler + # is made each time. 
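`_prepare_middleware` below treats anything carrying `__middleware_version__ == 1` as new-style. A minimal new-style middleware sketch; the header name is an assumption for illustration:

```python
from aiohttp import web
from aiohttp.typedefs import Handler

@web.middleware  # sets __middleware_version__ = 1, so new_style is True below
async def tag_response(request: web.Request, handler: Handler) -> web.StreamResponse:
    # Call the next handler in the chain, then decorate its response.
    response = await handler(request)
    response.headers["X-Example"] = "tagged"  # illustrative header
    return response

app = web.Application(middlewares=[tag_response])
```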
+ if not self._has_legacy_middlewares and not isinstance( + match_info.route, SystemRoute + ): + handler = _cached_build_middleware(handler, match_info.apps) + else: + for app in match_info.apps[::-1]: + for m, new_style in app._middlewares_handlers: # type: ignore[union-attr] + if new_style: + handler = update_wrapper( + partial(m, handler=handler), handler + ) + else: + handler = await m(app, handler) # type: ignore[arg-type,assignment] + + return await handler(request) + + def __call__(self) -> "Application": + """gunicorn compatibility""" + return self + + def __repr__(self) -> str: + return f"<Application 0x{id(self):x}>" + + def __bool__(self) -> bool: + return True + + +class CleanupError(RuntimeError): + @property + def exceptions(self) -> List[BaseException]: + return cast(List[BaseException], self.args[1]) + + +if TYPE_CHECKING: + _CleanupContextBase = FrozenList[Callable[[Application], AsyncIterator[None]]] +else: + _CleanupContextBase = FrozenList + + +class CleanupContext(_CleanupContextBase): + def __init__(self) -> None: + super().__init__() + self._exits: List[AsyncIterator[None]] = [] + + async def _on_startup(self, app: Application) -> None: + for cb in self: + it = cb(app).__aiter__() + await it.__anext__() + self._exits.append(it) + + async def _on_cleanup(self, app: Application) -> None: + errors = [] + for it in reversed(self._exits): + try: + await it.__anext__() + except StopAsyncIteration: + pass + except (Exception, asyncio.CancelledError) as exc: + errors.append(exc) + else: + errors.append(RuntimeError(f"{it!r} has more than one 'yield'")) + if errors: + if len(errors) == 1: + raise errors[0] + else: + raise CleanupError("Multiple errors on cleanup stage", errors) diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_exceptions.py b/py311/lib/python3.11/site-packages/aiohttp/web_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..ee2c1e72d40ac93c00cbfcef6c84888a336855a3 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/web_exceptions.py @@ -0,0 +1,452 @@ +import warnings +from typing import Any, Dict, Iterable, List, Optional, Set # noqa + +from yarl import URL + +from .typedefs import LooseHeaders, StrOrURL +from .web_response import Response + +__all__ = ( + "HTTPException", + "HTTPError", + "HTTPRedirection", + "HTTPSuccessful", + "HTTPOk", + "HTTPCreated", + "HTTPAccepted", + "HTTPNonAuthoritativeInformation", + "HTTPNoContent", + "HTTPResetContent", + "HTTPPartialContent", + "HTTPMove", + "HTTPMultipleChoices", + "HTTPMovedPermanently", + "HTTPFound", + "HTTPSeeOther", + "HTTPNotModified", + "HTTPUseProxy", + "HTTPTemporaryRedirect", + "HTTPPermanentRedirect", + "HTTPClientError", + "HTTPBadRequest", + "HTTPUnauthorized", + "HTTPPaymentRequired", + "HTTPForbidden", + "HTTPNotFound", + "HTTPMethodNotAllowed", + "HTTPNotAcceptable", + "HTTPProxyAuthenticationRequired", + "HTTPRequestTimeout", + "HTTPConflict", + "HTTPGone", + "HTTPLengthRequired", + "HTTPPreconditionFailed", + "HTTPRequestEntityTooLarge", + "HTTPRequestURITooLong", + "HTTPUnsupportedMediaType", + "HTTPRequestRangeNotSatisfiable", + "HTTPExpectationFailed", + "HTTPMisdirectedRequest", + "HTTPUnprocessableEntity", + "HTTPFailedDependency", + "HTTPUpgradeRequired", + "HTTPPreconditionRequired", + "HTTPTooManyRequests", + "HTTPRequestHeaderFieldsTooLarge", + "HTTPUnavailableForLegalReasons", + "HTTPServerError", + "HTTPInternalServerError", + "HTTPNotImplemented", + "HTTPBadGateway", + "HTTPServiceUnavailable", + "HTTPGatewayTimeout", + "HTTPVersionNotSupported", + 
"HTTPVariantAlsoNegotiates", + "HTTPInsufficientStorage", + "HTTPNotExtended", + "HTTPNetworkAuthenticationRequired", +) + + +class NotAppKeyWarning(UserWarning): + """Warning when not using AppKey in Application.""" + + +############################################################ +# HTTP Exceptions +############################################################ + + +class HTTPException(Response, Exception): + + # You should set in subclasses: + # status = 200 + + status_code = -1 + empty_body = False + + __http_exception__ = True + + def __init__( + self, + *, + headers: Optional[LooseHeaders] = None, + reason: Optional[str] = None, + body: Any = None, + text: Optional[str] = None, + content_type: Optional[str] = None, + ) -> None: + if body is not None: + warnings.warn( + "body argument is deprecated for http web exceptions", + DeprecationWarning, + ) + Response.__init__( + self, + status=self.status_code, + headers=headers, + reason=reason, + body=body, + text=text, + content_type=content_type, + ) + Exception.__init__(self, self.reason) + if self.body is None and not self.empty_body: + self.text = f"{self.status}: {self.reason}" + + def __bool__(self) -> bool: + return True + + +class HTTPError(HTTPException): + """Base class for exceptions with status codes in the 400s and 500s.""" + + +class HTTPRedirection(HTTPException): + """Base class for exceptions with status codes in the 300s.""" + + +class HTTPSuccessful(HTTPException): + """Base class for exceptions with status codes in the 200s.""" + + +class HTTPOk(HTTPSuccessful): + status_code = 200 + + +class HTTPCreated(HTTPSuccessful): + status_code = 201 + + +class HTTPAccepted(HTTPSuccessful): + status_code = 202 + + +class HTTPNonAuthoritativeInformation(HTTPSuccessful): + status_code = 203 + + +class HTTPNoContent(HTTPSuccessful): + status_code = 204 + empty_body = True + + +class HTTPResetContent(HTTPSuccessful): + status_code = 205 + empty_body = True + + +class HTTPPartialContent(HTTPSuccessful): + status_code = 206 + + +############################################################ +# 3xx redirection +############################################################ + + +class HTTPMove(HTTPRedirection): + def __init__( + self, + location: StrOrURL, + *, + headers: Optional[LooseHeaders] = None, + reason: Optional[str] = None, + body: Any = None, + text: Optional[str] = None, + content_type: Optional[str] = None, + ) -> None: + if not location: + raise ValueError("HTTP redirects need a location to redirect to.") + super().__init__( + headers=headers, + reason=reason, + body=body, + text=text, + content_type=content_type, + ) + self.headers["Location"] = str(URL(location)) + self.location = location + + +class HTTPMultipleChoices(HTTPMove): + status_code = 300 + + +class HTTPMovedPermanently(HTTPMove): + status_code = 301 + + +class HTTPFound(HTTPMove): + status_code = 302 + + +# This one is safe after a POST (the redirected location will be +# retrieved with GET): +class HTTPSeeOther(HTTPMove): + status_code = 303 + + +class HTTPNotModified(HTTPRedirection): + # FIXME: this should include a date or etag header + status_code = 304 + empty_body = True + + +class HTTPUseProxy(HTTPMove): + # Not a move, but looks a little like one + status_code = 305 + + +class HTTPTemporaryRedirect(HTTPMove): + status_code = 307 + + +class HTTPPermanentRedirect(HTTPMove): + status_code = 308 + + +############################################################ +# 4xx client error +############################################################ + + +class 
HTTPClientError(HTTPError): + pass + + +class HTTPBadRequest(HTTPClientError): + status_code = 400 + + +class HTTPUnauthorized(HTTPClientError): + status_code = 401 + + +class HTTPPaymentRequired(HTTPClientError): + status_code = 402 + + +class HTTPForbidden(HTTPClientError): + status_code = 403 + + +class HTTPNotFound(HTTPClientError): + status_code = 404 + + +class HTTPMethodNotAllowed(HTTPClientError): + status_code = 405 + + def __init__( + self, + method: str, + allowed_methods: Iterable[str], + *, + headers: Optional[LooseHeaders] = None, + reason: Optional[str] = None, + body: Any = None, + text: Optional[str] = None, + content_type: Optional[str] = None, + ) -> None: + allow = ",".join(sorted(allowed_methods)) + super().__init__( + headers=headers, + reason=reason, + body=body, + text=text, + content_type=content_type, + ) + self.headers["Allow"] = allow + self.allowed_methods: Set[str] = set(allowed_methods) + self.method = method.upper() + + +class HTTPNotAcceptable(HTTPClientError): + status_code = 406 + + +class HTTPProxyAuthenticationRequired(HTTPClientError): + status_code = 407 + + +class HTTPRequestTimeout(HTTPClientError): + status_code = 408 + + +class HTTPConflict(HTTPClientError): + status_code = 409 + + +class HTTPGone(HTTPClientError): + status_code = 410 + + +class HTTPLengthRequired(HTTPClientError): + status_code = 411 + + +class HTTPPreconditionFailed(HTTPClientError): + status_code = 412 + + +class HTTPRequestEntityTooLarge(HTTPClientError): + status_code = 413 + + def __init__(self, max_size: float, actual_size: float, **kwargs: Any) -> None: + kwargs.setdefault( + "text", + "Maximum request body size {} exceeded, " + "actual body size {}".format(max_size, actual_size), + ) + super().__init__(**kwargs) + + +class HTTPRequestURITooLong(HTTPClientError): + status_code = 414 + + +class HTTPUnsupportedMediaType(HTTPClientError): + status_code = 415 + + +class HTTPRequestRangeNotSatisfiable(HTTPClientError): + status_code = 416 + + +class HTTPExpectationFailed(HTTPClientError): + status_code = 417 + + +class HTTPMisdirectedRequest(HTTPClientError): + status_code = 421 + + +class HTTPUnprocessableEntity(HTTPClientError): + status_code = 422 + + +class HTTPFailedDependency(HTTPClientError): + status_code = 424 + + +class HTTPUpgradeRequired(HTTPClientError): + status_code = 426 + + +class HTTPPreconditionRequired(HTTPClientError): + status_code = 428 + + +class HTTPTooManyRequests(HTTPClientError): + status_code = 429 + + +class HTTPRequestHeaderFieldsTooLarge(HTTPClientError): + status_code = 431 + + +class HTTPUnavailableForLegalReasons(HTTPClientError): + status_code = 451 + + def __init__( + self, + link: Optional[StrOrURL], + *, + headers: Optional[LooseHeaders] = None, + reason: Optional[str] = None, + body: Any = None, + text: Optional[str] = None, + content_type: Optional[str] = None, + ) -> None: + super().__init__( + headers=headers, + reason=reason, + body=body, + text=text, + content_type=content_type, + ) + self._link = None + if link: + self._link = URL(link) + self.headers["Link"] = f'<{str(self._link)}>; rel="blocked-by"' + + @property + def link(self) -> Optional[URL]: + return self._link + + +############################################################ +# 5xx Server Error +############################################################ +# Response status codes beginning with the digit "5" indicate cases in +# which the server is aware that it has erred or is incapable of +# performing the request. 
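Because `HTTPException` subclasses both `Response` and `Exception`, the classes above can be raised or returned from a handler. A hedged sketch; the route shape and messages are assumptions for illustration:

```python
from aiohttp import web

async def get_user(request: web.Request) -> web.Response:
    user_id = request.match_info.get("id")
    if user_id is None:
        raise web.HTTPBadRequest(text="missing user id")  # 400 with custom body
    if user_id != "42":
        raise web.HTTPNotFound(reason="no such user")  # 404 with custom reason
    return web.Response(text="user 42")
```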
Except when responding to a HEAD request, the +# server SHOULD include an entity containing an explanation of the error +# situation, and whether it is a temporary or permanent condition. User +# agents SHOULD display any included entity to the user. These response +# codes are applicable to any request method. + + +class HTTPServerError(HTTPError): + pass + + +class HTTPInternalServerError(HTTPServerError): + status_code = 500 + + +class HTTPNotImplemented(HTTPServerError): + status_code = 501 + + +class HTTPBadGateway(HTTPServerError): + status_code = 502 + + +class HTTPServiceUnavailable(HTTPServerError): + status_code = 503 + + +class HTTPGatewayTimeout(HTTPServerError): + status_code = 504 + + +class HTTPVersionNotSupported(HTTPServerError): + status_code = 505 + + +class HTTPVariantAlsoNegotiates(HTTPServerError): + status_code = 506 + + +class HTTPInsufficientStorage(HTTPServerError): + status_code = 507 + + +class HTTPNotExtended(HTTPServerError): + status_code = 510 + + +class HTTPNetworkAuthenticationRequired(HTTPServerError): + status_code = 511 diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_fileresponse.py b/py311/lib/python3.11/site-packages/aiohttp/web_fileresponse.py new file mode 100644 index 0000000000000000000000000000000000000000..26484b9483a9e7e49e1975a2002535009755e133 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/web_fileresponse.py @@ -0,0 +1,418 @@ +import asyncio +import io +import os +import pathlib +import sys +from contextlib import suppress +from enum import Enum, auto +from mimetypes import MimeTypes +from stat import S_ISREG +from types import MappingProxyType +from typing import ( # noqa + IO, + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Final, + Iterator, + List, + Optional, + Set, + Tuple, + Union, + cast, +) + +from . import hdrs +from .abc import AbstractStreamWriter +from .helpers import ETAG_ANY, ETag, must_be_empty_body +from .typedefs import LooseHeaders, PathLike +from .web_exceptions import ( + HTTPForbidden, + HTTPNotFound, + HTTPNotModified, + HTTPPartialContent, + HTTPPreconditionFailed, + HTTPRequestRangeNotSatisfiable, +) +from .web_response import StreamResponse + +__all__ = ("FileResponse",) + +if TYPE_CHECKING: + from .web_request import BaseRequest + + +_T_OnChunkSent = Optional[Callable[[bytes], Awaitable[None]]] + + +NOSENDFILE: Final[bool] = bool(os.environ.get("AIOHTTP_NOSENDFILE")) + +CONTENT_TYPES: Final[MimeTypes] = MimeTypes() + +# File extension to IANA encodings map that will be checked in the order defined. +ENCODING_EXTENSIONS = MappingProxyType( + {ext: CONTENT_TYPES.encodings_map[ext] for ext in (".br", ".gz")} +) + +FALLBACK_CONTENT_TYPE = "application/octet-stream" + +# Provide additional MIME type/extension pairs to be recognized. +# https://en.wikipedia.org/wiki/List_of_archive_formats#Compression_only +ADDITIONAL_CONTENT_TYPES = MappingProxyType( + { + "application/gzip": ".gz", + "application/x-brotli": ".br", + "application/x-bzip2": ".bz2", + "application/x-compress": ".Z", + "application/x-xz": ".xz", + } +) + + +class _FileResponseResult(Enum): + """The result of the file response.""" + + SEND_FILE = auto() # Ie a regular file to send + NOT_ACCEPTABLE = auto() # Ie a socket, or non-regular file + PRE_CONDITION_FAILED = auto() # Ie If-Match or If-None-Match failed + NOT_MODIFIED = auto() # 304 Not Modified + + +# Add custom pairs and clear the encodings map so guess_type ignores them. 
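A minimal `FileResponse` usage sketch (the path and route are assumptions). Given the encoding map above, a sibling `index.html.br` or `index.html.gz` may be served transparently when the client's `Accept-Encoding` permits it:

```python
from aiohttp import web

async def index(request: web.Request) -> web.FileResponse:
    # chunk_size mainly matters on the fallback path, used when sendfile()
    # is unavailable or compression is enabled.
    return web.FileResponse("static/index.html", chunk_size=256 * 1024)

app = web.Application()
app.router.add_get("/", index)
```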
+CONTENT_TYPES.encodings_map.clear() +for content_type, extension in ADDITIONAL_CONTENT_TYPES.items(): + CONTENT_TYPES.add_type(content_type, extension) + + +_CLOSE_FUTURES: Set[asyncio.Future[None]] = set() + + +class FileResponse(StreamResponse): + """A response object can be used to send files.""" + + def __init__( + self, + path: PathLike, + chunk_size: int = 256 * 1024, + status: int = 200, + reason: Optional[str] = None, + headers: Optional[LooseHeaders] = None, + ) -> None: + super().__init__(status=status, reason=reason, headers=headers) + + self._path = pathlib.Path(path) + self._chunk_size = chunk_size + + def _seek_and_read(self, fobj: IO[Any], offset: int, chunk_size: int) -> bytes: + fobj.seek(offset) + return fobj.read(chunk_size) # type: ignore[no-any-return] + + async def _sendfile_fallback( + self, writer: AbstractStreamWriter, fobj: IO[Any], offset: int, count: int + ) -> AbstractStreamWriter: + # To keep memory usage low,fobj is transferred in chunks + # controlled by the constructor's chunk_size argument. + + chunk_size = self._chunk_size + loop = asyncio.get_event_loop() + chunk = await loop.run_in_executor( + None, self._seek_and_read, fobj, offset, chunk_size + ) + while chunk: + await writer.write(chunk) + count = count - chunk_size + if count <= 0: + break + chunk = await loop.run_in_executor(None, fobj.read, min(chunk_size, count)) + + await writer.drain() + return writer + + async def _sendfile( + self, request: "BaseRequest", fobj: IO[Any], offset: int, count: int + ) -> AbstractStreamWriter: + writer = await super().prepare(request) + assert writer is not None + + if NOSENDFILE or self.compression: + return await self._sendfile_fallback(writer, fobj, offset, count) + + loop = request._loop + transport = request.transport + assert transport is not None + + try: + await loop.sendfile(transport, fobj, offset, count) + except NotImplementedError: + return await self._sendfile_fallback(writer, fobj, offset, count) + + await super().write_eof() + return writer + + @staticmethod + def _etag_match(etag_value: str, etags: Tuple[ETag, ...], *, weak: bool) -> bool: + if len(etags) == 1 and etags[0].value == ETAG_ANY: + return True + return any( + etag.value == etag_value for etag in etags if weak or not etag.is_weak + ) + + async def _not_modified( + self, request: "BaseRequest", etag_value: str, last_modified: float + ) -> Optional[AbstractStreamWriter]: + self.set_status(HTTPNotModified.status_code) + self._length_check = False + self.etag = etag_value + self.last_modified = last_modified + # Delete any Content-Length headers provided by user. HTTP 304 + # should always have empty response body + return await super().prepare(request) + + async def _precondition_failed( + self, request: "BaseRequest" + ) -> Optional[AbstractStreamWriter]: + self.set_status(HTTPPreconditionFailed.status_code) + self.content_length = 0 + return await super().prepare(request) + + def _make_response( + self, request: "BaseRequest", accept_encoding: str + ) -> Tuple[ + _FileResponseResult, Optional[io.BufferedReader], os.stat_result, Optional[str] + ]: + """Return the response result, io object, stat result, and encoding. + + If an uncompressed file is returned, the encoding is set to + :py:data:`None`. + + This method should be called from a thread executor + since it calls os.stat which may block. 
+ """ + file_path, st, file_encoding = self._get_file_path_stat_encoding( + accept_encoding + ) + if not file_path: + return _FileResponseResult.NOT_ACCEPTABLE, None, st, None + + etag_value = f"{st.st_mtime_ns:x}-{st.st_size:x}" + + # https://www.rfc-editor.org/rfc/rfc9110#section-13.1.1-2 + if (ifmatch := request.if_match) is not None and not self._etag_match( + etag_value, ifmatch, weak=False + ): + return _FileResponseResult.PRE_CONDITION_FAILED, None, st, file_encoding + + if ( + (unmodsince := request.if_unmodified_since) is not None + and ifmatch is None + and st.st_mtime > unmodsince.timestamp() + ): + return _FileResponseResult.PRE_CONDITION_FAILED, None, st, file_encoding + + # https://www.rfc-editor.org/rfc/rfc9110#section-13.1.2-2 + if (ifnonematch := request.if_none_match) is not None and self._etag_match( + etag_value, ifnonematch, weak=True + ): + return _FileResponseResult.NOT_MODIFIED, None, st, file_encoding + + if ( + (modsince := request.if_modified_since) is not None + and ifnonematch is None + and st.st_mtime <= modsince.timestamp() + ): + return _FileResponseResult.NOT_MODIFIED, None, st, file_encoding + + fobj = file_path.open("rb") + with suppress(OSError): + # fstat() may not be available on all platforms + # Once we open the file, we want the fstat() to ensure + # the file has not changed between the first stat() + # and the open(). + st = os.stat(fobj.fileno()) + return _FileResponseResult.SEND_FILE, fobj, st, file_encoding + + def _get_file_path_stat_encoding( + self, accept_encoding: str + ) -> Tuple[Optional[pathlib.Path], os.stat_result, Optional[str]]: + file_path = self._path + for file_extension, file_encoding in ENCODING_EXTENSIONS.items(): + if file_encoding not in accept_encoding: + continue + + compressed_path = file_path.with_suffix(file_path.suffix + file_extension) + with suppress(OSError): + # Do not follow symlinks and ignore any non-regular files. + st = compressed_path.lstat() + if S_ISREG(st.st_mode): + return compressed_path, st, file_encoding + + # Fallback to the uncompressed file + st = file_path.stat() + return file_path if S_ISREG(st.st_mode) else None, st, None + + async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]: + loop = asyncio.get_running_loop() + # Encoding comparisons should be case-insensitive + # https://www.rfc-editor.org/rfc/rfc9110#section-8.4.1 + accept_encoding = request.headers.get(hdrs.ACCEPT_ENCODING, "").lower() + try: + response_result, fobj, st, file_encoding = await loop.run_in_executor( + None, self._make_response, request, accept_encoding + ) + except PermissionError: + self.set_status(HTTPForbidden.status_code) + return await super().prepare(request) + except OSError: + # Most likely to be FileNotFoundError or OSError for circular + # symlinks in python >= 3.13, so respond with 404. + self.set_status(HTTPNotFound.status_code) + return await super().prepare(request) + + # Forbid special files like sockets, pipes, devices, etc. 
+ if response_result is _FileResponseResult.NOT_ACCEPTABLE: + self.set_status(HTTPForbidden.status_code) + return await super().prepare(request) + + if response_result is _FileResponseResult.PRE_CONDITION_FAILED: + return await self._precondition_failed(request) + + if response_result is _FileResponseResult.NOT_MODIFIED: + etag_value = f"{st.st_mtime_ns:x}-{st.st_size:x}" + last_modified = st.st_mtime + return await self._not_modified(request, etag_value, last_modified) + + assert fobj is not None + try: + return await self._prepare_open_file(request, fobj, st, file_encoding) + finally: + # We do not await here because we do not want to wait + # for the executor to finish before returning the response + # so the connection can begin servicing another request + # as soon as possible. + close_future = loop.run_in_executor(None, fobj.close) + # Hold a strong reference to the future to prevent it from being + # garbage collected before it completes. + _CLOSE_FUTURES.add(close_future) + close_future.add_done_callback(_CLOSE_FUTURES.remove) + + async def _prepare_open_file( + self, + request: "BaseRequest", + fobj: io.BufferedReader, + st: os.stat_result, + file_encoding: Optional[str], + ) -> Optional[AbstractStreamWriter]: + status = self._status + file_size: int = st.st_size + file_mtime: float = st.st_mtime + count: int = file_size + start: Optional[int] = None + + if (ifrange := request.if_range) is None or file_mtime <= ifrange.timestamp(): + # If-Range header check: + # condition = cached date >= last modification date + # return 206 if True else 200. + # if False: + # Range header would not be processed, return 200 + # if True but Range header missing + # return 200 + try: + rng = request.http_range + start = rng.start + end: Optional[int] = rng.stop + except ValueError: + # https://tools.ietf.org/html/rfc7233: + # A server generating a 416 (Range Not Satisfiable) response to + # a byte-range request SHOULD send a Content-Range header field + # with an unsatisfied-range value. + # The complete-length in a 416 response indicates the current + # length of the selected representation. + # + # Will do the same below. Many servers ignore this and do not + # send a Content-Range header with HTTP 416 + self._headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}" + self.set_status(HTTPRequestRangeNotSatisfiable.status_code) + return await super().prepare(request) + + # If a range request has been made, convert start, end slice + # notation into file pointer offset and count + if start is not None: + if start < 0 and end is None: # return tail of file + start += file_size + if start < 0: + # if Range:bytes=-1000 in request header but file size + # is only 200, there would be trouble without this + start = 0 + count = file_size - start + else: + # rfc7233:If the last-byte-pos value is + # absent, or if the value is greater than or equal to + # the current length of the representation data, + # the byte range is interpreted as the remainder + # of the representation (i.e., the server replaces the + # value of last-byte-pos with a value that is one less than + # the current length of the selected representation). + count = ( + min(end if end is not None else file_size, file_size) - start + ) + + if start >= file_size: + # HTTP 416 should be returned in this case. 
+ # + # According to https://tools.ietf.org/html/rfc7233: + # If a valid byte-range-set includes at least one + # byte-range-spec with a first-byte-pos that is less than + # the current length of the representation, or at least one + # suffix-byte-range-spec with a non-zero suffix-length, + # then the byte-range-set is satisfiable. Otherwise, the + # byte-range-set is unsatisfiable. + self._headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}" + self.set_status(HTTPRequestRangeNotSatisfiable.status_code) + return await super().prepare(request) + + status = HTTPPartialContent.status_code + # Even though you are sending the whole file, you should still + # return a HTTP 206 for a Range request. + self.set_status(status) + + # If the Content-Type header is not already set, guess it based on the + # extension of the request path. The encoding returned by guess_type + # can be ignored since the map was cleared above. + if hdrs.CONTENT_TYPE not in self._headers: + if sys.version_info >= (3, 13): + guesser = CONTENT_TYPES.guess_file_type + else: + guesser = CONTENT_TYPES.guess_type + self.content_type = guesser(self._path)[0] or FALLBACK_CONTENT_TYPE + + if file_encoding: + self._headers[hdrs.CONTENT_ENCODING] = file_encoding + self._headers[hdrs.VARY] = hdrs.ACCEPT_ENCODING + # Disable compression if we are already sending + # a compressed file since we don't want to double + # compress. + self._compression = False + + self.etag = f"{st.st_mtime_ns:x}-{st.st_size:x}" + self.last_modified = file_mtime + self.content_length = count + + self._headers[hdrs.ACCEPT_RANGES] = "bytes" + + if status == HTTPPartialContent.status_code: + real_start = start + assert real_start is not None + self._headers[hdrs.CONTENT_RANGE] = "bytes {}-{}/{}".format( + real_start, real_start + count - 1, file_size + ) + + # If we are sending 0 bytes calling sendfile() will throw a ValueError + if count == 0 or must_be_empty_body(request.method, status): + return await super().prepare(request) + + # be aware that start could be None or int=0 here. + offset = start or 0 + + return await self._sendfile(request, fobj, offset, count) diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_log.py b/py311/lib/python3.11/site-packages/aiohttp/web_log.py new file mode 100644 index 0000000000000000000000000000000000000000..d5ea2beeb152974ce5dd9f3e7990133ce04f7980 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/web_log.py @@ -0,0 +1,216 @@ +import datetime +import functools +import logging +import os +import re +import time as time_mod +from collections import namedtuple +from typing import Any, Callable, Dict, Iterable, List, Tuple # noqa + +from .abc import AbstractAccessLogger +from .web_request import BaseRequest +from .web_response import StreamResponse + +KeyMethod = namedtuple("KeyMethod", "key method") + + +class AccessLogger(AbstractAccessLogger): + """Helper object to log access. 
+ + Usage: + log = logging.getLogger("spam") + log_format = "%a %{User-Agent}i" + access_logger = AccessLogger(log, log_format) + access_logger.log(request, response, time) + + Format: + %% The percent sign + %a Remote IP-address (IP-address of proxy if using reverse proxy) + %t Time when the request was started to process + %P The process ID of the child that serviced the request + %r First line of request + %s Response status code + %b Size of response in bytes, including HTTP headers + %T Time taken to serve the request, in seconds + %Tf Time taken to serve the request, in seconds with floating fraction + in .06f format + %D Time taken to serve the request, in microseconds + %{FOO}i request.headers['FOO'] + %{FOO}o response.headers['FOO'] + %{FOO}e os.environ['FOO'] + + """ + + LOG_FORMAT_MAP = { + "a": "remote_address", + "t": "request_start_time", + "P": "process_id", + "r": "first_request_line", + "s": "response_status", + "b": "response_size", + "T": "request_time", + "Tf": "request_time_frac", + "D": "request_time_micro", + "i": "request_header", + "o": "response_header", + } + + LOG_FORMAT = '%a %t "%r" %s %b "%{Referer}i" "%{User-Agent}i"' + FORMAT_RE = re.compile(r"%(\{([A-Za-z0-9\-_]+)\}([ioe])|[atPrsbOD]|Tf?)") + CLEANUP_RE = re.compile(r"(%[^s])") + _FORMAT_CACHE: Dict[str, Tuple[str, List[KeyMethod]]] = {} + + def __init__(self, logger: logging.Logger, log_format: str = LOG_FORMAT) -> None: + """Initialise the logger. + + logger is a logger object to be used for logging. + log_format is a string with apache compatible log format description. + + """ + super().__init__(logger, log_format=log_format) + + _compiled_format = AccessLogger._FORMAT_CACHE.get(log_format) + if not _compiled_format: + _compiled_format = self.compile_format(log_format) + AccessLogger._FORMAT_CACHE[log_format] = _compiled_format + + self._log_format, self._methods = _compiled_format + + def compile_format(self, log_format: str) -> Tuple[str, List[KeyMethod]]: + """Translate log_format into form usable by modulo formatting + + All known atoms will be replaced with %s + Also methods for formatting of those atoms will be added to + _methods in appropriate order + + For example we have log_format = "%a %t" + This format will be translated to "%s %s" + Also contents of _methods will be + [self._format_a, self._format_t] + These method will be called and results will be passed + to translated string format. 
+ + Each _format_* method receive 'args' which is list of arguments + given to self.log + + Exceptions are _format_e, _format_i and _format_o methods which + also receive key name (by functools.partial) + + """ + # list of (key, method) tuples, we don't use an OrderedDict as users + # can repeat the same key more than once + methods = list() + + for atom in self.FORMAT_RE.findall(log_format): + if atom[1] == "": + format_key1 = self.LOG_FORMAT_MAP[atom[0]] + m = getattr(AccessLogger, "_format_%s" % atom[0]) + key_method = KeyMethod(format_key1, m) + else: + format_key2 = (self.LOG_FORMAT_MAP[atom[2]], atom[1]) + m = getattr(AccessLogger, "_format_%s" % atom[2]) + key_method = KeyMethod(format_key2, functools.partial(m, atom[1])) + + methods.append(key_method) + + log_format = self.FORMAT_RE.sub(r"%s", log_format) + log_format = self.CLEANUP_RE.sub(r"%\1", log_format) + return log_format, methods + + @staticmethod + def _format_i( + key: str, request: BaseRequest, response: StreamResponse, time: float + ) -> str: + if request is None: + return "(no headers)" + + # suboptimal, make istr(key) once + return request.headers.get(key, "-") + + @staticmethod + def _format_o( + key: str, request: BaseRequest, response: StreamResponse, time: float + ) -> str: + # suboptimal, make istr(key) once + return response.headers.get(key, "-") + + @staticmethod + def _format_a(request: BaseRequest, response: StreamResponse, time: float) -> str: + if request is None: + return "-" + ip = request.remote + return ip if ip is not None else "-" + + @staticmethod + def _format_t(request: BaseRequest, response: StreamResponse, time: float) -> str: + tz = datetime.timezone(datetime.timedelta(seconds=-time_mod.timezone)) + now = datetime.datetime.now(tz) + start_time = now - datetime.timedelta(seconds=time) + return start_time.strftime("[%d/%b/%Y:%H:%M:%S %z]") + + @staticmethod + def _format_P(request: BaseRequest, response: StreamResponse, time: float) -> str: + return "<%s>" % os.getpid() + + @staticmethod + def _format_r(request: BaseRequest, response: StreamResponse, time: float) -> str: + if request is None: + return "-" + return "{} {} HTTP/{}.{}".format( + request.method, + request.path_qs, + request.version.major, + request.version.minor, + ) + + @staticmethod + def _format_s(request: BaseRequest, response: StreamResponse, time: float) -> int: + return response.status + + @staticmethod + def _format_b(request: BaseRequest, response: StreamResponse, time: float) -> int: + return response.body_length + + @staticmethod + def _format_T(request: BaseRequest, response: StreamResponse, time: float) -> str: + return str(round(time)) + + @staticmethod + def _format_Tf(request: BaseRequest, response: StreamResponse, time: float) -> str: + return "%06f" % time + + @staticmethod + def _format_D(request: BaseRequest, response: StreamResponse, time: float) -> str: + return str(round(time * 1000000)) + + def _format_line( + self, request: BaseRequest, response: StreamResponse, time: float + ) -> Iterable[Tuple[str, Callable[[BaseRequest, StreamResponse, float], str]]]: + return [(key, method(request, response, time)) for key, method in self._methods] + + @property + def enabled(self) -> bool: + """Check if logger is enabled.""" + # Avoid formatting the log line if it will not be emitted. 
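The atoms documented in the class docstring above compose into custom access-log formats; a sketch with an assumed format string:

```python
import logging
from aiohttp import web

logging.basicConfig(level=logging.INFO)

app = web.Application()
# %a remote address, %r request line, %s status, %b body size, %Tf time taken
web.run_app(app, access_log_format='%a "%r" %s %b %Tf "%{User-Agent}i"')
```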
+ return self.logger.isEnabledFor(logging.INFO) + + def log(self, request: BaseRequest, response: StreamResponse, time: float) -> None: + try: + fmt_info = self._format_line(request, response, time) + + values = list() + extra = dict() + for key, value in fmt_info: + values.append(value) + + if key.__class__ is str: + extra[key] = value + else: + k1, k2 = key # type: ignore[misc] + dct = extra.get(k1, {}) # type: ignore[var-annotated,has-type] + dct[k2] = value # type: ignore[index,has-type] + extra[k1] = dct # type: ignore[has-type,assignment] + + self.logger.info(self._log_format % tuple(values), extra=extra) + except Exception: + self.logger.exception("Error in logging") diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_middlewares.py b/py311/lib/python3.11/site-packages/aiohttp/web_middlewares.py new file mode 100644 index 0000000000000000000000000000000000000000..2f1f5f58e6e38845d4d2d4ffdd2748fc519fa5bf --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/web_middlewares.py @@ -0,0 +1,121 @@ +import re +from typing import TYPE_CHECKING, Tuple, Type, TypeVar + +from .typedefs import Handler, Middleware +from .web_exceptions import HTTPMove, HTTPPermanentRedirect +from .web_request import Request +from .web_response import StreamResponse +from .web_urldispatcher import SystemRoute + +__all__ = ( + "middleware", + "normalize_path_middleware", +) + +if TYPE_CHECKING: + from .web_app import Application + +_Func = TypeVar("_Func") + + +async def _check_request_resolves(request: Request, path: str) -> Tuple[bool, Request]: + alt_request = request.clone(rel_url=path) + + match_info = await request.app.router.resolve(alt_request) + alt_request._match_info = match_info + + if match_info.http_exception is None: + return True, alt_request + + return False, request + + +def middleware(f: _Func) -> _Func: + f.__middleware_version__ = 1 # type: ignore[attr-defined] + return f + + +def normalize_path_middleware( + *, + append_slash: bool = True, + remove_slash: bool = False, + merge_slashes: bool = True, + redirect_class: Type[HTTPMove] = HTTPPermanentRedirect, +) -> Middleware: + """Factory for producing a middleware that normalizes the path of a request. + + Normalizing means: + - Add or remove a trailing slash to the path. + - Double slashes are replaced by one. + + The middleware returns as soon as it finds a path that resolves + correctly. The order if both merge and append/remove are enabled is + 1) merge slashes + 2) append/remove slash + 3) both merge slashes and append/remove slash. + If the path resolves with at least one of those conditions, it will + redirect to the new path. + + Only one of `append_slash` and `remove_slash` can be enabled. If both + are `True` the factory will raise an assertion error + + If `append_slash` is `True` the middleware will append a slash when + needed. If a resource is defined with trailing slash and the request + comes without it, it will append it automatically. + + If `remove_slash` is `True`, `append_slash` must be `False`. When enabled + the middleware will remove trailing slashes and redirect if the resource + is defined + + If merge_slashes is True, merge multiple consecutive slashes in the + path into one. + """ + correct_configuration = not (append_slash and remove_slash) + assert correct_configuration, "Cannot both remove and append slash" + + @middleware + async def impl(request: Request, handler: Handler) -> StreamResponse: + if isinstance(request.match_info.route, SystemRoute): + paths_to_check = [] + if "?" 
in request.raw_path: + path, query = request.raw_path.split("?", 1) + query = "?" + query + else: + query = "" + path = request.raw_path + + if merge_slashes: + paths_to_check.append(re.sub("//+", "/", path)) + if append_slash and not request.path.endswith("/"): + paths_to_check.append(path + "/") + if remove_slash and request.path.endswith("/"): + paths_to_check.append(path[:-1]) + if merge_slashes and append_slash: + paths_to_check.append(re.sub("//+", "/", path + "/")) + if merge_slashes and remove_slash: + merged_slashes = re.sub("//+", "/", path) + paths_to_check.append(merged_slashes[:-1]) + + for path in paths_to_check: + path = re.sub("^//+", "/", path) # SECURITY: GHSA-v6wp-4m6f-gcjg + resolves, request = await _check_request_resolves(request, path) + if resolves: + raise redirect_class(request.raw_path + query) + + return await handler(request) + + return impl + + +def _fix_request_current_app(app: "Application") -> Middleware: + @middleware + async def impl(request: Request, handler: Handler) -> StreamResponse: + match_info = request.match_info + prev = match_info.current_app + match_info.current_app = app + try: + return await handler(request) + finally: + match_info.current_app = prev + + return impl diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_protocol.py b/py311/lib/python3.11/site-packages/aiohttp/web_protocol.py new file mode 100644 index 0000000000000000000000000000000000000000..1bd344ae42a65445df53eb6864db1abeb06f56e7 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/web_protocol.py @@ -0,0 +1,792 @@ +import asyncio +import asyncio.streams +import sys +import traceback +import warnings +from collections import deque +from contextlib import suppress +from html import escape as html_escape +from http import HTTPStatus +from logging import Logger +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Deque, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +import attr +import yarl +from propcache import under_cached_property + +from .abc import AbstractAccessLogger, AbstractStreamWriter +from .base_protocol import BaseProtocol +from .helpers import ceil_timeout +from .http import ( + HttpProcessingError, + HttpRequestParser, + HttpVersion10, + RawRequestMessage, + StreamWriter, +) +from .http_exceptions import BadHttpMethod +from .log import access_logger, server_logger +from .streams import EMPTY_PAYLOAD, StreamReader +from .tcp_helpers import tcp_keepalive +from .web_exceptions import HTTPException, HTTPInternalServerError +from .web_log import AccessLogger +from .web_request import BaseRequest +from .web_response import Response, StreamResponse + +__all__ = ("RequestHandler", "RequestPayloadError", "PayloadAccessError") + +if TYPE_CHECKING: + import ssl + + from .web_server import Server + + +_RequestFactory = Callable[ + [ + RawRequestMessage, + StreamReader, + "RequestHandler", + AbstractStreamWriter, + "asyncio.Task[None]", + ], + BaseRequest, +] + +_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]] + +ERROR = RawRequestMessage( + "UNKNOWN", + "/", + HttpVersion10, + {}, # type: ignore[arg-type] + {}, # type: ignore[arg-type] + True, + None, + False, + False, + yarl.URL("/"), +) + + +class RequestPayloadError(Exception): + """Payload parsing error.""" + + +class PayloadAccessError(Exception): + """Payload was accessed after response was sent.""" + + +_PAYLOAD_ACCESS_ERROR = PayloadAccessError() + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class _ErrInfo: + status: int + exc: 
BaseException + message: str + + +_MsgType = Tuple[Union[RawRequestMessage, _ErrInfo], StreamReader] + + +class RequestHandler(BaseProtocol): + """HTTP protocol implementation. + + RequestHandler handles incoming HTTP request. It reads request line, + request headers and request payload and calls handle_request() method. + By default it always returns with 404 response. + + RequestHandler handles errors in incoming request, like bad + status line, bad headers or incomplete payload. If any error occurs, + connection gets closed. + + keepalive_timeout -- number of seconds before closing + keep-alive connection + + tcp_keepalive -- TCP keep-alive is on, default is on + + debug -- enable debug mode + + logger -- custom logger object + + access_log_class -- custom class for access_logger + + access_log -- custom logging object + + access_log_format -- access log format string + + loop -- Optional event loop + + max_line_size -- Optional maximum header line size + + max_field_size -- Optional maximum header field size + + max_headers -- Optional maximum header size + + timeout_ceil_threshold -- Optional value to specify + threshold to ceil() timeout + values + + """ + + __slots__ = ( + "_request_count", + "_keepalive", + "_manager", + "_request_handler", + "_request_factory", + "_tcp_keepalive", + "_next_keepalive_close_time", + "_keepalive_handle", + "_keepalive_timeout", + "_lingering_time", + "_messages", + "_message_tail", + "_handler_waiter", + "_waiter", + "_task_handler", + "_upgrade", + "_payload_parser", + "_request_parser", + "_reading_paused", + "logger", + "debug", + "access_log", + "access_logger", + "_close", + "_force_close", + "_current_request", + "_timeout_ceil_threshold", + "_request_in_progress", + "_logging_enabled", + "_cache", + ) + + def __init__( + self, + manager: "Server", + *, + loop: asyncio.AbstractEventLoop, + # Default should be high enough that it's likely longer than a reverse proxy. + keepalive_timeout: float = 3630, + tcp_keepalive: bool = True, + logger: Logger = server_logger, + access_log_class: Type[AbstractAccessLogger] = AccessLogger, + access_log: Logger = access_logger, + access_log_format: str = AccessLogger.LOG_FORMAT, + debug: bool = False, + max_line_size: int = 8190, + max_headers: int = 32768, + max_field_size: int = 8190, + lingering_time: float = 10.0, + read_bufsize: int = 2**16, + auto_decompress: bool = True, + timeout_ceil_threshold: float = 5, + ): + super().__init__(loop) + + # _request_count is the number of requests processed with the same connection. 
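`RequestHandler` is normally built indirectly: `AppRunner` forwards extra keyword arguments through `Server` into this constructor, so the knobs documented above can plausibly be tuned as below (a sketch; the values are assumptions):

```python
from aiohttp import web

app = web.Application()
# Forwarded via Server(**kwargs) into RequestHandler.__init__.
runner = web.AppRunner(app, keepalive_timeout=75.0, tcp_keepalive=True)
```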
+ self._request_count = 0 + self._keepalive = False + self._current_request: Optional[BaseRequest] = None + self._manager: Optional[Server] = manager + self._request_handler: Optional[_RequestHandler] = manager.request_handler + self._request_factory: Optional[_RequestFactory] = manager.request_factory + + self._tcp_keepalive = tcp_keepalive + # placeholder to be replaced on keepalive timeout setup + self._next_keepalive_close_time = 0.0 + self._keepalive_handle: Optional[asyncio.Handle] = None + self._keepalive_timeout = keepalive_timeout + self._lingering_time = float(lingering_time) + + self._messages: Deque[_MsgType] = deque() + self._message_tail = b"" + + self._waiter: Optional[asyncio.Future[None]] = None + self._handler_waiter: Optional[asyncio.Future[None]] = None + self._task_handler: Optional[asyncio.Task[None]] = None + + self._upgrade = False + self._payload_parser: Any = None + self._request_parser: Optional[HttpRequestParser] = HttpRequestParser( + self, + loop, + read_bufsize, + max_line_size=max_line_size, + max_field_size=max_field_size, + max_headers=max_headers, + payload_exception=RequestPayloadError, + auto_decompress=auto_decompress, + ) + + self._timeout_ceil_threshold: float = 5 + try: + self._timeout_ceil_threshold = float(timeout_ceil_threshold) + except (TypeError, ValueError): + pass + + self.logger = logger + self.debug = debug + self.access_log = access_log + if access_log: + self.access_logger: Optional[AbstractAccessLogger] = access_log_class( + access_log, access_log_format + ) + self._logging_enabled = self.access_logger.enabled + else: + self.access_logger = None + self._logging_enabled = False + + self._close = False + self._force_close = False + self._request_in_progress = False + self._cache: dict[str, Any] = {} + + def __repr__(self) -> str: + return "<{} {}>".format( + self.__class__.__name__, + "connected" if self.transport is not None else "disconnected", + ) + + @under_cached_property + def ssl_context(self) -> Optional["ssl.SSLContext"]: + """Return SSLContext if available.""" + return ( + None + if self.transport is None + else self.transport.get_extra_info("sslcontext") + ) + + @under_cached_property + def peername( + self, + ) -> Optional[Union[str, Tuple[str, int, int, int], Tuple[str, int]]]: + """Return peername if available.""" + return ( + None + if self.transport is None + else self.transport.get_extra_info("peername") + ) + + @property + def keepalive_timeout(self) -> float: + return self._keepalive_timeout + + async def shutdown(self, timeout: Optional[float] = 15.0) -> None: + """Do worker process exit preparations. + + We need to clean up everything and stop accepting requests. + It is especially important for keep-alive connections. + """ + self._force_close = True + + if self._keepalive_handle is not None: + self._keepalive_handle.cancel() + + # Wait for graceful handler completion + if self._request_in_progress: + # The future is only created when we are shutting + # down while the handler is still processing a request + # to avoid creating a future for every request. 
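+            # Handshake note: shutdown() awaits the future created below and
+            # _handle_request() resolves it in its finally block, so this
+            # wait ends as soon as the in-flight handler returns.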
+ self._handler_waiter = self._loop.create_future() + try: + async with ceil_timeout(timeout): + await self._handler_waiter + except (asyncio.CancelledError, asyncio.TimeoutError): + self._handler_waiter = None + if ( + sys.version_info >= (3, 11) + and (task := asyncio.current_task()) + and task.cancelling() + ): + raise + # Then cancel handler and wait + try: + async with ceil_timeout(timeout): + if self._current_request is not None: + self._current_request._cancel(asyncio.CancelledError()) + + if self._task_handler is not None and not self._task_handler.done(): + await asyncio.shield(self._task_handler) + except (asyncio.CancelledError, asyncio.TimeoutError): + if ( + sys.version_info >= (3, 11) + and (task := asyncio.current_task()) + and task.cancelling() + ): + raise + + # force-close non-idle handler + if self._task_handler is not None: + self._task_handler.cancel() + + self.force_close() + + def connection_made(self, transport: asyncio.BaseTransport) -> None: + super().connection_made(transport) + + real_transport = cast(asyncio.Transport, transport) + if self._tcp_keepalive: + tcp_keepalive(real_transport) + + assert self._manager is not None + self._manager.connection_made(self, real_transport) + + loop = self._loop + if sys.version_info >= (3, 12): + task = asyncio.Task(self.start(), loop=loop, eager_start=True) + else: + task = loop.create_task(self.start()) + self._task_handler = task + + def connection_lost(self, exc: Optional[BaseException]) -> None: + if self._manager is None: + return + self._manager.connection_lost(self, exc) + + # Grab value before setting _manager to None. + handler_cancellation = self._manager.handler_cancellation + + self.force_close() + super().connection_lost(exc) + self._manager = None + self._request_factory = None + self._request_handler = None + self._request_parser = None + + if self._keepalive_handle is not None: + self._keepalive_handle.cancel() + + if self._current_request is not None: + if exc is None: + exc = ConnectionResetError("Connection lost") + self._current_request._cancel(exc) + + if handler_cancellation and self._task_handler is not None: + self._task_handler.cancel() + + self._task_handler = None + + if self._payload_parser is not None: + self._payload_parser.feed_eof() + self._payload_parser = None + + def set_parser(self, parser: Any) -> None: + # Actual type is WebReader + assert self._payload_parser is None + + self._payload_parser = parser + + if self._message_tail: + self._payload_parser.feed_data(self._message_tail) + self._message_tail = b"" + + def eof_received(self) -> None: + pass + + def data_received(self, data: bytes) -> None: + if self._force_close or self._close: + return + # parse http messages + messages: Sequence[_MsgType] + if self._payload_parser is None and not self._upgrade: + assert self._request_parser is not None + try: + messages, upgraded, tail = self._request_parser.feed_data(data) + except HttpProcessingError as exc: + messages = [ + (_ErrInfo(status=400, exc=exc, message=exc.message), EMPTY_PAYLOAD) + ] + upgraded = False + tail = b"" + + for msg, payload in messages or (): + self._request_count += 1 + self._messages.append((msg, payload)) + + waiter = self._waiter + if messages and waiter is not None and not waiter.done(): + # don't set result twice + waiter.set_result(None) + + self._upgrade = upgraded + if upgraded and tail: + self._message_tail = tail + + # no parser, just store + elif self._payload_parser is None and self._upgrade and data: + self._message_tail += data + + # feed payload + elif 
data: + eof, tail = self._payload_parser.feed_data(data) + if eof: + self.close() + + def keep_alive(self, val: bool) -> None: + """Set keep-alive connection mode. + + :param bool val: new state. + """ + self._keepalive = val + if self._keepalive_handle: + self._keepalive_handle.cancel() + self._keepalive_handle = None + + def close(self) -> None: + """Close connection. + + Stop accepting new pipelining messages and close + connection when handlers done processing messages. + """ + self._close = True + if self._waiter: + self._waiter.cancel() + + def force_close(self) -> None: + """Forcefully close connection.""" + self._force_close = True + if self._waiter: + self._waiter.cancel() + if self.transport is not None: + self.transport.close() + self.transport = None + + def log_access( + self, request: BaseRequest, response: StreamResponse, time: Optional[float] + ) -> None: + if self._logging_enabled and self.access_logger is not None: + if TYPE_CHECKING: + assert time is not None + self.access_logger.log(request, response, self._loop.time() - time) + + def log_debug(self, *args: Any, **kw: Any) -> None: + if self.debug: + self.logger.debug(*args, **kw) + + def log_exception(self, *args: Any, **kw: Any) -> None: + self.logger.exception(*args, **kw) + + def _process_keepalive(self) -> None: + self._keepalive_handle = None + if self._force_close or not self._keepalive: + return + + loop = self._loop + now = loop.time() + close_time = self._next_keepalive_close_time + if now < close_time: + # Keep alive close check fired too early, reschedule + self._keepalive_handle = loop.call_at(close_time, self._process_keepalive) + return + + # handler in idle state + if self._waiter and not self._waiter.done(): + self.force_close() + + async def _handle_request( + self, + request: BaseRequest, + start_time: Optional[float], + request_handler: Callable[[BaseRequest], Awaitable[StreamResponse]], + ) -> Tuple[StreamResponse, bool]: + self._request_in_progress = True + try: + try: + self._current_request = request + resp = await request_handler(request) + finally: + self._current_request = None + except HTTPException as exc: + resp = exc + resp, reset = await self.finish_response(request, resp, start_time) + except asyncio.CancelledError: + raise + except asyncio.TimeoutError as exc: + self.log_debug("Request handler timed out.", exc_info=exc) + resp = self.handle_error(request, 504) + resp, reset = await self.finish_response(request, resp, start_time) + except Exception as exc: + resp = self.handle_error(request, 500, exc) + resp, reset = await self.finish_response(request, resp, start_time) + else: + # Deprecation warning (See #2415) + if getattr(resp, "__http_exception__", False): + warnings.warn( + "returning HTTPException object is deprecated " + "(#2415) and will be removed, " + "please raise the exception instead", + DeprecationWarning, + ) + + resp, reset = await self.finish_response(request, resp, start_time) + finally: + self._request_in_progress = False + if self._handler_waiter is not None: + self._handler_waiter.set_result(None) + + return resp, reset + + async def start(self) -> None: + """Process incoming request. + + It reads request line, request headers and request payload, then + calls handle_request() method. Subclass has to override + handle_request(). start() handles various exceptions in request + or response handling. Connection is being closed always unless + keep_alive(True) specified. 
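+
+        Informal sketch of the loop below (descriptive names only, not a
+        public API)::
+
+            while not force_closed:
+                message, payload = await next_message()
+                response, reset = await handle(message, payload)
+                if reset or not keep_alive:
+                    break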
+ """ + loop = self._loop + manager = self._manager + assert manager is not None + keepalive_timeout = self._keepalive_timeout + resp = None + assert self._request_factory is not None + assert self._request_handler is not None + + while not self._force_close: + if not self._messages: + try: + # wait for next request + self._waiter = loop.create_future() + await self._waiter + finally: + self._waiter = None + + message, payload = self._messages.popleft() + + # time is only fetched if logging is enabled as otherwise + # its thrown away and never used. + start = loop.time() if self._logging_enabled else None + + manager.requests_count += 1 + writer = StreamWriter(self, loop) + if isinstance(message, _ErrInfo): + # make request_factory work + request_handler = self._make_error_handler(message) + message = ERROR + else: + request_handler = self._request_handler + + # Important don't hold a reference to the current task + # as on traceback it will prevent the task from being + # collected and will cause a memory leak. + request = self._request_factory( + message, + payload, + self, + writer, + self._task_handler or asyncio.current_task(loop), # type: ignore[arg-type] + ) + try: + # a new task is used for copy context vars (#3406) + coro = self._handle_request(request, start, request_handler) + if sys.version_info >= (3, 12): + task = asyncio.Task(coro, loop=loop, eager_start=True) + else: + task = loop.create_task(coro) + try: + resp, reset = await task + except ConnectionError: + self.log_debug("Ignored premature client disconnection") + break + + # Drop the processed task from asyncio.Task.all_tasks() early + del task + if reset: + self.log_debug("Ignored premature client disconnection 2") + break + + # notify server about keep-alive + self._keepalive = bool(resp.keep_alive) + + # check payload + if not payload.is_eof(): + lingering_time = self._lingering_time + if not self._force_close and lingering_time: + self.log_debug( + "Start lingering close timer for %s sec.", lingering_time + ) + + now = loop.time() + end_t = now + lingering_time + + try: + while not payload.is_eof() and now < end_t: + async with ceil_timeout(end_t - now): + # read and ignore + await payload.readany() + now = loop.time() + except (asyncio.CancelledError, asyncio.TimeoutError): + if ( + sys.version_info >= (3, 11) + and (t := asyncio.current_task()) + and t.cancelling() + ): + raise + + # if payload still uncompleted + if not payload.is_eof() and not self._force_close: + self.log_debug("Uncompleted request.") + self.close() + + payload.set_exception(_PAYLOAD_ACCESS_ERROR) + + except asyncio.CancelledError: + self.log_debug("Ignored premature client disconnection") + self.force_close() + raise + except Exception as exc: + self.log_exception("Unhandled exception", exc_info=exc) + self.force_close() + except BaseException: + self.force_close() + raise + finally: + request._task = None # type: ignore[assignment] # Break reference cycle in case of exception + if self.transport is None and resp is not None: + self.log_debug("Ignored premature client disconnection.") + + if self._keepalive and not self._close and not self._force_close: + # start keep-alive timer + close_time = loop.time() + keepalive_timeout + self._next_keepalive_close_time = close_time + if self._keepalive_handle is None: + self._keepalive_handle = loop.call_at( + close_time, self._process_keepalive + ) + else: + break + + # remove handler, close transport if no handlers left + if not self._force_close: + self._task_handler = None + if self.transport is not 
None: + self.transport.close() + + async def finish_response( + self, request: BaseRequest, resp: StreamResponse, start_time: Optional[float] + ) -> Tuple[StreamResponse, bool]: + """Prepare the response and write_eof, then log access. + + This has to + be called within the context of any exception so the access logger + can get exception information. Returns True if the client disconnects + prematurely. + """ + request._finish() + if self._request_parser is not None: + self._request_parser.set_upgraded(False) + self._upgrade = False + if self._message_tail: + self._request_parser.feed_data(self._message_tail) + self._message_tail = b"" + try: + prepare_meth = resp.prepare + except AttributeError: + if resp is None: + self.log_exception("Missing return statement on request handler") + else: + self.log_exception( + "Web-handler should return a response instance, " + "got {!r}".format(resp) + ) + exc = HTTPInternalServerError() + resp = Response( + status=exc.status, reason=exc.reason, text=exc.text, headers=exc.headers + ) + prepare_meth = resp.prepare + try: + await prepare_meth(request) + await resp.write_eof() + except ConnectionError: + self.log_access(request, resp, start_time) + return resp, True + + self.log_access(request, resp, start_time) + return resp, False + + def handle_error( + self, + request: BaseRequest, + status: int = 500, + exc: Optional[BaseException] = None, + message: Optional[str] = None, + ) -> StreamResponse: + """Handle errors. + + Returns HTTP response with specific status code. Logs additional + information. It always closes current connection. + """ + if self._request_count == 1 and isinstance(exc, BadHttpMethod): + # BadHttpMethod is common when a client sends non-HTTP + # or encrypted traffic to an HTTP port. This is expected + # to happen when connected to the public internet so we log + # it at the debug level as to not fill logs with noise. + self.logger.debug( + "Error handling request from %s", request.remote, exc_info=exc + ) + else: + self.log_exception( + "Error handling request from %s", request.remote, exc_info=exc + ) + + # some data already got sent, connection is broken + if request.writer.output_size > 0: + raise ConnectionError( + "Response is sent already, cannot send another response " + "with the error message" + ) + + ct = "text/plain" + if status == HTTPStatus.INTERNAL_SERVER_ERROR: + title = "{0.value} {0.phrase}".format(HTTPStatus.INTERNAL_SERVER_ERROR) + msg = HTTPStatus.INTERNAL_SERVER_ERROR.description + tb = None + if self.debug: + with suppress(Exception): + tb = traceback.format_exc() + + if "text/html" in request.headers.get("Accept", ""): + if tb: + tb = html_escape(tb) + msg = f"
<h2>Traceback:</h2>\n<pre>{tb}</pre>"
+                message = (
+                    "<html><head>"
+                    "<title>{title}</title>"
+                    "</head><body>\n<h1>{title}</h1>"
+                    "\n{msg}\n</body></html>\n"
+                ).format(title=title, msg=msg)
+                ct = "text/html"
+            else:
+                if tb:
+                    msg = tb
+                message = title + "\n\n" + msg
+
+        resp = Response(status=status, text=message, content_type=ct)
+        resp.force_close()
+
+        return resp
+
+    def _make_error_handler(
+        self, err_info: _ErrInfo
+    ) -> Callable[[BaseRequest], Awaitable[StreamResponse]]:
+        async def handler(request: BaseRequest) -> StreamResponse:
+            return self.handle_error(
+                request, err_info.status, err_info.exc, err_info.message
+            )
+
+        return handler
diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_request.py b/py311/lib/python3.11/site-packages/aiohttp/web_request.py
new file mode 100644
index 0000000000000000000000000000000000000000..0eafcd6e34cb8418393f1af38a072de947a63502
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/aiohttp/web_request.py
@@ -0,0 +1,914 @@
+import asyncio
+import datetime
+import io
+import re
+import socket
+import string
+import tempfile
+import types
+import warnings
+from types import MappingProxyType
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Final,
+    Iterator,
+    Mapping,
+    MutableMapping,
+    Optional,
+    Pattern,
+    Tuple,
+    Union,
+    cast,
+)
+from urllib.parse import parse_qsl
+
+import attr
+from multidict import (
+    CIMultiDict,
+    CIMultiDictProxy,
+    MultiDict,
+    MultiDictProxy,
+    MultiMapping,
+)
+from yarl import URL
+
+from . import hdrs
+from ._cookie_helpers import parse_cookie_header
+from .abc import AbstractStreamWriter
+from .helpers import (
+    _SENTINEL,
+    DEBUG,
+    ETAG_ANY,
+    LIST_QUOTED_ETAG_RE,
+    ChainMapProxy,
+    ETag,
+    HeadersMixin,
+    parse_http_date,
+    reify,
+    sentinel,
+    set_exception,
+)
+from .http_parser import RawRequestMessage
+from .http_writer import HttpVersion
+from .multipart import BodyPartReader, MultipartReader
+from .streams import EmptyStreamReader, StreamReader
+from .typedefs import (
+    DEFAULT_JSON_DECODER,
+    JSONDecoder,
+    LooseHeaders,
+    RawHeaders,
+    StrOrURL,
+)
+from .web_exceptions import HTTPRequestEntityTooLarge
+from .web_response import StreamResponse
+
+__all__ = ("BaseRequest", "FileField", "Request")
+
+
+if TYPE_CHECKING:
+    from .web_app import Application
+    from .web_protocol import RequestHandler
+    from .web_urldispatcher import UrlMappingMatchInfo
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class FileField:
+    name: str
+    filename: str
+    file: io.BufferedReader
+    content_type: str
+    headers: CIMultiDictProxy[str]
+
+
+_TCHAR: Final[str] = string.digits + string.ascii_letters + r"!#$%&'*+.^_`|~-"
+# '-' at the end to prevent interpretation as range in a char class
+
+_TOKEN: Final[str] = rf"[{_TCHAR}]+"
+
+_QDTEXT: Final[str] = r"[{}]".format(
+    r"".join(chr(c) for c in (0x09, 0x20, 0x21) + tuple(range(0x23, 0x7F)))
+)
+# qdtext includes 0x5C to escape 0x5D ('\]')
+# qdtext excludes obs-text (because obsoleted, and encoding not specified)
+
+_QUOTED_PAIR: Final[str] = r"\\[\t !-~]"
+
+_QUOTED_STRING: Final[str] = r'"(?:{quoted_pair}|{qdtext})*"'.format(
+    qdtext=_QDTEXT, quoted_pair=_QUOTED_PAIR
+)
+
+_FORWARDED_PAIR: Final[str] = (
+    r"({token})=({token}|{quoted_string})(:\d{{1,4}})?".format(
+        token=_TOKEN, quoted_string=_QUOTED_STRING
+    )
+)
+
+_QUOTED_PAIR_REPLACE_RE: Final[Pattern[str]] = re.compile(r"\\([\t !-~])")
+# same pattern as _QUOTED_PAIR but contains a capture group
+
+_FORWARDED_PAIR_RE: Final[Pattern[str]] = re.compile(_FORWARDED_PAIR)
+
+############################################################
+# HTTP Request
+############################################################
+
+
+class 
BaseRequest(MutableMapping[str, Any], HeadersMixin): + + POST_METHODS = { + hdrs.METH_PATCH, + hdrs.METH_POST, + hdrs.METH_PUT, + hdrs.METH_TRACE, + hdrs.METH_DELETE, + } + + ATTRS = HeadersMixin.ATTRS | frozenset( + [ + "_message", + "_protocol", + "_payload_writer", + "_payload", + "_headers", + "_method", + "_version", + "_rel_url", + "_post", + "_read_bytes", + "_state", + "_cache", + "_task", + "_client_max_size", + "_loop", + "_transport_sslcontext", + "_transport_peername", + ] + ) + _post: Optional[MultiDictProxy[Union[str, bytes, FileField]]] = None + _read_bytes: Optional[bytes] = None + + def __init__( + self, + message: RawRequestMessage, + payload: StreamReader, + protocol: "RequestHandler", + payload_writer: AbstractStreamWriter, + task: "asyncio.Task[None]", + loop: asyncio.AbstractEventLoop, + *, + client_max_size: int = 1024**2, + state: Optional[Dict[str, Any]] = None, + scheme: Optional[str] = None, + host: Optional[str] = None, + remote: Optional[str] = None, + ) -> None: + self._message = message + self._protocol = protocol + self._payload_writer = payload_writer + + self._payload = payload + self._headers: CIMultiDictProxy[str] = message.headers + self._method = message.method + self._version = message.version + self._cache: Dict[str, Any] = {} + url = message.url + if url.absolute: + if scheme is not None: + url = url.with_scheme(scheme) + if host is not None: + url = url.with_host(host) + # absolute URL is given, + # override auto-calculating url, host, and scheme + # all other properties should be good + self._cache["url"] = url + self._cache["host"] = url.host + self._cache["scheme"] = url.scheme + self._rel_url = url.relative() + else: + self._rel_url = url + if scheme is not None: + self._cache["scheme"] = scheme + if host is not None: + self._cache["host"] = host + + self._state = {} if state is None else state + self._task = task + self._client_max_size = client_max_size + self._loop = loop + + self._transport_sslcontext = protocol.ssl_context + self._transport_peername = protocol.peername + + if remote is not None: + self._cache["remote"] = remote + + def clone( + self, + *, + method: Union[str, _SENTINEL] = sentinel, + rel_url: Union[StrOrURL, _SENTINEL] = sentinel, + headers: Union[LooseHeaders, _SENTINEL] = sentinel, + scheme: Union[str, _SENTINEL] = sentinel, + host: Union[str, _SENTINEL] = sentinel, + remote: Union[str, _SENTINEL] = sentinel, + client_max_size: Union[int, _SENTINEL] = sentinel, + ) -> "BaseRequest": + """Clone itself with replacement some attributes. + + Creates and returns a new instance of Request object. If no parameters + are given, an exact copy is returned. If a parameter is not passed, it + will reuse the one from the current request object. 
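+
+        Example (illustrative)::
+
+            # Pretend the request was addressed to another host:
+            request2 = request.clone(host="example.com")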
+ """ + if self._read_bytes: + raise RuntimeError("Cannot clone request after reading its content") + + dct: Dict[str, Any] = {} + if method is not sentinel: + dct["method"] = method + if rel_url is not sentinel: + new_url: URL = URL(rel_url) + dct["url"] = new_url + dct["path"] = str(new_url) + if headers is not sentinel: + # a copy semantic + dct["headers"] = CIMultiDictProxy(CIMultiDict(headers)) + dct["raw_headers"] = tuple( + (k.encode("utf-8"), v.encode("utf-8")) + for k, v in dct["headers"].items() + ) + + message = self._message._replace(**dct) + + kwargs = {} + if scheme is not sentinel: + kwargs["scheme"] = scheme + if host is not sentinel: + kwargs["host"] = host + if remote is not sentinel: + kwargs["remote"] = remote + if client_max_size is sentinel: + client_max_size = self._client_max_size + + return self.__class__( + message, + self._payload, + self._protocol, + self._payload_writer, + self._task, + self._loop, + client_max_size=client_max_size, + state=self._state.copy(), + **kwargs, + ) + + @property + def task(self) -> "asyncio.Task[None]": + return self._task + + @property + def protocol(self) -> "RequestHandler": + return self._protocol + + @property + def transport(self) -> Optional[asyncio.Transport]: + if self._protocol is None: + return None + return self._protocol.transport + + @property + def writer(self) -> AbstractStreamWriter: + return self._payload_writer + + @property + def client_max_size(self) -> int: + return self._client_max_size + + @reify + def message(self) -> RawRequestMessage: + warnings.warn("Request.message is deprecated", DeprecationWarning, stacklevel=3) + return self._message + + @reify + def rel_url(self) -> URL: + return self._rel_url + + @reify + def loop(self) -> asyncio.AbstractEventLoop: + warnings.warn( + "request.loop property is deprecated", DeprecationWarning, stacklevel=2 + ) + return self._loop + + # MutableMapping API + + def __getitem__(self, key: str) -> Any: + return self._state[key] + + def __setitem__(self, key: str, value: Any) -> None: + self._state[key] = value + + def __delitem__(self, key: str) -> None: + del self._state[key] + + def __len__(self) -> int: + return len(self._state) + + def __iter__(self) -> Iterator[str]: + return iter(self._state) + + ######## + + @reify + def secure(self) -> bool: + """A bool indicating if the request is handled with SSL.""" + return self.scheme == "https" + + @reify + def forwarded(self) -> Tuple[Mapping[str, str], ...]: + """A tuple containing all parsed Forwarded header(s). + + Makes an effort to parse Forwarded headers as specified by RFC 7239: + + - It adds one (immutable) dictionary per Forwarded 'field-value', ie + per proxy. The element corresponds to the data in the Forwarded + field-value added by the first proxy encountered by the client. Each + subsequent item corresponds to those added by later proxies. + - It checks that every value has valid syntax in general as specified + in section 4: either a 'token' or a 'quoted-string'. + - It un-escapes found escape sequences. + - It does NOT validate 'by' and 'for' contents as specified in section + 6. + - It does NOT validate 'host' contents (Host ABNF). + - It does NOT validate 'proto' contents for valid URI scheme names. 
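+
+        For example (illustrative), ``Forwarded: for=192.0.2.60;proto=http``
+        yields a one-element tuple whose mapping holds
+        ``{'for': '192.0.2.60', 'proto': 'http'}``.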
+ + Returns a tuple containing one or more immutable dicts + """ + elems = [] + for field_value in self._message.headers.getall(hdrs.FORWARDED, ()): + length = len(field_value) + pos = 0 + need_separator = False + elem: Dict[str, str] = {} + elems.append(types.MappingProxyType(elem)) + while 0 <= pos < length: + match = _FORWARDED_PAIR_RE.match(field_value, pos) + if match is not None: # got a valid forwarded-pair + if need_separator: + # bad syntax here, skip to next comma + pos = field_value.find(",", pos) + else: + name, value, port = match.groups() + if value[0] == '"': + # quoted string: remove quotes and unescape + value = _QUOTED_PAIR_REPLACE_RE.sub(r"\1", value[1:-1]) + if port: + value += port + elem[name.lower()] = value + pos += len(match.group(0)) + need_separator = True + elif field_value[pos] == ",": # next forwarded-element + need_separator = False + elem = {} + elems.append(types.MappingProxyType(elem)) + pos += 1 + elif field_value[pos] == ";": # next forwarded-pair + need_separator = False + pos += 1 + elif field_value[pos] in " \t": + # Allow whitespace even between forwarded-pairs, though + # RFC 7239 doesn't. This simplifies code and is in line + # with Postel's law. + pos += 1 + else: + # bad syntax here, skip to next comma + pos = field_value.find(",", pos) + return tuple(elems) + + @reify + def scheme(self) -> str: + """A string representing the scheme of the request. + + Hostname is resolved in this order: + + - overridden value by .clone(scheme=new_scheme) call. + - type of connection to peer: HTTPS if socket is SSL, HTTP otherwise. + + 'http' or 'https'. + """ + if self._transport_sslcontext: + return "https" + else: + return "http" + + @reify + def method(self) -> str: + """Read only property for getting HTTP method. + + The value is upper-cased str like 'GET', 'POST', 'PUT' etc. + """ + return self._method + + @reify + def version(self) -> HttpVersion: + """Read only property for getting HTTP version of request. + + Returns aiohttp.protocol.HttpVersion instance. + """ + return self._version + + @reify + def host(self) -> str: + """Hostname of the request. + + Hostname is resolved in this order: + + - overridden value by .clone(host=new_host) call. + - HOST HTTP header + - socket.getfqdn() value + + For example, 'example.com' or 'localhost:8080'. + + For historical reasons, the port number may be included. + """ + host = self._message.headers.get(hdrs.HOST) + if host is not None: + return host + return socket.getfqdn() + + @reify + def remote(self) -> Optional[str]: + """Remote IP of client initiated HTTP request. + + The IP is resolved in this order: + + - overridden value by .clone(remote=new_remote) call. + - peername of opened socket + """ + if self._transport_peername is None: + return None + if isinstance(self._transport_peername, (list, tuple)): + return str(self._transport_peername[0]) + return str(self._transport_peername) + + @reify + def url(self) -> URL: + """The full URL of the request.""" + # authority is used here because it may include the port number + # and we want yarl to parse it correctly + return URL.build(scheme=self.scheme, authority=self.host).join(self._rel_url) + + @reify + def path(self) -> str: + """The URL including *PATH INFO* without the host or scheme. + + E.g., ``/app/blog`` + """ + return self._rel_url.path + + @reify + def path_qs(self) -> str: + """The URL including PATH_INFO and the query string. 
+
+        E.g., /app/blog?id=10
+        """
+        return str(self._rel_url)
+
+    @reify
+    def raw_path(self) -> str:
+        """The URL including raw *PATH INFO* without the host or scheme.
+
+        Warning: the path is unquoted and may contain invalid URL characters.
+
+        E.g., ``/my%2Fpath%7Cwith%21some%25strange%24characters``
+        """
+        return self._message.path
+
+    @reify
+    def query(self) -> "MultiMapping[str]":
+        """A multidict with all the variables in the query string."""
+        return self._rel_url.query
+
+    @reify
+    def query_string(self) -> str:
+        """The query string in the URL.
+
+        E.g., id=10
+        """
+        return self._rel_url.query_string
+
+    @reify
+    def headers(self) -> CIMultiDictProxy[str]:
+        """A case-insensitive multidict proxy with all headers."""
+        return self._headers
+
+    @reify
+    def raw_headers(self) -> RawHeaders:
+        """A sequence of pairs for all headers."""
+        return self._message.raw_headers
+
+    @reify
+    def if_modified_since(self) -> Optional[datetime.datetime]:
+        """The value of If-Modified-Since HTTP header, or None.
+
+        This header is represented as a `datetime` object.
+        """
+        return parse_http_date(self.headers.get(hdrs.IF_MODIFIED_SINCE))
+
+    @reify
+    def if_unmodified_since(self) -> Optional[datetime.datetime]:
+        """The value of If-Unmodified-Since HTTP header, or None.
+
+        This header is represented as a `datetime` object.
+        """
+        return parse_http_date(self.headers.get(hdrs.IF_UNMODIFIED_SINCE))
+
+    @staticmethod
+    def _etag_values(etag_header: str) -> Iterator[ETag]:
+        """Extract `ETag` objects from raw header."""
+        if etag_header == ETAG_ANY:
+            yield ETag(
+                is_weak=False,
+                value=ETAG_ANY,
+            )
+        else:
+            for match in LIST_QUOTED_ETAG_RE.finditer(etag_header):
+                is_weak, value, garbage = match.group(2, 3, 4)
+                # Any symbol captured by the 4th group means
+                # that the following sequence is invalid.
+                if garbage:
+                    break
+
+                yield ETag(
+                    is_weak=bool(is_weak),
+                    value=value,
+                )
+
+    @classmethod
+    def _if_match_or_none_impl(
+        cls, header_value: Optional[str]
+    ) -> Optional[Tuple[ETag, ...]]:
+        if not header_value:
+            return None
+
+        return tuple(cls._etag_values(header_value))
+
+    @reify
+    def if_match(self) -> Optional[Tuple[ETag, ...]]:
+        """The value of If-Match HTTP header, or None.
+
+        This header is represented as a `tuple` of `ETag` objects.
+        """
+        return self._if_match_or_none_impl(self.headers.get(hdrs.IF_MATCH))
+
+    @reify
+    def if_none_match(self) -> Optional[Tuple[ETag, ...]]:
+        """The value of If-None-Match HTTP header, or None.
+
+        This header is represented as a `tuple` of `ETag` objects.
+        """
+        return self._if_match_or_none_impl(self.headers.get(hdrs.IF_NONE_MATCH))
+
+    @reify
+    def if_range(self) -> Optional[datetime.datetime]:
+        """The value of If-Range HTTP header, or None.
+
+        This header is represented as a `datetime` object.
+        """
+        return parse_http_date(self.headers.get(hdrs.IF_RANGE))
+
+    @reify
+    def keep_alive(self) -> bool:
+        """Is keepalive enabled by client?"""
+        return not self._message.should_close
+
+    @reify
+    def cookies(self) -> Mapping[str, str]:
+        """Return request cookies.
+
+        A read-only dictionary-like object.
+        """
+        # Use parse_cookie_header for RFC 6265 compliant Cookie header parsing
+        # that accepts special characters in cookie names (fixes #2683)
+        parsed = parse_cookie_header(self.headers.get(hdrs.COOKIE, ""))
+        # Extract values from Morsel objects
+        return MappingProxyType({name: morsel.value for name, morsel in parsed})
+
+    @reify
+    def http_range(self) -> slice:
+        """The content of Range HTTP header.
+
+        Return a slice instance.
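+
+        E.g., ``Range: bytes=0-499`` maps to ``slice(0, 500, 1)``: the HTTP
+        range end is inclusive while the slice end is exclusive.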
+ + """ + rng = self._headers.get(hdrs.RANGE) + start, end = None, None + if rng is not None: + try: + pattern = r"^bytes=(\d*)-(\d*)$" + start, end = re.findall(pattern, rng, re.ASCII)[0] + except IndexError: # pattern was not found in header + raise ValueError("range not in acceptable format") + + end = int(end) if end else None + start = int(start) if start else None + + if start is None and end is not None: + # end with no start is to return tail of content + start = -end + end = None + + if start is not None and end is not None: + # end is inclusive in range header, exclusive for slice + end += 1 + + if start >= end: + raise ValueError("start cannot be after end") + + if start is end is None: # No valid range supplied + raise ValueError("No start or end of range specified") + + return slice(start, end, 1) + + @reify + def content(self) -> StreamReader: + """Return raw payload stream.""" + return self._payload + + @property + def has_body(self) -> bool: + """Return True if request's HTTP BODY can be read, False otherwise.""" + warnings.warn( + "Deprecated, use .can_read_body #2005", DeprecationWarning, stacklevel=2 + ) + return not self._payload.at_eof() + + @property + def can_read_body(self) -> bool: + """Return True if request's HTTP BODY can be read, False otherwise.""" + return not self._payload.at_eof() + + @reify + def body_exists(self) -> bool: + """Return True if request has HTTP BODY, False otherwise.""" + return type(self._payload) is not EmptyStreamReader + + async def release(self) -> None: + """Release request. + + Eat unread part of HTTP BODY if present. + """ + while not self._payload.at_eof(): + await self._payload.readany() + + async def read(self) -> bytes: + """Read request body if present. + + Returns bytes object with full request content. 
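+
+        Example (illustrative)::
+
+            data = await request.read()  # bounded by client_max_size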
+ """ + if self._read_bytes is None: + body = bytearray() + while True: + chunk = await self._payload.readany() + body.extend(chunk) + if self._client_max_size: + body_size = len(body) + if body_size >= self._client_max_size: + raise HTTPRequestEntityTooLarge( + max_size=self._client_max_size, actual_size=body_size + ) + if not chunk: + break + self._read_bytes = bytes(body) + return self._read_bytes + + async def text(self) -> str: + """Return BODY as text using encoding from .charset.""" + bytes_body = await self.read() + encoding = self.charset or "utf-8" + return bytes_body.decode(encoding) + + async def json(self, *, loads: JSONDecoder = DEFAULT_JSON_DECODER) -> Any: + """Return BODY as JSON.""" + body = await self.text() + return loads(body) + + async def multipart(self) -> MultipartReader: + """Return async iterator to process BODY as multipart.""" + return MultipartReader(self._headers, self._payload) + + async def post(self) -> "MultiDictProxy[Union[str, bytes, FileField]]": + """Return POST parameters.""" + if self._post is not None: + return self._post + if self._method not in self.POST_METHODS: + self._post = MultiDictProxy(MultiDict()) + return self._post + + content_type = self.content_type + if content_type not in ( + "", + "application/x-www-form-urlencoded", + "multipart/form-data", + ): + self._post = MultiDictProxy(MultiDict()) + return self._post + + out: MultiDict[Union[str, bytes, FileField]] = MultiDict() + + if content_type == "multipart/form-data": + multipart = await self.multipart() + max_size = self._client_max_size + + size = 0 + while (field := await multipart.next()) is not None: + field_ct = field.headers.get(hdrs.CONTENT_TYPE) + + if isinstance(field, BodyPartReader): + if field.name is None: + raise ValueError("Multipart field missing name.") + + # Note that according to RFC 7578, the Content-Type header + # is optional, even for files, so we can't assume it's + # present. 
+ # https://tools.ietf.org/html/rfc7578#section-4.4 + if field.filename: + # store file in temp file + tmp = await self._loop.run_in_executor( + None, tempfile.TemporaryFile + ) + chunk = await field.read_chunk(size=2**16) + while chunk: + chunk = await field.decode(chunk) + await self._loop.run_in_executor(None, tmp.write, chunk) + size += len(chunk) + if 0 < max_size < size: + await self._loop.run_in_executor(None, tmp.close) + raise HTTPRequestEntityTooLarge( + max_size=max_size, actual_size=size + ) + chunk = await field.read_chunk(size=2**16) + await self._loop.run_in_executor(None, tmp.seek, 0) + + if field_ct is None: + field_ct = "application/octet-stream" + + ff = FileField( + field.name, + field.filename, + cast(io.BufferedReader, tmp), + field_ct, + field.headers, + ) + out.add(field.name, ff) + else: + # deal with ordinary data + value = await field.read(decode=True) + if field_ct is None or field_ct.startswith("text/"): + charset = field.get_charset(default="utf-8") + out.add(field.name, value.decode(charset)) + else: + out.add(field.name, value) + size += len(value) + if 0 < max_size < size: + raise HTTPRequestEntityTooLarge( + max_size=max_size, actual_size=size + ) + else: + raise ValueError( + "To decode nested multipart you need to use custom reader", + ) + else: + data = await self.read() + if data: + charset = self.charset or "utf-8" + out.extend( + parse_qsl( + data.rstrip().decode(charset), + keep_blank_values=True, + encoding=charset, + ) + ) + + self._post = MultiDictProxy(out) + return self._post + + def get_extra_info(self, name: str, default: Any = None) -> Any: + """Extra info from protocol transport""" + protocol = self._protocol + if protocol is None: + return default + + transport = protocol.transport + if transport is None: + return default + + return transport.get_extra_info(name, default) + + def __repr__(self) -> str: + ascii_encodable_path = self.path.encode("ascii", "backslashreplace").decode( + "ascii" + ) + return "<{} {} {} >".format( + self.__class__.__name__, self._method, ascii_encodable_path + ) + + def __eq__(self, other: object) -> bool: + return id(self) == id(other) + + def __bool__(self) -> bool: + return True + + async def _prepare_hook(self, response: StreamResponse) -> None: + return + + def _cancel(self, exc: BaseException) -> None: + set_exception(self._payload, exc) + + def _finish(self) -> None: + if self._post is None or self.content_type != "multipart/form-data": + return + + # NOTE: Release file descriptors for the + # NOTE: `tempfile.Temporaryfile`-created `_io.BufferedRandom` + # NOTE: instances of files sent within multipart request body + # NOTE: via HTTP POST request. 
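+        #
+        # Closing here is safe: finish_response() calls _finish() only after
+        # the user handler has returned, so nothing can still be reading
+        # these temporary files.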
+ for file_name, file_field_object in self._post.items(): + if isinstance(file_field_object, FileField): + file_field_object.file.close() + + +class Request(BaseRequest): + + ATTRS = BaseRequest.ATTRS | frozenset(["_match_info"]) + + _match_info: Optional["UrlMappingMatchInfo"] = None + + if DEBUG: + + def __setattr__(self, name: str, val: Any) -> None: + if name not in self.ATTRS: + warnings.warn( + "Setting custom {}.{} attribute " + "is discouraged".format(self.__class__.__name__, name), + DeprecationWarning, + stacklevel=2, + ) + super().__setattr__(name, val) + + def clone( + self, + *, + method: Union[str, _SENTINEL] = sentinel, + rel_url: Union[StrOrURL, _SENTINEL] = sentinel, + headers: Union[LooseHeaders, _SENTINEL] = sentinel, + scheme: Union[str, _SENTINEL] = sentinel, + host: Union[str, _SENTINEL] = sentinel, + remote: Union[str, _SENTINEL] = sentinel, + client_max_size: Union[int, _SENTINEL] = sentinel, + ) -> "Request": + ret = super().clone( + method=method, + rel_url=rel_url, + headers=headers, + scheme=scheme, + host=host, + remote=remote, + client_max_size=client_max_size, + ) + new_ret = cast(Request, ret) + new_ret._match_info = self._match_info + return new_ret + + @reify + def match_info(self) -> "UrlMappingMatchInfo": + """Result of route resolving.""" + match_info = self._match_info + assert match_info is not None + return match_info + + @property + def app(self) -> "Application": + """Application instance.""" + match_info = self._match_info + assert match_info is not None + return match_info.current_app + + @property + def config_dict(self) -> ChainMapProxy: + match_info = self._match_info + assert match_info is not None + lst = match_info.apps + app = self.app + idx = lst.index(app) + sublist = list(reversed(lst[: idx + 1])) + return ChainMapProxy(sublist) + + async def _prepare_hook(self, response: StreamResponse) -> None: + match_info = self._match_info + if match_info is None: + return + for app in match_info._apps: + if on_response_prepare := app.on_response_prepare: + await on_response_prepare.send(self, response) diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_response.py b/py311/lib/python3.11/site-packages/aiohttp/web_response.py new file mode 100644 index 0000000000000000000000000000000000000000..e5f8b6cd652c36274386cb0ce5586d8a5fdf044c --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/web_response.py @@ -0,0 +1,856 @@ +import asyncio +import collections.abc +import datetime +import enum +import json +import math +import time +import warnings +from concurrent.futures import Executor +from http import HTTPStatus +from http.cookies import SimpleCookie +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterator, + MutableMapping, + Optional, + Union, + cast, +) + +from multidict import CIMultiDict, istr + +from . 
import hdrs, payload +from .abc import AbstractStreamWriter +from .compression_utils import ZLibCompressor +from .helpers import ( + ETAG_ANY, + QUOTED_ETAG_RE, + ETag, + HeadersMixin, + must_be_empty_body, + parse_http_date, + rfc822_formatted_time, + sentinel, + should_remove_content_length, + validate_etag_value, +) +from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11 +from .payload import Payload +from .typedefs import JSONEncoder, LooseHeaders + +REASON_PHRASES = {http_status.value: http_status.phrase for http_status in HTTPStatus} +LARGE_BODY_SIZE = 1024**2 + +__all__ = ("ContentCoding", "StreamResponse", "Response", "json_response") + + +if TYPE_CHECKING: + from .web_request import BaseRequest + + BaseClass = MutableMapping[str, Any] +else: + BaseClass = collections.abc.MutableMapping + + +# TODO(py311): Convert to StrEnum for wider use +class ContentCoding(enum.Enum): + # The content codings that we have support for. + # + # Additional registered codings are listed at: + # https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding + deflate = "deflate" + gzip = "gzip" + identity = "identity" + + +CONTENT_CODINGS = {coding.value: coding for coding in ContentCoding} + +############################################################ +# HTTP Response classes +############################################################ + + +class StreamResponse(BaseClass, HeadersMixin): + + _body: Union[None, bytes, bytearray, Payload] + _length_check = True + _body = None + _keep_alive: Optional[bool] = None + _chunked: bool = False + _compression: bool = False + _compression_strategy: Optional[int] = None + _compression_force: Optional[ContentCoding] = None + _req: Optional["BaseRequest"] = None + _payload_writer: Optional[AbstractStreamWriter] = None + _eof_sent: bool = False + _must_be_empty_body: Optional[bool] = None + _body_length = 0 + _cookies: Optional[SimpleCookie] = None + _send_headers_immediately = True + + def __init__( + self, + *, + status: int = 200, + reason: Optional[str] = None, + headers: Optional[LooseHeaders] = None, + _real_headers: Optional[CIMultiDict[str]] = None, + ) -> None: + """Initialize a new stream response object. + + _real_headers is an internal parameter used to pass a pre-populated + headers object. It is used by the `Response` class to avoid copying + the headers when creating a new response object. It is not intended + to be used by external code. 
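+
+        Typical streaming usage (illustrative)::
+
+            resp = StreamResponse(status=200)
+            await resp.prepare(request)
+            await resp.write(b"hello")
+            await resp.write_eof()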
+ """ + self._state: Dict[str, Any] = {} + + if _real_headers is not None: + self._headers = _real_headers + elif headers is not None: + self._headers: CIMultiDict[str] = CIMultiDict(headers) + else: + self._headers = CIMultiDict() + + self._set_status(status, reason) + + @property + def prepared(self) -> bool: + return self._eof_sent or self._payload_writer is not None + + @property + def task(self) -> "Optional[asyncio.Task[None]]": + if self._req: + return self._req.task + else: + return None + + @property + def status(self) -> int: + return self._status + + @property + def chunked(self) -> bool: + return self._chunked + + @property + def compression(self) -> bool: + return self._compression + + @property + def reason(self) -> str: + return self._reason + + def set_status( + self, + status: int, + reason: Optional[str] = None, + ) -> None: + assert ( + not self.prepared + ), "Cannot change the response status code after the headers have been sent" + self._set_status(status, reason) + + def _set_status(self, status: int, reason: Optional[str]) -> None: + self._status = int(status) + if reason is None: + reason = REASON_PHRASES.get(self._status, "") + elif "\n" in reason: + raise ValueError("Reason cannot contain \\n") + self._reason = reason + + @property + def keep_alive(self) -> Optional[bool]: + return self._keep_alive + + def force_close(self) -> None: + self._keep_alive = False + + @property + def body_length(self) -> int: + return self._body_length + + @property + def output_length(self) -> int: + warnings.warn("output_length is deprecated", DeprecationWarning) + assert self._payload_writer + return self._payload_writer.buffer_size + + def enable_chunked_encoding(self, chunk_size: Optional[int] = None) -> None: + """Enables automatic chunked transfer encoding.""" + if hdrs.CONTENT_LENGTH in self._headers: + raise RuntimeError( + "You can't enable chunked encoding when a content length is set" + ) + if chunk_size is not None: + warnings.warn("Chunk size is deprecated #1615", DeprecationWarning) + self._chunked = True + + def enable_compression( + self, + force: Optional[Union[bool, ContentCoding]] = None, + strategy: Optional[int] = None, + ) -> None: + """Enables response compression encoding.""" + # Backwards compatibility for when force was a bool <0.17. + if isinstance(force, bool): + force = ContentCoding.deflate if force else ContentCoding.identity + warnings.warn( + "Using boolean for force is deprecated #3318", DeprecationWarning + ) + elif force is not None: + assert isinstance( + force, ContentCoding + ), "force should one of None, bool or ContentEncoding" + + self._compression = True + self._compression_force = force + self._compression_strategy = strategy + + @property + def headers(self) -> "CIMultiDict[str]": + return self._headers + + @property + def cookies(self) -> SimpleCookie: + if self._cookies is None: + self._cookies = SimpleCookie() + return self._cookies + + def set_cookie( + self, + name: str, + value: str, + *, + expires: Optional[str] = None, + domain: Optional[str] = None, + max_age: Optional[Union[int, str]] = None, + path: str = "/", + secure: Optional[bool] = None, + httponly: Optional[bool] = None, + version: Optional[str] = None, + samesite: Optional[str] = None, + partitioned: Optional[bool] = None, + ) -> None: + """Set or update response cookie. + + Sets new cookie or updates existent with new value. + Also updates only those params which are not None. 
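+
+        Example (illustrative)::
+
+            resp.set_cookie("session", "opaque", max_age=3600, httponly=True)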
+ """ + if self._cookies is None: + self._cookies = SimpleCookie() + + self._cookies[name] = value + c = self._cookies[name] + + if expires is not None: + c["expires"] = expires + elif c.get("expires") == "Thu, 01 Jan 1970 00:00:00 GMT": + del c["expires"] + + if domain is not None: + c["domain"] = domain + + if max_age is not None: + c["max-age"] = str(max_age) + elif "max-age" in c: + del c["max-age"] + + c["path"] = path + + if secure is not None: + c["secure"] = secure + if httponly is not None: + c["httponly"] = httponly + if version is not None: + c["version"] = version + if samesite is not None: + c["samesite"] = samesite + + if partitioned is not None: + c["partitioned"] = partitioned + + def del_cookie( + self, + name: str, + *, + domain: Optional[str] = None, + path: str = "/", + secure: Optional[bool] = None, + httponly: Optional[bool] = None, + samesite: Optional[str] = None, + ) -> None: + """Delete cookie. + + Creates new empty expired cookie. + """ + # TODO: do we need domain/path here? + if self._cookies is not None: + self._cookies.pop(name, None) + self.set_cookie( + name, + "", + max_age=0, + expires="Thu, 01 Jan 1970 00:00:00 GMT", + domain=domain, + path=path, + secure=secure, + httponly=httponly, + samesite=samesite, + ) + + @property + def content_length(self) -> Optional[int]: + # Just a placeholder for adding setter + return super().content_length + + @content_length.setter + def content_length(self, value: Optional[int]) -> None: + if value is not None: + value = int(value) + if self._chunked: + raise RuntimeError( + "You can't set content length when chunked encoding is enable" + ) + self._headers[hdrs.CONTENT_LENGTH] = str(value) + else: + self._headers.pop(hdrs.CONTENT_LENGTH, None) + + @property + def content_type(self) -> str: + # Just a placeholder for adding setter + return super().content_type + + @content_type.setter + def content_type(self, value: str) -> None: + self.content_type # read header values if needed + self._content_type = str(value) + self._generate_content_type_header() + + @property + def charset(self) -> Optional[str]: + # Just a placeholder for adding setter + return super().charset + + @charset.setter + def charset(self, value: Optional[str]) -> None: + ctype = self.content_type # read header values if needed + if ctype == "application/octet-stream": + raise RuntimeError( + "Setting charset for application/octet-stream " + "doesn't make sense, setup content_type first" + ) + assert self._content_dict is not None + if value is None: + self._content_dict.pop("charset", None) + else: + self._content_dict["charset"] = str(value).lower() + self._generate_content_type_header() + + @property + def last_modified(self) -> Optional[datetime.datetime]: + """The value of Last-Modified HTTP header, or None. + + This header is represented as a `datetime` object. 
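+
+        The setter below additionally accepts an epoch timestamp (int or
+        float) or a preformatted string, e.g. (illustrative)
+        ``resp.last_modified = time.time()``.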
+ """ + return parse_http_date(self._headers.get(hdrs.LAST_MODIFIED)) + + @last_modified.setter + def last_modified( + self, value: Optional[Union[int, float, datetime.datetime, str]] + ) -> None: + if value is None: + self._headers.pop(hdrs.LAST_MODIFIED, None) + elif isinstance(value, (int, float)): + self._headers[hdrs.LAST_MODIFIED] = time.strftime( + "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value)) + ) + elif isinstance(value, datetime.datetime): + self._headers[hdrs.LAST_MODIFIED] = time.strftime( + "%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple() + ) + elif isinstance(value, str): + self._headers[hdrs.LAST_MODIFIED] = value + else: + msg = f"Unsupported type for last_modified: {type(value).__name__}" + raise TypeError(msg) + + @property + def etag(self) -> Optional[ETag]: + quoted_value = self._headers.get(hdrs.ETAG) + if not quoted_value: + return None + elif quoted_value == ETAG_ANY: + return ETag(value=ETAG_ANY) + match = QUOTED_ETAG_RE.fullmatch(quoted_value) + if not match: + return None + is_weak, value = match.group(1, 2) + return ETag( + is_weak=bool(is_weak), + value=value, + ) + + @etag.setter + def etag(self, value: Optional[Union[ETag, str]]) -> None: + if value is None: + self._headers.pop(hdrs.ETAG, None) + elif (isinstance(value, str) and value == ETAG_ANY) or ( + isinstance(value, ETag) and value.value == ETAG_ANY + ): + self._headers[hdrs.ETAG] = ETAG_ANY + elif isinstance(value, str): + validate_etag_value(value) + self._headers[hdrs.ETAG] = f'"{value}"' + elif isinstance(value, ETag) and isinstance(value.value, str): + validate_etag_value(value.value) + hdr_value = f'W/"{value.value}"' if value.is_weak else f'"{value.value}"' + self._headers[hdrs.ETAG] = hdr_value + else: + raise ValueError( + f"Unsupported etag type: {type(value)}. 
" + f"etag must be str, ETag or None" + ) + + def _generate_content_type_header( + self, CONTENT_TYPE: istr = hdrs.CONTENT_TYPE + ) -> None: + assert self._content_dict is not None + assert self._content_type is not None + params = "; ".join(f"{k}={v}" for k, v in self._content_dict.items()) + if params: + ctype = self._content_type + "; " + params + else: + ctype = self._content_type + self._headers[CONTENT_TYPE] = ctype + + async def _do_start_compression(self, coding: ContentCoding) -> None: + if coding is ContentCoding.identity: + return + assert self._payload_writer is not None + self._headers[hdrs.CONTENT_ENCODING] = coding.value + self._payload_writer.enable_compression( + coding.value, self._compression_strategy + ) + # Compressed payload may have different content length, + # remove the header + self._headers.popall(hdrs.CONTENT_LENGTH, None) + + async def _start_compression(self, request: "BaseRequest") -> None: + if self._compression_force: + await self._do_start_compression(self._compression_force) + return + # Encoding comparisons should be case-insensitive + # https://www.rfc-editor.org/rfc/rfc9110#section-8.4.1 + accept_encoding = request.headers.get(hdrs.ACCEPT_ENCODING, "").lower() + for value, coding in CONTENT_CODINGS.items(): + if value in accept_encoding: + await self._do_start_compression(coding) + return + + async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]: + if self._eof_sent: + return None + if self._payload_writer is not None: + return self._payload_writer + self._must_be_empty_body = must_be_empty_body(request.method, self.status) + return await self._start(request) + + async def _start(self, request: "BaseRequest") -> AbstractStreamWriter: + self._req = request + writer = self._payload_writer = request._payload_writer + + await self._prepare_headers() + await request._prepare_hook(self) + await self._write_headers() + + return writer + + async def _prepare_headers(self) -> None: + request = self._req + assert request is not None + writer = self._payload_writer + assert writer is not None + keep_alive = self._keep_alive + if keep_alive is None: + keep_alive = request.keep_alive + self._keep_alive = keep_alive + + version = request.version + + headers = self._headers + if self._cookies: + for cookie in self._cookies.values(): + value = cookie.output(header="")[1:] + headers.add(hdrs.SET_COOKIE, value) + + if self._compression: + await self._start_compression(request) + + if self._chunked: + if version != HttpVersion11: + raise RuntimeError( + "Using chunked encoding is forbidden " + "for HTTP/{0.major}.{0.minor}".format(request.version) + ) + if not self._must_be_empty_body: + writer.enable_chunking() + headers[hdrs.TRANSFER_ENCODING] = "chunked" + elif self._length_check: # Disabled for WebSockets + writer.length = self.content_length + if writer.length is None: + if version >= HttpVersion11: + if not self._must_be_empty_body: + writer.enable_chunking() + headers[hdrs.TRANSFER_ENCODING] = "chunked" + elif not self._must_be_empty_body: + keep_alive = False + + # HTTP 1.1: https://tools.ietf.org/html/rfc7230#section-3.3.2 + # HTTP 1.0: https://tools.ietf.org/html/rfc1945#section-10.4 + if self._must_be_empty_body: + if hdrs.CONTENT_LENGTH in headers and should_remove_content_length( + request.method, self.status + ): + del headers[hdrs.CONTENT_LENGTH] + # https://datatracker.ietf.org/doc/html/rfc9112#section-6.1-10 + # https://datatracker.ietf.org/doc/html/rfc9112#section-6.1-13 + if hdrs.TRANSFER_ENCODING in headers: + del 
headers[hdrs.TRANSFER_ENCODING] + elif (writer.length if self._length_check else self.content_length) != 0: + # https://www.rfc-editor.org/rfc/rfc9110#section-8.3-5 + headers.setdefault(hdrs.CONTENT_TYPE, "application/octet-stream") + headers.setdefault(hdrs.DATE, rfc822_formatted_time()) + headers.setdefault(hdrs.SERVER, SERVER_SOFTWARE) + + # connection header + if hdrs.CONNECTION not in headers: + if keep_alive: + if version == HttpVersion10: + headers[hdrs.CONNECTION] = "keep-alive" + elif version == HttpVersion11: + headers[hdrs.CONNECTION] = "close" + + async def _write_headers(self) -> None: + request = self._req + assert request is not None + writer = self._payload_writer + assert writer is not None + # status line + version = request.version + status_line = f"HTTP/{version[0]}.{version[1]} {self._status} {self._reason}" + await writer.write_headers(status_line, self._headers) + # Send headers immediately if not opted into buffering + if self._send_headers_immediately: + writer.send_headers() + + async def write(self, data: Union[bytes, bytearray, memoryview]) -> None: + assert isinstance( + data, (bytes, bytearray, memoryview) + ), "data argument must be byte-ish (%r)" % type(data) + + if self._eof_sent: + raise RuntimeError("Cannot call write() after write_eof()") + if self._payload_writer is None: + raise RuntimeError("Cannot call write() before prepare()") + + await self._payload_writer.write(data) + + async def drain(self) -> None: + assert not self._eof_sent, "EOF has already been sent" + assert self._payload_writer is not None, "Response has not been started" + warnings.warn( + "drain method is deprecated, use await resp.write()", + DeprecationWarning, + stacklevel=2, + ) + await self._payload_writer.drain() + + async def write_eof(self, data: bytes = b"") -> None: + assert isinstance( + data, (bytes, bytearray, memoryview) + ), "data argument must be byte-ish (%r)" % type(data) + + if self._eof_sent: + return + + assert self._payload_writer is not None, "Response has not been started" + + await self._payload_writer.write_eof(data) + self._eof_sent = True + self._req = None + self._body_length = self._payload_writer.output_size + self._payload_writer = None + + def __repr__(self) -> str: + if self._eof_sent: + info = "eof" + elif self.prepared: + assert self._req is not None + info = f"{self._req.method} {self._req.path} " + else: + info = "not prepared" + return f"<{self.__class__.__name__} {self.reason} {info}>" + + def __getitem__(self, key: str) -> Any: + return self._state[key] + + def __setitem__(self, key: str, value: Any) -> None: + self._state[key] = value + + def __delitem__(self, key: str) -> None: + del self._state[key] + + def __len__(self) -> int: + return len(self._state) + + def __iter__(self) -> Iterator[str]: + return iter(self._state) + + def __hash__(self) -> int: + return hash(id(self)) + + def __eq__(self, other: object) -> bool: + return self is other + + def __bool__(self) -> bool: + return True + + +class Response(StreamResponse): + + _compressed_body: Optional[bytes] = None + _send_headers_immediately = False + + def __init__( + self, + *, + body: Any = None, + status: int = 200, + reason: Optional[str] = None, + text: Optional[str] = None, + headers: Optional[LooseHeaders] = None, + content_type: Optional[str] = None, + charset: Optional[str] = None, + zlib_executor_size: Optional[int] = None, + zlib_executor: Optional[Executor] = None, + ) -> None: + if body is not None and text is not None: + raise ValueError("body and text are not allowed 
together") + + if headers is None: + real_headers: CIMultiDict[str] = CIMultiDict() + else: + real_headers = CIMultiDict(headers) + + if content_type is not None and "charset" in content_type: + raise ValueError("charset must not be in content_type argument") + + if text is not None: + if hdrs.CONTENT_TYPE in real_headers: + if content_type or charset: + raise ValueError( + "passing both Content-Type header and " + "content_type or charset params " + "is forbidden" + ) + else: + # fast path for filling headers + if not isinstance(text, str): + raise TypeError("text argument must be str (%r)" % type(text)) + if content_type is None: + content_type = "text/plain" + if charset is None: + charset = "utf-8" + real_headers[hdrs.CONTENT_TYPE] = content_type + "; charset=" + charset + body = text.encode(charset) + text = None + elif hdrs.CONTENT_TYPE in real_headers: + if content_type is not None or charset is not None: + raise ValueError( + "passing both Content-Type header and " + "content_type or charset params " + "is forbidden" + ) + elif content_type is not None: + if charset is not None: + content_type += "; charset=" + charset + real_headers[hdrs.CONTENT_TYPE] = content_type + + super().__init__(status=status, reason=reason, _real_headers=real_headers) + + if text is not None: + self.text = text + else: + self.body = body + + self._zlib_executor_size = zlib_executor_size + self._zlib_executor = zlib_executor + + @property + def body(self) -> Optional[Union[bytes, Payload]]: + return self._body + + @body.setter + def body(self, body: Any) -> None: + if body is None: + self._body = None + elif isinstance(body, (bytes, bytearray)): + self._body = body + else: + try: + self._body = body = payload.PAYLOAD_REGISTRY.get(body) + except payload.LookupError: + raise ValueError("Unsupported body type %r" % type(body)) + + headers = self._headers + + # set content-type + if hdrs.CONTENT_TYPE not in headers: + headers[hdrs.CONTENT_TYPE] = body.content_type + + # copy payload headers + if body.headers: + for key, value in body.headers.items(): + if key not in headers: + headers[key] = value + + self._compressed_body = None + + @property + def text(self) -> Optional[str]: + if self._body is None: + return None + # Note: When _body is a Payload (e.g. 
FilePayload), this may do blocking I/O + # This is generally safe as most common payloads (BytesPayload, StringPayload) + # don't do blocking I/O, but be careful with file-based payloads + return self._body.decode(self.charset or "utf-8") + + @text.setter + def text(self, text: str) -> None: + assert text is None or isinstance( + text, str + ), "text argument must be str (%r)" % type(text) + + if self.content_type == "application/octet-stream": + self.content_type = "text/plain" + if self.charset is None: + self.charset = "utf-8" + + self._body = text.encode(self.charset) + self._compressed_body = None + + @property + def content_length(self) -> Optional[int]: + if self._chunked: + return None + + if hdrs.CONTENT_LENGTH in self._headers: + return int(self._headers[hdrs.CONTENT_LENGTH]) + + if self._compressed_body is not None: + # Return length of the compressed body + return len(self._compressed_body) + elif isinstance(self._body, Payload): + # A payload without content length, or a compressed payload + return None + elif self._body is not None: + return len(self._body) + else: + return 0 + + @content_length.setter + def content_length(self, value: Optional[int]) -> None: + raise RuntimeError("Content length is set automatically") + + async def write_eof(self, data: bytes = b"") -> None: + if self._eof_sent: + return + if self._compressed_body is None: + body: Optional[Union[bytes, Payload]] = self._body + else: + body = self._compressed_body + assert not data, f"data arg is not supported, got {data!r}" + assert self._req is not None + assert self._payload_writer is not None + if body is None or self._must_be_empty_body: + await super().write_eof() + elif isinstance(self._body, Payload): + await self._body.write(self._payload_writer) + await self._body.close() + await super().write_eof() + else: + await super().write_eof(cast(bytes, body)) + + async def _start(self, request: "BaseRequest") -> AbstractStreamWriter: + if hdrs.CONTENT_LENGTH in self._headers: + if should_remove_content_length(request.method, self.status): + del self._headers[hdrs.CONTENT_LENGTH] + elif not self._chunked: + if isinstance(self._body, Payload): + if (size := self._body.size) is not None: + self._headers[hdrs.CONTENT_LENGTH] = str(size) + else: + body_len = len(self._body) if self._body else "0" + # https://www.rfc-editor.org/rfc/rfc9110.html#section-8.6-7 + if body_len != "0" or ( + self.status != 304 and request.method not in hdrs.METH_HEAD_ALL + ): + self._headers[hdrs.CONTENT_LENGTH] = str(body_len) + + return await super()._start(request) + + async def _do_start_compression(self, coding: ContentCoding) -> None: + if self._chunked or isinstance(self._body, Payload): + return await super()._do_start_compression(coding) + if coding is ContentCoding.identity: + return + # Instead of using _payload_writer.enable_compression, + # compress the whole body + compressor = ZLibCompressor( + encoding=coding.value, + max_sync_chunk_size=self._zlib_executor_size, + executor=self._zlib_executor, + ) + assert self._body is not None + if self._zlib_executor_size is None and len(self._body) > LARGE_BODY_SIZE: + warnings.warn( + "Synchronous compression of large response bodies " + f"({len(self._body)} bytes) might block the async event loop. " + "Consider providing a custom value to zlib_executor_size/" + "zlib_executor response properties or disabling compression on it." 
+            )
+        self._compressed_body = (
+            await compressor.compress(self._body) + compressor.flush()
+        )
+        self._headers[hdrs.CONTENT_ENCODING] = coding.value
+        self._headers[hdrs.CONTENT_LENGTH] = str(len(self._compressed_body))
+
+
+def json_response(
+    data: Any = sentinel,
+    *,
+    text: Optional[str] = None,
+    body: Optional[bytes] = None,
+    status: int = 200,
+    reason: Optional[str] = None,
+    headers: Optional[LooseHeaders] = None,
+    content_type: str = "application/json",
+    dumps: JSONEncoder = json.dumps,
+) -> Response:
+    if data is not sentinel:
+        if text or body:
+            raise ValueError("only one of data, text, or body should be specified")
+        else:
+            text = dumps(data)
+    return Response(
+        text=text,
+        body=body,
+        status=status,
+        reason=reason,
+        headers=headers,
+        content_type=content_type,
+    )
diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_routedef.py b/py311/lib/python3.11/site-packages/aiohttp/web_routedef.py
new file mode 100644
index 0000000000000000000000000000000000000000..f51b6cd00815a4daeabf7ef269a3225b2b764503
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/aiohttp/web_routedef.py
@@ -0,0 +1,214 @@
+import abc
+import os  # noqa
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    Iterator,
+    List,
+    Optional,
+    Sequence,
+    Type,
+    Union,
+    overload,
+)
+
+import attr
+
+from . import hdrs
+from .abc import AbstractView
+from .typedefs import Handler, PathLike
+
+if TYPE_CHECKING:
+    from .web_request import Request
+    from .web_response import StreamResponse
+    from .web_urldispatcher import AbstractRoute, UrlDispatcher
+else:
+    Request = StreamResponse = UrlDispatcher = AbstractRoute = None
+
+
+__all__ = (
+    "AbstractRouteDef",
+    "RouteDef",
+    "StaticDef",
+    "RouteTableDef",
+    "head",
+    "options",
+    "get",
+    "post",
+    "patch",
+    "put",
+    "delete",
+    "route",
+    "view",
+    "static",
+)
+
+
+class AbstractRouteDef(abc.ABC):
+    @abc.abstractmethod
+    def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
+        pass  # pragma: no cover
+
+
+_HandlerType = Union[Type[AbstractView], Handler]
+
+
+@attr.s(auto_attribs=True, frozen=True, repr=False, slots=True)
+class RouteDef(AbstractRouteDef):
+    method: str
+    path: str
+    handler: _HandlerType
+    kwargs: Dict[str, Any]
+
+    def __repr__(self) -> str:
+        info = []
+        for name, value in sorted(self.kwargs.items()):
+            info.append(f", {name}={value!r}")
+        return "<RouteDef {method} {path} -> {handler.__name__!r}{info}>".format(
+            method=self.method, path=self.path, handler=self.handler, info="".join(info)
+        )
+
+    def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
+        if self.method in hdrs.METH_ALL:
+            reg = getattr(router, "add_" + self.method.lower())
+            return [reg(self.path, self.handler, **self.kwargs)]
+        else:
+            return [
+                router.add_route(self.method, self.path, self.handler, **self.kwargs)
+            ]
+
+
+@attr.s(auto_attribs=True, frozen=True, repr=False, slots=True)
+class StaticDef(AbstractRouteDef):
+    prefix: str
+    path: PathLike
+    kwargs: Dict[str, Any]
+
+    def __repr__(self) -> str:
+        info = []
+        for name, value in sorted(self.kwargs.items()):
+            info.append(f", {name}={value!r}")
+        return "<StaticDef {prefix} -> {path}{info}>".format(
+            prefix=self.prefix, path=self.path, info="".join(info)
+        )
+
+    def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
+        resource = router.add_static(self.prefix, self.path, **self.kwargs)
+        routes = resource.get_info().get("routes", {})
+        return list(routes.values())
+
+
+def route(method: str, path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+    return RouteDef(method, path, handler, kwargs)
+
+
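+# Usage sketch (illustrative only; assumes these helpers are re-exported via
+# the public "aiohttp.web" namespace). RouteDef objects produced by the
+# helpers below are typically registered in bulk with Application.add_routes:
+#
+#     async def hello(request: web.Request) -> web.Response:
+#         return web.Response(text="hello")
+#
+#     app = web.Application()
+#     app.add_routes([web.get("/", hello), web.post("/submit", hello)])
+
+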
+def head(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+    return route(hdrs.METH_HEAD, path, handler, **kwargs)
+
+
+def options(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+    return route(hdrs.METH_OPTIONS, path, handler, **kwargs)
+
+
+def get(
+    path: str,
+    handler: _HandlerType,
+    *,
+    name: Optional[str] = None,
+    allow_head: bool = True,
+    **kwargs: Any,
+) -> RouteDef:
+    return route(
+        hdrs.METH_GET, path, handler, name=name, allow_head=allow_head, **kwargs
+    )
+
+
+def post(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+    return route(hdrs.METH_POST, path, handler, **kwargs)
+
+
+def put(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+    return route(hdrs.METH_PUT, path, handler, **kwargs)
+
+
+def patch(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+    return route(hdrs.METH_PATCH, path, handler, **kwargs)
+
+
+def delete(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+    return route(hdrs.METH_DELETE, path, handler, **kwargs)
+
+
+def view(path: str, handler: Type[AbstractView], **kwargs: Any) -> RouteDef:
+    return route(hdrs.METH_ANY, path, handler, **kwargs)
+
+
+def static(prefix: str, path: PathLike, **kwargs: Any) -> StaticDef:
+    return StaticDef(prefix, path, kwargs)
+
+
+_Deco = Callable[[_HandlerType], _HandlerType]
+
+
+class RouteTableDef(Sequence[AbstractRouteDef]):
+    """Route definition table"""
+
+    def __init__(self) -> None:
+        self._items: List[AbstractRouteDef] = []
+
+    def __repr__(self) -> str:
+        return f"<RouteTableDef count={len(self._items)}>"
+
+    @overload
+    def __getitem__(self, index: int) -> AbstractRouteDef: ...
+
+    @overload
+    def __getitem__(self, index: slice) -> List[AbstractRouteDef]: ...
+
+    def __getitem__(self, index):  # type: ignore[no-untyped-def]
+        return self._items[index]
+
+    def __iter__(self) -> Iterator[AbstractRouteDef]:
+        return iter(self._items)
+
+    def __len__(self) -> int:
+        return len(self._items)
+
+    def __contains__(self, item: object) -> bool:
+        return item in self._items
+
+    def route(self, method: str, path: str, **kwargs: Any) -> _Deco:
+        def inner(handler: _HandlerType) -> _HandlerType:
+            self._items.append(RouteDef(method, path, handler, kwargs))
+            return handler
+
+        return inner
+
+    def head(self, path: str, **kwargs: Any) -> _Deco:
+        return self.route(hdrs.METH_HEAD, path, **kwargs)
+
+    def get(self, path: str, **kwargs: Any) -> _Deco:
+        return self.route(hdrs.METH_GET, path, **kwargs)
+
+    def post(self, path: str, **kwargs: Any) -> _Deco:
+        return self.route(hdrs.METH_POST, path, **kwargs)
+
+    def put(self, path: str, **kwargs: Any) -> _Deco:
+        return self.route(hdrs.METH_PUT, path, **kwargs)
+
+    def patch(self, path: str, **kwargs: Any) -> _Deco:
+        return self.route(hdrs.METH_PATCH, path, **kwargs)
+
+    def delete(self, path: str, **kwargs: Any) -> _Deco:
+        return self.route(hdrs.METH_DELETE, path, **kwargs)
+
+    def options(self, path: str, **kwargs: Any) -> _Deco:
+        return self.route(hdrs.METH_OPTIONS, path, **kwargs)
+
+    def view(self, path: str, **kwargs: Any) -> _Deco:
+        return self.route(hdrs.METH_ANY, path, **kwargs)
+
+    def static(self, prefix: str, path: PathLike, **kwargs: Any) -> None:
+        self._items.append(StaticDef(prefix, path, kwargs))
diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_runner.py b/py311/lib/python3.11/site-packages/aiohttp/web_runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcfec727c8419bbc6518085ecedde1f7de8992c9
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/aiohttp/web_runner.py
@@
-0,0 +1,399 @@ +import asyncio +import signal +import socket +import warnings +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any, List, Optional, Set + +from yarl import URL + +from .typedefs import PathLike +from .web_app import Application +from .web_server import Server + +if TYPE_CHECKING: + from ssl import SSLContext +else: + try: + from ssl import SSLContext + except ImportError: # pragma: no cover + SSLContext = object # type: ignore[misc,assignment] + +__all__ = ( + "BaseSite", + "TCPSite", + "UnixSite", + "NamedPipeSite", + "SockSite", + "BaseRunner", + "AppRunner", + "ServerRunner", + "GracefulExit", +) + + +class GracefulExit(SystemExit): + code = 1 + + +def _raise_graceful_exit() -> None: + raise GracefulExit() + + +class BaseSite(ABC): + __slots__ = ("_runner", "_ssl_context", "_backlog", "_server") + + def __init__( + self, + runner: "BaseRunner", + *, + shutdown_timeout: float = 60.0, + ssl_context: Optional[SSLContext] = None, + backlog: int = 128, + ) -> None: + if runner.server is None: + raise RuntimeError("Call runner.setup() before making a site") + if shutdown_timeout != 60.0: + msg = "shutdown_timeout should be set on BaseRunner" + warnings.warn(msg, DeprecationWarning, stacklevel=2) + runner._shutdown_timeout = shutdown_timeout + self._runner = runner + self._ssl_context = ssl_context + self._backlog = backlog + self._server: Optional[asyncio.AbstractServer] = None + + @property + @abstractmethod + def name(self) -> str: + pass # pragma: no cover + + @abstractmethod + async def start(self) -> None: + self._runner._reg_site(self) + + async def stop(self) -> None: + self._runner._check_site(self) + if self._server is not None: # Maybe not started yet + self._server.close() + + self._runner._unreg_site(self) + + +class TCPSite(BaseSite): + __slots__ = ("_host", "_port", "_reuse_address", "_reuse_port") + + def __init__( + self, + runner: "BaseRunner", + host: Optional[str] = None, + port: Optional[int] = None, + *, + shutdown_timeout: float = 60.0, + ssl_context: Optional[SSLContext] = None, + backlog: int = 128, + reuse_address: Optional[bool] = None, + reuse_port: Optional[bool] = None, + ) -> None: + super().__init__( + runner, + shutdown_timeout=shutdown_timeout, + ssl_context=ssl_context, + backlog=backlog, + ) + self._host = host + if port is None: + port = 8443 if self._ssl_context else 8080 + self._port = port + self._reuse_address = reuse_address + self._reuse_port = reuse_port + + @property + def name(self) -> str: + scheme = "https" if self._ssl_context else "http" + host = "0.0.0.0" if not self._host else self._host + return str(URL.build(scheme=scheme, host=host, port=self._port)) + + async def start(self) -> None: + await super().start() + loop = asyncio.get_event_loop() + server = self._runner.server + assert server is not None + self._server = await loop.create_server( + server, + self._host, + self._port, + ssl=self._ssl_context, + backlog=self._backlog, + reuse_address=self._reuse_address, + reuse_port=self._reuse_port, + ) + + +class UnixSite(BaseSite): + __slots__ = ("_path",) + + def __init__( + self, + runner: "BaseRunner", + path: PathLike, + *, + shutdown_timeout: float = 60.0, + ssl_context: Optional[SSLContext] = None, + backlog: int = 128, + ) -> None: + super().__init__( + runner, + shutdown_timeout=shutdown_timeout, + ssl_context=ssl_context, + backlog=backlog, + ) + self._path = path + + @property + def name(self) -> str: + scheme = "https" if self._ssl_context else "http" + return f"{scheme}://unix:{self._path}:" 
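+
+    # Usage sketch (illustrative only; the socket path is hypothetical, the
+    # runner/site calls follow the public aiohttp API shown in this module):
+    #
+    #     runner = web.AppRunner(app)
+    #     await runner.setup()
+    #     site = web.UnixSite(runner, "/tmp/app.sock")
+    #     await site.start()
+    #     ...
+    #     await runner.cleanup()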
+ + async def start(self) -> None: + await super().start() + loop = asyncio.get_event_loop() + server = self._runner.server + assert server is not None + self._server = await loop.create_unix_server( + server, + self._path, + ssl=self._ssl_context, + backlog=self._backlog, + ) + + +class NamedPipeSite(BaseSite): + __slots__ = ("_path",) + + def __init__( + self, runner: "BaseRunner", path: str, *, shutdown_timeout: float = 60.0 + ) -> None: + loop = asyncio.get_event_loop() + if not isinstance( + loop, asyncio.ProactorEventLoop # type: ignore[attr-defined] + ): + raise RuntimeError( + "Named Pipes only available in proactor loop under windows" + ) + super().__init__(runner, shutdown_timeout=shutdown_timeout) + self._path = path + + @property + def name(self) -> str: + return self._path + + async def start(self) -> None: + await super().start() + loop = asyncio.get_event_loop() + server = self._runner.server + assert server is not None + _server = await loop.start_serving_pipe( # type: ignore[attr-defined] + server, self._path + ) + self._server = _server[0] + + +class SockSite(BaseSite): + __slots__ = ("_sock", "_name") + + def __init__( + self, + runner: "BaseRunner", + sock: socket.socket, + *, + shutdown_timeout: float = 60.0, + ssl_context: Optional[SSLContext] = None, + backlog: int = 128, + ) -> None: + super().__init__( + runner, + shutdown_timeout=shutdown_timeout, + ssl_context=ssl_context, + backlog=backlog, + ) + self._sock = sock + scheme = "https" if self._ssl_context else "http" + if hasattr(socket, "AF_UNIX") and sock.family == socket.AF_UNIX: + name = f"{scheme}://unix:{sock.getsockname()}:" + else: + host, port = sock.getsockname()[:2] + name = str(URL.build(scheme=scheme, host=host, port=port)) + self._name = name + + @property + def name(self) -> str: + return self._name + + async def start(self) -> None: + await super().start() + loop = asyncio.get_event_loop() + server = self._runner.server + assert server is not None + self._server = await loop.create_server( + server, sock=self._sock, ssl=self._ssl_context, backlog=self._backlog + ) + + +class BaseRunner(ABC): + __slots__ = ("_handle_signals", "_kwargs", "_server", "_sites", "_shutdown_timeout") + + def __init__( + self, + *, + handle_signals: bool = False, + shutdown_timeout: float = 60.0, + **kwargs: Any, + ) -> None: + self._handle_signals = handle_signals + self._kwargs = kwargs + self._server: Optional[Server] = None + self._sites: List[BaseSite] = [] + self._shutdown_timeout = shutdown_timeout + + @property + def server(self) -> Optional[Server]: + return self._server + + @property + def addresses(self) -> List[Any]: + ret: List[Any] = [] + for site in self._sites: + server = site._server + if server is not None: + sockets = server.sockets # type: ignore[attr-defined] + if sockets is not None: + for sock in sockets: + ret.append(sock.getsockname()) + return ret + + @property + def sites(self) -> Set[BaseSite]: + return set(self._sites) + + async def setup(self) -> None: + loop = asyncio.get_event_loop() + + if self._handle_signals: + try: + loop.add_signal_handler(signal.SIGINT, _raise_graceful_exit) + loop.add_signal_handler(signal.SIGTERM, _raise_graceful_exit) + except NotImplementedError: # pragma: no cover + # add_signal_handler is not implemented on Windows + pass + + self._server = await self._make_server() + + @abstractmethod + async def shutdown(self) -> None: + """Call any shutdown hooks to help server close gracefully.""" + + async def cleanup(self) -> None: + # The loop over sites is intentional, an 
exception on gather() + # leaves self._sites in unpredictable state. + # The loop guaranties that a site is either deleted on success or + # still present on failure + for site in list(self._sites): + await site.stop() + + if self._server: # If setup succeeded + # Yield to event loop to ensure incoming requests prior to stopping the sites + # have all started to be handled before we proceed to close idle connections. + await asyncio.sleep(0) + self._server.pre_shutdown() + await self.shutdown() + await self._server.shutdown(self._shutdown_timeout) + await self._cleanup_server() + + self._server = None + if self._handle_signals: + loop = asyncio.get_running_loop() + try: + loop.remove_signal_handler(signal.SIGINT) + loop.remove_signal_handler(signal.SIGTERM) + except NotImplementedError: # pragma: no cover + # remove_signal_handler is not implemented on Windows + pass + + @abstractmethod + async def _make_server(self) -> Server: + pass # pragma: no cover + + @abstractmethod + async def _cleanup_server(self) -> None: + pass # pragma: no cover + + def _reg_site(self, site: BaseSite) -> None: + if site in self._sites: + raise RuntimeError(f"Site {site} is already registered in runner {self}") + self._sites.append(site) + + def _check_site(self, site: BaseSite) -> None: + if site not in self._sites: + raise RuntimeError(f"Site {site} is not registered in runner {self}") + + def _unreg_site(self, site: BaseSite) -> None: + if site not in self._sites: + raise RuntimeError(f"Site {site} is not registered in runner {self}") + self._sites.remove(site) + + +class ServerRunner(BaseRunner): + """Low-level web server runner""" + + __slots__ = ("_web_server",) + + def __init__( + self, web_server: Server, *, handle_signals: bool = False, **kwargs: Any + ) -> None: + super().__init__(handle_signals=handle_signals, **kwargs) + self._web_server = web_server + + async def shutdown(self) -> None: + pass + + async def _make_server(self) -> Server: + return self._web_server + + async def _cleanup_server(self) -> None: + pass + + +class AppRunner(BaseRunner): + """Web Application runner""" + + __slots__ = ("_app",) + + def __init__( + self, app: Application, *, handle_signals: bool = False, **kwargs: Any + ) -> None: + super().__init__(handle_signals=handle_signals, **kwargs) + if not isinstance(app, Application): + raise TypeError( + "The first argument should be web.Application " + "instance, got {!r}".format(app) + ) + self._app = app + + @property + def app(self) -> Application: + return self._app + + async def shutdown(self) -> None: + await self._app.shutdown() + + async def _make_server(self) -> Server: + loop = asyncio.get_event_loop() + self._app._set_loop(loop) + self._app.on_startup.freeze() + await self._app.startup() + self._app.freeze() + + return self._app._make_handler(loop=loop, **self._kwargs) + + async def _cleanup_server(self) -> None: + await self._app.cleanup() diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_server.py b/py311/lib/python3.11/site-packages/aiohttp/web_server.py new file mode 100644 index 0000000000000000000000000000000000000000..328aca1e405ef87e4df8a992c32eac092b4af8f0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/web_server.py @@ -0,0 +1,84 @@ +"""Low level HTTP server.""" + +import asyncio +from typing import Any, Awaitable, Callable, Dict, List, Optional # noqa + +from .abc import AbstractStreamWriter +from .http_parser import RawRequestMessage +from .streams import StreamReader +from .web_protocol import RequestHandler, _RequestFactory, 
_RequestHandler +from .web_request import BaseRequest + +__all__ = ("Server",) + + +class Server: + def __init__( + self, + handler: _RequestHandler, + *, + request_factory: Optional[_RequestFactory] = None, + handler_cancellation: bool = False, + loop: Optional[asyncio.AbstractEventLoop] = None, + **kwargs: Any, + ) -> None: + self._loop = loop or asyncio.get_running_loop() + self._connections: Dict[RequestHandler, asyncio.Transport] = {} + self._kwargs = kwargs + # requests_count is the number of requests being processed by the server + # for the lifetime of the server. + self.requests_count = 0 + self.request_handler = handler + self.request_factory = request_factory or self._make_request + self.handler_cancellation = handler_cancellation + + @property + def connections(self) -> List[RequestHandler]: + return list(self._connections.keys()) + + def connection_made( + self, handler: RequestHandler, transport: asyncio.Transport + ) -> None: + self._connections[handler] = transport + + def connection_lost( + self, handler: RequestHandler, exc: Optional[BaseException] = None + ) -> None: + if handler in self._connections: + if handler._task_handler: + handler._task_handler.add_done_callback( + lambda f: self._connections.pop(handler, None) + ) + else: + del self._connections[handler] + + def _make_request( + self, + message: RawRequestMessage, + payload: StreamReader, + protocol: RequestHandler, + writer: AbstractStreamWriter, + task: "asyncio.Task[None]", + ) -> BaseRequest: + return BaseRequest(message, payload, protocol, writer, task, self._loop) + + def pre_shutdown(self) -> None: + for conn in self._connections: + conn.close() + + async def shutdown(self, timeout: Optional[float] = None) -> None: + coros = (conn.shutdown(timeout) for conn in self._connections) + await asyncio.gather(*coros) + self._connections.clear() + + def __call__(self) -> RequestHandler: + try: + return RequestHandler(self, loop=self._loop, **self._kwargs) + except TypeError: + # Failsafe creation: remove all custom handler_args + kwargs = { + k: v + for k, v in self._kwargs.items() + if k in ["debug", "access_log_class"] + } + return RequestHandler(self, loop=self._loop, **kwargs) diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_urldispatcher.py b/py311/lib/python3.11/site-packages/aiohttp/web_urldispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..cfa57a310046c78636d1872f6e4c2e27b6a18a76 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/web_urldispatcher.py @@ -0,0 +1,1305 @@ +import abc +import asyncio +import base64 +import functools +import hashlib +import html +import inspect +import keyword +import os +import platform +import re +import sys +import warnings +from functools import wraps +from pathlib import Path +from types import MappingProxyType +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Container, + Dict, + Final, + Generator, + Iterable, + Iterator, + List, + Mapping, + NoReturn, + Optional, + Pattern, + Set, + Sized, + Tuple, + Type, + TypedDict, + Union, + cast, +) + +from yarl import URL, __version__ as yarl_version + +from . 
import hdrs +from .abc import AbstractMatchInfo, AbstractRouter, AbstractView +from .helpers import DEBUG +from .http import HttpVersion11 +from .typedefs import Handler, PathLike +from .web_exceptions import ( + HTTPException, + HTTPExpectationFailed, + HTTPForbidden, + HTTPMethodNotAllowed, + HTTPNotFound, +) +from .web_fileresponse import FileResponse +from .web_request import Request +from .web_response import Response, StreamResponse +from .web_routedef import AbstractRouteDef + +__all__ = ( + "UrlDispatcher", + "UrlMappingMatchInfo", + "AbstractResource", + "Resource", + "PlainResource", + "DynamicResource", + "AbstractRoute", + "ResourceRoute", + "StaticResource", + "View", +) + + +if TYPE_CHECKING: + from .web_app import Application + + BaseDict = Dict[str, str] +else: + BaseDict = dict + +CIRCULAR_SYMLINK_ERROR = ( + (OSError,) + if sys.version_info < (3, 10) and sys.platform.startswith("win32") + else (RuntimeError,) if sys.version_info < (3, 13) else () +) + +YARL_VERSION: Final[Tuple[int, ...]] = tuple(map(int, yarl_version.split(".")[:2])) + +HTTP_METHOD_RE: Final[Pattern[str]] = re.compile( + r"^[0-9A-Za-z!#\$%&'\*\+\-\.\^_`\|~]+$" +) +ROUTE_RE: Final[Pattern[str]] = re.compile( + r"(\{[_a-zA-Z][^{}]*(?:\{[^{}]*\}[^{}]*)*\})" +) +PATH_SEP: Final[str] = re.escape("/") + +IS_WINDOWS: Final[bool] = platform.system() == "Windows" + +_ExpectHandler = Callable[[Request], Awaitable[Optional[StreamResponse]]] +_Resolve = Tuple[Optional["UrlMappingMatchInfo"], Set[str]] + +html_escape = functools.partial(html.escape, quote=True) + + +class _InfoDict(TypedDict, total=False): + path: str + + formatter: str + pattern: Pattern[str] + + directory: Path + prefix: str + routes: Mapping[str, "AbstractRoute"] + + app: "Application" + + domain: str + + rule: "AbstractRuleMatching" + + http_exception: HTTPException + + +class AbstractResource(Sized, Iterable["AbstractRoute"]): + def __init__(self, *, name: Optional[str] = None) -> None: + self._name = name + + @property + def name(self) -> Optional[str]: + return self._name + + @property + @abc.abstractmethod + def canonical(self) -> str: + """Exposes the resource's canonical path. + + For example '/foo/bar/{name}' + + """ + + @abc.abstractmethod # pragma: no branch + def url_for(self, **kwargs: str) -> URL: + """Construct url for resource with additional params.""" + + @abc.abstractmethod # pragma: no branch + async def resolve(self, request: Request) -> _Resolve: + """Resolve resource. + + Return (UrlMappingMatchInfo, allowed_methods) pair. + """ + + @abc.abstractmethod + def add_prefix(self, prefix: str) -> None: + """Add a prefix to processed URLs. + + Required for subapplications support. 
+ """ + + @abc.abstractmethod + def get_info(self) -> _InfoDict: + """Return a dict with additional info useful for introspection""" + + def freeze(self) -> None: + pass + + @abc.abstractmethod + def raw_match(self, path: str) -> bool: + """Perform a raw match against path""" + + +class AbstractRoute(abc.ABC): + def __init__( + self, + method: str, + handler: Union[Handler, Type[AbstractView]], + *, + expect_handler: Optional[_ExpectHandler] = None, + resource: Optional[AbstractResource] = None, + ) -> None: + + if expect_handler is None: + expect_handler = _default_expect_handler + + assert inspect.iscoroutinefunction(expect_handler) or ( + sys.version_info < (3, 14) and asyncio.iscoroutinefunction(expect_handler) + ), f"Coroutine is expected, got {expect_handler!r}" + + method = method.upper() + if not HTTP_METHOD_RE.match(method): + raise ValueError(f"{method} is not allowed HTTP method") + + assert callable(handler), handler + if inspect.iscoroutinefunction(handler) or ( + sys.version_info < (3, 14) and asyncio.iscoroutinefunction(handler) + ): + pass + elif inspect.isgeneratorfunction(handler): + if TYPE_CHECKING: + assert False + warnings.warn( + "Bare generators are deprecated, use @coroutine wrapper", + DeprecationWarning, + ) + elif isinstance(handler, type) and issubclass(handler, AbstractView): + pass + else: + warnings.warn( + "Bare functions are deprecated, use async ones", DeprecationWarning + ) + + @wraps(handler) + async def handler_wrapper(request: Request) -> StreamResponse: + result = old_handler(request) # type: ignore[call-arg] + if asyncio.iscoroutine(result): + result = await result + assert isinstance(result, StreamResponse) + return result + + old_handler = handler + handler = handler_wrapper + + self._method = method + self._handler = handler + self._expect_handler = expect_handler + self._resource = resource + + @property + def method(self) -> str: + return self._method + + @property + def handler(self) -> Handler: + return self._handler + + @property + @abc.abstractmethod + def name(self) -> Optional[str]: + """Optional route's name, always equals to resource's name.""" + + @property + def resource(self) -> Optional[AbstractResource]: + return self._resource + + @abc.abstractmethod + def get_info(self) -> _InfoDict: + """Return a dict with additional info useful for introspection""" + + @abc.abstractmethod # pragma: no branch + def url_for(self, *args: str, **kwargs: str) -> URL: + """Construct url for route with additional params.""" + + async def handle_expect_header(self, request: Request) -> Optional[StreamResponse]: + return await self._expect_handler(request) + + +class UrlMappingMatchInfo(BaseDict, AbstractMatchInfo): + + __slots__ = ("_route", "_apps", "_current_app", "_frozen") + + def __init__(self, match_dict: Dict[str, str], route: AbstractRoute) -> None: + super().__init__(match_dict) + self._route = route + self._apps: List[Application] = [] + self._current_app: Optional[Application] = None + self._frozen = False + + @property + def handler(self) -> Handler: + return self._route.handler + + @property + def route(self) -> AbstractRoute: + return self._route + + @property + def expect_handler(self) -> _ExpectHandler: + return self._route.handle_expect_header + + @property + def http_exception(self) -> Optional[HTTPException]: + return None + + def get_info(self) -> _InfoDict: # type: ignore[override] + return self._route.get_info() + + @property + def apps(self) -> Tuple["Application", ...]: + return tuple(self._apps) + + def add_app(self, app: 
"Application") -> None: + if self._frozen: + raise RuntimeError("Cannot change apps stack after .freeze() call") + if self._current_app is None: + self._current_app = app + self._apps.insert(0, app) + + @property + def current_app(self) -> "Application": + app = self._current_app + assert app is not None + return app + + @current_app.setter + def current_app(self, app: "Application") -> None: + if DEBUG: # pragma: no cover + if app not in self._apps: + raise RuntimeError( + "Expected one of the following apps {!r}, got {!r}".format( + self._apps, app + ) + ) + self._current_app = app + + def freeze(self) -> None: + self._frozen = True + + def __repr__(self) -> str: + return f"" + + +class MatchInfoError(UrlMappingMatchInfo): + + __slots__ = ("_exception",) + + def __init__(self, http_exception: HTTPException) -> None: + self._exception = http_exception + super().__init__({}, SystemRoute(self._exception)) + + @property + def http_exception(self) -> HTTPException: + return self._exception + + def __repr__(self) -> str: + return "".format( + self._exception.status, self._exception.reason + ) + + +async def _default_expect_handler(request: Request) -> None: + """Default handler for Expect header. + + Just send "100 Continue" to client. + raise HTTPExpectationFailed if value of header is not "100-continue" + """ + expect = request.headers.get(hdrs.EXPECT, "") + if request.version == HttpVersion11: + if expect.lower() == "100-continue": + await request.writer.write(b"HTTP/1.1 100 Continue\r\n\r\n") + # Reset output_size as we haven't started the main body yet. + request.writer.output_size = 0 + else: + raise HTTPExpectationFailed(text="Unknown Expect: %s" % expect) + + +class Resource(AbstractResource): + def __init__(self, *, name: Optional[str] = None) -> None: + super().__init__(name=name) + self._routes: Dict[str, ResourceRoute] = {} + self._any_route: Optional[ResourceRoute] = None + self._allowed_methods: Set[str] = set() + + def add_route( + self, + method: str, + handler: Union[Type[AbstractView], Handler], + *, + expect_handler: Optional[_ExpectHandler] = None, + ) -> "ResourceRoute": + if route := self._routes.get(method, self._any_route): + raise RuntimeError( + "Added route will never be executed, " + f"method {route.method} is already " + "registered" + ) + + route_obj = ResourceRoute(method, handler, self, expect_handler=expect_handler) + self.register_route(route_obj) + return route_obj + + def register_route(self, route: "ResourceRoute") -> None: + assert isinstance( + route, ResourceRoute + ), f"Instance of Route class is required, got {route!r}" + if route.method == hdrs.METH_ANY: + self._any_route = route + self._allowed_methods.add(route.method) + self._routes[route.method] = route + + async def resolve(self, request: Request) -> _Resolve: + if (match_dict := self._match(request.rel_url.path_safe)) is None: + return None, set() + if route := self._routes.get(request.method, self._any_route): + return UrlMappingMatchInfo(match_dict, route), self._allowed_methods + return None, self._allowed_methods + + @abc.abstractmethod + def _match(self, path: str) -> Optional[Dict[str, str]]: + pass # pragma: no cover + + def __len__(self) -> int: + return len(self._routes) + + def __iter__(self) -> Iterator["ResourceRoute"]: + return iter(self._routes.values()) + + # TODO: implement all abstract methods + + +class PlainResource(Resource): + def __init__(self, path: str, *, name: Optional[str] = None) -> None: + super().__init__(name=name) + assert not path or path.startswith("/") + 
+class PlainResource(Resource):
+    def __init__(self, path: str, *, name: Optional[str] = None) -> None:
+        super().__init__(name=name)
+        assert not path or path.startswith("/")
+        self._path = path
+
+    @property
+    def canonical(self) -> str:
+        return self._path
+
+    def freeze(self) -> None:
+        if not self._path:
+            self._path = "/"
+
+    def add_prefix(self, prefix: str) -> None:
+        assert prefix.startswith("/")
+        assert not prefix.endswith("/")
+        assert len(prefix) > 1
+        self._path = prefix + self._path
+
+    def _match(self, path: str) -> Optional[Dict[str, str]]:
+        # string comparison is about 10 times faster than regexp matching
+        if self._path == path:
+            return {}
+        return None
+
+    def raw_match(self, path: str) -> bool:
+        return self._path == path
+
+    def get_info(self) -> _InfoDict:
+        return {"path": self._path}
+
+    def url_for(self) -> URL:  # type: ignore[override]
+        return URL.build(path=self._path, encoded=True)
+
+    def __repr__(self) -> str:
+        name = "'" + self.name + "' " if self.name is not None else ""
+        return f"<PlainResource {name} {self._path}>"
+
+
+class DynamicResource(Resource):
+
+    DYN = re.compile(r"\{(?P<var>[_a-zA-Z][_a-zA-Z0-9]*)\}")
+    DYN_WITH_RE = re.compile(r"\{(?P<var>[_a-zA-Z][_a-zA-Z0-9]*):(?P<re>.+)\}")
+    GOOD = r"[^{}/]+"
+
+    def __init__(self, path: str, *, name: Optional[str] = None) -> None:
+        super().__init__(name=name)
+        self._orig_path = path
+        pattern = ""
+        formatter = ""
+        for part in ROUTE_RE.split(path):
+            match = self.DYN.fullmatch(part)
+            if match:
+                pattern += "(?P<{}>{})".format(match.group("var"), self.GOOD)
+                formatter += "{" + match.group("var") + "}"
+                continue
+
+            match = self.DYN_WITH_RE.fullmatch(part)
+            if match:
+                pattern += "(?P<{var}>{re})".format(**match.groupdict())
+                formatter += "{" + match.group("var") + "}"
+                continue
+
+            if "{" in part or "}" in part:
+                raise ValueError(f"Invalid path '{path}'['{part}']")
+
+            part = _requote_path(part)
+            formatter += part
+            pattern += re.escape(part)
+
+        try:
+            compiled = re.compile(pattern)
+        except re.error as exc:
+            raise ValueError(f"Bad pattern '{pattern}': {exc}") from None
+        assert compiled.pattern.startswith(PATH_SEP)
+        assert formatter.startswith("/")
+        self._pattern = compiled
+        self._formatter = formatter
+
+    @property
+    def canonical(self) -> str:
+        return self._formatter
+
+    def add_prefix(self, prefix: str) -> None:
+        assert prefix.startswith("/")
+        assert not prefix.endswith("/")
+        assert len(prefix) > 1
+        self._pattern = re.compile(re.escape(prefix) + self._pattern.pattern)
+        self._formatter = prefix + self._formatter
+
+    def _match(self, path: str) -> Optional[Dict[str, str]]:
+        match = self._pattern.fullmatch(path)
+        if match is None:
+            return None
+        return {
+            key: _unquote_path_safe(value) for key, value in match.groupdict().items()
+        }
+
+    def raw_match(self, path: str) -> bool:
+        return self._orig_path == path
+
+    def get_info(self) -> _InfoDict:
+        return {"formatter": self._formatter, "pattern": self._pattern}
+
+    def url_for(self, **parts: str) -> URL:
+        url = self._formatter.format_map({k: _quote_path(v) for k, v in parts.items()})
+        return URL.build(path=url, encoded=True)
+
+    def __repr__(self) -> str:
+        name = "'" + self.name + "' " if self.name is not None else ""
+        return "<DynamicResource {name} {formatter}>".format(
+            name=name, formatter=self._formatter
+        )
+
+
+class PrefixResource(AbstractResource):
+    def __init__(self, prefix: str, *, name: Optional[str] = None) -> None:
+        assert not prefix or prefix.startswith("/"), prefix
+        assert prefix in ("", "/") or not prefix.endswith("/"), prefix
+        super().__init__(name=name)
+        self._prefix = _requote_path(prefix)
+        self._prefix2 = self._prefix + "/"
+
+    @property
+    def canonical(self) -> str:
+        return self._prefix
+
+    def add_prefix(self, prefix: str) -> None:
+        assert
prefix.startswith("/") + assert not prefix.endswith("/") + assert len(prefix) > 1 + self._prefix = prefix + self._prefix + self._prefix2 = self._prefix + "/" + + def raw_match(self, prefix: str) -> bool: + return False + + # TODO: impl missing abstract methods + + +class StaticResource(PrefixResource): + VERSION_KEY = "v" + + def __init__( + self, + prefix: str, + directory: PathLike, + *, + name: Optional[str] = None, + expect_handler: Optional[_ExpectHandler] = None, + chunk_size: int = 256 * 1024, + show_index: bool = False, + follow_symlinks: bool = False, + append_version: bool = False, + ) -> None: + super().__init__(prefix, name=name) + try: + directory = Path(directory).expanduser().resolve(strict=True) + except FileNotFoundError as error: + raise ValueError(f"'{directory}' does not exist") from error + if not directory.is_dir(): + raise ValueError(f"'{directory}' is not a directory") + self._directory = directory + self._show_index = show_index + self._chunk_size = chunk_size + self._follow_symlinks = follow_symlinks + self._expect_handler = expect_handler + self._append_version = append_version + + self._routes = { + "GET": ResourceRoute( + "GET", self._handle, self, expect_handler=expect_handler + ), + "HEAD": ResourceRoute( + "HEAD", self._handle, self, expect_handler=expect_handler + ), + } + self._allowed_methods = set(self._routes) + + def url_for( # type: ignore[override] + self, + *, + filename: PathLike, + append_version: Optional[bool] = None, + ) -> URL: + if append_version is None: + append_version = self._append_version + filename = str(filename).lstrip("/") + + url = URL.build(path=self._prefix, encoded=True) + # filename is not encoded + if YARL_VERSION < (1, 6): + url = url / filename.replace("%", "%25") + else: + url = url / filename + + if append_version: + unresolved_path = self._directory.joinpath(filename) + try: + if self._follow_symlinks: + normalized_path = Path(os.path.normpath(unresolved_path)) + normalized_path.relative_to(self._directory) + filepath = normalized_path.resolve() + else: + filepath = unresolved_path.resolve() + filepath.relative_to(self._directory) + except (ValueError, FileNotFoundError): + # ValueError for case when path point to symlink + # with follow_symlinks is False + return url # relatively safe + if filepath.is_file(): + # TODO cache file content + # with file watcher for cache invalidation + with filepath.open("rb") as f: + file_bytes = f.read() + h = self._get_file_hash(file_bytes) + url = url.with_query({self.VERSION_KEY: h}) + return url + return url + + @staticmethod + def _get_file_hash(byte_array: bytes) -> str: + m = hashlib.sha256() # todo sha256 can be configurable param + m.update(byte_array) + b64 = base64.urlsafe_b64encode(m.digest()) + return b64.decode("ascii") + + def get_info(self) -> _InfoDict: + return { + "directory": self._directory, + "prefix": self._prefix, + "routes": self._routes, + } + + def set_options_route(self, handler: Handler) -> None: + if "OPTIONS" in self._routes: + raise RuntimeError("OPTIONS route was set already") + self._routes["OPTIONS"] = ResourceRoute( + "OPTIONS", handler, self, expect_handler=self._expect_handler + ) + self._allowed_methods.add("OPTIONS") + + async def resolve(self, request: Request) -> _Resolve: + path = request.rel_url.path_safe + method = request.method + # We normalise here to avoid matches that traverse below the static root. + # e.g. 
/static/../../../../home/user/webapp/static/
+        norm_path = os.path.normpath(path)
+        if IS_WINDOWS:
+            norm_path = norm_path.replace("\\", "/")
+        if not norm_path.startswith(self._prefix2) and norm_path != self._prefix:
+            return None, set()
+
+        allowed_methods = self._allowed_methods
+        if method not in allowed_methods:
+            return None, allowed_methods
+
+        match_dict = {"filename": _unquote_path_safe(path[len(self._prefix) + 1 :])}
+        return (UrlMappingMatchInfo(match_dict, self._routes[method]), allowed_methods)
+
+    def __len__(self) -> int:
+        return len(self._routes)
+
+    def __iter__(self) -> Iterator[AbstractRoute]:
+        return iter(self._routes.values())
+
+    async def _handle(self, request: Request) -> StreamResponse:
+        filename = request.match_info["filename"]
+        unresolved_path = self._directory.joinpath(filename)
+        loop = asyncio.get_running_loop()
+        return await loop.run_in_executor(
+            None, self._resolve_path_to_response, unresolved_path
+        )
+
+    def _resolve_path_to_response(self, unresolved_path: Path) -> StreamResponse:
+        """Take the unresolved path and query the file system to form a response."""
+        # Check for access outside the root directory. For follow symlinks, URI
+        # cannot traverse out, but symlinks can. Otherwise, no access outside
+        # root is permitted.
+        try:
+            if self._follow_symlinks:
+                normalized_path = Path(os.path.normpath(unresolved_path))
+                normalized_path.relative_to(self._directory)
+                file_path = normalized_path.resolve()
+            else:
+                file_path = unresolved_path.resolve()
+                file_path.relative_to(self._directory)
+        except (ValueError, *CIRCULAR_SYMLINK_ERROR) as error:
+            # ValueError is raised for the relative check. Circular symlinks
+            # raise here on resolving for python < 3.13.
+            raise HTTPNotFound() from error
+
+        # if path is a directory, return the contents if permitted. Note the
+        # directory check will raise if a segment is not readable.
+        try:
+            if file_path.is_dir():
+                if self._show_index:
+                    return Response(
+                        text=self._directory_as_html(file_path),
+                        content_type="text/html",
+                    )
+                else:
+                    raise HTTPForbidden()
+        except PermissionError as error:
+            raise HTTPForbidden() from error
+
+        # Return the file response, which handles all other checks.
+        return FileResponse(file_path, chunk_size=self._chunk_size)
+
+    def _directory_as_html(self, dir_path: Path) -> str:
+        """returns directory's index as html."""
+        assert dir_path.is_dir()
+
+        relative_path_to_dir = dir_path.relative_to(self._directory).as_posix()
+        index_of = f"Index of /{html_escape(relative_path_to_dir)}"
+        h1 = f"<h1>{index_of}</h1>"
+
+        index_list = []
+        dir_index = dir_path.iterdir()
+        for _file in sorted(dir_index):
+            # show file url as relative to static path
+            rel_path = _file.relative_to(self._directory).as_posix()
+            quoted_file_url = _quote_path(f"{self._prefix}/{rel_path}")
+
+            # if file is a directory, add '/' to the end of the name
+            if _file.is_dir():
+                file_name = f"{_file.name}/"
+            else:
+                file_name = _file.name
+
+            index_list.append(
+                f'<li><a href="{quoted_file_url}">{html_escape(file_name)}</a></li>'
+            )
+        ul = "<ul>\n{}\n</ul>".format("\n".join(index_list))
+        body = f"<body>\n{h1}\n{ul}\n</body>"
+
+        head_str = f"<head>\n<title>{index_of}</title>\n</head>"
+        html = f"<html>\n{head_str}\n{body}\n</html>"
+
+        return html
+
+    def __repr__(self) -> str:
+        name = "'" + self.name + "'" if self.name is not None else ""
+        return "<StaticResource {name} {path} -> {directory!r}>".format(
+            name=name, path=self._prefix, directory=self._directory
+        )
+
+
+class PrefixedSubAppResource(PrefixResource):
+    def __init__(self, prefix: str, app: "Application") -> None:
+        super().__init__(prefix)
+        self._app = app
+        self._add_prefix_to_resources(prefix)
+
+    def add_prefix(self, prefix: str) -> None:
+        super().add_prefix(prefix)
+        self._add_prefix_to_resources(prefix)
+
+    def _add_prefix_to_resources(self, prefix: str) -> None:
+        router = self._app.router
+        for resource in router.resources():
+            # Since the canonical path of a resource is about
+            # to change, we need to unindex it and then reindex
+            router.unindex_resource(resource)
+            resource.add_prefix(prefix)
+            router.index_resource(resource)
+
+    def url_for(self, *args: str, **kwargs: str) -> URL:
+        raise RuntimeError(".url_for() is not supported by sub-application root")
+
+    def get_info(self) -> _InfoDict:
+        return {"app": self._app, "prefix": self._prefix}
+
+    async def resolve(self, request: Request) -> _Resolve:
+        match_info = await self._app.router.resolve(request)
+        match_info.add_app(self._app)
+        if isinstance(match_info.http_exception, HTTPMethodNotAllowed):
+            methods = match_info.http_exception.allowed_methods
+        else:
+            methods = set()
+        return match_info, methods
+
+    def __len__(self) -> int:
+        return len(self._app.router.routes())
+
+    def __iter__(self) -> Iterator[AbstractRoute]:
+        return iter(self._app.router.routes())
+
+    def __repr__(self) -> str:
+        return "<PrefixedSubAppResource {prefix} -> {app!r}>".format(
+            prefix=self._prefix, app=self._app
+        )
+
+
+class AbstractRuleMatching(abc.ABC):
+    @abc.abstractmethod  # pragma: no branch
+    async def match(self, request: Request) -> bool:
+        """Return bool if the request satisfies the criteria"""
+
+    @abc.abstractmethod  # pragma: no branch
+    def get_info(self) -> _InfoDict:
+        """Return a dict with additional info useful for introspection"""
+
+    @property
+    @abc.abstractmethod  # pragma: no branch
+    def canonical(self) -> str:
+        """Return a str"""
+
+
+class Domain(AbstractRuleMatching):
+    re_part = re.compile(r"(?!-)[a-z\d-]{1,63}(?<!-)")
+
+    def __init__(self, domain: str) -> None:
+        super().__init__()
+        self._domain = self.validation(domain)
+
+    @property
+    def canonical(self) -> str:
+        return self._domain
+
+    def validation(self, domain: str) -> str:
+        if not isinstance(domain, str):
+            raise TypeError("Domain must be str")
+        domain = domain.rstrip(".").lower()
+        if not domain:
+            raise ValueError("Domain cannot be empty")
+        elif "://" in domain:
+            raise ValueError("Scheme not supported")
+        url = URL("http://" + domain)
+        assert url.raw_host is not None
+        if not all(self.re_part.fullmatch(x) for x in url.raw_host.split(".")):
+            raise ValueError("Domain not valid")
+        if url.port == 80:
+            return url.raw_host
+        return f"{url.raw_host}:{url.port}"
+
+    async def match(self, request: Request) -> bool:
+        host = request.headers.get(hdrs.HOST)
+        if not host:
+            return False
+        return self.match_domain(host)
+
+    def match_domain(self, host: str) -> bool:
+        return host.lower() == self._domain
+
+    def get_info(self) -> _InfoDict:
+        return {"domain": self._domain}
+
+
+class MaskDomain(Domain):
+    re_part = re.compile(r"(?!-)[a-z\d\*-]{1,63}(?<!-)")
+
+    def __init__(self, domain: str) -> None:
+        super().__init__(domain)
+        mask = self._domain.replace(".", r"\.").replace("*", ".*")
+        self._mask = re.compile(mask)
+
+    @property
+    def canonical(self) -> str:
+        return self._mask.pattern
+
+    def match_domain(self, host: str) -> bool:
+        return self._mask.fullmatch(host) is not None
+
+
+class MatchedSubAppResource(PrefixedSubAppResource):
+    def __init__(self, rule: AbstractRuleMatching, app: "Application") -> None:
+        AbstractResource.__init__(self)
+        self._prefix = ""
+        self._app = app
+        self._rule = rule
+
+    @property
+    def canonical(self) -> str:
+        return self._rule.canonical
+
+    def get_info(self) -> _InfoDict:
+        return {"app": self._app, "rule": self._rule}
+
+    async def resolve(self, request: Request) -> _Resolve:
+        if not await self._rule.match(request):
+            return None, set()
+        match_info = await self._app.router.resolve(request)
+        match_info.add_app(self._app)
+        if isinstance(match_info.http_exception, HTTPMethodNotAllowed):
+            methods = match_info.http_exception.allowed_methods
+        else:
+            methods = set()
+        return match_info, methods
+
+    def __repr__(self) -> str:
+        return f"<MatchedSubAppResource -> {self._app!r}>"
+
+
+class ResourceRoute(AbstractRoute):
+    """A route with resource"""
+
+    def __init__(
+        self,
+        method: str,
+        handler: Union[Handler, Type[AbstractView]],
+        resource: AbstractResource,
+        *,
+        expect_handler: Optional[_ExpectHandler] = None,
+    ) -> None:
+        super().__init__(
+            method, handler, expect_handler=expect_handler, resource=resource
+        )
+
+    def __repr__(self) -> str:
+        return "<ResourceRoute [{method}] {resource} -> {handler!r}".format(
+            method=self.method, resource=self._resource, handler=self.handler
+        )
+
+    @property
+    def name(self) -> Optional[str]:
+        if self._resource is None:
+            return None
+        return self._resource.name
+
+    def url_for(self, *args: str, **kwargs: str) -> URL:
+        """Construct url for route with additional params."""
+        assert self._resource is not None
+        return self._resource.url_for(*args, **kwargs)
+
+    def get_info(self) -> _InfoDict:
+        assert self._resource is not None
+        return self._resource.get_info()
+
+
+class SystemRoute(AbstractRoute):
+    def __init__(self, http_exception: HTTPException) -> None:
+        super().__init__(hdrs.METH_ANY, self._handle)
+        self._http_exception = http_exception
+
+    def url_for(self, *args: str, **kwargs: str) -> URL:
+        raise RuntimeError(".url_for() is not allowed for SystemRoute")
+
+    @property
+    def name(self) -> Optional[str]:
+        return None
+
+    def get_info(self) -> _InfoDict:
+        return {"http_exception": self._http_exception}
+
+    async def _handle(self, request: Request) -> StreamResponse:
+        raise self._http_exception
+
+    @property
+    def status(self) -> int:
+        return self._http_exception.status
+
+    @property
+    def reason(self) -> str:
+        return self._http_exception.reason
+
+    def __repr__(self) -> str:
+        return "<SystemRoute {self.status}: {self.reason}>".format(self=self)
+
+
+class View(AbstractView):
+    async def _iter(self) -> StreamResponse:
+        if self.request.method not in hdrs.METH_ALL:
+            self._raise_allowed_methods()
+        method: Optional[Callable[[], Awaitable[StreamResponse]]]
+        method = getattr(self, self.request.method.lower(), None)
+        if method is None:
+            self._raise_allowed_methods()
+        ret = await method()
+        assert isinstance(ret, StreamResponse)
+        return ret
+
+    def __await__(self) -> Generator[None, None, StreamResponse]:
+        return self._iter().__await__()
+
+    def _raise_allowed_methods(self) -> NoReturn:
+        allowed_methods = {m for m in hdrs.METH_ALL if hasattr(self, m.lower())}
+        raise HTTPMethodNotAllowed(self.request.method, allowed_methods)
+
+
+class ResourcesView(Sized,
Iterable[AbstractResource], Container[AbstractResource]): + def __init__(self, resources: List[AbstractResource]) -> None: + self._resources = resources + + def __len__(self) -> int: + return len(self._resources) + + def __iter__(self) -> Iterator[AbstractResource]: + yield from self._resources + + def __contains__(self, resource: object) -> bool: + return resource in self._resources + + +class RoutesView(Sized, Iterable[AbstractRoute], Container[AbstractRoute]): + def __init__(self, resources: List[AbstractResource]): + self._routes: List[AbstractRoute] = [] + for resource in resources: + for route in resource: + self._routes.append(route) + + def __len__(self) -> int: + return len(self._routes) + + def __iter__(self) -> Iterator[AbstractRoute]: + yield from self._routes + + def __contains__(self, route: object) -> bool: + return route in self._routes + + +class UrlDispatcher(AbstractRouter, Mapping[str, AbstractResource]): + + NAME_SPLIT_RE = re.compile(r"[.:-]") + + def __init__(self) -> None: + super().__init__() + self._resources: List[AbstractResource] = [] + self._named_resources: Dict[str, AbstractResource] = {} + self._resource_index: dict[str, list[AbstractResource]] = {} + self._matched_sub_app_resources: List[MatchedSubAppResource] = [] + + async def resolve(self, request: Request) -> UrlMappingMatchInfo: + resource_index = self._resource_index + allowed_methods: Set[str] = set() + + # MatchedSubAppResource is primarily used to match on domain names + # (though custom rules could match on other things). This means that + # the traversal algorithm below can't be applied, and that we likely + # need to check these first so a sub app that defines the same path + # as a parent app will get priority if there's a domain match. + # + # For most cases we do not expect there to be many of these since + # currently they are only added by `.add_domain()`. + for resource in self._matched_sub_app_resources: + match_dict, allowed = await resource.resolve(request) + if match_dict is not None: + return match_dict + else: + allowed_methods |= allowed + + # Walk the url parts looking for candidates. We walk the url backwards + # to ensure the most explicit match is found first. If there are multiple + # candidates for a given url part because there are multiple resources + # registered for the same canonical path, we resolve them in a linear + # fashion to ensure registration order is respected. 
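+        # Illustrative walk (hypothetical request path "/a/b/c"): candidates
+        # are probed under "/a/b/c", then "/a/b", then "/a", and finally "/".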
+ url_part = request.rel_url.path_safe + while url_part: + for candidate in resource_index.get(url_part, ()): + match_dict, allowed = await candidate.resolve(request) + if match_dict is not None: + return match_dict + else: + allowed_methods |= allowed + if url_part == "/": + break + url_part = url_part.rpartition("/")[0] or "/" + + if allowed_methods: + return MatchInfoError(HTTPMethodNotAllowed(request.method, allowed_methods)) + + return MatchInfoError(HTTPNotFound()) + + def __iter__(self) -> Iterator[str]: + return iter(self._named_resources) + + def __len__(self) -> int: + return len(self._named_resources) + + def __contains__(self, resource: object) -> bool: + return resource in self._named_resources + + def __getitem__(self, name: str) -> AbstractResource: + return self._named_resources[name] + + def resources(self) -> ResourcesView: + return ResourcesView(self._resources) + + def routes(self) -> RoutesView: + return RoutesView(self._resources) + + def named_resources(self) -> Mapping[str, AbstractResource]: + return MappingProxyType(self._named_resources) + + def register_resource(self, resource: AbstractResource) -> None: + assert isinstance( + resource, AbstractResource + ), f"Instance of AbstractResource class is required, got {resource!r}" + if self.frozen: + raise RuntimeError("Cannot register a resource into frozen router.") + + name = resource.name + + if name is not None: + parts = self.NAME_SPLIT_RE.split(name) + for part in parts: + if keyword.iskeyword(part): + raise ValueError( + f"Incorrect route name {name!r}, " + "python keywords cannot be used " + "for route name" + ) + if not part.isidentifier(): + raise ValueError( + "Incorrect route name {!r}, " + "the name should be a sequence of " + "python identifiers separated " + "by dash, dot or column".format(name) + ) + if name in self._named_resources: + raise ValueError( + "Duplicate {!r}, " + "already handled by {!r}".format(name, self._named_resources[name]) + ) + self._named_resources[name] = resource + self._resources.append(resource) + + if isinstance(resource, MatchedSubAppResource): + # We cannot index match sub-app resources because they have match rules + self._matched_sub_app_resources.append(resource) + else: + self.index_resource(resource) + + def _get_resource_index_key(self, resource: AbstractResource) -> str: + """Return a key to index the resource in the resource index.""" + if "{" in (index_key := resource.canonical): + # strip at the first { to allow for variables, and than + # rpartition at / to allow for variable parts in the path + # For example if the canonical path is `/core/locations{tail:.*}` + # the index key will be `/core` since index is based on the + # url parts split by `/` + index_key = index_key.partition("{")[0].rpartition("/")[0] + return index_key.rstrip("/") or "/" + + def index_resource(self, resource: AbstractResource) -> None: + """Add a resource to the resource index.""" + resource_key = self._get_resource_index_key(resource) + # There may be multiple resources for a canonical path + # so we keep them in a list to ensure that registration + # order is respected. 
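+        # Illustrative keys (derived from _get_resource_index_key above):
+        # "/users/{id}" indexes under "/users", a plain "/users" resource
+        # shares that same bucket, and "/" stays "/".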
+        self._resource_index.setdefault(resource_key, []).append(resource)
+
+    def unindex_resource(self, resource: AbstractResource) -> None:
+        """Remove a resource from the resource index."""
+        resource_key = self._get_resource_index_key(resource)
+        self._resource_index[resource_key].remove(resource)
+
+    def add_resource(self, path: str, *, name: Optional[str] = None) -> Resource:
+        if path and not path.startswith("/"):
+            raise ValueError("path should start with / or be empty")
+        # Reuse last added resource if path and name are the same
+        if self._resources:
+            resource = self._resources[-1]
+            if resource.name == name and resource.raw_match(path):
+                return cast(Resource, resource)
+        if not ("{" in path or "}" in path or ROUTE_RE.search(path)):
+            resource = PlainResource(path, name=name)
+            self.register_resource(resource)
+            return resource
+        resource = DynamicResource(path, name=name)
+        self.register_resource(resource)
+        return resource
+
+    def add_route(
+        self,
+        method: str,
+        path: str,
+        handler: Union[Handler, Type[AbstractView]],
+        *,
+        name: Optional[str] = None,
+        expect_handler: Optional[_ExpectHandler] = None,
+    ) -> AbstractRoute:
+        resource = self.add_resource(path, name=name)
+        return resource.add_route(method, handler, expect_handler=expect_handler)
+
+    def add_static(
+        self,
+        prefix: str,
+        path: PathLike,
+        *,
+        name: Optional[str] = None,
+        expect_handler: Optional[_ExpectHandler] = None,
+        chunk_size: int = 256 * 1024,
+        show_index: bool = False,
+        follow_symlinks: bool = False,
+        append_version: bool = False,
+    ) -> AbstractResource:
+        """Add static files view.
+
+        prefix - url prefix
+        path - folder with files
+
+        """
+        assert prefix.startswith("/")
+        if prefix.endswith("/"):
+            prefix = prefix[:-1]
+        resource = StaticResource(
+            prefix,
+            path,
+            name=name,
+            expect_handler=expect_handler,
+            chunk_size=chunk_size,
+            show_index=show_index,
+            follow_symlinks=follow_symlinks,
+            append_version=append_version,
+        )
+        self.register_resource(resource)
+        return resource
+
+    def add_head(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
+        """Shortcut for add_route with method HEAD."""
+        return self.add_route(hdrs.METH_HEAD, path, handler, **kwargs)
+
+    def add_options(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
+        """Shortcut for add_route with method OPTIONS."""
+        return self.add_route(hdrs.METH_OPTIONS, path, handler, **kwargs)
+
+    def add_get(
+        self,
+        path: str,
+        handler: Handler,
+        *,
+        name: Optional[str] = None,
+        allow_head: bool = True,
+        **kwargs: Any,
+    ) -> AbstractRoute:
+        """Shortcut for add_route with method GET.
+
+        If allow_head is true, another
+        route is added allowing head requests to the same endpoint.
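+
+        For example, ``router.add_get("/users/{name}", handler)`` registers
+        both a GET route and a HEAD route for that path.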
+ """ + resource = self.add_resource(path, name=name) + if allow_head: + resource.add_route(hdrs.METH_HEAD, handler, **kwargs) + return resource.add_route(hdrs.METH_GET, handler, **kwargs) + + def add_post(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute: + """Shortcut for add_route with method POST.""" + return self.add_route(hdrs.METH_POST, path, handler, **kwargs) + + def add_put(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute: + """Shortcut for add_route with method PUT.""" + return self.add_route(hdrs.METH_PUT, path, handler, **kwargs) + + def add_patch(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute: + """Shortcut for add_route with method PATCH.""" + return self.add_route(hdrs.METH_PATCH, path, handler, **kwargs) + + def add_delete(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute: + """Shortcut for add_route with method DELETE.""" + return self.add_route(hdrs.METH_DELETE, path, handler, **kwargs) + + def add_view( + self, path: str, handler: Type[AbstractView], **kwargs: Any + ) -> AbstractRoute: + """Shortcut for add_route with ANY methods for a class-based view.""" + return self.add_route(hdrs.METH_ANY, path, handler, **kwargs) + + def freeze(self) -> None: + super().freeze() + for resource in self._resources: + resource.freeze() + + def add_routes(self, routes: Iterable[AbstractRouteDef]) -> List[AbstractRoute]: + """Append routes to route table. + + Parameter should be a sequence of RouteDef objects. + + Returns a list of registered AbstractRoute instances. + """ + registered_routes = [] + for route_def in routes: + registered_routes.extend(route_def.register(self)) + return registered_routes + + +def _quote_path(value: str) -> str: + if YARL_VERSION < (1, 6): + value = value.replace("%", "%25") + return URL.build(path=value, encoded=False).raw_path + + +def _unquote_path_safe(value: str) -> str: + if "%" not in value: + return value + return value.replace("%2F", "/").replace("%25", "%") + + +def _requote_path(value: str) -> str: + # Quote non-ascii characters and other characters which must be quoted, + # but preserve existing %-sequences. + result = _quote_path(value) + if "%" in value: + result = result.replace("%25", "%") + return result diff --git a/py311/lib/python3.11/site-packages/aiohttp/web_ws.py b/py311/lib/python3.11/site-packages/aiohttp/web_ws.py new file mode 100644 index 0000000000000000000000000000000000000000..575f9a3dc8507d1e6b766333c9daec389313febd --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/web_ws.py @@ -0,0 +1,631 @@ +import asyncio +import base64 +import binascii +import hashlib +import json +import sys +from typing import Any, Final, Iterable, Optional, Tuple, Union, cast + +import attr +from multidict import CIMultiDict + +from . 
import hdrs +from ._websocket.reader import WebSocketDataQueue +from ._websocket.writer import DEFAULT_LIMIT +from .abc import AbstractStreamWriter +from .client_exceptions import WSMessageTypeError +from .helpers import calculate_timeout_when, set_exception, set_result +from .http import ( + WS_CLOSED_MESSAGE, + WS_CLOSING_MESSAGE, + WS_KEY, + WebSocketError, + WebSocketReader, + WebSocketWriter, + WSCloseCode, + WSMessage, + WSMsgType as WSMsgType, + ws_ext_gen, + ws_ext_parse, +) +from .http_websocket import _INTERNAL_RECEIVE_TYPES +from .log import ws_logger +from .streams import EofStream +from .typedefs import JSONDecoder, JSONEncoder +from .web_exceptions import HTTPBadRequest, HTTPException +from .web_request import BaseRequest +from .web_response import StreamResponse + +if sys.version_info >= (3, 11): + import asyncio as async_timeout +else: + import async_timeout + +__all__ = ( + "WebSocketResponse", + "WebSocketReady", + "WSMsgType", +) + +THRESHOLD_CONNLOST_ACCESS: Final[int] = 5 + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class WebSocketReady: + ok: bool + protocol: Optional[str] + + def __bool__(self) -> bool: + return self.ok + + +class WebSocketResponse(StreamResponse): + + _length_check: bool = False + _ws_protocol: Optional[str] = None + _writer: Optional[WebSocketWriter] = None + _reader: Optional[WebSocketDataQueue] = None + _closed: bool = False + _closing: bool = False + _conn_lost: int = 0 + _close_code: Optional[int] = None + _loop: Optional[asyncio.AbstractEventLoop] = None + _waiting: bool = False + _close_wait: Optional[asyncio.Future[None]] = None + _exception: Optional[BaseException] = None + _heartbeat_when: float = 0.0 + _heartbeat_cb: Optional[asyncio.TimerHandle] = None + _pong_response_cb: Optional[asyncio.TimerHandle] = None + _ping_task: Optional[asyncio.Task[None]] = None + + def __init__( + self, + *, + timeout: float = 10.0, + receive_timeout: Optional[float] = None, + autoclose: bool = True, + autoping: bool = True, + heartbeat: Optional[float] = None, + protocols: Iterable[str] = (), + compress: bool = True, + max_msg_size: int = 4 * 1024 * 1024, + writer_limit: int = DEFAULT_LIMIT, + ) -> None: + super().__init__(status=101) + self._protocols = protocols + self._timeout = timeout + self._receive_timeout = receive_timeout + self._autoclose = autoclose + self._autoping = autoping + self._heartbeat = heartbeat + if heartbeat is not None: + self._pong_heartbeat = heartbeat / 2.0 + self._compress: Union[bool, int] = compress + self._max_msg_size = max_msg_size + self._writer_limit = writer_limit + + def _cancel_heartbeat(self) -> None: + self._cancel_pong_response_cb() + if self._heartbeat_cb is not None: + self._heartbeat_cb.cancel() + self._heartbeat_cb = None + if self._ping_task is not None: + self._ping_task.cancel() + self._ping_task = None + + def _cancel_pong_response_cb(self) -> None: + if self._pong_response_cb is not None: + self._pong_response_cb.cancel() + self._pong_response_cb = None + + def _reset_heartbeat(self) -> None: + if self._heartbeat is None: + return + self._cancel_pong_response_cb() + req = self._req + timeout_ceil_threshold = ( + req._protocol._timeout_ceil_threshold if req is not None else 5 + ) + loop = self._loop + assert loop is not None + now = loop.time() + when = calculate_timeout_when(now, self._heartbeat, timeout_ceil_threshold) + self._heartbeat_when = when + if self._heartbeat_cb is None: + # We do not cancel the previous heartbeat_cb here because + # it generates a significant amount of TimerHandle 
churn
+            # which causes asyncio to rebuild the heap frequently.
+            # Instead _send_heartbeat() will reschedule the next
+            # heartbeat if it fires too early.
+            self._heartbeat_cb = loop.call_at(when, self._send_heartbeat)
+
+    def _send_heartbeat(self) -> None:
+        self._heartbeat_cb = None
+        loop = self._loop
+        assert loop is not None and self._writer is not None
+        now = loop.time()
+        if now < self._heartbeat_when:
+            # Heartbeat fired too early, reschedule
+            self._heartbeat_cb = loop.call_at(
+                self._heartbeat_when, self._send_heartbeat
+            )
+            return
+
+        req = self._req
+        timeout_ceil_threshold = (
+            req._protocol._timeout_ceil_threshold if req is not None else 5
+        )
+        when = calculate_timeout_when(now, self._pong_heartbeat, timeout_ceil_threshold)
+        self._cancel_pong_response_cb()
+        self._pong_response_cb = loop.call_at(when, self._pong_not_received)
+
+        coro = self._writer.send_frame(b"", WSMsgType.PING)
+        if sys.version_info >= (3, 12):
+            # Optimization for Python 3.12, try to send the ping
+            # immediately to avoid having to schedule
+            # the task on the event loop.
+            ping_task = asyncio.Task(coro, loop=loop, eager_start=True)
+        else:
+            ping_task = loop.create_task(coro)
+
+        if not ping_task.done():
+            self._ping_task = ping_task
+            ping_task.add_done_callback(self._ping_task_done)
+        else:
+            self._ping_task_done(ping_task)
+
+    def _ping_task_done(self, task: "asyncio.Task[None]") -> None:
+        """Callback for when the ping task completes."""
+        if not task.cancelled() and (exc := task.exception()):
+            self._handle_ping_pong_exception(exc)
+        self._ping_task = None
+
+    def _pong_not_received(self) -> None:
+        if self._req is not None and self._req.transport is not None:
+            self._handle_ping_pong_exception(
+                asyncio.TimeoutError(
+                    f"No PONG received after {self._pong_heartbeat} seconds"
+                )
+            )
+
+    def _handle_ping_pong_exception(self, exc: BaseException) -> None:
+        """Handle exceptions raised during ping/pong processing."""
+        if self._closed:
+            return
+        self._set_closed()
+        self._set_code_close_transport(WSCloseCode.ABNORMAL_CLOSURE)
+        self._exception = exc
+        if self._waiting and not self._closing and self._reader is not None:
+            self._reader.feed_data(WSMessage(WSMsgType.ERROR, exc, None), 0)
+
+    def _set_closed(self) -> None:
+        """Set the connection to closed.
+
+        Cancel any heartbeat timers and set the closed flag.
+        """
+        self._closed = True
+        self._cancel_heartbeat()
+
+    async def prepare(self, request: BaseRequest) -> AbstractStreamWriter:
+        # Run this pre-check first so that problems are not hidden by
+        # do_handshake() exceptions.
+        if self._payload_writer is not None:
+            return self._payload_writer
+
+        protocol, writer = self._pre_start(request)
+        payload_writer = await super().prepare(request)
+        assert payload_writer is not None
+        self._post_start(request, protocol, writer)
+        await payload_writer.drain()
+        return payload_writer
+
+    def _handshake(
+        self, request: BaseRequest
+    ) -> Tuple["CIMultiDict[str]", Optional[str], int, bool]:
+        headers = request.headers
+        if "websocket" != headers.get(hdrs.UPGRADE, "").lower().strip():
+            raise HTTPBadRequest(
+                text=(
+                    "No WebSocket UPGRADE hdr: {}\n Can "
+                    '"Upgrade" only to "WebSocket".'
+                ).format(headers.get(hdrs.UPGRADE))
+            )
+
+        if "upgrade" not in headers.get(hdrs.CONNECTION, "").lower():
+            raise HTTPBadRequest(
+                text="No CONNECTION upgrade hdr: {}".format(
+                    headers.get(hdrs.CONNECTION)
+                )
+            )
+
+        # find common sub-protocol between client and server
+        protocol: Optional[str] = None
+        if hdrs.SEC_WEBSOCKET_PROTOCOL in headers:
+            req_protocols = [
+                str(proto.strip())
+                for proto in headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",")
+            ]
+
+            for proto in req_protocols:
+                if proto in self._protocols:
+                    protocol = proto
+                    break
+            else:
+                # No overlap found: Return no protocol as per spec
+                ws_logger.warning(
+                    "%s: Client protocols %r don’t overlap server-known ones %r",
+                    request.remote,
+                    req_protocols,
+                    self._protocols,
+                )
+
+        # check supported version
+        version = headers.get(hdrs.SEC_WEBSOCKET_VERSION, "")
+        if version not in ("13", "8", "7"):
+            raise HTTPBadRequest(text=f"Unsupported version: {version}")
+
+        # check client handshake for validity
+        key = headers.get(hdrs.SEC_WEBSOCKET_KEY)
+        try:
+            if not key or len(base64.b64decode(key)) != 16:
+                raise HTTPBadRequest(text=f"Handshake error: {key!r}")
+        except binascii.Error:
+            raise HTTPBadRequest(text=f"Handshake error: {key!r}") from None
+
+        accept_val = base64.b64encode(
+            hashlib.sha1(key.encode() + WS_KEY).digest()
+        ).decode()
+        response_headers = CIMultiDict(
+            {
+                hdrs.UPGRADE: "websocket",
+                hdrs.CONNECTION: "upgrade",
+                hdrs.SEC_WEBSOCKET_ACCEPT: accept_val,
+            }
+        )
+
+        notakeover = False
+        compress = 0
+        if self._compress:
+            extensions = headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS)
+            # The server side always gets a result here with no exception
+            # raised; if something went wrong, just drop the compress extension.
+            compress, notakeover = ws_ext_parse(extensions, isserver=True)
+            if compress:
+                enabledext = ws_ext_gen(
+                    compress=compress, isserver=True, server_notakeover=notakeover
+                )
+                response_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = enabledext
+
+        if protocol:
+            response_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = protocol
+        return (
+            response_headers,
+            protocol,
+            compress,
+            notakeover,
+        )
+
+    def _pre_start(self, request: BaseRequest) -> Tuple[Optional[str], WebSocketWriter]:
+        self._loop = request._loop
+
+        headers, protocol, compress, notakeover = self._handshake(request)
+
+        self.set_status(101)
+        self.headers.update(headers)
+        self.force_close()
+        self._compress = compress
+        transport = request._protocol.transport
+        assert transport is not None
+        writer = WebSocketWriter(
+            request._protocol,
+            transport,
+            compress=compress,
+            notakeover=notakeover,
+            limit=self._writer_limit,
+        )
+
+        return protocol, writer
+
+    def _post_start(
+        self, request: BaseRequest, protocol: Optional[str], writer: WebSocketWriter
+    ) -> None:
+        self._ws_protocol = protocol
+        self._writer = writer
+
+        self._reset_heartbeat()
+
+        loop = self._loop
+        assert loop is not None
+        self._reader = WebSocketDataQueue(request._protocol, 2**16, loop=loop)
+        request.protocol.set_parser(
+            WebSocketReader(
+                self._reader, self._max_msg_size, compress=bool(self._compress)
+            )
+        )
+        # disable HTTP keepalive for WebSocket
+        request.protocol.keep_alive(False)
+
+    def can_prepare(self, request: BaseRequest) -> WebSocketReady:
+        if self._writer is not None:
+            raise RuntimeError("Already started")
+        try:
+            _, protocol, _, _ = self._handshake(request)
+        except HTTPException:
+            return WebSocketReady(False, None)
+        else:
+            return WebSocketReady(True, protocol)
+
+    @property
+    def prepared(self) -> bool:
+        return self._writer is not None
+
+    @property
+    def closed(self) -> bool:
+        return self._closed
+
+    @property
+    def close_code(self) -> Optional[int]:
+        return self._close_code
+
+    @property
+    def ws_protocol(self) -> Optional[str]:
+        return self._ws_protocol
+
+    @property
+    def compress(self) -> Union[int, bool]:
+        return self._compress
+
+    def get_extra_info(self, name: str, default: Any = None) -> Any:
+        """Get optional transport information.
+
+        If no value associated with ``name`` is found, ``default`` is returned.
+        """
+        writer = self._writer
+        if writer is None:
+            return default
+        transport = writer.transport
+        if transport is None:
+            return default
+        return transport.get_extra_info(name, default)
+
+    def exception(self) -> Optional[BaseException]:
+        return self._exception
+
+    async def ping(self, message: bytes = b"") -> None:
+        if self._writer is None:
+            raise RuntimeError("Call .prepare() first")
+        await self._writer.send_frame(message, WSMsgType.PING)
+
+    async def pong(self, message: bytes = b"") -> None:
+        # unsolicited pong
+        if self._writer is None:
+            raise RuntimeError("Call .prepare() first")
+        await self._writer.send_frame(message, WSMsgType.PONG)
+
+    async def send_frame(
+        self, message: bytes, opcode: WSMsgType, compress: Optional[int] = None
+    ) -> None:
+        """Send a frame over the websocket."""
+        if self._writer is None:
+            raise RuntimeError("Call .prepare() first")
+        await self._writer.send_frame(message, opcode, compress)
+
+    async def send_str(self, data: str, compress: Optional[int] = None) -> None:
+        if self._writer is None:
+            raise RuntimeError("Call .prepare() first")
+        if not isinstance(data, str):
+            raise TypeError("data argument must be str (%r)" % type(data))
+        await self._writer.send_frame(
+            data.encode("utf-8"), WSMsgType.TEXT, compress=compress
+        )
+
+    async def send_bytes(self, data: bytes, compress: Optional[int] = None) -> None:
+        if self._writer is None:
+            raise RuntimeError("Call .prepare() first")
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError("data argument must be byte-ish (%r)" % type(data))
+        await self._writer.send_frame(data, WSMsgType.BINARY, compress=compress)
+
+    async def send_json(
+        self,
+        data: Any,
+        compress: Optional[int] = None,
+        *,
+        dumps: JSONEncoder = json.dumps,
+    ) -> None:
+        await self.send_str(dumps(data), compress=compress)
+
+    async def write_eof(self) -> None:  # type: ignore[override]
+        if self._eof_sent:
+            return
+        if self._payload_writer is None:
+            raise RuntimeError("Response has not been started")
+
+        await self.close()
+        self._eof_sent = True
+
+    async def close(
+        self, *, code: int = WSCloseCode.OK, message: bytes = b"", drain: bool = True
+    ) -> bool:
+        """Close websocket connection."""
+        if self._writer is None:
+            raise RuntimeError("Call .prepare() first")
+
+        if self._closed:
+            return False
+        self._set_closed()
+
+        try:
+            await self._writer.close(code, message)
+            writer = self._payload_writer
+            assert writer is not None
+            if drain:
+                await writer.drain()
+        except (asyncio.CancelledError, asyncio.TimeoutError):
+            self._set_code_close_transport(WSCloseCode.ABNORMAL_CLOSURE)
+            raise
+        except Exception as exc:
+            self._exception = exc
+            self._set_code_close_transport(WSCloseCode.ABNORMAL_CLOSURE)
+            return True
+
+        reader = self._reader
+        assert reader is not None
+        # we need to break the `receive()` cycle before we can call
+        # `reader.read()`, as `close()` may be called from a different task
+        if self._waiting:
+            assert self._loop is not None
+            assert self._close_wait is None
+            self._close_wait = self._loop.create_future()
+            reader.feed_data(WS_CLOSING_MESSAGE, 0)
+            await self._close_wait
+
+        if self._closing:
+            self._close_transport()
+            return True
+
+        try:
+            async with async_timeout.timeout(self._timeout):
+                while True:
+                    msg = await reader.read()
+                    if msg.type is WSMsgType.CLOSE:
+                        self._set_code_close_transport(msg.data)
+                        return True
+        except asyncio.CancelledError:
+            self._set_code_close_transport(WSCloseCode.ABNORMAL_CLOSURE)
+            raise
+        except Exception as exc:
+            self._exception = exc
+            self._set_code_close_transport(WSCloseCode.ABNORMAL_CLOSURE)
+            return True
+
+    def _set_closing(self, code: WSCloseCode) -> None:
+        """Set the close code and mark the connection as closing."""
+        self._closing = True
+        self._close_code = code
+        self._cancel_heartbeat()
+
+    def _set_code_close_transport(self, code: WSCloseCode) -> None:
+        """Set the close code and close the transport."""
+        self._close_code = code
+        self._close_transport()
+
+    def _close_transport(self) -> None:
+        """Close the transport."""
+        if self._req is not None and self._req.transport is not None:
+            self._req.transport.close()
+
+    async def receive(self, timeout: Optional[float] = None) -> WSMessage:
+        if self._reader is None:
+            raise RuntimeError("Call .prepare() first")
+
+        receive_timeout = timeout or self._receive_timeout
+        while True:
+            if self._waiting:
+                raise RuntimeError("Concurrent call to receive() is not allowed")
+
+            if self._closed:
+                self._conn_lost += 1
+                if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS:
+                    raise RuntimeError("WebSocket connection is closed.")
+                return WS_CLOSED_MESSAGE
+            elif self._closing:
+                return WS_CLOSING_MESSAGE
+
+            try:
+                self._waiting = True
+                try:
+                    if receive_timeout:
+                        # Entering the context manager and creating
+                        # Timeout() object can take almost 50% of the
+                        # run time in this loop so we avoid it if
+                        # there is no read timeout.
+                        async with async_timeout.timeout(receive_timeout):
+                            msg = await self._reader.read()
+                    else:
+                        msg = await self._reader.read()
+                    self._reset_heartbeat()
+                finally:
+                    self._waiting = False
+                    if self._close_wait:
+                        set_result(self._close_wait, None)
+            except asyncio.TimeoutError:
+                raise
+            except EofStream:
+                self._close_code = WSCloseCode.OK
+                await self.close()
+                return WSMessage(WSMsgType.CLOSED, None, None)
+            except WebSocketError as exc:
+                self._close_code = exc.code
+                await self.close(code=exc.code)
+                return WSMessage(WSMsgType.ERROR, exc, None)
+            except Exception as exc:
+                self._exception = exc
+                self._set_closing(WSCloseCode.ABNORMAL_CLOSURE)
+                await self.close()
+                return WSMessage(WSMsgType.ERROR, exc, None)
+
+            if msg.type not in _INTERNAL_RECEIVE_TYPES:
+                # If it's not a close/closing/ping/pong message
+                # we can return it immediately
+                return msg
+
+            if msg.type is WSMsgType.CLOSE:
+                self._set_closing(msg.data)
+                # Could be closed while awaiting reader.
+                if not self._closed and self._autoclose:
+                    # The client is likely going to close the
+                    # connection out from under us so we do not
+                    # want to drain any pending writes as it will
+                    # likely result in writing to a broken pipe.
+ await self.close(drain=False) + elif msg.type is WSMsgType.CLOSING: + self._set_closing(WSCloseCode.OK) + elif msg.type is WSMsgType.PING and self._autoping: + await self.pong(msg.data) + continue + elif msg.type is WSMsgType.PONG and self._autoping: + continue + + return msg + + async def receive_str(self, *, timeout: Optional[float] = None) -> str: + msg = await self.receive(timeout) + if msg.type is not WSMsgType.TEXT: + raise WSMessageTypeError( + f"Received message {msg.type}:{msg.data!r} is not WSMsgType.TEXT" + ) + return cast(str, msg.data) + + async def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes: + msg = await self.receive(timeout) + if msg.type is not WSMsgType.BINARY: + raise WSMessageTypeError( + f"Received message {msg.type}:{msg.data!r} is not WSMsgType.BINARY" + ) + return cast(bytes, msg.data) + + async def receive_json( + self, *, loads: JSONDecoder = json.loads, timeout: Optional[float] = None + ) -> Any: + data = await self.receive_str(timeout=timeout) + return loads(data) + + async def write(self, data: bytes) -> None: + raise RuntimeError("Cannot call .write() for websocket") + + def __aiter__(self) -> "WebSocketResponse": + return self + + async def __anext__(self) -> WSMessage: + msg = await self.receive() + if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED): + raise StopAsyncIteration + return msg + + def _cancel(self, exc: BaseException) -> None: + # web_protocol calls this from connection_lost + # or when the server is shutting down. + self._closing = True + self._cancel_heartbeat() + if self._reader is not None: + set_exception(self._reader, exc) diff --git a/py311/lib/python3.11/site-packages/aiohttp/worker.py b/py311/lib/python3.11/site-packages/aiohttp/worker.py new file mode 100644 index 0000000000000000000000000000000000000000..f7281bfde7541412c3174aa5fdcb859fa1b7a996 --- /dev/null +++ b/py311/lib/python3.11/site-packages/aiohttp/worker.py @@ -0,0 +1,255 @@ +"""Async gunicorn worker for aiohttp.web""" + +import asyncio +import inspect +import os +import re +import signal +import sys +from types import FrameType +from typing import TYPE_CHECKING, Any, Optional + +from gunicorn.config import AccessLogFormat as GunicornAccessLogFormat +from gunicorn.workers import base + +from aiohttp import web + +from .helpers import set_result +from .web_app import Application +from .web_log import AccessLogger + +if TYPE_CHECKING: + import ssl + + SSLContext = ssl.SSLContext +else: + try: + import ssl + + SSLContext = ssl.SSLContext + except ImportError: # pragma: no cover + ssl = None # type: ignore[assignment] + SSLContext = object # type: ignore[misc,assignment] + + +__all__ = ("GunicornWebWorker", "GunicornUVLoopWebWorker") + + +class GunicornWebWorker(base.Worker): # type: ignore[misc,no-any-unimported] + + DEFAULT_AIOHTTP_LOG_FORMAT = AccessLogger.LOG_FORMAT + DEFAULT_GUNICORN_LOG_FORMAT = GunicornAccessLogFormat.default + + def __init__(self, *args: Any, **kw: Any) -> None: # pragma: no cover + super().__init__(*args, **kw) + + self._task: Optional[asyncio.Task[None]] = None + self.exit_code = 0 + self._notify_waiter: Optional[asyncio.Future[bool]] = None + + def init_process(self) -> None: + # create new event_loop after fork + asyncio.get_event_loop().close() + + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + + super().init_process() + + def run(self) -> None: + self._task = self.loop.create_task(self._run()) + + try: # ignore all finalization problems + self.loop.run_until_complete(self._task) 
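+            # run_until_complete() re-raises any exception that escapes
+            # _run(); it is logged below instead of crashing the worker.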
+        except Exception:
+            self.log.exception("Exception in gunicorn worker")
+        self.loop.run_until_complete(self.loop.shutdown_asyncgens())
+        self.loop.close()
+
+        sys.exit(self.exit_code)
+
+    async def _run(self) -> None:
+        runner = None
+        if isinstance(self.wsgi, Application):
+            app = self.wsgi
+        elif inspect.iscoroutinefunction(self.wsgi) or (
+            sys.version_info < (3, 14) and asyncio.iscoroutinefunction(self.wsgi)
+        ):
+            wsgi = await self.wsgi()
+            if isinstance(wsgi, web.AppRunner):
+                runner = wsgi
+                app = runner.app
+            else:
+                app = wsgi
+        else:
+            raise RuntimeError(
+                "wsgi app should be either Application or "
+                "async function returning Application, got {}".format(self.wsgi)
+            )
+
+        if runner is None:
+            access_log = self.log.access_log if self.cfg.accesslog else None
+            runner = web.AppRunner(
+                app,
+                logger=self.log,
+                keepalive_timeout=self.cfg.keepalive,
+                access_log=access_log,
+                access_log_format=self._get_valid_log_format(
+                    self.cfg.access_log_format
+                ),
+                shutdown_timeout=self.cfg.graceful_timeout / 100 * 95,
+            )
+        await runner.setup()
+
+        ctx = self._create_ssl_context(self.cfg) if self.cfg.is_ssl else None
+
+        assert runner is not None
+        server = runner.server
+        assert server is not None
+        for sock in self.sockets:
+            site = web.SockSite(
+                runner,
+                sock,
+                ssl_context=ctx,
+            )
+            await site.start()
+
+        # If our parent changed then we shut down.
+        pid = os.getpid()
+        try:
+            while self.alive:  # type: ignore[has-type]
+                self.notify()
+
+                cnt = server.requests_count
+                if self.max_requests and cnt > self.max_requests:
+                    self.alive = False
+                    self.log.info("Max requests, shutting down: %s", self)
+
+                elif pid == os.getpid() and self.ppid != os.getppid():
+                    self.alive = False
+                    self.log.info("Parent changed, shutting down: %s", self)
+                else:
+                    await self._wait_next_notify()
+        except BaseException:
+            pass
+
+        await runner.cleanup()
+
+    def _wait_next_notify(self) -> "asyncio.Future[bool]":
+        self._notify_waiter_done()
+
+        loop = self.loop
+        assert loop is not None
+        self._notify_waiter = waiter = loop.create_future()
+        self.loop.call_later(1.0, self._notify_waiter_done, waiter)
+
+        return waiter
+
+    def _notify_waiter_done(
+        self, waiter: Optional["asyncio.Future[bool]"] = None
+    ) -> None:
+        if waiter is None:
+            waiter = self._notify_waiter
+        if waiter is not None:
+            set_result(waiter, True)
+
+        if waiter is self._notify_waiter:
+            self._notify_waiter = None
+
+    def init_signals(self) -> None:
+        # Set up signals through the event loop API.
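+        # Unlike signal.signal(), handlers registered through
+        # loop.add_signal_handler() run in the event loop thread,
+        # so they can safely interact with loop state.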
+ + self.loop.add_signal_handler( + signal.SIGQUIT, self.handle_quit, signal.SIGQUIT, None + ) + + self.loop.add_signal_handler( + signal.SIGTERM, self.handle_exit, signal.SIGTERM, None + ) + + self.loop.add_signal_handler( + signal.SIGINT, self.handle_quit, signal.SIGINT, None + ) + + self.loop.add_signal_handler( + signal.SIGWINCH, self.handle_winch, signal.SIGWINCH, None + ) + + self.loop.add_signal_handler( + signal.SIGUSR1, self.handle_usr1, signal.SIGUSR1, None + ) + + self.loop.add_signal_handler( + signal.SIGABRT, self.handle_abort, signal.SIGABRT, None + ) + + # Don't let SIGTERM and SIGUSR1 disturb active requests + # by interrupting system calls + signal.siginterrupt(signal.SIGTERM, False) + signal.siginterrupt(signal.SIGUSR1, False) + # Reset signals so Gunicorn doesn't swallow subprocess return codes + # See: https://github.com/aio-libs/aiohttp/issues/6130 + + def handle_quit(self, sig: int, frame: Optional[FrameType]) -> None: + self.alive = False + + # worker_int callback + self.cfg.worker_int(self) + + # wakeup closing process + self._notify_waiter_done() + + def handle_abort(self, sig: int, frame: Optional[FrameType]) -> None: + self.alive = False + self.exit_code = 1 + self.cfg.worker_abort(self) + sys.exit(1) + + @staticmethod + def _create_ssl_context(cfg: Any) -> "SSLContext": + """Creates SSLContext instance for usage in asyncio.create_server. + + See ssl.SSLSocket.__init__ for more details. + """ + if ssl is None: # pragma: no cover + raise RuntimeError("SSL is not supported.") + + ctx = ssl.SSLContext(cfg.ssl_version) + ctx.load_cert_chain(cfg.certfile, cfg.keyfile) + ctx.verify_mode = cfg.cert_reqs + if cfg.ca_certs: + ctx.load_verify_locations(cfg.ca_certs) + if cfg.ciphers: + ctx.set_ciphers(cfg.ciphers) + return ctx + + def _get_valid_log_format(self, source_format: str) -> str: + if source_format == self.DEFAULT_GUNICORN_LOG_FORMAT: + return self.DEFAULT_AIOHTTP_LOG_FORMAT + elif re.search(r"%\([^\)]+\)", source_format): + raise ValueError( + "Gunicorn's style options in form of `%(name)s` are not " + "supported for the log formatting. Please use aiohttp's " + "format specification to configure access log formatting: " + "http://docs.aiohttp.org/en/stable/logging.html" + "#format-specification" + ) + else: + return source_format + + +class GunicornUVLoopWebWorker(GunicornWebWorker): + def init_process(self) -> None: + import uvloop + + # Close any existing event loop before setting a + # new policy. + asyncio.get_event_loop().close() + + # Setup uvloop policy, so that every + # asyncio.get_event_loop() will create an instance + # of uvloop event loop. + asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) + + super().init_process() diff --git a/py311/lib/python3.11/site-packages/backoff/__init__.py b/py311/lib/python3.11/site-packages/backoff/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..196274d72035ae9da20a236c51affe405240d5cf --- /dev/null +++ b/py311/lib/python3.11/site-packages/backoff/__init__.py @@ -0,0 +1,30 @@ +# coding:utf-8 +""" +Function decoration for backoff and retry + +This module provides function decorators which can be used to wrap a +function such that it will be retried until some condition is met. It +is meant to be of use when accessing unreliable resources with the +potential for intermittent failures i.e. network resources and external +APIs. Somewhat more generally, it may also be of use for dynamically +polling resources for externally generated content. 
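+
+For example (an illustrative sketch; assumes the external ``requests``
+package)::
+
+    @backoff.on_exception(backoff.expo,
+                          requests.exceptions.RequestException,
+                          max_tries=5)
+    def get_url(url):
+        return requests.get(url)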
+
+For examples and full documentation see the README at
+https://github.com/litl/backoff
+"""
+from backoff._decorator import on_exception, on_predicate
+from backoff._jitter import full_jitter, random_jitter
+from backoff._wait_gen import constant, expo, fibo, runtime
+
+__all__ = [
+    'on_predicate',
+    'on_exception',
+    'constant',
+    'expo',
+    'fibo',
+    'runtime',
+    'full_jitter',
+    'random_jitter',
+]
+
+__version__ = "2.2.1"
diff --git a/py311/lib/python3.11/site-packages/backoff/_async.py b/py311/lib/python3.11/site-packages/backoff/_async.py
new file mode 100644
index 0000000000000000000000000000000000000000..82fd4773581587b7cea95a28fbcdda8d423f0d16
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/backoff/_async.py
@@ -0,0 +1,188 @@
+# coding:utf-8
+import datetime
+import functools
+import asyncio
+from datetime import timedelta
+
+from backoff._common import (_init_wait_gen, _maybe_call, _next_wait)
+
+
+def _ensure_coroutine(coro_or_func):
+    if asyncio.iscoroutinefunction(coro_or_func):
+        return coro_or_func
+    else:
+        @functools.wraps(coro_or_func)
+        async def f(*args, **kwargs):
+            return coro_or_func(*args, **kwargs)
+        return f
+
+
+def _ensure_coroutines(coros_or_funcs):
+    return [_ensure_coroutine(f) for f in coros_or_funcs]
+
+
+async def _call_handlers(handlers,
+                         *,
+                         target, args, kwargs, tries, elapsed,
+                         **extra):
+    details = {
+        'target': target,
+        'args': args,
+        'kwargs': kwargs,
+        'tries': tries,
+        'elapsed': elapsed,
+    }
+    details.update(extra)
+    for handler in handlers:
+        await handler(details)
+
+
+def retry_predicate(target, wait_gen, predicate,
+                    *,
+                    max_tries, max_time, jitter,
+                    on_success, on_backoff, on_giveup,
+                    wait_gen_kwargs):
+    on_success = _ensure_coroutines(on_success)
+    on_backoff = _ensure_coroutines(on_backoff)
+    on_giveup = _ensure_coroutines(on_giveup)
+
+    # Easy to implement, please report if you need this.
+    assert not asyncio.iscoroutinefunction(max_tries)
+    assert not asyncio.iscoroutinefunction(jitter)
+
+    assert asyncio.iscoroutinefunction(target)
+
+    @functools.wraps(target)
+    async def retry(*args, **kwargs):
+
+        # update variables from outer function args
+        max_tries_value = _maybe_call(max_tries)
+        max_time_value = _maybe_call(max_time)
+
+        tries = 0
+        start = datetime.datetime.now()
+        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
+        while True:
+            tries += 1
+            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
+            details = {
+                "target": target,
+                "args": args,
+                "kwargs": kwargs,
+                "tries": tries,
+                "elapsed": elapsed,
+            }
+
+            ret = await target(*args, **kwargs)
+            if predicate(ret):
+                max_tries_exceeded = (tries == max_tries_value)
+                max_time_exceeded = (max_time_value is not None and
+                                     elapsed >= max_time_value)
+
+                if max_tries_exceeded or max_time_exceeded:
+                    await _call_handlers(on_giveup, **details, value=ret)
+                    break
+
+                try:
+                    seconds = _next_wait(wait, ret, jitter, elapsed,
+                                         max_time_value)
+                except StopIteration:
+                    await _call_handlers(on_giveup, **details, value=ret)
+                    break
+
+                await _call_handlers(on_backoff, **details, value=ret,
+                                     wait=seconds)
+
+                # Note: there is no convenient way to pass an explicit
+                # event loop to the decorator, so here we assume that the
+                # default thread event loop is set and correct (it mostly
+                # is by default); on Python >= 3.5.3, get_event_loop()
+                # called from a coroutine is guaranteed to return the
+                # running loop.
+                await asyncio.sleep(seconds)
+                continue
+            else:
+                await _call_handlers(on_success, **details, value=ret)
+                break
+
+        return ret
+
+    return retry
+
+
+def retry_exception(target, wait_gen, exception,
+                    *,
+                    max_tries, max_time, jitter, giveup,
+                    on_success, on_backoff, on_giveup, raise_on_giveup,
+                    wait_gen_kwargs):
+    on_success = _ensure_coroutines(on_success)
+    on_backoff = _ensure_coroutines(on_backoff)
+    on_giveup = _ensure_coroutines(on_giveup)
+    giveup = _ensure_coroutine(giveup)
+
+    # Easy to implement, please report if you need this.
+    assert not asyncio.iscoroutinefunction(max_tries)
+    assert not asyncio.iscoroutinefunction(jitter)
+
+    @functools.wraps(target)
+    async def retry(*args, **kwargs):
+
+        max_tries_value = _maybe_call(max_tries)
+        max_time_value = _maybe_call(max_time)
+
+        tries = 0
+        start = datetime.datetime.now()
+        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
+        while True:
+            tries += 1
+            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
+            details = {
+                "target": target,
+                "args": args,
+                "kwargs": kwargs,
+                "tries": tries,
+                "elapsed": elapsed,
+            }
+
+            try:
+                ret = await target(*args, **kwargs)
+            except exception as e:
+                giveup_result = await giveup(e)
+                max_tries_exceeded = (tries == max_tries_value)
+                max_time_exceeded = (max_time_value is not None and
+                                     elapsed >= max_time_value)
+
+                if giveup_result or max_tries_exceeded or max_time_exceeded:
+                    await _call_handlers(on_giveup, **details, exception=e)
+                    if raise_on_giveup:
+                        raise
+                    return None
+
+                try:
+                    seconds = _next_wait(wait, e, jitter, elapsed,
+                                         max_time_value)
+                except StopIteration:
+                    await _call_handlers(on_giveup, **details, exception=e)
+                    raise e
+
+                await _call_handlers(on_backoff, **details, wait=seconds,
+                                     exception=e)
+
+                # Note: there is no convenient way to pass an explicit
+                # event loop to the decorator, so here we assume that the
+                # default thread event loop is set and correct (it mostly
+                # is by default); on Python >= 3.5.3, get_event_loop()
+                # called from a coroutine is guaranteed to return the
+                # running loop.
+                await asyncio.sleep(seconds)
+            else:
+                await _call_handlers(on_success, **details)
+
+                return ret
+    return retry
diff --git a/py311/lib/python3.11/site-packages/backoff/_common.py b/py311/lib/python3.11/site-packages/backoff/_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b2e54efa323ccd81102667642775b4c56d06128
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/backoff/_common.py
@@ -0,0 +1,120 @@
+# coding:utf-8
+
+import functools
+import logging
+import sys
+import traceback
+import warnings
+
+
+# Use module-specific logger with a default null handler.
+_logger = logging.getLogger('backoff')
+_logger.addHandler(logging.NullHandler())  # pragma: no cover
+_logger.setLevel(logging.INFO)
+
+
+# Evaluate arg that can be either a fixed value or a callable.
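+# e.g. _maybe_call(10) == 10 and _maybe_call(lambda: 10) == 10.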
+def _maybe_call(f, *args, **kwargs): + if callable(f): + try: + return f(*args, **kwargs) + except TypeError: + return f + else: + return f + + +def _init_wait_gen(wait_gen, wait_gen_kwargs): + kwargs = {k: _maybe_call(v) for k, v in wait_gen_kwargs.items()} + initialized = wait_gen(**kwargs) + initialized.send(None) # Initialize with an empty send + return initialized + + +def _next_wait(wait, send_value, jitter, elapsed, max_time): + value = wait.send(send_value) + try: + if jitter is not None: + seconds = jitter(value) + else: + seconds = value + except TypeError: + warnings.warn( + "Nullary jitter function signature is deprecated. Use " + "unary signature accepting a wait value in seconds and " + "returning a jittered version of it.", + DeprecationWarning, + stacklevel=2, + ) + + seconds = value + jitter() + + # don't sleep longer than remaining allotted max_time + if max_time is not None: + seconds = min(seconds, max_time - elapsed) + + return seconds + + +def _prepare_logger(logger): + if isinstance(logger, str): + logger = logging.getLogger(logger) + return logger + + +# Configure handler list with user specified handler and optionally +# with a default handler bound to the specified logger. +def _config_handlers( + user_handlers, *, default_handler=None, logger=None, log_level=None +): + handlers = [] + if logger is not None: + assert log_level is not None, "Log level is not specified" + # bind the specified logger to the default log handler + log_handler = functools.partial( + default_handler, logger=logger, log_level=log_level + ) + handlers.append(log_handler) + + if user_handlers is None: + return handlers + + # user specified handlers can either be an iterable of handlers + # or a single handler. either way append them to the list. + if hasattr(user_handlers, '__iter__'): + # add all handlers in the iterable + handlers += list(user_handlers) + else: + # append a single handler + handlers.append(user_handlers) + + return handlers + + +# Default backoff handler +def _log_backoff(details, logger, log_level): + msg = "Backing off %s(...) for %.1fs (%s)" + log_args = [details['target'].__name__, details['wait']] + + exc_typ, exc, _ = sys.exc_info() + if exc is not None: + exc_fmt = traceback.format_exception_only(exc_typ, exc)[-1] + log_args.append(exc_fmt.rstrip("\n")) + else: + log_args.append(details['value']) + logger.log(log_level, msg, *log_args) + + +# Default giveup handler +def _log_giveup(details, logger, log_level): + msg = "Giving up %s(...) 
after %d tries (%s)"
+    log_args = [details['target'].__name__, details['tries']]
+
+    exc_typ, exc, _ = sys.exc_info()
+    if exc is not None:
+        exc_fmt = traceback.format_exception_only(exc_typ, exc)[-1]
+        log_args.append(exc_fmt.rstrip("\n"))
+    else:
+        log_args.append(details['value'])
+
+    logger.log(log_level, msg, *log_args)
diff --git a/py311/lib/python3.11/site-packages/backoff/_decorator.py b/py311/lib/python3.11/site-packages/backoff/_decorator.py
new file mode 100644
index 0000000000000000000000000000000000000000..92dee1bb76178d0beaa2ae841d5d0325e3ac27d3
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/backoff/_decorator.py
@@ -0,0 +1,222 @@
+# coding:utf-8
+import asyncio
+import logging
+import operator
+from typing import Any, Callable, Iterable, Optional, Type, Union
+
+from backoff._common import (
+    _prepare_logger,
+    _config_handlers,
+    _log_backoff,
+    _log_giveup
+)
+from backoff._jitter import full_jitter
+from backoff import _async, _sync
+from backoff._typing import (
+    _CallableT,
+    _Handler,
+    _Jitterer,
+    _MaybeCallable,
+    _MaybeLogger,
+    _MaybeSequence,
+    _Predicate,
+    _WaitGenerator,
+)
+
+
+def on_predicate(wait_gen: _WaitGenerator,
+                 predicate: _Predicate[Any] = operator.not_,
+                 *,
+                 max_tries: Optional[_MaybeCallable[int]] = None,
+                 max_time: Optional[_MaybeCallable[float]] = None,
+                 jitter: Union[_Jitterer, None] = full_jitter,
+                 on_success: Union[_Handler, Iterable[_Handler], None] = None,
+                 on_backoff: Union[_Handler, Iterable[_Handler], None] = None,
+                 on_giveup: Union[_Handler, Iterable[_Handler], None] = None,
+                 logger: _MaybeLogger = 'backoff',
+                 backoff_log_level: int = logging.INFO,
+                 giveup_log_level: int = logging.ERROR,
+                 **wait_gen_kwargs: Any) -> Callable[[_CallableT], _CallableT]:
+    """Returns decorator for backoff and retry triggered by predicate.
+
+    Args:
+        wait_gen: A generator yielding successive wait times in
+            seconds.
+        predicate: A function which when called on the return value of
+            the target function will trigger backoff when considered
+            truthy. If not specified, the default behavior is to
+            back off on falsey return values.
+        max_tries: The maximum number of attempts to make before giving
+            up. In the case of failure, the result of the last attempt
+            will be returned. The default value of None means there
+            is no limit to the number of tries. If a callable is passed,
+            it will be evaluated at runtime and its return value used.
+        max_time: The maximum total amount of time to try for before
+            giving up. If this time expires, the result of the last
+            attempt will be returned. If a callable is passed, it will
+            be evaluated at runtime and its return value used.
+        jitter: A function of the value yielded by wait_gen returning
+            the actual time to wait. This distributes wait times
+            stochastically in order to avoid timing collisions across
+            concurrent clients. Wait times are jittered by default
+            using the full_jitter function. Jittering may be disabled
+            altogether by passing jitter=None.
+        on_success: Callable (or iterable of callables) with a unary
+            signature to be called in the event of success. The
+            parameter is a dict containing details about the invocation.
+        on_backoff: Callable (or iterable of callables) with a unary
+            signature to be called in the event of a backoff. The
+            parameter is a dict containing details about the invocation.
+        on_giveup: Callable (or iterable of callables) with a unary
+            signature to be called in the event that max_tries
+            is exceeded.
The parameter is a dict containing details + about the invocation. + logger: Name of logger or Logger object to log to. Defaults to + 'backoff'. + backoff_log_level: log level for the backoff event. Defaults to "INFO" + giveup_log_level: log level for the give up event. Defaults to "ERROR" + **wait_gen_kwargs: Any additional keyword args specified will be + passed to wait_gen when it is initialized. Any callable + args will first be evaluated and their return values passed. + This is useful for runtime configuration. + """ + def decorate(target): + nonlocal logger, on_success, on_backoff, on_giveup + + logger = _prepare_logger(logger) + on_success = _config_handlers(on_success) + on_backoff = _config_handlers( + on_backoff, + default_handler=_log_backoff, + logger=logger, + log_level=backoff_log_level + ) + on_giveup = _config_handlers( + on_giveup, + default_handler=_log_giveup, + logger=logger, + log_level=giveup_log_level + ) + + if asyncio.iscoroutinefunction(target): + retry = _async.retry_predicate + else: + retry = _sync.retry_predicate + + return retry( + target, + wait_gen, + predicate, + max_tries=max_tries, + max_time=max_time, + jitter=jitter, + on_success=on_success, + on_backoff=on_backoff, + on_giveup=on_giveup, + wait_gen_kwargs=wait_gen_kwargs + ) + + # Return a function which decorates a target with a retry loop. + return decorate + + +def on_exception(wait_gen: _WaitGenerator, + exception: _MaybeSequence[Type[Exception]], + *, + max_tries: Optional[_MaybeCallable[int]] = None, + max_time: Optional[_MaybeCallable[float]] = None, + jitter: Union[_Jitterer, None] = full_jitter, + giveup: _Predicate[Exception] = lambda e: False, + on_success: Union[_Handler, Iterable[_Handler], None] = None, + on_backoff: Union[_Handler, Iterable[_Handler], None] = None, + on_giveup: Union[_Handler, Iterable[_Handler], None] = None, + raise_on_giveup: bool = True, + logger: _MaybeLogger = 'backoff', + backoff_log_level: int = logging.INFO, + giveup_log_level: int = logging.ERROR, + **wait_gen_kwargs: Any) -> Callable[[_CallableT], _CallableT]: + """Returns decorator for backoff and retry triggered by exception. + + Args: + wait_gen: A generator yielding successive wait times in + seconds. + exception: An exception type (or tuple of types) which triggers + backoff. + max_tries: The maximum number of attempts to make before giving + up. Once exhausted, the exception will be allowed to escape. + The default value of None means there is no limit to the + number of tries. If a callable is passed, it will be + evaluated at runtime and its return value used. + max_time: The maximum total amount of time to try for before + giving up. Once expired, the exception will be allowed to + escape. If a callable is passed, it will be + evaluated at runtime and its return value used. + jitter: A function of the value yielded by wait_gen returning + the actual time to wait. This distributes wait times + stochastically in order to avoid timing collisions across + concurrent clients. Wait times are jittered by default + using the full_jitter function. Jittering may be disabled + altogether by passing jitter=None. + giveup: Function accepting an exception instance and + returning whether or not to give up. Optional. The default + is to always continue. + on_success: Callable (or iterable of callables) with a unary + signature to be called in the event of success. The + parameter is a dict containing details about the invocation. 
+ on_backoff: Callable (or iterable of callables) with a unary + signature to be called in the event of a backoff. The + parameter is a dict containing details about the invocation. + on_giveup: Callable (or iterable of callables) with a unary + signature to be called in the event that max_tries + is exceeded. The parameter is a dict containing details + about the invocation. + raise_on_giveup: Boolean indicating whether the registered exceptions + should be raised on giveup. Defaults to `True` + logger: Name or Logger object to log to. Defaults to 'backoff'. + backoff_log_level: log level for the backoff event. Defaults to "INFO" + giveup_log_level: log level for the give up event. Defaults to "ERROR" + **wait_gen_kwargs: Any additional keyword args specified will be + passed to wait_gen when it is initialized. Any callable + args will first be evaluated and their return values passed. + This is useful for runtime configuration. + """ + def decorate(target): + nonlocal logger, on_success, on_backoff, on_giveup + + logger = _prepare_logger(logger) + on_success = _config_handlers(on_success) + on_backoff = _config_handlers( + on_backoff, + default_handler=_log_backoff, + logger=logger, + log_level=backoff_log_level, + ) + on_giveup = _config_handlers( + on_giveup, + default_handler=_log_giveup, + logger=logger, + log_level=giveup_log_level, + ) + + if asyncio.iscoroutinefunction(target): + retry = _async.retry_exception + else: + retry = _sync.retry_exception + + return retry( + target, + wait_gen, + exception, + max_tries=max_tries, + max_time=max_time, + jitter=jitter, + giveup=giveup, + on_success=on_success, + on_backoff=on_backoff, + on_giveup=on_giveup, + raise_on_giveup=raise_on_giveup, + wait_gen_kwargs=wait_gen_kwargs + ) + + # Return a function which decorates a target with a retry loop. + return decorate diff --git a/py311/lib/python3.11/site-packages/backoff/_jitter.py b/py311/lib/python3.11/site-packages/backoff/_jitter.py new file mode 100644 index 0000000000000000000000000000000000000000..be7e38925ea857216c874dbbdd6aa1daa8b503f0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/backoff/_jitter.py @@ -0,0 +1,28 @@ +# coding:utf-8 + +import random + + +def random_jitter(value: float) -> float: + """Jitter the value a random number of milliseconds. + + This adds up to 1 second of additional time to the original value. + Prior to backoff version 1.2 this was the default jitter behavior. + + Args: + value: The unadulterated backoff value. + """ + return value + random.random() + + +def full_jitter(value: float) -> float: + """Jitter the value across the full range (0 to value). + + This corresponds to the "Full Jitter" algorithm specified in the + AWS blog's post on the performance of various jitter algorithms. + (http://www.awsarchitectureblog.com/2015/03/backoff.html) + + Args: + value: The unadulterated backoff value. 
+ """ + return random.uniform(0, value) diff --git a/py311/lib/python3.11/site-packages/backoff/_typing.py b/py311/lib/python3.11/site-packages/backoff/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..20446d4f58df9cec3edb3ace8d94dd11942fcef4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/backoff/_typing.py @@ -0,0 +1,44 @@ +# coding:utf-8 +import logging +import sys +from typing import (Any, Callable, Coroutine, Dict, Generator, Sequence, Tuple, + TypeVar, Union) + +if sys.version_info >= (3, 8): # pragma: no cover + from typing import TypedDict +else: # pragma: no cover + # use typing_extensions if installed but don't require it + try: + from typing_extensions import TypedDict + except ImportError: + class TypedDict(dict): + def __init_subclass__(cls, **kwargs: Any) -> None: + return super().__init_subclass__() + + +class _Details(TypedDict): + target: Callable[..., Any] + args: Tuple[Any, ...] + kwargs: Dict[str, Any] + tries: int + elapsed: float + + +class Details(_Details, total=False): + wait: float # present in the on_backoff handler case for either decorator + value: Any # present in the on_predicate decorator case + + +T = TypeVar("T") + +_CallableT = TypeVar('_CallableT', bound=Callable[..., Any]) +_Handler = Union[ + Callable[[Details], None], + Callable[[Details], Coroutine[Any, Any, None]], +] +_Jitterer = Callable[[float], float] +_MaybeCallable = Union[T, Callable[[], T]] +_MaybeLogger = Union[str, logging.Logger, None] +_MaybeSequence = Union[T, Sequence[T]] +_Predicate = Callable[[T], bool] +_WaitGenerator = Callable[..., Generator[float, None, None]] diff --git a/py311/lib/python3.11/site-packages/backoff/_wait_gen.py b/py311/lib/python3.11/site-packages/backoff/_wait_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..cc9c8857a38c2ad1eb9bf13aa56a65a491134257 --- /dev/null +++ b/py311/lib/python3.11/site-packages/backoff/_wait_gen.py @@ -0,0 +1,89 @@ +# coding:utf-8 + +import itertools +from typing import Any, Callable, Generator, Iterable, Optional, Union + + +def expo( + base: float = 2, + factor: float = 1, + max_value: Optional[float] = None +) -> Generator[float, Any, None]: + + """Generator for exponential decay. + + Args: + base: The mathematical base of the exponentiation operation + factor: Factor to multiply the exponentiation by. + max_value: The maximum value to yield. Once the value in the + true exponential sequence exceeds this, the value + of max_value will forever after be yielded. + """ + # Advance past initial .send() call + yield # type: ignore[misc] + n = 0 + while True: + a = factor * base ** n + if max_value is None or a < max_value: + yield a + n += 1 + else: + yield max_value + + +def fibo(max_value: Optional[int] = None) -> Generator[int, None, None]: + """Generator for fibonaccial decay. + + Args: + max_value: The maximum value to yield. Once the value in the + true fibonacci sequence exceeds this, the value + of max_value will forever after be yielded. + """ + # Advance past initial .send() call + yield # type: ignore[misc] + + a = 1 + b = 1 + while True: + if max_value is None or a < max_value: + yield a + a, b = b, a + b + else: + yield max_value + + +def constant( + interval: Union[int, Iterable[float]] = 1 +) -> Generator[float, None, None]: + """Generator for constant intervals. + + Args: + interval: A constant value to yield or an iterable of such values. 
+ """ + # Advance past initial .send() call + yield # type: ignore[misc] + + try: + itr = iter(interval) # type: ignore + except TypeError: + itr = itertools.repeat(interval) # type: ignore + + for val in itr: + yield val + + +def runtime( + *, + value: Callable[[Any], float] +) -> Generator[float, None, None]: + """Generator that is based on parsing the return value or thrown + exception of the decorated method + + Args: + value: a callable which takes as input the decorated + function's return value or thrown exception and + determines how long to wait + """ + ret_or_exc = yield # type: ignore[misc] + while True: + ret_or_exc = yield value(ret_or_exc) diff --git a/py311/lib/python3.11/site-packages/backoff/py.typed b/py311/lib/python3.11/site-packages/backoff/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/backoff/types.py b/py311/lib/python3.11/site-packages/backoff/types.py new file mode 100644 index 0000000000000000000000000000000000000000..25f20a4c43f79a62278b00081c5d7da5dfc12e3e --- /dev/null +++ b/py311/lib/python3.11/site-packages/backoff/types.py @@ -0,0 +1,6 @@ +# coding:utf-8 +from ._typing import Details + +__all__ = [ + 'Details' +] diff --git a/py311/lib/python3.11/site-packages/boto3/__init__.py b/py311/lib/python3.11/site-packages/boto3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..795ac29bfdda98de62dec05f3c67df2defecc56a --- /dev/null +++ b/py311/lib/python3.11/site-packages/boto3/__init__.py @@ -0,0 +1,107 @@ +# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# https://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +import logging +from logging import NullHandler + +from boto3.compat import _warn_deprecated_python +from boto3.session import Session + +__author__ = 'Amazon Web Services' +__version__ = '1.42.27' + + +# The default Boto3 session; autoloaded when needed. +DEFAULT_SESSION = None + + +def setup_default_session(**kwargs): + """ + Set up a default session, passing through any parameters to the session + constructor. There is no need to call this unless you wish to pass custom + parameters, because a default session will be created for you. + """ + global DEFAULT_SESSION + DEFAULT_SESSION = Session(**kwargs) + + +def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None): + """ + Add a stream handler for the given name and level to the logging module. + By default, this logs all boto3 messages to ``stdout``. + + >>> import boto3 + >>> boto3.set_stream_logger('boto3.resources', logging.INFO) + + For debugging purposes a good choice is to set the stream logger to ``''`` + which is equivalent to saying "log everything". + + .. WARNING:: + Be aware that when logging anything from ``'botocore'`` the full wire + trace will appear in your logs. If your payloads contain sensitive data + this should not be used in production. + + :type name: string + :param name: Log name + :type level: int + :param level: Logging level, e.g. 
``logging.INFO``
+    :type format_string: str
+    :param format_string: Log message format
+    """
+    if format_string is None:
+        format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
+
+    logger = logging.getLogger(name)
+    logger.setLevel(level)
+    handler = logging.StreamHandler()
+    handler.setLevel(level)
+    formatter = logging.Formatter(format_string)
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+
+
+def _get_default_session():
+    """
+    Get the default session, creating one if needed.
+
+    :rtype: :py:class:`~boto3.session.Session`
+    :return: The default session
+    """
+    if DEFAULT_SESSION is None:
+        setup_default_session()
+        _warn_deprecated_python()
+
+    return DEFAULT_SESSION
+
+
+def client(*args, **kwargs):
+    """
+    Create a low-level service client by name using the default session.
+
+    See :py:meth:`boto3.session.Session.client`.
+    """
+    return _get_default_session().client(*args, **kwargs)
+
+
+def resource(*args, **kwargs):
+    """
+    Create a resource service client by name using the default session.
+
+    See :py:meth:`boto3.session.Session.resource`.
+    """
+    return _get_default_session().resource(*args, **kwargs)
+
+
+# Set up do-nothing logging like a library is supposed to.
+# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
+logging.getLogger('boto3').addHandler(NullHandler())
diff --git a/py311/lib/python3.11/site-packages/boto3/compat.py b/py311/lib/python3.11/site-packages/boto3/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..44a6431caf68509f564de91c64977cfc4e7016a9
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/boto3/compat.py
@@ -0,0 +1,97 @@
+# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# https://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+import sys
+import os
+import errno
+import socket
+import warnings
+
+from boto3.exceptions import PythonDeprecationWarning
+
+from s3transfer.manager import TransferConfig
+
+# In python3, socket.error is OSError, which is too general
+# for what we want (i.e. FileNotFoundError is a subclass of OSError).
+# In py3 all the socket-related errors are subclasses of the newly
+# created ConnectionError.
+SOCKET_ERROR = ConnectionError
+
+_APPEND_MODE_CHAR = 'a'
+
+import collections.abc as collections_abc
+
+
+TRANSFER_CONFIG_SUPPORTS_CRT = hasattr(TransferConfig, 'UNSET_DEFAULT')
+
+
+if sys.platform.startswith('win'):
+    def rename_file(current_filename, new_filename):
+        try:
+            os.remove(new_filename)
+        except OSError as e:
+            if e.errno != errno.ENOENT:
+                # We only want to ignore trying to remove
+                # a file that does not exist. If it fails
+                # for any other reason we should be propagating
+                # that exception.
+                raise
+        os.rename(current_filename, new_filename)
+else:
+    rename_file = os.rename
+
+
+def filter_python_deprecation_warnings():
+    """
+    Invoking this filter acknowledges that your runtime will soon be
+    deprecated, at which time you will stop receiving all updates to
+    your client.
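+
+    A minimal usage sketch (illustrative only) -- call this once, before
+    boto3 is first used:
+
+        import boto3.compat
+        boto3.compat.filter_python_deprecation_warnings()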
+ """ + warnings.filterwarnings( + 'ignore', + message=".*Boto3 will no longer support Python.*", + category=PythonDeprecationWarning, + module=r".*boto3\.compat" + ) + + +def _warn_deprecated_python(): + """Use this template for future deprecation campaigns as needed.""" + py_39_params = { + 'date': 'April 29, 2026', + 'blog_link': ( + 'https://aws.amazon.com/blogs/developer/' + 'python-support-policy-updates-for-aws-sdks-and-tools/' + ) + } + deprecated_versions = { + # Example template for future deprecations + (3, 9): py_39_params, + } + py_version = sys.version_info[:2] + + if py_version in deprecated_versions: + params = deprecated_versions[py_version] + warning = ( + "Boto3 will no longer support Python {}.{} " + "starting {}. To continue receiving service updates, " + "bug fixes, and security updates please upgrade to Python 3.10 or " + "later. More information can be found here: {}" + ).format(py_version[0], py_version[1], params['date'], params['blog_link']) + warnings.warn(warning, PythonDeprecationWarning) + + +def is_append_mode(fileobj): + return ( + hasattr(fileobj, 'mode') and + isinstance(fileobj.mode, str) and + _APPEND_MODE_CHAR in fileobj.mode + ) diff --git a/py311/lib/python3.11/site-packages/boto3/crt.py b/py311/lib/python3.11/site-packages/boto3/crt.py new file mode 100644 index 0000000000000000000000000000000000000000..fb30401d5bf827ee5e4eeba439f8080f1f70d646 --- /dev/null +++ b/py311/lib/python3.11/site-packages/boto3/crt.py @@ -0,0 +1,217 @@ +# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# https://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +""" +This file contains private functionality for interacting with the AWS +Common Runtime library (awscrt) in boto3. + +All code contained within this file is for internal usage within this +project and is not intended for external consumption. All interfaces +contained within are subject to abrupt breaking changes. +""" + +import logging +import threading + +import botocore.exceptions +from botocore.session import Session +from s3transfer.crt import ( + BotocoreCRTCredentialsWrapper, + BotocoreCRTRequestSerializer, + CRTTransferManager, + acquire_crt_s3_process_lock, + create_s3_crt_client, +) + +from boto3.compat import TRANSFER_CONFIG_SUPPORTS_CRT +from boto3.exceptions import InvalidCrtTransferConfigError +from boto3.s3.constants import CRT_TRANSFER_CLIENT + +logger = logging.getLogger(__name__) + +# Singletons for CRT-backed transfers +CRT_S3_CLIENT = None +BOTOCORE_CRT_SERIALIZER = None + +CLIENT_CREATION_LOCK = threading.Lock() +PROCESS_LOCK_NAME = 'boto3' + + +_ALLOWED_CRT_TRANSFER_CONFIG_OPTIONS = { + 'multipart_threshold', + 'max_concurrency', + 'max_request_concurrency', + 'multipart_chunksize', + 'preferred_transfer_client', +} + + +def _create_crt_client(session, config, region_name, cred_provider): + """Create a CRT S3 Client for file transfer. + + Instantiating many of these may lead to degraded performance or + system resource exhaustion. 
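+
+    For that reason this helper is not called per request: get_crt_s3_client
+    below caches a single client, guarded by CLIENT_CREATION_LOCK.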
+    """
+    create_crt_client_kwargs = {
+        'region': region_name,
+        'use_ssl': True,
+        'crt_credentials_provider': cred_provider,
+    }
+    return create_s3_crt_client(**create_crt_client_kwargs)
+
+
+def _create_crt_request_serializer(session, region_name):
+    return BotocoreCRTRequestSerializer(
+        session, {'region_name': region_name, 'endpoint_url': None}
+    )
+
+
+def _create_crt_s3_client(
+    session, config, region_name, credentials, lock, **kwargs
+):
+    """Create boto3 wrapper class to manage crt lock reference and S3 client."""
+    cred_wrapper = BotocoreCRTCredentialsWrapper(credentials)
+    cred_provider = cred_wrapper.to_crt_credentials_provider()
+    return CRTS3Client(
+        _create_crt_client(session, config, region_name, cred_provider),
+        lock,
+        region_name,
+        cred_wrapper,
+    )
+
+
+def _initialize_crt_transfer_primatives(client, config):
+    lock = acquire_crt_s3_process_lock(PROCESS_LOCK_NAME)
+    if lock is None:
+        # If we're unable to acquire the lock, we cannot
+        # use the CRT in this process and should default to
+        # the classic s3transfer manager.
+        return None, None
+
+    session = Session()
+    region_name = client.meta.region_name
+    credentials = client._get_credentials()
+
+    serializer = _create_crt_request_serializer(session, region_name)
+    s3_client = _create_crt_s3_client(
+        session, config, region_name, credentials, lock
+    )
+    return serializer, s3_client
+
+
+def get_crt_s3_client(client, config):
+    global CRT_S3_CLIENT
+    global BOTOCORE_CRT_SERIALIZER
+
+    with CLIENT_CREATION_LOCK:
+        if CRT_S3_CLIENT is None:
+            serializer, s3_client = _initialize_crt_transfer_primatives(
+                client, config
+            )
+            BOTOCORE_CRT_SERIALIZER = serializer
+            CRT_S3_CLIENT = s3_client
+
+    return CRT_S3_CLIENT
+
+
+class CRTS3Client:
+    """
+    This wrapper keeps track of our underlying CRT client, the lock used to
+    acquire it, and the region we've used to instantiate the client.
+
+    Due to limitations in the existing CRT interfaces, we can only make calls
+    in a single region, and redirects are not supported. We track the region to
+    ensure we don't use the CRT client when a successful request cannot be made.
+    """
+
+    def __init__(self, crt_client, process_lock, region, cred_provider):
+        self.crt_client = crt_client
+        self.process_lock = process_lock
+        self.region = region
+        self.cred_provider = cred_provider
+
+
+def is_crt_compatible_request(client, crt_s3_client):
+    """
+    The boto3 client must use the same signing region and credentials
+    as the CRT_S3_CLIENT singleton. Otherwise, fall back to the classic
+    transfer manager.
+    """
+    if crt_s3_client is None:
+        return False
+
+    boto3_creds = client._get_credentials()
+    if boto3_creds is None:
+        return False
+
+    is_same_identity = compare_identity(
+        boto3_creds.get_frozen_credentials(), crt_s3_client.cred_provider
+    )
+    is_same_region = client.meta.region_name == crt_s3_client.region
+    return is_same_region and is_same_identity
+
+
+def compare_identity(boto3_creds, crt_s3_creds):
+    try:
+        crt_creds = crt_s3_creds()
+    except botocore.exceptions.NoCredentialsError:
+        return False
+
+    is_matching_identity = (
+        boto3_creds.access_key == crt_creds.access_key_id
+        and boto3_creds.secret_key == crt_creds.secret_access_key
+        and boto3_creds.token == crt_creds.session_token
+    )
+    return is_matching_identity
+
+
+def _validate_crt_transfer_config(config):
+    if config is None:
+        return
+    # CRT client can also be configured via `AUTO_RESOLVE_TRANSFER_CLIENT`,
+    # but it predates this validation. We only validate against a CRT client
+    # configured via `CRT_TRANSFER_CLIENT` to preserve compatibility.
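+    #
+    # Illustrative (hypothetical) example of what gets rejected below:
+    #   TransferConfig(preferred_transfer_client='crt',
+    #                  num_download_attempts=10)
+    # fails validation, because 'num_download_attempts' is set but is not in
+    # _ALLOWED_CRT_TRANSFER_CONFIG_OPTIONS.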
+    if config.preferred_transfer_client != CRT_TRANSFER_CLIENT:
+        return
+    invalid_crt_args = []
+    for param in config.DEFAULTS.keys():
+        val = config.get_deep_attr(param)
+        if (
+            param not in _ALLOWED_CRT_TRANSFER_CONFIG_OPTIONS
+            and val is not config.UNSET_DEFAULT
+        ):
+            invalid_crt_args.append(param)
+    if len(invalid_crt_args) > 0:
+        raise InvalidCrtTransferConfigError(
+            "The following transfer config options are invalid "
+            "when preferred_transfer_client is set to crt: "
+            f"{', '.join(invalid_crt_args)}"
+        )
+
+
+def create_crt_transfer_manager(client, config):
+    """Create a CRTTransferManager for optimized data transfer."""
+    crt_s3_client = get_crt_s3_client(client, config)
+    if is_crt_compatible_request(client, crt_s3_client):
+        crt_transfer_manager_kwargs = {
+            'crt_s3_client': crt_s3_client.crt_client,
+            'crt_request_serializer': BOTOCORE_CRT_SERIALIZER,
+        }
+        if TRANSFER_CONFIG_SUPPORTS_CRT:
+            _validate_crt_transfer_config(config)
+            crt_transfer_manager_kwargs['config'] = config
+        if not TRANSFER_CONFIG_SUPPORTS_CRT and config:
+            logger.warning(
+                'Using TransferConfig with CRT client requires '
+                's3transfer >= 0.16.0, configured values will be ignored.'
+            )
+        return CRTTransferManager(**crt_transfer_manager_kwargs)
+    return None
diff --git a/py311/lib/python3.11/site-packages/boto3/exceptions.py b/py311/lib/python3.11/site-packages/boto3/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dbe37dfe07d8a2aa6b2004e2ee6968c77ce670e
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/boto3/exceptions.py
@@ -0,0 +1,130 @@
+# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# https://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+import botocore.exceptions
+
+
+# All exceptions in this module should subclass from Boto3Error.
+class Boto3Error(Exception):
+    """Base class for all Boto3 errors."""
+
+
+class ResourceLoadException(Boto3Error):
+    pass
+
+
+# NOTE: This doesn't appear to be used anywhere.
+# It's probably safe to remove this.
+class NoVersionFound(Boto3Error):
+    pass
+
+
+# We're subclassing from botocore.exceptions.DataNotFoundError
+# to keep backwards compatibility with anyone that was catching
+# this low level Botocore error before this exception was
+# introduced in boto3.
+# Same thing for ResourceNotExistsError below.
+class UnknownAPIVersionError(
+    Boto3Error, botocore.exceptions.DataNotFoundError
+):
+    def __init__(self, service_name, bad_api_version, available_api_versions):
+        msg = (
+            f"The '{service_name}' resource does not support an API version of: {bad_api_version}\n"
+            f"Valid API versions are: {available_api_versions}"
+        )
+        # Not using super because we don't want the DataNotFoundError
+        # to be called; it has a different __init__ signature.
+        Boto3Error.__init__(self, msg)
+
+
+class ResourceNotExistsError(
+    Boto3Error, botocore.exceptions.DataNotFoundError
+):
+    """Raised when you attempt to create a resource that does not exist."""
+
+    def __init__(self, service_name, available_services, has_low_level_client):
+        msg = (
+            "The '{}' resource does not exist.\n"
+            "The available resources are:\n"
+            "   - {}\n".format(
+                service_name, '\n   - '.join(available_services)
+            )
+        )
+        if has_low_level_client:
+            msg = (
+                f"{msg}\nConsider using a boto3.client('{service_name}') "
+                f"instead of a resource for '{service_name}'"
+            )
+        # Not using super because we don't want the DataNotFoundError
+        # to be called; it has a different __init__ signature.
+        Boto3Error.__init__(self, msg)
+
+
+class RetriesExceededError(Boto3Error):
+    def __init__(self, last_exception, msg='Max Retries Exceeded'):
+        super().__init__(msg)
+        self.last_exception = last_exception
+
+
+class S3TransferFailedError(Boto3Error):
+    pass
+
+
+class S3UploadFailedError(Boto3Error):
+    pass
+
+
+class DynamoDBOperationNotSupportedError(Boto3Error):
+    """Raised for operations that are not supported for an operand."""
+
+    def __init__(self, operation, value):
+        msg = (
+            f'{operation} operation cannot be applied to value {value} of type '
+            f'{type(value)} directly. Must use AttributeBase object methods '
+            f'(i.e. Attr().eq()) to generate ConditionBase instances first.'
+        )
+        Exception.__init__(self, msg)
+
+
+# FIXME: Backward compatibility
+DynanmoDBOperationNotSupportedError = DynamoDBOperationNotSupportedError
+
+
+class DynamoDBNeedsConditionError(Boto3Error):
+    """Raised when input is not a condition"""
+
+    def __init__(self, value):
+        msg = (
+            f'Expecting a ConditionBase object. Got {value} of type {type(value)}. '
+            f'Use AttributeBase object methods (i.e. Attr().eq()) to '
+            f'generate ConditionBase instances.'
+        )
+        Exception.__init__(self, msg)
+
+
+class DynamoDBNeedsKeyConditionError(Boto3Error):
+    pass
+
+
+class PythonDeprecationWarning(Warning):
+    """
+    The Python version being used is scheduled to become unsupported
+    in a future release. See the warning for specifics.
+    """
+
+    pass
+
+
+class InvalidCrtTransferConfigError(Boto3Error):
+    pass
diff --git a/py311/lib/python3.11/site-packages/boto3/session.py b/py311/lib/python3.11/site-packages/boto3/session.py
new file mode 100644
index 0000000000000000000000000000000000000000..a990f460eac1e4d8855c30b80f6ad6be787a4756
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/boto3/session.py
@@ -0,0 +1,574 @@
+# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+# https://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
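+
+# A usage sketch (illustrative, not part of the upstream module): a Session
+# is the entry point for building clients and resources, e.g.
+#
+#   session = boto3.session.Session(profile_name='dev', region_name='us-west-2')
+#   s3 = session.client('s3')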
+ +import copy +import os + +import botocore.session +from botocore.client import Config +from botocore.exceptions import ( + DataNotFoundError, + NoCredentialsError, + UnknownServiceError, +) + +import boto3 +import boto3.utils +from boto3.exceptions import ResourceNotExistsError, UnknownAPIVersionError + +from .resources.factory import ResourceFactory + + +class Session: + """ + A session stores configuration state and allows you to create service + clients and resources. + + :type aws_access_key_id: string + :param aws_access_key_id: AWS access key ID + :type aws_secret_access_key: string + :param aws_secret_access_key: AWS secret access key + :type aws_session_token: string + :param aws_session_token: AWS temporary session token + :type region_name: string + :param region_name: Default region when creating new connections + :type botocore_session: botocore.session.Session + :param botocore_session: Use this Botocore session instead of creating + a new default one. + :type profile_name: string + :param profile_name: The name of a profile to use. If not given, then + the default profile is used. + :type aws_account_id: string + :param aws_account_id: AWS account ID + """ + + def __init__( + self, + aws_access_key_id=None, + aws_secret_access_key=None, + aws_session_token=None, + region_name=None, + botocore_session=None, + profile_name=None, + aws_account_id=None, + ): + if botocore_session is not None: + self._session = botocore_session + else: + # Create a new default session + self._session = botocore.session.get_session() + + # Setup custom user-agent string if it isn't already customized + if self._session.user_agent_name == 'Botocore': + botocore_info = f'Botocore/{self._session.user_agent_version}' + if self._session.user_agent_extra: + self._session.user_agent_extra += f" {botocore_info}" + else: + self._session.user_agent_extra = botocore_info + self._session.user_agent_name = 'Boto3' + self._session.user_agent_version = boto3.__version__ + + if profile_name is not None: + self._session.set_config_variable('profile', profile_name) + + credentials_kwargs = { + "aws_access_key_id": aws_access_key_id, + "aws_secret_access_key": aws_secret_access_key, + "aws_session_token": aws_session_token, + "aws_account_id": aws_account_id, + } + + if any(credentials_kwargs.values()): + if self._account_id_set_without_credentials(**credentials_kwargs): + raise NoCredentialsError() + + if aws_account_id is None: + del credentials_kwargs["aws_account_id"] + + self._session.set_credentials(*credentials_kwargs.values()) + + if region_name is not None: + self._session.set_config_variable('region', region_name) + + self.resource_factory = ResourceFactory( + self._session.get_component('event_emitter') + ) + self._setup_loader() + self._register_default_handlers() + + def __repr__(self): + return '{}(region_name={})'.format( + self.__class__.__name__, + repr(self._session.get_config_variable('region')), + ) + + @property + def profile_name(self): + """ + The **read-only** profile name. + """ + return self._session.profile or 'default' + + @property + def region_name(self): + """ + The **read-only** region name. 
+        """
+        return self._session.get_config_variable('region')
+
+    @property
+    def events(self):
+        """
+        The event emitter for a session
+        """
+        return self._session.get_component('event_emitter')
+
+    @property
+    def available_profiles(self):
+        """
+        The profiles available to the session credentials
+        """
+        return self._session.available_profiles
+
+    def _setup_loader(self):
+        """
+        Set up loader paths so that we can load resources.
+        """
+        self._loader = self._session.get_component('data_loader')
+        self._loader.search_paths.append(
+            os.path.join(os.path.dirname(__file__), 'data')
+        )
+
+    def get_available_services(self):
+        """
+        Get a list of available services that can be loaded as low-level
+        clients via :py:meth:`Session.client`.
+
+        :rtype: list
+        :return: List of service names
+        """
+        return self._session.get_available_services()
+
+    def get_available_resources(self):
+        """
+        Get a list of available services that can be loaded as resource
+        clients via :py:meth:`Session.resource`.
+
+        :rtype: list
+        :return: List of service names
+        """
+        return self._loader.list_available_services(type_name='resources-1')
+
+    def get_available_partitions(self):
+        """Lists the available partitions
+
+        :rtype: list
+        :return: Returns a list of partition names (e.g., ["aws", "aws-cn"])
+        """
+        return self._session.get_available_partitions()
+
+    def get_available_regions(
+        self, service_name, partition_name='aws', allow_non_regional=False
+    ):
+        """Lists the region and endpoint names of a particular partition.
+
+        The list of regions returned by this method contains only regions
+        that are explicitly known by the client to exist; it is not
+        comprehensive. A region not returned in this list may still be
+        available for the provided service.
+
+        :type service_name: string
+        :param service_name: Name of a service to list endpoints for (e.g., s3).
+
+        :type partition_name: string
+        :param partition_name: Name of the partition to limit endpoints to.
+            (e.g., aws for the public AWS endpoints, aws-cn for AWS China
+            endpoints, aws-us-gov for AWS GovCloud (US) endpoints, etc.)
+
+        :type allow_non_regional: bool
+        :param allow_non_regional: Set to True to include endpoints that are
+            not regional endpoints (e.g., s3-external-1,
+            fips-us-gov-west-1, etc).
+
+        :return: Returns a list of endpoint names (e.g., ["us-east-1"]).
+        """
+        return self._session.get_available_regions(
+            service_name=service_name,
+            partition_name=partition_name,
+            allow_non_regional=allow_non_regional,
+        )
+
+    def get_credentials(self):
+        """
+        Return the :class:`botocore.credentials.Credentials` object
+        associated with this session. If the credentials have not
+        yet been loaded, this will attempt to load them. If they
+        have already been loaded, this will return the cached
+        credentials.
+        """
+        return self._session.get_credentials()
+
+    def get_partition_for_region(self, region_name):
+        """Lists the partition name of a particular region.
+
+        :type region_name: string
+        :param region_name: Name of the region to list partition for (e.g.,
+            us-east-1).
+
+        :rtype: string
+        :return: Returns the respective partition name (e.g., aws).
+        """
+        return self._session.get_partition_for_region(region_name)
+
+    def client(
+        self,
+        service_name,
+        region_name=None,
+        api_version=None,
+        use_ssl=True,
+        verify=None,
+        endpoint_url=None,
+        aws_access_key_id=None,
+        aws_secret_access_key=None,
+        aws_session_token=None,
+        config=None,
+        aws_account_id=None,
+    ):
+        """
+        Create a low-level service client by name.
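+
+        For example (an illustrative sketch)::
+
+            s3 = session.client('s3', region_name='us-east-1')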
+
+        :type service_name: string
+        :param service_name: The name of a service, e.g. 's3' or 'ec2'. You
+            can get a list of available services via
+            :py:meth:`get_available_services`.
+
+        :type region_name: string
+        :param region_name: The name of the region associated with the client.
+            A client is associated with a single region.
+
+        :type api_version: string
+        :param api_version: The API version to use. By default, botocore will
+            use the latest API version when creating a client. You only need
+            to specify this parameter if you want to use a previous API version
+            of the client.
+
+        :type use_ssl: boolean
+        :param use_ssl: Whether or not to use SSL. By default, SSL is used.
+            Note that not all services support non-ssl connections.
+
+        :type verify: boolean/string
+        :param verify: Whether or not to verify SSL certificates. By default,
+            SSL certificates are verified. You can provide the following
+            values:
+
+            * False - do not validate SSL certificates. SSL will still be
+              used (unless use_ssl is False), but SSL certificates
+              will not be verified.
+            * path/to/cert/bundle.pem - A filename of the CA cert bundle to
+              use. You can specify this argument if you want to use a
+              different CA cert bundle than the one used by botocore.
+
+        :type endpoint_url: string
+        :param endpoint_url: The complete URL to use for the constructed
+            client. Normally, botocore will automatically construct the
+            appropriate URL to use when communicating with a service. You
+            can specify a complete URL (including the "http/https" scheme)
+            to override this behavior. If this value is provided,
+            then ``use_ssl`` is ignored.
+
+        :type aws_access_key_id: string
+        :param aws_access_key_id: The access key to use when creating
+            the client. This is entirely optional, and if not provided,
+            the credentials configured for the session will automatically
+            be used. You only need to provide this argument if you want
+            to override the credentials used for this specific client.
+
+        :type aws_secret_access_key: string
+        :param aws_secret_access_key: The secret key to use when creating
+            the client. Same semantics as aws_access_key_id above.
+
+        :type aws_session_token: string
+        :param aws_session_token: The session token to use when creating
+            the client. Same semantics as aws_access_key_id above.
+
+        :type config: botocore.client.Config
+        :param config: Advanced client configuration options. If region_name
+            is specified in the client config, its value will take precedence
+            over environment variables and configuration values, but not over
+            a region_name value passed explicitly to the method. See
+            `botocore config documentation
+            `_
+            for more details.
+
+        :type aws_account_id: string
+        :param aws_account_id: The account id to use when creating
+            the client. Same semantics as aws_access_key_id above.
+
+        :return: Service client instance
+
+        """
+        create_client_kwargs = {
+            'region_name': region_name,
+            'api_version': api_version,
+            'use_ssl': use_ssl,
+            'verify': verify,
+            'endpoint_url': endpoint_url,
+            'aws_access_key_id': aws_access_key_id,
+            'aws_secret_access_key': aws_secret_access_key,
+            'aws_session_token': aws_session_token,
+            'config': config,
+            'aws_account_id': aws_account_id,
+        }
+        if aws_account_id is None:
+            # Remove aws_account_id for arbitrary
+            # botocore version mismatches in AWS Lambda.
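+            # (Presumably: older botocore versions do not accept this kwarg,
+            # so it is only forwarded when a value was explicitly provided.)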
+            del create_client_kwargs['aws_account_id']
+
+        return self._session.create_client(
+            service_name, **create_client_kwargs
+        )
+
+    def resource(
+        self,
+        service_name,
+        region_name=None,
+        api_version=None,
+        use_ssl=True,
+        verify=None,
+        endpoint_url=None,
+        aws_access_key_id=None,
+        aws_secret_access_key=None,
+        aws_session_token=None,
+        config=None,
+    ):
+        """
+        Create a resource service client by name.
+
+        :type service_name: string
+        :param service_name: The name of a service, e.g. 's3' or 'ec2'. You
+            can get a list of available services via
+            :py:meth:`get_available_resources`.
+
+        :type region_name: string
+        :param region_name: The name of the region associated with the client.
+            A client is associated with a single region.
+
+        :type api_version: string
+        :param api_version: The API version to use. By default, botocore will
+            use the latest API version when creating a client. You only need
+            to specify this parameter if you want to use a previous API version
+            of the client.
+
+        :type use_ssl: boolean
+        :param use_ssl: Whether or not to use SSL. By default, SSL is used.
+            Note that not all services support non-ssl connections.
+
+        :type verify: boolean/string
+        :param verify: Whether or not to verify SSL certificates. By default,
+            SSL certificates are verified. You can provide the following
+            values:
+
+            * False - do not validate SSL certificates. SSL will still be
+              used (unless use_ssl is False), but SSL certificates
+              will not be verified.
+            * path/to/cert/bundle.pem - A filename of the CA cert bundle to
+              use. You can specify this argument if you want to use a
+              different CA cert bundle than the one used by botocore.
+
+        :type endpoint_url: string
+        :param endpoint_url: The complete URL to use for the constructed
+            client. Normally, botocore will automatically construct the
+            appropriate URL to use when communicating with a service. You
+            can specify a complete URL (including the "http/https" scheme)
+            to override this behavior. If this value is provided,
+            then ``use_ssl`` is ignored.
+
+        :type aws_access_key_id: string
+        :param aws_access_key_id: The access key to use when creating
+            the client. This is entirely optional, and if not provided,
+            the credentials configured for the session will automatically
+            be used. You only need to provide this argument if you want
+            to override the credentials used for this specific client.
+
+        :type aws_secret_access_key: string
+        :param aws_secret_access_key: The secret key to use when creating
+            the client. Same semantics as aws_access_key_id above.
+
+        :type aws_session_token: string
+        :param aws_session_token: The session token to use when creating
+            the client. Same semantics as aws_access_key_id above.
+
+        :type config: botocore.client.Config
+        :param config: Advanced client configuration options. If region_name
+            is specified in the client config, its value will take precedence
+            over environment variables and configuration values, but not over
+            a region_name value passed explicitly to the method. If
+            user_agent_extra is specified in the client config, it overrides
+            the default user_agent_extra provided by the resource API. See
+            `botocore config documentation
+            `_
+            for more details.
+ + :return: Subclass of :py:class:`~boto3.resources.base.ServiceResource` + """ + try: + resource_model = self._loader.load_service_model( + service_name, 'resources-1', api_version + ) + except UnknownServiceError: + available = self.get_available_resources() + has_low_level_client = ( + service_name in self.get_available_services() + ) + raise ResourceNotExistsError( + service_name, available, has_low_level_client + ) + except DataNotFoundError: + # This is because we've provided an invalid API version. + available_api_versions = self._loader.list_api_versions( + service_name, 'resources-1' + ) + raise UnknownAPIVersionError( + service_name, api_version, ', '.join(available_api_versions) + ) + + if api_version is None: + # Even though botocore's load_service_model() can handle + # using the latest api_version if not provided, we need + # to track this api_version in boto3 in order to ensure + # we're pairing a resource model with a client model + # of the same API version. It's possible for the latest + # API version of a resource model in boto3 to not be + # the same API version as a service model in botocore. + # So we need to look up the api_version if one is not + # provided to ensure we load the same API version of the + # client. + # + # Note: This is relying on the fact that + # loader.load_service_model(..., api_version=None) + # and loader.determine_latest_version(..., 'resources-1') + # both load the same api version of the file. + api_version = self._loader.determine_latest_version( + service_name, 'resources-1' + ) + + # Creating a new resource instance requires the low-level client + # and service model, the resource version and resource JSON data. + # We pass these to the factory and get back a class, which is + # instantiated on top of the low-level client. + if config is not None: + if config.user_agent_extra is None: + config = copy.deepcopy(config) + config.user_agent_extra = 'Resource' + else: + config = Config(user_agent_extra='Resource') + client = self.client( + service_name, + region_name=region_name, + api_version=api_version, + use_ssl=use_ssl, + verify=verify, + endpoint_url=endpoint_url, + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + aws_session_token=aws_session_token, + config=config, + ) + service_model = client.meta.service_model + + # Create a ServiceContext object to serve as a reference to + # important read-only information about the general service. + service_context = boto3.utils.ServiceContext( + service_name=service_name, + service_model=service_model, + resource_json_definitions=resource_model['resources'], + service_waiter_model=boto3.utils.LazyLoadedWaiterModel( + self._session, service_name, api_version + ), + ) + + # Create the service resource class. 
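+        # (For example, for service_name='s3' this yields a class whose
+        # instances expose sub-resources such as s3.Bucket('name') -- an
+        # illustrative sketch of the factory output, not an exhaustive list.)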
+ cls = self.resource_factory.load_from_definition( + resource_name=service_name, + single_resource_json_definition=resource_model['service'], + service_context=service_context, + ) + + return cls(client=client) + + def _register_default_handlers(self): + # S3 customizations + self._session.register( + 'creating-client-class.s3', + boto3.utils.lazy_call( + 'boto3.s3.inject.inject_s3_transfer_methods' + ), + ) + self._session.register( + 'creating-resource-class.s3.Bucket', + boto3.utils.lazy_call('boto3.s3.inject.inject_bucket_methods'), + ) + self._session.register( + 'creating-resource-class.s3.Object', + boto3.utils.lazy_call('boto3.s3.inject.inject_object_methods'), + ) + self._session.register( + 'creating-resource-class.s3.ObjectSummary', + boto3.utils.lazy_call( + 'boto3.s3.inject.inject_object_summary_methods' + ), + ) + + # DynamoDb customizations + self._session.register( + 'creating-resource-class.dynamodb', + boto3.utils.lazy_call( + 'boto3.dynamodb.transform.register_high_level_interface' + ), + unique_id='high-level-dynamodb', + ) + self._session.register( + 'creating-resource-class.dynamodb.Table', + boto3.utils.lazy_call( + 'boto3.dynamodb.table.register_table_methods' + ), + unique_id='high-level-dynamodb-table', + ) + + # EC2 Customizations + self._session.register( + 'creating-resource-class.ec2.ServiceResource', + boto3.utils.lazy_call('boto3.ec2.createtags.inject_create_tags'), + ) + + self._session.register( + 'creating-resource-class.ec2.Instance', + boto3.utils.lazy_call( + 'boto3.ec2.deletetags.inject_delete_tags', + event_emitter=self.events, + ), + ) + + def _account_id_set_without_credentials( + self, + *, + aws_account_id, + aws_access_key_id, + aws_secret_access_key, + **kwargs, + ): + if aws_account_id is None: + return False + elif aws_access_key_id is None or aws_secret_access_key is None: + return True + return False diff --git a/py311/lib/python3.11/site-packages/boto3/utils.py b/py311/lib/python3.11/site-packages/boto3/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..289bbdf6c4427028c1fd445168572e512bd0e3ca --- /dev/null +++ b/py311/lib/python3.11/site-packages/boto3/utils.py @@ -0,0 +1,90 @@ +# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# https://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +from collections import namedtuple +from importlib import import_module + +_ServiceContext = namedtuple( + 'ServiceContext', + [ + 'service_name', + 'service_model', + 'service_waiter_model', + 'resource_json_definitions', + ], +) + + +class ServiceContext(_ServiceContext): + """Provides important service-wide, read-only information about a service + + :type service_name: str + :param service_name: The name of the service + + :type service_model: :py:class:`botocore.model.ServiceModel` + :param service_model: The model of the service. + + :type service_waiter_model: :py:class:`botocore.waiter.WaiterModel` or + a waiter model-like object such as + :py:class:`boto3.utils.LazyLoadedWaiterModel` + :param service_waiter_model: The waiter model of the service. 
+
+    :type resource_json_definitions: dict
+    :param resource_json_definitions: The loaded json models of all resource
+        shapes for a service. It is the equivalent of loading a
+        ``resource-1.json`` and retrieving the value at the key "resources".
+    """
+
+    pass
+
+
+def lazy_call(full_name, **kwargs):
+    parent_kwargs = kwargs
+
+    def _handler(**kwargs):
+        module, function_name = full_name.rsplit('.', 1)
+        module = import_module(module)
+        kwargs.update(parent_kwargs)
+        return getattr(module, function_name)(**kwargs)
+
+    return _handler
+
+
+def inject_attribute(class_attributes, name, value):
+    if name in class_attributes:
+        raise RuntimeError(
+            f'Cannot inject class attribute "{name}", attribute '
+            f'already exists in class dict.'
+        )
+    else:
+        class_attributes[name] = value
+
+
+class LazyLoadedWaiterModel:
+    """A lazily loaded waiter model
+
+    This does not load the service waiter model until an attempt is made
+    to retrieve the waiter model for a specific waiter. This is helpful
+    in docstring generation, where we do not actually need to grab
+    the waiter-2.json until it is accessed through a ``get_waiter`` call
+    when the docstring is generated/accessed.
+    """
+
+    def __init__(self, bc_session, service_name, api_version):
+        self._session = bc_session
+        self._service_name = service_name
+        self._api_version = api_version
+
+    def get_waiter(self, waiter_name):
+        return self._session.get_waiter_model(
+            self._service_name, self._api_version
+        ).get_waiter(waiter_name)
diff --git a/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/INSTALLER
@@ -0,0 +1 @@
+uv
\ No newline at end of file
diff --git a/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/LICENSE.txt b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f433b1a53f5b830a205fd2df78e2b34974656c7b
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/LICENSE.txt
@@ -0,0 +1,177 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/METADATA b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..faffcaa2fe12f1cef653cbac48365cc7f8894627 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/METADATA @@ -0,0 +1,151 @@ +Metadata-Version: 2.1 +Name: botocore +Version: 1.42.27 +Summary: Low-level, data-driven core of boto 3. +Home-page: https://github.com/boto/botocore +Author: Amazon Web Services +License: Apache-2.0 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: Natural Language :: English +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Requires-Python: >= 3.9 +License-File: LICENSE.txt +License-File: NOTICE +Requires-Dist: jmespath (<2.0.0,>=0.7.1) +Requires-Dist: python-dateutil (<3.0.0,>=2.1) +Requires-Dist: urllib3 (<1.27,>=1.25.4) ; python_version < "3.10" +Requires-Dist: urllib3 (!=2.2.0,<3,>=1.25.4) ; python_version >= "3.10" +Provides-Extra: crt +Requires-Dist: awscrt (==0.29.2) ; extra == 'crt' + +botocore +======== + +|Version| |Python| |License| + +A low-level interface to a growing number of Amazon Web Services. The +botocore package is the foundation for the +`AWS CLI `__ as well as +`boto3 `__. + +Botocore is maintained and published by `Amazon Web Services`_. + +Notices +------- + +On 2026-04-29, support for Python 3.9 will end for Botocore. This follows the +Python Software Foundation `end of support `__ +for the runtime which occurred on 2025-10-31. + +On 2025-04-22, support for Python 3.8 ended for Botocore. 
This follows the
+Python Software Foundation `end of support `__
+for the runtime, which occurred on 2024-10-07.
+
+For more information, see this `blog post `__.
+
+.. _`Amazon Web Services`: https://aws.amazon.com/what-is-aws/
+.. |Python| image:: https://img.shields.io/pypi/pyversions/botocore.svg?style=flat
+    :target: https://pypi.python.org/pypi/botocore/
+    :alt: Python Versions
+.. |Version| image:: http://img.shields.io/pypi/v/botocore.svg?style=flat
+    :target: https://pypi.python.org/pypi/botocore/
+    :alt: Package Version
+.. |License| image:: http://img.shields.io/pypi/l/botocore.svg?style=flat
+    :target: https://github.com/boto/botocore/blob/develop/LICENSE.txt
+    :alt: License
+
+Getting Started
+---------------
+Assuming that you have Python installed, you can either set up a development environment and install the required dependencies like this:
+
+.. code-block:: sh
+
+    $ git clone https://github.com/boto/botocore.git
+    $ cd botocore
+    $ python -m venv .venv
+    ...
+    $ source .venv/bin/activate
+    $ python -m pip install -r requirements.txt
+    $ python -m pip install -e .
+
+or install the library from PyPI using ``pip``:
+
+.. code-block:: sh
+
+    $ pip install botocore
+
+Using Botocore
+~~~~~~~~~~~~~~
+After installing botocore, set up credentials (e.g., in ``~/.aws/credentials``):
+
+.. code-block:: ini
+
+    [default]
+    aws_access_key_id = YOUR_KEY
+    aws_secret_access_key = YOUR_SECRET
+
+Then, set up a default region (e.g., in ``~/.aws/config``):
+
+.. code-block:: ini
+
+    [default]
+    region=us-east-1
+
+Other credentials configuration methods can be found `here `__
+
+Then, from a Python interpreter:
+
+.. code-block:: python
+
+    >>> import botocore.session
+    >>> session = botocore.session.get_session()
+    >>> client = session.create_client('ec2')
+    >>> print(client.describe_instances())
+
+
+Getting Help
+------------
+
+We use GitHub issues for tracking bugs and feature requests and have limited
+bandwidth to address them. Please use these community resources for getting
+help. Please note many of the same resources available for ``boto3`` are
+applicable for ``botocore``:
+
+* Ask a question on `Stack Overflow `__ and tag it with `boto3 `__
+* Open a support ticket with `AWS Support `__
+* If it turns out that you may have found a bug, please `open an issue `__
+
+
+Contributing
+------------
+
+We value feedback and contributions from our community. Whether it's a bug report, new feature, correction, or additional documentation, we welcome your issues and pull requests. Please read through this `CONTRIBUTING `__ document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your contribution.
+
+
+Maintenance and Support for SDK Major Versions
+----------------------------------------------
+
+Botocore was made generally available on 06/22/2015 and is currently in the full support phase of the availability life cycle.
+ +For information about maintenance and support for SDK major versions and their underlying dependencies, see the following in the AWS SDKs and Tools Reference Guide: + +* `AWS SDKs and Tools Maintenance Policy `__ +* `AWS SDKs and Tools Version Support Matrix `__ + + +More Resources +-------------- + +* `NOTICE `__ +* `Changelog `__ +* `License `__ diff --git a/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/NOTICE b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/NOTICE new file mode 100644 index 0000000000000000000000000000000000000000..edcc3cd78ef78effb3203d4e21f572a92e31f8eb --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/NOTICE @@ -0,0 +1,60 @@ +Botocore +Copyright 2012-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +---- + +Botocore includes vendorized parts of the requests python library for backwards compatibility. + +Requests License +================ + +Copyright 2013 Kenneth Reitz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Botocore includes vendorized parts of the urllib3 library for backwards compatibility. + +Urllib3 License +=============== + +This is the MIT license: http://www.opensource.org/licenses/mit-license.php + +Copyright 2008-2011 Andrey Petrov and contributors (see CONTRIBUTORS.txt), +Modifications copyright 2012 Kenneth Reitz. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons +to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE +FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +Bundle of CA Root Certificates +============================== + +***** BEGIN LICENSE BLOCK ***** +This Source Code Form is subject to the terms of the +Mozilla Public License, v. 2.0. If a copy of the MPL +was not distributed with this file, You can obtain +one at http://mozilla.org/MPL/2.0/. 
+ +***** END LICENSE BLOCK ***** diff --git a/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/RECORD b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..f90a6e109144fb6ed3b7a4fd5d124bdb50970c87 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/RECORD @@ -0,0 +1,1928 @@ +botocore-1.42.27.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +botocore-1.42.27.dist-info/LICENSE.txt,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 +botocore-1.42.27.dist-info/METADATA,sha256=eOFxJRoiKED8sPk_qOM15nrQd-Fbm9JH4yUWjqsjR6c,5869 +botocore-1.42.27.dist-info/NOTICE,sha256=HRxabz1oyxH0-tGvqGp0UNAobxXBdu8OoEjyVbRtlbA,2467 +botocore-1.42.27.dist-info/RECORD,, +botocore-1.42.27.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +botocore-1.42.27.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91 +botocore-1.42.27.dist-info/top_level.txt,sha256=IdlNr9dnwi3lQt66dKnShE5HBUhIqBFqJmVhm11aijk,9 +botocore/__init__.py,sha256=zi-8TJYz6x9Uf7lA2QGXb98z5XQcs5M_G_8qad-j3jA,8019 +botocore/args.py,sha256=EHFfRFzpg9CE2S8Hgx3sDpj48RBZwlgh24FAfqwsEcI,39011 +botocore/auth.py,sha256=T0m-kVIqo28grLAKRqY49GLed0lfF2rI2ZjRdGAvUgA,46062 +botocore/awsrequest.py,sha256=PvhLGpW6ziN2hGUnT_jDdhR_LGc1jh2NVPrdGfaFlOw,23178 +botocore/cacert.pem,sha256=nW1QIfzIoiMvzo60s_mC3EhCUtVVSTrFwqPL8ssZQ4o,266617 +botocore/client.py,sha256=kwQ0qbTN67ouIi2lHZVhlgl-cvQEWguunKWHrgWJLgg,54154 +botocore/compat.py,sha256=5PGk-Hh7OVi6_7U8BmEifsaTn-scP-ahWp6hdUAKJ5s,11769 +botocore/compress.py,sha256=Qxl9IGVCpZ2gQG5KE_iqWQgbXvj_I70jA5yNtgZL1iY,4533 +botocore/config.py,sha256=hByTAk2HKH-yj1nehNR3fsIl0h3Er22b1lulmhxXyNA,20255 +botocore/configloader.py,sha256=NTejI7b9UGUXBv2uKiPaXH19Lgl30LY5ujZkXRcFpHs,10039 +botocore/configprovider.py,sha256=RxILFRpW14PxPOaI_mlBpbgzjkeHysqgwdHRnWBFzRM,38092 +botocore/context.py,sha256=XQRDA0YPajIjb6zQres-QQNOtV0L7B6UDfBHrza4W9E,3782 +botocore/credentials.py,sha256=OPlVYcVTx-dtnabSPTCnedz7BAOiVMuYVi6S4SzAsOg,101908 +botocore/crt/__init__.py,sha256=kCXQL93gdg5yBQJOTp7YFLl9wYNy4tV_5TAyJq0asD0,1006 +botocore/crt/auth.py,sha256=ReT3jB_J6H-0wD3fogG4uuyrwwka14d7UJZ5weujftU,25076 +botocore/data/_retry.json,sha256=0dIGY-kUA1xaYn9-2YHgunpCeettHM3hKEYoTOirc6o,7270 +botocore/data/accessanalyzer/2019-11-01/endpoint-rule-set-1.json.gz,sha256=AOdG9hEDFHo8yIQmCXYfHStVOf8j_aDGxiiaxgSkKig,1237 +botocore/data/accessanalyzer/2019-11-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/accessanalyzer/2019-11-01/paginators-1.json,sha256=8cCsaqYmzBJj1naqrZSRYos_QGyOVec_G_9xuLEApG8,1908 +botocore/data/accessanalyzer/2019-11-01/paginators-1.sdk-extras.json,sha256=nwsOcoMZ1GDzrFfMc6_Gx0tNRKwt5b8XddGiZmkAS2s,600 +botocore/data/accessanalyzer/2019-11-01/service-2.json.gz,sha256=yWN5PDLNXmmHM6vHc5oUDxvvi5qIlnTcvr2PIAq-Muo,27631 +botocore/data/account/2021-02-01/endpoint-rule-set-1.json.gz,sha256=YI5Z7mpU4fXNhSveC-NIb62GKZZGR86c6-eR1x6cet8,1379 +botocore/data/account/2021-02-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/account/2021-02-01/paginators-1.json,sha256=TCku1Qs1la1Ggv8u8dKSYP2E5i5sWpmhRmL4zSR87RQ,185 +botocore/data/account/2021-02-01/service-2.json.gz,sha256=g8mMjRypBmoupRsrK6Pmq4s60sleQOzK158v37jqqSc,6344 +botocore/data/acm-pca/2017-08-22/endpoint-rule-set-1.json.gz,sha256=8dGkBQ9XLGIqNSzIMbN2palFoDc45VaosmDZ33YWNWA,1234 
+botocore/data/acm-pca/2017-08-22/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/acm-pca/2017-08-22/paginators-1.json,sha256=q2wFRetchlBt43qtOCTJ_Qw49u-LnRgmPdEn1j_j50A,537 +botocore/data/acm-pca/2017-08-22/service-2.json.gz,sha256=BdZ2ykkyXxAamfEl_KCrX-z9alK_vaHtER8UX7eI2-0,24950 +botocore/data/acm-pca/2017-08-22/waiters-2.json,sha256=PH8MS91fUjXWbhd08bgVhUC-_SQPmLrJOGkGo6j6jaU,1759 +botocore/data/acm/2015-12-08/endpoint-rule-set-1.json.gz,sha256=87BhCvZBhjWnKrU0Mx0Tb80DhajgqX2ov-HhK2vFe20,1230 +botocore/data/acm/2015-12-08/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/acm/2015-12-08/paginators-1.json,sha256=oB2exj3JKzcsLCvfBeMqYawlxz6YghtvUQlwOfdTY4g,203 +botocore/data/acm/2015-12-08/service-2.json.gz,sha256=1N6_f6cFINzHh3VGcqMo-2IvYPIOgsnZx8m1j-vCid4,14816 +botocore/data/acm/2015-12-08/waiters-2.json,sha256=yHGQEXzDfVDuG0r6SRAMf4LYJdVcSwxxN4w6Op3t_wE,818 +botocore/data/aiops/2018-05-10/endpoint-rule-set-1.json.gz,sha256=TSKWUqvy8-Kfm5bGte5GDhjreX8EP4keWbJpdKUt-bc,1296 +botocore/data/aiops/2018-05-10/paginators-1.json,sha256=f0tLATJ2XSci2en4kooSFQ-htod7hyA7nf54-4ycaIg,209 +botocore/data/aiops/2018-05-10/service-2.json.gz,sha256=msJDGQud-taTPfAOw7dtqNLlF6JnQBPCTTLEwXDOp4w,6128 +botocore/data/aiops/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/amp/2020-08-01/endpoint-rule-set-1.json.gz,sha256=EvWTPLL_QaLXSaM8894umXYg0lUfzBIu-L151gtCVbk,1145 +botocore/data/amp/2020-08-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/amp/2020-08-01/paginators-1.json,sha256=gOCl7CM-ELiCZZ9BzIfEbHwzBmxizZraTadvic-_kI8,717 +botocore/data/amp/2020-08-01/service-2.json.gz,sha256=gUxshYaI2Vnl-78viDcNZEjCOFZZYlEPcck-wzeSMUs,18753 +botocore/data/amp/2020-08-01/waiters-2.json,sha256=eeRO0PVcJh6MPtsVwtDu4vW0caXV1XwqJAdBHvUnfzY,3387 +botocore/data/amplify/2017-07-25/endpoint-rule-set-1.json.gz,sha256=5i2StqWGP3ABhmjsmKcOeLDhxWPAdU-TpJfsOf3QfA0,1149 +botocore/data/amplify/2017-07-25/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/amplify/2017-07-25/paginators-1.json,sha256=XJ4xwNrUExhAxy-8K8JJAPnBhdRZO7FB6NGTrgr_qZQ,685 +botocore/data/amplify/2017-07-25/service-2.json.gz,sha256=9AoRmGk_V0FA_oBPvRSs5j_VGO61t75X77XEw9yZ6z0,17604 +botocore/data/amplifybackend/2020-08-11/endpoint-rule-set-1.json.gz,sha256=EN-kmCYCoDj5nvn-kciqA9RRKsS7QU1MDKLW8HETtkw,1154 +botocore/data/amplifybackend/2020-08-11/paginators-1.json,sha256=0JG13-2KlCwca-Pwz7d5Mp3WIttu4BpwDusqxMXF9XY,186 +botocore/data/amplifybackend/2020-08-11/service-2.json.gz,sha256=LUDmIV-XTOt5ffqOKPq83GK8u7x5n4rQRTtbrRGIhTw,10990 +botocore/data/amplifyuibuilder/2021-08-11/endpoint-rule-set-1.json.gz,sha256=_UhNt47mwQT-tsHteVQz_6zBvUJER9NZOcRSlzx_MNA,1156 +botocore/data/amplifyuibuilder/2021-08-11/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/amplifyuibuilder/2021-08-11/paginators-1.json,sha256=idtki67MCJcfs_brVKsvknxJbZtDfS-IK3cakM1IFCI,1063 +botocore/data/amplifyuibuilder/2021-08-11/service-2.json.gz,sha256=C5QPRNDwvBr6aaO8q5J_SgISMYdyABwCeKQgN009if4,15634 +botocore/data/amplifyuibuilder/2021-08-11/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/apigateway/2015-07-09/endpoint-rule-set-1.json.gz,sha256=hOzKgmjWzra4unSPjgrZTi7dPMa0JMLmjZ1Jh1n9XBY,1151 +botocore/data/apigateway/2015-07-09/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 
+botocore/data/apigateway/2015-07-09/paginators-1.json,sha256=gwAb1K7CkHdC49pAfwZMgaT18Hm1r5qDK1m_6m-Ki9w,2913 +botocore/data/apigateway/2015-07-09/service-2.json.gz,sha256=FqnnJcSCLOM-JHsO33-usNH2FEfamUXnCcJw0Uzqrzk,39734 +botocore/data/apigatewaymanagementapi/2018-11-29/endpoint-rule-set-1.json.gz,sha256=TpRsBzvpuU854NULZzptucaFknnwyx6Xvuqda5nrams,1151 +botocore/data/apigatewaymanagementapi/2018-11-29/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/apigatewaymanagementapi/2018-11-29/service-2.json.gz,sha256=nB4cFKBBAur1d2GioFryrE-IdVwUjJ17Zaldu3qFdkE,1444 +botocore/data/apigatewayv2/2018-11-29/endpoint-rule-set-1.json.gz,sha256=hOzKgmjWzra4unSPjgrZTi7dPMa0JMLmjZ1Jh1n9XBY,1151 +botocore/data/apigatewayv2/2018-11-29/paginators-1.json,sha256=uQijp2s8aofeR6MXY0KDrgYJXF8Dk2P34Lbw6H9utSk,2457 +botocore/data/apigatewayv2/2018-11-29/service-2.json.gz,sha256=rI8DOgZqMflbLxKdNDOuB-MUELKqVGj_Ed34wsTm3_g,53301 +botocore/data/appconfig/2019-10-09/endpoint-rule-set-1.json.gz,sha256=SFgFQ0d-CLj-2JRWnq6hHYaVCTJJJf1DuOfwNL7ZLsg,1230 +botocore/data/appconfig/2019-10-09/examples-1.json,sha256=lm2meYHY2djHXZ_3lYZa2PxELHhVDtZdMkVw4IWCI8Y,25502 +botocore/data/appconfig/2019-10-09/paginators-1.json,sha256=DlvXrqKcTiVi3Yv2rStPwl5O1kqSQaiyRGD_fQugFEQ,1367 +botocore/data/appconfig/2019-10-09/service-2.json.gz,sha256=_MKQGgTyXSs0Xhfvym4Q5oGtVbcoOD7hVVRfEE4znBE,19510 +botocore/data/appconfig/2019-10-09/waiters-2.json,sha256=1_6Y2OdolOE882ZkaU3E11-WALKr3g4cj_KZwxW_jmc,1217 +botocore/data/appconfigdata/2021-11-11/endpoint-rule-set-1.json.gz,sha256=Wo2ay7KpMjQ3FoF38giO5xBXWb6hm6NGhQbckeYnZOQ,1235 +botocore/data/appconfigdata/2021-11-11/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/appconfigdata/2021-11-11/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/appconfigdata/2021-11-11/service-2.json.gz,sha256=v44WTIBXR73S2Qmb2WMWDuXkgZZ7DugkIUyJYueZRSc,3114 +botocore/data/appfabric/2023-05-19/endpoint-rule-set-1.json.gz,sha256=byw1T-6nGc0vI2drxNZirOrLMEEu5c0ua8Zv8Fse6tM,1296 +botocore/data/appfabric/2023-05-19/paginators-1.json,sha256=AceDN9kDs832sLebyXTQMYza-dMZ8m2hsVyzbqxUXnQ,745 +botocore/data/appfabric/2023-05-19/service-2.json.gz,sha256=weLqNpeujruoDuDRUfkSVNLjms_8ohsW4FZtFUuIZkI,8601 +botocore/data/appfabric/2023-05-19/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/appflow/2020-08-23/endpoint-rule-set-1.json.gz,sha256=SjhmBCwBqCZWYB8ih7YaHZ-UIyH89ym7lJuSE_0Ae_w,1149 +botocore/data/appflow/2020-08-23/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/appflow/2020-08-23/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/appflow/2020-08-23/service-2.json.gz,sha256=nnWJD0AZgWqkgqvvsiYI7tcSCzyfvbwNs3uy7G0work,32811 +botocore/data/appintegrations/2020-07-29/endpoint-rule-set-1.json.gz,sha256=VGSB6Hq3FjuKqP6TAqlLTrtJaRjU8jef9l3uhHT5sG4,1153 +botocore/data/appintegrations/2020-07-29/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/appintegrations/2020-07-29/paginators-1.json,sha256=BYTiBgFZxnU-sQgufFZqZnJtClnQxZqlwuhNGC6J1vw,1147 +botocore/data/appintegrations/2020-07-29/service-2.json.gz,sha256=IAYeDaz0RIvtnTLOIfdgatddwPYAHhCEI4DLn7WqEio,6981 +botocore/data/application-autoscaling/2016-02-06/endpoint-rule-set-1.json.gz,sha256=aTMnbQWypqHFG3OInHpibh-bSf1BfH_gtOoLloFMvwE,1244 
+botocore/data/application-autoscaling/2016-02-06/examples-1.json,sha256=_IICzVD2rqZHmWHwRCsR313_WXRitdmWhlhDtSzomVE,8473 +botocore/data/application-autoscaling/2016-02-06/paginators-1.json,sha256=Yg5NHu8W50qc_r8JCtkNGMbKd861R4w8wQFdrbV0rR0,751 +botocore/data/application-autoscaling/2016-02-06/service-2.json.gz,sha256=-7P5nmt1-0a9z9CR0UfeBTj81B_FCp9xv2Xh8DvWoi4,24469 +botocore/data/application-insights/2018-11-25/endpoint-rule-set-1.json.gz,sha256=kZwy0iiXRK0ic6DqNTonxQKf9K5HyzWPsFFAS3zRC0w,1158 +botocore/data/application-insights/2018-11-25/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/application-insights/2018-11-25/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/application-insights/2018-11-25/service-2.json.gz,sha256=Kg2uFd5mGul_gImq38o0N3DpX7sbEMMHx7x20uahpng,12426 +botocore/data/application-signals/2024-04-15/endpoint-rule-set-1.json.gz,sha256=Zp5uW2bq_SOWgYB-fu8hWn2LkqLvHH2RxGKYIVmfGuc,840 +botocore/data/application-signals/2024-04-15/paginators-1.json,sha256=qxyHVSFC0Jy0F-_lae2jvxP4YeESvlp2me7mBB6INVY,1460 +botocore/data/application-signals/2024-04-15/paginators-1.sdk-extras.json,sha256=q7il5SmbvqgIkSGNSz9N91-QUBoPCGwaVGqozML7COM,952 +botocore/data/application-signals/2024-04-15/service-2.json.gz,sha256=u9xzFJDFt5rqAgAE6agJZxDhzzrsHmqFQ32JIi4l-ys,24454 +botocore/data/applicationcostprofiler/2020-09-10/endpoint-rule-set-1.json.gz,sha256=3JtFZ_lRhMRlPnyxwiEJ7pCi5JmrU1q_A6r6LqxsqcQ,1164 +botocore/data/applicationcostprofiler/2020-09-10/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/applicationcostprofiler/2020-09-10/paginators-1.json,sha256=2by8SKjvkqf2tkVd1NxlMiNsOoEUr6V3LekGj4k4yWg,205 +botocore/data/applicationcostprofiler/2020-09-10/service-2.json.gz,sha256=MFKNuglgwuvKi7g-lrGZ34mal_QGTEYbvRt7psZ34Es,2850 +botocore/data/appmesh/2018-10-01/endpoint-rule-set-1.json.gz,sha256=A_At1naTosEfk3OdFrHr86eI-8ESY9VGsLwAPW6pE7s,1289 +botocore/data/appmesh/2018-10-01/examples-1.json,sha256=IKnIAQr_hsb-b42MXo7jKoBKd1lTzVS0bsbWMSTIwg8,41 +botocore/data/appmesh/2018-10-01/paginators-1.json,sha256=-TPoHMW78DG37BJz5SNi67CsUIs4PTTccyUhlXtMBm4,665 +botocore/data/appmesh/2018-10-01/service-2.json.gz,sha256=WFlHGdiIysKgH9NhP_xEB06RMxM58Gbma7GnNlSShZk,7902 +botocore/data/appmesh/2019-01-25/endpoint-rule-set-1.json.gz,sha256=DKjcEkgHyDtxjjXnAJyGrTaFSAntDyku9l7odEPLbOI,1149 +botocore/data/appmesh/2019-01-25/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/appmesh/2019-01-25/paginators-1.json,sha256=z6PCEVS0COSk5Nf9KXgXsZ3I9gcq9whv7yonh8s1YMM,1334 +botocore/data/appmesh/2019-01-25/service-2.json.gz,sha256=HhluIk72DJltt6zygxl5VpuGP6PH_rYc6a9Jejv0Y40,23271 +botocore/data/apprunner/2020-05-15/endpoint-rule-set-1.json.gz,sha256=gykpX7sEAkqdDhzeCURame7sVHnDlRZNUOjfqit5xEI,1149 +botocore/data/apprunner/2020-05-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/apprunner/2020-05-15/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/apprunner/2020-05-15/service-2.json.gz,sha256=eOnoUhMq_WhNpjfOsDrLvn2SB2XX8o86jFmR3m8a-gk,19689 +botocore/data/appstream/2016-12-01/endpoint-rule-set-1.json.gz,sha256=Rwk1tRVTynjuK_R_9CzRmQgeKwlvo7MspO1ujTpPuPM,1242 +botocore/data/appstream/2016-12-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/appstream/2016-12-01/paginators-1.json,sha256=agUpesJpo5f8dC0kH0m0asMYWn4N4MUHae5LK8W0Hwo,1584 
+botocore/data/appstream/2016-12-01/service-2.json.gz,sha256=xzOtEE4Nuke9fcYVhOLVtZX7Qh-Xs5GViyq_5zcmy-c,38678 +botocore/data/appstream/2016-12-01/waiters-2.json,sha256=XZ1LQBLoJ56YEhaTqi2Bs5XKhax6pr9LRsQVIo7kHck,1245 +botocore/data/appsync/2017-07-25/endpoint-rule-set-1.json.gz,sha256=E6ah1Urro_bUOHMkyh_iq_MknyHnZvGHWTNT5qpi7AM,1149 +botocore/data/appsync/2017-07-25/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/appsync/2017-07-25/paginators-1.json,sha256=dFnt5T5D3rtFs2xAFiCK0lj-5A8p_ZNysOMx2vA-vFo,2052 +botocore/data/appsync/2017-07-25/service-2.json.gz,sha256=SUrL7NjXzA3fIMTElTlKXoy1Lk1MDU157y8Uwi8xJws,31441 +botocore/data/arc-region-switch/2022-07-26/endpoint-rule-set-1.json.gz,sha256=bOKdTxr6NmKQMBs7bDwi_hNM4a_AM8nahl_hPSJn1XY,1320 +botocore/data/arc-region-switch/2022-07-26/paginators-1.json,sha256=0d1_BtM6AvPZ5SwAkVXkRgdmQxLicfTo7EQFZHyWCUM,1383 +botocore/data/arc-region-switch/2022-07-26/paginators-1.sdk-extras.json,sha256=oAPYMezaJ5qMLpj-HQ8hn5DR_KWiDk0Yd7upZBcFeQE,725 +botocore/data/arc-region-switch/2022-07-26/service-2.json.gz,sha256=wW9VpVqzY1xcoFJXBQxAjdNqTIBBcbwDBr8lB_Af7rk,12595 +botocore/data/arc-region-switch/2022-07-26/waiters-2.json,sha256=LCN4G74d_dY3Oed3NEBHvbMUvXZLg5Km0ioiJte6nW4,1499 +botocore/data/arc-zonal-shift/2022-10-30/endpoint-rule-set-1.json.gz,sha256=aVot6r7vJxdg19BpnwPALoI_rzZE9FZRYijwQsGU3cw,1305 +botocore/data/arc-zonal-shift/2022-10-30/paginators-1.json,sha256=wx99_DrI6RWKkZuUiP1HQ1xacRiIoUsgPuxVHGpvZGU,515 +botocore/data/arc-zonal-shift/2022-10-30/service-2.json.gz,sha256=lH8FJIbhc1e8M3jGJ9TmLEO_aBK4tJeta8f4G0NzupU,10676 +botocore/data/arc-zonal-shift/2022-10-30/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/artifact/2018-05-10/endpoint-rule-set-1.json.gz,sha256=4aCVrK1rcZP3zzbU_gP8bJGgJrzljOL2hXPiUnGEX54,1378 +botocore/data/artifact/2018-05-10/paginators-1.json,sha256=iEHcZhMQQtp5NM20fJVZJDialoOTzbsz03OSePilRxc,534 +botocore/data/artifact/2018-05-10/service-2.json.gz,sha256=yd3UkBM0DSYIaYO1QyUYhDLdRlctRKXO4IPcHMAlqns,3431 +botocore/data/artifact/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/athena/2017-05-18/endpoint-rule-set-1.json.gz,sha256=GD0XKC89C0dgk_nWGKIl00jVs6cysBYep0h6RsFAMlo,1147 +botocore/data/athena/2017-05-18/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/athena/2017-05-18/paginators-1.json,sha256=lLXYrCWDDFVhjAdFEhKyoc0-zEe2YYUM4nR9vXRBDgE,1330 +botocore/data/athena/2017-05-18/service-2.json.gz,sha256=JynFvN9qbl_FpHBlEGkoyL1rzsKEkXjCkKKOMFpMzEA,33502 +botocore/data/auditmanager/2017-07-25/endpoint-rule-set-1.json.gz,sha256=CsfjepvlYonuHqWCLpIFnjNDgMG83LcdtoP2fHKrTQQ,1152 +botocore/data/auditmanager/2017-07-25/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/auditmanager/2017-07-25/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/auditmanager/2017-07-25/service-2.json.gz,sha256=yOzI7oOYBL15zc_4xq4MSgKDQARvgw8Yp36slEqM2aY,27932 +botocore/data/autoscaling-plans/2018-01-06/endpoint-rule-set-1.json.gz,sha256=xQtiFN-0Dijv1dWS_Nn--i5-OerHZrcZM4afA24No_I,1239 +botocore/data/autoscaling-plans/2018-01-06/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/autoscaling-plans/2018-01-06/paginators-1.json,sha256=Au_RY0jJAvQZ-sAmZQk8FXYyrw1rDVD4YILlb6sDxh8,389 +botocore/data/autoscaling-plans/2018-01-06/service-2.json.gz,sha256=2kzcetXXAaF_kHWnam-IbmMBoyroO3vZ4XVd8c1tdws,9106 
+botocore/data/autoscaling/2011-01-01/endpoint-rule-set-1.json.gz,sha256=JJwp_vcXRekAis6LMewEe4nQT7wkqi_9pJt9Adqnq9w,1236 +botocore/data/autoscaling/2011-01-01/examples-1.json,sha256=-VLit9j2MnCph5AkDejxys_Iqt3JaUweEkC1B0_37j4,54289 +botocore/data/autoscaling/2011-01-01/paginators-1.json,sha256=hM_o0QSb61rvEQvua3IVpSLBUVCEy2BcwdQv1D_wSXk,2033 +botocore/data/autoscaling/2011-01-01/paginators-1.sdk-extras.json,sha256=FWBD5vKeS-MHcMzdipl2xKN3ddQu81Dk19sMd_82lKs,177 +botocore/data/autoscaling/2011-01-01/service-2.json.gz,sha256=IVoQyhH5oIIDu9kpZ9oBfhZxbVLIEZYZrBMvxOHlJUQ,63462 +botocore/data/b2bi/2022-06-23/endpoint-rule-set-1.json.gz,sha256=jdSe9yMbA3akpaoLmdqXfC86sTDQFhL-OKZAkjf3e8w,1296 +botocore/data/b2bi/2022-06-23/paginators-1.json,sha256=7ttS6Z0bHTlax4HX4atDWB9qbLUxoE9OTzdYeT62jiE,697 +botocore/data/b2bi/2022-06-23/service-2.json.gz,sha256=f9Yy4UlBk8KBYoLAdFB-HeCRd71wNx43p1RmqR9N84Q,19765 +botocore/data/b2bi/2022-06-23/waiters-2.json,sha256=QMq6U9zbKkK3L-Tn-wU5690g011Rd4U2AUe93h__Arg,451 +botocore/data/backup-gateway/2021-01-01/endpoint-rule-set-1.json.gz,sha256=w5aohEjf1epnsL5tR71gk4ubEI4IJ31VoPFL-ZV22qk,1154 +botocore/data/backup-gateway/2021-01-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/backup-gateway/2021-01-01/paginators-1.json,sha256=SBncJ16jo9My_HRd-t9A7KPTxlId0ZP7A9JGuJ8tsiA,531 +botocore/data/backup-gateway/2021-01-01/service-2.json.gz,sha256=IuXwfNw6GKR9lyYwKZKrJK972R5KFh2aZJ4-2zTxwig,7373 +botocore/data/backup/2018-11-15/endpoint-rule-set-1.json.gz,sha256=gMt-yD02Rkbw9Py7rnTMppWBGMK78XV9QCH-HlJ_hgE,1147 +botocore/data/backup/2018-11-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/backup/2018-11-15/paginators-1.json,sha256=Xhdz-lLlNpgVP7ul4OdioPS7yLLUZxH1kENkr3Xblys,3976 +botocore/data/backup/2018-11-15/paginators-1.sdk-extras.json,sha256=5L_0CmNaoKhsX9pvnJgxZ31pDmYLjQF2BoJhpyhUuws,208 +botocore/data/backup/2018-11-15/service-2.json.gz,sha256=p089iuIJnfXJzWAKXVO1xyOFKAfMl8sbQZS4H0yBazg,63845 +botocore/data/backupsearch/2018-05-10/endpoint-rule-set-1.json.gz,sha256=fXuBdm_LdnVdN-YBz8JNtz_rDErIMS91aDerxmR6KvY,837 +botocore/data/backupsearch/2018-05-10/paginators-1.json,sha256=yHP69vTZn8UYtEwENmKyNQAb4VJ4_dvo2jF_qze7r4Y,707 +botocore/data/backupsearch/2018-05-10/service-2.json.gz,sha256=sM4MZ3ikQF21_SVMQRidZ_9mnjRr-04kTDRbx1Mi9wA,7506 +botocore/data/backupsearch/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/batch/2016-08-10/endpoint-rule-set-1.json.gz,sha256=i1SQk5jtlLGrVyyNserm3Aw3BHj_9hXUl8MFBIQCDAo,1266 +botocore/data/batch/2016-08-10/examples-1.json,sha256=OVGvwREzgw_LYc8FpiMwLMNKVBoPq2uadWkT4icK_aM,20292 +botocore/data/batch/2016-08-10/paginators-1.json,sha256=ZyYVhJ5W3ovu-L9f7gBaRB4t47W8jwftypzcghD1FCs,1622 +botocore/data/batch/2016-08-10/service-2.json.gz,sha256=MD3sjwJC29jIGeSEy1cpZ0pTIDNr6m72Yd72vjf0Pxs,61870 +botocore/data/bcm-dashboards/2025-08-18/endpoint-rule-set-1.json.gz,sha256=7KaSd-MOgB65MfzGzXBVWWcwPP4Z09VPS9mfIrK3ot0,903 +botocore/data/bcm-dashboards/2025-08-18/paginators-1.json,sha256=psuO6Y_5J65x5ln4VJUFBbkyJ6QpacclUKZ0vCybvOY,191 +botocore/data/bcm-dashboards/2025-08-18/service-2.json.gz,sha256=3KoeLQgYso3bXICyiu6iDhq5y3w5rgvSOnd5bzJJjps,6191 +botocore/data/bcm-dashboards/2025-08-18/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/bcm-data-exports/2023-11-26/endpoint-rule-set-1.json.gz,sha256=YvmQqCBL6XMtI7ekvha6CrINig2AYKpkuxftf6zW7rU,1286 
+botocore/data/bcm-data-exports/2023-11-26/paginators-1.json,sha256=O6FqSUDC5izLwZBKGsqYvMoy2ROOd85-Hb7II57VJoY,509 +botocore/data/bcm-data-exports/2023-11-26/service-2.json.gz,sha256=VjK_lZA3dghD3f_0VMNEQaJzdJXAf8l-wQhnvVPY7tA,5122 +botocore/data/bcm-pricing-calculator/2024-06-19/endpoint-rule-set-1.json.gz,sha256=Sg1Wybj8gFwpkTo9pZRLjckoGtlrVWz0_uGhRSFokYE,908 +botocore/data/bcm-pricing-calculator/2024-06-19/paginators-1.json,sha256=2_8le9lK1cG14GXeXUw1W0WMTbfb0tRtdFstFj36z9U,1783 +botocore/data/bcm-pricing-calculator/2024-06-19/service-2.json.gz,sha256=WONJd5ItqigN8rQszDkeNXnNTCeAFv243QKEqOc6Ieo,13412 +botocore/data/bcm-pricing-calculator/2024-06-19/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/bcm-recommended-actions/2024-11-14/endpoint-rule-set-1.json.gz,sha256=v7t21lg8QNlDwplwVuieq3kIBN7ibkEvyQyy5yExYJ0,909 +botocore/data/bcm-recommended-actions/2024-11-14/paginators-1.json,sha256=IVWKoMWh9816owS8FS9WRNbQSsxNHHf2zdJs9WlFSZc,207 +botocore/data/bcm-recommended-actions/2024-11-14/service-2.json.gz,sha256=E5eRRNw2c3WTQ3VdAqqNGfT8dFy8QoTtuuw2EIYthOU,2575 +botocore/data/bcm-recommended-actions/2024-11-14/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/bedrock-agent-runtime/2023-07-26/endpoint-rule-set-1.json.gz,sha256=a2iUdiVpMDhkqpM63Z4EEh-TbCF2QoFW2xIGzBZKlDA,1309 +botocore/data/bedrock-agent-runtime/2023-07-26/paginators-1.json,sha256=C9qRMLkjskdop4L8GtJ_lTOeTAbdbUZxrB96tbMXE54,1344 +botocore/data/bedrock-agent-runtime/2023-07-26/paginators-1.sdk-extras.json,sha256=pTkYapptXTqJqkdaW-BtMU5clcvuIMpBrZsT72LGBsg,163 +botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json.gz,sha256=Au2UziVYbAvamaxC6JCRR9NmP0Z1TBjT5D_AmcKByFg,50680 +botocore/data/bedrock-agent-runtime/2023-07-26/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/bedrock-agent/2023-06-05/endpoint-rule-set-1.json.gz,sha256=J4hIuergQp7x65Z8xQDcVLjQzqtXB8kN-35XesjuwPg,1303 +botocore/data/bedrock-agent/2023-06-05/paginators-1.json,sha256=S_V0LwGpy2m_rxV0kBxgSTQCJCkQMHQImSKu2g2FSBU,2519 +botocore/data/bedrock-agent/2023-06-05/service-2.json.gz,sha256=_D3QMvZS8Ec8L4d6nbOEdmqN3Kxhm5x3NKNfrpTc9B0,58823 +botocore/data/bedrock-agent/2023-06-05/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/bedrock-agentcore-control/2023-06-05/endpoint-rule-set-1.json.gz,sha256=Rm-2K2bXpo6LxDg5rYMYp-DHHtUwI2GtWsMAHKN4xLo,1312 +botocore/data/bedrock-agentcore-control/2023-06-05/paginators-1.json,sha256=mSXMmHW6VJPKJXI1hiV_wcd41sCOT7BVvwnR0tvH0Y4,3025 +botocore/data/bedrock-agentcore-control/2023-06-05/service-2.json.gz,sha256=YmL6yyoSxSr-SlTgYndCPhdopihY1O4GV6nl6jnsTYY,51693 +botocore/data/bedrock-agentcore-control/2023-06-05/waiters-2.json,sha256=ltaYPqrbtAE5fUL4n-cMigpOpoK6A5YGKpaJ5X4lFlc,3961 +botocore/data/bedrock-agentcore/2024-02-28/endpoint-rule-set-1.json.gz,sha256=QzR-YP6XLboeYE8l1Y4amFUrhFmcP9VxtwuI3sEM35I,1306 +botocore/data/bedrock-agentcore/2024-02-28/paginators-1.json,sha256=wu2SqNrrkziC33euLpNROY2iauMnsx8mRK6s3whmAjY,1053 +botocore/data/bedrock-agentcore/2024-02-28/service-2.json.gz,sha256=YfA-XXYQVHS8uRbH7Okp8nb1Yk4XEmCuZk14WSQfnOM,22140 +botocore/data/bedrock-agentcore/2024-02-28/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/bedrock-data-automation-runtime/2024-06-13/endpoint-rule-set-1.json.gz,sha256=gkHwu8WrUxAZT_WOAePcstpZOJBhHjaoEMpqtrU-lLw,1317 
+botocore/data/bedrock-data-automation-runtime/2024-06-13/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/bedrock-data-automation-runtime/2024-06-13/service-2.json.gz,sha256=SfUdqMf_qDQpGc9T_drYzSG3Oy3hJ_5WF4eatQbK7jE,3509 +botocore/data/bedrock-data-automation/2023-07-26/endpoint-rule-set-1.json.gz,sha256=lYr4I4qg6YB-n1_xVkns-bP8vjYh1eI406mDhbJI57k,1310 +botocore/data/bedrock-data-automation/2023-07-26/paginators-1.json,sha256=ws4Quiv4FeVskwu7oEHreNPkY5Qyvc_6E7p2lzFrzZM,367 +botocore/data/bedrock-data-automation/2023-07-26/service-2.json.gz,sha256=JZ6ZQPQzitRx--RZ-aUzIgRIojUAtLrdMVN-SAd4M-g,7262 +botocore/data/bedrock-runtime/2023-09-30/endpoint-rule-set-1.json.gz,sha256=33ghsd9JCQ9HjzPvb-4--7fkysJ03Nu0jNEG5lIoTds,1305 +botocore/data/bedrock-runtime/2023-09-30/paginators-1.json,sha256=f2V5o6U1eaWP23rP4Qybme3wfj71oUfX79uvhvsR-T0,203 +botocore/data/bedrock-runtime/2023-09-30/service-2.json.gz,sha256=FEPhtnKjir-gL8OSaxhF3SjTNPggdHJx6NPLiUTNh2o,27119 +botocore/data/bedrock-runtime/2023-09-30/waiters-2.json,sha256=tj1ZnaqhwmJkUEQlwH7wm1SqY3lg1BvZDfzfPaIgNrY,38 +botocore/data/bedrock/2023-04-20/endpoint-rule-set-1.json.gz,sha256=RN1ykdz5HSUMYF7BcymtbtGeOX5X9EwvKg5j08qLQfw,1298 +botocore/data/bedrock/2023-04-20/paginators-1.json,sha256=ghGmg8k5S2wPf-tl9s2WxbYILaciNaLilnPpBh541xE,3387 +botocore/data/bedrock/2023-04-20/service-2.json.gz,sha256=r2rL6M1r2gWZDCi3c9-_S0uXZ8IGtRfRrnIuLswf9Gs,76287 +botocore/data/bedrock/2023-04-20/waiters-2.json,sha256=tj1ZnaqhwmJkUEQlwH7wm1SqY3lg1BvZDfzfPaIgNrY,38 +botocore/data/billing/2023-09-07/endpoint-rule-set-1.json.gz,sha256=YqB1P_P6LuXLSN7PEMwm4aR3b3pk2Y3uaYYg0eSROGc,1538 +botocore/data/billing/2023-09-07/paginators-1.json,sha256=VVwgGWADfSrHSZlpYtjAy0BSdOZkNLuemV3vr2HZP2M,377 +botocore/data/billing/2023-09-07/service-2.json.gz,sha256=enArbDhZd91b1W6Iq1l-llwM0TaDlCc0H8h941RHs5M,5564 +botocore/data/billing/2023-09-07/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/billingconductor/2021-07-30/endpoint-rule-set-1.json.gz,sha256=z8jbO4EDel67cLI84oK8WA3hFBjl9t3mCscka4cTfKg,1311 +botocore/data/billingconductor/2021-07-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/billingconductor/2021-07-30/paginators-1.json,sha256=C1lDM7aIG0KK8L7HotZs6eXvTQLuxzETH2wAHQdDzqI,2192 +botocore/data/billingconductor/2021-07-30/service-2.json.gz,sha256=7pJsYCQRlviyohLrHRu0GgbKK3odCpIJjzLnZyh7PZY,16405 +botocore/data/billingconductor/2021-07-30/waiters-2.json,sha256=sAGuGxokCpXh7GUF-AzqqNR6DLDE-wgRMhjNJb41AHc,36 +botocore/data/braket/2019-09-01/endpoint-rule-set-1.json.gz,sha256=Mfc-I0ltaGYT6bwzO2OimCPoOYBjUir9dB9TiY8Vf2E,1147 +botocore/data/braket/2019-09-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/braket/2019-09-01/paginators-1.json,sha256=7oQATkyhA3sCLqboUPW6D7XGmWs8prCwyfxvjbvFu8c,691 +botocore/data/braket/2019-09-01/service-2.json.gz,sha256=mgIb0VrYPcIs-pWWG_qf1x51JkH_-IksHl5ZpJHCS_Y,11429 +botocore/data/budgets/2016-10-20/endpoint-rule-set-1.json.gz,sha256=hd9U0SvepoJkEMOTU3A-OeV9nVUdvBxDL8enLBKA2Ys,1791 +botocore/data/budgets/2016-10-20/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/budgets/2016-10-20/paginators-1.json,sha256=4lIRhlnV70H90OPO79aAX2cps42vBAkZcxWDSS40zis,1512 +botocore/data/budgets/2016-10-20/service-2.json.gz,sha256=8NWFGYb5pA57xEXKpd16Kvwe5bRmHNGz-kU_5E-lhhs,14621 
+botocore/data/ce/2017-10-25/endpoint-rule-set-1.json.gz,sha256=rF9K_GiVHNwvtoeCccTLgxRiLpNP-D31iBdHWIsLCUc,1937 +botocore/data/ce/2017-10-25/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/ce/2017-10-25/paginators-1.json,sha256=Ky9BBLdDrEphFKJFV-wK5EN9nalaUevMp1k_V3f_1Xw,2558 +botocore/data/ce/2017-10-25/paginators-1.sdk-extras.json,sha256=PtomDOLRX3ei-nf22GLErTLGutc5yPaMxM3vHtgo0m0,462 +botocore/data/ce/2017-10-25/service-2.json.gz,sha256=1jsBuM_YzbSzfoD2eQzGlguRzZAGCdiW0aYkTag8Qxs,43836 +botocore/data/chatbot/2017-10-11/endpoint-rule-set-1.json.gz,sha256=j8GbcsQhD6ozZSMLU_GdXn_uotmfe0V0v0hCOowHCSw,1295 +botocore/data/chatbot/2017-10-11/paginators-1.json,sha256=dyTPHZL8UEdw0bi3HPFXTYPfk9gRppvCy5ZRh2Vmysw,1723 +botocore/data/chatbot/2017-10-11/service-2.json.gz,sha256=n4mgQAmWIKLDYe5dlK_mwGyj1dWtzCAZQd3uQ1b2cto,10732 +botocore/data/chime-sdk-identity/2021-04-20/endpoint-rule-set-1.json.gz,sha256=TcRyvFeLJnr91pvZXxDjznxW8V111v4T3wPSUaREHJU,1154 +botocore/data/chime-sdk-identity/2021-04-20/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/chime-sdk-identity/2021-04-20/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/chime-sdk-identity/2021-04-20/service-2.json.gz,sha256=l-x256u85A0bt_JPAUurKBe7g8GzfPrr7SGgtofx0fI,8137 +botocore/data/chime-sdk-media-pipelines/2021-07-15/endpoint-rule-set-1.json.gz,sha256=783fk7heJJoMCyZqee1AOf7kVG3IdyLMkLXU7bA9IVI,1158 +botocore/data/chime-sdk-media-pipelines/2021-07-15/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/chime-sdk-media-pipelines/2021-07-15/service-2.json.gz,sha256=qKS_QHF60OdDoUuUmHdlyMQkZ3ALAUoDaiOUZsGWHBI,18799 +botocore/data/chime-sdk-meetings/2021-07-15/endpoint-rule-set-1.json.gz,sha256=tz6hl1y8zaOE5zdvxBJtMsRBtb2EnBklIceJ0o85S_0,1154 +botocore/data/chime-sdk-meetings/2021-07-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/chime-sdk-meetings/2021-07-15/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/chime-sdk-meetings/2021-07-15/service-2.json.gz,sha256=PSv74AT-qq2ebcvk1KwZUOYlcOq0kJ0E5GfGBQ8Qyec,11608 +botocore/data/chime-sdk-messaging/2021-05-15/endpoint-rule-set-1.json.gz,sha256=uWKG7dNmtPnYHZNslE4CtiglOUrahh10o8CsKCtnExY,1154 +botocore/data/chime-sdk-messaging/2021-05-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/chime-sdk-messaging/2021-05-15/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/chime-sdk-messaging/2021-05-15/service-2.json.gz,sha256=WRTxX55wUsFI2TRLBohpNQXimfxo-RZ_cqMe08vMAvI,16422 +botocore/data/chime-sdk-voice/2022-08-03/endpoint-rule-set-1.json.gz,sha256=KqVTpAEGtXOunoLuUGtSIfV5-qEAguPa4J5eNzJAzjI,1301 +botocore/data/chime-sdk-voice/2022-08-03/paginators-1.json,sha256=28096cSFWwRSuJQMmk9A3HNyMAH8wFdjz3F_5pukB8Q,373 +botocore/data/chime-sdk-voice/2022-08-03/service-2.json.gz,sha256=3zQWZuXQLW_juxN5J6wh0Q8jsdhAUmB_FTr1p4zqA6g,23793 +botocore/data/chime/2018-05-01/endpoint-rule-set-1.json.gz,sha256=kfpt6mWB1X9dH4wCtYVbWh9N0bEZ0_1n-Jqrzk7gGJM,1303 +botocore/data/chime/2018-05-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/chime/2018-05-01/paginators-1.json,sha256=eU07vcRnjLd-9RmN_aGGPffN0ZXkpMRFYD_XbcyDy3A,343 +botocore/data/chime/2018-05-01/service-2.json.gz,sha256=oOHqemYHdHiZacergks_P3QYiIQmxsmTsxfFAw0Jiew,15342 
+botocore/data/cleanrooms/2022-02-17/endpoint-rule-set-1.json.gz,sha256=Jl6B0fMrIQZamqCEVvIAQtQerWX2yBYcVuFUQW6JBx0,1300 +botocore/data/cleanrooms/2022-02-17/paginators-1.json,sha256=iPjb26ef_F3feCu4c4D3ES2inGWlFEp3pEqOkg9sp3E,3964 +botocore/data/cleanrooms/2022-02-17/service-2.json.gz,sha256=MKZkN0bkPQTmR3fY7Fs3biUiGES45OMWU4hJjsxOL20,46104 +botocore/data/cleanrooms/2022-02-17/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/cleanroomsml/2023-09-06/endpoint-rule-set-1.json.gz,sha256=Y_rHd9gAwT7VLfO9kZ-jF2iJrMwre4SlEydIMIdZIGw,1302 +botocore/data/cleanroomsml/2023-09-06/paginators-1.json,sha256=-tYvoBkilgzoays9JIwkGktzi_n9C3PGlVvsTol8n7o,3170 +botocore/data/cleanroomsml/2023-09-06/service-2.json.gz,sha256=J9BWlt7Y0PQfBVrC8cW4ATDQwpAlZbuHr1WlOcCIMW0,30400 +botocore/data/cleanroomsml/2023-09-06/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/cloud9/2017-09-23/endpoint-rule-set-1.json.gz,sha256=rWDwtO3ZCIGDPM1NgVmXgQbkvMcrnlCA_B4plYDmZ-A,1149 +botocore/data/cloud9/2017-09-23/examples-1.json,sha256=Jbbei88MR8S4MFnfmPKNTEk_b1NdqqM5R6P781A23JY,9183 +botocore/data/cloud9/2017-09-23/paginators-1.json,sha256=lET7E3FWErLA8In260otKfr3_9oVSr5OTO1zcrBi28w,380 +botocore/data/cloud9/2017-09-23/service-2.json.gz,sha256=sH6Tkfv-fMu-kbTFKcNNyRd5E6z8XtvoyD8ZTlk5GFw,6083 +botocore/data/cloudcontrol/2021-09-30/endpoint-rule-set-1.json.gz,sha256=CPDdkKaLt11TOSirlWRVMF-VjV1ACobP24g64xrrrIM,1154 +botocore/data/cloudcontrol/2021-09-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudcontrol/2021-09-30/paginators-1.json,sha256=Xh6wJghPx6VpGNTTEdpRQIsrJuVeyY5FQNpNLpUdkhc,392 +botocore/data/cloudcontrol/2021-09-30/paginators-1.sdk-extras.json,sha256=9NbQ8xHg5ztdpvYFDl15_74F30ZNPFnSFDxismgvSMg,143 +botocore/data/cloudcontrol/2021-09-30/service-2.json.gz,sha256=q6JKD7nElqxON8e5SVlRFJfMp2BozmMKBQJe0vMt2yk,6492 +botocore/data/cloudcontrol/2021-09-30/waiters-2.json,sha256=US_tyuvbMcXS6IrVB8D817Gg3pGKdCuooDJKz4Ta56U,738 +botocore/data/clouddirectory/2016-05-10/endpoint-rule-set-1.json.gz,sha256=OkV0tPt2muqOtCmXfLsIFzEg2UO6M5oO-Txuph5cZmg,1398 +botocore/data/clouddirectory/2016-05-10/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/clouddirectory/2016-05-10/paginators-1.json,sha256=y8GPuHURJmdagJ3QAI5mxkAzKvdCZwcnfYt3Z-qwgAU,2808 +botocore/data/clouddirectory/2016-05-10/service-2.json.gz,sha256=_OXZ9G2rDdaIwVlX1xy-fVANukjpjKY2HcY3SFg0CRg,22958 +botocore/data/clouddirectory/2017-01-11/endpoint-rule-set-1.json.gz,sha256=Af6J9gkfEtroh08qnhlCGsBVFGd5luYErBd2Se5yKx4,1239 +botocore/data/clouddirectory/2017-01-11/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/clouddirectory/2017-01-11/paginators-1.json,sha256=gIqmeqo-8lsyEDEVDFvc1RJfd0T7c9xN6SdMnxGvSpw,3342 +botocore/data/clouddirectory/2017-01-11/service-2.json.gz,sha256=RLvXKCmO9OgJQonENTQSSSTM5TO3JwnYKLseP_uKdHU,23910 +botocore/data/cloudformation/2010-05-15/endpoint-rule-set-1.json.gz,sha256=BhpXSK10au6hIRFplZzv6aKPWSZ0gvzxN8NDDyiDMTY,1237 +botocore/data/cloudformation/2010-05-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudformation/2010-05-15/paginators-1.json,sha256=wGVn2mkDjwzpNruHooXuaGh29r875VHHC6yO_KAK8ys,3878 +botocore/data/cloudformation/2010-05-15/service-2.json.gz,sha256=W814A__5UaIRC8HPpacmuI6Njw16t99r-Un-yBJp6mk,85186 +botocore/data/cloudformation/2010-05-15/waiters-2.json,sha256=0fp18QpYZmIii-mio63vhFFVyREj4UBFHZbi-F2o4cE,11014 
+botocore/data/cloudfront-keyvaluestore/2022-07-26/endpoint-rule-set-1.json.gz,sha256=Y4WVlTjX9gz3VrAbHSUVM3Mu53jRlRMb3NPc8ElaNlo,2209 +botocore/data/cloudfront-keyvaluestore/2022-07-26/paginators-1.json,sha256=2wyrpgvniacM8xlFDnHQiCR0KVEAVJxBEpWFBcrB4Z0,180 +botocore/data/cloudfront-keyvaluestore/2022-07-26/service-2.json.gz,sha256=nyCbznJBWwEAYWciBorw8NYUUsjRgxFO8SKdCwtw9bo,2220 +botocore/data/cloudfront/2014-05-31/endpoint-rule-set-1.json.gz,sha256=Gdd4IXS8QGmZhl4ZnByixnQPM9ch-CYa5JcPDhYoie4,1839 +botocore/data/cloudfront/2014-05-31/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2014-05-31/service-2.json.gz,sha256=expqsf7MGR41bfmQcVTfmGvwSIuIjlA_fp8LpbOJoH4,15298 +botocore/data/cloudfront/2014-05-31/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 +botocore/data/cloudfront/2014-10-21/endpoint-rule-set-1.json.gz,sha256=Gdd4IXS8QGmZhl4ZnByixnQPM9ch-CYa5JcPDhYoie4,1839 +botocore/data/cloudfront/2014-10-21/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2014-10-21/service-2.json.gz,sha256=i2J-8zRU7zC7fiSFbjz4InqNV7ljoO1id9HBe3hiXjo,15887 +botocore/data/cloudfront/2014-10-21/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 +botocore/data/cloudfront/2014-11-06/endpoint-rule-set-1.json.gz,sha256=Gdd4IXS8QGmZhl4ZnByixnQPM9ch-CYa5JcPDhYoie4,1839 +botocore/data/cloudfront/2014-11-06/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2014-11-06/service-2.json.gz,sha256=L77RCZcC3kJBmdF_mT8UHBczkHnYma2Iu545fw3EUGs,15959 +botocore/data/cloudfront/2014-11-06/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 +botocore/data/cloudfront/2015-04-17/endpoint-rule-set-1.json.gz,sha256=Gdd4IXS8QGmZhl4ZnByixnQPM9ch-CYa5JcPDhYoie4,1839 +botocore/data/cloudfront/2015-04-17/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2015-04-17/service-2.json.gz,sha256=_xOvtUT3HyOVFAp0THuzL_SEQwl2W0VfgYF0y5pMhCU,16213 +botocore/data/cloudfront/2015-04-17/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 +botocore/data/cloudfront/2015-07-27/endpoint-rule-set-1.json.gz,sha256=Gdd4IXS8QGmZhl4ZnByixnQPM9ch-CYa5JcPDhYoie4,1839 +botocore/data/cloudfront/2015-07-27/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2015-07-27/service-2.json.gz,sha256=KOFNPDEcoDd_64UPDcRVGH-lvjLmlzlg2LpFHCG8A3E,16702 +botocore/data/cloudfront/2015-07-27/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 +botocore/data/cloudfront/2015-09-17/endpoint-rule-set-1.json.gz,sha256=Gdd4IXS8QGmZhl4ZnByixnQPM9ch-CYa5JcPDhYoie4,1839 +botocore/data/cloudfront/2015-09-17/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2015-09-17/service-2.json.gz,sha256=tsyEb3dJGhoO217pvSK5KeVjBO_bqPPaBUvm6-nyjp8,15890 +botocore/data/cloudfront/2015-09-17/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 +botocore/data/cloudfront/2016-01-13/endpoint-rule-set-1.json.gz,sha256=Gdd4IXS8QGmZhl4ZnByixnQPM9ch-CYa5JcPDhYoie4,1839 +botocore/data/cloudfront/2016-01-13/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2016-01-13/service-2.json.gz,sha256=JNKgoaSCnaq75xLGUuvCoI5XJ0ak9EXgJ3CVbT-I8-g,16358 +botocore/data/cloudfront/2016-01-13/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 
+botocore/data/cloudfront/2016-01-28/endpoint-rule-set-1.json.gz,sha256=kbUXNFfqyJ2dE73xx1Q5OZGWwOSq0B6LG8a9jI4iv_M,1574 +botocore/data/cloudfront/2016-01-28/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudfront/2016-01-28/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2016-01-28/service-2.json.gz,sha256=B_UKLAnlqS0Ss50lbX0HJK2UChqVz-ulTanuxTkyjTA,16279 +botocore/data/cloudfront/2016-01-28/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 +botocore/data/cloudfront/2016-08-01/endpoint-rule-set-1.json.gz,sha256=kbUXNFfqyJ2dE73xx1Q5OZGWwOSq0B6LG8a9jI4iv_M,1574 +botocore/data/cloudfront/2016-08-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudfront/2016-08-01/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2016-08-01/service-2.json.gz,sha256=7suXpxs3LiJ4fJmgT1Bem9UF4-xFgpfUBQl8ux1dj2Q,17725 +botocore/data/cloudfront/2016-08-01/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 +botocore/data/cloudfront/2016-08-20/endpoint-rule-set-1.json.gz,sha256=kbUXNFfqyJ2dE73xx1Q5OZGWwOSq0B6LG8a9jI4iv_M,1574 +botocore/data/cloudfront/2016-08-20/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2016-08-20/service-2.json.gz,sha256=Ie3LJqHwlMYMCnF85OuMM64MHblraW4DoKMZ1blC8WQ,18123 +botocore/data/cloudfront/2016-08-20/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 +botocore/data/cloudfront/2016-09-07/endpoint-rule-set-1.json.gz,sha256=kbUXNFfqyJ2dE73xx1Q5OZGWwOSq0B6LG8a9jI4iv_M,1574 +botocore/data/cloudfront/2016-09-07/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudfront/2016-09-07/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2016-09-07/service-2.json.gz,sha256=Bn2gtP2pjheO5h_AukBzgZVMToYx_RueN1956abN5zY,18444 +botocore/data/cloudfront/2016-09-07/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 +botocore/data/cloudfront/2016-09-29/endpoint-rule-set-1.json.gz,sha256=kbUXNFfqyJ2dE73xx1Q5OZGWwOSq0B6LG8a9jI4iv_M,1574 +botocore/data/cloudfront/2016-09-29/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudfront/2016-09-29/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2016-09-29/service-2.json.gz,sha256=4sdF6OVqIfbxWuzVMY2Xy5A0SafInZ3RRpZii_CBA3E,27522 +botocore/data/cloudfront/2016-09-29/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 +botocore/data/cloudfront/2016-11-25/endpoint-rule-set-1.json.gz,sha256=kbUXNFfqyJ2dE73xx1Q5OZGWwOSq0B6LG8a9jI4iv_M,1574 +botocore/data/cloudfront/2016-11-25/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudfront/2016-11-25/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2016-11-25/service-2.json.gz,sha256=dwlI3rkE1MdRP3TQNFLCTCSjEOc0555dc6gndZBU10M,27955 +botocore/data/cloudfront/2016-11-25/waiters-2.json,sha256=jzREqDxfIg2KbmPYOmDoYgDvy8mWAEK0w_NmEoCqhHI,1184 +botocore/data/cloudfront/2017-03-25/endpoint-rule-set-1.json.gz,sha256=kbUXNFfqyJ2dE73xx1Q5OZGWwOSq0B6LG8a9jI4iv_M,1574 +botocore/data/cloudfront/2017-03-25/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 
+botocore/data/cloudfront/2017-03-25/service-2.json.gz,sha256=7AiCq0TrGMKo7CZvaN_7NA9v2QNr_7RszEuWXkABImg,29088 +botocore/data/cloudfront/2017-03-25/waiters-2.json,sha256=JboqzXjlni8p-wiVKBz1jRj-mFpkryqueCgI1hD7WPA,1184 +botocore/data/cloudfront/2017-10-30/endpoint-rule-set-1.json.gz,sha256=kbUXNFfqyJ2dE73xx1Q5OZGWwOSq0B6LG8a9jI4iv_M,1574 +botocore/data/cloudfront/2017-10-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudfront/2017-10-30/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2017-10-30/service-2.json.gz,sha256=mcDORzm4IC1xqxvO8105oZa3orYielQLD1GGCCe1htk,34767 +botocore/data/cloudfront/2017-10-30/waiters-2.json,sha256=JboqzXjlni8p-wiVKBz1jRj-mFpkryqueCgI1hD7WPA,1184 +botocore/data/cloudfront/2018-06-18/endpoint-rule-set-1.json.gz,sha256=kbUXNFfqyJ2dE73xx1Q5OZGWwOSq0B6LG8a9jI4iv_M,1574 +botocore/data/cloudfront/2018-06-18/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudfront/2018-06-18/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2018-06-18/service-2.json.gz,sha256=iB8Vr5HTLohPU-eGmX_z9shNHhU8-Z-g_6vK9hSYivY,35482 +botocore/data/cloudfront/2018-06-18/waiters-2.json,sha256=JboqzXjlni8p-wiVKBz1jRj-mFpkryqueCgI1hD7WPA,1184 +botocore/data/cloudfront/2018-11-05/endpoint-rule-set-1.json.gz,sha256=kbUXNFfqyJ2dE73xx1Q5OZGWwOSq0B6LG8a9jI4iv_M,1574 +botocore/data/cloudfront/2018-11-05/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudfront/2018-11-05/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2018-11-05/service-2.json.gz,sha256=ycQe_g_vnixui5-V9yKZ_aRvTSJyAp0qM4YgIMx5dUg,36144 +botocore/data/cloudfront/2018-11-05/waiters-2.json,sha256=JboqzXjlni8p-wiVKBz1jRj-mFpkryqueCgI1hD7WPA,1184 +botocore/data/cloudfront/2019-03-26/endpoint-rule-set-1.json.gz,sha256=kbUXNFfqyJ2dE73xx1Q5OZGWwOSq0B6LG8a9jI4iv_M,1574 +botocore/data/cloudfront/2019-03-26/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudfront/2019-03-26/paginators-1.json,sha256=I7u4h1MFflBvFJemcrLHSn7uOrEeDFc7ecWGqwDxGF8,1126 +botocore/data/cloudfront/2019-03-26/service-2.json.gz,sha256=wHlYOJMmNKkFbRXgNqZ5OOLVfWMkhe2NjXiid_mzHNQ,37652 +botocore/data/cloudfront/2019-03-26/waiters-2.json,sha256=qt7oBhQ-B52-397Q88q0EJoFpDWuOZM7CZpaFhX1xgM,1184 +botocore/data/cloudfront/2020-05-31/endpoint-rule-set-1.json.gz,sha256=p6jzbuc6E53v1wQ0ci3WiVwm-Y5yitwrSEvr_HthRcQ,1657 +botocore/data/cloudfront/2020-05-31/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudfront/2020-05-31/paginators-1.json,sha256=ASaoIzYBXx4dR4prrVpgqLWxB-zUZvsQjdqkdGbsstY,3642 +botocore/data/cloudfront/2020-05-31/service-2.json.gz,sha256=k2AyzdrfkSk-qOyQNl5Nb-clzBXL-nROXpnCrhKJKx8,89991 +botocore/data/cloudfront/2020-05-31/waiters-2.json,sha256=EBQKgBvTcueQ7pUpl3XfCyr-KY39mO_EedGvn21OpZg,1568 +botocore/data/cloudhsm/2014-05-30/endpoint-rule-set-1.json.gz,sha256=O1YKAtzXkx0M0vocSSMrjwkx8ks4l-1PL92mRfGzvAA,1150 +botocore/data/cloudhsm/2014-05-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudhsm/2014-05-30/paginators-1.json,sha256=pe-X06JkfqlENEk-25nE_w_q3QQXkdMnQ5cOG1NPi6E,409 +botocore/data/cloudhsm/2014-05-30/service-2.json.gz,sha256=tKxiFHK8wptbtE9PgUHxO5hDropevU_4WY0ULf6SDIw,5581 
+botocore/data/cloudhsmv2/2017-04-28/endpoint-rule-set-1.json.gz,sha256=rpe2Ez6Ldlf0KfHdspt16-rNspVw334Jk4YHh2tydGM,1242 +botocore/data/cloudhsmv2/2017-04-28/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudhsmv2/2017-04-28/paginators-1.json,sha256=VvCnjrdoGz3Lb-gi5YSOAhhAHzB50i0vIks0GaA2nS0,512 +botocore/data/cloudhsmv2/2017-04-28/service-2.json.gz,sha256=JwSFyOWHSWrMFB4nfOWOyqDT1fifh4SX55i4e_G4PDk,8056 +botocore/data/cloudsearch/2011-02-01/endpoint-rule-set-1.json.gz,sha256=cF4pcx8nu_D-JadD-JFf3qspfLidRsMyO-oCACDdhGE,1149 +botocore/data/cloudsearch/2011-02-01/service-2.json.gz,sha256=qasy5XNgQsJbkuCKeKhan6mp9SsLOXY5LxbN0QS7tSA,9599 +botocore/data/cloudsearch/2013-01-01/endpoint-rule-set-1.json.gz,sha256=QsQ51uT-uYpsVQg05MzcYD-xCfEIVgSViemMtNrgFwI,1151 +botocore/data/cloudsearch/2013-01-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudsearch/2013-01-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/cloudsearch/2013-01-01/service-2.json.gz,sha256=KkPTyxCwmYRn59o-sZ3nL7kRnohyM40DUDNLiZgj7r8,12107 +botocore/data/cloudsearchdomain/2013-01-01/endpoint-rule-set-1.json.gz,sha256=FnTvoXYRY4Qy4e59cGj1Q_WXVtAT3gQE8XMnv6fH_MI,1155 +botocore/data/cloudsearchdomain/2013-01-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudsearchdomain/2013-01-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/cloudsearchdomain/2013-01-01/service-2.json.gz,sha256=xHmdMgPoEudKiiGNGL5LsRpUPR85b8N_Zxpmc2YJxeQ,9123 +botocore/data/cloudtrail-data/2021-08-11/endpoint-rule-set-1.json.gz,sha256=V64gBIsna9aZehMGSaMPceJt8feOaQlB3G9nOw9oea4,1304 +botocore/data/cloudtrail-data/2021-08-11/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/cloudtrail-data/2021-08-11/service-2.json.gz,sha256=xGNfQAD40Qf4VrP3x7sL1t5mplwnJ_k-xoi8iNfKhZg,2165 +botocore/data/cloudtrail/2013-11-01/endpoint-rule-set-1.json.gz,sha256=st54TED8EV39b_MkZFZv0r4Yyaqz3qpMr6YnT1qEBFk,1232 +botocore/data/cloudtrail/2013-11-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudtrail/2013-11-01/paginators-1.json,sha256=o6vvum0HuZOUnwcmSutAt1JcJBwzSpHooZx75aRRQc0,1070 +botocore/data/cloudtrail/2013-11-01/service-2.json.gz,sha256=LS1b_4C7NRujmYm9oGAXYF4ybKj5SdpFMf6SqyjdClU,45143 +botocore/data/cloudwatch/2010-08-01/endpoint-rule-set-1.json.gz,sha256=YLt8kqa_limijmrrtxAq4UcCMVly45YwnBi5AeT0wMw,1415 +botocore/data/cloudwatch/2010-08-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cloudwatch/2010-08-01/paginators-1.json,sha256=OfAocfP12RM8pfP6Fh2EUikcL00nN2vRMCW3O4wsjHo,1122 +botocore/data/cloudwatch/2010-08-01/service-2.json.gz,sha256=OU1kiNS00qKIKCN3RjCqVXdRIlGZiO8HiRSIdxGEHjw,41274 +botocore/data/cloudwatch/2010-08-01/waiters-2.json,sha256=MloXSzqs1ZkzyWAP2NrkVyNkIE63Hbk24II7PCuUxl0,644 +botocore/data/codeartifact/2018-09-22/endpoint-rule-set-1.json.gz,sha256=1CRBZlI_O5tysVzFlJMtJNjsDvZMtsNeZgwGFMD9xnM,1151 +botocore/data/codeartifact/2018-09-22/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/codeartifact/2018-09-22/paginators-1.json,sha256=I3MlPdEGK-hCFxJnNpPrpWkJSBaM9dhFiZ4uo0AoR8o,1747 +botocore/data/codeartifact/2018-09-22/paginators-1.sdk-extras.json,sha256=kNVDIOe3C5yL0xTWSrW2xDchpno4Xozz60DY53uxNEA,444 
+botocore/data/codeartifact/2018-09-22/service-2.json.gz,sha256=X8POt_QTDZbOM2MfhhowNBFeHDm1Pgt7D_aHBHRwqzI,23321 +botocore/data/codebuild/2016-10-06/endpoint-rule-set-1.json.gz,sha256=L4ORhg7DnYzEXa63ibkpc_sa6Zlut5TvXbNUNscwsKY,1149 +botocore/data/codebuild/2016-10-06/examples-1.json,sha256=_-tVq2XM1YDuzv78VwIj_WjyXHu-yrIPyxzTtTbdFJ8,9778 +botocore/data/codebuild/2016-10-06/paginators-1.json,sha256=jNb-seZLb_i52B9YQgWA8t6xpsFoHQEiAkYGnScwEz4,2448 +botocore/data/codebuild/2016-10-06/service-2.json.gz,sha256=0GyDmuSmM5KbL8hZXi_52sd-hvXPCx8Y9uCl_XhdOR8,48723 +botocore/data/codecatalyst/2022-09-28/endpoint-rule-set-1.json.gz,sha256=bed-bBRpmz5X1ONLzsI5H3QNysY3drG2ZurmKp8cogU,849 +botocore/data/codecatalyst/2022-09-28/paginators-1.json,sha256=TuEQ6NVw_F_LgmG-TurtBCvFTRFRP8DWHseWdO8DNRk,1637 +botocore/data/codecatalyst/2022-09-28/service-2.json.gz,sha256=-aZW1kXrQTTAytHDMjqK93ymqay-VrLJ946CEEvVg8o,13989 +botocore/data/codecatalyst/2022-09-28/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/codecommit/2015-04-13/endpoint-rule-set-1.json.gz,sha256=CVYWlsiiGGre_GY2TBx1ezlkjWVD4704BAvSTExo29g,1151 +botocore/data/codecommit/2015-04-13/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/codecommit/2015-04-13/paginators-1.json,sha256=2w92BpzUce0gSVEaZH0la2r8ZT_MDtxoLc6RG-dpln4,1206 +botocore/data/codecommit/2015-04-13/service-2.json.gz,sha256=lOkeydC-q2sN3LdUJMSoiSpPakJz6khCZUxDW-lkIsQ,40935 +botocore/data/codeconnections/2023-12-01/endpoint-rule-set-1.json.gz,sha256=Ki7x9aFHivkj9Tzg59l7Vjm9ghUxIXsVqqKoVPjmI0s,1302 +botocore/data/codeconnections/2023-12-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/codeconnections/2023-12-01/service-2.json.gz,sha256=x46d7VyUjBC0M3FL3n4MPACWyoFCyXuh-8SaePA0iTg,10017 +botocore/data/codedeploy/2014-10-06/endpoint-rule-set-1.json.gz,sha256=vvuGtTn97brZcTN19MvpE13_lyGZv3pJjUAMs1_fetg,1150 +botocore/data/codedeploy/2014-10-06/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/codedeploy/2014-10-06/paginators-1.json,sha256=riyMuhePXvzjx3lAoHiIaOi0U6v2lCVd65qX4UWPoxo,1313 +botocore/data/codedeploy/2014-10-06/service-2.json.gz,sha256=_0Nz-S55UMKy2nqnc3QpZrF3aJTDu0yy0uEoobhB0iU,31886 +botocore/data/codedeploy/2014-10-06/waiters-2.json,sha256=OARBxBeZTRUui1WztkVtUn7Q2lAh3-Bemczgk455MGQ,662 +botocore/data/codeguru-reviewer/2019-09-19/endpoint-rule-set-1.json.gz,sha256=f9K5YcW-DGDMtA0ZF0e5erA1QVWANP0qj9QdKDzr760,1157 +botocore/data/codeguru-reviewer/2019-09-19/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/codeguru-reviewer/2019-09-19/paginators-1.json,sha256=0bkbq9IDAtNTQOShBQuJVNtb8xgFFUYNdzOcl3ri_DM,223 +botocore/data/codeguru-reviewer/2019-09-19/service-2.json.gz,sha256=ZxbI0kvAErNl00Cx7ldD2dn29B-Bt2lxxz-swCfViw4,11785 +botocore/data/codeguru-reviewer/2019-09-19/waiters-2.json,sha256=0jf0N7KHQV4qYAOPKBKNdiExhxEvojmGQ2Jzrc9lYR4,1733 +botocore/data/codeguru-security/2018-05-10/endpoint-rule-set-1.json.gz,sha256=RwCCbO3nybVmnGuBz06CHd1Oia-mnCBY6vi4eQCKgss,1306 +botocore/data/codeguru-security/2018-05-10/paginators-1.json,sha256=nwCp854x7Q4pjInZgk9mpYoj9BiFf09ekRTXObmU4GQ,522 +botocore/data/codeguru-security/2018-05-10/service-2.json.gz,sha256=ZQBLrjF3REcQ50MbZNMyyFbA6l4bYB8qDdiXwX3xzWc,7989 +botocore/data/codeguru-security/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 
+botocore/data/codeguruprofiler/2019-07-18/endpoint-rule-set-1.json.gz,sha256=ZlFCUcoAjJ0oSDZM1i9BvEMWXrdmj8XFK5aUy_E77eg,1157 +botocore/data/codeguruprofiler/2019-07-18/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/codeguruprofiler/2019-07-18/paginators-1.json,sha256=d7DXbQ-GmZLDQRjjpAO-vzvm7OEA-pNKfPUyA9rgaag,195 +botocore/data/codeguruprofiler/2019-07-18/service-2.json.gz,sha256=fJNd_vpR98n-bZhkoCuAD0-aaET8I1viWyPnu2mH9CI,14592 +botocore/data/codepipeline/2015-07-09/endpoint-rule-set-1.json.gz,sha256=xKhx694at8IocZZXkPzIDeSi1QXOr_E-Y8ItYXA1udk,1151 +botocore/data/codepipeline/2015-07-09/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/codepipeline/2015-07-09/paginators-1.json,sha256=qvL1MMRjFuod_E5JuAfBuSUwPrMBCxO6zsfl4mEDYH8,1386 +botocore/data/codepipeline/2015-07-09/service-2.json.gz,sha256=53unYXnqFoR4LsvFFdRInR6LpYqoZV6SQlG4C91LN4Q,36738 +botocore/data/codestar-connections/2019-12-01/endpoint-rule-set-1.json.gz,sha256=tgU3F1qI57lY9yRBipsGGGZcNQUkNZr9i1yKIAK06a8,1154 +botocore/data/codestar-connections/2019-12-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/codestar-connections/2019-12-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/codestar-connections/2019-12-01/service-2.json.gz,sha256=xn4mrcd_boJuHBoX8XiQR957FumFIK2rJ6MQsbSQtjo,9897 +botocore/data/codestar-notifications/2019-10-15/endpoint-rule-set-1.json.gz,sha256=YlVdkkRdo-1cqEBhHzxIc7iwfbzhIOYC6_R7fKRpAdk,1157 +botocore/data/codestar-notifications/2019-10-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/codestar-notifications/2019-10-15/paginators-1.json,sha256=bD6rBB54kEd5ns5mM8KWWE2Gfs6rNkRWTLyvKHai9OA,531 +botocore/data/codestar-notifications/2019-10-15/service-2.json.gz,sha256=ObrZYqCKfzV2HfusayVdKGpoUeArCqog_TNcmK_p6j8,5570 +botocore/data/cognito-identity/2014-06-30/endpoint-rule-set-1.json.gz,sha256=jGq02cqcaRtRZqpeTaZE-lPixdErh76J7hyJK3rLDxY,1348 +botocore/data/cognito-identity/2014-06-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cognito-identity/2014-06-30/paginators-1.json,sha256=iRnVNYNjXj4riBW6sjwmAF2p9fSX2MkfoM5W_Y9_tkE,197 +botocore/data/cognito-identity/2014-06-30/service-2.json.gz,sha256=-2zDr6KABAmI59e2UFb45kunN7ZwJTENj8juHBQNSBE,10267 +botocore/data/cognito-idp/2016-04-18/endpoint-rule-set-1.json.gz,sha256=lK54tadTcArsvgauILRkNYkzg0i_E3gojvTL6Zoy7q4,1344 +botocore/data/cognito-idp/2016-04-18/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cognito-idp/2016-04-18/paginators-1.json,sha256=RdlZ6K9kobwkGd7v3X15eeU_1MTpNNWwHD35RwB4keA,1527 +botocore/data/cognito-idp/2016-04-18/service-2.json.gz,sha256=LZlzlPHSvLWUsQtZtNvhGgScaflkOP4MmpRpzSmTPYg,114861 +botocore/data/cognito-sync/2014-06-30/endpoint-rule-set-1.json.gz,sha256=4oTtkkKaVVt_zK9PzOK4RedReErjdMUh2_WQlS4RaYw,1153 +botocore/data/cognito-sync/2014-06-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/cognito-sync/2014-06-30/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/cognito-sync/2014-06-30/service-2.json.gz,sha256=wYRznIKwyIYbNXjO3fooNQ9RpstGHyDn0_sGfRQPP0s,7342 +botocore/data/comprehend/2017-11-27/endpoint-rule-set-1.json.gz,sha256=ZC-3tE2yi0Xz8yiLo2PgQi91vGSHriE-vqPHXl1qe_M,1150 +botocore/data/comprehend/2017-11-27/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 
+botocore/data/comprehend/2017-11-27/paginators-1.json,sha256=aCozRajzUb4wblnxzb_bTJlztnDFC3PnwItAMek2WtY,2033 +botocore/data/comprehend/2017-11-27/service-2.json.gz,sha256=Yq4WbOJEoaR33gQMbThasUZcz77z7yEM2UAwyo-5Vxc,43058 +botocore/data/comprehendmedical/2018-10-30/endpoint-rule-set-1.json.gz,sha256=P1--QDAWhR66hJ18Iys_ACrw_kIlI6ov26DZ0Rzs6b8,1155 +botocore/data/comprehendmedical/2018-10-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/comprehendmedical/2018-10-30/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/comprehendmedical/2018-10-30/service-2.json.gz,sha256=SYQmcRRbMos2xrFYT_cEuGp3rWXKCaZ-MzyMg-MC7uM,10293 +botocore/data/compute-optimizer-automation/2025-09-22/endpoint-rule-set-1.json.gz,sha256=dDaIYUkUmWQBZ_kiRsrDSzsyJqu9NvT0DH9qQ06oJas,1302 +botocore/data/compute-optimizer-automation/2025-09-22/paginators-1.json,sha256=25BBuPwHP0Glc_fbh2x7tYdAxRQLlh5OqzXISl_Du1g,1680 +botocore/data/compute-optimizer-automation/2025-09-22/service-2.json.gz,sha256=ARARRFxuoa3TLl_7grEZf9peoWP1IkEqW5tzcTSlr6I,11777 +botocore/data/compute-optimizer-automation/2025-09-22/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/compute-optimizer/2019-11-01/endpoint-rule-set-1.json.gz,sha256=0z4UIj3hxC1EftiMJtdxlUj9kPRz32GjU7ySKL41R3Y,1156 +botocore/data/compute-optimizer/2019-11-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/compute-optimizer/2019-11-01/paginators-1.json,sha256=FBFfvnKfuzo8mWExsEqu3Gy9-nKsBwTxjoRuqT-_oU0,1022 +botocore/data/compute-optimizer/2019-11-01/service-2.json.gz,sha256=kekd2I8t4d_7pk9SrIF--KJPkLG3qiGOJcUwrs2hM-k,40948 +botocore/data/config/2014-11-12/endpoint-rule-set-1.json.gz,sha256=Bqop-ldPHsOg1bJioXGVhUovcVYZbDCd-4eEwoR8RYA,1231 +botocore/data/config/2014-11-12/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/config/2014-11-12/paginators-1.json,sha256=PrX21DZ-zHv2NKxXpMSY70B5v-TZmr7GeydAsMA7vw8,6209 +botocore/data/config/2014-11-12/service-2.json.gz,sha256=Wq4Q1a2R7Ua6_z7D-YP2Ymj0hJsUCUMSaWKY53GNi28,66638 +botocore/data/connect-contact-lens/2020-08-21/endpoint-rule-set-1.json.gz,sha256=JYrz0HnRX1RiksP3Z5P-g0hf-1VDRS4PEX8_XPZ6dWI,1152 +botocore/data/connect-contact-lens/2020-08-21/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/connect-contact-lens/2020-08-21/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/connect-contact-lens/2020-08-21/service-2.json.gz,sha256=rliP39VUELuYDgFZbWOp0O_ccjA0bmxec2LEfE-rF_8,3162 +botocore/data/connect/2017-08-08/endpoint-rule-set-1.json.gz,sha256=WYug_g1lsZWxmoaNBgztjeRXl15zhszX4UXHls7nmuw,1230 +botocore/data/connect/2017-08-08/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/connect/2017-08-08/paginators-1.json,sha256=WlDtEkow4KbvqUydSuSxlBWsNyYMJqkrM7j0pw4KqeA,15974 +botocore/data/connect/2017-08-08/service-2.json.gz,sha256=FlyxU9SgfNONc_nFL19AnKJ1r1idOIUr3AYvP0s1pkI,183687 +botocore/data/connectcampaigns/2021-01-30/endpoint-rule-set-1.json.gz,sha256=guJ9hFbFNdWAf9fdIcA8zdPEoVH29BMnwMLzZ-MC1ec,1156 +botocore/data/connectcampaigns/2021-01-30/paginators-1.json,sha256=0u4LcBZFpshvXnakuryTCgfVdLeSI-dpWmlZds4eVWs,199 +botocore/data/connectcampaigns/2021-01-30/service-2.json.gz,sha256=Lc0G-Fh5wTs2jO_txDmPpV0UkBaIfo73MMjUigHf9CY,5276 +botocore/data/connectcampaignsv2/2024-04-23/endpoint-rule-set-1.json.gz,sha256=wninJ1qmYJF3ONCXTzuoqRbbE5IO_PcsthY1BKfrVrE,1306 
+botocore/data/connectcampaignsv2/2024-04-23/paginators-1.json,sha256=LMkaF55JCeS3JzOF7c4eW3ZqvVCDHNHTmJCpGQ8wpi0,394 +botocore/data/connectcampaignsv2/2024-04-23/service-2.json.gz,sha256=zNjsALyYl0lOtRIZUyv4yL14jGLfXVvcNfRmJU31pQ0,9361 +botocore/data/connectcases/2022-10-03/endpoint-rule-set-1.json.gz,sha256=rbNUcGZbGHDsITeHMtkzLg_WINkEGE6m3iK287_fLPA,1293 +botocore/data/connectcases/2022-10-03/paginators-1.json,sha256=CBQn85TwOMroUn8rqB689UtbJkuBCAX371eSdVbZRAc,694 +botocore/data/connectcases/2022-10-03/service-2.json.gz,sha256=362K7l6_EFvmxcxbEdFvq1Xpph9yoPBQjIjD96SxHo0,18219 +botocore/data/connectcases/2022-10-03/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/connectparticipant/2018-09-07/endpoint-rule-set-1.json.gz,sha256=JR3ylsQ-bnTvmamW05mpg5ZfalXch-pgfqX74vJX0wg,1238 +botocore/data/connectparticipant/2018-09-07/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/connectparticipant/2018-09-07/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/connectparticipant/2018-09-07/service-2.json.gz,sha256=20Ois1ZCJ8tsUZrJTTkelw0KjRTJ9rdoEm-y0x3vsXE,8850 +botocore/data/controlcatalog/2018-05-10/endpoint-rule-set-1.json.gz,sha256=ZhhCEeao--GmZkms4frAqj3hXJw7WJB9NUmdlfmp42I,1302 +botocore/data/controlcatalog/2018-05-10/paginators-1.json,sha256=4NXGNZRHFcTeivBOzG6lJUAPu3RKCzcLAhvgtDKjG4g,863 +botocore/data/controlcatalog/2018-05-10/service-2.json.gz,sha256=j_R0g7ZdpqDOMrkcvterO9sZnuVG8v4qoLmy4XrHL6Q,7454 +botocore/data/controlcatalog/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/controltower/2018-05-10/endpoint-rule-set-1.json.gz,sha256=Kc77BQSOzQ0h-E3-q0S0pm-acLPk1h-P3sVYcUDTHgk,1151 +botocore/data/controltower/2018-05-10/paginators-1.json,sha256=fnUyaumVMU4LxD6VRfutlQ549Lr8SBKeWce0mqgA0uM,1081 +botocore/data/controltower/2018-05-10/service-2.json.gz,sha256=cQ5oYyDiipVX4pMP7R_V3e5ukidXThv2TyfTR5sUeQQ,13991 +botocore/data/controltower/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/cost-optimization-hub/2022-07-26/endpoint-rule-set-1.json.gz,sha256=i4_JNZCTtkM-Ymu2KloKKNuAGhOCOqMhpAsXByztolw,1309 +botocore/data/cost-optimization-hub/2022-07-26/paginators-1.json,sha256=nc9cjDY9Czbo6fxvbnoL0-JpxcIuCbeBgscEFYlPAuE,721 +botocore/data/cost-optimization-hub/2022-07-26/paginators-1.sdk-extras.json,sha256=O4h78RRWkbXvEDne3NyxM_npO-yI2KU8QL1jnJnfiwE,382 +botocore/data/cost-optimization-hub/2022-07-26/service-2.json.gz,sha256=FHFV1RvMAQiXzed38vG1R53yMlwvg7IIsN6FNZtLt4o,10918 +botocore/data/cost-optimization-hub/2022-07-26/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/cur/2017-01-06/endpoint-rule-set-1.json.gz,sha256=AJ3fLC98PGvWCkUp67wn2NeEiX9CHRbySPHAhRnhLOc,1145 +botocore/data/cur/2017-01-06/examples-1.json,sha256=NyOJJuDWe_rnuUTIp9cdvnw0GfJCK2aaDMW8Qkyf2Mg,2874 +botocore/data/cur/2017-01-06/paginators-1.json,sha256=svrnnDA-WDB_TSjNDhx_3bXmieM10GBn4TRFNlZNPHg,209 +botocore/data/cur/2017-01-06/service-2.json.gz,sha256=zPhbbEs4iGg69Qcx1fYb0pp6Vr3bO-0beKLb-Hm1hzc,3835 +botocore/data/customer-profiles/2020-08-15/endpoint-rule-set-1.json.gz,sha256=kFanK9O-OZuRzfhw55SHZbdJEgfaJRepRU493phvqRE,1148 +botocore/data/customer-profiles/2020-08-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/customer-profiles/2020-08-15/paginators-1.json,sha256=qXQvRpsrVX3Oz-LKJCsZqw5tpHDKwz3UJdjJ5DP8SDo,1877 
+botocore/data/customer-profiles/2020-08-15/paginators-1.sdk-extras.json,sha256=1f1Dqw3cc1YvdqBJdmPMcEzzDehsm62v9MCIxPzxXlU,240 +botocore/data/customer-profiles/2020-08-15/service-2.json.gz,sha256=R7EYNa_iHFjAvk7Ypo2da-WgdT45DSkVBIlU7ONbgFg,53660 +botocore/data/databrew/2017-07-25/endpoint-rule-set-1.json.gz,sha256=orb5KfvaHHCd4_yxoNmW5ZpRT01RfJ-yb5E_ARzagMY,1210 +botocore/data/databrew/2017-07-25/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/databrew/2017-07-25/paginators-1.json,sha256=i_5ZTxjwAyOvq_e_Etz8L97TB_O2FHjLsAkKFbGJf8U,1316 +botocore/data/databrew/2017-07-25/service-2.json.gz,sha256=66ofM-Y9cV-BCu7IYSNU63fuLtDgQy_g7QYO7SBhMa0,20293 +botocore/data/dataexchange/2017-07-25/endpoint-rule-set-1.json.gz,sha256=J7Evp6EteL1sHRQyM_jlO4qSl7ysX-nN-mk2N57nR5k,1152 +botocore/data/dataexchange/2017-07-25/paginators-1.json,sha256=bmlL4xVyx7gej5sWbNSYqvUOmgM9e3H9YZXsJC4Qh-I,1204 +botocore/data/dataexchange/2017-07-25/service-2.json.gz,sha256=WoGTPwhkIepo60ZCRPqcGOYGiMJ8QR_UpBk54z-pBqA,17276 +botocore/data/dataexchange/2017-07-25/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/datapipeline/2012-10-29/endpoint-rule-set-1.json.gz,sha256=X3reyIqhr-e3r1DaKm4gYAvV0vu9yMWYS66VKUmXv6k,1151 +botocore/data/datapipeline/2012-10-29/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/datapipeline/2012-10-29/paginators-1.json,sha256=JdrA68aI3fnPWh2_ecOxC5DtcFz4OkiO8GvsBkzOgUw,554 +botocore/data/datapipeline/2012-10-29/service-2.json.gz,sha256=Xz8Tpi4l-7nHL6UFJ_nzM3hUkp5UniSsPstrFuMlg38,9609 +botocore/data/datasync/2018-11-09/endpoint-rule-set-1.json.gz,sha256=8rguqy18Mtmnmj4UK4Bds6ym3RI9hhiMff4iOFulPg8,1150 +botocore/data/datasync/2018-11-09/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/datasync/2018-11-09/paginators-1.json,sha256=OTxZXjoijv0y3dCqSgmbXidkpsOxdvViwEvduFFVp7g,842 +botocore/data/datasync/2018-11-09/service-2.json.gz,sha256=K9rVzJb41Aousx1ud9Re7Hxg4cuAfmeb7k9T9S68DlE,40386 +botocore/data/datazone/2018-05-10/endpoint-rule-set-1.json.gz,sha256=A2zmuthQ7tVDsXgqpTnPB5OJv2hRC3rXkFeHy2zlbGY,1126 +botocore/data/datazone/2018-05-10/paginators-1.json,sha256=ExdBh1hfdpz6Bz3zem4hJVUJVWNQcw5qzameugQw2XU,6177 +botocore/data/datazone/2018-05-10/paginators-1.sdk-extras.json,sha256=QFlwjsMb1_P--nZveHlXUkKQWkav_M60iEpGYEXQ5Sk,392 +botocore/data/datazone/2018-05-10/service-2.json.gz,sha256=5fqDx4Hx2xsW6pkUJpbZjULzqv8uypElu5nKj3E8WWg,85205 +botocore/data/datazone/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/dax/2017-04-19/endpoint-rule-set-1.json.gz,sha256=X2f-Hip4p5GGqROY1VcuMK7VKAXGkK7sePh4AIApU3s,1145 +botocore/data/dax/2017-04-19/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/dax/2017-04-19/paginators-1.json,sha256=OOhBXs1nXQbwQO2dybisWoE6M5Z7WrPyQUCAyGgfEiA,1175 +botocore/data/dax/2017-04-19/service-2.json.gz,sha256=PPfOjF69tPRHHtlKreK9-Qo3joeHEwiigdvWVjVHt84,10264 +botocore/data/deadline/2023-10-12/endpoint-rule-set-1.json.gz,sha256=wTmORzArpcW2431gpN5hh8b64BSYWaA7SSn0qXRurnk,1295 +botocore/data/deadline/2023-10-12/paginators-1.json,sha256=ehIpiR7Z0B8dWu4DvwXv7jImxr8VeYxDPwAxbn7j2D0,4939 +botocore/data/deadline/2023-10-12/paginators-1.sdk-extras.json,sha256=hFPg_wsYLg1pXtVqU63jGPKL7Fc9mW9vLLloldy_b04,218 +botocore/data/deadline/2023-10-12/service-2.json.gz,sha256=NOKQc-5GvXFKbZ2IGlXNFzTGdI--I3XH46isTJDgyTc,46876 
+botocore/data/deadline/2023-10-12/waiters-2.json,sha256=wkHGWjUdYS6_6x4_XhRzQBCkbZjkedtHL_YQmWlP9Gg,4422 +botocore/data/detective/2018-10-26/endpoint-rule-set-1.json.gz,sha256=2Y7xg4vwdEOAPFFhXaiacYvwx3vDimaHtq4FVs7fphc,1496 +botocore/data/detective/2018-10-26/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/detective/2018-10-26/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/detective/2018-10-26/service-2.json.gz,sha256=kRkQS5cwqOnkeJHmaGCjYBh9GJ7hhFM1DX7OZk-RT68,13190 +botocore/data/devicefarm/2015-06-23/endpoint-rule-set-1.json.gz,sha256=BrQktQ8z9KzjH4S4ck6NzhyL-GjDmmiQX_fGPWAbyJE,1150 +botocore/data/devicefarm/2015-06-23/examples-1.json,sha256=ph2IehoxWkjr60w1Itx_H2XRMVKQ9J1WHbDDdS2-i6Q,42721 +botocore/data/devicefarm/2015-06-23/paginators-1.json,sha256=dsBpWrsUYvlphjtWSswDS3BYoWFzpq3sqwpOK4ER5vA,2870 +botocore/data/devicefarm/2015-06-23/service-2.json.gz,sha256=Y6baZp75Qn4o8y6-aj9d-YpnyXW2yJD442uvAx9uVd8,31336 +botocore/data/devops-guru/2020-12-01/endpoint-rule-set-1.json.gz,sha256=dDaKuPNTzjSBnjxykp0cbyAUcq3Q6wZr2qG-4ok4y1c,1153 +botocore/data/devops-guru/2020-12-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/devops-guru/2020-12-01/paginators-1.json,sha256=L8a_Vi9F4QUZiw34P5LPuf6ELhTE3_rKfSJTiF-Jsrw,3043 +botocore/data/devops-guru/2020-12-01/service-2.json.gz,sha256=b38H9fyh25_o-9W6619d8UI2S_vwJPNuB7p2ev-hMyc,25068 +botocore/data/directconnect/2012-10-25/endpoint-rule-set-1.json.gz,sha256=Lhf_wVmIlCESfGV15cr_lECTJc7K38HIZSen0_JX-84,1153 +botocore/data/directconnect/2012-10-25/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/directconnect/2012-10-25/paginators-1.json,sha256=xeMiI713ZrL0L4eTYXOT8iXsmsiguus1SZdRE7OWYCo,643 +botocore/data/directconnect/2012-10-25/service-2.json.gz,sha256=XoG2-pvWbGWwmDcpyNN8I9rtqk8E-X9Ts5h9AClE7ME,22029 +botocore/data/discovery/2015-11-01/endpoint-rule-set-1.json.gz,sha256=ItADJditEHmFcTD7LzkB1CfTrWzTFysy277by10hCRM,1150 +botocore/data/discovery/2015-11-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/discovery/2015-11-01/paginators-1.json,sha256=9TAcWsEEH768Rt1ArlrAzFDXYkp82xhdZ5Kh5LVrkmw,1221 +botocore/data/discovery/2015-11-01/service-2.json.gz,sha256=GNTFsOphaz6rFXL8WGiOEJkDzFczY0nrWJX7JPE7k1k,19020 +botocore/data/dlm/2018-01-12/endpoint-rule-set-1.json.gz,sha256=_ZGmWkMpRUj7iyWMsNfmz7ZcW9QRZbDm3jxsdIx-KUg,1230 +botocore/data/dlm/2018-01-12/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/dlm/2018-01-12/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/dlm/2018-01-12/service-2.json.gz,sha256=Hx3Tdt_yC18l53b0QKl76boc8Jyr8QmWBUiCV0fYyUk,12223 +botocore/data/dms/2016-01-01/endpoint-rule-set-1.json.gz,sha256=75kOf2v99Ni5dw7P4TCpYJSKK10ZEdxcRM72wPbPc4Y,1300 +botocore/data/dms/2016-01-01/examples-1.json,sha256=vV_0L6caRIbPqk4IOCZVqNc0xcbN77GsWwY3KaK0SA0,35747 +botocore/data/dms/2016-01-01/paginators-1.json,sha256=8X1HzmVYDUKuiYfiTM_NSKghY4YY9CshhiEc8DWd0Gc,2864 +botocore/data/dms/2016-01-01/service-2.json.gz,sha256=Febjbw3vnXEqa7421mi1Cj7_pJ_HdrSTlzCDCPlyNfw,86839 +botocore/data/dms/2016-01-01/waiters-2.json,sha256=q_cVn5QLry8e5ZZquSwUs7tJo5LQnnQfswzEpsF45F8,11781 +botocore/data/docdb-elastic/2022-11-28/endpoint-rule-set-1.json.gz,sha256=O51hbH7xjBIxTKUTq4pe3qNbEBGgm2xwiwVueLiWH7Q,1302 +botocore/data/docdb-elastic/2022-11-28/paginators-1.json,sha256=G7BrLkcnoebH5opU6J0VCq3MfoxNBz1fR1-o5H0zFno,562 
+botocore/data/docdb-elastic/2022-11-28/service-2.json.gz,sha256=yrcEcFhoWPftq3YehYdojLHcX3wF4CPUL8Of2xMdGMk,7623 +botocore/data/docdb/2014-10-31/endpoint-rule-set-1.json.gz,sha256=-yaMsUCL6embVCf7xwIIkigYfUfh5gRJc6Rz4fpK5jw,1230 +botocore/data/docdb/2014-10-31/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/docdb/2014-10-31/paginators-1.json,sha256=Lc8FwQvudtu-XOnFfOh-qM6pOrsnlRajew2PKY6ZtZk,2318 +botocore/data/docdb/2014-10-31/service-2.json.gz,sha256=43zWfgoa6jnu4avuC5kPfBqDkZ_IEzIe3MJVJ60noSc,35568 +botocore/data/docdb/2014-10-31/service-2.sdk-extras.json,sha256=U_PgxwtPhWl8ZwLlxYiXD4ZQ4iy605x4miYT38nMvnM,561 +botocore/data/docdb/2014-10-31/waiters-2.json,sha256=8bYoMOMz2Tb0aGdtlPhvlMel075q1n7BRnCpQ-Bcc1c,2398 +botocore/data/drs/2020-02-26/endpoint-rule-set-1.json.gz,sha256=l5_lSGgGZZc4SRt_PjdVFOn5rumdHDHmMaAFKwlPSyI,1145 +botocore/data/drs/2020-02-26/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/drs/2020-02-26/paginators-1.json,sha256=j1Nq2iBDgHjtNTzLW5JGDB5BfwGLcqOX3kewE_mNNIM,1909 +botocore/data/drs/2020-02-26/service-2.json.gz,sha256=gER0hDNlr8HXr9AW4nQ2rK496EOkZTf51Xy8L_P9zl4,21053 +botocore/data/ds-data/2023-05-31/endpoint-rule-set-1.json.gz,sha256=HBUMX-WRESkMoVIAu_YgQNB293khmI-w8DBzu5Cp7ho,1295 +botocore/data/ds-data/2023-05-31/paginators-1.json,sha256=584legW_1CS0O-xK6_nd_ykAX1uQpbsHJyqh7qMGBGs,989 +botocore/data/ds-data/2023-05-31/paginators-1.sdk-extras.json,sha256=hMmJO2oml7dz_QZQiHrgSlhB_pocyqPxnhOMS3EU6Zg,959 +botocore/data/ds-data/2023-05-31/service-2.json.gz,sha256=0pZ6kkbLiyAzUQfFPIcL243OFobCH3eXkfx-MdhRV0I,7833 +botocore/data/ds/2015-04-16/endpoint-rule-set-1.json.gz,sha256=BcEclzHyPMhfhWwb7VkXjylAbuzbF6s4KQEXixxBV9o,1144 +botocore/data/ds/2015-04-16/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/ds/2015-04-16/paginators-1.json,sha256=yxY5Cm7p39vcMgdVs5pLlw3VkElaF6EQcTG7L5lY2cU,2555 +botocore/data/ds/2015-04-16/service-2.json.gz,sha256=5_6I1_X0z39J8tesbfDARbzDeupGWHCExun0JnIUmHw,33084 +botocore/data/ds/2015-04-16/waiters-2.json,sha256=0Wmt13KRi2wlleWgvUUHMnHbKBgJ0dKVvzJPbV1_qDQ,566 +botocore/data/dsql/2018-05-10/endpoint-rule-set-1.json.gz,sha256=nAdhksE_7iTq8Prunxu-MxeLK6rPkbP5U-C2-skW5Nk,831 +botocore/data/dsql/2018-05-10/paginators-1.json,sha256=TqVyPmU4ENrjA8pbfdQW-NgaOzWxliEb90BZ7NxjZ5I,187 +botocore/data/dsql/2018-05-10/service-2.json.gz,sha256=gVLbtWuaqmurnWVfUx48bsrT1M-VhhKM0KO4zcn5uUM,6436 +botocore/data/dsql/2018-05-10/waiters-2.json,sha256=lav4JgEiv9RzUWsPjBqpmv2bkfKIaKv92ErLhVzJk8M,665 +botocore/data/dynamodb/2011-12-05/endpoint-rule-set-1.json.gz,sha256=I13wMmM3WlbaybaoU4OsV1PLTNH9GEIU77CZJ4APn40,1343 +botocore/data/dynamodb/2011-12-05/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/dynamodb/2012-08-10/endpoint-rule-set-1.json.gz,sha256=aruRRw75w4u487we3G3OlAE3Kbs7GrOiABOGNGQLQ1A,3447 +botocore/data/dynamodb/2012-08-10/examples-1.json,sha256=cZ5PBzQtSA9b1ZN39RffvUM54Tqf_h5-AQA7zSBVK4Q,16947 +botocore/data/dynamodb/2012-08-10/paginators-1.json,sha256=U84oi-heJVXxjHM1enODt6qI5J117zh0YoM4BHwZZ18,1103 +botocore/data/dynamodb/2012-08-10/service-2.json.gz,sha256=ut_-Od1kyFsR48WrTfMbFpsYwz-xyGJFmpML0PKWajE,83205 +botocore/data/dynamodb/2012-08-10/waiters-2.json,sha256=G_iaXR3xZP3M8lpMR1olm2p-EvK6InTidNZnUUqPL70,727 +botocore/data/dynamodbstreams/2012-08-10/endpoint-rule-set-1.json.gz,sha256=0rmkIoEOHw6XeNRcXHZM0fyJiPmUEbspgS9eKTPMssc,1602 
+botocore/data/dynamodbstreams/2012-08-10/examples-1.json,sha256=LF2m4pmyTs0G8NR6AhmybL0E2F9WHfnbxz5q31DtjAg,7693 +botocore/data/dynamodbstreams/2012-08-10/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/dynamodbstreams/2012-08-10/service-2.json.gz,sha256=7uXEfTTNcCQer_m0SEd26BLqit1Arc-WGz09ZZCVfyg,7008 +botocore/data/ebs/2019-11-02/endpoint-rule-set-1.json.gz,sha256=AMHSH5Sm246GuiYyt1kus-fNcSI6ChEupkoQ_Fq_-_s,1145 +botocore/data/ebs/2019-11-02/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/ebs/2019-11-02/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/ebs/2019-11-02/service-2.json.gz,sha256=F2eWLREvRlwCON3D5WnGqHlOwMYOKw0vghb3OUabLKM,6402 +botocore/data/ec2-instance-connect/2018-04-02/endpoint-rule-set-1.json.gz,sha256=wxBIjgGchbtQ2xBS9KR3FFu9eAN_KQnOSGx9ou7p12M,1161 +botocore/data/ec2-instance-connect/2018-04-02/examples-1.json,sha256=Qnm4-ldcu-2O38JTe_w17UJWdblMaRBfIc8HyJ62DYU,1712 +botocore/data/ec2-instance-connect/2018-04-02/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/ec2-instance-connect/2018-04-02/service-2.json.gz,sha256=XfkT6IFWG1y9TujDBA1PqijR_UOPDmAUxteVDNZDMek,2303 +botocore/data/ec2/2014-09-01/endpoint-rule-set-1.json.gz,sha256=BVzdc83h1Zo7H4syJH1W9-sdbdCu_vmWIC9VLH545Nw,1237 +botocore/data/ec2/2014-09-01/paginators-1.json,sha256=XpA8TZvmBGGraKlRGE-U-YeLIBN1ZvbcyE8Wh8uuIDM,1271 +botocore/data/ec2/2014-09-01/service-2.json.gz,sha256=Zw2r-T5cXzsBN1gQb-V6dJVhzzMMzcqjqUyQ1ADJEeM,71841 +botocore/data/ec2/2014-09-01/waiters-2.json,sha256=HG1xDu-8ICfvY1n_YV9i0ylufepFUYmDd0dLkQxwKuY,8548 +botocore/data/ec2/2014-10-01/endpoint-rule-set-1.json.gz,sha256=BVzdc83h1Zo7H4syJH1W9-sdbdCu_vmWIC9VLH545Nw,1237 +botocore/data/ec2/2014-10-01/paginators-1.json,sha256=Uns0O6V6ZIXI09iZdCY77w-CBHbes_siW5vFU-bpE1w,1439 +botocore/data/ec2/2014-10-01/service-2.json.gz,sha256=_SdZgkYg0ddekju6N3xJZJ4oKqwo54GbsSl-2HAmZT0,75362 +botocore/data/ec2/2014-10-01/waiters-2.json,sha256=UDhKYGIrItEq2e56vKMh6yLdn_YfsfTYsmankCjsR3k,11040 +botocore/data/ec2/2015-03-01/endpoint-rule-set-1.json.gz,sha256=BVzdc83h1Zo7H4syJH1W9-sdbdCu_vmWIC9VLH545Nw,1237 +botocore/data/ec2/2015-03-01/paginators-1.json,sha256=Uns0O6V6ZIXI09iZdCY77w-CBHbes_siW5vFU-bpE1w,1439 +botocore/data/ec2/2015-03-01/service-2.json.gz,sha256=Wz78Vs7QEd3rUd1B5DWnXrN9kQonBRYaCOF7GbI3ZeI,77885 +botocore/data/ec2/2015-03-01/waiters-2.json,sha256=UDhKYGIrItEq2e56vKMh6yLdn_YfsfTYsmankCjsR3k,11040 +botocore/data/ec2/2015-04-15/endpoint-rule-set-1.json.gz,sha256=BVzdc83h1Zo7H4syJH1W9-sdbdCu_vmWIC9VLH545Nw,1237 +botocore/data/ec2/2015-04-15/paginators-1.json,sha256=Uns0O6V6ZIXI09iZdCY77w-CBHbes_siW5vFU-bpE1w,1439 +botocore/data/ec2/2015-04-15/service-2.json.gz,sha256=XVGeCKOTi_uqysEqDtjBTm_pu5P8zgpFFQvzuU_GMIo,90171 +botocore/data/ec2/2015-04-15/waiters-2.json,sha256=1iUHJTDrTvb5_HbDMbVVzC4Ex1S97GZl-tnP70MaDEY,11546 +botocore/data/ec2/2015-10-01/endpoint-rule-set-1.json.gz,sha256=XH3W6aa6cK-NGd3qerhgVinrOAy4qmqI9KVt7higqeA,1391 +botocore/data/ec2/2015-10-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/ec2/2015-10-01/paginators-1.json,sha256=Vom5HeCc0UgDyEyYKw3piztolJ3IIxz_tIhEX61TvM8,1793 +botocore/data/ec2/2015-10-01/service-2.json.gz,sha256=i4wq_nWnXSIbWnPj_8y0lmyMIrulFm3U5l5kBiH01ok,107913 +botocore/data/ec2/2015-10-01/waiters-2.json,sha256=8sXo9xWtm1IZMKcm9Ne42ha-9XDTVP_fZUejgA1tw3E,14823 
+botocore/data/ec2/2016-04-01/endpoint-rule-set-1.json.gz,sha256=XH3W6aa6cK-NGd3qerhgVinrOAy4qmqI9KVt7higqeA,1391 +botocore/data/ec2/2016-04-01/examples-1.json,sha256=0xdUoNVzXNn5ZMmA_aiPwiQC68adrXjBJPhw3AzQC8M,109914 +botocore/data/ec2/2016-04-01/paginators-1.json,sha256=Vom5HeCc0UgDyEyYKw3piztolJ3IIxz_tIhEX61TvM8,1793 +botocore/data/ec2/2016-04-01/service-2.json.gz,sha256=Bv16SPdDBi_KysjtyTWUAKP-R7oftAqmmrVIqksZV4Q,112481 +botocore/data/ec2/2016-04-01/waiters-2.json,sha256=ZjSjdDS-pisO_MoRjsulXMshrcU5qNJd4m1bOBQ9mKQ,15259 +botocore/data/ec2/2016-09-15/endpoint-rule-set-1.json.gz,sha256=XH3W6aa6cK-NGd3qerhgVinrOAy4qmqI9KVt7higqeA,1391 +botocore/data/ec2/2016-09-15/examples-1.json,sha256=Dv18Ql8faOeBMQlenC7HBzlgrNQXNeokvLsyFf6Q_yY,110174 +botocore/data/ec2/2016-09-15/paginators-1.json,sha256=Vom5HeCc0UgDyEyYKw3piztolJ3IIxz_tIhEX61TvM8,1793 +botocore/data/ec2/2016-09-15/service-2.json.gz,sha256=ctehmZyAUl4Btq-Rg3Xmp68XQ2SDgvSh_9jxIJdVM9w,114400 +botocore/data/ec2/2016-09-15/waiters-2.json,sha256=1ZtptOEInU4p-4ZQFXbC5lxZ8XNsseki72qxLO2dX4M,14875 +botocore/data/ec2/2016-11-15/endpoint-rule-set-1.json.gz,sha256=zeCwlXgusTgfFVgOthz1DuMICZbN3KC1UbwjAzwj29Y,1231 +botocore/data/ec2/2016-11-15/examples-1.json,sha256=gB8-MuMSl9N4ic1oBYCv02B_YplxOdnKsfS7g5pY7hk,147949 +botocore/data/ec2/2016-11-15/paginators-1.json,sha256=U7CXec4GP15ASq731ieA5tYrUc6A5gamOJ8mC5HqMN8,31259 +botocore/data/ec2/2016-11-15/paginators-1.sdk-extras.json,sha256=s-xAN9v51q2N4UE-PQ_I-wK9PDbrSnwQlKx0yA_rmSk,249 +botocore/data/ec2/2016-11-15/service-2.json.gz,sha256=4dfj-nJHZbsaa1fAociCjKDOtARHsl_gARR0ybDNYds,480767 +botocore/data/ec2/2016-11-15/waiters-2.json,sha256=vtdUBpnIDWN6HFhRORCKX2AhcB4607o4_3EGE7JarSQ,20590 +botocore/data/ecr-public/2020-10-30/endpoint-rule-set-1.json.gz,sha256=Yln-rrkJRSP9euxT_8PwakGN53QfH9um_kqcdL7_Bs8,1245 +botocore/data/ecr-public/2020-10-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/ecr-public/2020-10-30/paginators-1.json,sha256=EEmON1DSCdAARd-o_S_RiZ6rXcWO8AZbYlx4UMyZEGE,711 +botocore/data/ecr-public/2020-10-30/service-2.json.gz,sha256=ThFIUlnLb_FAE0_4XUjrnTaHVizPE44jibYJ9dbFcF0,10719 +botocore/data/ecr/2015-09-21/endpoint-rule-set-1.json.gz,sha256=6H9DXZHErscuE36OXZZm0DKhMPPiQmg9DeHtghZ3a_8,2387 +botocore/data/ecr/2015-09-21/examples-1.json,sha256=cFx-qAY3SfNXEHCMe7I9RTWxV-Jtlo8moRHGDZ5UCAM,6603 +botocore/data/ecr/2015-09-21/paginators-1.json,sha256=jTAyTM5a36H94lthRLaf_MOoFKa0_9YW0wqzaMoyiNM,1736 +botocore/data/ecr/2015-09-21/service-2.json.gz,sha256=8VLr0gz11vuhdChGaYoiCm6FtFJvoI6YEhgX_FY-s3Y,29968 +botocore/data/ecr/2015-09-21/waiters-2.json,sha256=j4QQUhn_PYN87gWoaY1j1RR-lv7KjzPItwwn1WMYkB8,1482 +botocore/data/ecs/2014-11-13/endpoint-rule-set-1.json.gz,sha256=LskB02qr3Ii1t0ybLOVQk9miimOEJQ7FF73jHFwKKnY,1145 +botocore/data/ecs/2014-11-13/examples-1.json,sha256=Qp-rrnSHaDiVv4ESeJkTGfC1-guCjRc9B9LfiwjrMjg,36519 +botocore/data/ecs/2014-11-13/paginators-1.json,sha256=Y_nqEkKUMY3UhZ5D6DJ2QqxBHfnLkqM6FsOxPp5JUVE,1565 +botocore/data/ecs/2014-11-13/service-2.json.gz,sha256=u0gWXxdfm9xBUAMmJnmFfyOK-be-4aCuzfLj_536khE,126535 +botocore/data/ecs/2014-11-13/waiters-2.json,sha256=F4d_a7_xVQIib5MpmSitTQBxupfL0Z9NqxOibIA6Igs,2246 +botocore/data/efs/2015-02-01/endpoint-rule-set-1.json.gz,sha256=Pab9xuZElYUTbcAPFrOovAaRkzLEpoAlYMeS7ba95s4,1571 +botocore/data/efs/2015-02-01/examples-1.json,sha256=0EFBCHNGLNS0ftGQqjngkhfTFYpw6E-7lnuAh-d6YKU,8825 +botocore/data/efs/2015-02-01/paginators-1.json,sha256=SKRuOWm1E5Nvvzppzjn-IeS1Lj0I3qSqvc9t9XtKpA4,878 
+botocore/data/efs/2015-02-01/service-2.json.gz,sha256=ElhYyEYUCiX4uYv1eMR5yiLiAvwWB6vVlVMKtQCEfQY,23873 +botocore/data/eks-auth/2023-11-26/endpoint-rule-set-1.json.gz,sha256=nVxwJPUIswUMgtl7OBe4g-ZRXPesq_ZFoBAxZpJOzqw,1126 +botocore/data/eks-auth/2023-11-26/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/eks-auth/2023-11-26/service-2.json.gz,sha256=KLFMCIGnXTABMn-YGNGGufjtCj6J5rkFtGbD38fRkjs,2343 +botocore/data/eks-auth/2023-11-26/waiters-2.json,sha256=tj1ZnaqhwmJkUEQlwH7wm1SqY3lg1BvZDfzfPaIgNrY,38 +botocore/data/eks/2017-11-01/endpoint-rule-set-1.json.gz,sha256=YeFxhFk8yNwG0v-6e9zNYmYyBsYmvyBMXC0DtG6XOVQ,1265 +botocore/data/eks/2017-11-01/examples-1.json,sha256=vCT3MFB7D3tNzqaIdxd8nyDbt7hevsAvDE4RQTQcEKg,5021 +botocore/data/eks/2017-11-01/paginators-1.json,sha256=-dO7ymV_bnWAiWB8cAB4Me20IHUuL0M_wyxdaHa2NDg,2715 +botocore/data/eks/2017-11-01/service-2.json.gz,sha256=Z3Q5gqcnItte3raSgsNDAJDDUMtVHj3uzcQl47Mt3e4,56062 +botocore/data/eks/2017-11-01/service-2.sdk-extras.json,sha256=pmn0V8Su5NiqW8Y3X-IBtzD1Bz_JANtKgU4fsr-i_bM,107 +botocore/data/eks/2017-11-01/waiters-2.json,sha256=j-ZLRcYn34oHDZY9xth7Vrz7q1eCNn_fzC1bK1WVVwo,4198 +botocore/data/elasticache/2014-09-30/endpoint-rule-set-1.json.gz,sha256=isIMUYbXxqR1LDccTYsLcyKMPX0V0UhDqLEyve94ZxM,1241 +botocore/data/elasticache/2014-09-30/paginators-1.json,sha256=YkZxwpICpidoDrIimyr0yFGYg_T0emkSfhlNfPOfVMA,2171 +botocore/data/elasticache/2014-09-30/service-2.json.gz,sha256=YnjUlsuCBzGnkLwuMc5Bhw2bl_73pRPkuejPhV-A0fo,22920 +botocore/data/elasticache/2014-09-30/waiters-2.json,sha256=mIVMN9SNrvDJ2iW_uXAA-N5ptxGmDw964Sv89zKAs-g,3719 +botocore/data/elasticache/2015-02-02/endpoint-rule-set-1.json.gz,sha256=LTsm4T9uZEmFGtS6ITBEzKYtRbxXLUUJeELNg-yHgXQ,1236 +botocore/data/elasticache/2015-02-02/examples-1.json,sha256=iWpOlje8s2EFHlnYNgjHX2DpC7teIKmeA7f6e51u00I,111590 +botocore/data/elasticache/2015-02-02/paginators-1.json,sha256=XrsOWe2fflZLszEuZYsZjeXPNAAj5IjpOdfsse_Peg8,3401 +botocore/data/elasticache/2015-02-02/service-2.json.gz,sha256=iEuLrB5wNks5DW-0PhAzTd_BsNnAaAleRTDU5bEpPvU,56727 +botocore/data/elasticache/2015-02-02/waiters-2.json,sha256=N6NTYHqUoktWaIjapl3RDepPknxNlIbb8a0wnS0HB_E,5118 +botocore/data/elasticbeanstalk/2010-12-01/endpoint-rule-set-1.json.gz,sha256=8LUqy4asDLrRGOLDpDo0pzPMreUprh0unXBi_6PaX3w,1240 +botocore/data/elasticbeanstalk/2010-12-01/examples-1.json,sha256=EuEpZEobhGxWPfRosGTFNWYs8zRFVtkQtLXD8M_5fm0,37449 +botocore/data/elasticbeanstalk/2010-12-01/paginators-1.json,sha256=qM8N07fmdTtnZBXFiyFeW31EjqjmDWb-viwc19UyF5o,934 +botocore/data/elasticbeanstalk/2010-12-01/service-2.json.gz,sha256=ijctnRlXulrQLZ0Yrt4weQmB7KjYRz2G8dgslJUJmTg,27773 +botocore/data/elasticbeanstalk/2010-12-01/waiters-2.json,sha256=nS1qW0cVQpjnVhpONryvuFWWW4JwJYSW82ooLigmCu0,1463 +botocore/data/elb/2012-06-01/endpoint-rule-set-1.json.gz,sha256=CK5hOFanJwkCxHG3GxFHBXdsIG_vlDWyJVq_3FXeY5M,1242 +botocore/data/elb/2012-06-01/examples-1.json,sha256=NE6HcGypE87pOfvGkxKi_QD-UJ_qWHG2_Q9ynk6V9xA,30446 +botocore/data/elb/2012-06-01/paginators-1.json,sha256=udADJnjh3b-REUTKNlC9yYaRI6aOiXfx3demJA1Msxg,373 +botocore/data/elb/2012-06-01/service-2.json.gz,sha256=bFxMO_Ed29hxrz0DsI5kh-51r5JG1RWX2ubJe6Wnkdo,13212 +botocore/data/elb/2012-06-01/waiters-2.json,sha256=9NjB-6qbZ5pHxElH90T-4YPEBdXHCA9QHdcF96gTbP0,1527 +botocore/data/elbv2/2015-12-01/endpoint-rule-set-1.json.gz,sha256=CK5hOFanJwkCxHG3GxFHBXdsIG_vlDWyJVq_3FXeY5M,1242 +botocore/data/elbv2/2015-12-01/examples-1.json,sha256=4Qxoz28hEDW8u1O7iGLKnH9NNb7Po5qybLFQtvtR7ss,44281 
+botocore/data/elbv2/2015-12-01/paginators-1.json,sha256=HdpSUaB3jTHWaSt0O3wUi_qAjuGs7sz-vbUPV64kgWc,1744 +botocore/data/elbv2/2015-12-01/service-2.json.gz,sha256=6b0jzCrJAHpfTVMJBXdW9ETw_9d_NxMDFHITWTOo9tg,32908 +botocore/data/elbv2/2015-12-01/waiters-2.json,sha256=k-g2ypXqfbW4ktwuK1iVKpApIncFhOPemhbs7pf7cW8,2371 +botocore/data/emr-containers/2020-10-01/endpoint-rule-set-1.json.gz,sha256=McHEdX8Mqhv3i4gdVV5rd1-HwpkrnhJb1edxrFMLDSo,1235 +botocore/data/emr-containers/2020-10-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/emr-containers/2020-10-01/paginators-1.json,sha256=H-qB-RVfZ-v6uivpkok6jdL9RsL9yHZmDhdG4hdiPtU,889 +botocore/data/emr-containers/2020-10-01/service-2.json.gz,sha256=sEAznbMN7HhRtWt7x_BRDafUQq0iANzWSTmSKRYaJtE,11282 +botocore/data/emr-serverless/2021-07-13/endpoint-rule-set-1.json.gz,sha256=a4ilhDvIdUByL5ghnnKLNAETBahgLffL7UTRdCnx6mM,1152 +botocore/data/emr-serverless/2021-07-13/paginators-1.json,sha256=X_bd8HxYUcjMp19q-YBGDOq_AGDlUYT1vP3yTfn6l8E,529 +botocore/data/emr-serverless/2021-07-13/service-2.json.gz,sha256=-ztTaAnwVBK-yBJcEFVPkMagg6glaWwFXR-WdGG_1w8,12704 +botocore/data/emr/2009-03-31/endpoint-rule-set-1.json.gz,sha256=8RMLjIlMq4thnvaIe7NNX7meDD01J6iSEhJ4Bp0y1tg,1239 +botocore/data/emr/2009-03-31/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/emr/2009-03-31/paginators-1.json,sha256=4EzVWE6TiQt5Mklp197KH8t17OiwaOVgVnBGK4y_HiQ,1357 +botocore/data/emr/2009-03-31/service-2.json.gz,sha256=hq2tVBLyZ0VrY2lPEPHr3V7a_07PDHG61L1jQkaz0G4,47420 +botocore/data/emr/2009-03-31/waiters-2.json,sha256=pMh5RSVHgFU-DlrH0dSf4IibHo9Hddmg9DvaR4a0Z90,2073 +botocore/data/endpoints.json,sha256=5rXhAQxlXMzPEQFq0pI2ZtdIEvnRqgjFdU4dIBbpqmE,1253666 +botocore/data/entityresolution/2018-05-10/endpoint-rule-set-1.json.gz,sha256=-tvg78eG42q4BoxindzVoXv2ce8ZCzHMLmYqzA06J7Y,1304 +botocore/data/entityresolution/2018-05-10/paginators-1.json,sha256=SRcdwInaqBXq7gpYBftOPb7OMmwgOTUQUTrGaY4594g,1245 +botocore/data/entityresolution/2018-05-10/service-2.json.gz,sha256=DAH6HOa5qDBwzLRtblczpYIfuEnHcZNaIxDu1Rz9AH4,19472 +botocore/data/es/2015-01-01/endpoint-rule-set-1.json.gz,sha256=wktV8CTN4c3plu7L7gipQwWT7s2EQJhzajuCH3rZvxs,1311 +botocore/data/es/2015-01-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/es/2015-01-01/paginators-1.json,sha256=sbfve7QYejJgHClHTY4PgdwH4A-PJlY2y0XZ0qRCq9Q,1022 +botocore/data/es/2015-01-01/service-2.json.gz,sha256=NBkZB3gOln1Wr_RLhIIzL-bWLyb47ZHO_wi2KcKC0ng,29503 +botocore/data/events/2014-02-03/endpoint-rule-set-1.json.gz,sha256=6tJWlrE7WbJTvfyZW-5XzHeyIL2ZOZyEXS0K1IZdWKI,1856 +botocore/data/events/2014-02-03/service-2.json.gz,sha256=Qnifbs8tDbBaDbzQG-ww1T-ldheWCJpWKZN5o-3P2L8,5254 +botocore/data/events/2015-10-07/endpoint-rule-set-1.json.gz,sha256=1OyfgEiiKeAiRXxaELUSmSQbJx5KuLyKnlStz6fwzOg,1843 +botocore/data/events/2015-10-07/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/events/2015-10-07/paginators-1.json,sha256=A4gA5VY4LAnP_3iCOI-P0-c5nVH5ntM9hOh3gytyGco,504 +botocore/data/events/2015-10-07/service-2.json.gz,sha256=-aA95MLzL-ysES7HIagN6MIY0c1M9Ot_0WxIb9oxVIg,36776 +botocore/data/evidently/2021-02-01/endpoint-rule-set-1.json.gz,sha256=SmMJXKxF6EXR2100P5t5UA_hdQPKFllo8HbXHRqkF0o,1150 +botocore/data/evidently/2021-02-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/evidently/2021-02-01/paginators-1.json,sha256=dzsz3rOFQc5MqVrha2K97L1ooI2e1kt8Om55efyV-tI,1016 
+botocore/data/evidently/2021-02-01/service-2.json.gz,sha256=CFDPz7CsijoKtNVLpdn003ocbElOXbZ9kxpe-E6RfgM,20415 +botocore/data/evs/2023-07-27/endpoint-rule-set-1.json.gz,sha256=lt4GWaSNjF7es9hxIQ48_325Piv09sEsVIaWmidVy-U,1295 +botocore/data/evs/2023-07-27/paginators-1.json,sha256=7n46zlvhz-yf-V1sx4iKtmndDOZNMHXF08ziDtI8sk4,559 +botocore/data/evs/2023-07-27/service-2.json.gz,sha256=L7FhRc00J1DLzWe99yud6q7M4tRbcMzz4ZYLr6hD_48,10841 +botocore/data/evs/2023-07-27/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/finspace-data/2020-07-13/endpoint-rule-set-1.json.gz,sha256=JZY9ipabJjb76Thi8HtuTRrQsVR0u7Y8BaT5SJEQyHk,1152 +botocore/data/finspace-data/2020-07-13/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/finspace-data/2020-07-13/paginators-1.json,sha256=2RzSHda8vNoQX1L1pkYSwHfCF6Us0IKOrXdsXe-ZHkU,851 +botocore/data/finspace-data/2020-07-13/service-2.json.gz,sha256=ffjKQbRIERg1ATKJ2hJT7LcBb-6mjHdIrSbLqy2UAyM,14501 +botocore/data/finspace/2021-03-12/endpoint-rule-set-1.json.gz,sha256=6ci-_M8J5hlFDkXEr_v5wqFHTN-BR5ko7ZvaAjXvdWk,1149 +botocore/data/finspace/2021-03-12/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/finspace/2021-03-12/paginators-1.json,sha256=S_FGEtC07GgFCRSKmv_l4RhRBCFmOEmIsQl7QfDI678,197 +botocore/data/finspace/2021-03-12/service-2.json.gz,sha256=6GYBFxjmDnxxCbc5TaxlW3rHdWqHvI6LVa33qDDo1vo,30464 +botocore/data/firehose/2015-08-04/endpoint-rule-set-1.json.gz,sha256=viF_go5rd4S4cl_1odFyKedXblJGxvFmLcsEen-J-Fg,1148 +botocore/data/firehose/2015-08-04/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/firehose/2015-08-04/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/firehose/2015-08-04/service-2.json.gz,sha256=EeiCJcdgzWOA6IOp0zDS1RVucT_S_SLwl3lKfjSSvvQ,36343 +botocore/data/fis/2020-12-01/endpoint-rule-set-1.json.gz,sha256=aCFGDsDLgGUaj8sFCr-XDypLOEZiBgNyF63rup9unZc,1230 +botocore/data/fis/2020-12-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/fis/2020-12-01/paginators-1.json,sha256=FZ37WL0c12pVwCrWPZghttxIuVBtwDYe0h208m44zeQ,1107 +botocore/data/fis/2020-12-01/service-2.json.gz,sha256=dhioiRtY1o1Z2V_EH-tv4fGFGw_rdloIcB-sLkFyMmQ,10124 +botocore/data/fms/2018-01-01/endpoint-rule-set-1.json.gz,sha256=mkR3QwZlYcp8LpsQ7Mi9p4tT15StiUWDdxxG5xx6r_k,1145 +botocore/data/fms/2018-01-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/fms/2018-01-01/paginators-1.json,sha256=Nv9OHpCiWQyuj5sj_Pz-0TjbnmtiMCR0tuySMApzYjM,1470 +botocore/data/fms/2018-01-01/service-2.json.gz,sha256=y0Pnf5wa_vZxqFZnNMucv0ArHKVOVJiwTUxjm-Dei6c,34811 +botocore/data/forecast/2018-06-26/endpoint-rule-set-1.json.gz,sha256=jK91EkqYBRQ7FpDLsJWX4xwZY-u99n6VNiHeNEqAKLs,1148 +botocore/data/forecast/2018-06-26/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/forecast/2018-06-26/paginators-1.json,sha256=uwjfu4LU_nDuv9woqU_mcL_58oVcFi8QfUSAtQycpA8,2508 +botocore/data/forecast/2018-06-26/service-2.json.gz,sha256=NtxXFfClylbdtYy5wxp2YkdsmeXdv5n7CO84jomsrQo,40082 +botocore/data/forecastquery/2018-06-26/endpoint-rule-set-1.json.gz,sha256=F4dCQxRuY__0gQMszYGsBrKywmRrbrAIxlaTFZtv9KA,1152 +botocore/data/forecastquery/2018-06-26/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/forecastquery/2018-06-26/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 
+botocore/data/forecastquery/2018-06-26/service-2.json.gz,sha256=cLxijklr96F6KLHmobkuw21MLpUamFF2TGB5CpF8v4g,2184 +botocore/data/frauddetector/2019-11-15/endpoint-rule-set-1.json.gz,sha256=sCQwAu0aivhdZAutsG5rpIiefRm_XoT67msif--dOM4,1152 +botocore/data/frauddetector/2019-11-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/frauddetector/2019-11-15/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/frauddetector/2019-11-15/service-2.json.gz,sha256=Kd9OR3ZKPFFATnAXxcEj-y7RRS5RZK1ScvIM3ImXhRE,24352 +botocore/data/freetier/2023-09-07/endpoint-rule-set-1.json.gz,sha256=3NEvtMXCbZgoDS0rASDXWoV-vKKCoVmmhSVqwu8GLns,1420 +botocore/data/freetier/2023-09-07/paginators-1.json,sha256=7D1EodEvT5WrTkrtPkS0gfa4nMHocl8ljxcUoMJ1_8k,370 +botocore/data/freetier/2023-09-07/service-2.json.gz,sha256=9-3df7J8qfUZGzugfeaVGPtzF1eBvTkq7D_xuD2uczQ,4640 +botocore/data/freetier/2023-09-07/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/fsx/2018-03-01/endpoint-rule-set-1.json.gz,sha256=LEWvAHO4NvIe8jNt44hG1yywpJ4Bx1lgE1PxnI7DL84,1145 +botocore/data/fsx/2018-03-01/examples-1.json,sha256=Ys4PS4GcrfV3F5Lg4hkaZgyemGgNKNLYSm-uepLDkR4,14242 +botocore/data/fsx/2018-03-01/paginators-1.json,sha256=s2sVh0nBC-oAhWvml5z1jHydcZmhTD8r5N-d5V12VjU,1250 +botocore/data/fsx/2018-03-01/service-2.json.gz,sha256=OtycENtDOVpJIclHO361FMoso4LMRY70eNCVpEL7J-s,80261 +botocore/data/gamelift/2015-10-01/endpoint-rule-set-1.json.gz,sha256=JOJXua58Ka_4WS8QObZ3oHwOYSzHr2MAz8E_4wEaE8o,1149 +botocore/data/gamelift/2015-10-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/gamelift/2015-10-01/paginators-1.json,sha256=hPYwxPqikAsx9c8Py0ZJ_Yie8F5B8RnfEVJkend1l-M,4461 +botocore/data/gamelift/2015-10-01/service-2.json.gz,sha256=zy1-JBM-1QmOt-LNOMhmJpmYYOnYLXaEYfNmjrZPDYQ,116835 +botocore/data/gameliftstreams/2018-05-10/endpoint-rule-set-1.json.gz,sha256=e1z534PiO9GSezoUqEx6nuuCGDgfDCM_N9SvzXEywXA,839 +botocore/data/gameliftstreams/2018-05-10/paginators-1.json,sha256=j131UfxmFfUe6Mtq2v4AS_kj5v36e56Y_LEKcltg0lM,690 +botocore/data/gameliftstreams/2018-05-10/service-2.json.gz,sha256=F_wfJiNGIlXZYMy3T6FfApUHslR7T-E1-6d_Ys67QJI,26747 +botocore/data/gameliftstreams/2018-05-10/waiters-2.json,sha256=1bi9fg1J2pQ1930lM_N2eBvnZBKDCjdy7s5sFugPVA4,2343 +botocore/data/geo-maps/2020-11-19/endpoint-rule-set-1.json.gz,sha256=gg0on4-7cRYWe7CCKu97gij-AGVN-81S3y48fv5RD-Y,1639 +botocore/data/geo-maps/2020-11-19/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/geo-maps/2020-11-19/service-2.json.gz,sha256=tAP1KpsldqrUVjFyKBPfSXQWKc0ExKworExlunDX80k,7612 +botocore/data/geo-places/2020-11-19/endpoint-rule-set-1.json.gz,sha256=EJc4IilE4NyEIJXFPKLDuuS0-nWEFr1TXFq7eMdRvng,1640 +botocore/data/geo-places/2020-11-19/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/geo-places/2020-11-19/service-2.json.gz,sha256=xTb1-WLj0T7BCeN_uv_JiUVYk54VBSS_QBN2nllRcGU,16133 +botocore/data/geo-routes/2020-11-19/endpoint-rule-set-1.json.gz,sha256=WogdVnXy-7c5CvqWAT7CTBA9nzeePBsuOHy1kG7IQc8,1645 +botocore/data/geo-routes/2020-11-19/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/geo-routes/2020-11-19/service-2.json.gz,sha256=VrjH07FQmF_sqHGX2BqZTIb0Z2xPK_zZV7lAOjG5BEI,34018 +botocore/data/glacier/2012-06-01/endpoint-rule-set-1.json.gz,sha256=04JtAX_ig2tdqoZqJiEXVXKKFif0ANg_zmgMtpfB1UE,1232 
+botocore/data/glacier/2012-06-01/examples-1.json,sha256=hR-1NmWo9lL0Cdqnr6x95Ywu_VfJucv0T4OveUp-S4o,27536 +botocore/data/glacier/2012-06-01/paginators-1.json,sha256=RAeqGFOs4GRiC-DuphMOBHWljwDfqBQINYf1qA2LbNA,628 +botocore/data/glacier/2012-06-01/service-2.json.gz,sha256=SOtZ8IeoD1S7-zVjWGWH901TumK6UzOCWv1t6ZPFwJ0,21004 +botocore/data/glacier/2012-06-01/waiters-2.json,sha256=hzoyJJT1wJh9lq1_z4MK2ZBj98TGRhroii0kbeFXnJw,785 +botocore/data/globalaccelerator/2018-08-08/endpoint-rule-set-1.json.gz,sha256=4Gq-53C-BIsDibLmmTndmJwTF9Jng8ulJ3onfSGaetE,1155 +botocore/data/globalaccelerator/2018-08-08/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/globalaccelerator/2018-08-08/paginators-1.json,sha256=Exal9Oqocr6pKQ_T5yEFYLXwm0BSxPYcuZTjZL2_8x8,2016 +botocore/data/globalaccelerator/2018-08-08/service-2.json.gz,sha256=ZUSeU4sfnZRxXGzxxlD6NvS1B0NNO_XtBxnIXb35VvA,21919 +botocore/data/glue/2017-03-31/endpoint-rule-set-1.json.gz,sha256=HznY3jGqGNuj4tCOn243hRsCwAyOGWjwvuc-HF2yqIU,1146 +botocore/data/glue/2017-03-31/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/glue/2017-03-31/paginators-1.json,sha256=tzX-0At6NqflYPWszHsH-AAfuAsYqw1LwJnu4nKh8Lg,5025 +botocore/data/glue/2017-03-31/paginators-1.sdk-extras.json,sha256=05DstPbLjNqM3IFboIp0gB9jwSQ1X_PuyxkLA1ecfL4,218 +botocore/data/glue/2017-03-31/service-2.json.gz,sha256=kgT3Jt2qD0j5FJL97CnnloyGHutGLrmL-8DjnqUjDNU,170328 +botocore/data/grafana/2020-08-18/endpoint-rule-set-1.json.gz,sha256=ibue-EXJXCTuIpZB4Qaii9Jg4KPNF_XJgsczNVk5_x8,1148 +botocore/data/grafana/2020-08-18/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/grafana/2020-08-18/paginators-1.json,sha256=1w34xYXn5nUies34W7BC_lzVPnbnhggKcWKUug4ckRc,908 +botocore/data/grafana/2020-08-18/paginators-1.sdk-extras.json,sha256=_g8panv1mpml0x69Y013wHvb22Sy63dKgVhK5oRqbwE,329 +botocore/data/grafana/2020-08-18/service-2.json.gz,sha256=W_l1b33N9yyWl3a_l2Ufohb3N0tn8S-wdar3CW1cTV0,15023 +botocore/data/greengrass/2017-06-07/endpoint-rule-set-1.json.gz,sha256=hTp7U2CffLhfC6h6wTir_obf-wlntGllS9QngFsrdSQ,1361 +botocore/data/greengrass/2017-06-07/paginators-1.json,sha256=LphzapxioJkdlNs-zU4IVmg_pjswwy8RuDPq79sbW64,3366 +botocore/data/greengrass/2017-06-07/service-2.json.gz,sha256=HrYOf5Vf22huj75ycgvBXLCSeNoPge9QJnVbcnbxJhk,17132 +botocore/data/greengrassv2/2020-11-30/endpoint-rule-set-1.json.gz,sha256=hTp7U2CffLhfC6h6wTir_obf-wlntGllS9QngFsrdSQ,1361 +botocore/data/greengrassv2/2020-11-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/greengrassv2/2020-11-30/paginators-1.json,sha256=geNY9pksg1eDuJ9mpqk1iee_t8zQuFBrBG_O6eaZ7GU,1283 +botocore/data/greengrassv2/2020-11-30/service-2.json.gz,sha256=7_xTen2o2I4RkksPjz-oUQEUp7OxY8a2YGMtuQyAqak,20272 +botocore/data/groundstation/2019-05-23/endpoint-rule-set-1.json.gz,sha256=MR-srRXfZrUP9RUGZTgnZ0zMQg2s3oPLp9z5tr6lHDs,1151 +botocore/data/groundstation/2019-05-23/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/groundstation/2019-05-23/paginators-1.json,sha256=4_ogVwU_XXx--s-8FB9fXMd5kIjdEXBdN6iBd04Kmlk,1236 +botocore/data/groundstation/2019-05-23/service-2.json.gz,sha256=88fdsesYxJ5U2AnEThGr6RDTpaFsngpTcWcZ1kIXwjs,18957 +botocore/data/groundstation/2019-05-23/waiters-2.json,sha256=fuayBSt0gQV3HjjFxrqZgUCLSo6DxBG5qb-ASxS3oKE,534 +botocore/data/guardduty/2017-11-28/endpoint-rule-set-1.json.gz,sha256=N9n00duuOd-VAx7oDI1inSw4DUfqQ3-DWsaTK8qSlv4,1235 
+botocore/data/guardduty/2017-11-28/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/guardduty/2017-11-28/paginators-1.json,sha256=xlZuLLHS4zEJ5ldwSdcZic3kEOn-CbV_N9NCCDnX0_M,2224 +botocore/data/guardduty/2017-11-28/service-2.json.gz,sha256=H8mRBQaBq4EdlpRMzVupmnfxsHnr_VvvB1jBtqIFHU4,62575 +botocore/data/health/2016-08-04/endpoint-rule-set-1.json.gz,sha256=aUybVgazmCjIZ3iXyQPe1mE43H15rKtEVIQ4Lf0iKZI,1524 +botocore/data/health/2016-08-04/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/health/2016-08-04/paginators-1.json,sha256=yiHNcdPNOcqngUnAvp1BUD8e9oWSgqGS-T0Esl6r8vI,1397 +botocore/data/health/2016-08-04/service-2.json.gz,sha256=29iSAW4N97IJH5mC51ToJolF7Ds6zfECpEIw6n3Atig,10836 +botocore/data/healthlake/2017-07-01/endpoint-rule-set-1.json.gz,sha256=F4SfaOelNY7pN20TI7kZlaL942d4wxOlzismq6qQeD0,1151 +botocore/data/healthlake/2017-07-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/healthlake/2017-07-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/healthlake/2017-07-01/service-2.json.gz,sha256=k4x7lkiJnhm22bBUiA8zEMstkRqrccSusgA-v_0bQwU,6387 +botocore/data/healthlake/2017-07-01/waiters-2.json,sha256=4Sv75r3pHvxlRrR22FlUPVRujs263gAEJcberiB4dkU,2737 +botocore/data/iam/2010-05-08/endpoint-rule-set-1.json.gz,sha256=tPHOxqb46vbsfMAe2LL7hhnJrfDAx5onuBoEtIbffGY,2238 +botocore/data/iam/2010-05-08/examples-1.json,sha256=T5EqrFFZBiVlL9dsN-T5DnigU1UnMSXfVVwBK00AWrU,48537 +botocore/data/iam/2010-05-08/paginators-1.json,sha256=Mrjh9WIhO3YlPK04LELNlBGOWlr4EOWDPV22S4XlPM4,7036 +botocore/data/iam/2010-05-08/service-2.json.gz,sha256=UqZXLY2njv2hqMP7uTzr5ailWSISv1W9GqtnY8im_aE,81064 +botocore/data/iam/2010-05-08/waiters-2.json,sha256=sC6nS5oxMDEinb4z8GAMfZvFfPVWBzL_j1chnAT_z4k,1462 +botocore/data/identitystore/2020-06-15/endpoint-rule-set-1.json.gz,sha256=kBXiKN_bUi4ow1O3CcC0SK1a1jJMFvNKO5aVStVBLcY,1235 +botocore/data/identitystore/2020-06-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/identitystore/2020-06-15/paginators-1.json,sha256=lpGJQxUC8FqJ_JuWaDSHw3cMW677pwZDQpoWRcBvA0M,704 +botocore/data/identitystore/2020-06-15/service-2.json.gz,sha256=_7-hBcivDr2p3Burbcm2GVEwVffJDvM51hUVyypTPhQ,9460 +botocore/data/imagebuilder/2019-12-02/endpoint-rule-set-1.json.gz,sha256=uhqIlNtlp4CJX1BHQKLdiqi96STtzMVP2fDzijrsEo0,1237 +botocore/data/imagebuilder/2019-12-02/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/imagebuilder/2019-12-02/paginators-1.json,sha256=I6-12lIoWuh4TkN0X8GiCzc-8ilu4xD69Zuj_ckMoUQ,3820 +botocore/data/imagebuilder/2019-12-02/paginators-1.sdk-extras.json,sha256=bGrcj8XUhY0YmfiAvQv2t8JVDaOCrqss44DaLHra1HA,2040 +botocore/data/imagebuilder/2019-12-02/service-2.json.gz,sha256=IQrU5JN9HCJsIf86R0reRCl92KULrhqDvWjAb6Q1Y6k,44248 +botocore/data/importexport/2010-06-01/endpoint-rule-set-1.json.gz,sha256=puIbRv9-YTADKPuEA4Q1t-Av0VRjUk0tUA0zZuuiFNQ,1599 +botocore/data/importexport/2010-06-01/paginators-1.json,sha256=Etmobek-KI_4Gx8vLRBQsy6nYiRvog88hJCCXuRESZQ,215 +botocore/data/importexport/2010-06-01/service-2.json.gz,sha256=OxJPnsi447ke55nwUzT7FLeTc4iPpGGOVaSWF5GSvNI,4733 +botocore/data/inspector-scan/2023-08-08/endpoint-rule-set-1.json.gz,sha256=qicyrHOYNmjPMu8or7x2ngPGoeppfLVBKp28jMsl9WA,1303 +botocore/data/inspector-scan/2023-08-08/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 
+botocore/data/inspector-scan/2023-08-08/service-2.json.gz,sha256=SAquK1CFbDTZsw8gWZod6yN-wnL7hngaV1LmcE869Ww,1715 +botocore/data/inspector/2015-08-18/endpoint-rule-set-1.json.gz,sha256=KB1fNW2PPOiTBrtjogrCO1VHPl05iq7zLnzZxb0g490,1147 +botocore/data/inspector/2015-08-18/service-2.json.gz,sha256=fw2aNFQNKrIynbxJHPa0BBSwcSBxmd6-jaGsK_KK_a0,8021 +botocore/data/inspector/2016-02-16/endpoint-rule-set-1.json.gz,sha256=MZ5ho1cqYfInqZx_yvN1FissDIsxIf1Hoe0SNkcWwWM,1149 +botocore/data/inspector/2016-02-16/examples-1.json,sha256=EoIoRt_vSBIFaQ8UnXLRGL2W5H50CW9rscWvZ012w-g,36903 +botocore/data/inspector/2016-02-16/paginators-1.json,sha256=weo6-A-gbXJmE6B8bFERy0jQdJHvIDANiZLITbP_9ZQ,1610 +botocore/data/inspector/2016-02-16/service-2.json.gz,sha256=dzsD1VO7Egjse7lz3ai5wR_g-0fgsck2xvyFYR8IRDk,14162 +botocore/data/inspector2/2020-06-08/endpoint-rule-set-1.json.gz,sha256=NcJT0AB1R7MI4cntNpzs-16KFdfCxbG-buJAmwxkVio,1151 +botocore/data/inspector2/2020-06-08/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/inspector2/2020-06-08/paginators-1.json,sha256=Ib7iaLM9kEhPdRdVZSCwdlFg9qv4NEE6G8Hmobhq6jQ,2769 +botocore/data/inspector2/2020-06-08/paginators-1.sdk-extras.json,sha256=WXkFBTPQczZBVGrBAb2IoUJRliU1uNg-m8znDFawOOA,287 +botocore/data/inspector2/2020-06-08/service-2.json.gz,sha256=XAWTVtUqsgUVGjBIAt1RjCtmAmOcYcAw3Y29BVdhja4,45600 +botocore/data/internetmonitor/2021-06-03/endpoint-rule-set-1.json.gz,sha256=e5q3glfKctDroh-ouyYjEA2oSL0KdNliZG8-bxUQ8bg,1155 +botocore/data/internetmonitor/2021-06-03/paginators-1.json,sha256=5eozwrH81SIJWEZD5zsaAs9rr8CvRMMoXvYnOj1IxTs,531 +botocore/data/internetmonitor/2021-06-03/service-2.json.gz,sha256=hUOfOgcSUehYxZaxXbHuMOZFdvzqrjYZqCLTWwpR5UM,13894 +botocore/data/internetmonitor/2021-06-03/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/invoicing/2024-12-01/endpoint-rule-set-1.json.gz,sha256=UFvj9wpum0081eOtsFwUS9fPV--tFEu1xX4RAyZLD4w,899 +botocore/data/invoicing/2024-12-01/paginators-1.json,sha256=45c40-ubliTfW2ldc8DsZpWeou2oYKnh_TjoinLvgYg,575 +botocore/data/invoicing/2024-12-01/service-2.json.gz,sha256=tUgfNdBwzprqI2MF80aO5v0znJu3oSycjs-waewCMTY,11387 +botocore/data/invoicing/2024-12-01/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/iot-data/2015-05-28/endpoint-rule-set-1.json.gz,sha256=zC_JOvdIITOcRcPm1c1DEzMMCOrSRuHTrqZ5lswGgeA,1482 +botocore/data/iot-data/2015-05-28/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/iot-data/2015-05-28/paginators-1.json,sha256=FCM_y5QY56bw4TOgH3_OTBsnKj2PjI3ObCOOnKtsq80,201 +botocore/data/iot-data/2015-05-28/service-2.json.gz,sha256=WU9Qh9C9epmK8Qu9jkb3jQK-J1zKbyJQnglLgM4T-Gg,4775 +botocore/data/iot-jobs-data/2017-09-29/endpoint-rule-set-1.json.gz,sha256=LTRR1itjgrWZw9jsc09GGgFomcWf7al0MY3o7fW4OcM,1153 +botocore/data/iot-jobs-data/2017-09-29/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/iot-jobs-data/2017-09-29/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/iot-jobs-data/2017-09-29/service-2.json.gz,sha256=buVSfVUb_a_piqyvP08N0jp61Ey2hBrleubMO0BNny8,5041 +botocore/data/iot-managed-integrations/2025-03-03/endpoint-rule-set-1.json.gz,sha256=ibEVPMYCjTYNLVTurDRb0Fa847phuAm4qHzAzX5G-QI,843 +botocore/data/iot-managed-integrations/2025-03-03/paginators-1.json,sha256=leQj5NI9WDhpU0Nx_QY00lDJKI4MdDEV3xRl-1CP-0c,2987 
+botocore/data/iot-managed-integrations/2025-03-03/service-2.json.gz,sha256=JRFk43pOIshIkVAMOM9-IMLtim-EJlZyPMfrt1mFN98,29487 +botocore/data/iot/2015-05-28/endpoint-rule-set-1.json.gz,sha256=5dJANpgZ8aYCKRipnG-Wlh8xE6x7Hp6nimvMvXL1icI,1266 +botocore/data/iot/2015-05-28/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/iot/2015-05-28/paginators-1.json,sha256=cwRCZcmss_KOdZo4DoDmqXspGgJAJMjsUeZd9h_QzMk,11232 +botocore/data/iot/2015-05-28/service-2.json.gz,sha256=i0IDv1G2xxtyaQsd3tj9773ZvA3STTpzKkIHL2Ez5wM,125599 +botocore/data/iotanalytics/2017-11-27/endpoint-rule-set-1.json.gz,sha256=bsVBCCNB6ml5GylIiQtnmZuFIA_51jB0k9NFBPJqTz0,1152 +botocore/data/iotanalytics/2017-11-27/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/iotanalytics/2017-11-27/paginators-1.json,sha256=X_pDdHN034STvHt8ULopV8fu0e5gyFt8Z1dj17AfZQY,895 +botocore/data/iotanalytics/2017-11-27/service-2.json.gz,sha256=KbGMc2-nHFFnH1354UG1PaSknLQJm-msmgA-s6GeqIw,18182 +botocore/data/iotdeviceadvisor/2020-09-18/endpoint-rule-set-1.json.gz,sha256=zIcUuCOcMxPzPyIS94p4Ou7CjJVPdQRRu7SpMvc_vQ0,1158 +botocore/data/iotdeviceadvisor/2020-09-18/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/iotdeviceadvisor/2020-09-18/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/iotdeviceadvisor/2020-09-18/service-2.json.gz,sha256=jJUZMy9l9ZyV3yAbKksPzTCSs2tulkR-eNz3sUEEqSM,5326 +botocore/data/iotevents-data/2018-10-23/endpoint-rule-set-1.json.gz,sha256=dQV-eApS39H3MZscvRmkuHrU30o7UMSf_Xw3u3zlckc,1153 +botocore/data/iotevents-data/2018-10-23/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/iotevents-data/2018-10-23/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/iotevents-data/2018-10-23/service-2.json.gz,sha256=vPvpm_lesF6eTJ2XlHVTThaTxLpqB-2HRSAWYzRj3Ro,6416 +botocore/data/iotevents/2018-07-27/endpoint-rule-set-1.json.gz,sha256=qzcfKyVLVRXUB-DfkbkYwxzXN30iMkABXwax3TMwOKQ,1149 +botocore/data/iotevents/2018-07-27/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/iotevents/2018-07-27/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/iotevents/2018-07-27/service-2.json.gz,sha256=6lfivjin86L1m2JqTAeWyVgiqm9jWLhV1ihGV2gaXuk,16112 +botocore/data/iotfleetwise/2021-06-17/endpoint-rule-set-1.json.gz,sha256=UC6V41FhzE36PuPQtZXbSacvmenlIWl5NAahmFFKEbc,1151 +botocore/data/iotfleetwise/2021-06-17/paginators-1.json,sha256=Lpq4Xh8dmuXKeTACXakpt6YLxU7zzWOvkBqWbj9TuNI,2430 +botocore/data/iotfleetwise/2021-06-17/service-2.json.gz,sha256=f2SdECMZWI_2Ku0TJWR09DNiOUTAgHZ7jI-KtAueBmo,30396 +botocore/data/iotfleetwise/2021-06-17/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/iotsecuretunneling/2018-10-05/endpoint-rule-set-1.json.gz,sha256=oeI-eTJ8kmIHzjwvUZbJQKSAJ5xymTPI_96Zc_TUiAc,1401 +botocore/data/iotsecuretunneling/2018-10-05/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/iotsecuretunneling/2018-10-05/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/iotsecuretunneling/2018-10-05/service-2.json.gz,sha256=vBh4XHzVJFyMUeR_POcV7Vt0vWoTXDfFDw4zuEhtzps,3432 +botocore/data/iotsitewise/2019-12-02/endpoint-rule-set-1.json.gz,sha256=nbHLDH03hNj0d0S-tE6DBRKcJplbj2qEGcl59AWzIfs,1150 
+botocore/data/iotsitewise/2019-12-02/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/iotsitewise/2019-12-02/paginators-1.json,sha256=zx8VdG9G3tMBgNLSeJAnaVAeuqsq6Fdmdo1D77G2k-4,4960 +botocore/data/iotsitewise/2019-12-02/paginators-1.sdk-extras.json,sha256=YRdxHylWCPUlQDFxU2BHajclulJZBfY-NpWldEBwzEU,159 +botocore/data/iotsitewise/2019-12-02/service-2.json.gz,sha256=vuwGYeCwCVBPkM5ZsZRvNdZ1U3cnjVth3r0-v24Rk80,60542 +botocore/data/iotsitewise/2019-12-02/waiters-2.json,sha256=qVN5Ie90YeUrNZqZKgckPkyTBYdKjgEbbrlsx-3RXUw,2237 +botocore/data/iotthingsgraph/2018-09-06/endpoint-rule-set-1.json.gz,sha256=qmkcxn1aA0bH2ZkYVkezBjzeUM9k6YiEE6JLdl1AnBI,1217 +botocore/data/iotthingsgraph/2018-09-06/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/iotthingsgraph/2018-09-06/paginators-1.json,sha256=3329WY0CXoFVg2osoDFw4kPWYxWK559asARwgffXvbw,1730 +botocore/data/iotthingsgraph/2018-09-06/service-2.json.gz,sha256=4RDptl1OV-_3Nr82JpJj0aLbtB35ji2eLbP3EK2NW-k,10367 +botocore/data/iottwinmaker/2021-11-29/endpoint-rule-set-1.json.gz,sha256=baLwsQQyWzCvAO4bX6iBlJBLT2UKNef4C7MMqDy_7CA,1152 +botocore/data/iottwinmaker/2021-11-29/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/iottwinmaker/2021-11-29/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/iottwinmaker/2021-11-29/service-2.json.gz,sha256=6jAqUw0ADN6IL8DNG1a9wWy12fghehk0TPc7mI02TT0,16782 +botocore/data/iottwinmaker/2021-11-29/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/iotwireless/2020-11-22/endpoint-rule-set-1.json.gz,sha256=mn3OGSrMdrBpNdHh4xhhp6gtCqIZm6Vkp_Ma4mmeQU4,1153 +botocore/data/iotwireless/2020-11-22/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/iotwireless/2020-11-22/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/iotwireless/2020-11-22/service-2.json.gz,sha256=DlYs2Pa_CsRk2ixf0EO4ij2e17pZ43lElygDF2FYTRM,37304 +botocore/data/ivs-realtime/2020-07-14/endpoint-rule-set-1.json.gz,sha256=EudCR5inzPDVPJwl87CHIYtUJyj60gQaJs_xa57ibgU,1300 +botocore/data/ivs-realtime/2020-07-14/paginators-1.json,sha256=HgDr8EFYuc0XAzussWpMVIYF65h571wHeqsQEV4DNTI,550 +botocore/data/ivs-realtime/2020-07-14/service-2.json.gz,sha256=99Tww6mHyBBdV4RGnRd2lt0MxRfOXf3u94w2uMSHH90,19023 +botocore/data/ivs-realtime/2020-07-14/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/ivs/2020-07-14/endpoint-rule-set-1.json.gz,sha256=L1Or9piR17DNLWUCcmelXnihlgzRjlOdAQ51Nx1q9zo,1145 +botocore/data/ivs/2020-07-14/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/ivs/2020-07-14/paginators-1.json,sha256=QibJ2axvh2Gp9C80kOHE6Ac5RxI-El9k6jxWbVtHyqw,875 +botocore/data/ivs/2020-07-14/service-2.json.gz,sha256=-3kHTVC-CpQcNvFWT4uPjeIis-YnJ6vl8HiiaDmxSPU,17154 +botocore/data/ivschat/2020-07-14/endpoint-rule-set-1.json.gz,sha256=tHU7XYr9vKTveOz2FfyMe8inzExaR4ZvWKQx5UyZqUw,1149 +botocore/data/ivschat/2020-07-14/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/ivschat/2020-07-14/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/ivschat/2020-07-14/service-2.json.gz,sha256=5JDswzBqzgOBMzWquHZb4IzM9DKklAJQXyuByehf1Fs,8309 +botocore/data/ivschat/2020-07-14/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 
+botocore/data/kafka/2018-11-14/endpoint-rule-set-1.json.gz,sha256=wrwoqeyMh-eZy2FpXpAv-kZUX37U8h6VOCp7nJZKhQU,1232 +botocore/data/kafka/2018-11-14/paginators-1.json,sha256=Lmq3WylSqus0mXFPis3ZiX_DYrpa08vSkyiIpYcpqjs,2459 +botocore/data/kafka/2018-11-14/service-2.json.gz,sha256=ugLG8VnuA-hIm9micevqvRQt9mqAgxx0oZ_Ju69dPMg,22702 +botocore/data/kafkaconnect/2021-09-14/endpoint-rule-set-1.json.gz,sha256=3-URUr9TSgsVlZs3_5w-PEbNYqyUCwZS8Ci45Y5KLuM,1417 +botocore/data/kafkaconnect/2021-09-14/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/kafkaconnect/2021-09-14/paginators-1.json,sha256=Eq3TTanPhDwtKpF3EUFUazS6C8Dkwb7TENJPyonaUkc,733 +botocore/data/kafkaconnect/2021-09-14/service-2.json.gz,sha256=h9AKXC067xbdwoJhPefdf2wA5nwbv9tDO1ZX6zGTxtY,8051 +botocore/data/kafkaconnect/2021-09-14/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/kendra-ranking/2022-10-19/endpoint-rule-set-1.json.gz,sha256=2Lt06FD-8Ix7feDdX8yM_g4hS3g3oNKi3DHTGhaIcXA,1129 +botocore/data/kendra-ranking/2022-10-19/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/kendra-ranking/2022-10-19/service-2.json.gz,sha256=YWErKGnzSpl02BbClz4ZpjF1_lqXQz7S8SiSop4d90k,4384 +botocore/data/kendra/2019-02-03/endpoint-rule-set-1.json.gz,sha256=fxnVTeqFX9zKM0Ox-2NHRPJmTqop8r3jSXO9WKfE22c,1147 +botocore/data/kendra/2019-02-03/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/kendra/2019-02-03/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/kendra/2019-02-03/service-2.json.gz,sha256=nBeyPFFbmb5dT2foo7KBZ8mHIVB-Ct_iOqP5xDVb43s,71284 +botocore/data/keyspaces/2022-02-10/endpoint-rule-set-1.json.gz,sha256=ZE8E96H-KTmyhrMtclatMnx1Im3u6gwf_jCE0ffvCvo,1235 +botocore/data/keyspaces/2022-02-10/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/keyspaces/2022-02-10/paginators-1.json,sha256=Wa_EIMfB_oAzXrO5w1GuutNoupwn1mvgPSdNbD3gySk,668 +botocore/data/keyspaces/2022-02-10/service-2.json.gz,sha256=sBnv3_HHLbrRnK-d0ZaXd3fNSnBCSli9x4yffl16FUU,14542 +botocore/data/keyspaces/2022-02-10/waiters-2.json,sha256=tj1ZnaqhwmJkUEQlwH7wm1SqY3lg1BvZDfzfPaIgNrY,38 +botocore/data/keyspacesstreams/2024-09-09/endpoint-rule-set-1.json.gz,sha256=iIheSd4LWmUBWCM5_1yyvB0Y2mFQrhAWiYTpYnvUr2U,840 +botocore/data/keyspacesstreams/2024-09-09/paginators-1.json,sha256=ec8rJotxhYa6UUQ81C-pRcMIA-QlKfB_AiaFoHv7Fbc,342 +botocore/data/keyspacesstreams/2024-09-09/paginators-1.sdk-extras.json,sha256=feP3TZjM_YFuizYme5vJUtGlqVgcpqFZXswcFM3kUwg,378 +botocore/data/keyspacesstreams/2024-09-09/service-2.json.gz,sha256=OrLUgee6tDy-nVQzBZW_rNJl5mXAxUZCJUA-fKPimf4,6953 +botocore/data/kinesis-video-archived-media/2017-09-30/endpoint-rule-set-1.json.gz,sha256=Tf8jNWc9x23E0XtTPGE-oyekCqpaL6h9LZglItdh4oE,1151 +botocore/data/kinesis-video-archived-media/2017-09-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/kinesis-video-archived-media/2017-09-30/paginators-1.json,sha256=2QyELet6SZ2S2nDPmoKrNlJ9kQyJyMlMTkrUh1FHeh0,346 +botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json.gz,sha256=preUNHVNpVzTAIE_hFOUq1HYDfbZgu5Mck_QoSm7Rtk,13543 +botocore/data/kinesis-video-media/2017-09-30/endpoint-rule-set-1.json.gz,sha256=Tf8jNWc9x23E0XtTPGE-oyekCqpaL6h9LZglItdh4oE,1151 +botocore/data/kinesis-video-media/2017-09-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 
+botocore/data/kinesis-video-media/2017-09-30/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/kinesis-video-media/2017-09-30/service-2.json.gz,sha256=idVEppk47GcOjLifM9-i-Kwr5rV773pqGJrUHzEyxjM,3456
+botocore/data/kinesis-video-signaling/2019-12-04/endpoint-rule-set-1.json.gz,sha256=Tf8jNWc9x23E0XtTPGE-oyekCqpaL6h9LZglItdh4oE,1151
+botocore/data/kinesis-video-signaling/2019-12-04/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/kinesis-video-signaling/2019-12-04/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/kinesis-video-signaling/2019-12-04/service-2.json.gz,sha256=MNtO2oK8bl8uzD7oa0iCBpYuvxS3wTPRz3OGBAJkB_I,2462
+botocore/data/kinesis-video-webrtc-storage/2018-05-10/endpoint-rule-set-1.json.gz,sha256=DV5B2vJw9h3CEsUhcfTZiP35jVG3F_sYHdVSfm4oZ-k,1302
+botocore/data/kinesis-video-webrtc-storage/2018-05-10/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/kinesis-video-webrtc-storage/2018-05-10/service-2.json.gz,sha256=GTFhdj3jI9uf3NcWJYfl1W5CIVtIMCSI76tr1lhPpBs,2094
+botocore/data/kinesis/2013-12-02/endpoint-rule-set-1.json.gz,sha256=kfPCh2k2X2umE9PBXTel2Bu5n7bBTq66PqLYMJKTHgA,5443
+botocore/data/kinesis/2013-12-02/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/kinesis/2013-12-02/paginators-1.json,sha256=qSFJYsvx9QiXPFHa-xy00L9bJWbtmRbGUfaVCF9VzNE,1257
+botocore/data/kinesis/2013-12-02/service-2.json.gz,sha256=KJirKFKw4JlBghZEPmclin9e0hYjrFb_uCrGGUR9TuE,27027
+botocore/data/kinesis/2013-12-02/waiters-2.json,sha256=O09l7u4uKnojQ0nCnGvABSm0pUXaLj8vvi2Y7sfH_9w,615
+botocore/data/kinesisanalytics/2015-08-14/endpoint-rule-set-1.json.gz,sha256=HkDnE3rkNpbvTyDuCFBvDHFSN8TmXFywe-9q1H8ewKo,1156
+botocore/data/kinesisanalytics/2015-08-14/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/kinesisanalytics/2015-08-14/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/kinesisanalytics/2015-08-14/service-2.json.gz,sha256=NP3isRB7IksyDxfFD1-MVYGYyZTndS9kS_E2oSVNCZY,14058
+botocore/data/kinesisanalyticsv2/2018-05-23/endpoint-rule-set-1.json.gz,sha256=HkDnE3rkNpbvTyDuCFBvDHFSN8TmXFywe-9q1H8ewKo,1156
+botocore/data/kinesisanalyticsv2/2018-05-23/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/kinesisanalyticsv2/2018-05-23/paginators-1.json,sha256=4ttV2ZxNQIdY8Gfsw2atQYWigRj6V6b8bLI70CA4vKs,753
+botocore/data/kinesisanalyticsv2/2018-05-23/service-2.json.gz,sha256=f5drap_tMlQyapM5UV83MbQuTeYqxb4RhGLB2CHTAxo,25510
+botocore/data/kinesisvideo/2017-09-30/endpoint-rule-set-1.json.gz,sha256=Tf8jNWc9x23E0XtTPGE-oyekCqpaL6h9LZglItdh4oE,1151
+botocore/data/kinesisvideo/2017-09-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/kinesisvideo/2017-09-30/paginators-1.json,sha256=u4Avq0nOOLDcxZR6MF_lKdBBqtPAxO96JsSaq9zIdqQ,758
+botocore/data/kinesisvideo/2017-09-30/service-2.json.gz,sha256=7zdpwJ35M8BNtnRUPXOQ56OYtQHs3Cnbe3Kdikunfds,15837
+botocore/data/kms/2014-11-01/endpoint-rule-set-1.json.gz,sha256=A4bnHhgJGC9OITzPIoz_iY72_LRStB_OcAoYCTwrFcQ,1145
+botocore/data/kms/2014-11-01/examples-1.json,sha256=TgahTl1uBYiHIxv63mxyaCc-5c9xQKobWHPhDio4x3c,77655
+botocore/data/kms/2014-11-01/paginators-1.json,sha256=pUo1LF_2xGXf1-sM8xJiafSISwL8m8ygUKravuO-Iv0,1565
+botocore/data/kms/2014-11-01/service-2.json.gz,sha256=4c9XvoguiF4tHeRclUrt2Oz-Ssi_gfdI4we1Qs2-pQ8,73287
+botocore/data/lakeformation/2017-03-31/endpoint-rule-set-1.json.gz,sha256=2u6cvrk55HkVHr4x85vu946_3u_Hf26G4L9oK1-vod4,1152
+botocore/data/lakeformation/2017-03-31/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/lakeformation/2017-03-31/paginators-1.json,sha256=Igy5JjRrtDgCMOgHT__Et_A6258hmdm2ohrVs2ZasJc,1052
+botocore/data/lakeformation/2017-03-31/paginators-1.sdk-extras.json,sha256=C6kS_EfPl5yTSl_zGXUU30Tp5Z82gPH2KKPi-u1IbOo,159
+botocore/data/lakeformation/2017-03-31/service-2.json.gz,sha256=mXo7bJpppjJ9A6PfKRZAhKEWCK2lqDY59P6M1Ms5hYY,24563
+botocore/data/lambda/2014-11-11/endpoint-rule-set-1.json.gz,sha256=gP-IbhRx9MRTeHc3GlklRbCqlp5SotRfuvQalmuhMNc,1288
+botocore/data/lambda/2014-11-11/service-2.json.gz,sha256=pIaElsexoKwST1xKO7dG4ZC2_lCM15NcNzq72WaslkM,5528
+botocore/data/lambda/2015-03-31/endpoint-rule-set-1.json.gz,sha256=wWhVcq2hhZ4iQKAzO7A4YqQxEjBkozC8QBfBkwrJ1pA,1148
+botocore/data/lambda/2015-03-31/examples-1.json,sha256=_TOXptTVZUFkSxrkaq_JpIKLxUYjRcK_TpC_0itGHLg,52811
+botocore/data/lambda/2015-03-31/paginators-1.json,sha256=i67K4A0RFZT91hXah41P-AlfAvxXy1TrUAcmy7foZ4o,2839
+botocore/data/lambda/2015-03-31/paginators-1.sdk-extras.json,sha256=evspsJGQ9hFD7SsREZ6pj-ooY7RdvPPHneM5PA4AKaM,196
+botocore/data/lambda/2015-03-31/service-2.json.gz,sha256=WdgHQ3BdWu_pvZkzej5L7X1ypWDy2SqSHCGue4ta5lc,65929
+botocore/data/lambda/2015-03-31/waiters-2.json,sha256=imWEXGOjCilT015RuJNYSaoSXEuafoWZQgL_4KttBqU,4032
+botocore/data/launch-wizard/2018-05-10/endpoint-rule-set-1.json.gz,sha256=_kB4OXfYCrno1mkPFTVD2WVfF5KnZPrdvOUC8aL1eG4,1302
+botocore/data/launch-wizard/2018-05-10/paginators-1.json,sha256=_qhTYa40h1ckIfS0xEC6DCUnO-0OPlclJSK9zAxC8D4,733
+botocore/data/launch-wizard/2018-05-10/service-2.json.gz,sha256=5HG02N9EVvbrvO9hUUzSgApk7m7FV0U-jDpSgDlHWQI,4452
+botocore/data/lex-models/2017-04-19/endpoint-rule-set-1.json.gz,sha256=o6lYwmxquYwhFwnuEqv3Wkrv8mNop2VsUBtvlGTfi_E,1331
+botocore/data/lex-models/2017-04-19/examples-1.json,sha256=bOPm5nP9H4YSzKIpuI2sCPe4agTMgdenNLtxDAWIat4,23898
+botocore/data/lex-models/2017-04-19/paginators-1.json,sha256=NmghgFUthvQgC3SqXuZBn-6vnUJ5ey3MZYBpRF7YMqI,1686
+botocore/data/lex-models/2017-04-19/service-2.json.gz,sha256=0BmxOHiubsxVzepmp7rgKxObX9k5E0QfBS0FmpnLe8c,29522
+botocore/data/lex-runtime/2016-11-28/endpoint-rule-set-1.json.gz,sha256=7k_N5a0bYUx4f5_sMjc6OIp0dkg8qrEksC6tPz9DwTU,1331
+botocore/data/lex-runtime/2016-11-28/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/lex-runtime/2016-11-28/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/lex-runtime/2016-11-28/service-2.json.gz,sha256=OTf0OrBTx0fvqbVllnbj6QebtowzsgQ98VcfP8FPm-E,11824
+botocore/data/lexv2-models/2020-08-07/endpoint-rule-set-1.json.gz,sha256=k6euJdFlTnye4wEmWMFRHIG_Wxy6n1sHaiSD7PBrT-U,1154
+botocore/data/lexv2-models/2020-08-07/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/lexv2-models/2020-08-07/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/lexv2-models/2020-08-07/service-2.json.gz,sha256=KEoS4cyYacPnMrlS_XFsMTUyC7TDh-pA5rCroIGj5D8,78964
+botocore/data/lexv2-models/2020-08-07/waiters-2.json,sha256=Kj-OzJdHpbEuK2Og-0ok3E17irFQKjDwk2KfOj_xKcQ,7231
+botocore/data/lexv2-runtime/2020-08-07/endpoint-rule-set-1.json.gz,sha256=eO7TDhJg0VReb7vqAHJZn2lUGTHJMjZIzVlGo_XUN6k,1154
+botocore/data/lexv2-runtime/2020-08-07/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/lexv2-runtime/2020-08-07/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/lexv2-runtime/2020-08-07/service-2.json.gz,sha256=E_WeugTLsQ2ms7Y-E8UXV9Hsd8LGD4G6lfSIdie_oEA,12913
+botocore/data/license-manager-linux-subscriptions/2018-05-10/endpoint-rule-set-1.json.gz,sha256=wqJUqbrrqhFS_NiAECVbUUD-yMc5Hb9BJ3Ga5SnWmJ8,1318
+botocore/data/license-manager-linux-subscriptions/2018-05-10/paginators-1.json,sha256=9hH87MXwn0OiJQlRwCyyof-Pe9Esid1WmRA32IvfLKU,591
+botocore/data/license-manager-linux-subscriptions/2018-05-10/service-2.json.gz,sha256=-v-K3pC144C4buByDl2Ggr9K8_S6Qvmx3ARSzYzmegQ,4790
+botocore/data/license-manager-user-subscriptions/2018-05-10/endpoint-rule-set-1.json.gz,sha256=IafmeCbP0V03XKUiizap949I9AABMGf_5PM4xZr1lIM,1171
+botocore/data/license-manager-user-subscriptions/2018-05-10/paginators-1.json,sha256=LNXlRPzi78dh0YocO4Tld8ErItNbVafz-InxaJIVd-s,944
+botocore/data/license-manager-user-subscriptions/2018-05-10/service-2.json.gz,sha256=jpBeArzilDUG-pVnGJpTm6y4PT4PiwDgOSbFuYTypUI,6693
+botocore/data/license-manager-user-subscriptions/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/license-manager/2018-08-01/endpoint-rule-set-1.json.gz,sha256=VvSYA0WGh0_suDcGC5DhZeLis_tvrbK6VdUPqvH3bFo,1156
+botocore/data/license-manager/2018-08-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/license-manager/2018-08-01/paginators-1.json,sha256=u83kulrKizQ1RsV1wfSx_UjFbm72dCbztJd3m2qKZwc,1012
+botocore/data/license-manager/2018-08-01/service-2.json.gz,sha256=sySCNUGuxdvcDDGoEelUs8nDPIoI73i6AYTiUeyQu18,20541
+botocore/data/lightsail/2016-11-28/endpoint-rule-set-1.json.gz,sha256=_YUkDM9D4yoBo-1HhoDihuX3fuItNamTKLg3Hun3ylk,1150
+botocore/data/lightsail/2016-11-28/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/lightsail/2016-11-28/paginators-1.json,sha256=9EaLlqeMLm1cO4A5z-uPznc4OgcKMLV3tbvMLdSjZF4,2925
+botocore/data/lightsail/2016-11-28/service-2.json.gz,sha256=IfKEtfIJc5E246vx13lQRqRgn2NP_hvXbS39gCOIWCk,88085
+botocore/data/location/2020-11-19/endpoint-rule-set-1.json.gz,sha256=Kd78nnpKjF0kUAXQa7rNVVle9B0UvaokBqBlT_Em0ho,1145
+botocore/data/location/2020-11-19/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/location/2020-11-19/paginators-1.json,sha256=voiPL9-aOzRI3yqf4kvw2pNzmkLpxD5QoFZZaOYaQoQ,1871
+botocore/data/location/2020-11-19/paginators-1.sdk-extras.json,sha256=RkjEzGF7VMsfK3VpqDSV5a3Ol5XSjUAn_udGxYz4uyA,197
+botocore/data/location/2020-11-19/service-2.json.gz,sha256=c5uElA-mqIc-soiL-y1EPZ38l_1lZopyCtm5m_Y1cvc,44276
+botocore/data/logs/2014-03-28/endpoint-rule-set-1.json.gz,sha256=bNg6UNt0xgDoBcHhCr-kiM092i08AJQGfLvxDvv_fBo,1230
+botocore/data/logs/2014-03-28/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/logs/2014-03-28/paginators-1.json,sha256=W91hHLGNPpWw3v8TNgDZcoNQYH16WKc2tDaMivPQgQI,3553
+botocore/data/logs/2014-03-28/paginators-1.sdk-extras.json,sha256=NEtgDwhuhzOQCycfuhXi4tSe1_y5ekrExDmxJdQ9M1Q,198
+botocore/data/logs/2014-03-28/service-2.json.gz,sha256=r6GD4Z2GyKrRgfim8YokkNJo_sOoKqh1OH9T2pi0Krg,79359
+botocore/data/lookoutequipment/2020-12-15/endpoint-rule-set-1.json.gz,sha256=fsMYdEXYcYubsX59IoC6NcVjpcDDe2XvJlLVY7Ut9EM,1156
+botocore/data/lookoutequipment/2020-12-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/lookoutequipment/2020-12-15/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/lookoutequipment/2020-12-15/service-2.json.gz,sha256=KyuxhtzBjo3Hv0yke6BmFDEAjCzn2buKWuAR-pk4n7U,23238
+botocore/data/m2/2021-04-28/endpoint-rule-set-1.json.gz,sha256=8J6sujPsmDuCKdof8UD9XOGw3PflH1zy1afsceTRIPI,1145
+botocore/data/m2/2021-04-28/paginators-1.json,sha256=-WFr8vEIZ868a3Kwj0mLLgJuYR_MI2osQq2gIZMWL4g,1787
+botocore/data/m2/2021-04-28/service-2.json.gz,sha256=dnhZRYDyJyg83Up5e1XhgVXLwv6b1mH_9aW1OMAsfB4,16743
+botocore/data/machinelearning/2014-12-12/endpoint-rule-set-1.json.gz,sha256=IaVJkUiek97sRX3uNXN8kIVZzUqhLOfrFVvFi10vdyA,1156
+botocore/data/machinelearning/2014-12-12/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/machinelearning/2014-12-12/paginators-1.json,sha256=80ddAOlwUPt-mXpDtk3eJqnm7lB95-DjTW6-G6eqmJc,679
+botocore/data/machinelearning/2014-12-12/service-2.json.gz,sha256=6CLo__3O2SVxJHhmIsl1owt_zOx6X8IW3Z-V95tkh4c,21306
+botocore/data/machinelearning/2014-12-12/waiters-2.json,sha256=_tyML4Sw4VQBk8fUWh1bUQjlcooL1hgRpvkqxKxEeCY,1902
+botocore/data/macie2/2020-01-01/endpoint-rule-set-1.json.gz,sha256=tvtZjQPGmKETjT-ZtQCNbFwXz1tMjP8gTINTmchvjo0,1148
+botocore/data/macie2/2020-01-01/paginators-1.json,sha256=QNpyggmzK1vrlEP4LHAy0qKzLTJNNoV9x3y8nqJkj3o,2959
+botocore/data/macie2/2020-01-01/service-2.json.gz,sha256=LP5kaIBx-tZt1mPSyuQcdVE05zGCkJ1440ZnjhwzTNk,59305
+botocore/data/macie2/2020-01-01/waiters-2.json,sha256=YjTydOnsawe754SLZZxzxMgFaq0M88fq5jOu-UQvAWE,553
+botocore/data/mailmanager/2023-10-17/endpoint-rule-set-1.json.gz,sha256=zYEpfcss1dc2pN4URp8xHgOdv4uEDFzSHOLiJ9D0_Hw,1302
+botocore/data/mailmanager/2023-10-17/paginators-1.json,sha256=6E7z80hMmR8EXzJv7fi20eCjKOVott4o8agYmmVvlU4,2045
+botocore/data/mailmanager/2023-10-17/service-2.json.gz,sha256=K9lqkU9Trsq9rQ7QRUiXuJbBb6lgy6rEJ24rwF4pph0,22050
+botocore/data/mailmanager/2023-10-17/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/managedblockchain-query/2023-05-04/endpoint-rule-set-1.json.gz,sha256=lEkAtoMbWOwwxIX6cpmJNjtGxvkwuNDUj4j6cgE37os,1312
+botocore/data/managedblockchain-query/2023-05-04/paginators-1.json,sha256=aLhFDqzj7KQVTC4MVIRoQAo6tDKInqtlsOkFmvln-7o,882
+botocore/data/managedblockchain-query/2023-05-04/service-2.json.gz,sha256=kRL9-zdeBtg1W6Q9OKqJZzO9tvyMCji16ZFUD0r2rdA,7186
+botocore/data/managedblockchain-query/2023-05-04/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/managedblockchain/2018-09-24/endpoint-rule-set-1.json.gz,sha256=i7a55Py_pWpBJV1musmL1492p2XVSmpIj65xKXF4vtg,1158
+botocore/data/managedblockchain/2018-09-24/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/managedblockchain/2018-09-24/paginators-1.json,sha256=zAjmRcrAx6dDwoJVM-7ceZ1U04fGfxMgQsREvvVcIeI,189
+botocore/data/managedblockchain/2018-09-24/service-2.json.gz,sha256=EcT9WBdv-8zYgGyGiDl_0TJnqiciDFpQBhiAAxoJyYk,13879
+botocore/data/marketplace-agreement/2020-03-01/endpoint-rule-set-1.json.gz,sha256=BYMfV-lFQBj0yeVKLrMXow9GGWMdNJraIS7JCMtcdJs,1309
+botocore/data/marketplace-agreement/2020-03-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/marketplace-agreement/2020-03-01/service-2.json.gz,sha256=j_M59Xg0tFUBihosJ8XwJ177ythicVUHUhqmqsFcEjg,8903
+botocore/data/marketplace-agreement/2020-03-01/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/marketplace-catalog/2018-09-17/endpoint-rule-set-1.json.gz,sha256=rxyxCqF5BP0PxBmrsgtU_U8w-Qyl6wkr4Z9Nzan4Vbc,1157
+botocore/data/marketplace-catalog/2018-09-17/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/marketplace-catalog/2018-09-17/paginators-1.json,sha256=JbO7iSHFp-U7kJIRHTRxPClYMlBkenux5Ow534JGcyQ,372
+botocore/data/marketplace-catalog/2018-09-17/service-2.json.gz,sha256=ePQDoXEI9_5MLWE5BcivNVFlg3IDHgc3GaZCiwMzfuA,13942
+botocore/data/marketplace-deployment/2023-01-25/endpoint-rule-set-1.json.gz,sha256=v4ZjFPE1cNWs5fpD447hNf4zSDlo3jOeWbJkb-dEmO0,1310
+botocore/data/marketplace-deployment/2023-01-25/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/marketplace-deployment/2023-01-25/service-2.json.gz,sha256=HZWMsXVp3uPhqpKz7eUGexS0n8X9U5G1r6vrix4-usc,2721
+botocore/data/marketplace-entitlement/2017-01-11/endpoint-rule-set-1.json.gz,sha256=QG2yPzfpuUuwlsZNpQ1_w1FQ_7p8gfdtU367xqB3AXY,1530
+botocore/data/marketplace-entitlement/2017-01-11/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/marketplace-entitlement/2017-01-11/paginators-1.json,sha256=xFY_-BU5Ho7OPWDGn_aX-WwguHOeDyE1N4F-7nlw2KA,194
+botocore/data/marketplace-entitlement/2017-01-11/service-2.json.gz,sha256=5VrbWPXKwD4E151WJQrlMmtmMVDG4Aq2qEav-I3bqLY,2313
+botocore/data/marketplace-reporting/2018-05-10/endpoint-rule-set-1.json.gz,sha256=0bgm8SO8EA9ulwFV1nRFgLSINpkVjfymaDPFzqdh98M,1308
+botocore/data/marketplace-reporting/2018-05-10/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/marketplace-reporting/2018-05-10/service-2.json.gz,sha256=9MChz8QQSBNYCUuSu1GCE9ovHqmgQuwy4viwHByCEDI,2451
+botocore/data/marketplace-reporting/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/marketplacecommerceanalytics/2015-07-01/endpoint-rule-set-1.json.gz,sha256=yurxrGKbe2OplRHo-ZfoKmyGgR2fkCxE-CHvwJdhl7M,1166
+botocore/data/marketplacecommerceanalytics/2015-07-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/marketplacecommerceanalytics/2015-07-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/marketplacecommerceanalytics/2015-07-01/service-2.json.gz,sha256=ftDjCBBZtR61jx0RBbugYUtfccpFjgLzb6BcmO_El6o,3272
+botocore/data/mediaconnect/2018-11-14/endpoint-rule-set-1.json.gz,sha256=Jrbf9gLzYw3kegUZxmZdJ_wYK90WG5hYL4uBQw7wHkI,1152
+botocore/data/mediaconnect/2018-11-14/paginators-1.json,sha256=KOhLwBbi4k8mumx_ac5Y-x7OkCFL705ZJOer8NxET9g,1712
+botocore/data/mediaconnect/2018-11-14/service-2.json.gz,sha256=SyPHNiU1x98RrtC-G6lzQRJ7lkC-_xz7jAry9qztTDs,46399
+botocore/data/mediaconnect/2018-11-14/waiters-2.json,sha256=QkoMT-f_yIvagwfOQZbjK4UnteV2fIVD4BZnXv3m1h8,8820
+botocore/data/mediaconvert/2017-08-29/endpoint-rule-set-1.json.gz,sha256=ZXquQEdT2vvXWx-vCm-i4JGs8IwIN7ZPE5Ffz-DXw9k,1299
+botocore/data/mediaconvert/2017-08-29/paginators-1.json,sha256=oX8chsZnZYHV50i1ILrfgyIYwOI82aSlb46knSQk0qw,1153
+botocore/data/mediaconvert/2017-08-29/paginators-1.sdk-extras.json,sha256=tG933F4yMTEHzj_2Y6YhkDuomhFhDjRDmF0k1I7n8II,208
+botocore/data/mediaconvert/2017-08-29/service-2.json.gz,sha256=n6eyfONSLRnPjv59HQa5Ss6cvwRxtCZbedyb_9riqJM,172695
+botocore/data/medialive/2017-10-14/endpoint-rule-set-1.json.gz,sha256=iYyd1rOV73aU3Wd2f5YNgq5eDcBB6BvfLdD9jgTrqlY,1149
+botocore/data/medialive/2017-10-14/paginators-1.json,sha256=WjRxjvuXG4nsTz1IQogEjLBHw0HlX28zgkSS-J5nc5U,4028
+botocore/data/medialive/2017-10-14/service-2.json.gz,sha256=dvSmmJ6Yff0VyZ-ouw5E72umToziOAT8grSzD1bb8Yg,128920
+botocore/data/medialive/2017-10-14/waiters-2.json,sha256=v1qapfFgcUvllk-vANZFuTJLDN_edc3DsDFq4_nTViQ,15342
+botocore/data/mediapackage-vod/2018-11-07/endpoint-rule-set-1.json.gz,sha256=6Xdaie2WjlsW4mShme5gvzsPm8FpGi-Lt1GhLJgGMA0,1155
+botocore/data/mediapackage-vod/2018-11-07/paginators-1.json,sha256=uyOY7MfVXvY7qil_RhqS9KThRg9A3_8LB6C8en49Z3k,551
+botocore/data/mediapackage-vod/2018-11-07/service-2.json.gz,sha256=TC0W-43UoII0H3D6Q36AKUpVo95ENxyj4uHOZfZ78C4,7204
+botocore/data/mediapackage/2017-10-12/endpoint-rule-set-1.json.gz,sha256=XG-VkSxSPYvhKxI5htXMwgGa3X1VIFITNMO_hhowZ0k,1152
+botocore/data/mediapackage/2017-10-12/paginators-1.json,sha256=Hkze_cyn0q7t1o4PHpf079W6jE_g7l8tGQf7x-t3ocs,531
+botocore/data/mediapackage/2017-10-12/service-2.json.gz,sha256=doAK_15MtOmG-WhtpehNGxXNpZVea1Rcg8DxG9LZP3s,9876
+botocore/data/mediapackagev2/2022-12-25/endpoint-rule-set-1.json.gz,sha256=PSkHprGdQHynyr9Hy8v8onqKpD2mo1J_dICT-FDfhnA,1304
+botocore/data/mediapackagev2/2022-12-25/paginators-1.json,sha256=TvuW6sRcN2gAFECRS2bfyHrPTUcTAZR4uEjHacwMgyU,676
+botocore/data/mediapackagev2/2022-12-25/service-2.json.gz,sha256=Aae08a8ASi6Tv7Mf70CxZaZ6GvwyfqjM_Ek1iPZo6SE,24864
+botocore/data/mediapackagev2/2022-12-25/waiters-2.json,sha256=lXTTb_E9woEww_3b3x_f7fITdrFO96-eKWcC7F6VXGU,832
+botocore/data/mediastore-data/2017-09-01/endpoint-rule-set-1.json.gz,sha256=BsxKiI4RBs6Zip5-cMJ_DucgWspWQs7LZiJHo2Efocs,1153
+botocore/data/mediastore-data/2017-09-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/mediastore-data/2017-09-01/paginators-1.json,sha256=iGhEIo_9ydhnm5jAD4K6mIgNGZ51FKUA4AlfMlG0sao,181
+botocore/data/mediastore-data/2017-09-01/service-2.json.gz,sha256=xoCGOgcRBlCjId7Al2JBc_U7O53SJrBHhfykli_JvwE,3785
+botocore/data/mediastore/2017-09-01/endpoint-rule-set-1.json.gz,sha256=CgBA8lZ-LWSN7xHqWkJjFSGStIBVb580r0Bc2KaXw3E,1150
+botocore/data/mediastore/2017-09-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/mediastore/2017-09-01/paginators-1.json,sha256=0XO8tEPJl9J7qprTHPQQt6dC7GrjIoqoCn4AcAbjiyM,191
+botocore/data/mediastore/2017-09-01/service-2.json.gz,sha256=gNR0kBPffjiJbviioz7FXiDACR5_1Xn_Kj0Rrs1ELTY,7064
+botocore/data/mediatailor/2018-04-23/endpoint-rule-set-1.json.gz,sha256=NpKEJ7jE6ozS24taiMxOnwbKdrwnuIUf73M_KOJCzIE,1153
+botocore/data/mediatailor/2018-04-23/paginators-1.json,sha256=AxqBHJot9wpawiVdBaiwALEkmIwfz6mhJsXIo7qDvlw,1336
+botocore/data/mediatailor/2018-04-23/service-2.json.gz,sha256=w8jjhJyJrMy1frmNWY3tkselvJuLMxRReatwMmIHzU0,26755
+botocore/data/medical-imaging/2023-07-19/endpoint-rule-set-1.json.gz,sha256=fOFep2V-qEhDNLdunuIFZJCbQNgCpHnhoHxx5aVYt6Y,1304
+botocore/data/medical-imaging/2023-07-19/paginators-1.json,sha256=Zdv-t-Mpi7RENFkReFlaQ40h5arjqt4t0EDliR_8VOs,739
+botocore/data/medical-imaging/2023-07-19/paginators-1.sdk-extras.json,sha256=SH5DkCGoc9NqpV_7FKFpREPdZP8dt8uz6TDVCQZmeCI,159
+botocore/data/medical-imaging/2023-07-19/service-2.json.gz,sha256=3n7AIukC5aJjXJDS89k9IQ3inNOUTlkoA7_NhXtFdFs,9442
+botocore/data/medical-imaging/2023-07-19/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/memorydb/2021-01-01/endpoint-rule-set-1.json.gz,sha256=5PItMUTNxIu2hEi6bNnEpJcc7dcFb1zDJyyI4RY-gEA,1264
+botocore/data/memorydb/2021-01-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/memorydb/2021-01-01/paginators-1.json,sha256=rPx4219WMZpwPaLBF1L70DN_b4x5cChfNl3u_g65bj0,2277
+botocore/data/memorydb/2021-01-01/service-2.json.gz,sha256=20OmA7Pks_JdrqW9U3iSovy5C7kvfTVSF1GYXmeTmSc,19249
+botocore/data/meteringmarketplace/2016-01-14/endpoint-rule-set-1.json.gz,sha256=HguEmKPhLkL5sRXT7WwmvbsF2EoFnGlhEieNgi3vqw0,1518
+botocore/data/meteringmarketplace/2016-01-14/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/meteringmarketplace/2016-01-14/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/meteringmarketplace/2016-01-14/service-2.json.gz,sha256=yASy-yIGgqcz4EkopmbA07KchDWe_v3lOnq2T1yk_ME,7834
+botocore/data/mgh/2017-05-31/endpoint-rule-set-1.json.gz,sha256=AuQDeeD4JnxQIZjJ67DQ00eEfYW9VxuuaCu9qeRqiIo,1145
+botocore/data/mgh/2017-05-31/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/mgh/2017-05-31/paginators-1.json,sha256=E2Ik6-I1lm3WF_e7avtK8OpnpzPT0CQg3im2ILInNK0,1326
+botocore/data/mgh/2017-05-31/service-2.json.gz,sha256=w9YK1yPidwjmxtUJn7uN-nncF2DNUbfwMw6qHn6jA7E,8366
+botocore/data/mgn/2020-02-26/endpoint-rule-set-1.json.gz,sha256=gl3rxVlWIFO_Eu5DPNAp_wsMWjvraqwwAqppcXXH9l0,1145
+botocore/data/mgn/2020-02-26/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/mgn/2020-02-26/paginators-1.json,sha256=zJ9gqjYlOC0wR5m9M1J-VB79ZFXJcrL78WvqPplRE8M,2682
+botocore/data/mgn/2020-02-26/service-2.json.gz,sha256=1tKqQ056GWmTsGsAya2drmMb_Ayyqoq1afNKQ7Ae27k,20311
+botocore/data/mgn/2020-02-26/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/migration-hub-refactor-spaces/2021-10-26/endpoint-rule-set-1.json.gz,sha256=rRzxj1I_Kr78EXHUOvjgP5lII9jxUEGgHOEBO8xfqq8,1154
+botocore/data/migration-hub-refactor-spaces/2021-10-26/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/migration-hub-refactor-spaces/2021-10-26/paginators-1.json,sha256=OZ6GIc0aC4ikn9W96M2UbxWeBdIy3QA7ksZ2Ec7t1e8,904
+botocore/data/migration-hub-refactor-spaces/2021-10-26/service-2.json.gz,sha256=lWF8Yb6vH9VAo_qCJYz5xLUMJzq02ZXwRtEGj5g4-6s,12478
+botocore/data/migrationhub-config/2019-06-30/endpoint-rule-set-1.json.gz,sha256=viyIDzrlDfxnPkNaNiuQikdzEkMJURlwVCmjRWQIGmU,1156
+botocore/data/migrationhub-config/2019-06-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/migrationhub-config/2019-06-30/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/migrationhub-config/2019-06-30/service-2.json.gz,sha256=H51U29-qi8Kj4JEZz4NFf7-pRCFyf3z2PgCNZLEdMng,2737
+botocore/data/migrationhuborchestrator/2021-08-28/endpoint-rule-set-1.json.gz,sha256=3knA03wCJF72qOLPVz5iY9nL2tViIVUQIBy-H40H60E,1308
+botocore/data/migrationhuborchestrator/2021-08-28/paginators-1.json,sha256=K3BSaAaX302rt-fuD-8ewfuAaO1cXLwfwPxQmgs4gLw,1272
+botocore/data/migrationhuborchestrator/2021-08-28/service-2.json.gz,sha256=ih0e2pvQMa7z5ZSSXmYwCF4Wwppoi5yCFRTArwYpGIY,8432
+botocore/data/migrationhuborchestrator/2021-08-28/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/migrationhubstrategy/2020-02-19/endpoint-rule-set-1.json.gz,sha256=tW7SdF8SgGHAiBRfQHgylICEpp3nm44wU4wm-9LhX-w,1157
+botocore/data/migrationhubstrategy/2020-02-19/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/migrationhubstrategy/2020-02-19/paginators-1.json,sha256=1kU7uoqpjQDozh9dBNVWf7QyZDxK2PBkajg_gfz7dxY,1076
+botocore/data/migrationhubstrategy/2020-02-19/paginators-1.sdk-extras.json,sha256=x686VmA6fsdUSIKSMZbp5ZF280pREQ7HpnPkgQTZ730,220
+botocore/data/migrationhubstrategy/2020-02-19/service-2.json.gz,sha256=pTy0GGHnXQROdX7oCiA4dIEqafhsMz8U-M-z5kk_7U0,13363
+botocore/data/mpa/2022-07-26/endpoint-rule-set-1.json.gz,sha256=_5AxAb7-Moc5R385CGRECzYKA3KlDyZGfssZv2EjpUg,830
+botocore/data/mpa/2022-07-26/paginators-1.json,sha256=KEAQbo0paz_QEgpKVo8h7BdV99138Ck3ni2bl7msPKs,1049
+botocore/data/mpa/2022-07-26/service-2.json.gz,sha256=CdhXBCgMZiITedeQfvOlnKh6HEIHlne-fI8PvMtSnas,9296
+botocore/data/mpa/2022-07-26/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/mq/2017-11-27/endpoint-rule-set-1.json.gz,sha256=LpoD8ikUz6eEOX6Ezmk0O-Zd2TvhHSOEOKywCKYoAKg,1145
+botocore/data/mq/2017-11-27/paginators-1.json,sha256=JZRhf6w_8oFT1nPyeTQNU09bR1-xrJn09KOtiOPO2Rg,193
+botocore/data/mq/2017-11-27/service-2.json.gz,sha256=gywQyn8ajmu4TRE2oghT5ZrF2KjhA1Nw0ninxUxAi9c,14581
+botocore/data/mturk/2017-01-17/endpoint-rule-set-1.json.gz,sha256=5P3LnbDto48zS2R1iJ1Fjdw08TDd-UbOdDF1oOvPwzE,1217
+botocore/data/mturk/2017-01-17/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/mturk/2017-01-17/paginators-1.json,sha256=NFfGwUHHAX0lwKOB92RJHnfVkFP5IvDCtM1FnTJ-A0g,1591
+botocore/data/mturk/2017-01-17/service-2.json.gz,sha256=anB6UzTbcwEabHK84p5aEREb9Iy9OYzYhyoupvtcRjs,19770
+botocore/data/mwaa-serverless/2024-07-26/endpoint-rule-set-1.json.gz,sha256=rMG0-S0n6fbWx_FRl5Ts4TuWVsrrStWal8b1iXbfdFA,841
+botocore/data/mwaa-serverless/2024-07-26/paginators-1.json,sha256=2DDXXuSHlzvaAEATWiKeZEofpzHNoZ6pZrqYA6mxWxw,709
+botocore/data/mwaa-serverless/2024-07-26/service-2.json.gz,sha256=xKKgSJsIOWdPZ0U-XFCLkGrf7UHlbJ0PZyHnMSXK4YY,10217
+botocore/data/mwaa-serverless/2024-07-26/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/mwaa/2020-07-01/endpoint-rule-set-1.json.gz,sha256=sFDydbs9OOit8tNH_JXF8R0gICMTQJ9_LTjM9pbJ9BE,1149
+botocore/data/mwaa/2020-07-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/mwaa/2020-07-01/paginators-1.json,sha256=ggep_PmvO9S8tClL3v6oAmOMYV4qZcZt06URX5M9658,195
+botocore/data/mwaa/2020-07-01/service-2.json.gz,sha256=LYpUuDH2ZF45Ow9yUn4iEHVtu3T0XBLxpvgLSq5Srpo,11437
+botocore/data/neptune-graph/2023-11-29/endpoint-rule-set-1.json.gz,sha256=rj6JQIK5FJ3qNA4ffvv7njpx9hQuR1ZSvt54JGq91nU,1408
+botocore/data/neptune-graph/2023-11-29/paginators-1.json,sha256=BahW2a3tKEIHiNWsH6L6euKU2GNi128H27Z9QERjxwE,869
+botocore/data/neptune-graph/2023-11-29/service-2.json.gz,sha256=9QetSXT1ZT0pd5O_AKUFHNgcc9skWBNZtJs2Cz87B3g,15138
+botocore/data/neptune-graph/2023-11-29/waiters-2.json,sha256=KT4CSJIIJS_NGt8Lr21pPZdE6vPj-STKCnh9Z8opMQs,6302
+botocore/data/neptune/2014-10-31/endpoint-rule-set-1.json.gz,sha256=-yaMsUCL6embVCf7xwIIkigYfUfh5gRJc6Rz4fpK5jw,1230
+botocore/data/neptune/2014-10-31/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/neptune/2014-10-31/paginators-1.json,sha256=66ojR4_WsS4k5APEI8fjU0mCJTn7B15KuG8mPLcqFk8,2881
+botocore/data/neptune/2014-10-31/service-2.json.gz,sha256=eUiIBUMV8sGJj51PlcqSfeLJVqVeXgd94SZx_V9bEj0,46014
+botocore/data/neptune/2014-10-31/service-2.sdk-extras.json,sha256=U_PgxwtPhWl8ZwLlxYiXD4ZQ4iy605x4miYT38nMvnM,561
+botocore/data/neptune/2014-10-31/waiters-2.json,sha256=8bYoMOMz2Tb0aGdtlPhvlMel075q1n7BRnCpQ-Bcc1c,2398
+botocore/data/neptunedata/2023-08-01/endpoint-rule-set-1.json.gz,sha256=y-7_cJlN8YjVqOtYgV1YfF7_qn8oTHrAASBYRm8oSpU,1297
+botocore/data/neptunedata/2023-08-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/neptunedata/2023-08-01/service-2.json.gz,sha256=kD6Wsk_5LhpGma1-7dTSU8WQ8SAbmamsS71uy4SJNUA,23576
+botocore/data/network-firewall/2020-11-12/endpoint-rule-set-1.json.gz,sha256=YfCQeJEcuMBjvSYSYJybrwgJIxAnNlmoWwlIkZ0HJuk,1155
+botocore/data/network-firewall/2020-11-12/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/network-firewall/2020-11-12/paginators-1.json,sha256=ZuWICIjteVuD8Zh_STIm6dvL7AVdPg6cfs2C07QIM5E,2318
+botocore/data/network-firewall/2020-11-12/paginators-1.sdk-extras.json,sha256=IXOL2iFcreaZkcp6DCHiCPgS0wjUqDn0ZJCbvuOIIS4,594
+botocore/data/network-firewall/2020-11-12/service-2.json.gz,sha256=QzWkhKOcpoaVQgcpfM1KsHVfs2LvklXsS4UxvB_c_8g,55508
+botocore/data/networkflowmonitor/2023-04-19/endpoint-rule-set-1.json.gz,sha256=yDn5lVs2EIL8RuSiNPs7fOJi-RW-l-FYKDeB19pDlLg,840
+botocore/data/networkflowmonitor/2023-04-19/paginators-1.json,sha256=wOkVGLLik9ddJ4lYIRfRNlO9nDh6X68BQwpo38vvMM4,944
+botocore/data/networkflowmonitor/2023-04-19/paginators-1.sdk-extras.json,sha256=-Yf09BFjNn75UmPPQdc2fhWCP04zh0pZHgwGD41NTeE,411
+botocore/data/networkflowmonitor/2023-04-19/service-2.json.gz,sha256=aZWnsCXguj0lucVywDeSX4oDLC42SP-oxptLvxyqMUE,10353
+botocore/data/networkflowmonitor/2023-04-19/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/networkmanager/2019-07-05/endpoint-rule-set-1.json.gz,sha256=fgvlszEGHaXqAxuNoleo0b8L04qyf8abBMRCY_CNyB8,1732
+botocore/data/networkmanager/2019-07-05/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/networkmanager/2019-07-05/paginators-1.json,sha256=754b8htCAy6FPKWQ_iWwTXNPcCZ262kCjNyOe3pwHeA,4412
+botocore/data/networkmanager/2019-07-05/service-2.json.gz,sha256=nXgxZ3IvuW6FFYmv31PGr23K_7HWmbZ_q5J96wbbU3Y,26467
+botocore/data/networkmonitor/2023-08-01/endpoint-rule-set-1.json.gz,sha256=KOaEtlzZ3jFXk5P_SWLm4PtxSJmQWsAlUajQGWtECrE,1303
+botocore/data/networkmonitor/2023-08-01/paginators-1.json,sha256=nHQ47DVYXQU7zjhe4CUO3-J0OdqR2OjaTaQ4c8vcMW0,187
+botocore/data/networkmonitor/2023-08-01/service-2.json.gz,sha256=J3M0_ioMB8YB9tBUlPSPHMLGusmt39MySOkSDMq_1lo,5147
+botocore/data/networkmonitor/2023-08-01/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/notifications/2018-05-10/endpoint-rule-set-1.json.gz,sha256=jVQ7k7M3VGgSZha3d0OZGu2j4V_zftohD52xnmGdwQ8,836
+botocore/data/notifications/2018-05-10/paginators-1.json,sha256=_5tXGmluTxccm4q-eU4QTXfA4R5Hxz9gDkSXXrlvX0U,2086
+botocore/data/notifications/2018-05-10/service-2.json.gz,sha256=ikkbprS3nFMbLXv6CgTCCt7iSNxbJAM_AxJ4iMPvTsk,16071
+botocore/data/notifications/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/notificationscontacts/2018-05-10/endpoint-rule-set-1.json.gz,sha256=dDz70hyXZ0oCdoVhHrGfkmptT6BGSc0l_qBlvwXlCrQ,907
+botocore/data/notificationscontacts/2018-05-10/paginators-1.json,sha256=iGpOqu4PGgEba54bj8oQAK9ZfNQBKCi6VlSw5JqLScQ,197
+botocore/data/notificationscontacts/2018-05-10/service-2.json.gz,sha256=RIM_EhUkac_cUcyOCfNAGOqhwM3uUqPoRilXPcm_yxo,3777
+botocore/data/notificationscontacts/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/nova-act/2025-08-22/endpoint-rule-set-1.json.gz,sha256=Fes0KaspIJO0rIDB2pCdhM6XfqcvpZ1oo_h38zZIoZY,1278
+botocore/data/nova-act/2025-08-22/paginators-1.json,sha256=2GRK4t9Cd6oYnF3H6gUz_6Lbv0c18GOjS43yorxtv40,727
+botocore/data/nova-act/2025-08-22/service-2.json.gz,sha256=V17JsLkCf6kh84l53GQW-kCSjREmnntzaYP0aQ8orhQ,7110
+botocore/data/nova-act/2025-08-22/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/oam/2022-06-10/endpoint-rule-set-1.json.gz,sha256=UBpuUJdMnr8J7I4S2gBfwGKIBF95uzw5efVp1sOm7As,1295
+botocore/data/oam/2022-06-10/paginators-1.json,sha256=O-yiC1jmUubOdoY_nq_BvS2UBfskjOM7cgJ547VWO3U,501
+botocore/data/oam/2022-06-10/service-2.json.gz,sha256=Fukiyjen9HGs9gcPJlMjbd5_4wEFI7LuLMHOZ6iSpow,7039
+botocore/data/observabilityadmin/2018-05-10/endpoint-rule-set-1.json.gz,sha256=lltUUZvKdKmuRJF0RJI9r0i5OvHCWXwWXDGOyfY9cjI,1307
+botocore/data/observabilityadmin/2018-05-10/paginators-1.json,sha256=z0q6D6Y3CIK8BNUSd6nDPb0Bc-BRDJnnb0Y1NOmsf0A,1364
+botocore/data/observabilityadmin/2018-05-10/service-2.json.gz,sha256=hXAVEgg0vx8lKp_iYsB3SGhHcbHNrcVVGMkZUH3UzGQ,15582
+botocore/data/observabilityadmin/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/odb/2024-08-20/endpoint-rule-set-1.json.gz,sha256=zX68iEEN8XppsODYrVXiiClZklDshXwo-p5wfFbeY7c,1295
+botocore/data/odb/2024-08-20/paginators-1.json,sha256=74QkGEVfoLcV2XLgheCVohdLRbXpRAWDw3edDGwS0NQ,1987
+botocore/data/odb/2024-08-20/service-2.json.gz,sha256=dIDDZh4MPgMsJKTWrWbFXlYnDFzTiFnaab8OmlZU23s,21340
+botocore/data/odb/2024-08-20/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/omics/2022-11-28/endpoint-rule-set-1.json.gz,sha256=F0KBIcukHgwEZ9XFsjnkIh2ZYchvyN-6LyNBg5ZW9Mg,1297
+botocore/data/omics/2022-11-28/paginators-1.json,sha256=jv36OpUcqzGkCvGA1USyceaqApWVuD6APgPpMVzsP9Y,3801
+botocore/data/omics/2022-11-28/service-2.json.gz,sha256=HMZBsdv5EHIo2ZOGRra0UMLutHQgjWMbPvMeZ_Mo4K0,42683
+botocore/data/omics/2022-11-28/waiters-2.json,sha256=ilyIBGDpQrZwAA4HzC3dsLnCTYa65vhX32YyypUzGwg,15423
+botocore/data/opensearch/2021-01-01/endpoint-rule-set-1.json.gz,sha256=wktV8CTN4c3plu7L7gipQwWT7s2EQJhzajuCH3rZvxs,1311
+botocore/data/opensearch/2021-01-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/opensearch/2021-01-01/paginators-1.json,sha256=8SvuZgZ0Y0xqPmOvNUfP4ApehWFu0vVQzqEDf2RJcc4,203
+botocore/data/opensearch/2021-01-01/service-2.json.gz,sha256=Rxk_JbpeqhcZlK8nuJ35j8DW_mgFKBDjPNE9fo_t9rg,49228
+botocore/data/opensearchserverless/2021-11-01/endpoint-rule-set-1.json.gz,sha256=r7wV6xruzrev_2FQ_W8lItvGydHVftKo1FuTsfbrldE,1296
+botocore/data/opensearchserverless/2021-11-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/opensearchserverless/2021-11-01/service-2.json.gz,sha256=HwyxCAWeAOtemN49LQftD7h0FspVoNsp5jzEqNN3Gw8,12356
+botocore/data/opensearchserverless/2021-11-01/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/organizations/2016-11-28/endpoint-rule-set-1.json.gz,sha256=CpcQ3hqKEhng__WYZJDacgCc0_ekKhDqoU1ucBPK9dY,1651
+botocore/data/organizations/2016-11-28/examples-1.json,sha256=H-s8eMAzogFkvDj193d_NweczAUFsyrDfjFEE_77BFQ,50009
+botocore/data/organizations/2016-11-28/paginators-1.json,sha256=gN3_2FHJt6Xyap3z3IqVjvbbPcC4jGIddUvlLZOsqA8,3185
+botocore/data/organizations/2016-11-28/paginators-1.sdk-extras.json,sha256=2OOgdafaSQgkls_L9T5FWZ0oZuzRK3NY3Dw4ogTQjDI,382
+botocore/data/organizations/2016-11-28/service-2.json.gz,sha256=Q-qY7nx14QKA9tMPdwkjOQXPlEcnRTdWo3ZgFQQwzUc,39484
+botocore/data/osis/2022-01-01/endpoint-rule-set-1.json.gz,sha256=hUUoFxdf-vSJfT5mUSqMMyyqpOzseePvpqc_0hnM1_Y,1296
+botocore/data/osis/2022-01-01/paginators-1.json,sha256=VWwFT-KdB44DIlPwqvTWail3glgRum4sRxVlzVEE5ek,405
+botocore/data/osis/2022-01-01/service-2.json.gz,sha256=af-7nLO-fVL6RsT10q8HlSj9PV1cG44ZTh0GbJJbDjE,7792
+botocore/data/outposts/2019-12-03/endpoint-rule-set-1.json.gz,sha256=qy_Kc5oOfCBm5dAMJLTuijLREeHRR80HuRgs0BiYw_o,1233
+botocore/data/outposts/2019-12-03/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/outposts/2019-12-03/paginators-1.json,sha256=ARUFaPXTMY1Sjtpwxkud166C_MT7IPGN0I01Lo_U8h4,1918
+botocore/data/outposts/2019-12-03/paginators-1.sdk-extras.json,sha256=IjBeb9H050H25aYAZb_jx0VRuUvzcIuHI_OxcsADLrM,315
+botocore/data/outposts/2019-12-03/service-2.json.gz,sha256=A3BoFiqzjtUUSnfHgf71_zM6z-nQ3Ly5XgnAoHa3P-M,14999
+botocore/data/panorama/2019-07-24/endpoint-rule-set-1.json.gz,sha256=Dg_RhzGcnRh99tNAzX6HpDlG40svuRhGbom6X3cpcM0,1149
+botocore/data/panorama/2019-07-24/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/panorama/2019-07-24/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/panorama/2019-07-24/service-2.json.gz,sha256=QbcQQ-X81eVgaiDOHxXVpfuyy7p2NvCL8GoQAoOjSQY,11986
+botocore/data/partitions.json,sha256=JAaRfCPvyMN1ESBXj-UGdQ1BkMWj1463fqTljs7vEvg,7089
+botocore/data/partnercentral-account/2025-04-04/endpoint-rule-set-1.json.gz,sha256=-7pQlXOVQfGsVvcj_jZOpnKkAvIWkI9K5HABdFAL7J0,843
+botocore/data/partnercentral-account/2025-04-04/paginators-1.json,sha256=rKgRX7jPlJ6Pexb0EVPaPAZJh-b3O0NbkhQQd6EM0Is,536
+botocore/data/partnercentral-account/2025-04-04/service-2.json.gz,sha256=OWuNmJebMvJ3Ke6yQWlRxuZYm5W7XRg8xCJNqUm7ebo,13188
+botocore/data/partnercentral-account/2025-04-04/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/partnercentral-benefits/2018-05-10/endpoint-rule-set-1.json.gz,sha256=alEV5Dbqp3lY_UyQp_sK58s91BS8PaBjDObYO1M_eeE,844
+botocore/data/partnercentral-benefits/2018-05-10/paginators-1.json,sha256=Spea3UM7gtCjad2_yxNdgtZ5FGKHphff2--T_uUEOpg,577
+botocore/data/partnercentral-benefits/2018-05-10/service-2.json.gz,sha256=WTRT0R_c38nSFj08bB1Mzm9Bla-P5xd7-D2EQMQN-YE,9064
+botocore/data/partnercentral-benefits/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/partnercentral-channel/2024-03-18/endpoint-rule-set-1.json.gz,sha256=QmmlZL2P3lsSsZMuk5Yc4xRzS0xykDKCo-z_P-eZsSo,1121
+botocore/data/partnercentral-channel/2024-03-18/paginators-1.json,sha256=J7M-NM9oSS1RXWYRJ9dLiXrIs9T2TZZWeMNvOEqIU80,533
+botocore/data/partnercentral-channel/2024-03-18/service-2.json.gz,sha256=k0pvK3blznn519NOYooVzQvQ_Kpw84EXMNjdGUhjSAs,6726
+botocore/data/partnercentral-channel/2024-03-18/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/partnercentral-selling/2022-07-26/endpoint-rule-set-1.json.gz,sha256=lOUO9CfmWVHColF_w-fIoJebVmB69QOahNY7awplj20,843
+botocore/data/partnercentral-selling/2022-07-26/paginators-1.json,sha256=EGbxWmVZij_vYX7N5XUnfJqb9RSjrrDwQrlXwQPIdbk,2102
+botocore/data/partnercentral-selling/2022-07-26/service-2.json.gz,sha256=6EO-0xgLXFe3feMujMkakCi5A0QuAIlquKn7HgqpanU,54744
+botocore/data/partnercentral-selling/2022-07-26/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/payment-cryptography-data/2022-02-03/endpoint-rule-set-1.json.gz,sha256=Wn_-Njg4lLKqabfWSifE48O7KJXBOaavGebHyG_rrRs,1318
+botocore/data/payment-cryptography-data/2022-02-03/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/payment-cryptography-data/2022-02-03/service-2.json.gz,sha256=YiPwN2Ba7_Z2r6oKpc43Gc9iDerfZjzWBySeipSW4Vs,18775
+botocore/data/payment-cryptography-data/2022-02-03/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/payment-cryptography/2021-09-14/endpoint-rule-set-1.json.gz,sha256=5EeeWwLPx0YnDaZGFPlEd53pjyxqOZtlJpMkC57GFfM,1318
+botocore/data/payment-cryptography/2021-09-14/paginators-1.json,sha256=Q3nZHuUZ53pNZpShnEVxB2Z6ec8thvlIx-hPXFVBNM8,504
+botocore/data/payment-cryptography/2021-09-14/service-2.json.gz,sha256=rmUoiTuvD24VLWuItA2J4tNpvKGOduvQNtox9LQQAAM,22419
+botocore/data/payment-cryptography/2021-09-14/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/pca-connector-ad/2018-05-10/endpoint-rule-set-1.json.gz,sha256=U5ZQnxu1eXvZ5IUIE3C_1-D6XYJrQFtpCLX8mDx6jQA,1305
+botocore/data/pca-connector-ad/2018-05-10/paginators-1.json,sha256=AS3R0cOqXrf6ALY1Ar4Z_HdXbvrA4SwPve_YSeqtIFc,932
+botocore/data/pca-connector-ad/2018-05-10/service-2.json.gz,sha256=7zQZU3H6hJuA3pdLZ31kfE-BpTWsbna9aQTyQk2G-GU,13292
+botocore/data/pca-connector-scep/2018-05-10/endpoint-rule-set-1.json.gz,sha256=7OHGoWR9UZmbzgqi-8EVRxkDGyxugihq0F2_QENZT3s,1307
+botocore/data/pca-connector-scep/2018-05-10/paginators-1.json,sha256=-TAE2EG4hET8i1kSBmb5SkQbT8NEQ_peQNskuUSs0Ug,364
+botocore/data/pca-connector-scep/2018-05-10/service-2.json.gz,sha256=1NN32AOagsKjayPv-xdP8eqbjejodOp7FNeyk3kQuSo,5804
+botocore/data/pca-connector-scep/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/pcs/2023-02-10/endpoint-rule-set-1.json.gz,sha256=_dTHmwClpjcHDBTt7oAsf81zM8DE_1x_bY9N4FTB3Gc,1295
+botocore/data/pcs/2023-02-10/paginators-1.json,sha256=rm1F2IEEf8TDUFnjJYcvrgQaCiVryXOmbjqdg-aescw,525
+botocore/data/pcs/2023-02-10/service-2.json.gz,sha256=NU5l3wXu7epPF7j5NKRr0Gk-KEr4AegaS0Ol2nDj1jo,12228
+botocore/data/pcs/2023-02-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/personalize-events/2018-03-22/endpoint-rule-set-1.json.gz,sha256=IzrYZkTEVSM5RmOyYD932H8bu5VDjO2lDS2nCiZkARo,1158
+botocore/data/personalize-events/2018-03-22/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/personalize-events/2018-03-22/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/personalize-events/2018-03-22/service-2.json.gz,sha256=M1dTETgBpea1Qptc97JP_gP-przZnZEPs-sgZaT9pNc,3891
+botocore/data/personalize-runtime/2018-05-22/endpoint-rule-set-1.json.gz,sha256=DcgHybzF1LdvvjX8V9e5AaUVSbHhYTNDHIIPeg8HSWc,1159
+botocore/data/personalize-runtime/2018-05-22/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/personalize-runtime/2018-05-22/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/personalize-runtime/2018-05-22/service-2.json.gz,sha256=EZEH2PmYOcJSA3jQ2jMtH2CrR94D_OBnQe3hIm-XY6M,3843
+botocore/data/personalize/2018-05-22/endpoint-rule-set-1.json.gz,sha256=IlcOozGf1J_0MPVhxQ5Obyw5BlDbJdhEiroV1Kkwa_I,1153
+botocore/data/personalize/2018-05-22/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/personalize/2018-05-22/paginators-1.json,sha256=PfTPE03jTLANh2F51b68_GALtAUqFWJp2R0o2Xl5u0A,2766
+botocore/data/personalize/2018-05-22/service-2.json.gz,sha256=DYpKan4OInp0V4QgsRGnIEBwNkCGT9Ly9A5Nm16FtGQ,31275
+botocore/data/pi/2018-02-27/endpoint-rule-set-1.json.gz,sha256=fT2qUOe95IIEAviKwwM6spdXKgN17B1nt7XTATrFgCg,1144
+botocore/data/pi/2018-02-27/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/pi/2018-02-27/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/pi/2018-02-27/service-2.json.gz,sha256=ilyNgCIoqE_ML6_cBtWLTL1RBPUVk9xtPCktOm-geN4,12024
+botocore/data/pinpoint-email/2018-07-26/endpoint-rule-set-1.json.gz,sha256=2zotIFxdjg14ynwSwr4p0CfDErvqDkuP6Mu0pVa7V9o,1146
+botocore/data/pinpoint-email/2018-07-26/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/pinpoint-email/2018-07-26/paginators-1.json,sha256=G74a7tI3gD77zuNQfj6bfDHtriSA2qhAWh6Su9Tw6Bc,914
+botocore/data/pinpoint-email/2018-07-26/service-2.json.gz,sha256=JVkTx6u07XShnODOBJblm1sUr4Z6oC0FjE0XKShH4cY,23622
+botocore/data/pinpoint-sms-voice-v2/2022-03-31/endpoint-rule-set-1.json.gz,sha256=pPKFEIRPsmilTecEvjkialxnGrhpQi4cKCyrOEolx-A,1150
+botocore/data/pinpoint-sms-voice-v2/2022-03-31/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/pinpoint-sms-voice-v2/2022-03-31/paginators-1.json,sha256=T0sS1LbRMIty2JJWn7b12bAUhGxaf5hA1ZAsI9Vsj-g,4129
+botocore/data/pinpoint-sms-voice-v2/2022-03-31/paginators-1.sdk-extras.json,sha256=CP3Bd5lERGMsJNkUdcHm8HDd9lidYVI8uQYg78EloQI,1431
+botocore/data/pinpoint-sms-voice-v2/2022-03-31/service-2.json.gz,sha256=NtGDreQycwffHztbIZywLmDB5wlYIuHNieX5uQeAa0w,38886
+botocore/data/pinpoint-sms-voice-v2/2022-03-31/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/pinpoint-sms-voice/2018-09-05/endpoint-rule-set-1.json.gz,sha256=susZ6LBeuWsp_XOP7q8Z-bQYjoS_EAmPIWtRLLaFFt4,1305
+botocore/data/pinpoint-sms-voice/2018-09-05/service-2.json.gz,sha256=jPx_kkccKH4jge6fpbrhHikjxll3E7pFUksdoHLQ6ro,3344
+botocore/data/pinpoint/2016-12-01/endpoint-rule-set-1.json.gz,sha256=f1JCCtRSmjODGm7gM-pSzNZuScg2JnHCm9dtCwIzu0g,1313
+botocore/data/pinpoint/2016-12-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/pinpoint/2016-12-01/service-2.json.gz,sha256=N4V7sZZUi8Nh3IOUIbG2gBja9Jwy3Qmsjhvpei2hVN0,70366
+botocore/data/pipes/2015-10-07/endpoint-rule-set-1.json.gz,sha256=2ThyyZXcbcXa3It3OM-tYpg3IkUyTj9chv7sD86dm7A,1293
+botocore/data/pipes/2015-10-07/paginators-1.json,sha256=a_b-W2Fj-9dt3XIXqHzXHKGRz8elOX8p9h2pI3wg5ls,176
+botocore/data/pipes/2015-10-07/service-2.json.gz,sha256=T85eXKT_m56yHJ3c_b_MfiWnOnaTxnlKXQKutjbPl2I,23040
+botocore/data/pipes/2015-10-07/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/polly/2016-06-10/endpoint-rule-set-1.json.gz,sha256=7rS_9bLfGYLnaJsxec31nAqIH0QOkuCLc3pJtCdnRaw,1147
+botocore/data/polly/2016-06-10/examples-1.json,sha256=4KBzptmKd-ySr0PXR8a5UOE6w8nw-mm0Iq-LRhrtcNM,5101
+botocore/data/polly/2016-06-10/paginators-1.json,sha256=IJnO61fPCtuJPYshmxGjm9ZzkXfOxEvsL0acyUPG55E,463
+botocore/data/polly/2016-06-10/service-2.json.gz,sha256=02UKL7ex8ARCpx-MBOpSc1NzzxabqU2vxDRN7fi4ybw,8149
+botocore/data/pricing/2017-10-15/endpoint-rule-set-1.json.gz,sha256=lhyO3oATwyIV7Cgpx8XF8SpcZSeecBGVRNM-IPdQ9K8,1217
+botocore/data/pricing/2017-10-15/examples-1.json,sha256=LX0A-kHCd3N64FsP7EdT6IV-Sej2qNX9ygW6n6jBucs,4263
+botocore/data/pricing/2017-10-15/paginators-1.json,sha256=rizUQ-J932MNyVUTMjrRSVOm-tmzWnvnYhWoIMGxuuM,820
+botocore/data/pricing/2017-10-15/service-2.json.gz,sha256=xciC62EiccwLX7prLtax-8rxqgjXlQgRvgjWU7qZoIg,4542
+botocore/data/pricing/2017-10-15/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/proton/2020-07-20/endpoint-rule-set-1.json.gz,sha256=5s0net66liWejYorq4XLGrtgWTeRTmyI-YDGYc0z2C8,1148
+botocore/data/proton/2020-07-20/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/proton/2020-07-20/paginators-1.json,sha256=oioU0xuoNT12gWLZPvkd8rPQIM9gk8AOnNiZxDJybLs,3501
+botocore/data/proton/2020-07-20/service-2.json.gz,sha256=KeBjOzHANAMY3R9zkHyZpoKfGd8FbOmlz8lkzadWato,28216
+botocore/data/proton/2020-07-20/waiters-2.json,sha256=sGpaiRnx46CfHQh_T__IIByVlrchRRjseWa3NCdIqdI,6872
+botocore/data/qapps/2023-11-27/endpoint-rule-set-1.json.gz,sha256=rz7_Frt4lrnN1GEEtvfHBBtuQZhXKMTgAcSli5gT1HQ,1300
+botocore/data/qapps/2023-11-27/paginators-1.json,sha256=McjYxb7TrO3PLEXrYryPjrdMEbmbSYts2zAu2s0zbnY,340
+botocore/data/qapps/2023-11-27/service-2.json.gz,sha256=ylsacBwwp65MfEx7yBDpGFfFH1kSWtLfiEpHC_243I8,13950
+botocore/data/qapps/2023-11-27/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/qbusiness/2023-11-27/endpoint-rule-set-1.json.gz,sha256=zDquuqJn840LQYmJ1a-pRtvFZFvoaYbl9bdr2X-WRSw,1126
+botocore/data/qbusiness/2023-11-27/paginators-1.json,sha256=gZen1QNNvkJOLGMSxX-PievGiDr5-MEjDOr73fyIbvY,3436
+botocore/data/qbusiness/2023-11-27/paginators-1.sdk-extras.json,sha256=q8kHxHtnclzAwbKItnYG8gmYQx4NgxA2wfYQVA3yew8,428
+botocore/data/qbusiness/2023-11-27/service-2.json.gz,sha256=qsizkhh9-eUzDoR7_JSX5vRGj18OCBxeKgaAcxamqg4,55359
+botocore/data/qbusiness/2023-11-27/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/qconnect/2020-10-19/endpoint-rule-set-1.json.gz,sha256=kjt4GFV-tqbyEXQ2WECwg8FYJ864N0IIVCnHbew8dZs,1298
+botocore/data/qconnect/2020-10-19/paginators-1.json,sha256=7QW4D2QMFD1FAsyTfPOtoK82I6R2HJeZLva7-ZmnKSM,3927
+botocore/data/qconnect/2020-10-19/service-2.json.gz,sha256=081k2EHFfzhuofX-gw9328aGXmXQFo1np5MwiZ55FW8,57899
+botocore/data/qconnect/2020-10-19/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/quicksight/2018-04-01/endpoint-rule-set-1.json.gz,sha256=ZheCMCS4E-B-xoOLPWEhU9cpT2FY1_kWIZi8EXUcPJ0,1152
+botocore/data/quicksight/2018-04-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/quicksight/2018-04-01/paginators-1.json,sha256=fSxmq31yhxOoSK5iC_8DsHBF-_7VedmZffrgV-gRyr0,6868
+botocore/data/quicksight/2018-04-01/paginators-1.sdk-extras.json,sha256=G2fJdAlG2TfBIPG8xqxUa132R8bQFRhCO7mvj-PC4r4,4779
+botocore/data/quicksight/2018-04-01/service-2.json.gz,sha256=OVI03N0_LmQApZ0Zd5hvus99EcyYwIgzavfW-5PANpQ,210313
+botocore/data/ram/2018-01-04/endpoint-rule-set-1.json.gz,sha256=6FrBO5uHMR1Bq_M0XLCiinWhk2_-y09uL6Q_Tl3VvKs,1230
+botocore/data/ram/2018-01-04/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/ram/2018-01-04/paginators-1.json,sha256=68WO6NwCy0OQL3rko-MRoZ0l1F2vhih8z8F3sse3R3g,1085
+botocore/data/ram/2018-01-04/service-2.json.gz,sha256=t2aQ073CsATMMDYLIOASieuBPsDT5R17BTjyohr9pkY,18021
+botocore/data/rbin/2021-06-15/endpoint-rule-set-1.json.gz,sha256=0LSFaAlOBpRYSUpXV1G2faK2_YnfdcEtdFtVfSHTyA4,1146
+botocore/data/rbin/2021-06-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/rbin/2021-06-15/paginators-1.json,sha256=LB-X6UiLpJdFPrOCSc0raKGabdXiY9PhtS7nzQJbMts,181
+botocore/data/rbin/2021-06-15/service-2.json.gz,sha256=5z4iMxfhDmB2fEtaMLz8qY1xkukE_3GNWJTKJHCWzKM,4962
+botocore/data/rds-data/2018-08-01/endpoint-rule-set-1.json.gz,sha256=eUs84dPVyQlvm3_DFBdKAW5P9-uKcJDG-ssrX2jRDF8,1149
+botocore/data/rds-data/2018-08-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/rds-data/2018-08-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/rds-data/2018-08-01/service-2.json.gz,sha256=IDI_3c_HnOIOSzk51OOxvzZAeRrOrd_lrQPgevYEuaU,6569
+botocore/data/rds/2014-09-01/endpoint-rule-set-1.json.gz,sha256=gXeNBzvu8FpAYg2WQaqnS9OpRh3u7FkS-fI0slP_n4s,1234
+botocore/data/rds/2014-09-01/paginators-1.json,sha256=CKMhQjYqNQB1hiHNi4vCNIVtQvu29SM_ySRhqxTKfOQ,3095
+botocore/data/rds/2014-09-01/service-2.json.gz,sha256=uYawlF7v5mMGuMpCHWs0Q3VY7pf0aoLDzcZgUDxCiP0,37839
+botocore/data/rds/2014-09-01/waiters-2.json,sha256=9BpCCotIHKKeyJHD5Bo1fdRi6EnHK6jyJJx_9wswzCQ,2645
+botocore/data/rds/2014-10-31/endpoint-rule-set-1.json.gz,sha256=-yaMsUCL6embVCf7xwIIkigYfUfh5gRJc6Rz4fpK5jw,1230
+botocore/data/rds/2014-10-31/examples-1.json,sha256=Pa_Dpbo8pg0O9rZRPEuFXsgnzT6XUqIfwHpXauQnc0M,57903
+botocore/data/rds/2014-10-31/paginators-1.json,sha256=sVBn30vV3XWtW5PxTgtpHzvoQcB0J900ZdRg9cPJnm0,7402
+botocore/data/rds/2014-10-31/paginators-1.sdk-extras.json,sha256=S21buVoyp0LlykSD0lYWlVIRbOqJB4qpVw7mt2GFprQ,192
+botocore/data/rds/2014-10-31/service-2.json.gz,sha256=ItCs4Oh6fEfYodh4dBGmPnC4WplzBKFYqxbSA8ZhDiY,168716
+botocore/data/rds/2014-10-31/service-2.sdk-extras.json,sha256=NWqAyPauBSLTPFOO_wMu4XZ7VTkw7nY8QjCorphUpTM,1345
+botocore/data/rds/2014-10-31/waiters-2.json,sha256=DaJxFaWQOJpx0aNV4rEHX8yDWHBfTWHNkA3u6NgDAOk,10970
+botocore/data/redshift-data/2019-12-20/endpoint-rule-set-1.json.gz,sha256=bEoR_2-YzkVPtD2_DmS5yOG1ka0iCr0XDzOutQ62-OY,1152
+botocore/data/redshift-data/2019-12-20/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/redshift-data/2019-12-20/paginators-1.json,sha256=o__jaQIlvpn0QI1nUo42lhQikOWDAJKjNGc75Q982y4,1108
+botocore/data/redshift-data/2019-12-20/paginators-1.sdk-extras.json,sha256=W5XyQjgYJkSJeO772ifvKMUzs8b5j4AvLb8YQwWnm_w,458
+botocore/data/redshift-data/2019-12-20/service-2.json.gz,sha256=pDu3wV1D-yrKqVtqZILoJB2Vsd54GVgqga5sszi018k,8044
+botocore/data/redshift-serverless/2021-04-21/endpoint-rule-set-1.json.gz,sha256=aCE8bs44qwcwx6rBXHa_ObNGw-Jxlus38ymZcumoqhs,1156
+botocore/data/redshift-serverless/2021-04-21/paginators-1.json,sha256=1vYwDzBLSRf-kJMkph5FppY0Ud0HNqTgr3GRJoiOKh4,2476
+botocore/data/redshift-serverless/2021-04-21/service-2.json.gz,sha256=7itlsTHviElMHz_FeHd-CjvMTw3pQJ50i7DW6Uwy6HI,22461
+botocore/data/redshift/2012-12-01/endpoint-rule-set-1.json.gz,sha256=S4Gj1STeYszg3vPVCJAIV7MJTqRdxkVqN8S-BLwOzIg,1234
+botocore/data/redshift/2012-12-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/redshift/2012-12-01/paginators-1.json,sha256=U9XZCiP9Zd_FwnH0gENTm1olzWqz-FWxLwLeiuiZMxI,6701
+botocore/data/redshift/2012-12-01/service-2.json.gz,sha256=IIFehCi_OL5WACisKPgHqZpG6DydLcoI7W4oYzfaIOI,78219
+botocore/data/redshift/2012-12-01/waiters-2.json,sha256=mvax_COD6X10xa7Toxa2DsrarFdKFg9kOWbIKRLahS4,2344
+botocore/data/rekognition/2016-06-27/endpoint-rule-set-1.json.gz,sha256=ds2I76y40uP9HoZ65sfyUXKOjKarAKWww50mQ5-bOJ4,1150
+botocore/data/rekognition/2016-06-27/examples-1.json,sha256=pEUj6cF9yKB10eaE3lAAObBMc4nV3Jak105Ro2A3ZMc,20327
+botocore/data/rekognition/2016-06-27/paginators-1.json,sha256=mDoU6wXUCCgHeOrcvnEqTpQ18yV5otpEqZt5TsFarFA,1699
+botocore/data/rekognition/2016-06-27/service-2.json.gz,sha256=T4V0iuN6-EjJtMkDTptkKMx2upOOQw14ICDr34dWnxY,71458
+botocore/data/rekognition/2016-06-27/waiters-2.json,sha256=KRKVzu37WzZwVdazhDURGYo_qTbgIDDIhBTPyvTt1lg,1542
+botocore/data/repostspace/2022-05-13/endpoint-rule-set-1.json.gz,sha256=gUpvOID7isn0jfT_5ZLOEbHGYUbZ0K0nFpZMuby7pQU,1300
+botocore/data/repostspace/2022-05-13/paginators-1.json,sha256=YMwMMFg603UCo-j7eNe9slLy3jUAAYa_ODX-Fh8ffd8,345
+botocore/data/repostspace/2022-05-13/service-2.json.gz,sha256=J7A4lxpQAsUmOPFehOf5KgaNzdCKpk28cX9cHuoF5Xk,5946
+botocore/data/repostspace/2022-05-13/waiters-2.json,sha256=IFWB48e2MaBFCt9EUH0lvhocQtIiyM5RKbLNpjKLwOY,2429
+botocore/data/resiliencehub/2020-04-30/endpoint-rule-set-1.json.gz,sha256=B64CNBssxkJGwjaGQdOdminBLPfs7P7i68566XGmGsA,1152
+botocore/data/resiliencehub/2020-04-30/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/resiliencehub/2020-04-30/paginators-1.json,sha256=i9EQooI4rve2dtahGRtoAc2pv-KEBS8cGy_L4ND_khM,569
+botocore/data/resiliencehub/2020-04-30/service-2.json.gz,sha256=A34E5FgYtAmRWtDgG-ApK8No_s1w85SrQ7MsBHrL7h4,29858
+botocore/data/resource-explorer-2/2022-07-28/endpoint-rule-set-1.json.gz,sha256=njBXGxZPdFggewA3ZxLoOz94nkmZOmNwbSxFoHGXScU,1158
+botocore/data/resource-explorer-2/2022-07-28/paginators-1.json,sha256=PuLP47m2fS4G52Ue_2zXbnv1mbBD9Nd9Rjb9ZH4H6pE,1891
+botocore/data/resource-explorer-2/2022-07-28/paginators-1.sdk-extras.json,sha256=1BdFcjO9uS6r4XTazfeCLVaX7KPO_oELRwSUy9VBt7M,268
+botocore/data/resource-explorer-2/2022-07-28/service-2.json.gz,sha256=LMX3xLBmSApq0UZ4dj4758xZrjIohCd5b_mvYRQBRvQ,15681
+botocore/data/resource-explorer-2/2022-07-28/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/resource-groups/2017-11-27/endpoint-rule-set-1.json.gz,sha256=pR1uV7xWkbFL0lqvMpPiK9v6FEr7b4D5kR-JWbFbA6M,1239
+botocore/data/resource-groups/2017-11-27/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/resource-groups/2017-11-27/paginators-1.json,sha256=UDYlyYXlpEjfEq1H2dATIOO_M33BElQGPX3C7qMybI8,971
+botocore/data/resource-groups/2017-11-27/paginators-1.sdk-extras.json,sha256=sLaKgsyulktCelU4GGH6YRaRLWwmRiSmoKar3VfbunY,165
+botocore/data/resource-groups/2017-11-27/service-2.json.gz,sha256=wncJfFLD6h4NGKBJlPgz4lnyleFtfm0aq-euIJpNd3s,14326
+botocore/data/resourcegroupstaggingapi/2017-01-26/endpoint-rule-set-1.json.gz,sha256=2JsFk-UpZNBTMHu-USo-eLPuFsC02xr9dGZ7I6YCQQc,1149
+botocore/data/resourcegroupstaggingapi/2017-01-26/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/resourcegroupstaggingapi/2017-01-26/paginators-1.json,sha256=foWHoPRSV6VjAwni6ujDQPi6y99hZYvvnaMzbzSAqFY,854
+botocore/data/resourcegroupstaggingapi/2017-01-26/service-2.json.gz,sha256=QauwOFI4JinkIruZAtcIr08QOjUDCoYEQAF3zgDmdoY,8919
+botocore/data/rolesanywhere/2018-05-10/endpoint-rule-set-1.json.gz,sha256=bYTnE-bvJJ_JTuBfjsxjOHyk6RitYMssB6lleNr44Kg,1153
+botocore/data/rolesanywhere/2018-05-10/paginators-1.json,sha256=IaF8k8b_3R6qbXcxbFkIQqN0DTaCim4eQhIiEanVZkc,541
+botocore/data/rolesanywhere/2018-05-10/service-2.json.gz,sha256=d6ZkSdyKqxnoJNoDaYnfe-rB-L9L4X68D_T9F4nrOUY,7228
+botocore/data/rolesanywhere/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/route53-recovery-cluster/2019-12-02/endpoint-rule-set-1.json.gz,sha256=_RrBptq-MUdfnl74IOjLmCm20BpECUVDGymnnyqZLDA,1168
+botocore/data/route53-recovery-cluster/2019-12-02/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/route53-recovery-cluster/2019-12-02/paginators-1.json,sha256=UhH6MsunbcB3w057DvJoHxEWGweOpch31kCr68-51eQ,201
+botocore/data/route53-recovery-cluster/2019-12-02/service-2.json.gz,sha256=fbrca3-yFsA4W2fwspXgRWvZpfXxGSUzeltqjhkWopY,4044
+botocore/data/route53-recovery-control-config/2020-11-02/endpoint-rule-set-1.json.gz,sha256=TdQO_65jHPPf5Ads1v5ZBTe2t4vNKo9TSJMLPSjvvOk,1563
+botocore/data/route53-recovery-control-config/2020-11-02/paginators-1.json,sha256=eDByeUTgAtdsrqJD0NiWUp5AfuXhqM2q0oa-5MCgt38,892
+botocore/data/route53-recovery-control-config/2020-11-02/service-2.json.gz,sha256=H46G4a2DdRJeM2MHgxoJeB3Lncl180wAjMRuc4Nf1hQ,8486
+botocore/data/route53-recovery-control-config/2020-11-02/waiters-2.json,sha256=iw6vHr5XZ7c87aPCP4ejk0EHpOVt-ZT2ioC0asbgGJA,3674
+botocore/data/route53-recovery-readiness/2019-12-02/endpoint-rule-set-1.json.gz,sha256=I9QtcDvhwP86HRmxUlW79sJ11ky66bXfzktA2udYnwQ,1166
+botocore/data/route53-recovery-readiness/2019-12-02/paginators-1.json,sha256=bkbDR1VU1mtDe84KapiLM8rWUPHKj-aEpn7TLzqFeW0,2032
+botocore/data/route53-recovery-readiness/2019-12-02/service-2.json.gz,sha256=m4ZRQQR9N2JoSP2BS41s1Bocndl_xtzbkE6zh50dmb0,7335
+botocore/data/route53/2013-04-01/endpoint-rule-set-1.json.gz,sha256=2reaSxLLP69X_BlBEMAaT4OCBlqUcA08Lm2TkwIQnnk,2244
+botocore/data/route53/2013-04-01/examples-1.json,sha256=C3c7hhO4Y2jbpqrTEGNc7x007deldIJsNVDxdhaH_T8,29631
+botocore/data/route53/2013-04-01/paginators-1.json,sha256=-nS2WnQKiOUbqyQRXiMxCbqHwZ7xJQXVS98-vYEjiuI,1734
+botocore/data/route53/2013-04-01/service-2.json.gz,sha256=2kcUUfrkM6XSqh8KuRvtCOvbGz7dBSdIMqcrgb6VvhU,64381
+botocore/data/route53/2013-04-01/waiters-2.json,sha256=s6BzW8AQ9pEM5yCsRa64E7lfUvhX5vxNARuiAtZwjsU,338
+botocore/data/route53domains/2014-05-15/endpoint-rule-set-1.json.gz,sha256=F3W6Kz0NOsWIgiflddkroI6ygSD7DbZGhyibiHt7k3U,1155
+botocore/data/route53domains/2014-05-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/route53domains/2014-05-15/paginators-1.json,sha256=VN49BhgZ_VxpcqSi9W0aIr8bv4iFv9QnrVoUerrFwoI,696
+botocore/data/route53domains/2014-05-15/service-2.json.gz,sha256=lCJkPgnG12ray_bIWk31mCa47Z29M9FmbHzo11nvHfE,21376
+botocore/data/route53globalresolver/2022-09-27/endpoint-rule-set-1.json.gz,sha256=WkYSUm35443A4XvchXUemVTWb228dVOx1VA_VQLVg-8,845
+botocore/data/route53globalresolver/2022-09-27/paginators-1.json,sha256=jCx3URYdT1kl3Hf37Pa8S-oFP9MXI4Lyq1G7bibeRdA,1617
+botocore/data/route53globalresolver/2022-09-27/service-2.json.gz,sha256=iRYkmBa44E4pFbwjaS6s9GhQ4wAjmFLHu-xsuoNxg98,16643
+botocore/data/route53globalresolver/2022-09-27/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/route53profiles/2018-05-10/endpoint-rule-set-1.json.gz,sha256=T47ACm5Ei_vWvpvskE7qBy4S9E8A4997yNDq-_nI2Ic,1306
+botocore/data/route53profiles/2018-05-10/paginators-1.json,sha256=-QdEoZNxlj37dlhmE8U5hCOev_UGR-5Nv-_bCti2HtE,579
+botocore/data/route53profiles/2018-05-10/service-2.json.gz,sha256=VeU-tR06TxXzppqAXv3zRwmZKJs216T1FChrkNvHxNY,4822
+botocore/data/route53resolver/2018-04-01/endpoint-rule-set-1.json.gz,sha256=cmHAgRo0gy8dqwokuyIIA6Rr2QL8B8pMN5XQw0RxpTI,1240
+botocore/data/route53resolver/2018-04-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/route53resolver/2018-04-01/paginators-1.json,sha256=dZl4mnbzBj99_gIPlVRqF3YSMSG98HW1xVD1Kh1C_-k,2954
+botocore/data/route53resolver/2018-04-01/paginators-1.sdk-extras.json,sha256=3XJ5UEbB_NT-xjx41jRgxgoNKMWuUL-bcLPzf9n1o9I,806
+botocore/data/route53resolver/2018-04-01/service-2.json.gz,sha256=w0Ypl4Oun1hXE1wWDq3WfU3y6b7VZGg80kpJ0OgLrKs,33355
+botocore/data/rtbfabric/2023-05-15/endpoint-rule-set-1.json.gz,sha256=dkZTaxPvTRG0VqNhvsAVbPRMPkiVuHJ8T1NIXaIYCSg,1279
+botocore/data/rtbfabric/2023-05-15/paginators-1.json,sha256=6ywBd8ZAvDUaJQrd50jIX7X-0dWKd2qJv8qr7gpM1_k,527
+botocore/data/rtbfabric/2023-05-15/service-2.json.gz,sha256=hgkRj_2yKVsXvyNLn0zL_6ix95EQaZo8zd8P6Im_Tng,8002
+botocore/data/rtbfabric/2023-05-15/waiters-2.json,sha256=mNT9bGvX2MjiibpZQCvH5iTEijZ576wsLLQlB_IcX3s,4847
+botocore/data/rum/2018-05-10/endpoint-rule-set-1.json.gz,sha256=j0p71OCv2KCn6NgYVRw8P2VIeApKCseuvMBguxWnD_U,1145
+botocore/data/rum/2018-05-10/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/rum/2018-05-10/paginators-1.json,sha256=eiv4iOnLCb9wVy6VijmIS8FeKbt7SfSmIY3M4qv3wIs,733
+botocore/data/rum/2018-05-10/service-2.json.gz,sha256=0UJR9XQHDb7NIVChUKw-6Ei3c8Tfy_ceAFR47b1fMok,15171
+botocore/data/rum/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/s3/2006-03-01/endpoint-rule-set-1.json.gz,sha256=tYaA5Am-D-hw0bKeLLDgTVWaa1Hrtoj7S36kXpm9OJo,23123
+botocore/data/s3/2006-03-01/examples-1.json,sha256=bGw9MrbmwHRES_w7kwW-Hr31-Js7JGP_oxoE4Tw21b4,57596
+botocore/data/s3/2006-03-01/paginators-1.json,sha256=sCuUQpM7lam7gE_27Js9PhAZ9gaz0L6CIoyeK07U3Tw,1837
+botocore/data/s3/2006-03-01/paginators-1.sdk-extras.json,sha256=FBQlFfamIxCDHfK2jYQSHUbEYyyQSfA-GrUTPx-aL_Q,882
+botocore/data/s3/2006-03-01/service-2.json.gz,sha256=MwElsza1Glt9EMlOyxDSy-kdxbCPC169v0vFxsZK7-M,170127
+botocore/data/s3/2006-03-01/service-2.sdk-extras.json,sha256=Fhejim14rytpb7Tha_0Y63Ktc6qdJHvAeGQ8-d_WZAg,98
+botocore/data/s3/2006-03-01/waiters-2.json,sha256=m0RJIxnJW7u6emLjY1201rmfeKxgz1f7VDU7qKJOI4c,1436
+botocore/data/s3control/2018-08-20/endpoint-rule-set-1.json.gz,sha256=5g0UVBXCsfDf0hkJFKr4-4udbxEndbh5UksVmx1fRbg,9127
+botocore/data/s3control/2018-08-20/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/s3control/2018-08-20/paginators-1.json,sha256=XOG3QAgT_Ohb5f0Z6B-TyJTX1M_rM05_HryKrEyxYiM,603
+botocore/data/s3control/2018-08-20/service-2.json.gz,sha256=Xii-B8GlFswUpUD_NrM_CLCTBThy0koqZ9YxubQl3DU,66285
+botocore/data/s3outposts/2017-07-25/endpoint-rule-set-1.json.gz,sha256=XZH_Uq1Ve0aJLoysYthGfDWEwruZrKbKCJXwalMN0lQ,1152
+botocore/data/s3outposts/2017-07-25/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/s3outposts/2017-07-25/paginators-1.json,sha256=MNhAyDW1gAXK_msh5EL1QpaFjXE7KCnk2xzMuUZUiT4,527
+botocore/data/s3outposts/2017-07-25/service-2.json.gz,sha256=-QOLLM8vuYeNuLYS7MPafRUeSERV_yhwpzMLzbdZgl4,3475
+botocore/data/s3tables/2018-05-10/endpoint-rule-set-1.json.gz,sha256=xOqi_FlgDolaIs61nhHwYL-I4wFyykKPYW7uWGO_-Fs,1298
+botocore/data/s3tables/2018-05-10/paginators-1.json,sha256=V7AYoq3xxG0TSO6MYFTjrAVhuUQ2fdLSCfrIS-LheUk,569
+botocore/data/s3tables/2018-05-10/service-2.json.gz,sha256=NzB1hWFHI-ne1wjvDVSz1yYcRbFs6F1qY-ZGg_ay9x8,13189
+botocore/data/s3tables/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/s3vectors/2025-07-15/endpoint-rule-set-1.json.gz,sha256=BEaEpJ78zpdcaNba-_IYI93jTdm21HWUhFHBAwjZyGM,835
+botocore/data/s3vectors/2025-07-15/paginators-1.json,sha256=dg8UTkD_91qznwta7_XjX-3mpkyV-NVNRH8SLZVjtKk,517
+botocore/data/s3vectors/2025-07-15/service-2.json.gz,sha256=sj1UajK4szwL7-KeO-WENFZcoGNhNpXPp8TAnFJTfZY,8796
+botocore/data/s3vectors/2025-07-15/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/sagemaker-a2i-runtime/2019-11-07/endpoint-rule-set-1.json.gz,sha256=fBeCuBmPxXfu-mDU4327p-k_ZvMA8lG4qiTq2v4_5q8,1159
+botocore/data/sagemaker-a2i-runtime/2019-11-07/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/sagemaker-a2i-runtime/2019-11-07/paginators-1.json,sha256=X0gq-uz_QUVGPACQxWwKf6n-ZZ-MsaXi3huDYMOu10o,199
+botocore/data/sagemaker-a2i-runtime/2019-11-07/service-2.json.gz,sha256=yeeJeyww25WlcW3f0X9OzDRYgi_1IS8HJT9uEsDJzR0,3798
+botocore/data/sagemaker-edge/2020-09-23/endpoint-rule-set-1.json.gz,sha256=SgrtxEyidQCLiq4U2yBU1r1f9yfuvsLk3v7UqXDzkJg,1153
+botocore/data/sagemaker-edge/2020-09-23/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/sagemaker-edge/2020-09-23/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/sagemaker-edge/2020-09-23/service-2.json.gz,sha256=Qke6xQFpZQLnx7XHoT_a6sym3Jex3D6bYCopTUz7Vrw,2248
+botocore/data/sagemaker-featurestore-runtime/2020-07-01/endpoint-rule-set-1.json.gz,sha256=aCphIJ5jQ9Zm3mm7Oe97G0oYYMCMAzG6Ru-Jk9b089A,1168
+botocore/data/sagemaker-featurestore-runtime/2020-07-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/sagemaker-featurestore-runtime/2020-07-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/sagemaker-featurestore-runtime/2020-07-01/service-2.json.gz,sha256=SOitgGXJ7xvEKq3z8_2qsH_kY0Qlp5Pl3a-JS8becBI,4151
+botocore/data/sagemaker-geospatial/2020-05-27/endpoint-rule-set-1.json.gz,sha256=FzfTThcSF3S70c5pxm0ksWICxbP_RTuTFry0iHkAFNg,1308
+botocore/data/sagemaker-geospatial/2020-05-27/paginators-1.json,sha256=F6o4MlbqixSACzxItwWHBiMmvvc3VqdxdWlY9NRKy6E,609
+botocore/data/sagemaker-geospatial/2020-05-27/service-2.json.gz,sha256=no77Lqhy0E0isKh7ssoJKukiMPGhoEorEjzboFD80Vg,11920
+botocore/data/sagemaker-metrics/2022-09-30/endpoint-rule-set-1.json.gz,sha256=wpdBoyRYao_933u7mLmQp2M9tLuDk48ulepv1AtIvV4,1240
+botocore/data/sagemaker-metrics/2022-09-30/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/sagemaker-metrics/2022-09-30/service-2.json.gz,sha256=qb5o_z-QHyMGkvcSxdltn9UOR8RDSKzoyKMP7Y2J6cY,2024
+botocore/data/sagemaker-runtime/2017-05-13/endpoint-rule-set-1.json.gz,sha256=p-Q74t3JPD1-YMi105JGV3FdfdkWycHbSLIuM8OeP2I,1271
+botocore/data/sagemaker-runtime/2017-05-13/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/sagemaker-runtime/2017-05-13/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/sagemaker-runtime/2017-05-13/service-2.json.gz,sha256=206X3ny3FcE_ZrNjlAimZBko0f90U7b4_EaoN8R_yEA,5713
+botocore/data/sagemaker/2017-07-24/endpoint-rule-set-1.json.gz,sha256=4bBju3PzYKa0ECSqkKsYlLksx1Yu9MeYNgAlTUs3gJk,1268
+botocore/data/sagemaker/2017-07-24/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/sagemaker/2017-07-24/paginators-1.json,sha256=GNdREMpCEstnpX0K38x4i33IG3XZbv5pLMkTbxgnX18,14992
+botocore/data/sagemaker/2017-07-24/paginators-1.sdk-extras.json,sha256=ibwgf2aj5blabLx_CmGc9HT5PfqVfBU1UhdXmAVxmi0,154
+botocore/data/sagemaker/2017-07-24/service-2.json.gz,sha256=R5FeJFDIxrEPev3iGQbYYYdznYIAFNgn1x_UUaE8bK4,341496
+botocore/data/sagemaker/2017-07-24/waiters-2.json,sha256=dwquOoDq4TRr9dNbKme2UILOi8MJfe5ADTYkP4bfABA,7018
+botocore/data/savingsplans/2019-06-28/endpoint-rule-set-1.json.gz,sha256=uWPxKhYG9YiBFCL3k3tsdqKZxQiaSFKi8RM4rYb5h8E,1543
+botocore/data/savingsplans/2019-06-28/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/savingsplans/2019-06-28/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23
+botocore/data/savingsplans/2019-06-28/service-2.json.gz,sha256=spbggIfA0GaupYUN2encgnIW1NjP-sB8nsUrjADgwhw,4552
+botocore/data/scheduler/2021-06-30/endpoint-rule-set-1.json.gz,sha256=s_f8cwsQSm1dIBTnLJtETFRVZvAhKqpIl3INL-U90mA,1296
+botocore/data/scheduler/2021-06-30/paginators-1.json,sha256=VH5c3yVo1Un4lL_GVN-D3A5GNOTWMmnqAQ0QZAOMJOo,363
+botocore/data/scheduler/2021-06-30/service-2.json.gz,sha256=TvVcfqU5MAVSkqdZximaKjxIZXLnx_uD2JboDOTnzuM,9349
+botocore/data/schemas/2019-12-02/endpoint-rule-set-1.json.gz,sha256=y4rzlP0PGf8LKnN8iIGkxahcE0BC0I7wWleMnLWvVyc,1149
+botocore/data/schemas/2019-12-02/paginators-1.json,sha256=JG7VhSHU5MW5ZSEzWuvc0fcOMdYngtguHEeVk1fPoro,830
+botocore/data/schemas/2019-12-02/service-2.json.gz,sha256=4g241xWlFLZMGzFy50iew02X2mwI4hLEJeb7P50ElQI,5722
+botocore/data/schemas/2019-12-02/waiters-2.json,sha256=t1IowU2djOrDdhK7r7dmmVfVARz1Zp31Dl3MPtnqy5I,824
+botocore/data/sdb/2009-04-15/endpoint-rule-set-1.json.gz,sha256=Jb14UamWfzfAVtlY7QWjq40V5ED94vZ_Uis-1CEG2AU,1198
+botocore/data/sdb/2009-04-15/paginators-1.json,sha256=3KF7ZF879CPbTIZ8drlqnq5S3aFHdubXunwekE3ARG4,317
+botocore/data/sdb/2009-04-15/service-2.json.gz,sha256=Z2mIj-BHqcYbboNOvc5ek02iqLkghVPvYsUIGED-p6U,6036
+botocore/data/sdk-default-configuration.json,sha256=LlmdeqSk0HQAKMCGNgPsFO1K6dJXQdjzq8Ad3wRs7g8,4135
+botocore/data/secretsmanager/2017-10-17/endpoint-rule-set-1.json.gz,sha256=7U2rDbENODRsC275rxs1mXhkg9DJ9D-9kDUInEQXio8,1351
+botocore/data/secretsmanager/2017-10-17/examples-1.json,sha256=3LKYx_uc48qXDFx7m8cU2l8XByq1wu28h5fOggrmDCI,22410
+botocore/data/secretsmanager/2017-10-17/paginators-1.json,sha256=wFoEW6m_jRSAAt8D1r54a9XXWnZerkFn83sHj413-ww,188
+botocore/data/secretsmanager/2017-10-17/service-2.json.gz,sha256=wgV3bZmaP1obEDqjW_XE_K_ruiEWSpVyWvXmE8y-84Y,22383
+botocore/data/secretsmanager/2017-10-17/service-2.sdk-extras.json,sha256=IEA3uxtjPY8I1on-q2W9-tozHHIVmneQyB6gCTcYTro,120
+botocore/data/security-ir/2018-05-10/endpoint-rule-set-1.json.gz,sha256=mo5sMi0FIStcsTZ8LVtq381blH40irCzJ-Wb5OtvV6g,835
+botocore/data/security-ir/2018-05-10/paginators-1.json,sha256=0Rs0rrkL3TlunqQTlmqG3Exz6DaEY1o4HU93oVh66QA,842
+botocore/data/security-ir/2018-05-10/paginators-1.sdk-extras.json,sha256=fyqNotjQNWEgg7fBBmltwWw7IoVr8M4vv9081d1COKk,339
+botocore/data/security-ir/2018-05-10/service-2.json.gz,sha256=d_QF_TLDdp_iN-ZuUS19m5X7buJVRt5DwDaMK4PQyd0,12359
+botocore/data/security-ir/2018-05-10/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39
+botocore/data/securityhub/2018-10-26/endpoint-rule-set-1.json.gz,sha256=n-OphfR5lLGRcOTHYjtNDfQQRb2rXMoDu_56hUGJVR8,1151
+botocore/data/securityhub/2018-10-26/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/securityhub/2018-10-26/paginators-1.json,sha256=gfqAsoWM3yy_-y4GgIRJP0g8WaDWfVU7BeeYKd34gQw,4103
+botocore/data/securityhub/2018-10-26/paginators-1.sdk-extras.json,sha256=XfR__53xdVA-fFj_GM6nl3mulsEQYh6An6wvwDAye-k,443
+botocore/data/securityhub/2018-10-26/service-2.json.gz,sha256=8ETFIfoktTNAZ_hfKAy6Ujp7oF4x4nHK5omK9yGeFnw,166849
+botocore/data/securitylake/2018-05-10/endpoint-rule-set-1.json.gz,sha256=19qFVXyEk-pdywq56ovM8qSHI0GTGFHZEwj4PdXt81k,1299
+botocore/data/securitylake/2018-05-10/paginators-1.json,sha256=aw_RlW6BEfqxgzWUOJF6ZrCTf49mvjJ9uAmhefV_2kg,705
+botocore/data/securitylake/2018-05-10/paginators-1.sdk-extras.json,sha256=v0jKSsBUrC-WdKoMQzNm6hfXLmDajUWqKZtLDn1TA9k,169
+botocore/data/securitylake/2018-05-10/service-2.json.gz,sha256=N65TIuSXFpYGK5Aw2dJ-tce-AsizabA7NkSgQhDri1g,14449
+botocore/data/serverlessrepo/2017-09-08/endpoint-rule-set-1.json.gz,sha256=-zqqOgZX0-ntRoFMJ-F9VhNb9QdMm6Nu1K_KzkHaKvk,1237
+botocore/data/serverlessrepo/2017-09-08/paginators-1.json,sha256=6mp7kgpraGJSmfK8vEcMsz_LdDUfQN9dI4kjn83wRhY,543
+botocore/data/serverlessrepo/2017-09-08/service-2.json.gz,sha256=o9WFOZOptBOda1S0QxOjD3mejHhMI98lcGLxUhF3sYQ,9570
+botocore/data/service-quotas/2019-06-24/endpoint-rule-set-1.json.gz,sha256=c5HngciuhLKpDisaEn4EgYe_BTvY7TUSZupDjoi_z9Y,1237
+botocore/data/service-quotas/2019-06-24/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/service-quotas/2019-06-24/paginators-1.json,sha256=e9hZphztzUJpLy1e7mpXUhwobjRsYyLMWkY1DYQfRpw,1149
+botocore/data/service-quotas/2019-06-24/service-2.json.gz,sha256=mhV-AwibQFr7jYPd-qjSy-Lg3-j3DyVndwdOzXeOMFc,10513
+botocore/data/servicecatalog-appregistry/2020-06-24/endpoint-rule-set-1.json.gz,sha256=iDCecyLC40i04hwJk8dZ448MTrpCAXXWAWBwgNO6yt0,1247
+botocore/data/servicecatalog-appregistry/2020-06-24/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/servicecatalog-appregistry/2020-06-24/paginators-1.json,sha256=2lclqrEMyRSrGV8L1DZoppkiLhUGI1VcinKImELBFi0,928
+botocore/data/servicecatalog-appregistry/2020-06-24/service-2.json.gz,sha256=BrxAs7v8Bg3B1aOxvUPNIw7Elvln3wo9hSu4OoxplQs,7868
+botocore/data/servicecatalog/2015-12-10/endpoint-rule-set-1.json.gz,sha256=2vuaSs-J6dhk2O5ilrC5ypOW8QfCl4ac9LEM6BrtEeo,1153
+botocore/data/servicecatalog/2015-12-10/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44
+botocore/data/servicecatalog/2015-12-10/paginators-1.json,sha256=ghdoop27a-JBxcfHGVvA0vMp9y_Z-MY1R7TuRQCrmP4,2996
+botocore/data/servicecatalog/2015-12-10/service-2.json.gz,sha256=2763v1jH3RQW2bW-7Pi_gyRfjlicFQQIZzqeJvDECik,37405
+botocore/data/servicediscovery/2017-03-14/endpoint-rule-set-1.json.gz,sha256=MVQkESoUro4bjvRT5qVzhRvQBSZXiT0PQxXPKhtBah4,1306
+botocore/data/servicediscovery/2017-03-14/examples-1.json,sha256=iJqJB_1uy_oppRbcXbl5SmCA2yLLVdSdj674nZ7dSQQ,18861
+botocore/data/servicediscovery/2017-03-14/paginators-1.json,sha256=sKu-j-WBHT8KpiemY4vgLiQkV1Ub2GtqLbYiUxdkjjE,683
+botocore/data/servicediscovery/2017-03-14/paginators-1.sdk-extras.json,sha256=a89DrBwGFK_Oa_9ICtG1QFEBjaavhUhMm-2DyI02_Nw,166
+botocore/data/servicediscovery/2017-03-14/service-2.json.gz,sha256=kLMSbtZEWAmO6UENQQcOrsEYcULx3W7E4N3g3b3m1G4,20605
+botocore/data/ses/2010-12-01/endpoint-rule-set-1.json.gz,sha256=2zotIFxdjg14ynwSwr4p0CfDErvqDkuP6Mu0pVa7V9o,1146
+botocore/data/ses/2010-12-01/examples-1.json,sha256=LdOG9qOcWahQ6xYBc3_UEV-teA96yJJSesbf0fNI8Bw,28834
+botocore/data/ses/2010-12-01/paginators-1.json,sha256=G_7q2KFDP0LwwEUoCgd9qikwYlHoaFwDjQ_3CtWBVPw,883
+botocore/data/ses/2010-12-01/service-2.json.gz,sha256=L3afWuHXRGkKfgiygnpdDWKAoWiedBqaLaQGZhJySfY,36439
+botocore/data/ses/2010-12-01/waiters-2.json,sha256=4GF4zY3Tg43WiGAVWSJeabII8bSEU7_ElsMj_G3Bt68,380
+botocore/data/sesv2/2019-09-27/endpoint-rule-set-1.json.gz,sha256=olF3XFAQmxlg20AuBx8uAXfEnfBgJiK_-MKHHLaC4Bw,1649 +botocore/data/sesv2/2019-09-27/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/sesv2/2019-09-27/paginators-1.json,sha256=PY5qJB7wseX5NphauvzMB6XGJ3MkcixWUI4tLUR_2C8,895 +botocore/data/sesv2/2019-09-27/service-2.json.gz,sha256=9LPhlCAHvmBiXlK1NgY-_pGbra_5bBQo7KGh_-aDIgE,65602 +botocore/data/shield/2016-06-02/endpoint-rule-set-1.json.gz,sha256=bkHlGip96RCxUbrVWZzXyqiirpgLYnNGvSwims4ZrR8,1345 +botocore/data/shield/2016-06-02/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/shield/2016-06-02/paginators-1.json,sha256=MRQd38Sw6vMYcdoF_zRIXAdMaDQHs_indt6OtJxi0BE,361 +botocore/data/shield/2016-06-02/service-2.json.gz,sha256=v8h4qyEkakAp26m7kck8_tdC7GBiPlLPmHHLL4zkZJM,15250 +botocore/data/signer/2017-08-25/endpoint-rule-set-1.json.gz,sha256=UH5hGxAq65T_gvfTJk3iKYFCt1VqguMTjcAuzX_lmqE,1146 +botocore/data/signer/2017-08-25/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/signer/2017-08-25/paginators-1.json,sha256=vjItW2pdi1KsZB_HwJEZqWIDJNHlrKbyxSuN6x8LHkU,526 +botocore/data/signer/2017-08-25/service-2.json.gz,sha256=g7Yh2W2EXOnzvAa9rlrPxKJMEbjWcaZEZVeOzCF8dQI,10333 +botocore/data/signer/2017-08-25/waiters-2.json,sha256=ZvZgSYJd2QhWkeR1jaM1ECQ8295slZ6oDEFLtA2tYRE,607 +botocore/data/signin/2023-01-01/endpoint-rule-set-1.json.gz,sha256=Gpt-H7oX0gjI8y3-c9bzcmAamFFWfxuA8i86Y3pNqbY,1486 +botocore/data/signin/2023-01-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/signin/2023-01-01/service-2.json.gz,sha256=JMFgrJqHjIEs5FWOeQBfcpqN7eadjr4ee4qC5OTu0Fo,4175 +botocore/data/simspaceweaver/2022-10-28/endpoint-rule-set-1.json.gz,sha256=jtdUEAoIwohCYd0wYaolvNtHLgBbR5-S5ELZrlBFju4,1303 +botocore/data/simspaceweaver/2022-10-28/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/simspaceweaver/2022-10-28/service-2.json.gz,sha256=wRUEpOjZS7kjtzjKuVFfguoMROGgIq3Twq7uxf3CYCE,6915 +botocore/data/sms-voice/2018-09-05/endpoint-rule-set-1.json.gz,sha256=PUGloTB8BgoNyYJ_qDvSslAnOo3YtbtiTBetn_ozU2s,1307 +botocore/data/sms-voice/2018-09-05/service-2.json.gz,sha256=jPx_kkccKH4jge6fpbrhHikjxll3E7pFUksdoHLQ6ro,3344 +botocore/data/snow-device-management/2021-08-04/endpoint-rule-set-1.json.gz,sha256=JQdClsZbLQId7fZ1FK5YuOXCw9d9R9OfxNHmsepKsfc,1162 +botocore/data/snow-device-management/2021-08-04/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/snow-device-management/2021-08-04/paginators-1.json,sha256=rNmRDBuxcetGirFRJQJA1vFXHeMY-sFLZ8BSld7BkFw,677 +botocore/data/snow-device-management/2021-08-04/service-2.json.gz,sha256=qz3P-Sc4pIg092OxLyApp62GS6G9oetQwp1KOT4dI4w,5955 +botocore/data/snowball/2016-06-30/endpoint-rule-set-1.json.gz,sha256=N6d1pMdMTWPU1S9GFwSU9n5yOkihA8rvmNx1bX0f0PQ,1150 +botocore/data/snowball/2016-06-30/examples-1.json,sha256=c2uRhH8SNSzMSPVVlezBwPPoxWxhOl2QxkdNc0A37q4,18099 +botocore/data/snowball/2016-06-30/paginators-1.json,sha256=vMdXg3dD9a7r3ifpM8lAmkBfXJBVz66l-6uUq_4OJjo,1061 +botocore/data/snowball/2016-06-30/service-2.json.gz,sha256=CVPLqgeRNNHiqsAKcJcZdtYFmqmR1ZQCJ8g26sIO_XE,17015 +botocore/data/sns/2010-03-31/endpoint-rule-set-1.json.gz,sha256=9G_RBm-V_FF2ZSqx7rmgMbPEvydXy5D_1SMkBiEi5ps,1228 +botocore/data/sns/2010-03-31/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 
+botocore/data/sns/2010-03-31/paginators-1.json,sha256=a5cU7i3ZYF5D-u8S4oYs5kDUAAeav2kcWeG21u8RjPg,1241 +botocore/data/sns/2010-03-31/service-2.json.gz,sha256=KQjbPl9IlCd9uiuccoA9JkUelBMvyIgUokdE2N1vAs0,26186 +botocore/data/socialmessaging/2024-01-01/endpoint-rule-set-1.json.gz,sha256=Amnk6neg8M_PANP_N8h94N8XIqtZZmhtzIXWWc8o9QE,1305 +botocore/data/socialmessaging/2024-01-01/paginators-1.json,sha256=BH0O-x9zwgKqsun7vNHC6L3vzsKP_tfiueJ7G6i8nT0,583 +botocore/data/socialmessaging/2024-01-01/service-2.json.gz,sha256=XuEPOrOPnyeYPYZM8UlGRjmCL_S1LZ8EYTjh5uVRgOA,9926 +botocore/data/sqs/2012-11-05/endpoint-rule-set-1.json.gz,sha256=8yxLx5efXgvKfbBelkUqV4Gdc5MonPVKPAzTyx1lo_E,1230 +botocore/data/sqs/2012-11-05/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/sqs/2012-11-05/paginators-1.json,sha256=fwyodl-UMt13laxQWAtCY9DEgncIy3mWPV-tS9M5m50,363 +botocore/data/sqs/2012-11-05/service-2.json.gz,sha256=cwVENDJSiA5Y4Vs-hHoWKPg_03yxiiGUApzXtXjgfhI,24462 +botocore/data/ssm-contacts/2021-05-03/endpoint-rule-set-1.json.gz,sha256=UhwOXyLH_Ep4hAX9HB2vLwRtURBOMjDCevVnbcHUqdY,1152 +botocore/data/ssm-contacts/2021-05-03/examples-1.json,sha256=DgD8jM1qr-3c2rDYBCXlsWUyaA_3S4VTwUogOr5KX0s,28860 +botocore/data/ssm-contacts/2021-05-03/paginators-1.json,sha256=Zvq8EuioTe0ZGvZrNX07bNJzAplhIUTDre4-HOhKrsc,1872 +botocore/data/ssm-contacts/2021-05-03/service-2.json.gz,sha256=bP_0SZxX5_YL4FznTFYVarkbmbd09zyhY_YZHJ-2ySc,12928 +botocore/data/ssm-guiconnect/2021-05-01/endpoint-rule-set-1.json.gz,sha256=6732QyT5tgz1M0n5SgLdjzLSqeg5CLdadoO4URKrD1A,1303 +botocore/data/ssm-guiconnect/2021-05-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/ssm-guiconnect/2021-05-01/service-2.json.gz,sha256=lRvlFRPwz1hrXXHggjZ42nzvOQA2z7EVvKQNREfG6wY,2137 +botocore/data/ssm-incidents/2018-05-10/endpoint-rule-set-1.json.gz,sha256=5AiXT7-dK0l1WWOTnli7D-Y4FBcTA266LQeHtmo4AME,1154 +botocore/data/ssm-incidents/2018-05-10/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/ssm-incidents/2018-05-10/paginators-1.json,sha256=4qlmECBX9jmRprL7ROo4h4MHrfWWjH2gGPLr9sjuV3o,1259 +botocore/data/ssm-incidents/2018-05-10/service-2.json.gz,sha256=XqcOrwstE9n88fHYaCzN-YU55l7FascCsya2fTHFvJk,14417 +botocore/data/ssm-incidents/2018-05-10/waiters-2.json,sha256=1xhj2BSaBj_CCZlCG7wTLL4ZB0e8_Uuq97DXjf7rADI,1465 +botocore/data/ssm-quicksetup/2018-05-10/endpoint-rule-set-1.json.gz,sha256=bZDcMhNNwXasxsHiIPByaxL3C-n7lsmUpoLedYmxxvY,1304 +botocore/data/ssm-quicksetup/2018-05-10/paginators-1.json,sha256=ytpk-IGuYLt-RpYS7gbZI2X5ie6eJJCCLWzTWUDNOK4,399 +botocore/data/ssm-quicksetup/2018-05-10/service-2.json.gz,sha256=xxH6aeM7jj1tAd7bD6YXvsN47dg5E4qrzD_NIW98ZNw,7343 +botocore/data/ssm-sap/2018-05-10/endpoint-rule-set-1.json.gz,sha256=7CKbM-dzrLrG-myeYDXhMKGBCd9n0GPfVu04rNYOnDE,1298 +botocore/data/ssm-sap/2018-05-10/paginators-1.json,sha256=i0Tvuamq_Ap6wKbKpSApgTZGJLyfNvFSUViSk9VAMfc,1615 +botocore/data/ssm-sap/2018-05-10/service-2.json.gz,sha256=Eh3Znv0Cg2egJo3ZuOlSK5n8pltKQ7j3-cvmAed7EkY,9460 +botocore/data/ssm/2014-11-06/endpoint-rule-set-1.json.gz,sha256=cZchCWNJvxesH5CqJYhoy65wV-hHJFTU0_mx-D2X0TQ,1230 +botocore/data/ssm/2014-11-06/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/ssm/2014-11-06/paginators-1.json,sha256=uvWveFCU2bgggyGE_TFsH5Qtpca7k5zdQNoKpxlALk4,9119 +botocore/data/ssm/2014-11-06/service-2.json.gz,sha256=s8qwfHX_vk8sNu49siHiXfnrRZfUbNh3oayL5vR8AKI,134833 
+botocore/data/ssm/2014-11-06/waiters-2.json,sha256=eTUBQgvIuYcA9hhUZZ3mY4KqLap6FbcReyPUqdPYduc,1457 +botocore/data/sso-admin/2020-07-20/endpoint-rule-set-1.json.gz,sha256=gphlTYS6_hW9Bz6KxDrWehqRXn63_u2BCVosry2ZYDc,1230 +botocore/data/sso-admin/2020-07-20/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/sso-admin/2020-07-20/paginators-1.json,sha256=blhgKthRgscSeZRmLa2ASHICJ4LeT3IVnlIJ0FAlpdA,3714 +botocore/data/sso-admin/2020-07-20/service-2.json.gz,sha256=pTxvG_H5YcEeGUR-lLgq6CKe88PPp_25vwlj4vIcyF4,22511 +botocore/data/sso-admin/2020-07-20/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/sso-oidc/2019-06-10/endpoint-rule-set-1.json.gz,sha256=6QDJZYx290ApJZxC_3-bi9C7KRru4LAEak_O0T9MIWM,1231 +botocore/data/sso-oidc/2019-06-10/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/sso-oidc/2019-06-10/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/sso-oidc/2019-06-10/service-2.json.gz,sha256=rgBEKa5OHjBReJib7GGp1zdnMBHxGK2AW4m8eh6inRA,6085 +botocore/data/sso/2019-06-10/endpoint-rule-set-1.json.gz,sha256=9wdErHdwL5teOj-0GMU4OCOAshDGdqkeOacCZ9Vvc2U,1233 +botocore/data/sso/2019-06-10/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/sso/2019-06-10/paginators-1.json,sha256=IScw_JafDnQ5pGRs-y61MtR0d4glhFcZR5D-8KLn2-Y,356 +botocore/data/sso/2019-06-10/service-2.json.gz,sha256=2HmpuNND_2OET91Jt0EuB2DapmiPWripMdtLjn1Ppso,2954 +botocore/data/stepfunctions/2016-11-23/endpoint-rule-set-1.json.gz,sha256=qE7tYGmYIX6PB2uw4WUIGuLxfaxH0BATWPicMoL1Hqg,1208 +botocore/data/stepfunctions/2016-11-23/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/stepfunctions/2016-11-23/paginators-1.json,sha256=2p0xo5UgVh-6RA8-svDfT8HBM_Cf2d05upRi3VIOnuY,856 +botocore/data/stepfunctions/2016-11-23/service-2.json.gz,sha256=gJdPd5MHkhHRLpS32-CyP8AwRleGC-f9ykuSPQgXcHg,32628 +botocore/data/storagegateway/2013-06-30/endpoint-rule-set-1.json.gz,sha256=r8wkxJXnvTgD_ntGDWEIgsVyWrpG4IYyQBb-hByQpxk,1153 +botocore/data/storagegateway/2013-06-30/examples-1.json,sha256=2-mBPJqbSFv2f3t6KqdtrU5dW0Z49zylBvFGmoQEAk8,49947 +botocore/data/storagegateway/2013-06-30/paginators-1.json,sha256=eu2RmTSbh-kl0okK4tdVRDTS3tlqOAoyoeuPW4f_GJ4,2101 +botocore/data/storagegateway/2013-06-30/service-2.json.gz,sha256=QkyKZknC8pzv_ukJEBFHF7vjlNTtl3XSmH9yERRQ3ZY,57374 +botocore/data/sts/2011-06-15/endpoint-rule-set-1.json.gz,sha256=ukQCpfrSg1XNWEhWM7q4NIjGe8IJJcf-ItHLyX-9Atc,1775 +botocore/data/sts/2011-06-15/examples-1.json,sha256=yD_CcHN2f9t9PlGQ5NzOJaCYccexGPoonbBW2T4OMck,11885 +botocore/data/sts/2011-06-15/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/sts/2011-06-15/service-2.json.gz,sha256=TsGYM7DxxFFGSsY20jLlQQxGZNyhvQD96aMk5Deqw9g,19931 +botocore/data/supplychain/2024-01-01/endpoint-rule-set-1.json.gz,sha256=mxUUrIQ3v-2Ad3bHgjJzAur8gotw8h6paX-ZDVaXDSI,1295 +botocore/data/supplychain/2024-01-01/paginators-1.json,sha256=LBjGaMynSYN1nFn9bxNRdFiHV-bdpQsauaC28a-tWpI,1066 +botocore/data/supplychain/2024-01-01/service-2.json.gz,sha256=zATUIII93KSDx1jCWjfa0yzpLtTtbbQstsnsV-uTNXQ,14123 +botocore/data/support-app/2021-08-20/endpoint-rule-set-1.json.gz,sha256=LZDGuh7xF2s6mR3AWuPkanFNADYg4EaR53aQbQXEUSI,1148 +botocore/data/support-app/2021-08-20/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 
+botocore/data/support-app/2021-08-20/service-2.json.gz,sha256=c7F4s5AIz8rkr8hxXIqkeL2atBVYiO3khbiI6fHMnkk,4271 +botocore/data/support/2013-04-15/endpoint-rule-set-1.json.gz,sha256=_ugBkzabqmJ42S17Yz1m9vZTgtkEFEQFBoNO98ur3Ms,1557 +botocore/data/support/2013-04-15/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/support/2013-04-15/paginators-1.json,sha256=b74jGAMdiNu8tKXAfyVILd2DpHqQx91qieo1BlSXpK8,363 +botocore/data/support/2013-04-15/service-2.json.gz,sha256=hgSkEBzGMOQTf9_C0z2-Unrx-bRgBWlWLXfpF50-y8c,11901 +botocore/data/swf/2012-01-25/endpoint-rule-set-1.json.gz,sha256=lXOBTxA0s08NZR_KvgqhFncTB4h3nRP00bJrakO2Tpg,1406 +botocore/data/swf/2012-01-25/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/swf/2012-01-25/paginators-1.json,sha256=tOiP-8y-iuvOSJM35cQg6qCE0ai5dd5IWenCE1BH_yk,1496 +botocore/data/swf/2012-01-25/service-2.json.gz,sha256=bj5wETyk1xi_dEzDtEGBgnL26tvJEsCOaehp0Emp_qo,34463 +botocore/data/synthetics/2017-10-11/endpoint-rule-set-1.json.gz,sha256=AYc-3MZfeM4Cnvqtdnp8k9yYVw43xzo70xVzTwtB9X0,1150 +botocore/data/synthetics/2017-10-11/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/synthetics/2017-10-11/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/synthetics/2017-10-11/service-2.json.gz,sha256=ZV8R9URMbU3ZjEKlx9eM2OSEz8Xv1kXbjBx3YGuHWRU,18363 +botocore/data/taxsettings/2018-05-10/endpoint-rule-set-1.json.gz,sha256=f0I4p7-3xknixTgigovxveMo3RKIZbg3zOYcBIZzRhQ,1377 +botocore/data/taxsettings/2018-05-10/paginators-1.json,sha256=13Jr861DGqLOCbfOeKUIwyYioTGmqiwvg_Cps7HFSx0,572 +botocore/data/taxsettings/2018-05-10/service-2.json.gz,sha256=l2yhf0Ze3GNfeO3s8nqHABuVOng9RVVVceq35-wimsE,14016 +botocore/data/textract/2018-06-27/endpoint-rule-set-1.json.gz,sha256=ROyG3tWhN1iWvAdozy1rrmS9KoT663AWBgpVYdHRFGg,1149 +botocore/data/textract/2018-06-27/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/textract/2018-06-27/paginators-1.json,sha256=IQfBMdVD87vGqQnApoCTQrnbboZ3niS3DPFSlfrqh80,363 +botocore/data/textract/2018-06-27/service-2.json.gz,sha256=gZB2NyiBiqrOTrdK3PDt28DnhATvMaIo6uXHk4wvBKY,22046 +botocore/data/timestream-influxdb/2023-01-27/endpoint-rule-set-1.json.gz,sha256=cV1WRTsRpTlBIyQAzpN1sdfpwoVhWwZmEyIr4xaOKzY,1307 +botocore/data/timestream-influxdb/2023-01-27/paginators-1.json,sha256=MrHvKPvWlx0rMP-zmxg-KWRU_2dNof_f7e77IbVFi4M,688 +botocore/data/timestream-influxdb/2023-01-27/service-2.json.gz,sha256=rUSQCsuM4mO18Jl72uEUBV_a3zV7tZoi-H7CHAh_qhg,14484 +botocore/data/timestream-query/2018-11-01/endpoint-rule-set-1.json.gz,sha256=Q_zP9b9v9GWpVolnrvnffHQzuPiQgoyhib7qkiUgAuM,1375 +botocore/data/timestream-query/2018-11-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/timestream-query/2018-11-01/paginators-1.json,sha256=0Ppw_OmGYMcK7_ULTp_ACg7XEYwjNmlL1Uu5rXuTMRU,651 +botocore/data/timestream-query/2018-11-01/service-2.json.gz,sha256=5ekGDsc_8sDY5-ok2OZAOY5AIXLO8DoIvhbZQ3HyaFQ,14832 +botocore/data/timestream-write/2018-11-01/endpoint-rule-set-1.json.gz,sha256=vI1r2MbzPKpIOqZVLvsAi88xM3JpCuqcTojgO20To90,1374 +botocore/data/timestream-write/2018-11-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/timestream-write/2018-11-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/timestream-write/2018-11-01/service-2.json.gz,sha256=NsahXl89slHTxQGfVezN9sIj73PhjLNoLZFsh4EonQE,12148 
+botocore/data/tnb/2008-10-21/endpoint-rule-set-1.json.gz,sha256=s_Q1sp2OTVIWy6_-3V0B0T8ppaRnWxVg3IopObMsIGo,1295 +botocore/data/tnb/2008-10-21/paginators-1.json,sha256=oz2uxUX8r9w5q6IjSx3zIxuNl3_jtJnCGLbFF1j0okw,932 +botocore/data/tnb/2008-10-21/service-2.json.gz,sha256=9vjM3_K3BtLh9bHHSdxrV4pSjiU9_3H7EQ5sp5KOw9Y,10024 +botocore/data/transcribe/2017-10-26/endpoint-rule-set-1.json.gz,sha256=KCOfqQjXAMh2v4osn0CXowe2WO-gv53m54FcnnjekM4,1338 +botocore/data/transcribe/2017-10-26/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/transcribe/2017-10-26/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/transcribe/2017-10-26/service-2.json.gz,sha256=9yyaS-47SLKiCzXaC6ZrJ61bu4LihjIkfzEffcwp3M8,34483 +botocore/data/transcribe/2017-10-26/waiters-2.json,sha256=A7s_Vv8U1ZbFcz9_ftc9qvvJsEi8j6nwiwPVBMiDsoI,3500 +botocore/data/transfer/2018-11-05/endpoint-rule-set-1.json.gz,sha256=Xy1LJ6cvGBRiJFn6OsskUqPzo7EKpP44lMYP-fPsJ-g,1148 +botocore/data/transfer/2018-11-05/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/transfer/2018-11-05/paginators-1.json,sha256=q8S788HxDFyo8rjz_-33w3gJTVEEJ3VDafrzWIwkJSQ,2416 +botocore/data/transfer/2018-11-05/service-2.json.gz,sha256=hsLMtnl3QlBMwDndYSjgr9eLaiSGMERf8mpC9vya6uI,65960 +botocore/data/transfer/2018-11-05/waiters-2.json,sha256=hVdSZ-CDADnA9zRgSm0tK-qrrIGLUKXug5j6Ave1F-Q,868 +botocore/data/translate/2017-07-01/endpoint-rule-set-1.json.gz,sha256=yIk112SyvRbv9mqhHmli0wQfKUa6iruLLssJ7cGjo9k,1149 +botocore/data/translate/2017-07-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/translate/2017-07-01/paginators-1.json,sha256=eE-1ycW-V5DQ_8t4NsRrfJYKhUnAaS7d5OyOimdaOaA,209 +botocore/data/translate/2017-07-01/service-2.json.gz,sha256=7Ig6G6qfsDqabofBn62gj-AVi6fbXTAYdgYWVjZRs58,13025 +botocore/data/trustedadvisor/2022-09-15/endpoint-rule-set-1.json.gz,sha256=UUzsnxp8SIzZgx5v0lR-4tgB67IBZbO3R5wFl_gHDI8,1303 +botocore/data/trustedadvisor/2022-09-15/paginators-1.json,sha256=eM9ClOnA5h4jNM-0Xgyq-ZplOH89DYmmJzOJ59FieIY,1226 +botocore/data/trustedadvisor/2022-09-15/service-2.json.gz,sha256=6KUsAoE83vyka7_AkFQ5_sqB9iGQRIM6toMwbBNBtXE,5193 +botocore/data/verifiedpermissions/2021-12-01/endpoint-rule-set-1.json.gz,sha256=KQFa14STLW9Pxx-lrKj2CXUEbkgwwNbNioi-z6-t4_Q,1306 +botocore/data/verifiedpermissions/2021-12-01/paginators-1.json,sha256=4cQu2IKJA_8dQUylEDfAsxkN5ZxnoXrjv9rRdWg3rsk,709 +botocore/data/verifiedpermissions/2021-12-01/service-2.json.gz,sha256=CpFOJjJqXoqk41lEg0jM1h20xHQJPi5m6O5dsnUGdlQ,25931 +botocore/data/verifiedpermissions/2021-12-01/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/voice-id/2021-09-27/endpoint-rule-set-1.json.gz,sha256=CWUxYGPNV7-dJM7Uho5jLyh1rTBoeMr6o3_RKEPuYzc,1148 +botocore/data/voice-id/2021-09-27/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/voice-id/2021-09-27/paginators-1.json,sha256=mgfNZB61NZhxJAtDiZ1WOqHTvwaWmArbDCHTAkdf520,1073 +botocore/data/voice-id/2021-09-27/service-2.json.gz,sha256=yIiGmD2NDmdubOdTCYinyV7v-0QBPPu3r3gGdLmcIME,11875 +botocore/data/vpc-lattice/2022-11-30/endpoint-rule-set-1.json.gz,sha256=aAbgo2GgqZ2tIDoyixdjN6Csqv7t6lv4ct8K2yL7wHw,1301 +botocore/data/vpc-lattice/2022-11-30/paginators-1.json,sha256=SvSN7CWErphlp2WSxvI_k2ML9sIm4oqTKNRBzZrWBKA,2586 +botocore/data/vpc-lattice/2022-11-30/service-2.json.gz,sha256=myWQHMP6cgdx-dSo3tbv7XOVO5apI5vLGZjIFtKIPNE,26481 
+botocore/data/vpc-lattice/2022-11-30/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/waf-regional/2016-11-28/endpoint-rule-set-1.json.gz,sha256=jXf4U1dXRN9t_aM8tNlJr_9BSI_dQRXyZoYHapRa31s,1149 +botocore/data/waf-regional/2016-11-28/examples-1.json,sha256=6OPuCnLynJIfGO-Vxhb9ZZV9ktEKhpByvf2jSwAg-DY,29749 +botocore/data/waf-regional/2016-11-28/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/waf-regional/2016-11-28/service-2.json.gz,sha256=dPicZKrq0fOzKw6OQaiIVWpIYHD8GEcD4osXsyK8PfQ,42885 +botocore/data/waf/2015-08-24/endpoint-rule-set-1.json.gz,sha256=fEVuZcLDOrpqOhamxiNN1Q9WIh8Gytu2xpAJzfOAbJc,1340 +botocore/data/waf/2015-08-24/examples-1.json,sha256=6OPuCnLynJIfGO-Vxhb9ZZV9ktEKhpByvf2jSwAg-DY,29749 +botocore/data/waf/2015-08-24/paginators-1.json,sha256=ulE-ztimMiePJZAVUJkWb57N9b2OKV7xz_GIOHCw7PM,2717 +botocore/data/waf/2015-08-24/service-2.json.gz,sha256=OSl64DyEUZBt07v7kQbZmyrIOvN2Hee3fpwY_OO_c8M,41724 +botocore/data/wafv2/2019-07-29/endpoint-rule-set-1.json.gz,sha256=lOpSIztgUYIdWDdKl2euzzMJUIhy_ctMEUB4X_TWnJI,1148 +botocore/data/wafv2/2019-07-29/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/wafv2/2019-07-29/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/wafv2/2019-07-29/service-2.json.gz,sha256=jd7h-w9LFNfS_uo7wds4njLuFNTxsu6w3RyQu2sObYw,78520 +botocore/data/wellarchitected/2020-03-31/endpoint-rule-set-1.json.gz,sha256=sT-yud_AzKvhUIzctS5DrlPsjKjN0PDufl0H7Id9NWA,1154 +botocore/data/wellarchitected/2020-03-31/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/wellarchitected/2020-03-31/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/wellarchitected/2020-03-31/service-2.json.gz,sha256=lfXyA3Elxv8EkuazwRhk_RJLkEOt76ST0axKuB8cXew,21152 +botocore/data/wickr/2024-02-01/endpoint-rule-set-1.json.gz,sha256=KQXXhmypgZzgONTt1a4BqhV4ZG31ofgCZoh6oe8yeiI,1302 +botocore/data/wickr/2024-02-01/paginators-1.json,sha256=z_vaZpEWOTnwkP_Wdm5bru-1LQERs8qd2ufKsXmlvAU,1344 +botocore/data/wickr/2024-02-01/service-2.json.gz,sha256=8cZxIo14hOhFtbi7ZaueiRSP9Ok0mLIhSPTCu5n-Qks,20363 +botocore/data/wickr/2024-02-01/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/wisdom/2020-10-19/endpoint-rule-set-1.json.gz,sha256=YZYZRDFcNjOZz9jr85hafGUHZ3deY4XfPmmLYwm3qyA,1148 +botocore/data/wisdom/2020-10-19/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/wisdom/2020-10-19/paginators-1.json,sha256=Mrm9rH5_xIiJTb4YXQUznBuP7k3tAPM5lVtE0HpFnow,1778 +botocore/data/wisdom/2020-10-19/service-2.json.gz,sha256=aovaggVGZbZ_Pv8r3WKSpb5bijjfkt9Frd-5P49IDvE,17064 +botocore/data/workdocs/2016-05-01/endpoint-rule-set-1.json.gz,sha256=DAYeDK3LGRr5dBQUip3XAXgztuS4_4lbOMQgdTCXTjg,1150 +botocore/data/workdocs/2016-05-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/workdocs/2016-05-01/paginators-1.json,sha256=PERmz7nK6Ur9t877K2ivamloNl6knySKpwgvjbOcUe0,1666 +botocore/data/workdocs/2016-05-01/service-2.json.gz,sha256=MibLFMypZtM5_yu2Y_5Xo_3cJdQUDUaps6rWTUHq7M0,16249 +botocore/data/workmail/2017-10-01/endpoint-rule-set-1.json.gz,sha256=PCrHZXth-CObER188-mVeAz7BuEsXnPDgfw-oCEnE0Q,1150 +botocore/data/workmail/2017-10-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/workmail/2017-10-01/paginators-1.json,sha256=9nz-4DRPw6f_GmwUgKagpcRqgRjcQXmRkBBpC8Yk35E,1747 
+botocore/data/workmail/2017-10-01/service-2.json.gz,sha256=l1e4CJtLA1xps7yumpmXYxDgR4EMTvt44hi8eMtrpIw,27947 +botocore/data/workmailmessageflow/2019-05-01/endpoint-rule-set-1.json.gz,sha256=GaiQZTN5AXBK8wvdPFI2yv-KBpczh35dvvnBgbXp720,1158 +botocore/data/workmailmessageflow/2019-05-01/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/workmailmessageflow/2019-05-01/paginators-1.json,sha256=hIQ7AbLBsY4fPSNLVMg0dS45m6cjZKFTjbp3ZLh4zj8,23 +botocore/data/workmailmessageflow/2019-05-01/service-2.json.gz,sha256=WKi2HYcTrrCUDaBjRJbu3VdHQtdNGZ45-7S40I_lsSM,2293 +botocore/data/workspaces-instances/2022-07-26/endpoint-rule-set-1.json.gz,sha256=4Wpw0aEBqGzLRHi9dByA8NqkyoD8sVhRfF8BRsaYAFE,841 +botocore/data/workspaces-instances/2022-07-26/paginators-1.json,sha256=sQfPhLdTn-F-MxfRXo7SLcJaHDvQ_P5RKI12w1QlaFo,539 +botocore/data/workspaces-instances/2022-07-26/service-2.json.gz,sha256=sRkL5jgUiEEq4uf333FUriTwpYnvICoMj7IV6PYkKDg,8640 +botocore/data/workspaces-instances/2022-07-26/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/workspaces-thin-client/2023-08-22/endpoint-rule-set-1.json.gz,sha256=1jF7GBBMtOgGfcbn1jIsiXptKEqIjlgdNiM8gynQYrg,1297 +botocore/data/workspaces-thin-client/2023-08-22/paginators-1.json,sha256=eoHZHYlG1VP49fqQ29q3I58cojJxkZ8AQQg_xOyd10Y,525 +botocore/data/workspaces-thin-client/2023-08-22/service-2.json.gz,sha256=mqBNL_FhpyuoKla9TrNUBp1lEeF7-hVt3XUw_aoANY8,6460 +botocore/data/workspaces-web/2020-07-08/endpoint-rule-set-1.json.gz,sha256=Edon-oxTGss9tgWmQpgTs2iWc3_noc2A2aZPeytUXMU,1154 +botocore/data/workspaces-web/2020-07-08/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/workspaces-web/2020-07-08/paginators-1.json,sha256=sN0zyznWr8VY3qKPlet-yVbvoL5SfLBtaJf0jYyes08,551 +botocore/data/workspaces-web/2020-07-08/service-2.json.gz,sha256=sH0ORVBHKKWA4l1yO2U96uEkbfc0e8He9ZYA4mxzOYo,21766 +botocore/data/workspaces-web/2020-07-08/waiters-2.json,sha256=fsA0_mwCl57UFPiqxJUWLb9AE7gd9kpBT4x0_6Q7dww,39 +botocore/data/workspaces/2015-04-08/endpoint-rule-set-1.json.gz,sha256=HVCOdzdLXf9FqBOcfSyegSuV9zfycHjtEUjXjQiHTdY,1151 +botocore/data/workspaces/2015-04-08/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/workspaces/2015-04-08/paginators-1.json,sha256=RiG7XOsbS-HVzRL_pYhhradEGVQnhRgv41nSgAAzABI,1504 +botocore/data/workspaces/2015-04-08/service-2.json.gz,sha256=-LalbiW2Pvrz3GTC0CIp269iuDBjlbFLK9_7WhlkCQw,40861 +botocore/data/xray/2016-04-12/endpoint-rule-set-1.json.gz,sha256=Rd-2_j3RhkBDCvJOcrjUPL4oQPtIGVe1YGYMeHvE_eY,1146 +botocore/data/xray/2016-04-12/examples-1.json,sha256=K3b6mgYkitvcecSlJT-iV_EQATmvOySs66iKJI5qx0g,44 +botocore/data/xray/2016-04-12/paginators-1.json,sha256=2BXVUlpR51GRav7g4-ML3Fr7U9pBDqXax4lZYeJnwZU,1785 +botocore/data/xray/2016-04-12/service-2.json.gz,sha256=KPQxh239MKo7OIO_1vxGui2AXKq2fnGPpZcy7BZDJTo,21603 +botocore/discovery.py,sha256=n1lxC_BzuKlwkj2fi-KtVTK-oku8RL9vWrdb7r4oaBc,11069 +botocore/docs/__init__.py,sha256=Mxx6eiy76-SxPpUsPMUPoHS-Wjy8Mj1gDfrowCu0S4U,2027 +botocore/docs/bcdoc/__init__.py,sha256=V2g87AefB2DOD9_3xIF5k9Nv5ttb4_gNJOVvSF0Mp3s,588 +botocore/docs/bcdoc/docstringparser.py,sha256=a-TefXFJysEog9AdTgTwa8Pzik9sxcFlbhQ83jGwFR0,10197 +botocore/docs/bcdoc/restdoc.py,sha256=FLK19ZxPqz-kHJ7SF97f_dhqhwv_wggDiANZ1rHNwrQ,9771 +botocore/docs/bcdoc/style.py,sha256=bNfbjquLLfEt9F6HH_g7FwMeCU9ZZJLNXN-spELATvY,13120 +botocore/docs/client.py,sha256=Ur2X-uoT6OMQ9WhxyZPmCi0OAe8ZFjjbj4GDeCqINX4,17321 
+botocore/docs/docstring.py,sha256=Jo9lA4ZFPq75cNCUfpz7zWiXlDB-Cn3bP62cZvBntfA,3648 +botocore/docs/example.py,sha256=ZOCQpJ8irhZ-0Urf7VJkd0SwDeJ1WLvyJc-TMBCZ4Ho,8945 +botocore/docs/method.py,sha256=_GiyG_2GV1uNOCq2e5MEEc300Wk3rACROLUAAxxay7s,12058 +botocore/docs/paginator.py,sha256=QlDyldRiPT8JyNpeUIyx9rsZoy7GsCYGVY-o16oCGkY,8988 +botocore/docs/params.py,sha256=k8GP7bxv1lWMnxKb84QKjEo182pOwDUt7q8VV7vcmB8,11727 +botocore/docs/service.py,sha256=oSPLoXn08cSsFDToEuFp4DDOl_COuSYQJ7q7OEf2lfg,4990 +botocore/docs/shape.py,sha256=EZze3L3AhPNnx_iHvRtn2Z-04TbMHTZ2_okdpAmwPOc,5198 +botocore/docs/sharedexample.py,sha256=WrAklim6mYWOgqcm9qmm5ajXpr2FqIGNuXnmlQrmNiU,9202 +botocore/docs/translator.py,sha256=v9ZTifRrwmfxBHCBaRPoZqufvpHI31pdVMny1wcVi-4,2331 +botocore/docs/utils.py,sha256=Ukqkmy8ncFE8JjwDaj4cLyQ4-yroos6RSixykh3bW5w,7301 +botocore/docs/waiter.py,sha256=kEFcWwO0BCj1roRSqGZjoz3YQx8paCnLgNPijeXwcLU,6546 +botocore/endpoint.py,sha256=N-sWbt6PVUzn7U6OB8MMXUiSS6CTZPhf7gDhlJpcOTQ,16739 +botocore/endpoint_provider.py,sha256=OvId9vKApaVVOJTy4i9EST8SpjyKR-wJrIZAWqJQQAY,23005 +botocore/errorfactory.py,sha256=hdrxsOw0ihpT540ukWPbWqc7D-Dxe_la5H-ZcXgdLJ4,3722 +botocore/eventstream.py,sha256=3giwq42Lq1bg1j28-W2EfqQgbaS4kZKIyhn59pkR9s0,20279 +botocore/exceptions.py,sha256=otjffJnMwhQCdVVJZt0F3QTZejt674eS77jDfEkqOQE,24126 +botocore/handlers.py,sha256=RLYXZjUwh0xFavSpy_U2z5hP9kyprsDNBKBu4cMyZOU,66613 +botocore/history.py,sha256=QR1WnpJYTo02Rz3GqWt45sF6wzu6EQrM_kS3FPH58t4,1744 +botocore/hooks.py,sha256=pITClZg88eiQX3PsQ1_32wydi52LJDIanF0Yvf2B4xI,25053 +botocore/httpchecksum.py,sha256=Z29nbWKKhFM_Gh4qD0LJWTlBbp5o_U-__97jFmiEf_A,19452 +botocore/httpsession.py,sha256=1wM2kzb6-Epb7vG6hqh4PWOOs3vTQQtMOpiiblRdk8c,18696 +botocore/loaders.py,sha256=80fw6XvffeFplFwugoqB-3yeAum55FpunxRY2pohk9I,18834 +botocore/model.py,sha256=T3ym_Y0G5LT-sKzvBwDTKmPFYr2Zznasipl_fzUXphI,32347 +botocore/monitoring.py,sha256=RDflkGbBrwnsH4pSENOkWikk1UuqkJVqHSYpHQtvK5M,20594 +botocore/paginate.py,sha256=SYqY0TZ4OJIkp5ZvqMUKkn8nJHW8VS7lS_4hm1npGOI,27909 +botocore/parsers.py,sha256=aLe-HLrgOQG45UZAFwydmkbm7jTyWttu86F5aVLIXbs,60400 +botocore/plugin.py,sha256=zLC_HxZ6nI8qSYfq05Te2w8ekU3dcD0jOl2Sh_qBhKk,2534 +botocore/regions.py,sha256=k8gPG2C16QzETAKMPw6fqK3___jj3oJApVgd5HuMaOg,33932 +botocore/response.py,sha256=4u5vgew19drph59fXGANyiK3iixDzvsNrDuLiUzp0R0,7865 +botocore/retries/__init__.py,sha256=YaZ6AwMRyuDBs5fOvl-PAvxQxZE2RBlcad2JmLOMo8k,121 +botocore/retries/adaptive.py,sha256=0Y0QjSgK0sGS1nbWZV7wiBZgR82a-nA-vL5HjQadLOs,4207 +botocore/retries/base.py,sha256=rGJYVZEXLGSQ2BnaIT-W9ccGtSbIMvU-wzmV78d-Ccg,797 +botocore/retries/bucket.py,sha256=sZ5TGGMCyIh3qcRI5p5csNXPvtXZ2nC-sH6u5wJiIac,3994 +botocore/retries/quota.py,sha256=bijUNfy2fejuma0OB43sIn57OdRDhLSdFV_xCmcuTv4,1937 +botocore/retries/special.py,sha256=hePuqKytb0BmjKmfXbMf4fGrwiArbqu_HBepROpmi7U,1664 +botocore/retries/standard.py,sha256=Fu8JHmH-sCvS2aSMf1QjLXcoX6CQ3zUQoNt-AUXcx70,19971 +botocore/retries/throttling.py,sha256=x8pU_jMyapr0YODg8mtyYoXa8MzDAf0e-bWg9EfkFos,1779 +botocore/retryhandler.py,sha256=0cmoBCE3aIo60-co7idqO9FA2pIcyoI3NcDD_EHv0Yw,14702 +botocore/serialize.py,sha256=LmgqJo_qfFS0puj8DbeQxX-iILHrkAId69MZ3tifh3I,51612 +botocore/session.py,sha256=Hak58qxe5wV3XF5WARd9lxdPXOB1XtaVAVEYy2TGHWw,51661 +botocore/signers.py,sha256=cYFuBgbpSjqZi0kmRmgc0p327QCui8YkgIjXUfsMNSQ,34519 +botocore/stub.py,sha256=lsTeEZbwA4tZV1IRE3p-7hsmSHY7KW0dYF5sM6GuUfQ,16193 +botocore/tokens.py,sha256=LSLQD3w_3Ui72hp2eTzsFdrlP1IWYsBvgmOfJ-lTUvg,11764 
+botocore/translate.py,sha256=UfKIIWr_BAcwvMScHuqrLtSD5yuXecl7Rs0Et3jfREc,3406 +botocore/useragent.py,sha256=ckJQ66jIQsyAcVqQ5oAo5E3JuFiY0TVyH3eLs0NtljE,24351 +botocore/utils.py,sha256=4KzBs7JNy_wOWWGNJwzVKE55Zt41vTHcmO8Kd_-xODg,137917 +botocore/validate.py,sha256=VHqG980ds_6zYnamTZSvV4EYFMtXs7FPnMVNAg_XgQ8,13759 +botocore/vendored/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +botocore/vendored/requests/__init__.py,sha256=Pu8JNWAMzj9l8E0Qs4rU7clTOfxVIA6OuUKJkJDmvvc,227 +botocore/vendored/requests/exceptions.py,sha256=zZhHieXgR1teqbvuo_9OrwDMHnrvRtulW97VfzumQv4,2517 +botocore/vendored/requests/packages/__init__.py,sha256=aXkbNCjM_WhryRBocE4AaA_p7-CTxL5LOutY7XzKm4s,62 +botocore/vendored/requests/packages/urllib3/__init__.py,sha256=Nrq2HJOk0McF4saJ5zySsjVKGPV6j05iAFTJwkKEzOI,184 +botocore/vendored/requests/packages/urllib3/exceptions.py,sha256=za-cEwBqxBKOqqKTaIVAMdH3j1nDRqi-MtdojdpU4Wc,4374 +botocore/vendored/six.py,sha256=TOOfQi7nFGfMrIvtdr6wX4wyHH8M7aknmuLfo2cBBrM,34549 +botocore/waiter.py,sha256=n4_6HcqEc-cXDMPsECdwKfhNSowmfzw490-c1BIZYn0,14711 diff --git a/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/WHEEL b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..dcfdc6e359074689c0bdb567634b4f84add7849c --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.1.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..c5b9e129e101b5cd594836d12ba05d8485d068f4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/botocore-1.42.27.dist-info/top_level.txt @@ -0,0 +1 @@ +botocore diff --git a/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/METADATA b/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..d1bc526a9775f95fa2c73b15fc2b3444f341ecd7 --- /dev/null +++ b/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/METADATA @@ -0,0 +1,78 @@ +Metadata-Version: 2.4 +Name: certifi +Version: 2026.1.4 +Summary: Python package for providing Mozilla's CA Bundle. 
+Home-page: https://github.com/certifi/python-certifi +Author: Kenneth Reitz +Author-email: me@kennethreitz.com +License: MPL-2.0 +Project-URL: Source, https://github.com/certifi/python-certifi +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0) +Classifier: Natural Language :: English +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Requires-Python: >=3.7 +License-File: LICENSE +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: home-page +Dynamic: license +Dynamic: license-file +Dynamic: project-url +Dynamic: requires-python +Dynamic: summary + +Certifi: Python SSL Certificates +================================ + +Certifi provides Mozilla's carefully curated collection of Root Certificates for +validating the trustworthiness of SSL certificates while verifying the identity +of TLS hosts. It has been extracted from the `Requests`_ project. + +Installation +------------ + +``certifi`` is available on PyPI. Simply install it with ``pip``:: + + $ pip install certifi + +Usage +----- + +To reference the installed certificate authority (CA) bundle, you can use the +built-in function:: + + >>> import certifi + + >>> certifi.where() + '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem' + +Or from the command line:: + + $ python -m certifi + /usr/local/lib/python3.7/site-packages/certifi/cacert.pem + +Enjoy! + +.. _`Requests`: https://requests.readthedocs.io/en/master/ + +Addition/Removal of Certificates +-------------------------------- + +Certifi does not support any addition/removal or other modification of the +CA trust store content. This project is intended to provide a reliable and +highly portable root of trust to python deployments. Look to upstream projects +for methods to use alternate trust. 
diff --git a/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/RECORD b/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..4e358f6637db2153ccc744fc217a7406de7358fa --- /dev/null +++ b/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/RECORD @@ -0,0 +1,12 @@ +certifi-2026.1.4.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +certifi-2026.1.4.dist-info/METADATA,sha256=FSfJEfKuMo6bJlofUrtRpn4PFTYtbYyXpHN_A3ZFpIY,2473 +certifi-2026.1.4.dist-info/RECORD,, +certifi-2026.1.4.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +certifi-2026.1.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +certifi-2026.1.4.dist-info/licenses/LICENSE,sha256=6TcW2mucDVpKHfYP5pWzcPBpVgPSH2-D8FPkLPwQyvc,989 +certifi-2026.1.4.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8 +certifi/__init__.py,sha256=969deMMS7Uchipr0oO4dbRBUvRi0uNYCn07VmG1aTrg,94 +certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243 +certifi/cacert.pem,sha256=Tzl1_zCrvzVEO0hgZK6Ly0Hf9wf_31dsdtKS-0WKoKk,270954 +certifi/core.py,sha256=XFXycndG5pf37ayeF8N32HUuDafsyhkVMbO4BAPWHa0,3394 +certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/WHEEL b/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..e7fa31b6f3f78deb1022c1f7927f07d4d16da822 --- /dev/null +++ b/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..963eac530b9bc28d704d1bc410299c68e3216d4d --- /dev/null +++ b/py311/lib/python3.11/site-packages/certifi-2026.1.4.dist-info/top_level.txt @@ -0,0 +1 @@ +certifi diff --git a/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/METADATA b/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..3f433afb9c48912fe13fc7b24a0e71874c675822 --- /dev/null +++ b/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/METADATA @@ -0,0 +1,84 @@ +Metadata-Version: 2.4 +Name: click +Version: 8.3.1 +Summary: Composable command line interface toolkit +Maintainer-email: Pallets +Requires-Python: >=3.10 +Description-Content-Type: text/markdown +License-Expression: BSD-3-Clause +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers 
+Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Typing :: Typed +License-File: LICENSE.txt +Requires-Dist: colorama; platform_system == 'Windows' +Project-URL: Changes, https://click.palletsprojects.com/page/changes/ +Project-URL: Chat, https://discord.gg/pallets +Project-URL: Documentation, https://click.palletsprojects.com/ +Project-URL: Donate, https://palletsprojects.com/donate +Project-URL: Source, https://github.com/pallets/click/ + +
    + +# Click + +Click is a Python package for creating beautiful command line interfaces +in a composable way with as little code as necessary. It's the "Command +Line Interface Creation Kit". It's highly configurable but comes with +sensible defaults out of the box. + +It aims to make the process of writing command line tools quick and fun +while also preventing any frustration caused by the inability to +implement an intended CLI API. + +Click in three points: + +- Arbitrary nesting of commands +- Automatic help page generation +- Supports lazy loading of subcommands at runtime + + +## A Simple Example + +```python +import click + +@click.command() +@click.option("--count", default=1, help="Number of greetings.") +@click.option("--name", prompt="Your name", help="The person to greet.") +def hello(count, name): + """Simple program that greets NAME for a total of COUNT times.""" + for _ in range(count): + click.echo(f"Hello, {name}!") + +if __name__ == '__main__': + hello() +``` + +``` +$ python hello.py --count=3 +Your name: Click +Hello, Click! +Hello, Click! +Hello, Click! +``` + + +## Donate + +The Pallets organization develops and supports Click and other popular +packages. In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, [please +donate today][]. + +[please donate today]: https://palletsprojects.com/donate + +## Contributing + +See our [detailed contributing documentation][contrib] for many ways to +contribute, including reporting issues, requesting features, asking or answering +questions, and making PRs. + +[contrib]: https://palletsprojects.com/contributing/ + diff --git a/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/RECORD b/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..58fe30770db7431b2b66305ecd83d0101acfc9e6 --- /dev/null +++ b/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/RECORD @@ -0,0 +1,24 @@ +click-8.3.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +click-8.3.1.dist-info/METADATA,sha256=XZeBrMAE0ghTE88SjfrSDuSyNCpBPplxJR1tbwD9oZg,2621 +click-8.3.1.dist-info/RECORD,, +click-8.3.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +click-8.3.1.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82 +click-8.3.1.dist-info/licenses/LICENSE.txt,sha256=morRBqOU6FO_4h9C9OctWSgZoigF2ZG18ydQKSkrZY0,1475 +click/__init__.py,sha256=6YyS1aeyknZ0LYweWozNZy0A9nZ_11wmYIhv3cbQrYo,4473 +click/_compat.py,sha256=v3xBZkFbvA1BXPRkFfBJc6-pIwPI7345m-kQEnpVAs4,18693 +click/_termui_impl.py,sha256=rgCb3On8X5A4200rA5L6i13u5iapmFer7sru57Jy6zA,27093 +click/_textwrap.py,sha256=BOae0RQ6vg3FkNgSJyOoGzG1meGMxJ_ukWVZKx_v-0o,1400 +click/_utils.py,sha256=kZwtTf5gMuCilJJceS2iTCvRvCY-0aN5rJq8gKw7p8g,943 +click/_winconsole.py,sha256=_vxUuUaxwBhoR0vUWCNuHY8VUefiMdCIyU2SXPqoF-A,8465 +click/core.py,sha256=U6Bfxt8GkjNDqyJ0HqXvluJHtyZ4sY5USAvM1Cdq7mQ,132105 +click/decorators.py,sha256=5P7abhJtAQYp_KHgjUvhMv464ERwOzrv2enNknlwHyQ,18461 +click/exceptions.py,sha256=8utf8w6V5hJXMnO_ic1FNrtbwuEn1NUu1aDwV8UqnG4,9954 +click/formatting.py,sha256=RVfwwr0rwWNpgGr8NaHodPzkIr7_tUyVh_nDdanLMNc,9730 +click/globals.py,sha256=gM-Nh6A4M0HB_SgkaF5M4ncGGMDHc_flHXu9_oh4GEU,1923 +click/parser.py,sha256=Q31pH0FlQZEq-UXE_ABRzlygEfvxPTuZbWNh4xfXmzw,19010 +click/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+click/shell_completion.py,sha256=Cc4GQUFuWpfQBa9sF5qXeeYI7n3tI_1k6ZdSn4BZbT0,20994 +click/termui.py,sha256=hqCEjNndU-nzW08nRAkBaVgfZp_FdCA9KxfIWlKYaMc,31037 +click/testing.py,sha256=EERbzcl1br0mW0qBS9EqkknfNfXB9WQEW0ELIpkvuSs,19102 +click/types.py,sha256=ek54BNSFwPKsqtfT7jsqcc4WHui8AIFVMKM4oVZIXhc,39927 +click/utils.py,sha256=gCUoewdAhA-QLBUUHxrLh4uj6m7T1WjZZMNPvR0I7YA,20257 diff --git a/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/WHEEL b/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..d8b9936dad9ab2513fa6979f411560d3b6b57e37 --- /dev/null +++ b/py311/lib/python3.11/site-packages/click-8.3.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.12.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/METADATA b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..fcaae0d0409e88b5a2ad4aa6c10e6b5a5268e12a --- /dev/null +++ b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/METADATA @@ -0,0 +1,2261 @@ +Metadata-Version: 2.4 +Name: fonttools +Version: 4.61.1 +Summary: Tools to manipulate font files +Home-page: http://github.com/fonttools/fonttools +Author: Just van Rossum +Author-email: just@letterror.com +Maintainer: Behdad Esfahbod +Maintainer-email: behdad@behdad.org +License: MIT +Platform: Any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Environment :: Other Environment +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: End Users/Desktop +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Text Processing :: Fonts +Classifier: Topic :: Multimedia :: Graphics +Classifier: Topic :: Multimedia :: Graphics :: Graphics Conversion +Requires-Python: >=3.10 +Description-Content-Type: text/x-rst +License-File: LICENSE +License-File: LICENSE.external +Provides-Extra: ufo +Provides-Extra: lxml +Requires-Dist: lxml>=4.0; extra == "lxml" +Provides-Extra: woff +Requires-Dist: brotli>=1.0.1; platform_python_implementation == "CPython" and extra == "woff" +Requires-Dist: brotlicffi>=0.8.0; platform_python_implementation != "CPython" and extra == "woff" +Requires-Dist: zopfli>=0.1.4; extra == "woff" +Provides-Extra: unicode +Requires-Dist: unicodedata2>=17.0.0; python_version <= "3.14" 
and extra == "unicode" +Provides-Extra: graphite +Requires-Dist: lz4>=1.7.4.2; extra == "graphite" +Provides-Extra: interpolatable +Requires-Dist: scipy; platform_python_implementation != "PyPy" and extra == "interpolatable" +Requires-Dist: munkres; platform_python_implementation == "PyPy" and extra == "interpolatable" +Requires-Dist: pycairo; extra == "interpolatable" +Provides-Extra: plot +Requires-Dist: matplotlib; extra == "plot" +Provides-Extra: symfont +Requires-Dist: sympy; extra == "symfont" +Provides-Extra: type1 +Requires-Dist: xattr; sys_platform == "darwin" and extra == "type1" +Provides-Extra: pathops +Requires-Dist: skia-pathops>=0.5.0; extra == "pathops" +Provides-Extra: repacker +Requires-Dist: uharfbuzz>=0.45.0; extra == "repacker" +Provides-Extra: all +Requires-Dist: lxml>=4.0; extra == "all" +Requires-Dist: brotli>=1.0.1; platform_python_implementation == "CPython" and extra == "all" +Requires-Dist: brotlicffi>=0.8.0; platform_python_implementation != "CPython" and extra == "all" +Requires-Dist: zopfli>=0.1.4; extra == "all" +Requires-Dist: unicodedata2>=17.0.0; python_version <= "3.14" and extra == "all" +Requires-Dist: lz4>=1.7.4.2; extra == "all" +Requires-Dist: scipy; platform_python_implementation != "PyPy" and extra == "all" +Requires-Dist: munkres; platform_python_implementation == "PyPy" and extra == "all" +Requires-Dist: pycairo; extra == "all" +Requires-Dist: matplotlib; extra == "all" +Requires-Dist: sympy; extra == "all" +Requires-Dist: xattr; sys_platform == "darwin" and extra == "all" +Requires-Dist: skia-pathops>=0.5.0; extra == "all" +Requires-Dist: uharfbuzz>=0.45.0; extra == "all" +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: license +Dynamic: license-file +Dynamic: maintainer +Dynamic: maintainer-email +Dynamic: platform +Dynamic: provides-extra +Dynamic: requires-python +Dynamic: summary + +|CI Build Status| |Coverage Status| |PyPI| |Gitter Chat| + +What is this? +~~~~~~~~~~~~~ + +| fontTools is a library for manipulating fonts, written in Python. The + project includes the TTX tool, that can convert TrueType and OpenType + fonts to and from an XML text format, which is also called TTX. It + supports TrueType, OpenType, AFM and to an extent Type 1 and some + Mac-specific formats. The project has an `MIT open-source + license `__. +| Among other things this means you can use it free of charge. + +`User documentation `_ and +`developer documentation `_ +are available at `Read the Docs `_. + +Installation +~~~~~~~~~~~~ + +FontTools requires `Python `__ 3.10 +or later. We try to follow the same schedule of minimum Python version support as +NumPy (see `NEP 29 `__). + +The package is listed in the Python Package Index (PyPI), so you can +install it with `pip `__: + +.. code:: sh + + pip install fonttools + +If you would like to contribute to its development, you can clone the +repository from GitHub, install the package in 'editable' mode and +modify the source code in place. We recommend creating a virtual +environment, using `virtualenv `__ or +Python 3 `venv `__ module. + +.. code:: sh + + # download the source code to 'fonttools' folder + git clone https://github.com/fonttools/fonttools.git + cd fonttools + + # create new virtual environment called e.g. 'fonttools-venv', or anything you like + python -m virtualenv fonttools-venv + + # source the `activate` shell script to enter the environment (Unix-like); to exit, just type `deactivate` + . 
fonttools-venv/bin/activate + + # to activate the virtual environment in Windows `cmd.exe`, do + fonttools-venv\Scripts\activate.bat + + # install in 'editable' mode + pip install -e . + +Optional Requirements +--------------------- + +The ``fontTools`` package currently has no (required) external dependencies +besides the modules included in the Python Standard Library. +However, a few extra dependencies are required by some of its modules, which +are needed to unlock optional features. +The ``fonttools`` PyPI distribution also supports so-called "extras", i.e. a +set of keywords that describe a group of additional dependencies, which can be +used when installing via pip, or when specifying a requirement. +For example: + +.. code:: sh + + pip install fonttools[ufo,lxml,woff,unicode] + +This command will install fonttools, as well as the optional dependencies that +are required to unlock the extra features named "ufo", etc. + +- ``Lib/fontTools/misc/etree.py`` + + The module exports a ElementTree-like API for reading/writing XML files, and + allows to use as the backend either the built-in ``xml.etree`` module or + `lxml `__. The latter is preferred whenever present, + as it is generally faster and more secure. + + *Extra:* ``lxml`` + +- ``Lib/fontTools/ttLib/woff2.py`` + + Module to compress/decompress WOFF 2.0 web fonts; it requires: + + * `brotli `__: Python bindings of + the Brotli compression library. + + *Extra:* ``woff`` + +- ``Lib/fontTools/ttLib/sfnt.py`` + + To better compress WOFF 1.0 web fonts, the following module can be used + instead of the built-in ``zlib`` library: + + * `zopfli `__: Python bindings of + the Zopfli compression library. + + *Extra:* ``woff`` + +- ``Lib/fontTools/unicode.py`` + + To display the Unicode character names when dumping the ``cmap`` table + with ``ttx`` we use the ``unicodedata`` module in the Standard Library. + The version included in there varies between different Python versions. + To use the latest available data, you can install: + + * `unicodedata2 `__: + ``unicodedata`` backport for Python 3.x updated to the latest Unicode + version 17.0. + + *Extra:* ``unicode`` + +- ``Lib/fontTools/varLib/interpolatable.py`` + + Module for finding wrong contour/component order between different masters. + It requires one of the following packages in order to solve the so-called + "minimum weight perfect matching problem in bipartite graphs", or + the Assignment problem: + + * `scipy `__: the Scientific Library + for Python, which internally uses `NumPy `__ + arrays and hence is very fast; + * `munkres `__: a pure-Python + module that implements the Hungarian or Kuhn-Munkres algorithm. Slower than + SciPy, but useful for minimalistic systems where adding SciPy is undesirable. + + This ensures both performance (via SciPy) and minimal footprint (via Munkres) + are possible. + + To plot the results to a PDF or HTML format, you also need to install: + + * `pycairo `__: Python bindings for the + Cairo graphics library. Note that wheels are currently only available for + Windows, for other platforms see pycairo's `installation instructions + `__. + + *Extra:* ``interpolatable`` + +- ``Lib/fontTools/varLib/plot.py`` + + Module for visualizing DesignSpaceDocument and resulting VariationModel. + + * `matplotlib `__: 2D plotting library. + + *Extra:* ``plot`` + +- ``Lib/fontTools/misc/symfont.py`` + + Advanced module for symbolic font statistics analysis; it requires: + + * `sympy `__: the Python library for + symbolic mathematics. 
+
+Optional Requirements
+---------------------
+
+The ``fontTools`` package currently has no (required) external dependencies
+besides the modules included in the Python Standard Library.
+However, a few extra dependencies are required by some of its modules, which
+are needed to unlock optional features.
+The ``fonttools`` PyPI distribution also supports so-called "extras", i.e. a
+set of keywords that describe a group of additional dependencies, which can be
+used when installing via pip, or when specifying a requirement.
+For example:
+
+.. code:: sh
+
+  pip install fonttools[ufo,lxml,woff,unicode]
+
+This command will install fonttools, as well as the optional dependencies that
+are required to unlock the extra features named "ufo", etc.
+
+- ``Lib/fontTools/misc/etree.py``
+
+  The module exports an ElementTree-like API for reading/writing XML files, and
+  allows using either the built-in ``xml.etree`` module or
+  `lxml `__ as the backend. The latter is preferred whenever present,
+  as it is generally faster and more secure.
+
+  *Extra:* ``lxml``
+
+- ``Lib/fontTools/ttLib/woff2.py``
+
+  Module to compress/decompress WOFF 2.0 web fonts (see the usage sketch after
+  this list); it requires:
+
+  * `brotli `__: Python bindings of
+    the Brotli compression library.
+
+  *Extra:* ``woff``
+
+- ``Lib/fontTools/ttLib/sfnt.py``
+
+  To better compress WOFF 1.0 web fonts, the following module can be used
+  instead of the built-in ``zlib`` library:
+
+  * `zopfli `__: Python bindings of
+    the Zopfli compression library.
+
+  *Extra:* ``woff``
+
+- ``Lib/fontTools/unicode.py``
+
+  To display the Unicode character names when dumping the ``cmap`` table
+  with ``ttx`` we use the ``unicodedata`` module in the Standard Library.
+  The version included in there varies between different Python versions.
+  To use the latest available data, you can install:
+
+  * `unicodedata2 `__:
+    ``unicodedata`` backport for Python 3.x updated to the latest Unicode
+    version 17.0.
+
+  *Extra:* ``unicode``
+
+- ``Lib/fontTools/varLib/interpolatable.py``
+
+  Module for finding wrong contour/component order between different masters.
+  It requires one of the following packages in order to solve the so-called
+  "minimum weight perfect matching problem in bipartite graphs", or
+  the Assignment problem:
+
+  * `scipy `__: the Scientific Library
+    for Python, which internally uses `NumPy `__
+    arrays and hence is very fast;
+  * `munkres `__: a pure-Python
+    module that implements the Hungarian or Kuhn-Munkres algorithm. Slower than
+    SciPy, but useful for minimalistic systems where adding SciPy is undesirable.
+
+  Supporting both lets you choose between performance (SciPy) and a minimal
+  footprint (munkres).
+
+  To plot the results in PDF or HTML format, you also need to install:
+
+  * `pycairo `__: Python bindings for the
+    Cairo graphics library. Note that wheels are currently only available for
+    Windows; for other platforms see pycairo's `installation instructions
+    `__.
+
+  *Extra:* ``interpolatable``
+
+- ``Lib/fontTools/varLib/plot.py``
+
+  Module for visualizing DesignSpaceDocument and resulting VariationModel.
+
+  * `matplotlib `__: 2D plotting library.
+
+  *Extra:* ``plot``
+
+- ``Lib/fontTools/misc/symfont.py``
+
+  Advanced module for symbolic font statistics analysis; it requires:
+
+  * `sympy `__: the Python library for
+    symbolic mathematics.
+
+  *Extra:* ``symfont``
+
+- ``Lib/fontTools/t1Lib.py``
+
+  To get the file creator and type of Macintosh PostScript Type 1 fonts
+  on Python 3 you need to install the following module, as the old ``MacOS``
+  module is no longer included in Mac Python:
+
+  * `xattr `__: Python wrapper for
+    extended filesystem attributes (macOS platform only).
+
+  *Extra:* ``type1``
+
+- ``Lib/fontTools/ttLib/removeOverlaps.py``
+
+  Simplify TrueType glyphs by merging overlapping contours and components
+  (see the usage sketch after this list).
+
+  * `skia-pathops `__: Python
+    bindings for the Skia library's PathOps module, performing boolean
+    operations on paths (union, intersection, etc.).
+
+  *Extra:* ``pathops``
+
+- ``Lib/fontTools/ufoLib``
+
+  Package for reading and writing UFO source files; if available, it will use:
+
+  * `fs `__: (aka ``pyfilesystem2``) filesystem abstraction layer
+    for reading and writing UFOs to the local filesystem or zip files (.ufoz), instead of
+    the built-in ``fontTools.misc.filesystem`` package.
+    The reader and writer classes can in theory also accept any object compatible with the
+    ``fs.base.FS`` interface, although not all have been tested.
+
+- ``Lib/fontTools/pens/cocoaPen.py`` and ``Lib/fontTools/pens/quartzPen.py``
+
+  Pens for drawing glyphs with Cocoa ``NSBezierPath`` or ``CGPath`` require:
+
+  * `PyObjC `__: the bridge between
+    Python and the Objective-C runtime (macOS platform only).
+
+- ``Lib/fontTools/pens/qtPen.py``
+
+  Pen for drawing glyphs with Qt's ``QPainterPath``, requires:
+
+  * `PyQt5 `__: Python bindings for
+    the Qt cross platform UI and application toolkit.
+
+- ``Lib/fontTools/pens/reportLabPen.py``
+
+  Pen for drawing glyphs as PNG images, requires:
+
+  * `reportlab `__: Python toolkit
+    for generating PDFs and graphics.
+
+- ``Lib/fontTools/pens/freetypePen.py``
+
+  Pen for drawing glyphs with FreeType as raster images, requires:
+
+  * `freetype-py `__: Python binding
+    for the FreeType library.
+
+- ``Lib/fontTools/ttLib/tables/otBase.py``
+
+  Use the Harfbuzz library to serialize GPOS/GSUB using the ``hb_repack`` method,
+  requires:
+
+  * `uharfbuzz `__: Streamlined Cython
+    bindings for the harfbuzz shaping engine.
+
+  *Extra:* ``repacker``
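+
+As a small taste of what these extras unlock, here is a minimal sketch (the
+file names are placeholders) that merges overlapping contours using the
+``pathops`` extra and then saves the font as WOFF2 using the ``woff`` extra:
+
+.. code:: python
+
+  from fontTools.ttLib import TTFont
+  from fontTools.ttLib.removeOverlaps import removeOverlaps
+
+  font = TTFont("MyFont.ttf")  # placeholder input path
+
+  # merge overlapping contours and components (needs the 'pathops' extra)
+  removeOverlaps(font)
+
+  # recompress as a WOFF 2.0 web font (needs the 'woff' extra for brotli)
+  font.flavor = "woff2"
+  font.save("MyFont.woff2")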
+
+How to make a new release
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+1) Update ``NEWS.rst`` with all the changes since the last release. Write a
+   changelog entry for each PR, with one or two short sentences summarizing it,
+   as well as links to the PR and relevant issues addressed by the PR. Do not
+   put a new title; the next command will do it for you.
+2) Use semantic versioning to decide whether the new release will be a 'major',
+   'minor' or 'patch' release. It's usually one of the latter two, depending on
+   whether new backward compatible APIs were added, or simply some bugs were fixed.
+3) From inside a venv, first do ``pip install -r dev-requirements.txt``, then run
+   the ``python setup.py release`` command from the tip of the ``main`` branch
+   (see the command summary after this list).
+   By default this bumps the third or 'patch' digit only, unless you pass ``--major``
+   or ``--minor`` to bump respectively the first or second digit.
+   This bumps the package version string, extracts the changes since the latest
+   version from ``NEWS.rst``, and uses that text to create an annotated git tag
+   (or a signed git tag if you pass the ``--sign`` option and your git and GitHub
+   account are configured for `signing commits `__
+   using a GPG key).
+   It also commits an additional version bump which opens the main branch for
+   the subsequent development cycle.
+4) Push both the tag and commit to the upstream repository, by running the command
+   ``git push --follow-tags``. Note: it may push other local tags as well, so be
+   careful.
+5) Let the CI build the wheel and source distribution packages and verify both
+   get uploaded to the Python Package Index (PyPI).
+6) [Optional] Go to fonttools `GitHub Releases `__
+   page and create a new release, copy-pasting the content of the git tag
+   message. This way, the release notes are nicely formatted as markdown, and
+   users watching the repo will get an email notification. One day we shall
+   automate that too.
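+
+For convenience, the command sequence from steps 3 and 4 boils down to the
+following (a sketch; double-check the flags against the steps above):
+
+.. code:: sh
+
+  # from the tip of the 'main' branch, inside a venv
+  pip install -r dev-requirements.txt
+
+  # bump the 'patch' digit; pass --minor or --major for the other digits,
+  # and --sign to create a signed tag
+  python setup.py release
+
+  # push the release commit together with the new tag
+  git push --follow-tags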
+
+
+Acknowledgments
+~~~~~~~~~~~~~~~~
+
+In alphabetical order:
+
+aschmitz, Olivier Berten, Samyak Bhuta, Erik van Blokland, Petr van Blokland,
+Jelle Bosma, Sascha Brawer, Tom Byrer, Antonio Cavedoni, Frédéric Coiffier,
+Vincent Connare, David Corbett, Simon Cozens, Dave Crossland, Simon Daniels,
+Peter Dekkers, Behdad Esfahbod, Behnam Esfahbod, Hannes Famira, Sam Fishman,
+Matt Fontaine, Takaaki Fuji, Rob Hagemans, Yannis Haralambous, Greg Hitchcock,
+Jeremie Hornus, Khaled Hosny, John Hudson, Denis Moyogo Jacquerye, Jack Jansen,
+Tom Kacvinsky, Jens Kutilek, Antoine Leca, Werner Lemberg, Tal Leming, Liang Hai, Peter
+Lofting, Cosimo Lupo, Olli Meier, Masaya Nakamura, Dave Opstad, Laurence Penney,
+Roozbeh Pournader, Garret Rieger, Read Roberts, Colin Rofls, Guido van Rossum,
+Just van Rossum, Andreas Seidel, Georg Seifert, Chris Simpkins, Miguel Sousa,
+Adam Twardoch, Adrien Tétar, Vitaly Volkov, Paul Wise.
+
+Copyrights
+~~~~~~~~~~
+
+| Copyright (c) 1999-2004 Just van Rossum, LettError
+  (just@letterror.com)
+| See `LICENSE `__ for the full license.
+
+Copyright (c) 2000 BeOpen.com. All Rights Reserved.
+
+Copyright (c) 1995-2001 Corporation for National Research Initiatives.
+All Rights Reserved.
+
+Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam. All
+Rights Reserved.
+
+Have fun!
+
+.. |CI Build Status| image:: https://github.com/fonttools/fonttools/workflows/Test/badge.svg
+   :target: https://github.com/fonttools/fonttools/actions?query=workflow%3ATest
+.. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/main/graph/badge.svg
+   :target: https://codecov.io/gh/fonttools/fonttools
+.. |PyPI| image:: https://img.shields.io/pypi/v/fonttools.svg
+   :target: https://pypi.org/project/FontTools
+.. |Gitter Chat| image:: https://badges.gitter.im/fonttools-dev/Lobby.svg
+   :alt: Join the chat at https://gitter.im/fonttools-dev/Lobby
+   :target: https://gitter.im/fonttools-dev/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
+
+Changelog
+~~~~~~~~~
+
+4.61.1 (released 2025-12-12)
+----------------------------
+
+- [otlLib] buildCoverage: return empty Coverage instead of None (#4003, #4004).
+- [instancer] bug fix in ``avar2`` full instancing (#4002).
+- [designspaceLib] Preserve empty conditionsets when serializing to XML (#4001).
+- [fontBuilder] Fix FontBuilder ``setupOS2()`` default params being globally polluted (#3996, #3997).
+- [ttFont] Add more typing annotations to ttFont, xmlWriter, sfnt, varLib.models and others (#3952, #3826).
+- Explicitly test and declare support for Python 3.14, even though we were already shipping pre-built wheels for it (#3990).
+
+4.60.2 (released 2025-12-09)
+----------------------------
+
+- **Backport release** Same as 4.61.0 but without the "Drop support for EOL Python 3.9" change, to allow
+  downstream projects still on Python 3.9 to avail of the security fix for CVE-2025-66034 (#3994, #3999).
+
+4.61.0 (released 2025-11-28)
+----------------------------
+
+- [varLib.main]: **SECURITY** Only use basename(vf.filename) to prevent path traversal attacks when
+  running the ``fonttools varLib`` command, or code which invokes ``fonttools.varLib.main()``.
+  Fixes CVE-2025-66034, see:
+  https://github.com/fonttools/fonttools/security/advisories/GHSA-768j-98cg-p3fv.
+- [feaLib] Sort BaseLangSysRecords by tag (#3986).
+- Drop support for EOL Python 3.9 (#3982).
+- [instancer] Support --remove-overlaps for fonts with CFF2 table (#3975).
+- [CFF2ToCFF] Add --remove-overlaps option (#3976).
+- [feaLib] Raise an error for rsub with NULL target (#3979).
+- [bezierTools] Fix logic bug in curveCurveIntersections (#3963).
+- [feaLib] Error when condition sets have the same name (#3958).
+- [cu2qu.ufo] skip processing empty glyphs to support sparse kerning masters (#3956).
+- [unicodedata] Update to Unicode 17. Require ``unicodedata2 >= 17.0.0`` when installed with 'unicode' extra.
+
+4.60.1 (released 2025-09-29)
+----------------------------
+
+- [ufoLib] Reverted accidental method name change in ``UFOReader.getKerningGroupConversionRenameMaps``
+  that broke compatibility with downstream projects like defcon (#3948, #3947, robotools/defcon#478).
+- [ufoLib] Added test coverage for ``getKerningGroupConversionRenameMaps`` method (#3950).
+- [subset] Don't try to subset BASE table; pass it through by default instead (#3949).
+- [subset] Remove empty BaseRecord entries in MarkBasePos lookups (#3897, #3892).
+- [subset] Add pruning for MarkLigPos and MarkMarkPos lookups (#3946).
+- [subset] Remove duplicate features when subsetting (#3945).
+- [Docs] Added documentation for the visitor module (#3944).
+
+4.60.0 (released 2025-09-17)
+----------------------------
+
+- [pointPen] Allow ``reverseFlipped`` parameter of ``DecomposingPointPen`` to take a ``ReverseFlipped``
+  enum value to control whether/how to reverse contour direction of flipped components, in addition to
+  the existing True/False. This allows setting ``ReverseFlipped.ON_CURVE_FIRST`` to ensure that
+  the decomposed outline starts with an on-curve point before being reversed, for better consistency
+  with other segment-oriented contour transformations. The change is backward compatible, and the
+  default behavior hasn't changed (#3934).
+- [filterPen] Added ``ContourFilterPointPen``, base pen for buffered contour operations, and
+  ``OnCurveStartPointPen`` filter to ensure contours start with an on-curve point (#3934).
+- [cu2qu] Fixed difference in cython vs pure-python complex division by real number (#3930).
+- [varLib.avar] Refactored and added some new sub-modules and scripts (#3926).
+  * ``varLib.avar.build`` module to build avar (and a missing fvar) binaries into a possibly empty TTFont,
+  * ``varLib.avar.unbuild`` module to print a .designspace snippet that would generate the same avar binary,
+  * ``varLib.avar.map`` module to take TTFont and do the mapping, in user/normalized space,
+  * ``varLib.avar.plan`` module moved from ``varLib.avarPlanner``.
+  The bare ``fonttools varLib.avar`` script is deprecated, in favour of ``fonttools varLib.avar.build`` (or ``unbuild``).
+- [interpolatable] Clarify ``linear_sum_assignment`` backend options and minimal dependency
+  usage (#3927).
+- [post] Speed up ``build_psNameMapping`` (#3923).
+- [ufoLib] Added typing annotations to fontTools.ufoLib (#3875).
+
+4.59.2 (released 2025-08-27)
+----------------------------
+
+- [varLib] Clear ``USE_MY_METRICS`` component flags when inconsistent across masters (#3912).
+- [varLib.instancer] Avoid negative advance width/height values when instantiating HVAR/VVAR
+  (unlikely in well-behaved fonts) (#3918).
+- [subset] Fix shaping behaviour when pruning empty mark sets (#3915, harfbuzz/harfbuzz#5499).
+- [cu2qu] Fixed ``dot()`` product of perpendicular vectors not always returning exactly 0.0
+  in all Python implementations (#3911).
+- [varLib.instancer] Implemented fully-instantiating ``avar2`` fonts (#3909).
+- [feaLib] Allow float values in ``VariableScalar``'s axis locations (#3906, #3907).
+- [cu2qu] Handle special case in ``calc_intersect`` for degenerate cubic curves where 3 to 4
+  control points are equal (#3904).
+
+4.59.1 (released 2025-08-14)
+----------------------------
+
+- [featureVars] Update OS/2.usMaxContext if possible after addFeatureVariationsRaw (#3894).
+- [vhmtx] raise TTLibError('not enough data...') when hmtx/vmtx are truncated (#3843, #3901).
+- [feaLib] Combine duplicate features that have the same set of lookups regardless of the order in which those lookups are added to the feature (#3895).
+- [varLib] Deprecate ``varLib.mutator`` in favor of ``varLib.instancer``. The latter
+  provides equivalent full (static font) instancing in addition to partial VF instancing.
+  CLI users should replace ``fonttools varLib.mutator`` with ``fonttools varLib.instancer``.
+  API users should migrate to ``fontTools.varLib.instancer.instantiateVariableFont`` (#2680).
+
+
+4.59.0 (released 2025-07-16)
+----------------------------
+
+- Removed hard dependency on pyfilesystem2 (``fs`` package) from the ``fonttools[ufo]`` extra.
+  This is replaced by the `fontTools.misc.filesystem` package, a stdlib-only, drop-in
+  replacement for the subset of the pyfilesystem2's API used by ``fontTools.ufoLib``.
+  The latter should continue to work with the upstream ``fs`` (we even test with/without).
+  Clients who wish to continue using ``fs`` can do so by depending on it directly instead
+  of via the ``fonttools[ufo]`` extra (#3885, #3620).
+- [xmlWriter] Replace illegal XML characters (e.g. control or non-characters) with "?"
+  when dumping to ttx (#3868, #71).
+- [varLib.hvar] Fixed vertical metrics fields copy/pasta error (#3884).
+- Micro optimizations in ttLib and sstruct modules (#3878, #3879).
+- [unicodedata] Add Garay script to RTL_SCRIPTS (#3882).
+- [roundingPen] Remove unreliable kwarg usage. Argument names aren’t consistent among
+  point pens’ ``.addComponent()`` implementations, in particular ``baseGlyphName``
+  vs ``glyphName`` (#3880).
+
+4.58.5 (released 2025-07-03)
+----------------------------
+
+- [feaLib] Don't try to combine ligature & multisub rules (#3874).
+- [feaLib/ast] Use weakref proxies to avoid cycles in visitor (#3873).
+- [varLib.instancer] Fixed instancing CFF2 fonts where VarData contains more than 64k items (#3858).
+
+4.58.4 (released 2025-06-13)
+----------------------------
+
+- [feaLib] Allow for empty MarkFilter & MarkAttach sets (#3856).
+
+4.58.3 (released 2025-06-13)
+----------------------------
+
+- [feaLib] Fixed iterable check for Python 3.13.4 and newer (#3854, #3855).
+
+4.58.2 (released 2025-06-06)
+----------------------------
+
+- [ttLib.reorderGlyphs] Handle CFF2 when reordering glyphs (#3852)
+- [subset] Copy name IDs in use before scrapping or scrambling them for webfonts (#3853)
+
+4.58.1 (released 2025-05-28)
+----------------------------
+
+- [varLib] Make sure that fvar named instances only reuse name ID 2 or 17 if they are at the default location across all axes, to match OT spec requirement (#3831).
+- [feaLib] Improve single substitution promotion to multiple/ligature substitutions, fixing a few bugs as well (#3849).
+- [loggingTools] Make ``Timer._time`` a static method that doesn't take self, makes it easier to override (#3836).
+- [featureVars] Use ``None`` for empty ConditionSet, which translates to a null offset in the compiled table (#3850).
+- [feaLib] Raise an error on conflicting ligature substitution rules instead of silently taking the last one (#3835).
+- Add typing annotations to T2CharStringPen (#3837).
+- [feaLib] Add single substitutions that were promoted to multiple or ligature substitutions to ``aalt`` feature (#3847).
+- [featureVars] Create a default ``LangSys`` in a ``ScriptRecord`` if missing when adding feature variations to existing GSUB later in the build (#3838).
+- [symfont] Added a ``main()``.
+- [cffLib.specializer] Fix rmoveto merging when blends used (#3839, #3840).
+- [pyftmerge] Add support for cmap format 14 in the merge tool (#3830).
+- [varLib.instancer/cff2] Fix vsindex of Private dicts when instantiating (#3828, #3232).
+- Update text file read to use UTF-8 with optional BOM so it works with e.g. Windows Notepad.exe (#3824).
+
+4.58.0 (released 2025-05-10)
+----------------------------
+
+- Drop Python 3.8, require 3.9+ (#3819)
+- [HVAR, VVAR] Prune unused regions when using a direct mapping (#3797)
+- [Docs] Improvements to ufoLib documentation (#3721)
+- [Docs] Improvements to varLib documentation (#3727)
+- [Docs] Improvements to Pens and pen-module documentation (#3724)
+- [Docs] Miscellany updates to docs (misc modules and smaller modules) (#3730)
+- [subset] Close codepoints over BiDi mirror variants (#3801)
+- [feaLib] Fix serializing ChainContextPosStatement and
+  ChainContextSubstStatement in some rare cases (#3788)
+- [designspaceLib] Clarify user expectations for getStatNames (#2892)
+- [GVAR] Add support for new `GVAR` table (#3728)
+- [TSI0, TSI5] Derive number of entries to decompile from data length (#2477)
+- [ttLib] Fix `AttributeError` when reporting table overflow (#3808)
+- [ttLib] Apply rounding more often in getCoordinates (#3798)
+- [ttLib] Ignore component bounds if empty (#3799)
+- [ttLib] Change the separator for duplicate glyph names from "#" to "." (#3809)
+- [feaLib] Support subtable breaks in CursivePos, MarkBasePos, MarkToLigPos and
+  MarkToMarkPos lookups (#3800, #3807)
+- [feaLib] If the same lookup has single substitutions and ligature
+  substitutions, upgrade single substitutions to ligature substitutions with
+  one input glyph (#3805)
+- [feaLib] Correctly handle in single pos lookups (#3803)
+- [feaLib] Remove duplicates from class pair pos classes instead of raising an
+  error (#3804)
+- [feaLib] Support creating extension lookups using the useExtension lookup flag
+  instead of silently ignoring it (#3811)
+- [STAT] Add typing for the simpler STAT arguments (#3812)
+- [otlLib.builder] Add future import for annotations (#3814)
+- [cffLib] Fix reading supplement encoding (#3813)
+- [voltLib] Add some missing functionality and fixes to voltLib and VoltToFea,
+  making the conversion to feature files more robust. Add also `fonttools
+  voltLib` command line tool to compile VOLT sources directly (doing an
+  intermediate fea conversion internally) (#3818)
+- [pens] Add some PointPen annotations (#3820)
+
+4.57.0 (released 2025-04-03)
+----------------------------
+
+- [ttLib.__main__] Add `--no-recalc-timestamp` flag (#3771)
+- [ttLib.__main__] Add `-b` (recalcBBoxes=False) flag (#3772)
+- [cmap] Speed up glyphOrder loading from cmap (#3774)
+- [ttLib.__main__] Improvements around the `-t` flag (#3776)
+- [Debg] Fix parsing from XML; add roundtrip tests (#3781)
+- [feaLib] Support \*Base.MinMax tables (#3783, #3786)
+- [config] Add OPTIMIZE_FONT_SPEED (#3784)
+- [varLib.hvar] New module to add HVAR table to the font (#3780)
+- [otlLib.optimize] Fix crash when the provided TTF does not contain a `GPOS` (#3794)
+
+4.56.0 (released 2025-02-07)
+----------------------------
+
+- [varStore] Sort the input todo list with the same sorting key used for the optimizer's output (#3767).
+- [otData] Fix DeviceTable's ``DeltaValue`` repeat value which caused a crash after importing from XML and then compiling a GPOS containing Device tables (#3758).
+- [feaLib] Make ``FeatureLibError`` pickleable, so clients can e.g. use feaLib to compile features in parallel with multiprocessing (#3762).
+- [varLib/gvar] Removed workaround for old, long-fixed macOS bug about composite glyphs with all zero deltas (#1381, #1788).
+- [Docs] Updated ttLib documentation, beefed up TTFont and TTGlyphSet explanations (#3720).
+
+4.55.8 (released 2025-01-29)
+----------------------------
+
+- [MetaTools] Fixed bug in buildUCD.py script whereby the first non-header line of some UCD text file was being skipped. This affected in particular the U+00B7 (MIDDLE DOT) entry of ScriptExtensions.txt (#3756).
+
+4.55.7 (released 2025-01-28)
+----------------------------
+
+- Shorten the changelog included in PyPI package description to accommodate the maximum length limit imposed by Azure DevOps. No actual code changes since v4.55.6 (#3754).
+
+4.55.6 (released 2025-01-24)
+----------------------------
+
+- [glyf] Fixed regression introduced in 4.55.5 when computing bounds of nested composite glyphs with transformed components (#3752).
+
+4.55.5 (released 2025-01-23)
+----------------------------
+
+- [glyf] Fixed recalcBounds of transformed components with unrounded coordinates (#3750).
+- [feaLib] Allow duplicate script/language statements (#3749).
+
+4.55.4 (released 2025-01-21)
+----------------------------
+
+- [bezierTools] Fixed ``splitCubicAtT`` sometimes not returning identical start/end points as a result of numerical precision (#3742, #3743).
+- [feaLib/ast] Fixed docstring of ``AlternateSubstStatement`` (#3735).
+- [transform] Typing fixes (#3734).
+
+4.55.3 (released 2024-12-10)
+----------------------------
+
+- [Docs] fill out ttLib table section (#3716)
+- [feaLib] More efficient inline format 4 lookups (#3726)
+
+4.55.2 (released 2024-12-05)
+----------------------------
+
+- [Docs] update Sphinx config (#3712)
+- [designspaceLib] Allow axisOrdering to be set to zero (#3715)
+- [feaLib] Don’t modify variable anchors in place (#3717)
+
+4.55.1 (released 2024-12-02)
+----------------------------
+
+- [ttGlyphSet] Support VARC CFF2 fonts (#3683)
+- [DecomposedTransform] Document and implement always skewY == 0 (#3697)
+- [varLib] "Fix" cython iup issue? (#3704)
+- Cython minor refactor (#3705)
+
+
+4.55.0 (released 2024-11-14)
+----------------------------
+
+- [cffLib.specializer] Adjust stack use calculation (#3689)
+- [varLib] Let's not add Mac names if the rest of the name table doesn't have them (#3688)
+- [ttLib.reorderGlyphs] Update CFF table charstrings and charset (#3682)
+- [cffLib.specializer] Add cmdline to specialize a CFF2 font (#3675, #3679)
+- [CFF2] Lift uint16 VariationStore.length limitation (#3674)
+- [subset] consider variation selectors when subsetting cmap14 (#3672)
+- [varLib.interpolatable] Support CFF2 fonts (#3670)
+- Set isfinal to true in XML parser for proper resource cleanup (#3669)
+- [removeOverlaps] Fix CFF CharString width (#3659)
+- [glyf] Add optimizeSize option (#3657)
+- Python 3.13 support (#3656)
+- [TupleVariation] Optimize for loading speed, not size (#3650, #3653)
+
+
+4.54.1 (released 2024-09-24)
+----------------------------
+
+- [unicodedata] Update to Unicode 16
+- [subset] Escape ``\\`` in doc string
+
+4.54.0 (released 2024-09-23)
+----------------------------
+
+- [Docs] Small docs cleanups by @n8willis (#3611)
+- [Docs] cleanup code blocks by @n8willis (#3627)
+- [Docs] fix Sphinx builds by @n8willis (#3625)
+- [merge] Minor fixes to documentation for merge by @drj11 (#3588)
+- [subset] Small tweaks to pyftsubset documentation by @RoelN (#3633)
+- [Tests] Do not require fonttools command to be available by @behdad (#3612)
+- [Tests] subset_test: add failing test to reproduce issue #3616 by @anthrotype (#3622)
+- [ttLib] NameRecordVisitor: include whole sequence of character variants' UI labels, not just the first by @anthrotype (#3617)
+- [varLib.avar] Reconstruct mappings from binary by @behdad (#3598)
+- [varLib.instancer] Fix visual artefacts with partial L2 instancing by @Hoolean (#3635)
+- [varLib.interpolatable] Support discrete axes in .designspace by @behdad (#3599)
+- [varLib.models] By default, assume OpenType-like normalized space by @behdad (#3601)
+
+4.53.1 (released 2024-07-05)
+----------------------------
+
+- [feaLib] Improve the sharing of inline chained lookups (#3559)
+- [otlLib] Correct the calculation of OS/2.usMaxContext with reversed chaining contextual single substitutions (#3569)
+- [misc.visitor] Visitors search the inheritance chain of objects they are visiting (#3581)
+
+4.53.0 (released 2024-05-31)
+----------------------------
+
+- [ttLib.removeOverlaps] Support CFF table to aid in downconverting CFF2 fonts (#3528)
+- [avar] Fix crash when accessing not-yet-existing attribute (#3550)
+- [docs] Add buildMathTable to otlLib.builder documentation (#3540)
+- [feaLib] Allow UTF-8 with BOM when reading features (#3495)
+- [SVGPathPen] Revert rounding coordinates to two decimal places by default (#3543)
+- [varLib.instancer] Refix output filename decision-making (#3545, #3544, #3548)
+
+4.52.4 (released 2024-05-27)
+----------------------------
+
+- [varLib.cff] Restore and deprecate convertCFFtoCFF2 that was removed in the 4.52.0
+  release, as it is used by downstream projects (#3535).
+
+4.52.3 (released 2024-05-27)
+----------------------------
+
+- Fixed a small syntax error in the reStructuredText-formatted NEWS.rst file
+  which caused the upload to PyPI to fail for 4.52.2. No other code changes.
+
+4.52.2 (released 2024-05-27)
+----------------------------
+
+- [varLib.interpolatable] Ensure that scipy/numpy output is JSON-serializable
+  (#3522, #3526).
+- [housekeeping] Regenerate table lists, to fix pyinstaller packaging of the new
+  ``VARC`` table (#3531, #3529).
+- [cffLib] Make CFFToCFF2 and CFF2ToCFF more robust (#3521, #3525).
+
+4.52.1 (released 2024-05-24)
+----------------------------
+
+- Fixed a small syntax error in the reStructuredText-formatted NEWS.rst file
+  which caused the upload to PyPI to fail for 4.52.0. No other code changes.
+
+4.52.0 (released 2024-05-24)
+----------------------------
+
+- Added support for the new ``VARC`` (Variable Composite) table that is being
+  proposed for the OpenType spec (#3395). For more info:
+  https://github.com/harfbuzz/boring-expansion-spec/blob/main/VARC.md
+- [ttLib.__main__] Fixed decompiling all tables (90fed08).
+- [feaLib] Don't reference the same lookup index multiple times within the same
+  feature record, it is only applied once anyway (#3520).
+- [cffLib] Moved methods to desubroutinize, remove hints and unused subroutines
+  from subset module to cffLib (#3517).
+- [varLib.instancer] Added support for partial-instancing CFF2 tables! Also, added
+  method to down-convert from CFF2 to CFF 1.0, and CLI entry points to convert
+  CFF<->CFF2 (#3506).
+- [subset] Prune unused user name IDs even with --name-IDs='*' (#3410).
+- [ttx] use GNU-style getopt to intermix options and positional arguments (#3509).
+- [feaLib.variableScalar] Fixed ``value_at_location()`` method (#3491).
+- [psCharStrings] Shorten output of ``encodeFloat`` (#3492).
+- [bezierTools] Fix infinite-recursion in ``calcCubicArcLength`` (#3502).
+- [avar2] Implement ``avar2`` support in ``TTFont.getGlyphSet()`` (#3473).
+
+4.51.0 (released 2024-04-05)
+----------------------------
+
+- [ttLib] Optimization on loading aux fields (#3464).
+- [ttFont] Add reorderGlyphs (#3468).
+
+4.50.0 (released 2024-03-15)
+----------------------------
+
+- [pens] Added decomposing filter pens that draw components as regular contours (#3460).
+- [instancer] Drop explicit no-op axes from TupleVariations (#3457).
+- [cu2qu/ufo] Return set of modified glyph names from fonts_to_quadratic (#3456).
+
+4.49.0 (released 2024-02-15)
+----------------------------
+
+- [otlLib] Add API for building ``MATH`` table (#3446)
+
+4.48.1 (released 2024-02-06)
+----------------------------
+
+- Fixed uploading wheels to PyPI, no code changes since v4.48.0.
+
+4.48.0 (released 2024-02-06)
+----------------------------
+
+- [varLib] Do not log when there are no OTL tables to be merged.
+- [setup.py] Do not restrict lxml<5 any more, tests pass just fine with lxml>=5.
+- [feaLib] Remove glyph and class names length restrictions in FEA (#3424).
+- [roundingPens] Added ``transformRoundFunc`` parameter to the rounding pens to allow
+  for custom rounding of the components' transforms (#3426).
+- [feaLib] Keep declaration order of ligature components within a ligature set, instead
+  of sorting by glyph name (#3429).
+- [feaLib] Fixed ordering of alternates in ``aalt`` lookups, following the declaration
+  order of feature references within the ``aalt`` feature block (#3430).
+- [varLib.instancer] Fixed a bug in the instancer's IUP optimization (#3432).
+- [sbix] Support sbix glyphs with new graphicType "flip" (#3433).
+- [svgPathPen] Added ``--glyphs`` option to dump the SVG paths for the named glyphs
+  in the font (0572f78).
+- [designspaceLib] Added "description" attribute to ```` and ````
+  elements, and allow multiple ```` elements to group ```` elements
+  that are logically related (#3435, #3437).
+- [otlLib] Correctly choose the most compact GSUB contextual lookup format (#3439).
+
+4.47.2 (released 2024-01-11)
+----------------------------
+
+Minor release to fix uploading wheels to PyPI.
+
+4.47.1 (released 2024-01-11)
+----------------------------
+
+- [merge] Improve help message and add standard command line options (#3408)
+- [otlLib] Pass ``ttFont`` to ``name.addName`` in ``buildStatTable`` (#3406)
+- [featureVars] Re-use ``FeatureVariationRecord``'s when possible (#3413)
+
+4.47.0 (released 2023-12-18)
+----------------------------
+
+- [varLib.models] New API for VariationModel: ``getMasterScalars`` and
+  ``interpolateFromValuesAndScalars``.
+- [varLib.interpolatable] Various bugfixes and rendering improvements. In particular,
+  add a Summary page in the front, and an Index and Table-of-Contents in the back.
+  Change the page size to Letter.
+- [Docs/designspaceLib] Defined a new ``public.fontInfo`` lib key, not used anywhere yet (#3358).
+
+4.46.0 (released 2023-12-02)
+----------------------------
+
+- [featureVars] Allow registering the same set of substitution rules for multiple features.
+  The ``addFeatureVariations`` function can now take a list of featureTags; similarly, the
+  lib key 'com.github.fonttools.varLib.featureVarsFeatureTag' can now take a
+  comma-separated string of feature tags (e.g. "salt,ss01") instead of a single tag (#3360).
+- [featureVars] Don't overwrite GSUB FeatureVariations, but append new records to it
+  for features which are not already there. But raise ``VarLibError`` if the feature tag
+  already has feature variations associated with it (#3363).
+- [varLib] Added ``addGSUBFeatureVariations`` function to add GSUB Feature Variations
+  to an existing variable font from rules defined in a DesignSpace document (#3362).
+- [varLib.interpolatable] Various bugfixes and rendering improvements. In particular,
+  a new test for "underweight" glyphs. The new test reports quite a few false-positives
+  though. Please send feedback.
+
+4.45.1 (released 2023-11-23)
+----------------------------
+
+- [varLib.interpolatable] Various bugfixes and improvements, better reporting, reduced
+  false positives.
+- [ttGlyphSet] Added option to not recalculate glyf bounds (#3348).
+
+4.45.0 (released 2023-11-20)
+----------------------------
+
+- [varLib.interpolatable] Vastly improved algorithms. Also available now are ``--pdf``
+  and ``--html`` options to generate a PDF or HTML report of the interpolation issues.
+  The PDF/HTML report showcases the problematic masters, the interpolated broken
+  glyph, as well as the proposed fixed version.
+
+4.44.3 (released 2023-11-15)
+----------------------------
+
+- [subset] Only prune codepage ranges for OS/2.version >= 1, ignore otherwise (#3334).
+- [instancer] Ensure hhea vertical metrics stay in sync with OS/2 ones after instancing
+  MVAR table containing 'hasc', 'hdsc' or 'hlgp' tags (#3297).
+
+4.44.2 (released 2023-11-14)
+----------------------------
+
+- [glyf] Have ``Glyph.recalcBounds`` skip empty components (base glyph with no contours)
+  when computing the bounding box of composite glyphs. This simply restores the existing
+  behavior before some changes were introduced in fonttools 4.44.0 (#3333).
+
+4.44.1 (released 2023-11-14)
+----------------------------
+
+- [feaLib] Ensure variable mark anchors are deep-copied while building since they
+  get modified in-place and later reused (#3330).
+- [OS/2|subset] Added ``recalcCodePageRanges`` method to the OS/2 table class; added
+  ``--prune-codepage-ranges`` to `fonttools subset` command (#3328, #2607).
+
+4.44.0 (released 2023-11-03)
+----------------------------
+
+- [instancer] Recalc OS/2 AvgCharWidth after instancing if default changes (#3317).
+- [otlLib] Make ClassDefBuilder class order match varLib.merger's, i.e. large
+  classes first, then glyph lexicographic order (#3321, #3324).
+- [instancer] Allow not specifying any of min:default:max values and let them be filled
+  in with fvar's values (#3322, #3323).
+- [instancer] When running --update-name-table ignore axes that have no STAT axis
+  values (#3318, #3319).
+- [Debg] When dumping to ttx, write the embedded JSON as multi-line string with
+  indentation (92cbfee0d).
+- [varStore] Handle > 65535 items per encoding by splitting VarData subtable (#3310).
+- [subset] Handle null-offsets in MarkLigPos subtables.
+- [subset] Keep East Asian spacing features vhal, halt, chws, vchw by default (#3305).
+- [instancer.solver] Fixed case where axisDef < lower and upper < axisMax (#3304).
+- [glyf] Speed up compilation, mostly around ``recalcBounds`` (#3301).
+- [varLib.interpolatable] Speed it up when working on variable fonts, plus various
+  micro-optimizations (#3300).
+- Require unicodedata2 >= 15.1.0 when installed with 'unicode' extra, which contains UCD 15.1.
+
+4.43.1 (released 2023-10-06)
+----------------------------
+
+- [EBDT] Fixed TypeError exception in `_reverseBytes` method triggered when dumping
+  some bitmap fonts with `ttx -z bitwise` option (#3162).
+- [v/hhea] Fixed UnboundLocalError exception in ``recalc`` method when no vmtx or hmtx
+  tables are present (#3290).
+- [bezierTools] Fixed incorrectly typed cython local variable leading to TypeError when
+  calling ``calcQuadraticArcLength`` (#3288).
+- [feaLib/otlLib] Better error message when building Coverage table with missing glyph (#3286).
+
+4.43.0 (released 2023-09-29)
+----------------------------
+
+- [subset] Set up lxml ``XMLParser(resolve_entities=False)`` when parsing OT-SVG documents
+  to prevent XML External Entity (XXE) attacks (9f61271dc):
+  https://codeql.github.com/codeql-query-help/python/py-xxe/
+- [varLib.iup] Added workaround for a Cython bug in ``iup_delta_optimize`` that was
+  leading to IUP tolerance being incorrectly initialised, resulting in sub-optimal deltas
+  (60126435d, cython/cython#5732).
+- [varLib] Added new command-line entry point ``fonttools varLib.avar`` to add an
+  ``avar`` table to an existing VF from axes mappings in a .designspace file (0a3360e52).
+- [instancer] Fixed bug whereby no longer used variation regions were not correctly pruned
+  after VarData optimization (#3268).
+- Added support for Python 3.12 (#3283).
+
+4.42.1 (released 2023-08-20)
+----------------------------
+
+- [t1Lib] Fixed several Type 1 issues (#3238, #3240).
+- [otBase/packer] Allow sharing tables reached by different offset sizes (#3241, #3236).
+- [varLib/merger] Fix Cursive attachment merging error when all anchors are NULL (#3248, #3247).
+- [ttLib] Fixed warning when calling ``addMultilingualName`` and ``ttFont`` parameter was not
+  passed on to ``findMultilingualName`` (#3253).
+
+4.42.0 (released 2023-08-02)
+----------------------------
+
+- [varLib] Use sentinel value 0xFFFF to mark a glyph advance in hmtx/vmtx as non-participating,
+  allowing sparse masters to contain glyphs for variation purposes other
+  than {H,V}VAR (#3235).
+- [varLib/cff] Treat empty glyphs in non-default masters as missing, thus not participating
+  in CFF2 delta computation, similarly to how varLib already treats them for gvar (#3234).
+- Added varLib.avarPlanner script to deduce 'correct' avar v1 axis mappings based on
+  glyph average weights (#3223).
+
+4.41.1 (released 2023-07-21)
+----------------------------
+
+- [subset] Fixed perf regression in v4.41.0 by making ``NameRecordVisitor`` only visit
+  tables that do contain nameID references (#3213, #3214).
+- [varLib.instancer] Support instancing fonts containing null ConditionSet offsets in
+  FeatureVariationRecords (#3211, #3212).
+- [statisticsPen] Report font glyph-average weight/width and font-wide slant.
+- [fontBuilder] Fixed head.created date incorrectly set to 0 instead of the current
+  timestamp, regression introduced in v4.40.0 (#3210).
+- [varLib.merger] Support sparse ``CursivePos`` masters (#3209).
+
+4.41.0 (released 2023-07-12)
+----------------------------
+
+- [fontBuilder] Fixed bug in setupOS2 with default panose attribute incorrectly being
+  set to a dict instead of a Panose object (#3201).
+- [name] Added method to ``removeUnusedNameRecords`` in the user range (#3185).
+- [varLib.instancer] Fixed issue with L4 instancing (moving default) (#3179).
+- [cffLib] Use latin1 so we can roundtrip non-ASCII in {Full,Font,Family}Name (#3202).
+- [designspaceLib] Mark as optional in docs (as it is in the code).
+- [glyf-1] Fixed drawPoints() bug whereby last cubic segment becomes quadratic (#3189, #3190).
+- [fontBuilder] Propagate the 'hidden' flag to the fvar Axis instance (#3184).
+- [fontBuilder] Update setupAvar() to also support avar 2, fixing ``_add_avar()`` call
+  site (#3183).
+- Added new ``voltLib.voltToFea`` submodule (originally Tiro Typeworks' "Volto") for
+  converting VOLT OpenType Layout sources to FEA format (#3164).
+
+4.40.0 (released 2023-06-12)
+----------------------------
+
+- Published native binary wheels to PyPI for all the Python minor versions, platforms
+  and architectures currently supported that would benefit from this. They will include
+  precompiled Cython-accelerated modules (e.g. cu2qu) without the need to compile them
+  from source. The pure-python wheel and source distribution will continue to be
+  published as always (pip will automatically choose them when no binary wheel is
+  available for the given platform, e.g. pypy). Use ``pip install --no-binary=fonttools fonttools``
+  to explicitly request pip to install from the pure-python source.
+- [designspaceLib|varLib] Add initial support for specifying axis mappings and build
+  ``avar2`` table from those (#3123).
+- [feaLib] Support variable ligature caret position (#3130).
+- [varLib|glyf] Added option to --drop-implied-oncurves; test for impliable oncurve
+  points either before or after rounding (#3146, #3147, #3155, #3156).
+- [TTGlyphPointPen] Don't error with empty contours, simply ignore them (#3145).
+- [sfnt] Fixed str vs bytes remnant of py3 transition in code dealing with de/compiling + WOFF metadata (#3129). +- [instancer-solver] Fixed bug when moving default instance with sparse masters (#3139, #3140). +- [feaLib] Simplify variable scalars that don’t vary (#3132). +- [pens] Added filter pen that explicitly emits closing line when lastPt != movePt (#3100). +- [varStore] Improve optimize algorithm and better document the algorithm (#3124, #3127). + Added ``quantization`` option (#3126). +- Added CI workflow config file for building native binary wheels (#3121). +- [fontBuilder] Added glyphDataFormat=0 option; raise error when glyphs contain cubic + outlines but glyphDataFormat was not explicitly set to 1 (#3113, #3119). +- [subset] Prune emptied GDEF.MarkGlyphSetsDef and remap indices; ensure GDEF is + subsetted before GSUB and GPOS (#3114, #3118). +- [xmlReader] Fixed issue whereby DSIG table data was incorrectly parsed (#3115, #2614). +- [varLib/merger] Fixed merging of SinglePos with pos=0 (#3111, #3112). +- [feaLib] Demote "Feature has not been defined" error to a warning when building aalt + and referenced feature is empty (#3110). +- [feaLib] Dedupe multiple substitutions with classes (#3105). + +4.39.4 (released 2023-05-10) +---------------------------- + +- [varLib.interpolatable] Allow for sparse masters (#3075) +- [merge] Handle differing default/nominalWidthX in CFF (#3070) +- [ttLib] Add missing main.py file to ttLib package (#3088) +- [ttx] Fix missing composite instructions in XML (#3092) +- [ttx] Fix split tables option to work on filenames containing '%' (#3096) +- [featureVars] Process lookups for features other than rvrn last (#3099) +- [feaLib] support multiple substitution with classes (#3103) + +4.39.3 (released 2023-03-28) +---------------------------- + +- [sbix] Fixed TypeError when compiling empty glyphs whose imageData is None, regression + was introduced in v4.39 (#3059). +- [ttFont] Fixed AttributeError on python <= 3.10 when opening a TTFont from a tempfile + SpooledTemporaryFile, seekable method only added on python 3.11 (#3052). + +4.39.2 (released 2023-03-16) +---------------------------- + +- [varLib] Fixed regression introduced in 4.39.1 whereby an incomplete 'STAT' table + would be built even though a DesignSpace v5 did contain 'STAT' definitions (#3045, #3046). + +4.39.1 (released 2023-03-16) +---------------------------- + +- [avar2] Added experimental support for reading/writing avar version 2 as specified in + this draft proposal: https://github.com/harfbuzz/boring-expansion-spec/blob/main/avar2.md +- [glifLib] Wrap underlying XML library exceptions with GlifLibError when parsing GLIFs, + and also print the name and path of the glyph that fails to be parsed (#3042). +- [feaLib] Consult avar for normalizing user-space values in ConditionSets and in + VariableScalars (#3042, #3043). +- [ttProgram] Handle string input to Program.fromAssembly() (#3038). +- [otlLib] Added a config option to emit GPOS 7 lookups, currently disabled by default + because of a macOS bug (#3034). +- [COLRv1] Added method to automatically compute ClipBoxes (#3027). +- [ttFont] Fixed getGlyphID to raise KeyError on missing glyphs instead of returning + None. The regression was introduced in v4.27.0 (#3032). +- [sbix] Fixed UnboundLocalError: cannot access local variable 'rawdata' (#3031). +- [varLib] When building VF, do not overwrite a pre-existing ``STAT`` table that was built + with feaLib from FEA feature file. 
Also, added support for building multiple VFs
+  defined in Designspace v5 from the ``fonttools varLib`` script (#3024).
+- [mtiLib] Only add ``Debg`` table with lookup names when ``FONTTOOLS_LOOKUP_DEBUGGING``
+  env variable is set (#3023).
+
+4.39.0 (released 2023-03-06)
+----------------------------
+
+- [mtiLib] Optionally add `Debg` debug info for MTI feature builds (#3018).
+- [ttx] Support reading input file from standard input using special `-` character,
+  similar to existing `-o -` option to write output to standard output (#3020).
+- [cython] Prevent ``cython.compiled`` from raising AttributeError if cython is not
+  installed properly (#3017).
+- [OS/2] Guard against ZeroDivisionError when calculating xAvgCharWidth in the unlikely
+  scenario no glyph has non-zero advance (#3015).
+- [subset] Recompute xAvgCharWidth independently of --no-prune-unicode-ranges,
+  previously the two options were involuntarily bundled together (#3012).
+- [fontBuilder] Add ``debug`` parameter to addOpenTypeFeatures method to add source
+  debugging information to the font in the ``Debg`` private table (#3008).
+- [name] Make NameRecord `__lt__` comparison not fail on Unicode encoding errors (#3006).
+- [featureVars] Fixed bug in ``overlayBox`` (#3003, #3005).
+- [glyf] Added experimental support for cubic bezier curves in TrueType glyf table, as
+  outlined in glyf v1 proposal (#2988):
+  https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1-cubicOutlines.md
+- Added new qu2cu module and related qu2cuPen, the reverse of cu2qu for converting
+  TrueType quadratic splines to cubic bezier curves (#2993).
+- [glyf] Added experimental support for reading and writing Variable Composites/Components
+  as defined in glyf v1 spec proposal (#2958):
+  https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1-varComposites.md.
+- [pens] Added `addVarComponent` method to pen protocols' base classes, which pens can implement
+  to handle varcomponents (by default they get decomposed) (#2958).
+- [misc.transform] Added DecomposedTransform class which implements an affine transformation
+  with separate translate, rotation, scale, skew, and transformation-center components (#2598).
+- [sbix] Ensure Glyph.referenceGlyphName is set; fixes error after dumping and
+  re-compiling sbix table with 'dupe' glyphs (#2984).
+- [feaLib] Be cleverer when merging chained single substitutions into same lookup
+  when they are specified using the inline notation (#2150, #2974).
+- [instancer] Clamp user-inputted axis ranges to those of fvar (#2959).
+- [otBase/subset] Define ``__getstate__`` for BaseTable so that a copied/pickled 'lazy'
+  object gets its own OTTableReader to read from; incidentally fixes a bug while
+  subsetting COLRv1 table containing ClipBoxes on python 3.11 (#2965, #2968).
+- [sbix] Handle glyphs with "dupe" graphic type on compile correctly (#2963).
+- [glyf] ``endPointsOfContours`` field should be unsigned! Kudos to behdad for
+  spotting one of the oldest bugs in FT. Probably nobody has ever dared to make
+  glyphs with more than 32767 points... (#2957).
+- [feaLib] Fixed handling of ``ignore`` statements with unmarked glyphs to match
+  makeotf behavior, which assumes the first glyph is marked (#2950).
+- Reformatted code with ``black`` and enforce new code style via CI check (#2925).
+- [feaLib] Sort name table entries following OT spec prescribed order in the builder (#2927).
+- [cu2quPen] Add Cu2QuMultiPen that converts multiple outlines at a time in an
+  interpolation-compatible way; its methods take a list of tuples arguments
+  that would normally be passed to individual segment pens, and at the end it
+  dispatches the converted outlines to each pen (#2912).
+- [reverseContourPen/ttGlyphPen] Add outputImpliedClosingLine option (#2913, #2914,
+  #2921, #2922, #2995).
+- [gvar] Avoid expanding all glyphs unnecessarily upon compile (#2918).
+- [scaleUpem] Fixed bug whereby CFF2 vsindex was scaled; it should not (#2893, #2894).
+- [designspaceLib] Add DS.getAxisByTag and refactor getAxis (#2891).
+- [unicodedata] map Zmth<->math in ot_tag_{to,from}_script (#1737, #2889).
+- [woff2] Support encoding/decoding OVERLAP_SIMPLE glyf flags (#2576, #2884).
+- [instancer] Update OS/2 class and post.italicAngle when default moved (L4)
+- Dropped support for Python 3.7 which reached EOL; fontTools requires 3.8+.
+- [instancer] Fixed instantiateFeatureVariations logic when a rule range becomes
+  default-applicable (#2737, #2880).
+- [ttLib] Add main to ttFont and ttCollection that just decompile and re-compile the
+  input font (#2869).
+- [featureVars] Insert 'rvrn' lookup at the beginning of LookupList, to work around a bug
+  in Apple's implementation of the 'rvrn' feature: the spec says it should be processed
+  early, whereas on macOS 10.15 it follows lookup order (#2140, #2867).
+- [instancer/mutator] Remove 'DSIG' table if present.
+- [svgPathPen] Don't close path in endPath(), assume open unless closePath() (#2089, #2865).
+
+4.38.0 (released 2022-10-21)
+----------------------------
+
+- [varLib.instancer] Added support for L4 instancing, i.e. moving the default value of
+  an axis while keeping it variable. Thanks Behdad! (#2728, #2861).
+  It's now also possible to restrict an axis's min/max values beyond the current default
+  value, e.g. a font wght has min=100, def=400, max=900 and you want a partial VF that
+  only varies between 500 and 700, you can now do that.
+  You can either specify two min/max values (wght=500:700), and the new default will be
+  set to either the minimum or maximum, depending on which one is closer to the current
+  default (e.g. 500 in this case). Or you can specify three values (e.g. wght=500:600:700)
+  to specify the new default value explicitly.
+- [otlLib/featureVars] Set a few Count values so one doesn't need to compile the font
+  to update them (#2860).
+- [varLib.models] Make extrapolation work for 2-master models as well where one master
+  is at the default location (#2843, #2846).
+  Add optional extrapolate=False to normalizeLocation() (#2847, #2849).
+- [varLib.cff] Fixed sub-optimal packing of CFF2 deltas by no longer rounding them to
+  integer (#2838).
+- [scaleUpem] Calculate numShorts in VarData after scale; handle CFF hintmasks (#2840).
+
+4.37.4 (released 2022-09-30)
+----------------------------
+
+- [subset] Keep nameIDs used by CPAL palette entry labels (#2837).
+- [varLib] Avoid negative hmtx values when creating font from variable CFF2 font (#2827).
+- [instancer] Don't prune stat.ElidedFallbackNameID (#2828).
+- [unicodedata] Update Scripts/Blocks to Unicode 15.0 (#2833).
+
+4.37.3 (released 2022-09-20)
+----------------------------
+
+- Fix arguments in calls to (glyf) glyph.draw() and drawPoints(), whereby offset wasn't
+  correctly passed down; this fix also exposed a second bug, where lsb and tsb were not
+  set (#2824, #2825, adobe-type-tools/afdko#1560).
+
+4.37.2 (released 2022-09-15)
+----------------------------
+
+- [subset] Keep CPAL table and don't attempt to prune unused color indices if OT-SVG
+  table is present even if COLR table was subsetted away; OT-SVG may be referencing the
+  CPAL table; for now we assume that's the case (#2814, #2815).
+- [varLib.instancer] Downgrade GPOS/GSUB version if there are no more FeatureVariations
+  after instancing (#2812).
+- [subset] Added ``--no-lazy`` to optionally load fonts eagerly (mostly to ease
+  debugging of table lazy loading, no practical effects) (#2807).
+- [varLib] Avoid building empty COLR.DeltaSetIndexMap with only identity mappings (#2803).
+- [feaLib] Allow multiple value record types (by promoting to the most general format)
+  within the same PairPos subtable; e.g. this allows variable and non-variable kerning
+  rules to share the same subtable. This also fixes a bug whereby some kerning pairs
+  would become unreachable while shaping because of premature subtable splitting (#2772, #2776).
+- [feaLib] Speed up ``VarScalar`` by caching models for recurring master locations (#2798).
+- [feaLib] Optionally cythonize ``feaLib.lexer``, speeds up parsing FEA a bit (#2799).
+- [designspaceLib] Avoid crash when handling unbounded rule conditions (#2797).
+- [post] Don't crash if ``post`` legacy format 1 is malformed/improperly used (#2786).
+- [gvar] Don't be "lazy" (load all glyph variations up front) when TTFont.lazy=False (#2771).
+- [TTFont] Added ``normalizeLocation`` method to normalize a location dict from the
+  font's defined axes space (also known as "user space") into the normalized (-1..+1)
+  space. It applies ``avar`` mapping if the font contains an ``avar`` table (#2789).
+- [TTVarGlyphSet] Support drawing glyph instances from CFF2 variable glyph set (#2784).
+- [fontBuilder] Do not error when building cmap if there are zero code points (#2785).
+- [varLib.plot] Added ability to plot a variation model and set of accompanying master
+  values corresponding to the model's master locations into a pyplot figure (#2767).
+- [Snippets] Added ``statShape.py`` script to draw the statistical shape of a glyph as an
+  ellipse (requires pycairo) (baecd88).
+- [TTVarGlyphSet] implement drawPoints natively, avoiding going through
+  SegmentToPointPen (#2778).
+- [TTVarGlyphSet] Fixed bug whereby drawing a composite glyph multiple times, its
+  components would shift; needed an extra copy (#2774).
+
+4.37.1 (released 2022-08-24)
+----------------------------
+
+- [subset] Fixed regression introduced with v4.37.0 while subsetting the VarStore of
+  ``HVAR`` and ``VVAR`` tables, whereby an ``AttributeError: subset_varidxes`` was
+  thrown because an apparently unused import statement (with the side-effect of
+  dynamically binding that ``subset_varidxes`` method to the VarStore class) had been
+  accidentally deleted in an unrelated PR (#2679, #2773).
+- [pens] Added ``cairoPen`` (#2678).
+- [gvar] Read ``gvar`` more lazily by not parsing all of the ``glyf`` table (#2771).
+- [ttGlyphSet] Make ``drawPoints(pointPen)`` method work for CFF fonts as well via
+  adapter pen (#2770).
+
+4.37.0 (released 2022-08-23)
+----------------------------
+
+- [varLib.models] Reverted PR #2717 which added support for "narrow tents" in v4.36.0,
+  as it introduced a regression (#2764, #2765). It will be restored in an upcoming release
+  once we find a solution to the bug.
+- [cff.specializer] Fixed issue in charstring generalizer with the ``blend`` operator
+  (#2750, #1975).
+- [varLib.models] Added support for extrapolation (#2757).
+- [ttGlyphSet] Ensure the newly added ``_TTVarGlyphSet`` inherits from ``_TTGlyphSet``
+  to keep backward compatibility with existing API (#2762).
+- [kern] Allow compiling legacy kern tables with more than 64k entries (d21cfdede).
+- [visitor] Added new visitor API to traverse tree of objects and dispatch based
+  on the attribute type: cf. ``fontTools.misc.visitor`` and ``fontTools.ttLib.ttVisitor``.
+  Added ``fontTools.ttLib.scaleUpem`` module that uses the latter to
+  change a font's units-per-em and scale all the related fields accordingly (#2718,
+  #2755).
+
+4.36.0 (released 2022-08-17)
+----------------------------
+
+- [varLib.models] Use a simpler model that generates narrower "tents" (regions, master
+  supports) whenever possible: specifically when any two axes that actively "cooperate"
+  (have masters at non-zero positions for both axes) have a complete set of intermediates.
+  The simpler algorithm produces fewer overlapping regions and behaves better with
+  respect to rounding at the peak positions than the generic solver, always matching
+  intermediate masters exactly, instead of maximally 0.5 units off. This may be useful
+  when 100% metrics compatibility is desired (#2218, #2717).
+- [feaLib] Remove warning about ``GDEF`` not being built when explicitly not
+  requested; don't build one unconditionally even when not requested (#2744, also works
+  around #2747).
+- [ttFont] ``TTFont.getGlyphSet`` method now supports selecting a location that
+  represents an instance of a variable font (supports both user-scale and normalized
+  axes coordinates via the ``normalized=False`` parameter). Currently this only works
+  for TrueType-flavored variable fonts (#2738).
+
+4.35.0 (released 2022-08-15)
+----------------------------
+
+- [otData/otConverters] Added support for 'biased' PaintSweepGradient start/end angles
+  to match latest COLRv1 spec (#2743).
+- [varLib.instancer] Fixed bug in ``_instantiateFeatureVariations`` when at the same
+  time pinning one axis and restricting the range of a subsequent axis; the wrong axis
+  tag was being used in the latter step (as the records' axisIdx was updated in the
+  preceding step but looked up using the old axes order in the following step) (#2733,
+  #2734).
+- [mtiLib] Pad script tags with space when less than 4 char long (#1727).
+- [merge] Use ``'.'`` instead of ``'#'`` in duplicate glyph names (#2742).
+- [gvar] Added support for lazily loading glyph variations (#2741).
+- [varLib] In ``build_many``, we forgot to pass on ``colr_layer_reuse`` parameter to
+  the ``build`` method (#2730).
+- [svgPathPen] Add a main that prints SVG for input text (6df779fd).
+- [cffLib.width] Fixed off-by-one in optimized values; previous code didn't match the
+  code block above it (2963fa50).
+- [varLib.interpolatable] Support reading .designspace and .glyphs files (via optional
+  ``glyphsLib``).
+- Compile some modules with Cython when available and building/installing fonttools
+  from source: ``varLib.iup`` (35% faster), ``pens.momentsPen`` (makes
+  ``varLib.interpolatable`` 3x faster).
+- [feaLib] Allow features to be built for VF without also building a GDEF table (e.g.
+  only build GSUB); warn when GDEF would be needed but isn't requested (#2705, 2694).
+- [otBase] Fixed ``AttributeError`` when uharfbuzz < 0.23.0 and 'repack' method is
+  missing (32aa8eaf).
Use the new ``uharfbuzz.repack_with_tag`` when available (since
+  uharfbuzz>=0.30.0), which enables table-specific optimizations to be performed during
+  repacking (#2724).
+- [statisticsPen] By default report all glyphs (4139d891). Avoid division-by-zero
+  (52b28f90).
+- [feaLib] Added missing required argument to FeatureLibError exception (#2693)
+- [varLib.merge] Fixed error during error reporting (#2689). Fixed undefined
+  ``NotANone`` variable (#2714).
+
+4.34.4 (released 2022-07-07)
+----------------------------
+
+- Fixed typo in varLib/merger.py that causes NameError merging COLR glyphs
+  containing more than 255 layers (#2685).
+
+4.34.3 (released 2022-07-07)
+----------------------------
+
+- [designspaceLib] Don't make up bad PS names when no STAT data (#2684)
+
+4.34.2 (released 2022-07-06)
+----------------------------
+
+- [varStore/subset] fixed KeyError exception to do with NO_VARIATION_INDEX while
+  subsetting varidxes in GPOS/GDEF (a08140d).
+
+4.34.1 (released 2022-07-06)
+----------------------------
+
+- [instancer] When optimizing HVAR/VVAR VarStore, pass use_NO_VARIATION_INDEX=False to avoid
+  including NO_VARIATION_INDEX in AdvWidthMap, RsbMap, LsbMap mappings, which would
+  push the VarIdx width to the maximum (4 bytes), which is not desirable. This also fixes
+  a hard crash when attempting to subset a varfont after it had been partially instanced
+  with use_NO_VARIATION_INDEX=True.
+
+4.34.0 (released 2022-07-06)
+----------------------------
+
+- [instancer] Set RIBBI bits in head and OS/2 table when cutting instances and the
+  subfamily nameID=2 contains strings like 'Italic' or 'Bold' (#2673).
+- [otTraverse] Added module containing methods for traversing trees of otData tables
+  (#2660).
+- [otTables] Made DeltaSetIndexMap TTX dump less verbose by omitting no-op entries
+  (#2660).
+- [colorLib.builder] Added option to disable PaintColrLayers's reuse of layers from
+  LayerList (#2660).
+- [varLib] Added support for merging multiple master COLRv1 tables into a variable
+  COLR table (#2660, #2328). Base color glyphs of same name in different masters must have
+  identical paint graph structure (incl. number of layers, palette indices, number
+  of color line stops, corresponding paint formats at each level of the graph),
+  but can differ in the variable fields (e.g. PaintSolid.Alpha). PaintVar* tables
+  are produced when this happens and a VarStore/DeltaSetIndexMap is added to the
+  variable COLR table. It is possible for non-default masters to be 'sparse', i.e.
+  omit some of the color glyphs present in the default master.
+- [feaLib] Let the Parser set nameIDs 1 through 6 that were previously reserved (#2675).
+- [varLib.varStore] Support NO_VARIATION_INDEX in optimizer and instancer.
+- [feaLib] Show all missing glyphs at once at end of parsing (#2665).
+- [varLib.iup] Rewrite force-set conditions and limit DP loopback length (#2651).
+  For Noto Sans, IUP time drops from 23s down to 9s, with only a slight size increase
+  in the final font. This basically turns the algorithm from O(n^3) into O(n).
+- [featureVars] Report about missing glyphs in substitution rules (#2654).
+- [mutator/instancer] Added CLI flag to --no-recalc-timestamp (#2649).
+- [SVG] Allow individual SVG documents in the SVG OT table to be compressed or uncompressed,
+  and remember that when roundtripping to/from ttx. The SVG.docList is now a list
+  of SVGDocument namedtuple-like dataclass containing an extra ``compressed`` field,
+  and no longer a bare 3-tuple (#2645).
+- [designspaceLib] Check for descriptor types with hasattr() to allow custom classes
+  that don't inherit the default descriptors (#2634).
+- [subset] Enable sharing across subtables of extension lookups for harfbuzz packing
+  (#2626). Updated how table packing falls back to fontTools from harfbuzz (#2668).
+- [subset] Updated default feature tags following current HarfBuzz (#2637).
+- [svgLib] Fixed regex for real number to support e.g. 1e-4 in addition to 1.0e-4.
+  Support parsing negative rx, ry on arc commands (#2596, #2611).
+- [subset] Fixed subsetting SinglePosFormat2 when ValueFormat=0 (#2603).
+
+4.33.3 (released 2022-04-26)
+----------------------------
+
+- [designspaceLib] Fixed typo in ``deepcopyExceptFonts`` method, which prevented font
+  references from being transferred (#2600). Fixed another typo in the name of ``Range``
+  dataclass's ``__post_init__`` magic method (#2597).
+
+4.33.2 (released 2022-04-22)
+----------------------------
+
+- [otBase] Make logging less verbose when harfbuzz fails to serialize. Do not exit
+  at the first failure but continue attempting to fix offset overflow errors using
+  the pure-python serializer even when the ``USE_HARFBUZZ_REPACKER`` option was
+  explicitly set to ``True``. This is normal with fonts with relatively large
+  tables, at least until hb.repack implements proper table splitting.
+
+4.33.1 (released 2022-04-22)
+----------------------------
+
+- [otlLib] Put back the ``FONTTOOLS_GPOS_COMPACT_MODE`` environment variable to fix
+  regression in ufo2ft (and thus fontmake) introduced with v4.33.0 (#2592, #2593).
+  This is deprecated and will be removed once ufo2ft gets updated to use the new
+  config setup.
+
+4.33.0 (released 2022-04-21)
+----------------------------
+
+- [OS/2 / merge] Automatically recalculate ``OS/2.xAvgCharWidth`` after merging
+  fonts with ``fontTools.merge`` (#2591, #2538).
+- [misc/config] Added ``fontTools.misc.configTools`` module, a generic configuration
+  system (#2416, #2439).
+  Added ``fontTools.config`` module, a fontTools-specific configuration
+  system using ``configTools`` above.
+  Attached a ``Config`` object to ``TTFont``.
+- [otlLib] Replaced environment variable for GPOS compression level with an
+  equivalent option using the new config system.
+- [designspaceLib] Incremented format version to 5.0 (#2436).
+  Added discrete axes, variable fonts, STAT information, either design- or
+  user-space location on instances.
+  Added ``fontTools.designspaceLib.split`` module to split a designspace
+  into sub-spaces that interpolate and that represent the variable fonts
+  listed in the document.
+  Made instance names optional, and allowed computing them from STAT data instead.
+  Added ``fontTools.designspaceLib.statNames`` module.
+  Allow instances to have the same location as a previously defined STAT label.
+  Deprecated some attributes:
+  ``SourceDescriptor``: ``copyLib``, ``copyInfo``, ``copyGroups``, ``copyFeatures``.
+  ``InstanceDescriptor``: ``kerning``, ``info``; ``glyphs``: use rules or sparse
+  sources.
+  For both, ``location``: use the more explicit ``designLocation``.
+  Note: all are soft deprecations and existing code should keep working.
+  Updated documentation for Python methods and the XML format.
+- [varLib] Added ``build_many`` to build several variable fonts from a single
+  designspace document (#2436).
+  Added ``fontTools.varLib.stat`` module to build STAT tables from a designspace
+  document.
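+
+  A minimal sketch of the new ``build_many`` entry point (hypothetical file
+  names; as I read the API, it returns one ``TTFont`` per variable font defined
+  in the document, keyed by name):
+
+  .. code-block:: python
+
+      from fontTools import varLib
+      from fontTools.designspaceLib import DesignSpaceDocument
+
+      ds = DesignSpaceDocument.fromfile("MyFamily.designspace")  # hypothetical
+      for name, vf in varLib.build_many(ds).items():
+          vf.save(f"{name}.ttf")
+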
+- [otBase] Try to use the Harfbuzz Repacker for packing GSUB/GPOS tables when
+  ``uharfbuzz`` python bindings are available (#2552). Disable it by setting the
+  "fontTools.ttLib.tables.otBase:USE_HARFBUZZ_REPACKER" config option to ``False``.
+  If the option is set explicitly to ``True`` but ``uharfbuzz`` can't be imported
+  or fails to serialize for any reason, an error will be raised (ImportError or
+  uharfbuzz errors).
+- [CFF/T2] Ensure that ``pen.closePath()`` gets called for CFF2 charstrings (#2577).
+  Handle implicit CFF2 closePath within ``T2OutlineExtractor`` (#2580).
+
+4.32.0 (released 2022-04-08)
+----------------------------
+
+- [otlLib] Disable GPOS7 optimization to work around a bug in Apple CoreText.
+  Always force Chaining GPOS8 for now (#2540).
+- [glifLib] Added ``outputImpliedClosingLine=False`` parameter to ``Glyph.draw()``,
+  to control behaviour of ``PointToSegmentPen`` (6b4e2e7).
+- [varLib.interpolatable] Check for wrong contour starting point (#2571).
+- [cffLib] Remove leftover ``GlobalState`` class and fix calls to ``TopDictIndex()``
+  (#2569, #2570).
+- [instancer] Clear ``AxisValueArray`` if it is empty after instantiating (#2563).
+
+4.31.2 (released 2022-03-22)
+----------------------------
+
+- [varLib] Fixed instantiation of GPOS SinglePos values (#2555).
+
+4.31.1 (released 2022-03-18)
+----------------------------
+
+- [subset] Fixed subsetting OT-SVG when the glyph id attribute is on the root ``<svg>``
+  element (#2553).
+
+4.31.0 (released 2022-03-18)
+----------------------------
+
+- [ttCollection] Fixed 'ResourceWarning: unclosed file' warning (#2549).
+- [varLib.merger] Handle merging SinglePos with valueformat=0 (#2550).
+- [ttFont] Update glyf's glyphOrder when calling TTFont.setGlyphOrder() (#2544).
+- [ttFont] Added ``ensureDecompiled`` method to load all tables irrespective
+  of the ``lazy`` attribute (#2551).
+- [otBase] Added ``iterSubTable`` method to iterate over BaseTable's children of
+  type BaseTable; useful for traversing a tree of otTables (#2551).
+
+4.30.0 (released 2022-03-10)
+----------------------------
+
+- [varLib] Added debug logger showing the glyph name for which ``gvar`` is built (#2542).
+- [varLib.errors] Fixed undefined names in ``FoundANone`` and ``UnsupportedFormat``
+  exceptions (ac4d5611).
+- [otlLib.builder] Added ``windowsNames`` and ``macNames`` (bool) parameters to the
+  ``buildStatTable`` function, so that one can select whether to add only one or both
+  of the two sets (#2528).
+- [t1Lib] Added the ability to recreate PostScript stream (#2504).
+- [name] Added ``getFirstDebugName``, ``getBest{Family,SubFamily,Full}Name`` methods (#2526).
+
+4.29.1 (released 2022-02-01)
+----------------------------
+
+- [colorLib] Fixed rounding issue with radial gradient's start/end circles inside
+  one another (#2521).
+- [freetypePen] Handle rotate/skew transform when auto-computing width/height of the
+  buffer; raise PenError when moveTo is missing (#2517).
+
+4.29.0 (released 2022-01-24)
+----------------------------
+
+- [ufoLib] Fixed illegal characters and expanded reserved filenames (#2506).
+- [COLRv1] Don't emit useless PaintColrLayers of length 1 in LayerListBuilder (#2513).
+- [ttx] Removed legacy ``waitForKeyPress`` method on Windows (#2509).
+- [pens] Added FreeTypePen that uses ``freetype-py`` and the pen protocol for
+  rasterizing outline paths (#2494).
+- [unicodedata] Updated the script direction list to Unicode 14.0 (#2484).
+  Bumped unicodedata2 dependency to 14.0 (#2499).
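+
+  A minimal sketch of the helpers this data feeds (assuming the public
+  ``fontTools.unicodedata`` functions behave as documented):
+
+  .. code-block:: python
+
+      from fontTools import unicodedata
+
+      # Script of a codepoint, and the horizontal direction of that script
+      print(unicodedata.script("\u0627"))                     # 'Arab'
+      print(unicodedata.script_horizontal_direction("Arab"))  # 'RTL'
+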
+- [psLib] Fixed type of ``fontName`` in ``suckfont`` (#2496).
+
+4.28.5 (released 2021-12-19)
+----------------------------
+
+- [svgPathPen] Continuation of #2471: make sure all occurrences of ``str()`` are now
+  replaced with user-defined ``ntos`` callable.
+- [merge] Refactored code into submodules, plus several bugfixes and improvements:
+  fixed duplicate-glyph-resolution GSUB-lookup generation code; use tolerance in glyph
+  comparison for empty glyph's width; ignore space of default ignorable glyphs;
+  downgrade duplicates-resolution missing-GSUB from assert to warn; added --drop-tables
+  option (#2473, #2475, #2476).
+
+4.28.4 (released 2021-12-15)
+----------------------------
+
+- [merge] Merge GDEF marksets in Lookups properly (#2474).
+- [feaLib] Have ``fontTools feaLib`` script exit with an error code when the build
+  fails (#2459).
+- [svgPathPen] Added ``ntos`` option to customize number formatting (e.g. rounding) (#2471).
+- [subset] Speed up subsetting of large CFF fonts (#2467).
+- [otTables] Speculatively promote lookups to extension to speed up compilation. If the
+  offset to lookup N is too big to fit in a ushort, the offset to lookup N+1 is going to
+  be too big as well, so we promote to extension all lookups from lookup N onwards (#2465).
+
+4.28.3 (released 2021-12-03)
+----------------------------
+
+- [subset] Fixed bug while subsetting ``COLR`` table, whereby incomplete layer records
+  pointing to missing glyphs were being retained, leading to ``struct.error`` upon
+  compiling. Make it so that ``glyf`` glyph closure, which follows the ``COLR`` glyph
+  closure, does not influence the ``COLR`` table subsetting (#2461, #2462).
+- [docs] Fully document the ``cmap`` and ``glyf`` tables (#2454, #2457).
+- [colorLib.unbuilder] Fixed CLI by deleting no longer existing parameter (180bb1867).
+
+4.28.2 (released 2021-11-22)
+----------------------------
+
+- [otlLib] Remove duplicates when building coverage (#2433).
+- [docs] Add interrogate configuration (#2443).
+- [docs] Remove comment about missing “start” optional argument to ``calcChecksum`` (#2448).
+- [cu2qu/cli] Adapt to the latest ufoLib2.
+- [subset] Support subsetting SVG table and remove it from the list of drop-by-default
+  tables (#534).
+- [subset] Add ``--pretty-svg`` option to pretty-print SVG table contents (#2452).
+- [merge] Support merging ``CFF`` tables (CID-keyed ``CFF`` is still not supported) (#2447).
+- [merge] Support ``--output-file`` (#2447).
+- [docs] Split table docs into individual pages (#2444).
+- [feaLib] Forbid empty classes (#2446).
+- [docs] Improve documentation for ``fontTools.ttLib.ttFont`` (#2442).
+
+4.28.1 (released 2021-11-08)
+----------------------------
+
+- [subset] Fixed AttributeError while traversing a color glyph's Paint graph when there is no
+  LayerList, which is optional (#2441).
+
+4.28.0 (released 2021-11-05)
+----------------------------
+
+- Dropped support for EOL Python 3.6, require Python 3.7 (#2417).
+- [ufoLib/glifLib] Make filename-clash checks faster by using a set instead of a list (#2422).
+- [subset] Don't crash if optional ClipList and LayerList are ``None`` (empty) (#2424, #2439).
+- [OT-SVG] Removed support for old deprecated version 1 and embedded color palettes,
+  which were never officially part of the OpenType SVG spec. Upon compile, reuse offsets
+  to SVG documents that are identical (#2430).
+- [feaLib] Added support for Variable Feature File syntax. This is experimental and subject
+  to change until it is finalized in the Adobe FEA spec (#2432).
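+
+  A minimal sketch of the builder API this syntax plugs into (hypothetical font
+  and glyph names; a plain, non-variable feature is shown since the variable
+  syntax itself is still experimental per this entry):
+
+  .. code-block:: python
+
+      from fontTools.feaLib.builder import addOpenTypeFeaturesFromString
+      from fontTools.ttLib import TTFont
+
+      font = TTFont("MyFont.ttf")  # hypothetical font containing f, i, f_i
+      addOpenTypeFeaturesFromString(font, """
+          feature liga {
+              sub f i by f_i;
+          } liga;
+      """)
+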
+- [unicodedata] Update Scripts/ScriptExtensions/Blocks to UnicodeData 14.0 (#2437).
+
+4.27.1 (released 2021-09-23)
+----------------------------
+
+- [otlLib] Fixed error when chained contextual lookup builder overflows (#2404, #2411).
+- [bezierTools] Fixed two floating-point bugs: one when computing ``t`` for a point
+  lying on an almost horizontal/vertical line; another when computing the intersection
+  point between a curve and a line (#2413).
+
+4.27.0 (released 2021-09-14)
+----------------------------
+
+- [ttLib/otTables] Cleaned up virtual GID handling: allow virtual GIDs in ``Coverage``
+  and ``ClassDef`` readers; removed unused ``allowVID`` argument from ``TTFont``
+  constructor, and ``requireReal`` argument in ``TTFont.getGlyphID`` method.
+  Make ``TTFont.setGlyphOrder`` clear reverse glyphOrder map, and assume ``glyphOrder``
+  internal attribute is never modified outside setGlyphOrder; added ``TTFont.getGlyphNameMany``
+  and ``getGlyphIDMany`` (#1536, #1654, #2334, #2398).
+- [py23] Dropped internal use of ``fontTools.py23`` module to fix deprecation warnings
+  in client code that imports from fontTools (#2234, #2399, #2400).
+- [subset] Fix subsetting COLRv1 clip boxes when font is loaded lazily (#2408).
+
+4.26.2 (released 2021-08-09)
+----------------------------
+
+- [otTables] Added missing ``CompositeMode.PLUS`` operator (#2390).
+
+4.26.1 (released 2021-08-03)
+----------------------------
+
+- [transform] Added ``transformVector`` and ``transformVectors`` methods to the
+  ``Transform`` class. Similar to ``transformPoint`` but ignore the translation
+  part (#2386).
+
+4.26.0 (released 2021-08-03)
+----------------------------
+
+- [xmlWriter] Default to ``"\n"`` for ``newlinestr`` instead of platform-specific
+  ``os.linesep`` (#2384).
+- [otData] Define COLRv1 ClipList and ClipBox (#2379).
+- [removeOverlaps/instancer] Added --ignore-overlap-errors option to work around
+  Skia PathOps.Simplify bug (#2382, #2363, google/fonts#3365).
+- NOTE: This will be the last version to support Python 3.6. FontTools will require
+  Python 3.7 or above from the next release (#2350).
+
+4.25.2 (released 2021-07-26)
+----------------------------
+
+- [COLRv1] Various changes to sync with the latest COLRv1 draft spec. In particular:
+  define COLR.VarIndexMap, remove/inline ColorIndex struct, add VarIndexBase to
+  ``PaintVar*`` tables (#2372);
+  add reduced-precision specialized transform Paints;
+  define Angle as fraction of half circle encoded as F2Dot14;
+  use FWORD (int16) for all Paint center coordinates;
+  change PaintTransform to have an offset to Affine2x3.
+- [ttLib] When importing XML, only set sfntVersion if the font has no reader and is
+  empty (#2376).
+
+4.25.1 (released 2021-07-16)
+----------------------------
+
+- [ttGlyphPen] Fixed bug in ``TTGlyphPointPen``, whereby open contours (i.e. starting
+  with segmentType "move") would throw ``NotImplementedError``. They are now treated
+  as if they are closed, like with the ``TTGlyphPen`` (#2364, #2366).
+
+4.25.0 (released 2021-07-05)
+----------------------------
+
+- [tfmLib] Added new library for parsing TeX Font Metric (TFM) files (#2354).
+- [TupleVariation] Make the order of shared tuples deterministic on Python < 3.7, where
+  Counter (subclass of dict) doesn't remember insertion order (#2351, #2353).
+- [otData] Renamed COLRv1 structs to remove 'v1' suffix and match the updated draft
+  spec: 'LayerV1List' -> 'LayerList', 'BaseGlyphV1List' -> 'BaseGlyphList',
+  'BaseGlyphV1Record' -> 'BaseGlyphPaintRecord' (#2346).
+  Added 8 new ``PaintScale*`` tables: with/without centers, uniform vs non-uniform.
+  Added ``*AroundCenter`` variants to ``PaintRotate`` and ``PaintSkew``: the default
+  versions no longer have centerX/Y, but default to the origin.
+  ``PaintRotate``, ``PaintSkew`` and ``PaintComposite`` formats were re-numbered.
+  NOTE: these are breaking changes; clients using the experimental COLRv1 API will
+  have to be updated (#2348).
+- [pointPens] Allow ``GuessSmoothPointPen`` to accept a tolerance. Fixed call to
+  ``math.atan2`` with x/y parameters inverted. Sync the code with fontPens (#2344).
+- [post] Fixed parsing ``post`` table format 2.0 when it contains extra garbage
+  at the end of the stringData array (#2314).
+- [subset] Drop empty features, unless it is 'size' with a FeatureParams table (#2324).
+- [otlLib] Added ``otlLib.optimize`` module; added GPOS compaction algorithm.
+  The compaction can be run on existing fonts with ``fonttools otlLib.optimize``
+  or using the snippet ``compact_gpos.py``. There's experimental support for
+  compacting fonts at compilation time using an environment variable, but that
+  might be removed later (#2326).
+
+4.24.4 (released 2021-05-25)
+----------------------------
+
+- [subset/instancer] Fixed ``AttributeError`` when instantiating a VF that
+  contains GPOS ValueRecords with ``Device`` tables but without the respective
+  non-Device values (e.g. ``XAdvDevice`` without ``XAdvance``). When not
+  explicitly set, the latter are assumed to be 0 (#2323).
+
+4.24.3 (released 2021-05-20)
+----------------------------
+
+- [otTables] Fixed ``AttributeError`` in methods that split LigatureSubst,
+  MultipleSubst and AlternateSubst subtables when an offset overflow occurs.
+  The ``Format`` attribute was removed in v4.22.0 (#2319).
+
+4.24.2 (released 2021-05-20)
+----------------------------
+
+- [ttGlyphPen] Fixed typing annotation of TTGlyphPen glyphSet parameter (#2315).
+- Fixed two instances of DeprecationWarning: invalid escape sequence (#2311).
+
+4.24.1 (released 2021-05-20)
+----------------------------
+
+- [subset] Fixed AttributeError when SinglePos subtable has None Value (ValueFormat 0)
+  (#2312, #2313).
+
+4.24.0 (released 2021-05-17)
+----------------------------
+
+- [pens] Add ``ttGlyphPen.TTGlyphPointPen`` similar to ``TTGlyphPen`` (#2205).
+
+4.23.1 (released 2021-05-14)
+----------------------------
+
+- [subset] Fix ``KeyError`` after subsetting ``COLR`` table that initially contains
+  both v0 and v1 color glyphs when the subset only requested v1 glyphs; we were
+  not pruning the v0 portion of the table (#2308).
+- [colorLib] Set ``LayerV1List`` attribute to ``None`` when empty, it's optional
+  in COLRv1 (#2308).
+
+4.23.0 (released 2021-05-13)
+----------------------------
+
+- [designspaceLib] Allow using ``\\UNC`` absolute paths on Windows (#2299, #2306).
+- [varLib.merger] Fixed bug where ``VarLibMergeError`` was raised with incorrect
+  parameters (#2300).
+- [feaLib] Allow substituting a glyph class with ``NULL`` to delete multiple glyphs
+  (#2303).
+- [glyf] Fixed ``NameError`` exception in ``getPhantomPoints`` (#2295, #2305).
+- [removeOverlaps] Retry pathops.simplify after rounding path coordinates to integers
+  if it fails the first time using floats, to work around a rare and hard to debug
+  Skia bug (#2288); see the sketch below.
+- [varLib] Added support for building, reading, writing and optimizing 32-bit
+  ``ItemVariationStore`` as used in COLRv1 table (#2285).
+- [otBase/otConverters] Add array readers/writers for int types (#2285).
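+
+A minimal sketch of the ``removeOverlaps`` entry above (hypothetical paths;
+requires the optional ``skia-pathops`` module, and the integer-rounding retry
+happens internally):
+
+.. code-block:: python
+
+    from fontTools.ttLib import TTFont
+    from fontTools.ttLib.removeOverlaps import removeOverlaps
+
+    font = TTFont("Static.ttf")  # hypothetical static TrueType font
+    removeOverlaps(font)         # falls back to rounded coordinates if pathops fails
+    font.save("Static-clean.ttf")
+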
+- [feaLib] Allow more than one lookahead glyph/class in contextual positioning with
+  "value at end" (#2293, #2294).
+- [COLRv1] Default varIdx should be 0xFFFFFFFF (#2297, #2298).
+- [pens] Make RecordingPointPen actually pass on identifiers; replace asserts with
+  explicit ``PenError`` exception (#2284).
+- [mutator] Round lsb for CFF2 fonts as well (#2286).
+
+4.22.1 (released 2021-04-26)
+----------------------------
+
+- [feaLib] Skip references to named lookups if the lookup block definition
+  is empty, similarly to makeotf. This also fixes an ``AttributeError`` while
+  generating ``aalt`` feature (#2276, #2277).
+- [subset] Fixed bug with ``--no-hinting`` implementation for Device tables (#2272,
+  #2275). The previous code was always dropping Device tables if no-hinting was
+  requested, but some Device tables (DeltaFormat=0x8000) are also used to encode
+  variation indices and need to be retained.
+- [otBase] Fixed bug in getting the ValueRecordSize when decompiling ``MVAR``
+  table with ``lazy=True`` (#2273, #2274).
+- [varLib/glyf/gvar] Optimized and simplified ``GlyphCoordinates`` and
+  ``TupleVariation`` classes, use ``bytearray`` where possible, refactored
+  phantom-points calculations. We measured about 30% speedup in total time
+  of loading master ttfs, building gvar, and saving (#2261, #2266).
+- [subset] Fixed ``AssertionError`` while pruning unused CPAL palettes when
+  ``0xFFFF`` is present (#2257, #2259).
+
+4.22.0 (released 2021-04-01)
+----------------------------
+
+- [ttLib] Remove .Format from Coverage, ClassDef, SingleSubst, LigatureSubst,
+  AlternateSubst, MultipleSubst (#2238).
+  ATTENTION: This will change your TTX dumps!
+- [misc.arrayTools] Move Vector to its own submodule, and rewrite as a tuple
+  subclass (#2201).
+- [docs] Added a terminology section for varLib (#2209).
+- [varLib] Move rounding to VariationModel, to avoid error accumulation from
+  multiple deltas (#2214).
+- [varLib] Explain merge errors in more human-friendly terms (#2223, #2226).
+- [otlLib] Correct some documentation (#2225).
+- [varLib/otlLib] Allow merging into VariationFont without first saving GPOS
+  PairPos2 (#2229).
+- [subset] Improve PairPosFormat2 subsetting (#2221).
+- [ttLib] TTFont.save: create file on disk as late as possible (#2253).
+- [cffLib] Add missing CFF2 dict operators LanguageGroup and ExpansionFactor
+  (#2249).
+  ATTENTION: This will change your TTX dumps!
+
+4.21.1 (released 2021-02-26)
+----------------------------
+
+- [pens] Reverted breaking change that turned ``AbstractPen`` and ``AbstractPointPen``
+  into abstract base classes (#2164, #2198).
+
+4.21.0 (released 2021-02-26)
+----------------------------
+
+- [feaLib] Indent anchor statements in ``asFea()`` to make them more legible and
+  diff-able (#2193).
+- [pens] Turn ``AbstractPen`` and ``AbstractPointPen`` into abstract base classes
+  (#2164).
+- [feaLib] Added support for parsing and building ``STAT`` table from AFDKO feature
+  files (#2039).
+- [instancer] Added option to update name table of generated instance using ``STAT``
+  table's axis values (#2189).
+- [bezierTools] Added functions to compute bezier point-at-time, as well as line-line,
+  curve-line and curve-curve intersections (#2192).
+
+4.20.0 (released 2021-02-15)
+----------------------------
+
+- [COLRv1] Added ``unbuildColrV1`` to deconstruct COLRv1 otTables to raw json-able
+  data structure; it does the reverse of ``buildColrV1`` (#2171).
+- [feaLib] Allow ``sub X by NULL`` sequence to delete a glyph (#2170).
+- [arrayTools] Fixed ``Vector`` division (#2173).
+- [COLRv1] Define new ``PaintSweepGradient`` (#2172).
+- [otTables] Moved ``Paint.Format`` enum class outside of ``Paint`` class definition,
+  now named ``PaintFormat``. It was clashing with paint instance ``Format`` attribute
+  and thus was breaking lazy load of COLR table which relies on magic ``__getattr__``
+  (#2175).
+- [COLRv1] Replace hand-coded builder functions with otData-driven dynamic
+  implementation (#2181).
+- [COLRv1] Define additional static (non-variable) Paint formats (#2181).
+- [subset] Added support for subsetting COLR v1 and CPAL tables (#2174, #2177).
+- [fontBuilder] Allow ``setupFvar`` to optionally take ``designspaceLib.AxisDescriptor``
+  objects. Added new ``setupAvar`` method. Support localised names for axes and
+  named instances (#2185).
+
+4.19.1 (released 2021-01-28)
+----------------------------
+
+- [woff2] An initial off-curve point with an overlap flag now stays an off-curve
+  point after compression.
+
+4.19.0 (released 2021-01-25)
+----------------------------
+
+- [codecs] Handle ``errors`` parameter different from 'strict' for the custom
+  extended mac encodings (#2137, #2132).
+- [featureVars] Raise better error message when a script is missing the required
+  default language system (#2154).
+- [COLRv1] Avoid abrupt change caused by rounding ``PaintRadialGradient.c0`` when
+  the start circle almost touches the end circle's perimeter (#2148).
+- [COLRv1] Support building unlimited lists of paints as 255-ary trees of
+  ``PaintColrLayers`` tables (#2153).
+- [subset] Prune redundant format-12 cmap subtables when all non-BMP characters
+  are dropped (#2146).
+- [basePen] Raise ``MissingComponentError`` instead of bare ``KeyError`` when a
+  referenced component is missing (#2145).
+
+4.18.2 (released 2020-12-16)
+----------------------------
+
+- [COLRv1] Implemented ``PaintTranslate`` paint format (#2129).
+- [varLib.cff] Fixed unbound local variable error (#1787).
+- [otlLib] Don't crash when creating OpenType class definitions if some glyphs
+  occur more than once (#2125).
+
+4.18.1 (released 2020-12-09)
+----------------------------
+
+- [colorLib] Speed optimization for ``LayerV1ListBuilder`` (#2119).
+- [mutator] Fixed missing tab in ``interpolate_cff2_metrics`` (0957dc7a).
+
+4.18.0 (released 2020-12-04)
+----------------------------
+
+- [COLRv1] Update to latest draft: added ``PaintRotate`` and ``PaintSkew`` (#2118).
+- [woff2] Support new ``brotlicffi`` bindings for PyPy (#2117).
+- [glifLib] Added ``expectContentsFile`` parameter to ``GlyphSet``, for use when
+  reading existing UFOs, to comply with the specification stating that a
+  ``contents.plist`` file must exist in a glyph set (#2114).
+- [subset] Allow ``LangSys`` tags in ``--layout-scripts`` option (#2112). For example:
+  ``--layout-scripts=arab.dflt,arab.URD,latn``; this will keep ``DefaultLangSys``
+  and ``URD`` language for ``arab`` script, and all languages for ``latn`` script.
+- [varLib.interpolatable] Allow UFOs to be checked; report open paths and non-existent
+  glyphs; add a ``--json`` option to produce a machine-readable list of
+  incompatibilities.
+- [pens] Added ``QuartzPen`` to create ``CGPath`` from glyph outlines on macOS.
+  Requires pyobjc (#2107).
+- [feaLib] You can export ``FONTTOOLS_LOOKUP_DEBUGGING=1`` to enable feature file
+  debugging info stored in ``Debg`` table (#2106); see the sketch below.
+- [otlLib] Build more efficient format 1 and format 2 contextual lookups whenever
+  possible (#2101).
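+
+A minimal sketch of the ``Debg`` flow from the ``[feaLib]`` entry above
+(hypothetical file names; assumes the environment variable is consulted when
+the build runs):
+
+.. code-block:: python
+
+    import os
+
+    os.environ["FONTTOOLS_LOOKUP_DEBUGGING"] = "1"  # enable before building
+
+    from fontTools.feaLib.builder import addOpenTypeFeatures
+    from fontTools.ttLib import TTFont
+
+    font = TTFont("Master.ttf")                # hypothetical input font
+    addOpenTypeFeatures(font, "features.fea")  # hypothetical feature file
+    print("Debg" in font)                      # debug info lands in the Debg table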
+
+4.17.1 (released 2020-11-16)
+----------------------------
+
+- [colorLib] Fixed regression in 4.17.0 when building COLR v0 table; when color
+  layers are stored in UFO lib plist, we can't distinguish tuples from lists so
+  we need to accept either type (e5439eb9, googlefonts/ufo2ft/issues#426).
+
+4.17.0 (released 2020-11-12)
+----------------------------
+
+- [colorLib/otData] Updated to latest draft ``COLR`` v1 spec (#2092).
+- [svgLib] Fixed parsing error when arc commands' boolean flags are not separated
+  by space or comma (#2094).
+- [varLib] Interpret empty non-default glyphs as 'missing', if the default glyph is
+  not empty (#2082).
+- [feaLib.builder] Only stash lookup location for ``Debg`` if ``Builder.buildLookups_``
+  has cooperated (#2065, #2067).
+- [varLib] Fixed bug in VarStore optimizer (#2073, #2083).
+- [varLib] Add designspace lib key for custom feavar feature tag (#2080).
+- Add HashPointPen adapted from psautohint. With this pen, a hash value of a glyph
+  can be computed, which can later be used to detect glyph changes (#2005).
+
+4.16.1 (released 2020-10-05)
+----------------------------
+
+- [varLib.instancer] Fixed ``TypeError`` exception when instantiating a VF with
+  a GSUB table 1.1 in which ``FeatureVariations`` attribute is present but set to
+  ``None`` -- indicating that optional ``FeatureVariations`` is missing (#2077).
+- [glifLib] Make ``x`` and ``y`` attributes of the ``point`` element required
+  even when validation is turned off, and raise a meaningful ``GlifLibError``
+  message when that happens (#2075).
+
+4.16.0 (released 2020-09-30)
+----------------------------
+
+- [removeOverlaps] Added new module and ``removeOverlaps`` function that merges
+  overlapping contours and components in TrueType glyphs. It requires the
+  `skia-pathops <https://github.com/fonttools/skia-pathops>`__ module.
+  Note that removing overlaps invalidates the TrueType hinting (#2068).
+- [varLib.instancer] Added ``--remove-overlaps`` command-line option.
+  The ``overlap`` option in ``instantiateVariableFont`` now takes an ``OverlapMode``
+  enum: 0: KEEP_AND_DONT_SET_FLAGS, 1: KEEP_AND_SET_FLAGS (default), and 2: REMOVE.
+  The latter is equivalent to calling ``removeOverlaps`` on the generated static
+  instance. The option continues to accept ``bool`` values for backward compatibility.
+
+
+4.15.0 (released 2020-09-21)
+----------------------------
+
+- [plistlib] Added typing annotations to plistlib module. Set up mypy static
+  typechecker to run automatically on CI (#2061).
+- [ttLib] Implement private ``Debg`` table, a reverse-DNS namespaced JSON dict.
+- [feaLib] Optionally add an entry into the ``Debg`` table with the original
+  lookup name (if any), feature name / script / language combination (if any),
+  and original source filename and line location. Annotate the ttx output for
+  a lookup with the information from the Debg table (#2052).
+- [sfnt] Disabled checksum checking by default in ``SFNTReader`` (#2058).
+- [Docs] Document ``mtiLib`` module (#2027).
+- [varLib.interpolatable] Added checks for contour node count and operation type
+  of each node (#2054).
+- [ttLib] Added API to register custom table packer/unpacker classes (#2055).
+
+4.14.0 (released 2020-08-19)
+----------------------------
+
+- [feaLib] Allow anonymous classes in LookupFlags definitions (#2037).
+- [Docs] Better document DesignSpace rules processing order (#2041).
+- [ttLib] Fixed 21-year-old bug in ``maxp.maxComponentDepth`` calculation (#2044,
+  #2045).
+- [varLib.models] Fixed misspelled argument name in CLI entry point (81d0042a).
+- [subset] When subsetting GSUB v1.1, fixed TypeError by checking whether the
+  optional FeatureVariations table is present (e63ecc5b).
+- [Snippets] Added snippet to show how to decompose glyphs in a TTF (#2030).
+- [otlLib] Generate GSUB type 5 and GPOS type 7 contextual lookups where appropriate
+  (#2016).
+
+4.13.0 (released 2020-07-10)
+----------------------------
+
+- [feaLib/otlLib] Moved lookup subtable builders from feaLib to otlLib; refactored
+  some common code (#2004, #2007).
+- [docs] Document otlLib module (#2009).
+- [glifLib] Fixed bug with some UFO .glif filenames clashing on case-insensitive
+  filesystems (#2001, #2002).
+- [colorLib] Updated COLRv1 implementation following changes in the draft spec
+  (#2008, googlefonts/colr-gradients-spec#24).
+
+4.12.1 (released 2020-06-16)
+----------------------------
+
+- [_n_a_m_e] Fixed error in ``addMultilingualName`` with one-character names.
+  Only attempt to recover malformed UTF-16 data from a ``bytes`` string,
+  not from unicode ``str`` (#1997, #1998).
+
+4.12.0 (released 2020-06-09)
+----------------------------
+
+- [otlLib/varLib] Ensure that the ``AxisNameID`` in the ``STAT`` and ``fvar``
+  tables is greater than 255 as per OpenType spec (#1985, #1986).
+- [docs] Document more modules in ``fontTools.misc`` package: ``filenames``,
+  ``fixedTools``, ``intTools``, ``loggingTools``, ``macCreatorType``, ``macRes``,
+  ``plistlib`` (#1981).
+- [OS/2] Don't calculate whole sets of unicode codepoints, use faster and more
+  memory-efficient ranges and bisect lookups (#1984).
+- [voltLib] Support writing back abstract syntax tree as VOLT data (#1983).
+- [voltLib] Accept DO_NOT_TOUCH_CMAP keyword (#1987).
+- [subset/merge] Fixed a namespace clash involving a private helper class (#1955).
+
+4.11.0 (released 2020-05-28)
+----------------------------
+
+- [feaLib] Introduced ``includeDir`` parameter on Parser and IncludingLexer to
+  explicitly specify the directory to search when ``include()`` statements are
+  encountered (#1973).
+- [ufoLib] Silently delete duplicate glyphs within the same kerning group when reading
+  groups (#1970).
+- [ttLib] Set version of COLR table when decompiling COLRv1 (commit 9d8a7e2).
+
+4.10.2 (released 2020-05-20)
+----------------------------
+
+- [sfnt] Fixed ``NameError: SimpleNamespace`` while reading TTC header. The regression
+  was introduced with 4.10.1 after removing ``py23`` star import.
+
+4.10.1 (released 2020-05-19)
+----------------------------
+
+- [sfnt] Make ``SFNTReader`` pickleable even when TTFont is loaded with lazy=True
+  option and thus keeps a reference to an external file (#1962, #1967).
+- [feaLib.ast] Restore backward compatibility (broken in 4.10 with #1905) for
+  ``ChainContextPosStatement`` and ``ChainContextSubstStatement`` classes.
+  Make them accept either list of lookups or list of lists of lookups (#1961).
+- [docs] Document some modules in ``fontTools.misc`` package: ``arrayTools``,
+  ``bezierTools``, ``cliTools`` and ``eexec`` (#1956).
+- [ttLib._n_a_m_e] Fixed ``findMultilingualName()`` when name record's ``string`` is
+  encoded as bytes sequence (#1963).
+
+4.10.0 (released 2020-05-15)
+----------------------------
+
+- [varLib] Allow feature variations to be active across the entire space (#1957).
+- [ufoLib] Added support for ``formatVersionMinor`` in UFO's ``fontinfo.plist`` and for
+  ``formatMinor`` attribute in GLIF file as discussed in unified-font-object/ufo-spec#78.
+  No changes in reading or writing UFOs until an upcoming (non-0) minor update of the
+  UFO specification is published (#1786).
+- [merge] Fixed merging fonts with different versions of ``OS/2`` table (#1865, #1952).
+- [subset] Fixed ``AttributeError`` while subsetting ``ContextSubst`` and ``ContextPos``
+  Format 3 subtable (#1879, #1944).
+- [ttLib.table._m_e_t_a] If data happens to be ASCII, emit comment in TTX (#1938).
+- [feaLib] Support multiple lookups per glyph position (#1905).
+- [psCharStrings] Use inheritance to avoid repeated code in initializer (#1932).
+- [Doc] Improved documentation for the following modules: ``afmLib`` (#1933), ``agl``
+  (#1934), ``cffLib`` (#1935), ``cu2qu`` (#1937), ``encodings`` (#1940), ``feaLib``
+  (#1941), ``merge`` (#1949).
+- [Doc] Split off developer-centric info to new page, making front page of docs more
+  user-focused. List all utilities and sub-modules with brief descriptions.
+  Make README more concise and focused (#1914).
+- [otlLib] Add function to build STAT table from high-level description (#1926).
+- [ttLib._n_a_m_e] Add ``findMultilingualName()`` method (#1921).
+- [unicodedata] Update ``RTL_SCRIPTS`` for Unicode 13.0 (#1925).
+- [gvar] Sort ``gvar`` XML output by glyph name, not glyph order (#1907, #1908).
+- [Doc] Added help options to ``fonttools`` command line tool (#1913, #1920).
+  Ensure all fonttools CLI tools have help documentation (#1948).
+- [ufoLib] Only write fontinfo.plist when there actually is content (#1911).
+
+4.9.0 (released 2020-04-29)
+---------------------------
+
+- [subset] Fixed subsetting of FeatureVariations table. The subsetter no longer drops
+  FeatureVariationRecords that have empty substitutions as that will keep the search
+  going and thus change the logic. It will only drop empty records that occur at the
+  end of the FeatureVariationRecords array (#1881).
+- [subset] Remove FeatureVariations table and downgrade GSUB/GPOS to version 0x10000
+  when FeatureVariations contain no FeatureVariationRecords after subsetting (#1903).
+- [agl] Add support for legacy Adobe Glyph List of glyph names in ``fontTools.agl``
+  (#1895).
+- [feaLib] Ignore superfluous script statements (#1883).
+- [feaLib] Hide traceback by default on ``fonttools feaLib`` command line.
+  Use ``--traceback`` option to show (#1898).
+- [feaLib] Check lookup index in chaining sub/pos lookups and print better error
+  message (#1896, #1897).
+- [feaLib] Fix building chained alt substitutions (#1902).
+- [Doc] Included all fontTools modules in the sphinx-generated documentation, and
+  published it to ReadTheDocs for continuous documentation of the fontTools project
+  (#1333). Check it out at https://fonttools.readthedocs.io/. Thanks to Chris Simpkins!
+- [transform] The ``Transform`` class is now a subclass of ``typing.NamedTuple``. No
+  change in functionality (#1904).
+
+
+4.8.1 (released 2020-04-17)
+---------------------------
+
+- [feaLib] Fixed ``AttributeError: 'NoneType' has no attribute 'getAlternateGlyphs'``
+  when ``aalt`` feature references a chain contextual substitution lookup
+  (googlefonts/fontmake#648, #1878).
+
+4.8.0 (released 2020-04-16)
+---------------------------
+
+- [feaLib] If Parser is initialized without a ``glyphNames`` parameter, it cannot
+  distinguish between a glyph name containing a hyphen and a range of glyph names;
+  instead of raising an error, it now interprets them as literal glyph names, while
+  also outputting a logging warning to alert the user about the ambiguity (#1768, #1870).
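+
+  A minimal sketch of how passing ``glyphNames`` resolves the ambiguity
+  (hypothetical file and glyph names):
+
+  .. code-block:: python
+
+      from fontTools.feaLib.parser import Parser
+
+      # With a glyph set the parser can tell the literal glyph name "a-b" apart
+      # from the range "a - b"; without one it guesses and logs a warning.
+      doc = Parser("features.fea", glyphNames={"a", "b", "a-b"}).parse()
+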
+- [feaLib] When serializing AST to string, emit spaces around hyphens that denote
+  ranges. Also, fixed an issue with CID ranges when round-tripping AST->string->AST
+  (#1872).
+- [Snippets/otf2ttf] In otf2ttf.py script update LSB in hmtx to match xMin (#1873).
+- [colorLib] Added experimental support for building ``COLR`` v1 tables as per
+  the `colr-gradients-spec <https://github.com/googlefonts/colr-gradients-spec>`__
+  draft proposal. **NOTE**: both the API and the XML dump of ``COLR`` v1 are
+  susceptible to change while the proposal is being discussed and formalized (#1822).
+
+4.7.0 (released 2020-04-03)
+---------------------------
+
+- [cu2qu] Added ``fontTools.cu2qu`` package, imported from the original
+  `cu2qu <https://github.com/googlefonts/cu2qu>`__ project. The ``cu2qu.pens`` module
+  was moved to ``fontTools.pens.cu2quPen``. The optional cu2qu extension module
+  can be compiled by installing `Cython <https://cython.org>`__ before installing
+  fonttools from source (i.e. git repo or sdist tarball). The wheel package that
+  is published on PyPI (i.e. the one ``pip`` downloads, unless the ``--no-binary``
+  option is used) will continue to be pure-Python for now (#1868).
+
+4.6.0 (released 2020-03-24)
+---------------------------
+
+- [varLib] Added support for building variable ``BASE`` table version 1.1 (#1858).
+- [CPAL] Added ``fromRGBA`` method to ``Color`` class (#1861).
+
+
+4.5.0 (released 2020-03-20)
+---------------------------
+
+- [designspaceLib] Added ``add{Axis,Source,Instance,Rule}Descriptor`` methods to
+  ``DesignSpaceDocument`` class, to initialize new descriptor objects using keyword
+  arguments, and at the same time append them to the current document (#1860).
+- [unicodedata] Update to Unicode 13.0 (#1859).
+
+4.4.3 (released 2020-03-13)
+---------------------------
+
+- [varLib] Always build ``gvar`` table for TrueType-flavored Variable Fonts,
+  even if it contains no variation data. The table is required according to
+  the OpenType spec (#1855, #1857).
+
+4.4.2 (released 2020-03-12)
+---------------------------
+
+- [ttx] Annotate ``LookupFlag`` in XML dump with comment explaining what bits
+  are set and what they mean (#1850).
+- [feaLib] Added more descriptive message to ``IncludedFeaNotFound`` error (#1842).
+
+4.4.1 (released 2020-02-26)
+---------------------------
+
+- [woff2] Skip normalizing ``glyf`` and ``loca`` tables if these are missing from
+  a font (e.g. in NotoColorEmoji using ``CBDT/CBLC`` tables).
+- [timeTools] Use non-localized date parsing in ``timestampFromString``, to fix
+  error when non-English ``LC_TIME`` locale is set (#1838, #1839).
+- [fontBuilder] Make sure the CFF table generated by fontBuilder can be used by varLib
+  without having to compile and decompile the table first. This was breaking in
+  converting the CFF table to CFF2 due to some unset attributes (#1836).
+
+4.4.0 (released 2020-02-18)
+---------------------------
+
+- [colorLib] Added ``fontTools.colorLib.builder`` module, initially with ``buildCOLR``
+  and ``buildCPAL`` public functions; see the sketch below. More color font formats
+  will follow (#1827).
+- [fontBuilder] Added ``setupCOLR`` and ``setupCPAL`` methods (#1826).
+- [ttGlyphPen] Quantize ``GlyphComponent.transform`` floats to ``F2Dot14`` to fix
+  round-trip issue when computing bounding boxes of transformed components (#1830).
+- [glyf] If a component uses reference points (``firstPt`` and ``secondPt``) for
+  alignment (instead of X and Y offsets), compute the effective translation offset
+  *after* having applied any transform (#1831).
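+
+A minimal sketch of the ``colorLib.builder`` entry above (hypothetical glyph
+names; CPAL colors are RGBA tuples of floats in the 0-1 range):
+
+.. code-block:: python
+
+    from fontTools.colorLib.builder import buildCOLR, buildCPAL
+
+    # COLRv0: map each base glyph to (layer glyph, palette index) pairs
+    colr = buildCOLR({"A": [("A.layer0", 0), ("A.layer1", 1)]})
+    cpal = buildCPAL([[(0.9, 0.1, 0.1, 1.0), (0.1, 0.1, 0.9, 1.0)]])
+    # The returned table objects can be assigned into a TTFont, e.g.
+    # font["COLR"], font["CPAL"] = colr, cpal
+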
+- [glyf] When all glyphs have zero contours, compile ``glyf`` table data as a single
+  null byte in order to pass validation by OTS and Windows (#1829).
+- [feaLib] Parsing feature code now ensures that referenced glyph names are part of
+  the known glyph set, unless a glyph set was not provided.
+- [varLib] When filling in the default axis value for a missing location of a source or
+  instance, correctly map the value forward.
+- [varLib] The avar table can now contain mapping output values that are greater than
+  OR EQUAL to the preceding value, as the avar specification allows this.
+- [varLib] The errors of the module are now ordered hierarchically below VarLibError.
+  See #1821.
+
+4.3.0 (released 2020-02-03)
+---------------------------
+
+- [EBLC/CBLC] Fixed incorrect padding length calculation for Format 3 IndexSubTable
+  (#1817, #1818).
+- [varLib] Fixed error when merging OTL tables and TTFonts were loaded as ``lazy=True``
+  (#1808, #1809).
+- [varLib] Allow using master fonts containing a ``CFF2`` table when building VF (#1816).
+- [ttLib] Make ``recalcBBoxes`` option work also with ``CFF2`` table (#1816).
+- [feaLib] Don't reset ``lookupflag`` in lookups defined inside feature blocks.
+  They will now inherit the current ``lookupflag`` of the feature. This is what
+  Adobe ``makeotf`` also does in this case (#1815).
+- [feaLib] Fixed bug with mixed single/multiple substitutions. If a single substitution
+  involved a glyph class, we were incorrectly using only the first glyph in the class
+  (#1814).
+
+4.2.5 (released 2020-01-29)
+---------------------------
+
+- [feaLib] Do not fail on duplicate multiple substitutions, only warn (#1811).
+- [subset] Optimize SinglePos subtables to Format 1 if all ValueRecords are the same
+  (#1802).
+
+4.2.4 (released 2020-01-09)
+---------------------------
+
+- [unicodedata] Update RTL_SCRIPTS for Unicode 11 and 12.
+
+4.2.3 (released 2020-01-07)
+---------------------------
+
+- [otTables] Fixed bug when splitting ``MarkBasePos`` subtables as offsets overflow.
+  The mark class values in the split subtable were not being updated, leading to
+  invalid mark-base attachments (#1797, googlefonts/noto-source#145).
+- [feaLib] Only log a warning instead of error when features contain duplicate
+  substitutions (#1767).
+- [glifLib] Strip XML comments when parsing with lxml (#1784, #1785).
+
+4.2.2 (released 2019-12-12)
+---------------------------
+
+- [subset] Fixed issue with subsetting FeatureVariations table when the index
+  of features changes as features get dropped. The feature index needs to be
+  remapped to point to the index of the remaining features (#1777, #1782).
+- [fontBuilder] Added ``addFeatureVariations`` method to ``FontBuilder`` class. This
+  is a shorthand for calling ``featureVars.addFeatureVariations`` on the builder's
+  TTFont object (#1781).
+- [glyf] Fixed the flags bug in glyph.drawPoints() like we did for glyph.draw()
+  (#1771, #1774).
+
+4.2.1 (released 2019-12-06)
+---------------------------
+
+- [glyf] Use the ``flagOnCurve`` bit mask in ``glyph.draw()``, so that we ignore
+  the ``overlap`` flag that may be set when instantiating variable fonts (#1771).
+
+4.2.0 (released 2019-11-28)
+---------------------------
+
+- [pens] Added the following pens:
+
+  * ``roundingPen.RoundingPen``: filter pen that rounds coordinates and components'
+    offsets to integers;
+  * ``roundingPen.RoundingPointPen``: like the above, but using PointPen protocol.
+  * ``filterPen.FilterPointPen``: base class for filter point pens;
+  * ``transformPen.TransformPointPen``: filter point pen to apply affine transform;
+  * ``recordingPen.RecordingPointPen``: records and replays point-pen commands.
+
+- [ttGlyphPen] Always round float coordinates and component offsets to integers
+  (#1763).
+- [ufoLib] When converting kerning groups from UFO2 to UFO3, avoid confusing
+  groups with the same name as one of the glyphs (#1761, #1762,
+  unified-font-object/ufo-spec#98).
+
+4.1.0 (released 2019-11-18)
+---------------------------
+
+- [instancer] Implemented restricting axis ranges (level 3 partial instancing).
+  You can now pass ``{axis_tag: (min, max)}`` tuples as input to the
+  ``instantiateVariableFont`` function; see the sketch below. Note that changing
+  the default axis position is not supported yet. The command-line script also
+  accepts axis ranges in the form of colon-separated float values, e.g.
+  ``wght=400:700`` (#1753, #1537).
+- [instancer] Never drop STAT ``DesignAxis`` records, but only prune out-of-range
+  ``AxisValue`` records.
+- [otBase/otTables] Enforce that VarStore.RegionAxisCount == fvar.axisCount, even
+  when regions list is empty to appease OTS < v8.0 (#1752).
+- [designspaceLib] Defined new ``processing`` attribute for the ``<rules>`` element,
+  with values "first" or "last", plus other editorial changes to DesignSpace
+  specification. Bumped format version to 4.1 (#1750).
+- [varLib] Improved error message when masters' glyph orders do not match (#1758,
+  #1759).
+- [featureVars] Allow specifying a custom feature tag in ``addFeatureVariations``;
+  allow said feature to already exist, in which case we append new lookup indices
+  to existing features. Implemented ``<rules>`` attribute ``processing`` according to
+  DesignSpace specification update in #1750. Depending on this flag, we generate
+  either an 'rvrn' (always processed first) or a 'rclt' feature (follows lookup order,
+  therefore last) (#1747, #1625, #1371).
+- [ttCollection] Added support for context manager auto-closing via ``with`` statement
+  like with ``TTFont`` (#1751).
+- [unicodedata] Require unicodedata2 >= 12.1.0.
+- [py2.py3] Removed yet more PY2 vestiges (#1743).
+- [_n_a_m_e] Fixed issue when comparing NameRecords with different string types (#1742).
+- [fixedTools] Changed ``fixedToFloat`` to not do any rounding but simply return
+  ``value / (1 << precisionBits)``. Added ``floatToFixedToStr`` and
+  ``strToFixedToFloat`` functions to be used when loading from or dumping to XML.
+  Fixed values (e.g. fvar axes and instance coordinates, avar mappings, etc.)
+  are now stored as un-rounded decimal floats upon decompiling (#1740, #737).
+- [feaLib] Fixed handling of multiple ``LigatureCaret`` statements for the same glyph.
+  Only the first rule per glyph is used, additional ones are ignored (#1733).
+
+4.0.2 (released 2019-09-26)
+---------------------------
+
+- [voltLib] Added support for ``ALL`` and ``NONE`` in ``PROCESS_MARKS`` (#1732).
+- [Silf] Fixed issue in ``Silf`` table compilation and decompilation regarding str vs
+  bytes in python3 (#1728).
+- [merge] Handle duplicate glyph names better: instead of appending font index to
+  all glyph names, use code similar to what we use in ``post`` and ``CFF`` tables (#1729).
+
+4.0.1 (released 2019-09-11)
+---------------------------
+
+- [otTables] Support fixing offset overflows in ``MultipleSubst`` lookup subtables
+  (#1706).
+- [subset] Prune empty strikes in ``EBDT`` and ``CBDT`` table data (#1698, #1633).
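+
+A minimal sketch of the axis-range instancing described under 4.1.0 above
+(hypothetical font; per that entry, ranges restrict an axis but cannot move its
+default yet):
+
+.. code-block:: python
+
+    from fontTools.ttLib import TTFont
+    from fontTools.varLib.instancer import instantiateVariableFont
+
+    vf = TTFont("MyVF.ttf")  # hypothetical variable font
+    # Restrict wght to 400-700; axes not mentioned are left untouched
+    partial = instantiateVariableFont(vf, {"wght": (400, 700)})
+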
+- [pens] Fixed issue in ``PointToSegmentPen`` when the last point of a closed contour
+  has the same coordinates as the starting point and was incorrectly dropped (#1720).
+- [Graphite] Fixed ``Sill`` table output to pass OTS (#1705).
+- [name] Added ``removeNames`` method to ``table__n_a_m_e`` class (#1719).
+- [ttLib] Added aliases for renamed entries ``ascender`` and ``descender`` in
+  ``hhea`` table (#1715).
+
+4.0.0 (released 2019-08-22)
+---------------------------
+
+- NOTE: The v4.x version series only supports Python 3.6 or greater. You can keep
+  using fonttools 3.x if you need support for Python 2.
+- [py23] Removed all the python2-only code since it is no longer reachable, thus
+  unused; only the Python3 symbols were kept, but these are no-op. The module is now
+  DEPRECATED and will be removed in the future.
+- [ttLib] Fixed UnboundLocalError for empty loca/glyph tables (#1680). Also, allow
+  the glyf table to be incomplete when dumping to XML (#1681).
+- [varLib.models] Fixed KeyError while sorting masters when there are no on-axis
+  masters for a given axis (38a8eb0e).
+- [cffLib] Make sure glyph names are unique (#1699).
+- [feaLib] Fix feature parser to correctly handle octal numbers (#1700).
+
+\... see `here `__ for earlier changes
diff --git a/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/RECORD b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..0bd81c45b348fbe9de7b8143d34e6c13d682355b
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/RECORD
@@ -0,0 +1,359 @@
+../../../bin/fonttools,sha256=YclgV5_yeKcq0XV1rg7WMxXIrmOVkuyoyyHG_pgG9JQ,332
+../../../bin/pyftmerge,sha256=-BKFZV24__DoJqG3AXoglGDnkvMiHUH-1kNwzxzNW5I,329
+../../../bin/pyftsubset,sha256=VCe32L8FYXBnMZfHVIvQTJH9LT1uRQJiGipepHHZZPk,330
+../../../bin/ttx,sha256=yh_7t1kDty6MJQGKS35RPVHwlcpJGJxaHGekFluWmeE,327
+../../../share/man/man1/ttx.1,sha256=cLbm_pOOj1C76T2QXvDxzwDj9gk-GTd5RztvTMsouFw,5377
+fontTools/__init__.py,sha256=w-yY9D2nPQeBbS3lLIyNrc-4KGqgArD4IhQXdd8qioQ,183
+fontTools/__main__.py,sha256=VjkGh1UD-i1zTDA1dXo1uecSs6PxHdGQ5vlCk_mCCYs,925
+fontTools/afmLib.py,sha256=1MagIItOzRV4vV5kKPxeDZbPJsfxLB3wdHLFkQvl0uk,13164
+fontTools/agl.py,sha256=05bm8Uq45uVWW8nPbP6xbNgmFyxQr8sWhYAiP0VSjnI,112975
+fontTools/annotations.py,sha256=BdIIriNYDzBfgniwWFg_u71qLZnV0sCcn4-VAkXkYNM,1225
+fontTools/cffLib/CFF2ToCFF.py,sha256=7h0fK53ji4JlA4mAxlVb54eQTM4N2e3LAd4wwW3TLyE,8201
+fontTools/cffLib/CFFToCFF2.py,sha256=Qnk7lYlsTRHnlZQ6NXNdr_f4MJwZQ21kcS08KFbsyY8,10119
+fontTools/cffLib/__init__.py,sha256=62vpcR7u8cE407kXduAwnFttHnsoCpDQ7IBK-qOYFQ8,107886
+fontTools/cffLib/specializer.py,sha256=vsOPkR_jHNe6tESQEjmm0i76y7sWI5MKo3bsTmI3sNM,32609
+fontTools/cffLib/transforms.py,sha256=SEIZc1XxWYiVXVBsoNm6LTvM9SUN7Z76QOaSAlR1ZCo,17455
+fontTools/cffLib/width.py,sha256=IqGL0CLyCZqi_hvsHySG08qpYxS3kaqW-tsAT-bjHV4,6074
+fontTools/colorLib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+fontTools/colorLib/builder.py,sha256=kmO7OuudQQb3fEOS7aLzgTDVjqS9i2xIQmk9p1uBe8A,23008
+fontTools/colorLib/errors.py,sha256=CsaviiRxxrpgVX4blm7KCyK8553ljwL44xkJOeC5U7U,41
+fontTools/colorLib/geometry.py,sha256=3ScySrR2YDJa7d5K5_xM5Yt1-3NCV-ry8ikYA5VwVbI,5518
+fontTools/colorLib/table_builder.py,sha256=ZeltWY6n-YPiJv_hQ1iBXoEFAG70EKxZyScgsMKUFGU,7469
+fontTools/colorLib/unbuilder.py,sha256=iW-E5I39WsV82K3NgCO4Cjzwm1WqzGrtypHt8epwbHM,2142
+fontTools/config/__init__.py,sha256=JICOHIz06KuHCiBmrxj-ga19P6ZTuLXh0lHmPh-Ra1w,3154
+fontTools/cu2qu/__init__.py,sha256=Cuc7Uglb0nSgaraTxXY5J8bReznH5wApW0uakN7MycY,618 +fontTools/cu2qu/__main__.py,sha256=kTUI-jczsHeelULLlory74QEeFjZWp9zigCc7PrdVQY,92 +fontTools/cu2qu/benchmark.py,sha256=wasPJmf8q9k9UHjpHChC3WQAGbBAyHN9PvJzXvWC0Fw,1296 +fontTools/cu2qu/cli.py,sha256=MbAQnOpZwrUFe_tjAP3Tgf6uLdOgHlONUcPNeTXwH0Y,6076 +fontTools/cu2qu/cu2qu.c,sha256=Hacw9XncC5wYjSd-uT4MxoOQGDBQFnG8mLZrohGHtXQ,648759 +fontTools/cu2qu/cu2qu.cpython-311-x86_64-linux-gnu.so,sha256=KfjWBWgB189ICB-N49rcfA_iA3rR_B5j9D7g4FLtlME,1107736 +fontTools/cu2qu/cu2qu.py,sha256=6LTe1ZI-jxW8y79s_UFjbkeFoFleIekTLm2jAE-uqGQ,17986 +fontTools/cu2qu/errors.py,sha256=PyJNMy8lHDtKpfFkc0nkM8F4jNLZAC4lPQCN1Km4bpg,2441 +fontTools/cu2qu/ufo.py,sha256=g2NkcMqdS-t3tBtvcqtt4E02QVyQr3pTC1qXDRtI31U,12307 +fontTools/designspaceLib/__init__.py,sha256=whzi46QY0FK77k9iE646rGP4Ivlq9BEXKO0y7UU9Plo,129502 +fontTools/designspaceLib/__main__.py,sha256=xhtYXo1T1tsykhQDD0tcconSNYgWL5hoTBORpVDUYrc,103 +fontTools/designspaceLib/split.py,sha256=FB1NuvhUO453UXveQZi9oyrW_caoCPM3RADp1rYWkDs,19239 +fontTools/designspaceLib/statNames.py,sha256=gXGKWVr1ju2_oL-R_DkyoZ3GlI7mfLORovpk1Ebgmvc,9237 +fontTools/designspaceLib/types.py,sha256=ofK65qXNADqcpl7zI72Pa5s07-cm7G41iEmLVV44-Es,5320 +fontTools/encodings/MacRoman.py,sha256=4vEooUDm2gLCG8KIIDhRxm5-A64w7XrhP9cjDRr2Eo0,3576 +fontTools/encodings/StandardEncoding.py,sha256=Eo3AGE8FE_p-IVYYuV097KouSsF3UrXoRRN0XyvYbrs,3581 +fontTools/encodings/__init__.py,sha256=DJBWmoX_Haau7qlgmvWyfbhSzrX2qL636Rns7CG01pk,75 +fontTools/encodings/codecs.py,sha256=u50ruwz9fcRsrUrRGpR17Cr55Ovn1fvCHCKrElVumDE,4721 +fontTools/feaLib/__init__.py,sha256=jlIru2ghxvb1HhC5Je2BCXjFJmFQlYKpruorPoz3BvQ,213 +fontTools/feaLib/__main__.py,sha256=Df2PA6LXwna98lSXiL7R4as_ZEdWCIk3egSM5w7GpvM,2240 +fontTools/feaLib/ast.py,sha256=48Y6vpSD_wYfucWyh_bQtzf2AQFX-pOwBvsxdcpVDz0,74158 +fontTools/feaLib/builder.py,sha256=OI3slR3tTlH1LyWGHJMDw3jol5PzvRd8_T5j1603xZM,73968 +fontTools/feaLib/error.py,sha256=Bz_5tNcNVcY7_nrAmFlQNhQldtqZWd8WUGQ2E3PWhZo,648 +fontTools/feaLib/lexer.c,sha256=LWJfBpHqWqXW1IAGjiFT7pcXWmJWX55FjrQPvN3d9uo,747744 +fontTools/feaLib/lexer.cpython-311-x86_64-linux-gnu.so,sha256=SrwfAd4fYaSC52oLab-rPYSva9nucFX5oWdb1XhSuYs,1512184 +fontTools/feaLib/lexer.py,sha256=emyMPmRoqNZkzxnJyI6JRCCtXrbCOFofwa9O6ABGLiw,11121 +fontTools/feaLib/location.py,sha256=JXzHqGV56EHdcq823AwA5oaK05hf_1ySWpScbo3zGC0,234 +fontTools/feaLib/lookupDebugInfo.py,sha256=gVRr5-APWfT_a5-25hRuawSVX8fEvXVsOSLWkH91T2w,304 +fontTools/feaLib/parser.py,sha256=eM4ph-_6ab8EpAWqg9Ax7is5We-KwkzJYlui31mJSBc,99911 +fontTools/feaLib/variableScalar.py,sha256=f6sOg9cfFJRI3fw04uRohDeFux0xnZanaPT_lcxAVOw,4200 +fontTools/fontBuilder.py,sha256=d9bBaId9K6KPpR5nP0l5Ou_8Pp7hr4gRuLeQV0NmwUY,34280 +fontTools/help.py,sha256=bAjatvIhV7TJyXI7WhsxdYO4YVlhScZXu_kRtHANEPo,1125 +fontTools/merge/__init__.py,sha256=8i6ownyQTAOBKWnTEHvvCYFw64Mv7Z1HPBgJI-ZiuKo,8256 +fontTools/merge/__main__.py,sha256=hDx3gfbUBO83AJKumSEhiV-xqNTJNNgK2uFjazOGTmw,94 +fontTools/merge/base.py,sha256=l0G1Px98E9ZdVuFLMUBKWdtr7Jb8JX8vxcjeaDUUnzY,2389 +fontTools/merge/cmap.py,sha256=HpthxVH5lA7VegJ8yHoBjd9vrFBV7UB5OknKGYpxWY8,6728 +fontTools/merge/layout.py,sha256=fkMPGPLxEdxohS3scVM4W7LmNthSz-UPyocsffe2KqE,16075 +fontTools/merge/options.py,sha256=xko_1-WErcNQkirECzIOOYxSJR_bRtdQYQYOtmgccYI,2501 +fontTools/merge/tables.py,sha256=7SzXYL04awDEDhvU2-9T_8A2gAjvgGyYAHUICUJOpZg,10958 +fontTools/merge/unicode.py,sha256=kb1Jrfuoq1KUcVhhSKnflAED_wMZxXDjVwB-CI9k05Y,4273 
+fontTools/merge/util.py,sha256=BH3bZWNFy-Tsj1cth7aSpGVJ18YXKXqDakPn6Wzku6U,3378 +fontTools/misc/__init__.py,sha256=DJBWmoX_Haau7qlgmvWyfbhSzrX2qL636Rns7CG01pk,75 +fontTools/misc/arrayTools.py,sha256=jZk__GE-K9VViZE_H-LPPj0smWbKng-yfPE8BfGp8HI,11483 +fontTools/misc/bezierTools.c,sha256=7ymo3Ld3xnDe1fJBXiqMVMFzoYRc5hY9Qxby9g3r5XI,1836341 +fontTools/misc/bezierTools.cpython-311-x86_64-linux-gnu.so,sha256=w7zS1dxOUuv_na-nWXV-PzynrdSmhsGnj9ouqIFuwpU,5095248 +fontTools/misc/bezierTools.py,sha256=2xvnsKboUZ-Ln1bbiJk2392foh3vRhulmpqaWl1a-lQ,45240 +fontTools/misc/classifyTools.py,sha256=zcg3EM4GOerBW9c063ljaLllgeeZ772EpFZjp9CdgLI,5613 +fontTools/misc/cliTools.py,sha256=qCznJMLCQu3ZHQD_4ctUnr3TkfAUdkGl-UuxZUrppy0,1862 +fontTools/misc/configTools.py,sha256=tDede_q8h81w6mDh132_F5WzfWkwmDGAF8nwj3dHVus,11229 +fontTools/misc/cython.py,sha256=eyLcL2Bw-SSToYro8f44dkkYRlQfiFbhcza0afS-qHE,682 +fontTools/misc/dictTools.py,sha256=VxjarsGJuk_wa3z29FSCtKZNCFfXtMBiNEu0RPAlpDk,2417 +fontTools/misc/eexec.py,sha256=GNn2OCRvO1HbbIeDPxk9i0glO7cux_AQaoVMXhBR8y8,3331 +fontTools/misc/encodingTools.py,sha256=hCv5PFfnXQJVCZA8Wyn1vr3vzLBbUuEPtGk5CzWM9RY,2073 +fontTools/misc/enumTools.py,sha256=YQZW-d2ES9KFFkAXOUMIBbRUk6v_3BT6Q7lXE1ufhxA,502 +fontTools/misc/etree.py,sha256=ZzJc6TvAS579deAgZLVDvTY_HeTm-ZsKJ5s3LYhZSSY,16304 +fontTools/misc/filenames.py,sha256=MMCO3xjk1pcDc-baobcKd8IdoFPt-bcGqu8t8HUGAkI,8223 +fontTools/misc/filesystem/__init__.py,sha256=iwoOj6DpXKk8q-NRRHqOfRxFF6lcXIhsIA46j-cZswU,2011 +fontTools/misc/filesystem/_base.py,sha256=p74O7xREadfPQgzPJ9mP3ehu0ZHDgsmXlpsL9CnTRso,4010 +fontTools/misc/filesystem/_copy.py,sha256=ifMSs-A_bz1Aa4tIQrlUd9HtdJQ5fp5M3B6mbYuDtXI,1361 +fontTools/misc/filesystem/_errors.py,sha256=-YziRB1BT1I80ypmufvCR-M_4XoerCHyBVqX-cRnIzU,641 +fontTools/misc/filesystem/_info.py,sha256=pbV7bDTJ5F8ms6alK34J0FZYWzmRO7FT0NM3yRA3czo,2013 +fontTools/misc/filesystem/_osfs.py,sha256=RkKCE2IxcRaj7gyqFW10LhEm_-VJYtXxsS5s0DCXihM,5737 +fontTools/misc/filesystem/_path.py,sha256=frP6ZLmMeP9E3NiwoCbbgBPWpQbLBRh7T-0vOE-EuPo,1745 +fontTools/misc/filesystem/_subfs.py,sha256=vRotQwyLVfINbR88xIBQUbq9j4Kmg1_mJvEhnpvK_t4,3028 +fontTools/misc/filesystem/_tempfs.py,sha256=9FUXdCBTwFtZMFx8ghYuZVYoQdDb0tDB-jXNu3D-Qy0,924 +fontTools/misc/filesystem/_tools.py,sha256=r75dpadp7C9EdQ6r7pJQKZlCZDUJzVq2ikb_LXN-wCI,972 +fontTools/misc/filesystem/_walk.py,sha256=KMQ-GavWYr4SsA5V8ohLPmz3boilvY2P0JKrLoxW6NU,1655 +fontTools/misc/filesystem/_zipfs.py,sha256=i3qolbkDRntB_oL3v79KuEgfVlVojecPBnBA0X04PWc,6301 +fontTools/misc/fixedTools.py,sha256=XycD5QpaejJE-GFaFMlLlku6RG0ipJowuamMXjFrMjQ,7668 +fontTools/misc/intTools.py,sha256=l6pjk4UYlXcyLtfC0DdOC5RL6UJ8ihRR0zRiYow5xA8,586 +fontTools/misc/iterTools.py,sha256=17H6LPZszp32bTKoNorp6uZF1PKj47BAbe5QG8irUjo,390 +fontTools/misc/lazyTools.py,sha256=BC6MmF-OzJ3GrBD8TYDZ-VCSN4UOx0pN0r3oF4GSoiw,1020 +fontTools/misc/loggingTools.py,sha256=NOYROsLK5TzONK5967OGdVonNyXC6kP_CmPr7M2PW_c,19933 +fontTools/misc/macCreatorType.py,sha256=Je9jtqUr7EPbpH3QxlVl3pizoQ-1AOPMBIctHIMTM3k,1593 +fontTools/misc/macRes.py,sha256=GT_pnfPw2NCvvOF86nHLAnOtZ6SMHqEuLntaplXzvHM,8579 +fontTools/misc/plistlib/__init__.py,sha256=1HfhHPt3As6u2eRSlFfl6XdnXv_ypQImeQdWIw6wK7Y,21113 +fontTools/misc/plistlib/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fontTools/misc/psCharStrings.py,sha256=Zn8mr7NRTfEJbnaXbOnSarrFBPcvPM0AupAxF2C80LY,43468 +fontTools/misc/psLib.py,sha256=ioIPm5x3MHkBXF2vzNkC4iVZYobrkWcyvFhmYsjOrPY,12099 +fontTools/misc/psOperators.py,sha256=9SLl5PPBulLo0Xxg_dqlJMitNIBdiGKdkXhOWsNSYZE,15700 
+fontTools/misc/py23.py,sha256=aPVCEUz_deggwLBCeTSsccX6QgJavZqvdVtuhpzrPvA,2238 +fontTools/misc/roundTools.py,sha256=1RSXZ0gyi1qW42tz6WSBMJD1FlPdtgqKfWixVN9bd78,3173 +fontTools/misc/sstruct.py,sha256=vUODd2CKvHLtjr7yn1K94Hui_yxOPWKmlgAmBMm3KDQ,7009 +fontTools/misc/symfont.py,sha256=x5ZwqK9Ik9orG6qSftgVGygBFE1wTSngrMK2We1Z5AM,6977 +fontTools/misc/testTools.py,sha256=3vj_KllUQVEiVFbS0SzTmeuKv44-L-disI1dZ4XhOfw,7052 +fontTools/misc/textTools.py,sha256=wNjH5zl1v9qNfmTl4BL52IO2IG1H5xY3o_pslqPPRjc,3483 +fontTools/misc/timeTools.py,sha256=e9h5pgzL04tBDXmCv_8eRGB4boFV8GKXlS6dq3ggEpw,2234 +fontTools/misc/transform.py,sha256=OR8dPsAw87z77gkZQMq00iUkDWLIxYv-12XiKH1-erk,15798 +fontTools/misc/treeTools.py,sha256=tLWkwyDHeZUPVOGNnJeD4Pn7x2bQeZetwJKaEAW2J2M,1269 +fontTools/misc/vector.py,sha256=6lqZcDjAgHJFQgjzD-ULQ_PrigAMfeZKaBZmAfcC0ig,4062 +fontTools/misc/visitor.py,sha256=zwBAVfZ3MTsrbhNFj03pSSjNRyT6oGkare-kfWkN5ns,5754 +fontTools/misc/xmlReader.py,sha256=igut4_d13RT4WarliqVvuuPybO1uSXVeoBOeW4j0_e4,6580 +fontTools/misc/xmlWriter.py,sha256=EIdcS_0y9gvpc82QvZC9VcerAqEsP8eeC0gqMoPK4jc,7374 +fontTools/mtiLib/__init__.py,sha256=EzYwNaENLf906h1THBeq6nSRHUKpOAYxuzO9x9PHzh8,46602 +fontTools/mtiLib/__main__.py,sha256=gd8X89jnZOe-752k7uaR1lWoiju-2zIT5Yx35Kl0Xek,94 +fontTools/otlLib/__init__.py,sha256=D2leUW-3gsUTOFcJYGC18edBYjIJ804ut4qitJYWsaQ,45 +fontTools/otlLib/builder.py,sha256=DEhmbyVzsPFwCDrxgIjfOaZSIcKcVmCwrg9KMxkksEs,129088 +fontTools/otlLib/error.py,sha256=cthuhBuOwZYpkTLi5gFPupUxkXkCHe-L_YgkE7N1wCI,335 +fontTools/otlLib/maxContextCalc.py,sha256=3es4Kt84TaZ49sA2ev1zrlwPJikJCAECx5KavwhyB-I,3175 +fontTools/otlLib/optimize/__init__.py,sha256=UUQRpNkHU2RczCRt-Gz7sEiYE9AQq9BHLXZEOyvsnX4,1530 +fontTools/otlLib/optimize/__main__.py,sha256=BvP472kA9KxBb9RMyyehPNevAfpmgW9MfdazkUiAO3M,104 +fontTools/otlLib/optimize/gpos.py,sha256=htOgSP743DZDUKF3eWAeJ-kdqNYOnpGXdlV-rbEXJ1A,17668 +fontTools/pens/__init__.py,sha256=DJBWmoX_Haau7qlgmvWyfbhSzrX2qL636Rns7CG01pk,75 +fontTools/pens/areaPen.py,sha256=Y1WkmqzcC4z_bpGAR0IZUKrtHFtxKUQBmr5-64_zCOk,1472 +fontTools/pens/basePen.py,sha256=eIGSKrKm6w4LLHuG6XJoQZ3eObtoKV5P6aF4gT4sk7U,17073 +fontTools/pens/boundsPen.py,sha256=wE3owOQA8DfhH-zBGC3lJvnVwp-oyIt0KZrEqXbmS9I,3129 +fontTools/pens/cairoPen.py,sha256=wuuOJ1qQDSt_K3zscM2nukRyHZTZMwMzzCXCirfq_qQ,592 +fontTools/pens/cocoaPen.py,sha256=IJRQcAxRuVOTQ90bB_Bgjnmz7px_ST5uLF9CW-Y0KPY,612 +fontTools/pens/cu2quPen.py,sha256=gMUwFUsm_-WzBlDjTMQiNnEuI2heomGeOJBX81zYXPo,13007 +fontTools/pens/explicitClosingLinePen.py,sha256=kKKtdZiwaf8Cj4_ytrIDdGB2GMpPPDXm5Nwbw5WDgwU,3219 +fontTools/pens/filterPen.py,sha256=REgspXaaSvF3XUwqe40abe3X_E7-WbBP13IqLUUBLCw,14703 +fontTools/pens/freetypePen.py,sha256=HD-gXJSbgImJdBc8sIBk0HWBdjv3WKFofs6PgCCsGOY,19908 +fontTools/pens/hashPointPen.py,sha256=gElrFyQoOQp3ZbpKHRWPwC61A9OgT2Js8crVUD8BQAY,3573 +fontTools/pens/momentsPen.c,sha256=YVgVJ8eTQIscGOc4zNztWjIdC4RGkiqLLpm8dDBamPY,567692 +fontTools/pens/momentsPen.cpython-311-x86_64-linux-gnu.so,sha256=B2yW-erEYhOclSiyxQf4rjKmAoTbY0zlfamny8kCevQ,926496 +fontTools/pens/momentsPen.py,sha256=kjLVXhGe55Abl__Yr1gob0bl0dHe7fPSwyr7TRJnbug,25658 +fontTools/pens/perimeterPen.py,sha256=lr6NzrIWxi4TXBJPbcJsKzqABWfQeil2Bgm9BgUD3N4,2153 +fontTools/pens/pointInsidePen.py,sha256=noEUvBQIeAheDMJwzvvfnEiKhmwbS1i0RQE9jik6Gl4,6355 +fontTools/pens/pointPen.py,sha256=oeE_uabVCNJ1Lpk5Hn3eBmafaao3QqKMjK6FAy0hKBo,24197 +fontTools/pens/qtPen.py,sha256=QRNLIry2rQl4E_7ct2tu10-qLHneQp0XV7FfaZ-tcL8,634 +fontTools/pens/qu2cuPen.py,sha256=pRST43-rUpzlOP83Z_Rr0IvIQBCx6RWI6nnNaitQcLk,3985 
+fontTools/pens/quartzPen.py,sha256=EH482Kz_xsqYhVRovv6N_T1CXaSvOzUKPLxTaN956tU,1287 +fontTools/pens/recordingPen.py,sha256=VgFZ4NMhnZt1qSTzFEU0cma-gw3kBe47bfSxPYH73rs,12489 +fontTools/pens/reportLabPen.py,sha256=kpfMfOLXt2vOQ5smPsU82ft80FpCPWJzQLl7ENOH8Ew,2066 +fontTools/pens/reverseContourPen.py,sha256=oz64ZRhLAvT7DYMAwGKoLzZXQK8l81jRiYnTZkW6a-Y,4022 +fontTools/pens/roundingPen.py,sha256=vh_FjikRd82-S4I8glgGMGEuGrj5IkCjRT_wmZ8jfqY,4620 +fontTools/pens/statisticsPen.py,sha256=piWK6NjjWqk9MLROjeE2-4EsxVYMyNU7UQFGD_trE9g,9808 +fontTools/pens/svgPathPen.py,sha256=T3b6SZS9B9sVWMK9mSFDtjHeviQs_yOJOZKq5Sg5Zdg,8572 +fontTools/pens/t2CharStringPen.py,sha256=GgGklb5XsCer0w37ujgRLRXx-EuzdFsyCYuzCx4n-Qs,2931 +fontTools/pens/teePen.py,sha256=P1ARJOCMJ6MxK-PB1yZ-ips3CUfnadWYnQ_do6VIasQ,1290 +fontTools/pens/transformPen.py,sha256=s0kUyQdnemUwHvYr2SFboFmh4WY1S9OHBL8L4PJKRwE,4056 +fontTools/pens/ttGlyphPen.py,sha256=yLtB-E5pTQR59OKVYySttWBu1xC2vR8ezSaRhIMtVwg,11870 +fontTools/pens/wxPen.py,sha256=W9RRHlBWHp-CVC4Exvk3ytBmRaB4-LgJPP5Bv7o9BA0,680 +fontTools/qu2cu/__init__.py,sha256=Jfm1JljXbt91w4gyvZn6jzEmVnhRx50sh2fDongrOsE,618 +fontTools/qu2cu/__main__.py,sha256=9FWf6SIZaRaC8SiL0LhjAWC2yIdY9N_9wlRko8m1l2Q,93 +fontTools/qu2cu/benchmark.py,sha256=GMcr_4r7L6K9SmJ13itt-_XKhnKqSVUDPlXUG6IZmmM,1400 +fontTools/qu2cu/cli.py,sha256=U2rooYnVVEalGRAWGFHk-Kp6Okys8wtzdaWLjw1bngY,3714 +fontTools/qu2cu/qu2cu.c,sha256=1F7GKT21uskZbU01eryh1sg-aTbB_KUXDZnL1JGOahY,692747 +fontTools/qu2cu/qu2cu.cpython-311-x86_64-linux-gnu.so,sha256=pdhK7XtFhhZn3ebqpi-1R4apLywh1_4KGdpHxkcnDnw,1248624 +fontTools/qu2cu/qu2cu.py,sha256=IYtpkwHdfKOXJr65Y_pJ9Lrt_MgJaISAKGMAs5ilFSM,12288 +fontTools/subset/__init__.py,sha256=R9VoZ2QWhqENHC5Ct1wyhLhEU-xo4mUXwCWl6EZGgwQ,143263 +fontTools/subset/__main__.py,sha256=bhtfP2SqP4k799pxtksFgnC-XGNQDr3LcO4lc8T5e5g,95 +fontTools/subset/cff.py,sha256=rqMRJOlX5FacV1LW8aDlVOglgEM87TkMA9bdsYenask,6145 +fontTools/subset/svg.py,sha256=sbuR0UHVB_9vYIb6dWCbZ5yCqushkpp5lg-f9NeatBM,9297 +fontTools/subset/util.py,sha256=9SXFYb5Ef9Z58uXmYPCQil8B2i3Q7aFB_1fFDFSppdU,754 +fontTools/svgLib/__init__.py,sha256=IGCLwSbU8jLhq6HI2vSdPQgNs6zDUi5774TgX5MCXPY,75 +fontTools/svgLib/path/__init__.py,sha256=C82fh7xH6ZHsSFVnV848-xeDezpokx1EwTmayJCouFU,1996 +fontTools/svgLib/path/arc.py,sha256=-f5Ym6q4tDWQ76sMNSTUTWgL_7AfgXojvBhtBS7bWwQ,5812 +fontTools/svgLib/path/parser.py,sha256=8T6okMstvgM9ufb2zBcwSzsuuoYbqfnUjNYgb6kjznU,10788 +fontTools/svgLib/path/shapes.py,sha256=xvBUIckKyT9JLy7q_ZP50r6TjvZANyHdZP7wFDzErcI,5322 +fontTools/t1Lib/__init__.py,sha256=p42y70wEIbuX0IIxZG7-b_I-gHto1VLy0gLsDvxCfkw,20865 +fontTools/tfmLib.py,sha256=UMbkM73JXRJVS9t2B-BJc13rSjImaWBuzCoehLwHFhs,14270 +fontTools/ttLib/__init__.py,sha256=1k7qp9z04gA3m6GvxDaINjqrKbzOkdTA_4RnqW_-LrA,661 +fontTools/ttLib/__main__.py,sha256=lHMPWsnzjKPuMFavf6i1gpk9KexiAk4qzgDd50Mbby0,4733 +fontTools/ttLib/macUtils.py,sha256=lj3oeFpyjV7ko_JqnluneITmAtlc119J-vwTTg2s73A,1737 +fontTools/ttLib/removeOverlaps.py,sha256=ny3XhFw-RrriJ6n0sSh4SaVaKc3GhmCVZMw8g4oI0TY,12744 +fontTools/ttLib/reorderGlyphs.py,sha256=TbxLxqPTUGiKRX3ulGFCwVm2lEisFYlX6caONJr_4oY,10371 +fontTools/ttLib/scaleUpem.py,sha256=U_-NGkwfS9GRIackdEXjGYZ-wSomcUPXQahDneLeArI,14618 +fontTools/ttLib/sfnt.py,sha256=D5f7kvZX7rXDcmDqVBXqPPki4k27QgiBFYi8ziH2OiU,22981 +fontTools/ttLib/standardGlyphOrder.py,sha256=7AY_fVWdtwZ4iv5uWdyKAUcbEQiSDt1lN4sqx9xXwE0,5785 +fontTools/ttLib/tables/B_A_S_E_.py,sha256=H71A9pJ850mvjbrWHqy8iFI2Dxg7102YRtAkfdCooig,369 
+fontTools/ttLib/tables/BitmapGlyphMetrics.py,sha256=9gcGPVzsxEYnVBO7YLWfeOuht9PaCl09GmbAqDYqKi0,1769 +fontTools/ttLib/tables/C_B_D_T_.py,sha256=5LNdc8FMir1kC5fvp5iHwWfeuE-RuqdxAArFXaqPjQ0,3646 +fontTools/ttLib/tables/C_B_L_C_.py,sha256=YXlwovoCHYx8THLQD9iBU_VGoaB9LFObEKtL6ddD320,520 +fontTools/ttLib/tables/C_F_F_.py,sha256=yg3mUtYBudgmpG7Bz475j_DNnCelsgrTsM8DH1uR4ek,1978 +fontTools/ttLib/tables/C_F_F__2.py,sha256=YoHfJQdF-ezx4OwRQ2Y2O7rRJEPjOkf3Hx5Y11Xq0AM,807 +fontTools/ttLib/tables/C_O_L_R_.py,sha256=SHwFVNVmoUQR2e87KuTSe-J9LfeegS4f2hEpee29_2o,5993 +fontTools/ttLib/tables/C_P_A_L_.py,sha256=odFjqM4GnjXyQYGEC-e0Gvqms1jQ5zHHG3SDg7y-BI0,11942 +fontTools/ttLib/tables/D_S_I_G_.py,sha256=AgQPM9Cdro1P-ehJjTfsC9mRTTtSc16At0nnpb1XOGI,5517 +fontTools/ttLib/tables/D__e_b_g.py,sha256=KDnfkNOUnm3F13wD_j3YNBOvYadZ40Gf_0170hFkJp0,1134 +fontTools/ttLib/tables/DefaultTable.py,sha256=cOtgkLWPY9qmOH2BSPt4c4IUSdANWTKx2rK1CTxQ4h0,1487 +fontTools/ttLib/tables/E_B_D_T_.py,sha256=uOpmt25gOJQeO1u1IGAyPWgVmh-4vSZqrQEHvOYwbwg,32534 +fontTools/ttLib/tables/E_B_L_C_.py,sha256=LfEVzBg_yWr9dhChzS0U2G-7wNOwzwB0LWoXIUYNKKM,30054 +fontTools/ttLib/tables/F_F_T_M_.py,sha256=_450vdbEH7Y-0_rOwb3Q0hg-Qq2W8C_sHljy7rZtqqM,1683 +fontTools/ttLib/tables/F__e_a_t.py,sha256=ct79Gf__5ALlqfSBn6wvw6fazb31Od71R6vIp6o9XF4,5483 +fontTools/ttLib/tables/G_D_E_F_.py,sha256=QXiILFCRnPNZcwpub6ojN5S9WP6y56LsXi25pUWLgp4,299 +fontTools/ttLib/tables/G_M_A_P_.py,sha256=fvIQumokOCLa8DFeq_xi069F9RROsXSVmDvWtxgyacQ,4720 +fontTools/ttLib/tables/G_P_K_G_.py,sha256=Xi4Hj2OxZ2IZgVyBQ-Qyiie0hPZjpXZkrao-E5EdTWM,4646 +fontTools/ttLib/tables/G_P_O_S_.py,sha256=UkP3mlnyvQg-jj6ZBOh6j-OieVg_goJQ31nlLvoLGSI,397 +fontTools/ttLib/tables/G_S_U_B_.py,sha256=cwFMKO-pgwsn1H8Q9Jb58Z6ZrBrCoN0sqJB0YunBfSk,294 +fontTools/ttLib/tables/G_V_A_R_.py,sha256=13oO2dD-L4yfkrBuR-KN2rc40wh5lLIlx_khwMz5GH4,94 +fontTools/ttLib/tables/G__l_a_t.py,sha256=Xh3IzFgYlvNjrAOn7Ja73DrWrQTJgJxmDFSUKS6yHdM,8645 +fontTools/ttLib/tables/G__l_o_c.py,sha256=5DsxGzaG7HyJVvLlKQeff1lXt-XPWaHNNaf-EYwsKh4,2685 +fontTools/ttLib/tables/H_V_A_R_.py,sha256=6kPLDUGT8EussA3y9gKr_mrgY5PNv7YaK1V0keMXD9w,313 +fontTools/ttLib/tables/J_S_T_F_.py,sha256=Q9TEf3OuyDIxZlmoz9a3c-mDMlJK6YBQ9KcYmiwFRbU,315 +fontTools/ttLib/tables/L_T_S_H_.py,sha256=Iu6syJFuhJj0_7Aan2NPlDuQDIq-AzLwsOQbXVTnlL0,2189 +fontTools/ttLib/tables/M_A_T_H_.py,sha256=-TVu9Nlcs-1shkElbIk-CWtUwXUMdycHFkjvPE8C_fs,342 +fontTools/ttLib/tables/M_E_T_A_.py,sha256=sA6ookcjchw8UYVEuS8QEXc62I9_Rms9cu_jKA6MkNI,11989 +fontTools/ttLib/tables/M_V_A_R_.py,sha256=67cEuiTw5y5W1Zk98L_S_SmJINIfy_mzWCkyHcujz94,308 +fontTools/ttLib/tables/O_S_2f_2.py,sha256=1Pq2Xu4oYOJePTHC_hTKg3RIfKely3j6T1u_lMTEpD8,28030 +fontTools/ttLib/tables/S_I_N_G_.py,sha256=CFDy8R2fDeYn7ocfrZr7Ui7U9D0h4G55CdPfY55g-Bk,3317 +fontTools/ttLib/tables/S_T_A_T_.py,sha256=y9NiWCtnlZtMjw4K9_SdA84Xa-dJk7G5eb2dSe6ciWc,498 +fontTools/ttLib/tables/S_V_G_.py,sha256=vT6QTW5ArtskVUxnPEH_ZxKz4DF4v1pKbylN6DG0R3o,7676 +fontTools/ttLib/tables/S__i_l_f.py,sha256=lPQV2RdhcJRgfDzHp_dkgSxVUUdkcAnY1Bz7V18Gt9U,34985 +fontTools/ttLib/tables/S__i_l_l.py,sha256=Vjtn7SI83vaLGIuQf2e-jhZSFOXb9vXB4jwqznjqnMc,3224 +fontTools/ttLib/tables/T_S_I_B_.py,sha256=3WhEtyNnjYumcowD0GpjubrgnS-RzouZxCxEe4yLDo8,341 +fontTools/ttLib/tables/T_S_I_C_.py,sha256=hAV9Hq_ALsWaducDpw1tDRREvFL7hx7onnUF0sXTelU,381 +fontTools/ttLib/tables/T_S_I_D_.py,sha256=TsdX-G2xxVQO9sSE1wE_xDRx-gor5YiXTHeUthMwCPY,341 +fontTools/ttLib/tables/T_S_I_J_.py,sha256=x8Tlvi6aTxoQcI12UL7muoWF1Q61iBDseAS1mRdOYrg,341 
+fontTools/ttLib/tables/T_S_I_P_.py,sha256=-il2ucTBOghVBY7cmleHdLZc3W3CKh7-iPPT0A3KBzk,341 +fontTools/ttLib/tables/T_S_I_S_.py,sha256=tVBnl63vyZUIq93oM6dEjHCXvPn9vt5vvL3jG59b0Lg,341 +fontTools/ttLib/tables/T_S_I_V_.py,sha256=iUWxz2MSrtw7mzuQZj30QAJrCPnyJ4GincFfySFUNAg,855 +fontTools/ttLib/tables/T_S_I__0.py,sha256=O-2oI0eBgt4mP15-UwH0_0r7YWi3EEEhG-4etqDueGI,2505 +fontTools/ttLib/tables/T_S_I__1.py,sha256=nSUhni-fvYmeKXW4zLfP3FG_3LQU2QKPKS1_gKY5lYg,6971 +fontTools/ttLib/tables/T_S_I__2.py,sha256=q2rub-d77iWWiBM6awO0-TCl-Xq7kalPobHYC2QEOfc,496 +fontTools/ttLib/tables/T_S_I__3.py,sha256=0LcvvCzVZJzyz7i4zjIkUuYXEqXwOCs9WeCsgDFqKJ8,543 +fontTools/ttLib/tables/T_S_I__5.py,sha256=hhvJn6jiXs8kuBtun8krNUTXTljH-eKxaxXM1T-7SXM,1905 +fontTools/ttLib/tables/T_T_F_A_.py,sha256=LuT0w__AMtawnsBMobhEMW9gp2yk0mA5ZRzwF45c0UI,392 +fontTools/ttLib/tables/TupleVariation.py,sha256=4XTDTRPZWPg9_1K5SVgdNoxtgQvahtiO4LNO7fk1cK4,32235 +fontTools/ttLib/tables/V_A_R_C_.py,sha256=3jFX50J6X-Cc4dwwiztKKsDTRXVHTXlVdQH328UN1-k,289 +fontTools/ttLib/tables/V_D_M_X_.py,sha256=RbHl7vvO9pcjT_kKvcCmcByQj39n4PmVeq55wD5C14g,10437 +fontTools/ttLib/tables/V_O_R_G_.py,sha256=Cn3OxjVtcO-Uvp61P5c2336V9iEbuGr6vWAXnSIaihk,5965 +fontTools/ttLib/tables/V_V_A_R_.py,sha256=Cstw6tc_U4-EmTriRItBSpvTJODAjMFQjfyTaxLzsbI,319 +fontTools/ttLib/tables/__init__.py,sha256=eQPcuHCfRuGtt6nOa0KwV6vtUNKHnwuQyA7xSN8SPoc,2651 +fontTools/ttLib/tables/_a_n_k_r.py,sha256=MpAzIifmIi_3gx2oP6PC3R2lu36Ewsr2-W1rXjsz2Ug,483 +fontTools/ttLib/tables/_a_v_a_r.py,sha256=_qplWEwFos0VwRTdZa7LhfLIWw-CF1G76NbiRM_Rgvs,7374 +fontTools/ttLib/tables/_b_s_l_n.py,sha256=_848o7SQqztzBDfHYei-80u9ltxIHVBzXu1dYHLV57M,465 +fontTools/ttLib/tables/_c_i_d_g.py,sha256=yt8rVIadpJSDUCoVH4dZetNiy0Azm5ESAxHjB2BX_eA,913 +fontTools/ttLib/tables/_c_m_a_p.py,sha256=r8-bB_E0EQh5h4TGX5nTnDnwTUtXuRB3iuqEDoN_IOM,62202 +fontTools/ttLib/tables/_c_v_a_r.py,sha256=35ayk2kX1pcLGwyx0y4I1l-r7LHgdKv0ulVx8oBPteI,3527 +fontTools/ttLib/tables/_c_v_t.py,sha256=1_RhEcTmhWQWQp7Hsj8UsByKmXCIppZyIbIArGywEEM,1618 +fontTools/ttLib/tables/_f_e_a_t.py,sha256=Fi1XnjhkCG0tp43AcvpIaivD-YRFpufo6feGIrenQDo,469 +fontTools/ttLib/tables/_f_p_g_m.py,sha256=uZHZzqL6OdLn_Hxskv-xf3XuE4fyaSv_jbALEjwXYug,1633 +fontTools/ttLib/tables/_f_v_a_r.py,sha256=rV33H2BgHUl3Wuydsou1G-Hi4uASBppWaLj3FMmiLjs,8837 +fontTools/ttLib/tables/_g_a_s_p.py,sha256=YvhAVDvdssN2fjPMTfSrO4WBCfTuh9T2cU5zquDVnSw,2203 +fontTools/ttLib/tables/_g_c_i_d.py,sha256=AJ4uV7PTHbnsw4Tfw8c2Ezh0VMox3oAH0qhhq7y8hdM,362 +fontTools/ttLib/tables/_g_l_y_f.py,sha256=dDV65llsEDI9fKcVKC5TOiaXpXSyMHNEytuYOGt7adM,85584 +fontTools/ttLib/tables/_g_v_a_r.py,sha256=E9WCKjeITUfd5hcJLQ0rjQFBtZdxw1eswFlWp1U6bD4,12196 +fontTools/ttLib/tables/_h_d_m_x.py,sha256=wMrO4D04QNT8u30p8AV-aG3bndXCq4wlPNvtbd8ip7c,4252 +fontTools/ttLib/tables/_h_e_a_d.py,sha256=yY2GTFq6Mn6nN8EegbMVJRMUWIqDYFln3FhTk3ziw6s,4926 +fontTools/ttLib/tables/_h_h_e_a.py,sha256=X4t1aF1MZMuz3phCVSFwKcNTeoZdx-042wFtHc-nK9w,4767 +fontTools/ttLib/tables/_h_m_t_x.py,sha256=rbxr3cy9-9Jm0HCGIWQiX6fGH5iu6yojp9kfgWrW2Ks,6192 +fontTools/ttLib/tables/_k_e_r_n.py,sha256=DQNLmD_HEdDKPfp4tamOd9W3T5a1lXFM5tDaWrKl164,10794 +fontTools/ttLib/tables/_l_c_a_r.py,sha256=8W6xFOj-sm003MCXX4bIHxs9ntfVvT0FXYllPxa3z4I,390 +fontTools/ttLib/tables/_l_o_c_a.py,sha256=yxiwLKXLZjNju5XYmLb6EhNLec1d7ezEDDe1dszceHo,2180 +fontTools/ttLib/tables/_l_t_a_g.py,sha256=9YpApjI-rZ4e3HeT8Pj-osiHl3uALD9JXg5O7pqk9L0,2552 +fontTools/ttLib/tables/_m_a_x_p.py,sha256=cIDIZWse9czwwsnlxIh3qwgwaXbt7PQAjXKAcmMDspY,5264 
+fontTools/ttLib/tables/_m_e_t_a.py,sha256=A0CZPEAVxYrpytjXUGQJCTddwG8KrvUVbtBe3A1MqgI,3913 +fontTools/ttLib/tables/_m_o_r_t.py,sha256=u35tYqn3cjzKxeCF0FUFeLtaf36mjDDSN08uuk0Kme8,487 +fontTools/ttLib/tables/_m_o_r_x.py,sha256=OwamVpIO7REDnFr95HuFPoY_0U6i9zQPb11K1sFTvDY,548 +fontTools/ttLib/tables/_n_a_m_e.py,sha256=86_0fUeA5_c-GY5ZnkqUI0jyWwMh1mn6yVOf6KKqIlU,41266 +fontTools/ttLib/tables/_o_p_b_d.py,sha256=TNZv_2YTrj4dGzd6wA9Jb-KGZ99un177s5p3LlfxQ74,448 +fontTools/ttLib/tables/_p_o_s_t.py,sha256=9siVXSisWGdTfj_mC1E9dUDz9Jdm1i3QzI-l3i3VWME,11671 +fontTools/ttLib/tables/_p_r_e_p.py,sha256=CcKr4HrswkupLmbJdrJLTM-z9XgLefQyv8467j9V0zs,427 +fontTools/ttLib/tables/_p_r_o_p.py,sha256=Eg8x5qWyXDzPezMafFu0s0qyPDHj-sPsFxGtE6h29qo,427 +fontTools/ttLib/tables/_s_b_i_x.py,sha256=tkkKbNKNYkUhZJuN0kl7q37x5KK5OovB06y28obPV6A,4865 +fontTools/ttLib/tables/_t_r_a_k.py,sha256=rrrPZLELFYA5F8PERoafIS9cb_d_i6xtpAzHEbsFHSw,11379 +fontTools/ttLib/tables/_v_h_e_a.py,sha256=FuULIBl4OQyUeLPOFEY8buB0pAnQhGa1-5a6kN9i5Sc,4459 +fontTools/ttLib/tables/_v_m_t_x.py,sha256=AUuxtyQvMWrTBNbOIaL6uKcB_DNpNb0YX28JIuTHw_Y,500 +fontTools/ttLib/tables/asciiTable.py,sha256=4c69jsAirUnDEpylf9CYBoCKTzwbmfbtUAOrtPnpHjY,637 +fontTools/ttLib/tables/grUtils.py,sha256=hcOJ5oJPOd2uJWnWA7qwR7AfL37YZ5zUT7g8o5BBV80,2270 +fontTools/ttLib/tables/otBase.py,sha256=k1Mt5sLd2EL6ufX3e0ZaBDgTvSLCUKOW6qy5UDeKwxI,52986 +fontTools/ttLib/tables/otConverters.py,sha256=ihE_WMSKAKSaBbMvnFYDj2eMxf7PvRMMa8zGwfoYuYc,74202 +fontTools/ttLib/tables/otData.py,sha256=-XXRwdVfP-Wz7oBjMPpku0A0QH9lw_fFGNzZlt9N0mo,197262 +fontTools/ttLib/tables/otTables.py,sha256=2U04ot_2ITlBZx2QtpnIOtBGftPFs9ZX2FWfz4vz1G0,96987 +fontTools/ttLib/tables/otTraverse.py,sha256=HznEVAlVf_8eyqjsO2edgELtMlXnjnUqccK3PytvVUE,5518 +fontTools/ttLib/tables/sbixGlyph.py,sha256=tjEUPVRfx6gr5yme8UytGTtVrimKN5qmbzT1GZPjXiM,5796 +fontTools/ttLib/tables/sbixStrike.py,sha256=dL8O9K8R4S6RVQDP-PVjIPBrvbqbE9zwra0uRL0nLq0,6651 +fontTools/ttLib/tables/table_API_readme.txt,sha256=eZlRTLUkLzc_9Ot3pdfhyMb3ahU0_Iipx0vSbzOVGy8,2748 +fontTools/ttLib/tables/ttProgram.py,sha256=tgtxgd-EnOq-2PUlYEihp-6NHu_7HnE5rxeSAtmXOtU,35888 +fontTools/ttLib/ttCollection.py,sha256=aRph2MkBK3kd9-JCLqhJ1EN9pffN_lVX6WWmOTTewc8,3963 +fontTools/ttLib/ttFont.py,sha256=TKcK7qSvxHum0iXpX0zS3YiDjt6yW9wlK_YsoP2torE,63707 +fontTools/ttLib/ttGlyphSet.py,sha256=cUBhMGa5hszeVqOm2KpOdeJh-LsiqE7RNdyIUPZ2vO8,17476 +fontTools/ttLib/ttVisitor.py,sha256=_tah4C42Tv6Pm9QeLNQwwVCxqI4VNEAqYCbmThp6cvY,1025 +fontTools/ttLib/woff2.py,sha256=6LPISeBQ1dubzKjWrUcYm_vgETC46BTLY4XkG52qvSA,60921 +fontTools/ttx.py,sha256=FxuGubujWCGJWSTrJEjoNH--25fVIPy-ZRtYy9H6iTk,17277 +fontTools/ufoLib/__init__.py,sha256=nKG8gu6NEvqGJoZ781IARoQ7ii4LoWfMMvX3Yf5TsVw,98981 +fontTools/ufoLib/converters.py,sha256=YnBKr8kmyjwLcq8LdD46ubGOgyL9Pxt9avlvZn9anKI,13444 +fontTools/ufoLib/errors.py,sha256=9f8l5NaFAj3BZPa6Bbqt06FL4afffLuMzy4nPf-eOlE,845 +fontTools/ufoLib/etree.py,sha256=T3sjLTgjMAq6VyYRicWPaMIVBJ2YSuwZxV6Vc5yZtQI,231 +fontTools/ufoLib/filenames.py,sha256=hoyUhzzQMDaeckT7UdreISANq4-gLR2jGyk5yAyYtOA,10654 +fontTools/ufoLib/glifLib.py,sha256=Y-xzf4qbTIOl3-dVLXvu3iFCIDtAEu_klId2_UNngWs,77170 +fontTools/ufoLib/kerning.py,sha256=o1BeJDVZ_CZZPzmOPwRKTqglYmhA_JZPjwq2JLgdQIk,4836 +fontTools/ufoLib/plistlib.py,sha256=jzMGOGvHO6XvS-IO8hS04ur7r8-v2dnVq-vKMoJZvqQ,1510 +fontTools/ufoLib/pointPen.py,sha256=CuREcm3IYteZNBDAd_ZRAV4XqBsy0s07jdWc4en9r-8,244 +fontTools/ufoLib/utils.py,sha256=nZoJJqHXQSL-LXYE58_WHA97XlbTkEbYkdH3GL32SmQ,3192 
+fontTools/ufoLib/validators.py,sha256=MWBqcLThGyYpst61QothA_BSlc6jGVhPvFiay-pobCY,32387 +fontTools/unicode.py,sha256=ZZ7OMmWvIyV1IL1k6ioTzaRAh3tUvm6gvK7QgFbOIHY,1237 +fontTools/unicodedata/Blocks.py,sha256=frShN07WqD1uLt-V0TNlUZPnn0-cKc0Nuhtd2uNDoSw,33139 +fontTools/unicodedata/Mirrored.py,sha256=kdhwCWOWaArmfNkDah0Thv-67M9wWz45R5IMPhqyzFM,9242 +fontTools/unicodedata/OTTags.py,sha256=wOPpbMsNcp_gdvPFeITtgVMnTN8TJSNAsVEdu_nuPXE,1196 +fontTools/unicodedata/ScriptExtensions.py,sha256=udL53Wdd5n-ZFEVHeIC0LpKKBqPHD2WqRiBbzXs2U7U,28647 +fontTools/unicodedata/Scripts.py,sha256=wBD6SElfM3w-u6JcgyNsyofXPvqW1e4YltgeHE4XaOc,131148 +fontTools/unicodedata/__init__.py,sha256=4eR0Luk4QJHC9YDG0_wi6UrIDawED36e6Xbf3S5hDPg,9085 +fontTools/varLib/__init__.py,sha256=nzk6FSRozAMz9CDVqhMVYhh4Ho_hjIKrYtkMIJLn0y0,57467 +fontTools/varLib/__main__.py,sha256=wbdYC5bPjWCxA0I4SKcLO88gl-UMtsYS8MxdW9ySTkY,95 +fontTools/varLib/avar/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fontTools/varLib/avar/__main__.py,sha256=ew1fJpg81GpYbdkrp4I7ntWdbhkDeg5fNn1SrpF254k,1770 +fontTools/varLib/avar/build.py,sha256=YSMsRMjGkNW2zhKn29HgovNNG-jgiFSsK7zYH4un2Ws,2087 +fontTools/varLib/avar/map.py,sha256=UBeElT40SHXvJrQ7SflH5MNYKTTom7RLHwq2varb_oE,2867 +fontTools/varLib/avar/plan.py,sha256=qDKZjQq6OfeXBr3T3DZnrmauyMm5ek0rm5YVosAO_UA,27354 +fontTools/varLib/avar/unbuild.py,sha256=h3qb0JBN7SngijPrwjTjGEjueeENlhPYY9FyN02FLEk,10467 +fontTools/varLib/avarPlanner.py,sha256=CabG5xB57FMdwmN1acpVMvyOucVPxCdRdJSK2gu7gb4,109 +fontTools/varLib/builder.py,sha256=mSKOCcnnw-WzmZs15FayoqCDh77Ts7o9Tre9psh8CUc,6609 +fontTools/varLib/cff.py,sha256=EVgaQcoROIrYQsRuftnxFuGGldEPYbrIh5yBckylJC4,22901 +fontTools/varLib/errors.py,sha256=dMo8eGj76I7H4hrBEiNbYrGs2J1K1SwdsUyTHpkVOrQ,6934 +fontTools/varLib/featureVars.py,sha256=kp4gPjKyyGRu7yBWxgd1N4OkKnU9V45QwiWSHs6OWd0,26180 +fontTools/varLib/hvar.py,sha256=1IvL5BneTkg8jJYicH0TSQViB6D0vBEesLdlfqoLBX4,3695 +fontTools/varLib/instancer/__init__.py,sha256=KVXejhVDoIx8cqQuJKOB_N2q8uE2stsMQ68tqp268PI,75502 +fontTools/varLib/instancer/__main__.py,sha256=zfULwcP01FhplS1IlcMgNQnLxk5RVfmOuinWjqeid-g,104 +fontTools/varLib/instancer/featureVars.py,sha256=oPqSlnHLMDTtOsmQMi6gkzLox7ymCrqlRAkvC_EJ4bc,7110 +fontTools/varLib/instancer/names.py,sha256=IPRqel_M8zVU0jl30WsfgufxUm9PBBQDQCY3VHapeHc,14950 +fontTools/varLib/instancer/solver.py,sha256=uMePwX0BVT5F94kUvDglsI4_F0nEH67F7RFuJ6tQwQ0,11002 +fontTools/varLib/interpolatable.py,sha256=Bhlq_LhEZ-sXfLNY8aFEChFrsKuT2kzmnuMfG5qi0v4,45221 +fontTools/varLib/interpolatableHelpers.py,sha256=cTFgTqDjggSCqNfTM77__5b9Sja_g7xWWMiB-pXDx84,11672 +fontTools/varLib/interpolatablePlot.py,sha256=w393P6mGLRhYkIjSxMww3qyoYxAUZzCXlmPBbI_84C0,44375 +fontTools/varLib/interpolatableTestContourOrder.py,sha256=mHJ9Ry7Rm7H3zHDwEUQEtEIDseiUzOxjg4MveW_FSiU,3021 +fontTools/varLib/interpolatableTestStartingPoint.py,sha256=K6OYKBspim6BXc91pfLTbGLyi5XZukfMuBc6hRpENG8,4296 +fontTools/varLib/interpolate_layout.py,sha256=22VjGZuV2YiAe2MpdTf0xPVz1x2G84bcOL0vOeBpGQM,3689 +fontTools/varLib/iup.c,sha256=64aSGfKmZAN_uGJrYxvtmzstksJYD3ak2iv8iyjW3bM,834812 +fontTools/varLib/iup.cpython-311-x86_64-linux-gnu.so,sha256=B088mcQoK9BuKhsaLhd69wok-4g1Us-fi-91TX65sko,1685552 +fontTools/varLib/iup.py,sha256=mKq_GRWuUg4yTmw2V32nu0v2r-SzzN7xS7rIbV0mYuc,14984 +fontTools/varLib/merger.py,sha256=E59oli4AwqWZ-FgnuStMSBvsB-FHe-55esXTYUqGeJ8,60802 +fontTools/varLib/models.py,sha256=mkwlucmKDkyhPZWEpab7ofcuA19hGlqjHtY9MwmYtOI,22697 +fontTools/varLib/multiVarStore.py,sha256=eQEuWNY01YF5zDpy1UwNtvOYyD6c0FLxpH-QFpX1i78,8305 
+fontTools/varLib/mutator.py,sha256=kzXiLFxRLgU2pcHzOzh9u0n0KkO3DuBk06xZ_RPhWz8,19804 +fontTools/varLib/mvar.py,sha256=LTV77vH_3Ecg_qKBO5xQzjLOlJir_ppEr7mPVZRgad8,2449 +fontTools/varLib/plot.py,sha256=NoSZkJ5ndxNcDvJIvd5pQ9_jX6X1oM1K2G_tR4sdPVs,7494 +fontTools/varLib/stat.py,sha256=XuNKKZxGlBrl4OGFDAwVXhpBwJi23U3BdHmNTKoJnvE,4811 +fontTools/varLib/varStore.py,sha256=2QA9SDI6jQyQ_zq82OOwa3FBkfl-ksaSo1KGmVFpa9Q,24069 +fontTools/voltLib/__init__.py,sha256=ZZ1AsTx1VlDn40Kupce-fM3meOWugy3RZraBW9LG-9M,151 +fontTools/voltLib/__main__.py,sha256=uVtABLzMeHtvKL8zetf4rpC4aB8BkYr5QLSegNjZZZI,5928 +fontTools/voltLib/ast.py,sha256=arA9W3Gqo6OqljwNNKnMAojz-C5LStbC5SgjJh7buKk,13300 +fontTools/voltLib/error.py,sha256=phcQOQj-xOspCXu9hBJQRhSOBDzxHRgZd3fWQOFNJzw,395 +fontTools/voltLib/lexer.py,sha256=OvuETOSvlS6v7iCVeJ3IdH2Cg71n3OJoEyiB3-h6vhE,3368 +fontTools/voltLib/parser.py,sha256=rkw2IHBZPsrhGVC7Kw7V501m0u52kh1JSM5HXp-xchM,25396 +fontTools/voltLib/voltToFea.py,sha256=Z2yvnaZLQXzPLT86Uta0zRsXIYgj6NnvZtSWt5xmw2s,36549 +fonttools-4.61.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +fonttools-4.61.1.dist-info/METADATA,sha256=sF0h1Ul5KoQam4cNjcDOXJ1D2Z1nYh_xN5sYvop8OUA,114185 +fonttools-4.61.1.dist-info/RECORD,, +fonttools-4.61.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fonttools-4.61.1.dist-info/WHEEL,sha256=_CFvICYDmZlAYHt8L7Zn3n-BGLj8dkZLQPp22Piy5JE,151 +fonttools-4.61.1.dist-info/entry_points.txt,sha256=8kVHddxfFWA44FSD4mBpmC-4uCynQnkoz_9aNJb227Y,147 +fonttools-4.61.1.dist-info/licenses/LICENSE,sha256=Z4cgj4P2Wcy8IiOy_elS_6b36KymLxqKK_W8UbsbI4M,1072 +fonttools-4.61.1.dist-info/licenses/LICENSE.external,sha256=lKg6ruBymg8wLTSsxKzsvZ1YNm8mJCkHX-VX5KVLLmk,20022 +fonttools-4.61.1.dist-info/top_level.txt,sha256=rRgRylrXzekqWOsrhygzib12pQ7WILf7UGjqEwkIFDM,10 diff --git a/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/WHEEL b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7cc1bea2cb3d38c1dba6db468730cd8fd970d117 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: false +Tag: cp311-cp311-manylinux_2_17_x86_64 +Tag: cp311-cp311-manylinux2014_x86_64 + diff --git a/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/entry_points.txt b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..87ae781f169a63f0cf672a9050474035bfa5add4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +fonttools = fontTools.__main__:main +pyftmerge = fontTools.merge:main +pyftsubset = fontTools.subset:main +ttx = fontTools.ttx:main diff --git a/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..9af65ba39d292309497df4accdc44bd6f8143d10 --- /dev/null +++ b/py311/lib/python3.11/site-packages/fonttools-4.61.1.dist-info/top_level.txt @@ -0,0 +1 
@@
+fontTools
diff --git a/py311/lib/python3.11/site-packages/frozenlist/__init__.py b/py311/lib/python3.11/site-packages/frozenlist/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..41c859588035a89d2706c37247f1d92ee0e7c5a7
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/frozenlist/__init__.py
@@ -0,0 +1,86 @@
+import os
+import types
+from collections.abc import MutableSequence
+from functools import total_ordering
+
+__version__ = "1.8.0"
+
+__all__ = ("FrozenList", "PyFrozenList")  # type: Tuple[str, ...]
+
+
+NO_EXTENSIONS = bool(os.environ.get("FROZENLIST_NO_EXTENSIONS"))  # type: bool
+
+
+@total_ordering
+class FrozenList(MutableSequence):
+    __slots__ = ("_frozen", "_items")
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+    def __init__(self, items=None):
+        self._frozen = False
+        if items is not None:
+            items = list(items)
+        else:
+            items = []
+        self._items = items
+
+    @property
+    def frozen(self):
+        return self._frozen
+
+    def freeze(self):
+        self._frozen = True
+
+    def __getitem__(self, index):
+        return self._items[index]
+
+    def __setitem__(self, index, value):
+        if self._frozen:
+            raise RuntimeError("Cannot modify frozen list.")
+        self._items[index] = value
+
+    def __delitem__(self, index):
+        if self._frozen:
+            raise RuntimeError("Cannot modify frozen list.")
+        del self._items[index]
+
+    def __len__(self):
+        return self._items.__len__()
+
+    def __iter__(self):
+        return self._items.__iter__()
+
+    def __reversed__(self):
+        return self._items.__reversed__()
+
+    def __eq__(self, other):
+        return list(self) == other
+
+    def __le__(self, other):
+        return list(self) <= other
+
+    def insert(self, pos, item):
+        if self._frozen:
+            raise RuntimeError("Cannot modify frozen list.")
+        self._items.insert(pos, item)
+
+    def __repr__(self):
+        return f"<FrozenList(frozen={self._frozen}, {self._items!r})>"
+
+    def __hash__(self):
+        if self._frozen:
+            return hash(tuple(self))
+        else:
+            raise RuntimeError("Cannot hash unfrozen list.")
+
+
+PyFrozenList = FrozenList
+
+
+if not NO_EXTENSIONS:
+    try:
+        from ._frozenlist import FrozenList as CFrozenList  # type: ignore
+    except ImportError:  # pragma: no cover
+        pass
+    else:
+        FrozenList = CFrozenList  # type: ignore
diff --git a/py311/lib/python3.11/site-packages/frozenlist/__init__.pyi b/py311/lib/python3.11/site-packages/frozenlist/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..ae803ef6aad72f57e7379db5a2044a95f214df7b
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/frozenlist/__init__.pyi
@@ -0,0 +1,47 @@
+from typing import (
+    Generic,
+    Iterable,
+    Iterator,
+    List,
+    MutableSequence,
+    Optional,
+    TypeVar,
+    Union,
+    overload,
+)
+
+_T = TypeVar("_T")
+_Arg = Union[List[_T], Iterable[_T]]
+
+class FrozenList(MutableSequence[_T], Generic[_T]):
+    def __init__(self, items: Optional[_Arg[_T]] = None) -> None: ...
+    @property
+    def frozen(self) -> bool: ...
+    def freeze(self) -> None: ...
+    @overload
+    def __getitem__(self, i: int) -> _T: ...
+    @overload
+    def __getitem__(self, s: slice) -> FrozenList[_T]: ...
+    @overload
+    def __setitem__(self, i: int, o: _T) -> None: ...
+    @overload
+    def __setitem__(self, s: slice, o: Iterable[_T]) -> None: ...
+    @overload
+    def __delitem__(self, i: int) -> None: ...
+    @overload
+    def __delitem__(self, i: slice) -> None: ...
+    def __len__(self) -> int: ...
+    def __iter__(self) -> Iterator[_T]: ...
+    def __reversed__(self) -> Iterator[_T]: ...
+    def __eq__(self, other: object) -> bool: ...
+    def __le__(self, other: FrozenList[_T]) -> bool: ...
+    def __ne__(self, other: object) -> bool: ...
+    def __lt__(self, other: FrozenList[_T]) -> bool: ...
+    def __ge__(self, other: FrozenList[_T]) -> bool: ...
+    def __gt__(self, other: FrozenList[_T]) -> bool: ...
+    def insert(self, pos: int, item: _T) -> None: ...
+    def __repr__(self) -> str: ...
+    def __hash__(self) -> int: ...
+
+# types for C accelerators are the same
+CFrozenList = PyFrozenList = FrozenList
diff --git a/py311/lib/python3.11/site-packages/frozenlist/_frozenlist.pyx b/py311/lib/python3.11/site-packages/frozenlist/_frozenlist.pyx
new file mode 100644
index 0000000000000000000000000000000000000000..a82d8c8ff6c909073deb5efd29ec5fa6075537fc
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/frozenlist/_frozenlist.pyx
@@ -0,0 +1,148 @@
+# cython: freethreading_compatible = True
+# distutils: language = c++
+
+from cpython.bool cimport PyBool_FromLong
+from libcpp.atomic cimport atomic
+
+import copy
+import types
+from collections.abc import MutableSequence
+
+
+cdef class FrozenList:
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+    cdef atomic[bint] _frozen
+    cdef list _items
+
+    def __init__(self, items=None):
+        self._frozen.store(False)
+        if items is not None:
+            items = list(items)
+        else:
+            items = []
+        self._items = items
+
+    @property
+    def frozen(self):
+        return PyBool_FromLong(self._frozen.load())
+
+    cdef object _check_frozen(self):
+        if self._frozen.load():
+            raise RuntimeError("Cannot modify frozen list.")
+
+    cdef inline object _fast_len(self):
+        return len(self._items)
+
+    def freeze(self):
+        self._frozen.store(True)
+
+    def __getitem__(self, index):
+        return self._items[index]
+
+    def __setitem__(self, index, value):
+        self._check_frozen()
+        self._items[index] = value
+
+    def __delitem__(self, index):
+        self._check_frozen()
+        del self._items[index]
+
+    def __len__(self):
+        return self._fast_len()
+
+    def __iter__(self):
+        return self._items.__iter__()
+
+    def __reversed__(self):
+        return self._items.__reversed__()
+
+    def __richcmp__(self, other, op):
+        if op == 0:  # <
+            return list(self) < other
+        if op == 1:  # <=
+            return list(self) <= other
+        if op == 2:  # ==
+            return list(self) == other
+        if op == 3:  # !=
+            return list(self) != other
+        if op == 4:  # >
+            return list(self) > other
+        if op == 5:  # >=
+            return list(self) >= other
+
+    def insert(self, pos, item):
+        self._check_frozen()
+        self._items.insert(pos, item)
+
+    def __contains__(self, item):
+        return item in self._items
+
+    def __iadd__(self, items):
+        self._check_frozen()
+        self._items += list(items)
+        return self
+
+    def index(self, item):
+        return self._items.index(item)
+
+    def remove(self, item):
+        self._check_frozen()
+        self._items.remove(item)
+
+    def clear(self):
+        self._check_frozen()
+        self._items.clear()
+
+    def extend(self, items):
+        self._check_frozen()
+        self._items += list(items)
+
+    def reverse(self):
+        self._check_frozen()
+        self._items.reverse()
+
+    def pop(self, index=-1):
+        self._check_frozen()
+        return self._items.pop(index)
+
+    def append(self, item):
+        self._check_frozen()
+        return self._items.append(item)
+
+    def count(self, item):
+        return self._items.count(item)
+
+    def __repr__(self):
+        return '<FrozenList(frozen={0}, {1!r})>'.format(self._frozen.load(),
+                                                        self._items)
+
+    def __hash__(self):
+        if self._frozen.load():
+            return hash(tuple(self._items))
+        else:
+            raise RuntimeError("Cannot hash unfrozen list.")
+
+    def __deepcopy__(self, memo):
+        cdef FrozenList new_list
+        obj_id = id(self)
+
+        # Return existing copy if already processed (circular reference)
+        if obj_id in memo:
+            return
memo[obj_id]
+
+        # Create new instance and register immediately
+        new_list = self.__class__([])
+        memo[obj_id] = new_list
+
+        # Deep copy items
+        new_list._items[:] = [copy.deepcopy(item, memo) for item in self._items]
+
+        # Preserve frozen state
+        if self._frozen.load():
+            new_list.freeze()
+
+        return new_list
+
+
+MutableSequence.register(FrozenList)
diff --git a/py311/lib/python3.11/site-packages/frozenlist/py.typed b/py311/lib/python3.11/site-packages/frozenlist/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..f5642f79f21d872f010979dcf6f0c4a415acc19d
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/frozenlist/py.typed
@@ -0,0 +1 @@
+Marker
diff --git a/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+uv
\ No newline at end of file
diff --git a/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/METADATA b/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..8d8077391f6c5725d6bbc2b1ffce036ae640c165
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/METADATA
@@ -0,0 +1,79 @@
+Metadata-Version: 2.4
+Name: google-api-core
+Version: 2.29.0
+Summary: Google API client core library
+Author-email: Google LLC <googleapis-packages@google.com>
+License: Apache 2.0
+Project-URL: Documentation, https://googleapis.dev/python/google-api-core/latest/
+Project-URL: Repository, https://github.com/googleapis/python-api-core
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Internet
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: googleapis-common-protos<2.0.0,>=1.56.2
+Requires-Dist: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<7.0.0,>=3.19.5
+Requires-Dist: proto-plus<2.0.0,>=1.22.3
+Requires-Dist: proto-plus<2.0.0,>=1.25.0; python_version >= "3.13"
+Requires-Dist: google-auth<3.0.0,>=2.14.1
+Requires-Dist: requests<3.0.0,>=2.18.0
+Requires-Dist: importlib_metadata>=1.4; python_version < "3.8"
+Provides-Extra: async-rest
+Requires-Dist: google-auth[aiohttp]<3.0.0,>=2.35.0; extra == "async-rest"
+Provides-Extra: grpc
+Requires-Dist: grpcio<2.0.0,>=1.33.2; extra == "grpc"
+Requires-Dist: grpcio<2.0.0,>=1.49.1; python_version >= "3.11" and extra == "grpc"
+Requires-Dist: grpcio<2.0.0,>=1.75.1; python_version >= "3.14" and extra == "grpc"
+Requires-Dist: grpcio-status<2.0.0,>=1.33.2; extra == "grpc"
+Requires-Dist: grpcio-status<2.0.0,>=1.49.1; python_version >= "3.11"
and extra == "grpc" +Requires-Dist: grpcio-status<2.0.0,>=1.75.1; python_version >= "3.14" and extra == "grpc" +Provides-Extra: grpcgcp +Requires-Dist: grpcio-gcp<1.0.0,>=0.2.2; extra == "grpcgcp" +Provides-Extra: grpcio-gcp +Requires-Dist: grpcio-gcp<1.0.0,>=0.2.2; extra == "grpcio-gcp" +Dynamic: license-file + +Core Library for Google Client Libraries +======================================== + +|pypi| |versions| + +This library is not meant to stand-alone. Instead it defines +common helpers used by all Google API clients. For more information, see the +`documentation`_. + +.. |pypi| image:: https://img.shields.io/pypi/v/google-api_core.svg + :target: https://pypi.org/project/google-api_core/ +.. |versions| image:: https://img.shields.io/pypi/pyversions/google-api_core.svg + :target: https://pypi.org/project/google-api_core/ +.. _documentation: https://googleapis.dev/python/google-api-core/latest + + +Supported Python Versions +------------------------- +Python >= 3.7 + + +Unsupported Python Versions +--------------------------- + +Python == 2.7, Python == 3.5, Python == 3.6. + +The last version of this library compatible with Python 2.7 and 3.5 is +`google-api-core==1.31.1`. + +The last version of this library compatible with Python 3.6 is +`google-api-core==2.8.2`. diff --git a/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/RECORD b/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..4da43e15ca0368709afd6b14a7cbcb98dfbe2b54 --- /dev/null +++ b/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/RECORD @@ -0,0 +1,71 @@ +google/api_core/__init__.py,sha256=J15BhkHZyZFegVV0J9uN33uS9TTB-JC1h1Arq7AlrJ8,1656 +google/api_core/_python_package_support.py,sha256=xGZrkQNQ8K8d2uFgpyAqZDHGLTf4Oq5m9exspS1-Yg8,9464 +google/api_core/_python_version_support.py,sha256=h0ZR34hgtICbcPjV3cINdTmObKtbmPiKPvltlv1wdaY,10649 +google/api_core/_rest_streaming_base.py,sha256=AlkPe71v0kRUeWP5yn6N1KbxCxKhr-vfQOCgoF6x8ZE,4351 +google/api_core/bidi.py,sha256=XyJpZ1z5cUNpBaCjVw6NRMM6tWFguQCRF7fCsLSPy4g,27678 +google/api_core/bidi_async.py,sha256=nV5-AIXc64QI4OX5ucdImOKXTtyGEIqEvoL1zpZ7EaA,8774 +google/api_core/bidi_base.py,sha256=C5WSthnI3vK5K-Ltr6_YavyGYi1MAijTw-ZYni5VIlU,3495 +google/api_core/client_info.py,sha256=A1yzixILdp55Rk8Hu1m7QGlnOh6CGMWhKLNi9TUotRU,4092 +google/api_core/client_logging.py,sha256=o7VrcpJ5yqIfdpBDGKTIIVaqIfl5Ppr_AxiOfyKIGTk,5023 +google/api_core/client_options.py,sha256=12EgRX-6pqBg7asxknA9eIs3CjCNmvoxhGvKt3zGeNk,6860 +google/api_core/datetime_helpers.py,sha256=5gFi7n0r-xVImQdj6rQKNwk58m2LcMF9WliXGHbBsDA,9034 +google/api_core/exceptions.py,sha256=5VXhbkcGCrfjo6tzP4hCVh6vakqGM7kZSewVj6pCS8M,21150 +google/api_core/extended_operation.py,sha256=r9xSOblNF35lwn2hrrjUQ-f3JDoo0a4Z8xwOy_VkvL0,8632 +google/api_core/future/__init__.py,sha256=7sToxNNu9c_xqcpmO8dbrcSLOOxplnYOOSXjOX9QIXw,702 +google/api_core/future/_helpers.py,sha256=jA6m2L1aqlOJA-9NdC1BDosPksZQ7FmLLYWDOrsQOPc,1248 +google/api_core/future/async_future.py,sha256=7rOK0tzud8MCoUwO9AjF-3OQDtELwhtp2ONltSB3GEI,5355 +google/api_core/future/base.py,sha256=SHyudamSWR7EyUsYaQ-XrGGkLeYClSfXfsHIHSqDIYI,1763 +google/api_core/future/polling.py,sha256=0HUw1bp7ZLgEqMtwsvxIXNMHQbHgsP6TpmpVrMbjJ2I,14349 +google/api_core/gapic_v1/__init__.py,sha256=r6kCwKznSXPTYRdz4C384fscefaw_rXP2bzJdnzEVnw,988 +google/api_core/gapic_v1/client_info.py,sha256=FhmeHuSgFIxCCXaFPb4QdpoBzR4FVTy2997fkXorXbM,2421 
+google/api_core/gapic_v1/config.py,sha256=5isOOYPSZCXpDcJDJiwmTxGTUo0RjxJJvW2yjqBR4BI,6300 +google/api_core/gapic_v1/config_async.py,sha256=_jrB5Yv6rxxSU6KwzOxWQ-G_x5mXilpSFAgnQ_6ktrU,1728 +google/api_core/gapic_v1/method.py,sha256=SnMqRoKKCRph9xpnQvQ29SGjCd9WVpHEPK60X-uPyWM,9494 +google/api_core/gapic_v1/method_async.py,sha256=L8BHV3SkvKTDqVSonDuUY1OIRMPEqfsOsTitYRQ_UwQ,2090 +google/api_core/gapic_v1/routing_header.py,sha256=kJKOYpNS2mgSZa4Qt8Ib2Q5ONfNwpJwbNloVJ8e2wMs,3093 +google/api_core/general_helpers.py,sha256=GGgwvdYMLIiRvUqojd3uKBMOPILpK78ZU5we-VRHtwE,2284 +google/api_core/grpc_helpers.py,sha256=cC2ssIUQisK81AkKqUGjYhXEA7nE9kLqksHk9WifjoY,25028 +google/api_core/grpc_helpers_async.py,sha256=tq8sEpe40r0zD5j61C80WltbrVYYKgR_KrdYoCag4-s,13225 +google/api_core/iam.py,sha256=BGz63HtOP5_5oH9Zs93RP0Y6Qshty2eOhFEYj_CoE64,13213 +google/api_core/operation.py,sha256=xmVGQhNvu6Znz94AVZiKryAI_Q31F_tTquodXZeV-eM,13199 +google/api_core/operation_async.py,sha256=XdunwVY6aKA-K0OK-5_dYbqjbvF1DLTYUUL4IOztld4,8046 +google/api_core/operations_v1/__init__.py,sha256=ncvxAGOrunbMNRoQ9n1Io1p1nRN_LV5DutV52UidV8k,1638 +google/api_core/operations_v1/abstract_operations_base_client.py,sha256=BnPbE0-YMjrNG_a5yRyVSSym3TXirhhaX5rA5ZLkuok,15159 +google/api_core/operations_v1/abstract_operations_client.py,sha256=j_ulCLJpyqGh1SY8z5kss9iYBfOwE_XXCTqwQAKpyeI,16073 +google/api_core/operations_v1/operations_async_client.py,sha256=1BENex2y2ovlCHlXR4v5Cfiqk2o36DBWEzPyCCCudbU,14794 +google/api_core/operations_v1/operations_client.py,sha256=-fmbRv_2L_5cJv70WfybRw9EUyLlHB-wTbC-n0Iq4Fg,15274 +google/api_core/operations_v1/operations_client_config.py,sha256=v7B0FiVc5p9HhnpPY1_3FIomFdA-J-4lilomeoC9SkQ,2285 +google/api_core/operations_v1/operations_rest_client_async.py,sha256=qMYVo08Y0jfSU53dmHSDvO7_UL3x8DzJgpvnwAaTyyE,14616 +google/api_core/operations_v1/pagers.py,sha256=exAhGRRYw-Zmkjxs1DhE29pph8a9vFgi8bHTRLCR_f4,2464 +google/api_core/operations_v1/pagers_async.py,sha256=hVrUBo8T-grTFz8FXcS7TIniOElmHmUWIZMoLvu9j2k,2625 +google/api_core/operations_v1/pagers_base.py,sha256=wuFx3UtI-dptyjKnt2jS_LvhyLSjOyEWfxlBBIVujio,2653 +google/api_core/operations_v1/transports/__init__.py,sha256=Ng5VDMks97QNfbkhFSRKmvNwUv3_IQmLUszCGTeJYvE,1457 +google/api_core/operations_v1/transports/base.py,sha256=BHVV_bpPEvrY9ckFwGujHy36z7jlzVi3EOH5f-lOKuA,11780 +google/api_core/operations_v1/transports/rest.py,sha256=135nfJhi_7wLRDil0v3WU2u2jHlc2_BsEzyDdHtN5-Q,20911 +google/api_core/operations_v1/transports/rest_asyncio.py,sha256=sBr6K5f6PYtV-vLdUM3_BjepK1vQSIJdD0iCLA1-eOU,26130 +google/api_core/page_iterator.py,sha256=FXMfqbhlVYAEVjpojytYAiUluVNYAVSC41MdfAhHAX4,20330 +google/api_core/page_iterator_async.py,sha256=TbuXorRhP1wcQTD3raBJhWgSJP1JwJO_nCKJphCbVdw,10294 +google/api_core/path_template.py,sha256=Lyqqw8OECuw5O7y9x1BJvfNbYEbmx4lnTGqc6opSyHk,11685 +google/api_core/protobuf_helpers.py,sha256=ct_P2z6iYNvum0FZ5Uj-96qf83Q_99TP1qcGwvlO_9c,12448 +google/api_core/py.typed,sha256=q8dgH9l1moUXiufHBVjqI0MuJy4Be9a3rNH8Zl_sICA,78 +google/api_core/rest_helpers.py,sha256=2DsInZiHv0sLd9dfLIbEL2vDJQIybWgxlkxnNFahPnI,3529 +google/api_core/rest_streaming.py,sha256=AwduJ7tYa0_iBhFEqCY696NVmNGWWCm6g4wnTqoVjS4,2209 +google/api_core/rest_streaming_async.py,sha256=5GuzrfYFHfR22d9guOtXvZ1E-VHCCusJyWKVRxOcFuE,3340 +google/api_core/retry/__init__.py,sha256=WhgtLBQO2oK-AehH_AHbGbfWo1IdG5ahUGrs3aFGw0o,2088 +google/api_core/retry/retry_base.py,sha256=e1Asrsjp8Joj__GS9n0tiMeseYN5HWocHK2cbThyPHU,12890 
+google/api_core/retry/retry_streaming.py,sha256=sw6Bx7w9G1lK8KCAYn2pw0hZ12sPQAD9h4otC2BXIuQ,11032 +google/api_core/retry/retry_streaming_async.py,sha256=gYs5KWzQ9RHb05ciPuoptOm5VWCOS7fliNG006Ndveg,14517 +google/api_core/retry/retry_unary.py,sha256=X8wIBzhKMpf3PlOmMgotIgg21Hvv16hUb3f8d20hnDc,13517 +google/api_core/retry/retry_unary_async.py,sha256=7PANk3jx6dBKKUZKd3yb2TFPBYO9l7uXg4QmBxmwhQQ,9594 +google/api_core/retry_async.py,sha256=_r0ROYeQqdATtRMx-q_6o4bPmqFzPyjr_oV3lfloDSM,1514 +google/api_core/timeout.py,sha256=heil0E6scuyFkMvymbR2bA33ZmJSavH_SmRNK9kpqcM,10279 +google/api_core/universe.py,sha256=k_K5J0I3kKQiM2yEHvxeqAWxXEQZKJ2SfDlMAH-rQ08,2952 +google/api_core/version.py,sha256=o60ck0jWbBNK-apeCZ-X4zhY4zwRnMnsa5qBTNVm65M,598 +google/api_core/version_header.py,sha256=uEFXosCp8UH7XhznG5GQseTYtWNoJHXRPA557DWsUxA,1046 +google_api_core-2.29.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +google_api_core-2.29.0.dist-info/METADATA,sha256=QahYIiTJrc7BxsAwvPpdyp11_YKr67gdXkKxI8YAuBY,3296 +google_api_core-2.29.0.dist-info/RECORD,, +google_api_core-2.29.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +google_api_core-2.29.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +google_api_core-2.29.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +google_api_core-2.29.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7 diff --git a/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/WHEEL b/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..e7fa31b6f3f78deb1022c1f7927f07d4d16da822 --- /dev/null +++ b/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..cb429113e0f9a73019fd799e8052093fea7f0c8b --- /dev/null +++ b/py311/lib/python3.11/site-packages/google_api_core-2.29.0.dist-info/top_level.txt @@ -0,0 +1 @@ +google diff --git a/py311/lib/python3.11/site-packages/h11/__init__.py b/py311/lib/python3.11/site-packages/h11/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..989e92c3458681a6f0be72ae4105ea742750d328 --- /dev/null +++ b/py311/lib/python3.11/site-packages/h11/__init__.py @@ -0,0 +1,62 @@ +# A highish-level implementation of the HTTP/1.1 wire protocol (RFC 7230), +# containing no networking code at all, loosely modelled on hyper-h2's generic +# implementation of HTTP/2 (and in particular the h2.connection.H2Connection +# class). There's still a bunch of subtle details you need to get right if you +# want to make this actually useful, because it doesn't implement all the +# semantics to check that what you're asking to write to the wire is sensible, +# but at least it gets you out of dealing with the wire itself. 
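+#
+# As a purely illustrative sketch (not part of h11 itself -- the socket
+# handling, host name, and buffer size below are assumptions made for the
+# example), a minimal client built on this API looks roughly like:
+#
+#     import socket
+#     import h11
+#
+#     conn = h11.Connection(our_role=h11.CLIENT)
+#     sock = socket.create_connection(("example.com", 80))
+#     # send() converts events into bytes; the caller owns all actual I/O.
+#     sock.sendall(conn.send(h11.Request(
+#         method="GET", target="/",
+#         headers=[("Host", "example.com"), ("Connection", "close")])))
+#     sock.sendall(conn.send(h11.EndOfMessage()))
+#     while True:
+#         event = conn.next_event()
+#         if event is h11.NEED_DATA:
+#             # next_event() never reads from the network itself.
+#             conn.receive_data(sock.recv(4096))
+#         elif type(event) is h11.EndOfMessage:
+#             break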
+ +from h11._connection import Connection, NEED_DATA, PAUSED +from h11._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from h11._state import ( + CLIENT, + CLOSED, + DONE, + ERROR, + IDLE, + MIGHT_SWITCH_PROTOCOL, + MUST_CLOSE, + SEND_BODY, + SEND_RESPONSE, + SERVER, + SWITCHED_PROTOCOL, +) +from h11._util import LocalProtocolError, ProtocolError, RemoteProtocolError +from h11._version import __version__ + +PRODUCT_ID = "python-h11/" + __version__ + + +__all__ = ( + "Connection", + "NEED_DATA", + "PAUSED", + "ConnectionClosed", + "Data", + "EndOfMessage", + "Event", + "InformationalResponse", + "Request", + "Response", + "CLIENT", + "CLOSED", + "DONE", + "ERROR", + "IDLE", + "MUST_CLOSE", + "SEND_BODY", + "SEND_RESPONSE", + "SERVER", + "SWITCHED_PROTOCOL", + "ProtocolError", + "LocalProtocolError", + "RemoteProtocolError", +) diff --git a/py311/lib/python3.11/site-packages/h11/_abnf.py b/py311/lib/python3.11/site-packages/h11/_abnf.py new file mode 100644 index 0000000000000000000000000000000000000000..933587fba22290d7eb7df4c88e12f1e61702b8ce --- /dev/null +++ b/py311/lib/python3.11/site-packages/h11/_abnf.py @@ -0,0 +1,132 @@ +# We use native strings for all the re patterns, to take advantage of string +# formatting, and then convert to bytestrings when compiling the final re +# objects. + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#whitespace +# OWS = *( SP / HTAB ) +# ; optional whitespace +OWS = r"[ \t]*" + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.token.separators +# token = 1*tchar +# +# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" +# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" +# / DIGIT / ALPHA +# ; any VCHAR, except delimiters +token = r"[-!#$%&'*+.^_`|~0-9a-zA-Z]+" + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#header.fields +# field-name = token +field_name = token + +# The standard says: +# +# field-value = *( field-content / obs-fold ) +# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +# field-vchar = VCHAR / obs-text +# obs-fold = CRLF 1*( SP / HTAB ) +# ; obsolete line folding +# ; see Section 3.2.4 +# +# https://tools.ietf.org/html/rfc5234#appendix-B.1 +# +# VCHAR = %x21-7E +# ; visible (printing) characters +# +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.quoted-string +# obs-text = %x80-FF +# +# However, the standard definition of field-content is WRONG! It disallows +# fields containing a single visible character surrounded by whitespace, +# e.g. "foo a bar". +# +# See: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189 +# +# So our definition of field_content attempts to fix it up... +# +# Also, we allow lots of control characters, because apparently people assume +# that they're legal in practice (e.g., google analytics makes cookies with +# \x01 in them!): +# https://github.com/python-hyper/h11/issues/57 +# We still don't allow NUL or whitespace, because those are often treated as +# meta-characters and letting them through can lead to nasty issues like SSRF. +vchar = r"[\x21-\x7e]" +vchar_or_obs_text = r"[^\x00\s]" +field_vchar = vchar_or_obs_text +field_content = r"{field_vchar}+(?:[ \t]+{field_vchar}+)*".format(**globals()) + +# We handle obs-fold at a different level, and our fixed-up field_content +# already grows to swallow the whole value, so ? 
instead of *
+field_value = r"({field_content})?".format(**globals())
+
+# header-field = field-name ":" OWS field-value OWS
+header_field = (
+    r"(?P<field_name>{field_name})"
+    r":"
+    r"{OWS}"
+    r"(?P<field_value>{field_value})"
+    r"{OWS}".format(**globals())
+)
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#request.line
+#
+# request-line = method SP request-target SP HTTP-version CRLF
+# method = token
+# HTTP-version = HTTP-name "/" DIGIT "." DIGIT
+# HTTP-name = %x48.54.54.50 ; "HTTP", case-sensitive
+#
+# request-target is complicated (see RFC 7230 sec 5.3) -- could be path, full
+# URL, host+port (for connect), or even "*", but in any case we are guaranteed
+# that it consists of the visible printing characters.
+method = token
+request_target = r"{vchar}+".format(**globals())
+http_version = r"HTTP/(?P<http_version>[0-9]\.[0-9])"
+request_line = (
+    r"(?P<method>{method})"
+    r" "
+    r"(?P<target>{request_target})"
+    r" "
+    r"{http_version}".format(**globals())
+)
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#status.line
+#
+# status-line = HTTP-version SP status-code SP reason-phrase CRLF
+# status-code = 3DIGIT
+# reason-phrase = *( HTAB / SP / VCHAR / obs-text )
+status_code = r"[0-9]{3}"
+reason_phrase = r"([ \t]|{vchar_or_obs_text})*".format(**globals())
+status_line = (
+    r"{http_version}"
+    r" "
+    r"(?P<status_code>{status_code})"
+    # However, there are apparently a few too many servers out there that just
+    # leave out the reason phrase:
+    # https://github.com/scrapy/scrapy/issues/345#issuecomment-281756036
+    # https://github.com/seanmonstar/httparse/issues/29
+    # so make it optional. ?: is a non-capturing group.
+    r"(?: (?P<reason>{reason_phrase}))?".format(**globals())
+)
+
+HEXDIG = r"[0-9A-Fa-f]"
+# Actually
+#
+# chunk-size = 1*HEXDIG
+#
+# but we impose an upper-limit to avoid ridiculosity. len(str(2**64)) == 20
+chunk_size = r"({HEXDIG}){{1,20}}".format(**globals())
+# Actually
+#
+# chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
+#
+# but we aren't parsing the things so we don't really care.
+chunk_ext = r";.*"
+chunk_header = (
+    r"(?P<chunk_size>{chunk_size})"
+    r"(?P<chunk_ext>{chunk_ext})?"
+    r"{OWS}\r\n".format(
+        **globals()
+    )  # Even though the specification does not allow for extra whitespaces,
+    # we are lenient with trailing whitespaces because some servers in the wild use them.
+)
diff --git a/py311/lib/python3.11/site-packages/h11/_connection.py b/py311/lib/python3.11/site-packages/h11/_connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..e37d82a82a882c072cb938a90eb4486b51cdad99
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/h11/_connection.py
@@ -0,0 +1,659 @@
+# This contains the main Connection class. Everything in h11 revolves around
+# this.
+from typing import (
+    Any,
+    Callable,
+    cast,
+    Dict,
+    List,
+    Optional,
+    overload,
+    Tuple,
+    Type,
+    Union,
+)
+
+from ._events import (
+    ConnectionClosed,
+    Data,
+    EndOfMessage,
+    Event,
+    InformationalResponse,
+    Request,
+    Response,
+)
+from ._headers import get_comma_header, has_expect_100_continue, set_comma_header
+from ._readers import READERS, ReadersType
+from ._receivebuffer import ReceiveBuffer
+from ._state import (
+    _SWITCH_CONNECT,
+    _SWITCH_UPGRADE,
+    CLIENT,
+    ConnectionState,
+    DONE,
+    ERROR,
+    MIGHT_SWITCH_PROTOCOL,
+    SEND_BODY,
+    SERVER,
+    SWITCHED_PROTOCOL,
+)
+from ._util import (  # Import the internal things we need
+    LocalProtocolError,
+    RemoteProtocolError,
+    Sentinel,
+)
+from ._writers import WRITERS, WritersType
+
+# Everything in __all__ gets re-exported as part of the h11 public API.
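+#
+# A compact, illustrative server-side loop (not part of h11 itself; ``sock``
+# is assumed to be an already-accepted connection, and error handling is
+# omitted) might drive this class roughly like:
+#
+#     conn = Connection(our_role=SERVER)
+#     while True:
+#         event = conn.next_event()
+#         if event is NEED_DATA:
+#             conn.receive_data(sock.recv(4096))
+#         elif type(event) is Request:
+#             body = b"hello"
+#             sock.sendall(conn.send(Response(status_code=200, headers=[
+#                 ("Content-Length", str(len(body)))])))
+#             sock.sendall(conn.send(Data(data=body)))
+#             sock.sendall(conn.send(EndOfMessage()))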
+__all__ = ["Connection", "NEED_DATA", "PAUSED"] + + +class NEED_DATA(Sentinel, metaclass=Sentinel): + pass + + +class PAUSED(Sentinel, metaclass=Sentinel): + pass + + +# If we ever have this much buffered without it making a complete parseable +# event, we error out. The only time we really buffer is when reading the +# request/response line + headers together, so this is effectively the limit on +# the size of that. +# +# Some precedents for defaults: +# - node.js: 80 * 1024 +# - tomcat: 8 * 1024 +# - IIS: 16 * 1024 +# - Apache: <8 KiB per line> +DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024 + + +# RFC 7230's rules for connection lifecycles: +# - If either side says they want to close the connection, then the connection +# must close. +# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close +# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive +# (and even this is a mess -- e.g. if you're implementing a proxy then +# sending Connection: keep-alive is forbidden). +# +# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So +# our rule is: +# - If someone says Connection: close, we will close +# - If someone uses HTTP/1.0, we will close. +def _keep_alive(event: Union[Request, Response]) -> bool: + connection = get_comma_header(event.headers, b"connection") + if b"close" in connection: + return False + if getattr(event, "http_version", b"1.1") < b"1.1": + return False + return True + + +def _body_framing( + request_method: bytes, event: Union[Request, Response] +) -> Tuple[str, Union[Tuple[()], Tuple[int]]]: + # Called when we enter SEND_BODY to figure out framing information for + # this body. + # + # These are the only two events that can trigger a SEND_BODY state: + assert type(event) in (Request, Response) + # Returns one of: + # + # ("content-length", count) + # ("chunked", ()) + # ("http/1.0", ()) + # + # which are (lookup key, *args) for constructing body reader/writer + # objects. + # + # Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3 + # + # Step 1: some responses always have an empty body, regardless of what the + # headers say. + if type(event) is Response: + if ( + event.status_code in (204, 304) + or request_method == b"HEAD" + or (request_method == b"CONNECT" and 200 <= event.status_code < 300) + ): + return ("content-length", (0,)) + # Section 3.3.3 also lists another case -- responses with status_code + # < 200. For us these are InformationalResponses, not Responses, so + # they can't get into this function in the first place. + assert event.status_code >= 200 + + # Step 2: check for Transfer-Encoding (T-E beats C-L): + transfer_encodings = get_comma_header(event.headers, b"transfer-encoding") + if transfer_encodings: + assert transfer_encodings == [b"chunked"] + return ("chunked", ()) + + # Step 3: check for Content-Length + content_lengths = get_comma_header(event.headers, b"content-length") + if content_lengths: + return ("content-length", (int(content_lengths[0]),)) + + # Step 4: no applicable headers; fallback/default depends on type + if type(event) is Request: + return ("content-length", (0,)) + else: + return ("http/1.0", ()) + + +################################################################ +# +# The main Connection class +# +################################################################ + + +class Connection: + """An object encapsulating the state of an HTTP connection. + + Args: + our_role: If you're implementing a client, pass :data:`h11.CLIENT`. 
If
+            you're implementing a server, pass :data:`h11.SERVER`.
+
+        max_incomplete_event_size (int):
+            The maximum number of bytes we're willing to buffer of an
+            incomplete event. In practice this mostly sets a limit on the
+            maximum size of the request/response line + headers. If this is
+            exceeded, then :meth:`next_event` will raise
+            :exc:`RemoteProtocolError`.
+
+    """
+
+    def __init__(
+        self,
+        our_role: Type[Sentinel],
+        max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,
+    ) -> None:
+        self._max_incomplete_event_size = max_incomplete_event_size
+        # State and role tracking
+        if our_role not in (CLIENT, SERVER):
+            raise ValueError(f"expected CLIENT or SERVER, not {our_role!r}")
+        self.our_role = our_role
+        self.their_role: Type[Sentinel]
+        if our_role is CLIENT:
+            self.their_role = SERVER
+        else:
+            self.their_role = CLIENT
+        self._cstate = ConnectionState()
+
+        # Callables for converting data->events or vice-versa given the
+        # current state
+        self._writer = self._get_io_object(self.our_role, None, WRITERS)
+        self._reader = self._get_io_object(self.their_role, None, READERS)
+
+        # Holds any unprocessed received data
+        self._receive_buffer = ReceiveBuffer()
+        # If this is true, then it indicates that the incoming connection was
+        # closed *after* the end of whatever's in self._receive_buffer:
+        self._receive_buffer_closed = False
+
+        # Extra bits of state that don't fit into the state machine.
+        #
+        # These two are only used to interpret framing headers for figuring
+        # out how to read/write response bodies. their_http_version is also
+        # made available as a convenient public API.
+        self.their_http_version: Optional[bytes] = None
+        self._request_method: Optional[bytes] = None
+        # This is pure flow-control and doesn't at all affect the set of legal
+        # transitions, so no need to bother ConnectionState with it:
+        self.client_is_waiting_for_100_continue = False
+
+    @property
+    def states(self) -> Dict[Type[Sentinel], Type[Sentinel]]:
+        """A dictionary like::
+
+            {CLIENT: <client state>, SERVER: <server state>}
+
+        See :ref:`state-machine` for details.
+
+        """
+        return dict(self._cstate.states)
+
+    @property
+    def our_state(self) -> Type[Sentinel]:
+        """The current state of whichever role we are playing. See
+        :ref:`state-machine` for details.
+        """
+        return self._cstate.states[self.our_role]
+
+    @property
+    def their_state(self) -> Type[Sentinel]:
+        """The current state of whichever role we are NOT playing. See
+        :ref:`state-machine` for details.
+        """
+        return self._cstate.states[self.their_role]
+
+    @property
+    def they_are_waiting_for_100_continue(self) -> bool:
+        return self.their_role is CLIENT and self.client_is_waiting_for_100_continue
+
+    def start_next_cycle(self) -> None:
+        """Attempt to reset our connection state for a new request/response
+        cycle.
+
+        If both client and server are in :data:`DONE` state, then resets them
+        both to :data:`IDLE` state in preparation for a new request/response
+        cycle on this same connection. Otherwise, raises a
+        :exc:`LocalProtocolError`.
+
+        See :ref:`keepalive-and-pipelining`.
+ + """ + old_states = dict(self._cstate.states) + self._cstate.start_next_cycle() + self._request_method = None + # self.their_http_version gets left alone, since it presumably lasts + # beyond a single request/response cycle + assert not self.client_is_waiting_for_100_continue + self._respond_to_state_changes(old_states) + + def _process_error(self, role: Type[Sentinel]) -> None: + old_states = dict(self._cstate.states) + self._cstate.process_error(role) + self._respond_to_state_changes(old_states) + + def _server_switch_event(self, event: Event) -> Optional[Type[Sentinel]]: + if type(event) is InformationalResponse and event.status_code == 101: + return _SWITCH_UPGRADE + if type(event) is Response: + if ( + _SWITCH_CONNECT in self._cstate.pending_switch_proposals + and 200 <= event.status_code < 300 + ): + return _SWITCH_CONNECT + return None + + # All events go through here + def _process_event(self, role: Type[Sentinel], event: Event) -> None: + # First, pass the event through the state machine to make sure it + # succeeds. + old_states = dict(self._cstate.states) + if role is CLIENT and type(event) is Request: + if event.method == b"CONNECT": + self._cstate.process_client_switch_proposal(_SWITCH_CONNECT) + if get_comma_header(event.headers, b"upgrade"): + self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE) + server_switch_event = None + if role is SERVER: + server_switch_event = self._server_switch_event(event) + self._cstate.process_event(role, type(event), server_switch_event) + + # Then perform the updates triggered by it. + + if type(event) is Request: + self._request_method = event.method + + if role is self.their_role and type(event) in ( + Request, + Response, + InformationalResponse, + ): + event = cast(Union[Request, Response, InformationalResponse], event) + self.their_http_version = event.http_version + + # Keep alive handling + # + # RFC 7230 doesn't really say what one should do if Connection: close + # shows up on a 1xx InformationalResponse. I think the idea is that + # this is not supposed to happen. In any case, if it does happen, we + # ignore it. + if type(event) in (Request, Response) and not _keep_alive( + cast(Union[Request, Response], event) + ): + self._cstate.process_keep_alive_disabled() + + # 100-continue + if type(event) is Request and has_expect_100_continue(event): + self.client_is_waiting_for_100_continue = True + if type(event) in (InformationalResponse, Response): + self.client_is_waiting_for_100_continue = False + if role is CLIENT and type(event) in (Data, EndOfMessage): + self.client_is_waiting_for_100_continue = False + + self._respond_to_state_changes(old_states, event) + + def _get_io_object( + self, + role: Type[Sentinel], + event: Optional[Event], + io_dict: Union[ReadersType, WritersType], + ) -> Optional[Callable[..., Any]]: + # event may be None; it's only used when entering SEND_BODY + state = self._cstate.states[role] + if state is SEND_BODY: + # Special case: the io_dict has a dict of reader/writer factories + # that depend on the request/response framing. + framing_type, args = _body_framing( + cast(bytes, self._request_method), cast(Union[Request, Response], event) + ) + return io_dict[SEND_BODY][framing_type](*args) # type: ignore[index] + else: + # General case: the io_dict just has the appropriate reader/writer + # for this state + return io_dict.get((role, state)) # type: ignore[return-value] + + # This must be called after any action that might have caused + # self._cstate.states to change. 
+ def _respond_to_state_changes( + self, + old_states: Dict[Type[Sentinel], Type[Sentinel]], + event: Optional[Event] = None, + ) -> None: + # Update reader/writer + if self.our_state != old_states[self.our_role]: + self._writer = self._get_io_object(self.our_role, event, WRITERS) + if self.their_state != old_states[self.their_role]: + self._reader = self._get_io_object(self.their_role, event, READERS) + + @property + def trailing_data(self) -> Tuple[bytes, bool]: + """Data that has been received, but not yet processed, represented as + a tuple with two elements, where the first is a byte-string containing + the unprocessed data itself, and the second is a bool that is True if + the receive connection was closed. + + See :ref:`switching-protocols` for discussion of why you'd want this. + """ + return (bytes(self._receive_buffer), self._receive_buffer_closed) + + def receive_data(self, data: bytes) -> None: + """Add data to our internal receive buffer. + + This does not actually do any processing on the data, just stores + it. To trigger processing, you have to call :meth:`next_event`. + + Args: + data (:term:`bytes-like object`): + The new data that was just received. + + Special case: If *data* is an empty byte-string like ``b""``, + then this indicates that the remote side has closed the + connection (end of file). Normally this is convenient, because + standard Python APIs like :meth:`file.read` or + :meth:`socket.recv` use ``b""`` to indicate end-of-file, while + other failures to read are indicated using other mechanisms + like raising :exc:`TimeoutError`. When using such an API you + can just blindly pass through whatever you get from ``read`` + to :meth:`receive_data`, and everything will work. + + But, if you have an API where reading an empty string is a + valid non-EOF condition, then you need to be aware of this and + make sure to check for such strings and avoid passing them to + :meth:`receive_data`. + + Returns: + Nothing, but after calling this you should call :meth:`next_event` + to parse the newly received data. + + Raises: + RuntimeError: + Raised if you pass an empty *data*, indicating EOF, and then + pass a non-empty *data*, indicating more data that somehow + arrived after the EOF. + + (Calling ``receive_data(b"")`` multiple times is fine, + and equivalent to calling it once.) + + """ + if data: + if self._receive_buffer_closed: + raise RuntimeError("received close, then received more data?") + self._receive_buffer += data + else: + self._receive_buffer_closed = True + + def _extract_next_receive_event( + self, + ) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]: + state = self.their_state + # We don't pause immediately when they enter DONE, because even in + # DONE state we can still process a ConnectionClosed() event. But + # if we have data in our buffer, then we definitely aren't getting + # a ConnectionClosed() immediately and we need to pause. + if state is DONE and self._receive_buffer: + return PAUSED + if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL: + return PAUSED + assert self._reader is not None + event = self._reader(self._receive_buffer) + if event is None: + if not self._receive_buffer and self._receive_buffer_closed: + # In some unusual cases (basically just HTTP/1.0 bodies), EOF + # triggers an actual protocol event; in that case, we want to + # return that event, and then the state will change and we'll + # get called again to generate the actual ConnectionClosed(). 
+ if hasattr(self._reader, "read_eof"): + event = self._reader.read_eof() + else: + event = ConnectionClosed() + if event is None: + event = NEED_DATA + return event # type: ignore[no-any-return] + + def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]: + """Parse the next event out of our receive buffer, update our internal + state, and return it. + + This is a mutating operation -- think of it like calling :func:`next` + on an iterator. + + Returns: + : One of three things: + + 1) An event object -- see :ref:`events`. + + 2) The special constant :data:`NEED_DATA`, which indicates that + you need to read more data from your socket and pass it to + :meth:`receive_data` before this method will be able to return + any more events. + + 3) The special constant :data:`PAUSED`, which indicates that we + are not in a state where we can process incoming data (usually + because the peer has finished their part of the current + request/response cycle, and you have not yet called + :meth:`start_next_cycle`). See :ref:`flow-control` for details. + + Raises: + RemoteProtocolError: + The peer has misbehaved. You should close the connection + (possibly after sending some kind of 4xx response). + + Once this method returns :class:`ConnectionClosed` once, then all + subsequent calls will also return :class:`ConnectionClosed`. + + If this method raises any exception besides :exc:`RemoteProtocolError` + then that's a bug -- if it happens please file a bug report! + + If this method raises any exception then it also sets + :attr:`Connection.their_state` to :data:`ERROR` -- see + :ref:`error-handling` for discussion. + + """ + + if self.their_state is ERROR: + raise RemoteProtocolError("Can't receive data when peer state is ERROR") + try: + event = self._extract_next_receive_event() + if event not in [NEED_DATA, PAUSED]: + self._process_event(self.their_role, cast(Event, event)) + if event is NEED_DATA: + if len(self._receive_buffer) > self._max_incomplete_event_size: + # 431 is "Request header fields too large" which is pretty + # much the only situation where we can get here + raise RemoteProtocolError( + "Receive buffer too long", error_status_hint=431 + ) + if self._receive_buffer_closed: + # We're still trying to complete some event, but that's + # never going to happen because no more data is coming + raise RemoteProtocolError("peer unexpectedly closed connection") + return event + except BaseException as exc: + self._process_error(self.their_role) + if isinstance(exc, LocalProtocolError): + exc._reraise_as_remote_protocol_error() + else: + raise + + @overload + def send(self, event: ConnectionClosed) -> None: + ... + + @overload + def send( + self, event: Union[Request, InformationalResponse, Response, Data, EndOfMessage] + ) -> bytes: + ... + + @overload + def send(self, event: Event) -> Optional[bytes]: + ... + + def send(self, event: Event) -> Optional[bytes]: + """Convert a high-level event into bytes that can be sent to the peer, + while updating our internal state machine. + + Args: + event: The :ref:`event ` to send. + + Returns: + If ``type(event) is ConnectionClosed``, then returns + ``None``. Otherwise, returns a :term:`bytes-like object`. + + Raises: + LocalProtocolError: + Sending this event at this time would violate our + understanding of the HTTP/1.1 protocol. + + If this method raises any exception then it also sets + :attr:`Connection.our_state` to :data:`ERROR` -- see + :ref:`error-handling` for discussion. 
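        For example (an editor's sketch -- ``conn`` and ``sock`` are
        hypothetical names for a client :class:`Connection` with a request
        in flight and the socket it writes to)::

            payload = conn.send(Data(data=b"hello"))
            payload += conn.send(EndOfMessage())
            sock.sendall(payload)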
+ + """ + data_list = self.send_with_data_passthrough(event) + if data_list is None: + return None + else: + return b"".join(data_list) + + def send_with_data_passthrough(self, event: Event) -> Optional[List[bytes]]: + """Identical to :meth:`send`, except that in situations where + :meth:`send` returns a single :term:`bytes-like object`, this instead + returns a list of them -- and when sending a :class:`Data` event, this + list is guaranteed to contain the exact object you passed in as + :attr:`Data.data`. See :ref:`sendfile` for discussion. + + """ + if self.our_state is ERROR: + raise LocalProtocolError("Can't send data when our state is ERROR") + try: + if type(event) is Response: + event = self._clean_up_response_headers_for_sending(event) + # We want to call _process_event before calling the writer, + # because if someone tries to do something invalid then this will + # give a sensible error message, while our writers all just assume + # they will only receive valid events. But, _process_event might + # change self._writer. So we have to do a little dance: + writer = self._writer + self._process_event(self.our_role, event) + if type(event) is ConnectionClosed: + return None + else: + # In any situation where writer is None, process_event should + # have raised ProtocolError + assert writer is not None + data_list: List[bytes] = [] + writer(event, data_list.append) + return data_list + except: + self._process_error(self.our_role) + raise + + def send_failed(self) -> None: + """Notify the state machine that we failed to send the data it gave + us. + + This causes :attr:`Connection.our_state` to immediately become + :data:`ERROR` -- see :ref:`error-handling` for discussion. + + """ + self._process_error(self.our_role) + + # When sending a Response, we take responsibility for a few things: + # + # - Sometimes you MUST set Connection: close. We take care of those + # times. (You can also set it yourself if you want, and if you do then + # we'll respect that and close the connection at the right time. But you + # don't have to worry about that unless you want to.) + # + # - The user has to set Content-Length if they want it. Otherwise, for + # responses that have bodies (e.g. not HEAD), then we will automatically + # select the right mechanism for streaming a body of unknown length, + # which depends on depending on the peer's HTTP version. + # + # This function's *only* responsibility is making sure headers are set up + # right -- everything downstream just looks at the headers. There are no + # side channels. + def _clean_up_response_headers_for_sending(self, response: Response) -> Response: + assert type(response) is Response + + headers = response.headers + need_close = False + + # HEAD requests need some special handling: they always act like they + # have Content-Length: 0, and that's how _body_framing treats + # them. But their headers are supposed to match what we would send if + # the request was a GET. (Technically there is one deviation allowed: + # we're allowed to leave out the framing headers -- see + # https://tools.ietf.org/html/rfc7231#section-4.3.2 . But it's just as + # easy to get them right.) + method_for_choosing_headers = cast(bytes, self._request_method) + if method_for_choosing_headers == b"HEAD": + method_for_choosing_headers = b"GET" + framing_type, _ = _body_framing(method_for_choosing_headers, response) + if framing_type in ("chunked", "http/1.0"): + # This response has a body of unknown length. 
+ # If our peer is HTTP/1.1, we use Transfer-Encoding: chunked + # If our peer is HTTP/1.0, we use no framing headers, and close the + # connection afterwards. + # + # Make sure to clear Content-Length (in principle user could have + # set both and then we ignored Content-Length b/c + # Transfer-Encoding overwrote it -- this would be naughty of them, + # but the HTTP spec says that if our peer does this then we have + # to fix it instead of erroring out, so we'll accord the user the + # same respect). + headers = set_comma_header(headers, b"content-length", []) + if self.their_http_version is None or self.their_http_version < b"1.1": + # Either we never got a valid request and are sending back an + # error (their_http_version is None), so we assume the worst; + # or else we did get a valid HTTP/1.0 request, so we know that + # they don't understand chunked encoding. + headers = set_comma_header(headers, b"transfer-encoding", []) + # This is actually redundant ATM, since currently we + # unconditionally disable keep-alive when talking to HTTP/1.0 + # peers. But let's be defensive just in case we add + # Connection: keep-alive support later: + if self._request_method != b"HEAD": + need_close = True + else: + headers = set_comma_header(headers, b"transfer-encoding", [b"chunked"]) + + if not self._cstate.keep_alive or need_close: + # Make sure Connection: close is set + connection = set(get_comma_header(headers, b"connection")) + connection.discard(b"keep-alive") + connection.add(b"close") + headers = set_comma_header(headers, b"connection", sorted(connection)) + + return Response( + headers=headers, + status_code=response.status_code, + http_version=response.http_version, + reason=response.reason, + ) diff --git a/py311/lib/python3.11/site-packages/h11/_events.py b/py311/lib/python3.11/site-packages/h11/_events.py new file mode 100644 index 0000000000000000000000000000000000000000..ca1c3adbde2c4e7710482a18e3471f91f1da610e --- /dev/null +++ b/py311/lib/python3.11/site-packages/h11/_events.py @@ -0,0 +1,369 @@ +# High level events that make up HTTP/1.1 conversations. Loosely inspired by +# the corresponding events in hyper-h2: +# +# http://python-hyper.org/h2/en/stable/api.html#events +# +# Don't subclass these. Stuff will break. + +import re +from abc import ABC +from dataclasses import dataclass +from typing import List, Tuple, Union + +from ._abnf import method, request_target +from ._headers import Headers, normalize_and_validate +from ._util import bytesify, LocalProtocolError, validate + +# Everything in __all__ gets re-exported as part of the h11 public API. +__all__ = [ + "Event", + "Request", + "InformationalResponse", + "Response", + "Data", + "EndOfMessage", + "ConnectionClosed", +] + +method_re = re.compile(method.encode("ascii")) +request_target_re = re.compile(request_target.encode("ascii")) + + +class Event(ABC): + """ + Base class for h11 events. + """ + + __slots__ = () + + +@dataclass(init=False, frozen=True) +class Request(Event): + """The beginning of an HTTP request. + + Fields: + + .. attribute:: method + + An HTTP method, e.g. ``b"GET"`` or ``b"POST"``. Always a byte + string. :term:`Bytes-like objects ` and native + strings containing only ascii characters will be automatically + converted to byte strings. + + .. attribute:: target + + The target of an HTTP request, e.g. ``b"/index.html"``, or one of the + more exotic formats described in `RFC 7320, section 5.3 + `_. Always a byte + string. 
:term:`Bytes-like objects ` and native + strings containing only ascii characters will be automatically + converted to byte strings. + + .. attribute:: headers + + Request headers, represented as a list of (name, value) pairs. See + :ref:`the header normalization rules ` for details. + + .. attribute:: http_version + + The HTTP protocol version, represented as a byte string like + ``b"1.1"``. See :ref:`the HTTP version normalization rules + ` for details. + + """ + + __slots__ = ("method", "headers", "target", "http_version") + + method: bytes + headers: Headers + target: bytes + http_version: bytes + + def __init__( + self, + *, + method: Union[bytes, str], + headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]], + target: Union[bytes, str], + http_version: Union[bytes, str] = b"1.1", + _parsed: bool = False, + ) -> None: + super().__init__() + if isinstance(headers, Headers): + object.__setattr__(self, "headers", headers) + else: + object.__setattr__( + self, "headers", normalize_and_validate(headers, _parsed=_parsed) + ) + if not _parsed: + object.__setattr__(self, "method", bytesify(method)) + object.__setattr__(self, "target", bytesify(target)) + object.__setattr__(self, "http_version", bytesify(http_version)) + else: + object.__setattr__(self, "method", method) + object.__setattr__(self, "target", target) + object.__setattr__(self, "http_version", http_version) + + # "A server MUST respond with a 400 (Bad Request) status code to any + # HTTP/1.1 request message that lacks a Host header field and to any + # request message that contains more than one Host header field or a + # Host header field with an invalid field-value." + # -- https://tools.ietf.org/html/rfc7230#section-5.4 + host_count = 0 + for name, value in self.headers: + if name == b"host": + host_count += 1 + if self.http_version == b"1.1" and host_count == 0: + raise LocalProtocolError("Missing mandatory Host: header") + if host_count > 1: + raise LocalProtocolError("Found multiple Host: headers") + + validate(method_re, self.method, "Illegal method characters") + validate(request_target_re, self.target, "Illegal target characters") + + # This is an unhashable type. + __hash__ = None # type: ignore + + +@dataclass(init=False, frozen=True) +class _ResponseBase(Event): + __slots__ = ("headers", "http_version", "reason", "status_code") + + headers: Headers + http_version: bytes + reason: bytes + status_code: int + + def __init__( + self, + *, + headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]], + status_code: int, + http_version: Union[bytes, str] = b"1.1", + reason: Union[bytes, str] = b"", + _parsed: bool = False, + ) -> None: + super().__init__() + if isinstance(headers, Headers): + object.__setattr__(self, "headers", headers) + else: + object.__setattr__( + self, "headers", normalize_and_validate(headers, _parsed=_parsed) + ) + if not _parsed: + object.__setattr__(self, "reason", bytesify(reason)) + object.__setattr__(self, "http_version", bytesify(http_version)) + if not isinstance(status_code, int): + raise LocalProtocolError("status code must be integer") + # Because IntEnum objects are instances of int, but aren't + # duck-compatible (sigh), see gh-72. + object.__setattr__(self, "status_code", int(status_code)) + else: + object.__setattr__(self, "reason", reason) + object.__setattr__(self, "http_version", http_version) + object.__setattr__(self, "status_code", status_code) + + self.__post_init__() + + def __post_init__(self) -> None: + pass + + # This is an unhashable type. 
+    __hash__ = None  # type: ignore
+
+
+@dataclass(init=False, frozen=True)
+class InformationalResponse(_ResponseBase):
+    """An HTTP informational response.
+
+    Fields:
+
+    .. attribute:: status_code
+
+       The status code of this response, as an integer. For an
+       :class:`InformationalResponse`, this is always in the range [100,
+       200).
+
+    .. attribute:: headers
+
+       Response headers, represented as a list of (name, value) pairs. See
+       :ref:`the header normalization rules <headers-format>` for
+       details.
+
+    .. attribute:: http_version
+
+       The HTTP protocol version, represented as a byte string like
+       ``b"1.1"``. See :ref:`the HTTP version normalization rules
+       <http_version-format>` for details.
+
+    .. attribute:: reason
+
+       The reason phrase of this response, as a byte string. For example:
+       ``b"OK"``, or ``b"Not Found"``.
+
+    """
+
+    def __post_init__(self) -> None:
+        if not (100 <= self.status_code < 200):
+            raise LocalProtocolError(
+                "InformationalResponse status_code should be in range "
+                "[100, 200), not {}".format(self.status_code)
+            )
+
+    # This is an unhashable type.
+    __hash__ = None  # type: ignore
+
+
+@dataclass(init=False, frozen=True)
+class Response(_ResponseBase):
+    """The beginning of an HTTP response.
+
+    Fields:
+
+    .. attribute:: status_code
+
+       The status code of this response, as an integer. For a
+       :class:`Response`, this is always in the range [200, 1000).
+
+    .. attribute:: headers
+
+       Response headers, represented as a list of (name, value) pairs. See
+       :ref:`the header normalization rules <headers-format>` for details.
+
+    .. attribute:: http_version
+
+       The HTTP protocol version, represented as a byte string like
+       ``b"1.1"``. See :ref:`the HTTP version normalization rules
+       <http_version-format>` for details.
+
+    .. attribute:: reason
+
+       The reason phrase of this response, as a byte string. For example:
+       ``b"OK"``, or ``b"Not Found"``.
+
+    """
+
+    def __post_init__(self) -> None:
+        if not (200 <= self.status_code < 1000):
+            raise LocalProtocolError(
+                "Response status_code should be in range [200, 1000), not {}".format(
+                    self.status_code
+                )
+            )
+
+    # This is an unhashable type.
+    __hash__ = None  # type: ignore
+
+
+@dataclass(init=False, frozen=True)
+class Data(Event):
+    """Part of an HTTP message body.
+
+    Fields:
+
+    .. attribute:: data
+
+       A :term:`bytes-like object` containing part of a message body. Or, if
+       using the ``combine=False`` argument to :meth:`Connection.send`, then
+       any object that your socket writing code knows what to do with, and for
+       which calling :func:`len` returns the number of bytes that will be
+       written -- see :ref:`sendfile` for details.
+
+    .. attribute:: chunk_start
+
+       A marker that indicates whether this data object is from the start of a
+       chunked transfer encoding chunk. This field is ignored when a Data
+       event is provided to :meth:`Connection.send`: it is only valid on
+       events emitted from :meth:`Connection.next_event`. You probably
+       shouldn't use this attribute at all; see
+       :ref:`chunk-delimiters-are-bad` for details.
+
+    .. attribute:: chunk_end
+
+       A marker that indicates whether this data object is the last for a
+       given chunked transfer encoding chunk. This field is ignored when a
+       Data event is provided to :meth:`Connection.send`: it is only valid
+       on events emitted from :meth:`Connection.next_event`. You probably
+       shouldn't use this attribute at all; see
+       :ref:`chunk-delimiters-are-bad` for details.
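    For example (an editor's sketch): a 6-byte chunk that arrives split
    across two reads might be surfaced by :meth:`Connection.next_event`
    as::

        Data(data=b"hel", chunk_start=True, chunk_end=False)
        Data(data=b"lo!", chunk_start=False, chunk_end=True)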
+ + """ + + __slots__ = ("data", "chunk_start", "chunk_end") + + data: bytes + chunk_start: bool + chunk_end: bool + + def __init__( + self, data: bytes, chunk_start: bool = False, chunk_end: bool = False + ) -> None: + object.__setattr__(self, "data", data) + object.__setattr__(self, "chunk_start", chunk_start) + object.__setattr__(self, "chunk_end", chunk_end) + + # This is an unhashable type. + __hash__ = None # type: ignore + + +# XX FIXME: "A recipient MUST ignore (or consider as an error) any fields that +# are forbidden to be sent in a trailer, since processing them as if they were +# present in the header section might bypass external security filters." +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#chunked.trailer.part +# Unfortunately, the list of forbidden fields is long and vague :-/ +@dataclass(init=False, frozen=True) +class EndOfMessage(Event): + """The end of an HTTP message. + + Fields: + + .. attribute:: headers + + Default value: ``[]`` + + Any trailing headers attached to this message, represented as a list of + (name, value) pairs. See :ref:`the header normalization rules + ` for details. + + Must be empty unless ``Transfer-Encoding: chunked`` is in use. + + """ + + __slots__ = ("headers",) + + headers: Headers + + def __init__( + self, + *, + headers: Union[ + Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]], None + ] = None, + _parsed: bool = False, + ) -> None: + super().__init__() + if headers is None: + headers = Headers([]) + elif not isinstance(headers, Headers): + headers = normalize_and_validate(headers, _parsed=_parsed) + + object.__setattr__(self, "headers", headers) + + # This is an unhashable type. + __hash__ = None # type: ignore + + +@dataclass(frozen=True) +class ConnectionClosed(Event): + """This event indicates that the sender has closed their outgoing + connection. + + Note that this does not necessarily mean that they can't *receive* further + data, because TCP connections are composed to two one-way channels which + can be closed independently. See :ref:`closing` for details. + + No fields. + """ + + pass diff --git a/py311/lib/python3.11/site-packages/h11/_headers.py b/py311/lib/python3.11/site-packages/h11/_headers.py new file mode 100644 index 0000000000000000000000000000000000000000..31da3e2b23b55a624b36f105e62a6902e63286aa --- /dev/null +++ b/py311/lib/python3.11/site-packages/h11/_headers.py @@ -0,0 +1,282 @@ +import re +from typing import AnyStr, cast, List, overload, Sequence, Tuple, TYPE_CHECKING, Union + +from ._abnf import field_name, field_value +from ._util import bytesify, LocalProtocolError, validate + +if TYPE_CHECKING: + from ._events import Request + +try: + from typing import Literal +except ImportError: + from typing_extensions import Literal # type: ignore + +CONTENT_LENGTH_MAX_DIGITS = 20 # allow up to 1 billion TB - 1 + + +# Facts +# ----- +# +# Headers are: +# keys: case-insensitive ascii +# values: mixture of ascii and raw bytes +# +# "Historically, HTTP has allowed field content with text in the ISO-8859-1 +# charset [ISO-8859-1], supporting other charsets only through use of +# [RFC2047] encoding. In practice, most HTTP header field values use only a +# subset of the US-ASCII charset [USASCII]. Newly defined header fields SHOULD +# limit their field values to US-ASCII octets. A recipient SHOULD treat other +# octets in field content (obs-text) as opaque data." 
+# And it deprecates all non-ascii values
+#
+# Leading/trailing whitespace in header names is forbidden
+#
+# Values get leading/trailing whitespace stripped
+#
+# Content-Disposition actually needs to contain unicode semantically; to
+# accomplish this it has a terrifically weird way of encoding the filename
+# itself as ascii (and even this still has lots of cross-browser
+# incompatibilities)
+#
+# Order is important:
+# "a proxy MUST NOT change the order of these field values when forwarding a
+# message"
+# (and there are several headers where the order indicates a preference)
+#
+# Multiple occurrences of the same header:
+# "A sender MUST NOT generate multiple header fields with the same field name
+# in a message unless either the entire field value for that header field is
+# defined as a comma-separated list [or the header is Set-Cookie which gets a
+# special exception]" - RFC 7230. (cookies are in RFC 6265)
+#
+# So every header aside from Set-Cookie can be merged by b", ".join if it
+# occurs repeatedly. But, of course, they can't necessarily be split by
+# .split(b","), because quoting.
+#
+# Given all this mess (case insensitive, duplicates allowed, order is
+# important, ...), there doesn't appear to be any standard way to handle
+# headers in Python -- they're almost like dicts, but... actually just
+# aren't. For now we punt and just use a super simple representation: headers
+# are a list of pairs
+#
+#   [(name1, value1), (name2, value2), ...]
+#
+# where all entries are bytestrings, names are lowercase and have no
+# leading/trailing whitespace, and values are bytestrings with no
+# leading/trailing whitespace. Searching and updating are done via naive O(n)
+# methods.
+#
+# Maybe a dict-of-lists would be better?
+
+_content_length_re = re.compile(rb"[0-9]+")
+_field_name_re = re.compile(field_name.encode("ascii"))
+_field_value_re = re.compile(field_value.encode("ascii"))
+
+
+class Headers(Sequence[Tuple[bytes, bytes]]):
+    """
+    A list-like interface that allows iterating over headers as byte-pairs
+    of (lowercased-name, value).
+
+    Internally we actually store the representation as three-tuples,
+    including both the raw original casing, in order to preserve casing
+    over-the-wire, and the lowercased name, for case-insensitive comparisons.
+ + r = Request( + method="GET", + target="/", + headers=[("Host", "example.org"), ("Connection", "keep-alive")], + http_version="1.1", + ) + assert r.headers == [ + (b"host", b"example.org"), + (b"connection", b"keep-alive") + ] + assert r.headers.raw_items() == [ + (b"Host", b"example.org"), + (b"Connection", b"keep-alive") + ] + """ + + __slots__ = "_full_items" + + def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None: + self._full_items = full_items + + def __bool__(self) -> bool: + return bool(self._full_items) + + def __eq__(self, other: object) -> bool: + return list(self) == list(other) # type: ignore + + def __len__(self) -> int: + return len(self._full_items) + + def __repr__(self) -> str: + return "" % repr(list(self)) + + def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override] + _, name, value = self._full_items[idx] + return (name, value) + + def raw_items(self) -> List[Tuple[bytes, bytes]]: + return [(raw_name, value) for raw_name, _, value in self._full_items] + + +HeaderTypes = Union[ + List[Tuple[bytes, bytes]], + List[Tuple[bytes, str]], + List[Tuple[str, bytes]], + List[Tuple[str, str]], +] + + +@overload +def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers: + ... + + +@overload +def normalize_and_validate(headers: HeaderTypes, _parsed: Literal[False]) -> Headers: + ... + + +@overload +def normalize_and_validate( + headers: Union[Headers, HeaderTypes], _parsed: bool = False +) -> Headers: + ... + + +def normalize_and_validate( + headers: Union[Headers, HeaderTypes], _parsed: bool = False +) -> Headers: + new_headers = [] + seen_content_length = None + saw_transfer_encoding = False + for name, value in headers: + # For headers coming out of the parser, we can safely skip some steps, + # because it always returns bytes and has already run these regexes + # over the data: + if not _parsed: + name = bytesify(name) + value = bytesify(value) + validate(_field_name_re, name, "Illegal header name {!r}", name) + validate(_field_value_re, value, "Illegal header value {!r}", value) + assert isinstance(name, bytes) + assert isinstance(value, bytes) + + raw_name = name + name = name.lower() + if name == b"content-length": + lengths = {length.strip() for length in value.split(b",")} + if len(lengths) != 1: + raise LocalProtocolError("conflicting Content-Length headers") + value = lengths.pop() + validate(_content_length_re, value, "bad Content-Length") + if len(value) > CONTENT_LENGTH_MAX_DIGITS: + raise LocalProtocolError("bad Content-Length") + if seen_content_length is None: + seen_content_length = value + new_headers.append((raw_name, name, value)) + elif seen_content_length != value: + raise LocalProtocolError("conflicting Content-Length headers") + elif name == b"transfer-encoding": + # "A server that receives a request message with a transfer coding + # it does not understand SHOULD respond with 501 (Not + # Implemented)." 
+            # https://tools.ietf.org/html/rfc7230#section-3.3.1
+            if saw_transfer_encoding:
+                raise LocalProtocolError(
+                    "multiple Transfer-Encoding headers", error_status_hint=501
+                )
+            # "All transfer-coding names are case-insensitive"
+            # -- https://tools.ietf.org/html/rfc7230#section-4
+            value = value.lower()
+            if value != b"chunked":
+                raise LocalProtocolError(
+                    "Only Transfer-Encoding: chunked is supported",
+                    error_status_hint=501,
+                )
+            saw_transfer_encoding = True
+            new_headers.append((raw_name, name, value))
+        else:
+            new_headers.append((raw_name, name, value))
+    return Headers(new_headers)
+
+
+def get_comma_header(headers: Headers, name: bytes) -> List[bytes]:
+    # Should only be used for headers whose value is a list of
+    # comma-separated, case-insensitive values.
+    #
+    # The header name `name` is expected to be lower-case bytes.
+    #
+    # Connection: meets these criteria (including case insensitivity).
+    #
+    # Content-Length: technically is just a single value (1*DIGIT), but the
+    # standard makes reference to implementations that do multiple values, and
+    # using this doesn't hurt. Ditto, case insensitivity doesn't matter either
+    # way.
+    #
+    # Transfer-Encoding: is more complex (allows for quoted strings), so
+    # splitting on , is actually wrong. For example, this is legal:
+    #
+    #   Transfer-Encoding: foo; options="1,2", chunked
+    #
+    # and should be parsed as
+    #
+    #   foo; options="1,2"
+    #   chunked
+    #
+    # but this naive function will parse it as
+    #
+    #   foo; options="1
+    #   2"
+    #   chunked
+    #
+    # However, this is okay because the only thing we are going to do with
+    # any Transfer-Encoding is reject ones that aren't just "chunked", so
+    # both of these will be treated the same anyway.
+    #
+    # Expect: the only legal value is the literal string
+    # "100-continue". Splitting on commas is harmless. Case insensitive.
+    #
+    out: List[bytes] = []
+    for _, found_name, found_raw_value in headers._full_items:
+        if found_name == name:
+            found_raw_value = found_raw_value.lower()
+            for found_split_value in found_raw_value.split(b","):
+                found_split_value = found_split_value.strip()
+                if found_split_value:
+                    out.append(found_split_value)
+    return out
+
+
+def set_comma_header(headers: Headers, name: bytes, new_values: List[bytes]) -> Headers:
+    # The header name `name` is expected to be lower-case bytes.
+    #
+    # Note that when we store the header we use title casing for the header
+    # names, in order to match the conventional HTTP header style.
+    #
+    # Simply calling `.title()` is a blunt approach, but it's correct
+    # here given the cases where we're using `set_comma_header`...
+    #
+    # Connection, Content-Length, Transfer-Encoding.
+    new_headers: List[Tuple[bytes, bytes]] = []
+    for found_raw_name, found_name, found_raw_value in headers._full_items:
+        if found_name != name:
+            new_headers.append((found_raw_name, found_raw_value))
+    for new_value in new_values:
+        new_headers.append((name.title(), new_value))
+    return normalize_and_validate(new_headers)
+
+
+def has_expect_100_continue(request: "Request") -> bool:
+    # https://tools.ietf.org/html/rfc7231#section-5.1.1
+    # "A server that receives a 100-continue expectation in an HTTP/1.0 request
+    # MUST ignore that expectation."
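    # Editor's example (illustrative only, not part of the patch): a request
    # that this predicate accepts --
    #
    #     req = Request(method="PUT", target="/upload",
    #                   headers=[("Host", "example.org"),
    #                            ("Expect", "100-continue")])
    #     has_expect_100_continue(req)  # -> True; Connection uses this to set
    #                                   # client_is_waiting_for_100_continue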
+ if request.http_version < b"1.1": + return False + expect = get_comma_header(request.headers, b"expect") + return b"100-continue" in expect diff --git a/py311/lib/python3.11/site-packages/h11/_readers.py b/py311/lib/python3.11/site-packages/h11/_readers.py new file mode 100644 index 0000000000000000000000000000000000000000..576804cc282032526e0a932c9853d586a094bad0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/h11/_readers.py @@ -0,0 +1,250 @@ +# Code to read HTTP data +# +# Strategy: each reader is a callable which takes a ReceiveBuffer object, and +# either: +# 1) consumes some of it and returns an Event +# 2) raises a LocalProtocolError (for consistency -- e.g. we call validate() +# and it might raise a LocalProtocolError, so simpler just to always use +# this) +# 3) returns None, meaning "I need more data" +# +# If they have a .read_eof attribute, then this will be called if an EOF is +# received -- but this is optional. Either way, the actual ConnectionClosed +# event will be generated afterwards. +# +# READERS is a dict describing how to pick a reader. It maps states to either: +# - a reader +# - or, for body readers, a dict of per-framing reader factories + +import re +from typing import Any, Callable, Dict, Iterable, NoReturn, Optional, Tuple, Type, Union + +from ._abnf import chunk_header, header_field, request_line, status_line +from ._events import Data, EndOfMessage, InformationalResponse, Request, Response +from ._receivebuffer import ReceiveBuffer +from ._state import ( + CLIENT, + CLOSED, + DONE, + IDLE, + MUST_CLOSE, + SEND_BODY, + SEND_RESPONSE, + SERVER, +) +from ._util import LocalProtocolError, RemoteProtocolError, Sentinel, validate + +__all__ = ["READERS"] + +header_field_re = re.compile(header_field.encode("ascii")) +obs_fold_re = re.compile(rb"[ \t]+") + + +def _obsolete_line_fold(lines: Iterable[bytes]) -> Iterable[bytes]: + it = iter(lines) + last: Optional[bytes] = None + for line in it: + match = obs_fold_re.match(line) + if match: + if last is None: + raise LocalProtocolError("continuation line at start of headers") + if not isinstance(last, bytearray): + # Cast to a mutable type, avoiding copy on append to ensure O(n) time + last = bytearray(last) + last += b" " + last += line[match.end() :] + else: + if last is not None: + yield last + last = line + if last is not None: + yield last + + +def _decode_header_lines( + lines: Iterable[bytes], +) -> Iterable[Tuple[bytes, bytes]]: + for line in _obsolete_line_fold(lines): + matches = validate(header_field_re, line, "illegal header line: {!r}", line) + yield (matches["field_name"], matches["field_value"]) + + +request_line_re = re.compile(request_line.encode("ascii")) + + +def maybe_read_from_IDLE_client(buf: ReceiveBuffer) -> Optional[Request]: + lines = buf.maybe_extract_lines() + if lines is None: + if buf.is_next_line_obviously_invalid_request_line(): + raise LocalProtocolError("illegal request line") + return None + if not lines: + raise LocalProtocolError("no request line received") + matches = validate( + request_line_re, lines[0], "illegal request line: {!r}", lines[0] + ) + return Request( + headers=list(_decode_header_lines(lines[1:])), _parsed=True, **matches + ) + + +status_line_re = re.compile(status_line.encode("ascii")) + + +def maybe_read_from_SEND_RESPONSE_server( + buf: ReceiveBuffer, +) -> Union[InformationalResponse, Response, None]: + lines = buf.maybe_extract_lines() + if lines is None: + if buf.is_next_line_obviously_invalid_request_line(): + raise LocalProtocolError("illegal request 
line") + return None + if not lines: + raise LocalProtocolError("no response line received") + matches = validate(status_line_re, lines[0], "illegal status line: {!r}", lines[0]) + http_version = ( + b"1.1" if matches["http_version"] is None else matches["http_version"] + ) + reason = b"" if matches["reason"] is None else matches["reason"] + status_code = int(matches["status_code"]) + class_: Union[Type[InformationalResponse], Type[Response]] = ( + InformationalResponse if status_code < 200 else Response + ) + return class_( + headers=list(_decode_header_lines(lines[1:])), + _parsed=True, + status_code=status_code, + reason=reason, + http_version=http_version, + ) + + +class ContentLengthReader: + def __init__(self, length: int) -> None: + self._length = length + self._remaining = length + + def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]: + if self._remaining == 0: + return EndOfMessage() + data = buf.maybe_extract_at_most(self._remaining) + if data is None: + return None + self._remaining -= len(data) + return Data(data=data) + + def read_eof(self) -> NoReturn: + raise RemoteProtocolError( + "peer closed connection without sending complete message body " + "(received {} bytes, expected {})".format( + self._length - self._remaining, self._length + ) + ) + + +chunk_header_re = re.compile(chunk_header.encode("ascii")) + + +class ChunkedReader: + def __init__(self) -> None: + self._bytes_in_chunk = 0 + # After reading a chunk, we have to throw away the trailing \r\n. + # This tracks the bytes that we need to match and throw away. + self._bytes_to_discard = b"" + self._reading_trailer = False + + def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]: + if self._reading_trailer: + lines = buf.maybe_extract_lines() + if lines is None: + return None + return EndOfMessage(headers=list(_decode_header_lines(lines))) + if self._bytes_to_discard: + data = buf.maybe_extract_at_most(len(self._bytes_to_discard)) + if data is None: + return None + if data != self._bytes_to_discard[: len(data)]: + raise LocalProtocolError( + f"malformed chunk footer: {data!r} (expected {self._bytes_to_discard!r})" + ) + self._bytes_to_discard = self._bytes_to_discard[len(data) :] + if self._bytes_to_discard: + return None + # else, fall through and read some more + assert self._bytes_to_discard == b"" + if self._bytes_in_chunk == 0: + # We need to refill our chunk count + chunk_header = buf.maybe_extract_next_line() + if chunk_header is None: + return None + matches = validate( + chunk_header_re, + chunk_header, + "illegal chunk header: {!r}", + chunk_header, + ) + # XX FIXME: we discard chunk extensions. Does anyone care? 
+ self._bytes_in_chunk = int(matches["chunk_size"], base=16) + if self._bytes_in_chunk == 0: + self._reading_trailer = True + return self(buf) + chunk_start = True + else: + chunk_start = False + assert self._bytes_in_chunk > 0 + data = buf.maybe_extract_at_most(self._bytes_in_chunk) + if data is None: + return None + self._bytes_in_chunk -= len(data) + if self._bytes_in_chunk == 0: + self._bytes_to_discard = b"\r\n" + chunk_end = True + else: + chunk_end = False + return Data(data=data, chunk_start=chunk_start, chunk_end=chunk_end) + + def read_eof(self) -> NoReturn: + raise RemoteProtocolError( + "peer closed connection without sending complete message body " + "(incomplete chunked read)" + ) + + +class Http10Reader: + def __call__(self, buf: ReceiveBuffer) -> Optional[Data]: + data = buf.maybe_extract_at_most(999999999) + if data is None: + return None + return Data(data=data) + + def read_eof(self) -> EndOfMessage: + return EndOfMessage() + + +def expect_nothing(buf: ReceiveBuffer) -> None: + if buf: + raise LocalProtocolError("Got data when expecting EOF") + return None + + +ReadersType = Dict[ + Union[Type[Sentinel], Tuple[Type[Sentinel], Type[Sentinel]]], + Union[Callable[..., Any], Dict[str, Callable[..., Any]]], +] + +READERS: ReadersType = { + (CLIENT, IDLE): maybe_read_from_IDLE_client, + (SERVER, IDLE): maybe_read_from_SEND_RESPONSE_server, + (SERVER, SEND_RESPONSE): maybe_read_from_SEND_RESPONSE_server, + (CLIENT, DONE): expect_nothing, + (CLIENT, MUST_CLOSE): expect_nothing, + (CLIENT, CLOSED): expect_nothing, + (SERVER, DONE): expect_nothing, + (SERVER, MUST_CLOSE): expect_nothing, + (SERVER, CLOSED): expect_nothing, + SEND_BODY: { + "chunked": ChunkedReader, + "content-length": ContentLengthReader, + "http/1.0": Http10Reader, + }, +} diff --git a/py311/lib/python3.11/site-packages/h11/_receivebuffer.py b/py311/lib/python3.11/site-packages/h11/_receivebuffer.py new file mode 100644 index 0000000000000000000000000000000000000000..e5c4e08a56f5081e87103f38b4add6ce1b730204 --- /dev/null +++ b/py311/lib/python3.11/site-packages/h11/_receivebuffer.py @@ -0,0 +1,153 @@ +import re +import sys +from typing import List, Optional, Union + +__all__ = ["ReceiveBuffer"] + + +# Operations we want to support: +# - find next \r\n or \r\n\r\n (\n or \n\n are also acceptable), +# or wait until there is one +# - read at-most-N bytes +# Goals: +# - on average, do this fast +# - worst case, do this in O(n) where n is the number of bytes processed +# Plan: +# - store bytearray, offset, how far we've searched for a separator token +# - use the how-far-we've-searched data to avoid rescanning +# - while doing a stream of uninterrupted processing, advance offset instead +# of constantly copying +# WARNING: +# - I haven't benchmarked or profiled any of this yet. +# +# Note that starting in Python 3.4, deleting the initial n bytes from a +# bytearray is amortized O(n), thanks to some excellent work by Antoine +# Martin: +# +# https://bugs.python.org/issue19087 +# +# This means that if we only supported 3.4+, we could get rid of the code here +# involving self._start and self.compress, because it's doing exactly the same +# thing that bytearray now does internally. +# +# BUT unfortunately, we still support 2.7, and reading short segments out of a +# long buffer MUST be O(bytes read) to avoid DoS issues, so we can't actually +# delete this code. 
Yet: +# +# https://pythonclock.org/ +# +# (Two things to double-check first though: make sure PyPy also has the +# optimization, and benchmark to make sure it's a win, since we do have a +# slightly clever thing where we delay calling compress() until we've +# processed a whole event, which could in theory be slightly more efficient +# than the internal bytearray support.) +blank_line_regex = re.compile(b"\n\r?\n", re.MULTILINE) + + +class ReceiveBuffer: + def __init__(self) -> None: + self._data = bytearray() + self._next_line_search = 0 + self._multiple_lines_search = 0 + + def __iadd__(self, byteslike: Union[bytes, bytearray]) -> "ReceiveBuffer": + self._data += byteslike + return self + + def __bool__(self) -> bool: + return bool(len(self)) + + def __len__(self) -> int: + return len(self._data) + + # for @property unprocessed_data + def __bytes__(self) -> bytes: + return bytes(self._data) + + def _extract(self, count: int) -> bytearray: + # extracting an initial slice of the data buffer and return it + out = self._data[:count] + del self._data[:count] + + self._next_line_search = 0 + self._multiple_lines_search = 0 + + return out + + def maybe_extract_at_most(self, count: int) -> Optional[bytearray]: + """ + Extract a fixed number of bytes from the buffer. + """ + out = self._data[:count] + if not out: + return None + + return self._extract(count) + + def maybe_extract_next_line(self) -> Optional[bytearray]: + """ + Extract the first line, if it is completed in the buffer. + """ + # Only search in buffer space that we've not already looked at. + search_start_index = max(0, self._next_line_search - 1) + partial_idx = self._data.find(b"\r\n", search_start_index) + + if partial_idx == -1: + self._next_line_search = len(self._data) + return None + + # + 2 is to compensate len(b"\r\n") + idx = partial_idx + 2 + + return self._extract(idx) + + def maybe_extract_lines(self) -> Optional[List[bytearray]]: + """ + Extract everything up to the first blank line, and return a list of lines. + """ + # Handle the case where we have an immediate empty line. + if self._data[:1] == b"\n": + self._extract(1) + return [] + + if self._data[:2] == b"\r\n": + self._extract(2) + return [] + + # Only search in buffer space that we've not already looked at. + match = blank_line_regex.search(self._data, self._multiple_lines_search) + if match is None: + self._multiple_lines_search = max(0, len(self._data) - 2) + return None + + # Truncate the buffer and return it. + idx = match.span(0)[-1] + out = self._extract(idx) + lines = out.split(b"\n") + + for line in lines: + if line.endswith(b"\r"): + del line[-1] + + assert lines[-2] == lines[-1] == b"" + + del lines[-2:] + + return lines + + # In theory we should wait until `\r\n` before starting to validate + # incoming data. However it's interesting to detect (very) invalid data + # early given they might not even contain `\r\n` at all (hence only + # timeout will get rid of them). + # This is not a 100% effective detection but more of a cheap sanity check + # allowing for early abort in some useful cases. + # This is especially interesting when peer is messing up with HTTPS and + # sent us a TLS stream where we were expecting plain HTTP given all + # versions of TLS so far start handshake with a 0x16 message type code. 
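    # Editor's illustration (not part of the patch): a TLS ClientHello begins
    # with the handshake record type 0x16, which is below 0x21 and can never
    # start a valid request line, so the check below flags it from the very
    # first byte:
    #
    #     buf = ReceiveBuffer()
    #     buf += b"\x16\x03\x01"  # first bytes of a TLS handshake record
    #     buf.is_next_line_obviously_invalid_request_line()  # -> True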
+ def is_next_line_obviously_invalid_request_line(self) -> bool: + try: + # HTTP header line must not contain non-printable characters + # and should not start with a space + return self._data[0] < 0x21 + except IndexError: + return False diff --git a/py311/lib/python3.11/site-packages/h11/_state.py b/py311/lib/python3.11/site-packages/h11/_state.py new file mode 100644 index 0000000000000000000000000000000000000000..3ad444b043e3f3d6c05c2d9d84d5119312bfaa34 --- /dev/null +++ b/py311/lib/python3.11/site-packages/h11/_state.py @@ -0,0 +1,365 @@ +################################################################ +# The core state machine +################################################################ +# +# Rule 1: everything that affects the state machine and state transitions must +# live here in this file. As much as possible goes into the table-based +# representation, but for the bits that don't quite fit, the actual code and +# state must nonetheless live here. +# +# Rule 2: this file does not know about what role we're playing; it only knows +# about HTTP request/response cycles in the abstract. This ensures that we +# don't cheat and apply different rules to local and remote parties. +# +# +# Theory of operation +# =================== +# +# Possibly the simplest way to think about this is that we actually have 5 +# different state machines here. Yes, 5. These are: +# +# 1) The client state, with its complicated automaton (see the docs) +# 2) The server state, with its complicated automaton (see the docs) +# 3) The keep-alive state, with possible states {True, False} +# 4) The SWITCH_CONNECT state, with possible states {False, True} +# 5) The SWITCH_UPGRADE state, with possible states {False, True} +# +# For (3)-(5), the first state listed is the initial state. +# +# (1)-(3) are stored explicitly in member variables. The last +# two are stored implicitly in the pending_switch_proposals set as: +# (state of 4) == (_SWITCH_CONNECT in pending_switch_proposals) +# (state of 5) == (_SWITCH_UPGRADE in pending_switch_proposals) +# +# And each of these machines has two different kinds of transitions: +# +# a) Event-triggered +# b) State-triggered +# +# Event triggered is the obvious thing that you'd think it is: some event +# happens, and if it's the right event at the right time then a transition +# happens. But there are somewhat complicated rules for which machines can +# "see" which events. (As a rule of thumb, if a machine "sees" an event, this +# means two things: the event can affect the machine, and if the machine is +# not in a state where it expects that event then it's an error.) These rules +# are: +# +# 1) The client machine sees all h11.events objects emitted by the client. +# +# 2) The server machine sees all h11.events objects emitted by the server. +# +# It also sees the client's Request event. +# +# And sometimes, server events are annotated with a _SWITCH_* event. For +# example, we can have a (Response, _SWITCH_CONNECT) event, which is +# different from a regular Response event. +# +# 3) The keep-alive machine sees the process_keep_alive_disabled() event +# (which is derived from Request/Response events), and this event +# transitions it from True -> False, or from False -> False. There's no way +# to transition back. +# +# 4&5) The _SWITCH_* machines transition from False->True when we get a +# Request that proposes the relevant type of switch (via +# process_client_switch_proposals), and they go from True->False when we +# get a Response that has no _SWITCH_* annotation. 
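# Editor's sketch (illustrative, using only names defined in this module):
# driving the coupled machines by hand through an Upgrade request --
#
#     cs = ConnectionState()
#     cs.process_client_switch_proposal(_SWITCH_UPGRADE)  # machine 5: False -> True
#     cs.process_event(CLIENT, Request)        # server also sees (Request, CLIENT)
#     cs.process_event(CLIENT, EndOfMessage)   # client: SEND_BODY -> DONE, then
#                                              # DONE -> MIGHT_SWITCH_PROTOCOL
#     cs.process_event(SERVER, InformationalResponse, _SWITCH_UPGRADE)
#     assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL}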
+# +# So that's event-triggered transitions. +# +# State-triggered transitions are less standard. What they do here is couple +# the machines together. The way this works is, when certain *joint* +# configurations of states are achieved, then we automatically transition to a +# new *joint* state. So, for example, if we're ever in a joint state with +# +# client: DONE +# keep-alive: False +# +# then the client state immediately transitions to: +# +# client: MUST_CLOSE +# +# This is fundamentally different from an event-based transition, because it +# doesn't matter how we arrived at the {client: DONE, keep-alive: False} state +# -- maybe the client transitioned SEND_BODY -> DONE, or keep-alive +# transitioned True -> False. Either way, once this precondition is satisfied, +# this transition is immediately triggered. +# +# What if two conflicting state-based transitions get enabled at the same +# time? In practice there's only one case where this arises (client DONE -> +# MIGHT_SWITCH_PROTOCOL versus DONE -> MUST_CLOSE), and we resolve it by +# explicitly prioritizing the DONE -> MIGHT_SWITCH_PROTOCOL transition. +# +# Implementation +# -------------- +# +# The event-triggered transitions for the server and client machines are all +# stored explicitly in a table. Ditto for the state-triggered transitions that +# involve just the server and client state. +# +# The transitions for the other machines, and the state-triggered transitions +# that involve the other machines, are written out as explicit Python code. +# +# It'd be nice if there were some cleaner way to do all this. This isn't +# *too* terrible, but I feel like it could probably be better. +# +# WARNING +# ------- +# +# The script that generates the state machine diagrams for the docs knows how +# to read out the EVENT_TRIGGERED_TRANSITIONS and STATE_TRIGGERED_TRANSITIONS +# tables. But it can't automatically read the transitions that are written +# directly in Python code. So if you touch those, you need to also update the +# script to keep it in sync! +from typing import cast, Dict, Optional, Set, Tuple, Type, Union + +from ._events import * +from ._util import LocalProtocolError, Sentinel + +# Everything in __all__ gets re-exported as part of the h11 public API. 
+__all__ = [ + "CLIENT", + "SERVER", + "IDLE", + "SEND_RESPONSE", + "SEND_BODY", + "DONE", + "MUST_CLOSE", + "CLOSED", + "MIGHT_SWITCH_PROTOCOL", + "SWITCHED_PROTOCOL", + "ERROR", +] + + +class CLIENT(Sentinel, metaclass=Sentinel): + pass + + +class SERVER(Sentinel, metaclass=Sentinel): + pass + + +# States +class IDLE(Sentinel, metaclass=Sentinel): + pass + + +class SEND_RESPONSE(Sentinel, metaclass=Sentinel): + pass + + +class SEND_BODY(Sentinel, metaclass=Sentinel): + pass + + +class DONE(Sentinel, metaclass=Sentinel): + pass + + +class MUST_CLOSE(Sentinel, metaclass=Sentinel): + pass + + +class CLOSED(Sentinel, metaclass=Sentinel): + pass + + +class ERROR(Sentinel, metaclass=Sentinel): + pass + + +# Switch types +class MIGHT_SWITCH_PROTOCOL(Sentinel, metaclass=Sentinel): + pass + + +class SWITCHED_PROTOCOL(Sentinel, metaclass=Sentinel): + pass + + +class _SWITCH_UPGRADE(Sentinel, metaclass=Sentinel): + pass + + +class _SWITCH_CONNECT(Sentinel, metaclass=Sentinel): + pass + + +EventTransitionType = Dict[ + Type[Sentinel], + Dict[ + Type[Sentinel], + Dict[Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]], Type[Sentinel]], + ], +] + +EVENT_TRIGGERED_TRANSITIONS: EventTransitionType = { + CLIENT: { + IDLE: {Request: SEND_BODY, ConnectionClosed: CLOSED}, + SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE}, + DONE: {ConnectionClosed: CLOSED}, + MUST_CLOSE: {ConnectionClosed: CLOSED}, + CLOSED: {ConnectionClosed: CLOSED}, + MIGHT_SWITCH_PROTOCOL: {}, + SWITCHED_PROTOCOL: {}, + ERROR: {}, + }, + SERVER: { + IDLE: { + ConnectionClosed: CLOSED, + Response: SEND_BODY, + # Special case: server sees client Request events, in this form + (Request, CLIENT): SEND_RESPONSE, + }, + SEND_RESPONSE: { + InformationalResponse: SEND_RESPONSE, + Response: SEND_BODY, + (InformationalResponse, _SWITCH_UPGRADE): SWITCHED_PROTOCOL, + (Response, _SWITCH_CONNECT): SWITCHED_PROTOCOL, + }, + SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE}, + DONE: {ConnectionClosed: CLOSED}, + MUST_CLOSE: {ConnectionClosed: CLOSED}, + CLOSED: {ConnectionClosed: CLOSED}, + SWITCHED_PROTOCOL: {}, + ERROR: {}, + }, +} + +StateTransitionType = Dict[ + Tuple[Type[Sentinel], Type[Sentinel]], Dict[Type[Sentinel], Type[Sentinel]] +] + +# NB: there are also some special-case state-triggered transitions hard-coded +# into _fire_state_triggered_transitions below. +STATE_TRIGGERED_TRANSITIONS: StateTransitionType = { + # (Client state, Server state) -> new states + # Protocol negotiation + (MIGHT_SWITCH_PROTOCOL, SWITCHED_PROTOCOL): {CLIENT: SWITCHED_PROTOCOL}, + # Socket shutdown + (CLOSED, DONE): {SERVER: MUST_CLOSE}, + (CLOSED, IDLE): {SERVER: MUST_CLOSE}, + (ERROR, DONE): {SERVER: MUST_CLOSE}, + (DONE, CLOSED): {CLIENT: MUST_CLOSE}, + (IDLE, CLOSED): {CLIENT: MUST_CLOSE}, + (DONE, ERROR): {CLIENT: MUST_CLOSE}, +} + + +class ConnectionState: + def __init__(self) -> None: + # Extra bits of state that don't quite fit into the state model. + + # If this is False then it enables the automatic DONE -> MUST_CLOSE + # transition. Don't set this directly; call .keep_alive_disabled() + self.keep_alive = True + + # This is a subset of {UPGRADE, CONNECT}, containing the proposals + # made by the client for switching protocols. 
+ self.pending_switch_proposals: Set[Type[Sentinel]] = set() + + self.states: Dict[Type[Sentinel], Type[Sentinel]] = {CLIENT: IDLE, SERVER: IDLE} + + def process_error(self, role: Type[Sentinel]) -> None: + self.states[role] = ERROR + self._fire_state_triggered_transitions() + + def process_keep_alive_disabled(self) -> None: + self.keep_alive = False + self._fire_state_triggered_transitions() + + def process_client_switch_proposal(self, switch_event: Type[Sentinel]) -> None: + self.pending_switch_proposals.add(switch_event) + self._fire_state_triggered_transitions() + + def process_event( + self, + role: Type[Sentinel], + event_type: Type[Event], + server_switch_event: Optional[Type[Sentinel]] = None, + ) -> None: + _event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]] = event_type + if server_switch_event is not None: + assert role is SERVER + if server_switch_event not in self.pending_switch_proposals: + raise LocalProtocolError( + "Received server _SWITCH_UPGRADE event without a pending proposal" + ) + _event_type = (event_type, server_switch_event) + if server_switch_event is None and _event_type is Response: + self.pending_switch_proposals = set() + self._fire_event_triggered_transitions(role, _event_type) + # Special case: the server state does get to see Request + # events. + if _event_type is Request: + assert role is CLIENT + self._fire_event_triggered_transitions(SERVER, (Request, CLIENT)) + self._fire_state_triggered_transitions() + + def _fire_event_triggered_transitions( + self, + role: Type[Sentinel], + event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]], + ) -> None: + state = self.states[role] + try: + new_state = EVENT_TRIGGERED_TRANSITIONS[role][state][event_type] + except KeyError: + event_type = cast(Type[Event], event_type) + raise LocalProtocolError( + "can't handle event type {} when role={} and state={}".format( + event_type.__name__, role, self.states[role] + ) + ) from None + self.states[role] = new_state + + def _fire_state_triggered_transitions(self) -> None: + # We apply these rules repeatedly until converging on a fixed point + while True: + start_states = dict(self.states) + + # It could happen that both these special-case transitions are + # enabled at the same time: + # + # DONE -> MIGHT_SWITCH_PROTOCOL + # DONE -> MUST_CLOSE + # + # For example, this will always be true of a HTTP/1.0 client + # requesting CONNECT. If this happens, the protocol switch takes + # priority. From there the client will either go to + # SWITCHED_PROTOCOL, in which case it's none of our business when + # they close the connection, or else the server will deny the + # request, in which case the client will go back to DONE and then + # from there to MUST_CLOSE. + if self.pending_switch_proposals: + if self.states[CLIENT] is DONE: + self.states[CLIENT] = MIGHT_SWITCH_PROTOCOL + + if not self.pending_switch_proposals: + if self.states[CLIENT] is MIGHT_SWITCH_PROTOCOL: + self.states[CLIENT] = DONE + + if not self.keep_alive: + for role in (CLIENT, SERVER): + if self.states[role] is DONE: + self.states[role] = MUST_CLOSE + + # Tabular state-triggered transitions + joint_state = (self.states[CLIENT], self.states[SERVER]) + changes = STATE_TRIGGERED_TRANSITIONS.get(joint_state, {}) + self.states.update(changes) + + if self.states == start_states: + # Fixed point reached + return + + def start_next_cycle(self) -> None: + if self.states != {CLIENT: DONE, SERVER: DONE}: + raise LocalProtocolError( + f"not in a reusable state. 
self.states={self.states}"
+            )
+        # Can't reach DONE/DONE with any of these active, but still, let's be
+        # sure.
+        assert self.keep_alive
+        assert not self.pending_switch_proposals
+        self.states = {CLIENT: IDLE, SERVER: IDLE}
diff --git a/py311/lib/python3.11/site-packages/h11/_util.py b/py311/lib/python3.11/site-packages/h11/_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..6718445290770e028ea2f1f662026c9a0b0991db
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/h11/_util.py
@@ -0,0 +1,135 @@
+from typing import Any, Dict, NoReturn, Pattern, Tuple, Type, TypeVar, Union
+
+__all__ = [
+    "ProtocolError",
+    "LocalProtocolError",
+    "RemoteProtocolError",
+    "validate",
+    "bytesify",
+]
+
+
+class ProtocolError(Exception):
+    """Exception indicating a violation of the HTTP/1.1 protocol.
+
+    This is an abstract base class, with two concrete subclasses:
+    :exc:`LocalProtocolError`, which indicates that you tried to do something
+    that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which
+    indicates that the remote peer tried to do something that HTTP/1.1 says is
+    illegal. See :ref:`error-handling` for details.
+
+    In addition to the normal :exc:`Exception` features, it has one attribute:
+
+    .. attribute:: error_status_hint
+
+       This gives a suggestion as to what status code a server might use if
+       this error occurred as part of a request.
+
+       For a :exc:`RemoteProtocolError`, this is useful as a suggestion for
+       how you might want to respond to a misbehaving peer, if you're
+       implementing a server.
+
+       For a :exc:`LocalProtocolError`, this can be taken as a suggestion for
+       how your peer might have responded to *you* if h11 had allowed you to
+       continue.
+
+       The default is 400 Bad Request, a generic catch-all for protocol
+       violations.
+
+    """
+
+    def __init__(self, msg: str, error_status_hint: int = 400) -> None:
+        if type(self) is ProtocolError:
+            raise TypeError("tried to directly instantiate ProtocolError")
+        Exception.__init__(self, msg)
+        self.error_status_hint = error_status_hint
+
+
+# Strategy: there are a number of public APIs where a LocalProtocolError can
+# be raised (send(), all the different event constructors, ...), and only one
+# public API where RemoteProtocolError can be raised
+# (receive_data()). Therefore we always raise LocalProtocolError internally,
+# and then receive_data will translate this into a RemoteProtocolError.
+#
+# Internally:
+#   LocalProtocolError is the generic "ProtocolError".
+# Externally:
+#   LocalProtocolError is for local errors and RemoteProtocolError is for
+#   remote errors.
class LocalProtocolError(ProtocolError):
+    def _reraise_as_remote_protocol_error(self) -> NoReturn:
+        # After catching a LocalProtocolError, use this method to re-raise it
+        # as a RemoteProtocolError. This method must be called from inside an
+        # except: block.
+        #
+        # An easy way to get an equivalent RemoteProtocolError is just to
+        # modify 'self' in place.
+        self.__class__ = RemoteProtocolError  # type: ignore
+        # But the re-raising is somewhat non-trivial -- you might think that
+        # now that we've modified the in-flight exception object, that just
+        # doing 'raise' to re-raise it would be enough. But it turns out that
+        # this doesn't work, because Python tracks the exception type
+        # (exc_info[0]) separately from the exception object (exc_info[1]),
+        # and we only modified the latter. So we really do need to re-raise
+        # the new type explicitly.
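The `__class__` swap that `_reraise_as_remote_protocol_error` performs can be seen in isolation with toy classes (hypothetical names, not part of h11):

```python
# Toy sketch of the __class__-swap re-raise trick (hypothetical classes).
class LocalError(Exception):
    pass


class RemoteError(Exception):
    pass


try:
    try:
        raise LocalError("peer sent garbage")
    except LocalError as exc:
        exc.__class__ = RemoteError  # retype the in-flight exception object
        raise exc                    # re-raise so exc_info[0] sees the new type
except RemoteError as caught:
    print(type(caught).__name__, caught)  # RemoteError peer sent garbage
```

Because the traceback is carried on the exception object itself, it survives the retyping; only the recorded exception type needs the explicit re-raise, as the comment continues below.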
+        # On py3, the traceback is part of the exception object, so our
+        # in-place modification preserved it and we can just re-raise:
+        raise self
+
+
+class RemoteProtocolError(ProtocolError):
+    pass
+
+
+def validate(
+    regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
+) -> Dict[str, bytes]:
+    match = regex.fullmatch(data)
+    if not match:
+        if format_args:
+            msg = msg.format(*format_args)
+        raise LocalProtocolError(msg)
+    return match.groupdict()
+
+
+# Sentinel values
+#
+# - Inherit identity-based comparison and hashing from object
+# - Have a nice repr
+# - Have a *bonus property*: type(sentinel) is sentinel
+#
+# The bonus property is useful if you want to take the return value from
+# next_event() and do some sort of dispatch based on type(event).
+
+_T_Sentinel = TypeVar("_T_Sentinel", bound="Sentinel")
+
+
+class Sentinel(type):
+    def __new__(
+        cls: Type[_T_Sentinel],
+        name: str,
+        bases: Tuple[type, ...],
+        namespace: Dict[str, Any],
+        **kwds: Any
+    ) -> _T_Sentinel:
+        assert bases == (Sentinel,)
+        v = super().__new__(cls, name, bases, namespace, **kwds)
+        v.__class__ = v  # type: ignore
+        return v
+
+    def __repr__(self) -> str:
+        return self.__name__
+
+
+# Used for methods, request targets, HTTP versions, header names, and header
+# values. Accepts ascii-strings, or bytes/bytearray/memoryview/..., and always
+# returns bytes.
+def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
+    # Fast-path:
+    if type(s) is bytes:
+        return s
+    if isinstance(s, str):
+        s = s.encode("ascii")
+    if isinstance(s, int):
+        raise TypeError("expected bytes-like object, not int")
+    return bytes(s)
diff --git a/py311/lib/python3.11/site-packages/h11/_version.py b/py311/lib/python3.11/site-packages/h11/_version.py
new file mode 100644
index 0000000000000000000000000000000000000000..76e7327b8617c9d12236f511414d5eb58e98a44b
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/h11/_version.py
@@ -0,0 +1,16 @@
+# This file must be kept very simple, because it is consumed from several
+# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc.
+
+# We use a simple scheme:
+#   1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev
+# where the +dev versions are never released into the wild, they're just what
+# we stick into the VCS in between releases.
+#
+# This is compatible with PEP 440:
+#   http://legacy.python.org/dev/peps/pep-0440/
+# via the use of the "local suffix" "+dev", which is disallowed on index
+# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we
+# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before*
+# 1.0.0.)
+
+__version__ = "0.16.0"
diff --git a/py311/lib/python3.11/site-packages/h11/_writers.py b/py311/lib/python3.11/site-packages/h11/_writers.py
new file mode 100644
index 0000000000000000000000000000000000000000..939cdb912a9debaea07fbf3a9ac04549c44d077c
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/h11/_writers.py
@@ -0,0 +1,145 @@
+# Code to write HTTP data
+#
+# Strategy: each writer takes an event + a write-some-bytes function, which it
+# calls.
+#
+# WRITERS is a dict describing how to pick a writer.
It maps states to either:
+# - a writer
+# - or, for body writers, a dict of framing-dependent writer factories
+
+from typing import Any, Callable, Dict, List, Tuple, Type, Union
+
+from ._events import Data, EndOfMessage, Event, InformationalResponse, Request, Response
+from ._headers import Headers
+from ._state import CLIENT, IDLE, SEND_BODY, SEND_RESPONSE, SERVER
+from ._util import LocalProtocolError, Sentinel
+
+__all__ = ["WRITERS"]
+
+Writer = Callable[[bytes], Any]
+
+
+def write_headers(headers: Headers, write: Writer) -> None:
+    # "Since the Host field-value is critical information for handling a
+    # request, a user agent SHOULD generate Host as the first header field
+    # following the request-line." - RFC 7230
+    raw_items = headers._full_items
+    for raw_name, name, value in raw_items:
+        if name == b"host":
+            write(b"%s: %s\r\n" % (raw_name, value))
+    for raw_name, name, value in raw_items:
+        if name != b"host":
+            write(b"%s: %s\r\n" % (raw_name, value))
+    write(b"\r\n")
+
+
+def write_request(request: Request, write: Writer) -> None:
+    if request.http_version != b"1.1":
+        raise LocalProtocolError("I only send HTTP/1.1")
+    write(b"%s %s HTTP/1.1\r\n" % (request.method, request.target))
+    write_headers(request.headers, write)
+
+
+# Shared between InformationalResponse and Response
+def write_any_response(
+    response: Union[InformationalResponse, Response], write: Writer
+) -> None:
+    if response.http_version != b"1.1":
+        raise LocalProtocolError("I only send HTTP/1.1")
+    status_bytes = str(response.status_code).encode("ascii")
+    # We don't bother sending ascii status messages like "OK"; they're
+    # optional and ignored by the protocol. (But the space after the numeric
+    # status code is mandatory.)
+    #
+    # XX FIXME: could at least make an effort to pull out the status message
+    # from stdlib's http.HTTPStatus table. Or maybe just steal their enums
+    # (either by import or copy/paste). We already accept them as status codes
+    # since they're of type IntEnum < int.
+    write(b"HTTP/1.1 %s %s\r\n" % (status_bytes, response.reason))
+    write_headers(response.headers, write)
+
+
+class BodyWriter:
+    def __call__(self, event: Event, write: Writer) -> None:
+        if type(event) is Data:
+            self.send_data(event.data, write)
+        elif type(event) is EndOfMessage:
+            self.send_eom(event.headers, write)
+        else:  # pragma: no cover
+            assert False
+
+    def send_data(self, data: bytes, write: Writer) -> None:
+        pass
+
+    def send_eom(self, headers: Headers, write: Writer) -> None:
+        pass
+
+
+#
+# These are all careful not to do anything to 'data' except call len(data) and
+# write(data). This allows us to transparently pass-through funny objects,
+# like placeholder objects referring to files on disk that will be sent via
+# sendfile(2).
+#
+class ContentLengthWriter(BodyWriter):
+    def __init__(self, length: int) -> None:
+        self._length = length
+
+    def send_data(self, data: bytes, write: Writer) -> None:
+        self._length -= len(data)
+        if self._length < 0:
+            raise LocalProtocolError("Too much data for declared Content-Length")
+        write(data)
+
+    def send_eom(self, headers: Headers, write: Writer) -> None:
+        if self._length != 0:
+            raise LocalProtocolError("Too little data for declared Content-Length")
+        if headers:
+            raise LocalProtocolError("Content-Length and trailers don't mix")
+
+
+class ChunkedWriter(BodyWriter):
+    def send_data(self, data: bytes, write: Writer) -> None:
+        # if we encoded 0-length data in the naive way, it would look like an
+        # end-of-message.
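To see the bytes these writers produce, the sketch below renders a chunked response into a list standing in for a socket (h11 internals again, illustration only). `ChunkedWriter` frames each chunk as `<hex length>\r\n<payload>\r\n` and finishes with a `0\r\n` terminator, which is exactly why the guard that follows this comment skips empty chunks:

```python
# Sketch (h11 internals, illustration only): render a chunked response by
# collecting bytes in a list instead of writing to a socket.
from h11._events import Response
from h11._headers import normalize_and_validate
from h11._writers import ChunkedWriter, write_any_response

out = []
response = Response(
    status_code=200,
    headers=[(b"transfer-encoding", b"chunked")],
    reason=b"OK",
)
write_any_response(response, out.append)

body = ChunkedWriter()
body.send_data(b"hello", out.append)                    # b"5\r\nhello\r\n"
body.send_eom(normalize_and_validate([]), out.append)   # b"0\r\n\r\n" (no trailers)
print(b"".join(out))
# b'HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\n\r\n5\r\nhello\r\n\r\n0\r\n\r\n'
```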
+ if not data: + return + write(b"%x\r\n" % len(data)) + write(data) + write(b"\r\n") + + def send_eom(self, headers: Headers, write: Writer) -> None: + write(b"0\r\n") + write_headers(headers, write) + + +class Http10Writer(BodyWriter): + def send_data(self, data: bytes, write: Writer) -> None: + write(data) + + def send_eom(self, headers: Headers, write: Writer) -> None: + if headers: + raise LocalProtocolError("can't send trailers to HTTP/1.0 client") + # no need to close the socket ourselves, that will be taken care of by + # Connection: close machinery + + +WritersType = Dict[ + Union[Tuple[Type[Sentinel], Type[Sentinel]], Type[Sentinel]], + Union[ + Dict[str, Type[BodyWriter]], + Callable[[Union[InformationalResponse, Response], Writer], None], + Callable[[Request, Writer], None], + ], +] + +WRITERS: WritersType = { + (CLIENT, IDLE): write_request, + (SERVER, IDLE): write_any_response, + (SERVER, SEND_RESPONSE): write_any_response, + SEND_BODY: { + "chunked": ChunkedWriter, + "content-length": ContentLengthWriter, + "http/1.0": Http10Writer, + }, +} diff --git a/py311/lib/python3.11/site-packages/h11/py.typed b/py311/lib/python3.11/site-packages/h11/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..f5642f79f21d872f010979dcf6f0c4a415acc19d --- /dev/null +++ b/py311/lib/python3.11/site-packages/h11/py.typed @@ -0,0 +1 @@ +Marker diff --git a/py311/lib/python3.11/site-packages/httpcore/__init__.py b/py311/lib/python3.11/site-packages/httpcore/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9a92dc4a440bdf6f259ec1083c89c817eb7b631b --- /dev/null +++ b/py311/lib/python3.11/site-packages/httpcore/__init__.py @@ -0,0 +1,141 @@ +from ._api import request, stream +from ._async import ( + AsyncConnectionInterface, + AsyncConnectionPool, + AsyncHTTP2Connection, + AsyncHTTP11Connection, + AsyncHTTPConnection, + AsyncHTTPProxy, + AsyncSOCKSProxy, +) +from ._backends.base import ( + SOCKET_OPTION, + AsyncNetworkBackend, + AsyncNetworkStream, + NetworkBackend, + NetworkStream, +) +from ._backends.mock import AsyncMockBackend, AsyncMockStream, MockBackend, MockStream +from ._backends.sync import SyncBackend +from ._exceptions import ( + ConnectError, + ConnectionNotAvailable, + ConnectTimeout, + LocalProtocolError, + NetworkError, + PoolTimeout, + ProtocolError, + ProxyError, + ReadError, + ReadTimeout, + RemoteProtocolError, + TimeoutException, + UnsupportedProtocol, + WriteError, + WriteTimeout, +) +from ._models import URL, Origin, Proxy, Request, Response +from ._ssl import default_ssl_context +from ._sync import ( + ConnectionInterface, + ConnectionPool, + HTTP2Connection, + HTTP11Connection, + HTTPConnection, + HTTPProxy, + SOCKSProxy, +) + +# The 'httpcore.AnyIOBackend' class is conditional on 'anyio' being installed. +try: + from ._backends.anyio import AnyIOBackend +except ImportError: # pragma: nocover + + class AnyIOBackend: # type: ignore + def __init__(self, *args, **kwargs): # type: ignore + msg = ( + "Attempted to use 'httpcore.AnyIOBackend' but 'anyio' is not installed." + ) + raise RuntimeError(msg) + + +# The 'httpcore.TrioBackend' class is conditional on 'trio' being installed. +try: + from ._backends.trio import TrioBackend +except ImportError: # pragma: nocover + + class TrioBackend: # type: ignore + def __init__(self, *args, **kwargs): # type: ignore + msg = "Attempted to use 'httpcore.TrioBackend' but 'trio' is not installed." 
+ raise RuntimeError(msg) + + +__all__ = [ + # top-level requests + "request", + "stream", + # models + "Origin", + "URL", + "Request", + "Response", + "Proxy", + # async + "AsyncHTTPConnection", + "AsyncConnectionPool", + "AsyncHTTPProxy", + "AsyncHTTP11Connection", + "AsyncHTTP2Connection", + "AsyncConnectionInterface", + "AsyncSOCKSProxy", + # sync + "HTTPConnection", + "ConnectionPool", + "HTTPProxy", + "HTTP11Connection", + "HTTP2Connection", + "ConnectionInterface", + "SOCKSProxy", + # network backends, implementations + "SyncBackend", + "AnyIOBackend", + "TrioBackend", + # network backends, mock implementations + "AsyncMockBackend", + "AsyncMockStream", + "MockBackend", + "MockStream", + # network backends, interface + "AsyncNetworkStream", + "AsyncNetworkBackend", + "NetworkStream", + "NetworkBackend", + # util + "default_ssl_context", + "SOCKET_OPTION", + # exceptions + "ConnectionNotAvailable", + "ProxyError", + "ProtocolError", + "LocalProtocolError", + "RemoteProtocolError", + "UnsupportedProtocol", + "TimeoutException", + "PoolTimeout", + "ConnectTimeout", + "ReadTimeout", + "WriteTimeout", + "NetworkError", + "ConnectError", + "ReadError", + "WriteError", +] + +__version__ = "1.0.9" + + +__locals = locals() +for __name in __all__: + # Exclude SOCKET_OPTION, it causes AttributeError on Python 3.14 + if not __name.startswith(("__", "SOCKET_OPTION")): + setattr(__locals[__name], "__module__", "httpcore") # noqa diff --git a/py311/lib/python3.11/site-packages/httpcore/_api.py b/py311/lib/python3.11/site-packages/httpcore/_api.py new file mode 100644 index 0000000000000000000000000000000000000000..38b961d10de88bebc98c758d0d1f14af1e7c0370 --- /dev/null +++ b/py311/lib/python3.11/site-packages/httpcore/_api.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +import contextlib +import typing + +from ._models import URL, Extensions, HeaderTypes, Response +from ._sync.connection_pool import ConnectionPool + + +def request( + method: bytes | str, + url: URL | bytes | str, + *, + headers: HeaderTypes = None, + content: bytes | typing.Iterator[bytes] | None = None, + extensions: Extensions | None = None, +) -> Response: + """ + Sends an HTTP request, returning the response. + + ``` + response = httpcore.request("GET", "https://www.example.com/") + ``` + + Arguments: + method: The HTTP method for the request. Typically one of `"GET"`, + `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`. + url: The URL of the HTTP request. Either as an instance of `httpcore.URL`, + or as str/bytes. + headers: The HTTP request headers. Either as a dictionary of str/bytes, + or as a list of two-tuples of str/bytes. + content: The content of the request body. Either as bytes, + or as a bytes iterator. + extensions: A dictionary of optional extra information included on the request. + Possible keys include `"timeout"`. + + Returns: + An instance of `httpcore.Response`. + """ + with ConnectionPool() as pool: + return pool.request( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) + + +@contextlib.contextmanager +def stream( + method: bytes | str, + url: URL | bytes | str, + *, + headers: HeaderTypes = None, + content: bytes | typing.Iterator[bytes] | None = None, + extensions: Extensions | None = None, +) -> typing.Iterator[Response]: + """ + Sends an HTTP request, returning the response within a content manager. + + ``` + with httpcore.stream("GET", "https://www.example.com/") as response: + ... 
+ ``` + + When using the `stream()` function, the body of the response will not be + automatically read. If you want to access the response body you should + either use `content = response.read()`, or `for chunk in response.iter_content()`. + + Arguments: + method: The HTTP method for the request. Typically one of `"GET"`, + `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`. + url: The URL of the HTTP request. Either as an instance of `httpcore.URL`, + or as str/bytes. + headers: The HTTP request headers. Either as a dictionary of str/bytes, + or as a list of two-tuples of str/bytes. + content: The content of the request body. Either as bytes, + or as a bytes iterator. + extensions: A dictionary of optional extra information included on the request. + Possible keys include `"timeout"`. + + Returns: + An instance of `httpcore.Response`. + """ + with ConnectionPool() as pool: + with pool.stream( + method=method, + url=url, + headers=headers, + content=content, + extensions=extensions, + ) as response: + yield response diff --git a/py311/lib/python3.11/site-packages/httpcore/_exceptions.py b/py311/lib/python3.11/site-packages/httpcore/_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..bc28d44f55bdc4b872951a74780469a3999d9ab4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/httpcore/_exceptions.py @@ -0,0 +1,81 @@ +import contextlib +import typing + +ExceptionMapping = typing.Mapping[typing.Type[Exception], typing.Type[Exception]] + + +@contextlib.contextmanager +def map_exceptions(map: ExceptionMapping) -> typing.Iterator[None]: + try: + yield + except Exception as exc: # noqa: PIE786 + for from_exc, to_exc in map.items(): + if isinstance(exc, from_exc): + raise to_exc(exc) from exc + raise # pragma: nocover + + +class ConnectionNotAvailable(Exception): + pass + + +class ProxyError(Exception): + pass + + +class UnsupportedProtocol(Exception): + pass + + +class ProtocolError(Exception): + pass + + +class RemoteProtocolError(ProtocolError): + pass + + +class LocalProtocolError(ProtocolError): + pass + + +# Timeout errors + + +class TimeoutException(Exception): + pass + + +class PoolTimeout(TimeoutException): + pass + + +class ConnectTimeout(TimeoutException): + pass + + +class ReadTimeout(TimeoutException): + pass + + +class WriteTimeout(TimeoutException): + pass + + +# Network errors + + +class NetworkError(Exception): + pass + + +class ConnectError(NetworkError): + pass + + +class ReadError(NetworkError): + pass + + +class WriteError(NetworkError): + pass diff --git a/py311/lib/python3.11/site-packages/httpcore/_models.py b/py311/lib/python3.11/site-packages/httpcore/_models.py new file mode 100644 index 0000000000000000000000000000000000000000..8a65f13347d6621289a166d08123cbc8e1ad0157 --- /dev/null +++ b/py311/lib/python3.11/site-packages/httpcore/_models.py @@ -0,0 +1,516 @@ +from __future__ import annotations + +import base64 +import ssl +import typing +import urllib.parse + +# Functions for typechecking... + + +ByteOrStr = typing.Union[bytes, str] +HeadersAsSequence = typing.Sequence[typing.Tuple[ByteOrStr, ByteOrStr]] +HeadersAsMapping = typing.Mapping[ByteOrStr, ByteOrStr] +HeaderTypes = typing.Union[HeadersAsSequence, HeadersAsMapping, None] + +Extensions = typing.MutableMapping[str, typing.Any] + + +def enforce_bytes(value: bytes | str, *, name: str) -> bytes: + """ + Any arguments that are ultimately represented as bytes can be specified + either as bytes or as strings. 
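As a sketch of how the `map_exceptions` helper above gets used (hypothetical usage; httpcore wraps its own socket calls in the same way), low-level `OSError`s can be translated into the library's exception hierarchy:

```python
# Sketch: translating a low-level OSError into httpcore's ConnectError.
# The address is TEST-NET-1 (RFC 5737), chosen so the connect attempt fails.
import socket

from httpcore import ConnectError
from httpcore._exceptions import map_exceptions

try:
    with map_exceptions({OSError: ConnectError}):
        socket.create_connection(("192.0.2.1", 9), timeout=0.01)
except ConnectError as exc:
    print("translated into httpcore error:", exc)
```

The original exception is chained via `raise to_exc(exc) from exc`, so the underlying cause stays visible in tracebacks.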
+ + However we enforce that any string arguments must only contain characters in + the plain ASCII range. chr(0)...chr(127). If you need to use characters + outside that range then be precise, and use a byte-wise argument. + """ + if isinstance(value, str): + try: + return value.encode("ascii") + except UnicodeEncodeError: + raise TypeError(f"{name} strings may not include unicode characters.") + elif isinstance(value, bytes): + return value + + seen_type = type(value).__name__ + raise TypeError(f"{name} must be bytes or str, but got {seen_type}.") + + +def enforce_url(value: URL | bytes | str, *, name: str) -> URL: + """ + Type check for URL parameters. + """ + if isinstance(value, (bytes, str)): + return URL(value) + elif isinstance(value, URL): + return value + + seen_type = type(value).__name__ + raise TypeError(f"{name} must be a URL, bytes, or str, but got {seen_type}.") + + +def enforce_headers( + value: HeadersAsMapping | HeadersAsSequence | None = None, *, name: str +) -> list[tuple[bytes, bytes]]: + """ + Convienence function that ensure all items in request or response headers + are either bytes or strings in the plain ASCII range. + """ + if value is None: + return [] + elif isinstance(value, typing.Mapping): + return [ + ( + enforce_bytes(k, name="header name"), + enforce_bytes(v, name="header value"), + ) + for k, v in value.items() + ] + elif isinstance(value, typing.Sequence): + return [ + ( + enforce_bytes(k, name="header name"), + enforce_bytes(v, name="header value"), + ) + for k, v in value + ] + + seen_type = type(value).__name__ + raise TypeError( + f"{name} must be a mapping or sequence of two-tuples, but got {seen_type}." + ) + + +def enforce_stream( + value: bytes | typing.Iterable[bytes] | typing.AsyncIterable[bytes] | None, + *, + name: str, +) -> typing.Iterable[bytes] | typing.AsyncIterable[bytes]: + if value is None: + return ByteStream(b"") + elif isinstance(value, bytes): + return ByteStream(value) + return value + + +# * https://tools.ietf.org/html/rfc3986#section-3.2.3 +# * https://url.spec.whatwg.org/#url-miscellaneous +# * https://url.spec.whatwg.org/#scheme-state +DEFAULT_PORTS = { + b"ftp": 21, + b"http": 80, + b"https": 443, + b"ws": 80, + b"wss": 443, +} + + +def include_request_headers( + headers: list[tuple[bytes, bytes]], + *, + url: "URL", + content: None | bytes | typing.Iterable[bytes] | typing.AsyncIterable[bytes], +) -> list[tuple[bytes, bytes]]: + headers_set = set(k.lower() for k, v in headers) + + if b"host" not in headers_set: + default_port = DEFAULT_PORTS.get(url.scheme) + if url.port is None or url.port == default_port: + header_value = url.host + else: + header_value = b"%b:%d" % (url.host, url.port) + headers = [(b"Host", header_value)] + headers + + if ( + content is not None + and b"content-length" not in headers_set + and b"transfer-encoding" not in headers_set + ): + if isinstance(content, bytes): + content_length = str(len(content)).encode("ascii") + headers += [(b"Content-Length", content_length)] + else: + headers += [(b"Transfer-Encoding", b"chunked")] # pragma: nocover + + return headers + + +# Interfaces for byte streams... + + +class ByteStream: + """ + A container for non-streaming content, and that supports both sync and async + stream iteration. 
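A quick sketch of that dual-protocol behaviour, using the private `httpcore._models.ByteStream` directly (illustration only):

```python
# Sketch: one ByteStream type satisfies both iteration protocols.
import asyncio

from httpcore._models import ByteStream

print([chunk for chunk in ByteStream(b"hello")])  # [b'hello']


async def main() -> None:
    print([chunk async for chunk in ByteStream(b"world")])  # [b'world']


asyncio.run(main())
```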
+ """ + + def __init__(self, content: bytes) -> None: + self._content = content + + def __iter__(self) -> typing.Iterator[bytes]: + yield self._content + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + yield self._content + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{len(self._content)} bytes]>" + + +class Origin: + def __init__(self, scheme: bytes, host: bytes, port: int) -> None: + self.scheme = scheme + self.host = host + self.port = port + + def __eq__(self, other: typing.Any) -> bool: + return ( + isinstance(other, Origin) + and self.scheme == other.scheme + and self.host == other.host + and self.port == other.port + ) + + def __str__(self) -> str: + scheme = self.scheme.decode("ascii") + host = self.host.decode("ascii") + port = str(self.port) + return f"{scheme}://{host}:{port}" + + +class URL: + """ + Represents the URL against which an HTTP request may be made. + + The URL may either be specified as a plain string, for convienence: + + ```python + url = httpcore.URL("https://www.example.com/") + ``` + + Or be constructed with explicitily pre-parsed components: + + ```python + url = httpcore.URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/') + ``` + + Using this second more explicit style allows integrations that are using + `httpcore` to pass through URLs that have already been parsed in order to use + libraries such as `rfc-3986` rather than relying on the stdlib. It also ensures + that URL parsing is treated identically at both the networking level and at any + higher layers of abstraction. + + The four components are important here, as they allow the URL to be precisely + specified in a pre-parsed format. They also allow certain types of request to + be created that could not otherwise be expressed. + + For example, an HTTP request to `http://www.example.com/` forwarded via a proxy + at `http://localhost:8080`... + + ```python + # Constructs an HTTP request with a complete URL as the target: + # GET https://www.example.com/ HTTP/1.1 + url = httpcore.URL( + scheme=b'http', + host=b'localhost', + port=8080, + target=b'https://www.example.com/' + ) + request = httpcore.Request( + method="GET", + url=url + ) + ``` + + Another example is constructing an `OPTIONS *` request... + + ```python + # Constructs an 'OPTIONS *' HTTP request: + # OPTIONS * HTTP/1.1 + url = httpcore.URL(scheme=b'https', host=b'www.example.com', target=b'*') + request = httpcore.Request(method="OPTIONS", url=url) + ``` + + This kind of request is not possible to formulate with a URL string, + because the `/` delimiter is always used to demark the target from the + host/port portion of the URL. + + For convenience, string-like arguments may be specified either as strings or + as bytes. However, once a request is being issue over-the-wire, the URL + components are always ultimately required to be a bytewise representation. + + In order to avoid any ambiguity over character encodings, when strings are used + as arguments, they must be strictly limited to the ASCII range `chr(0)`-`chr(127)`. + If you require a bytewise representation that is outside this range you must + handle the character encoding directly, and pass a bytes instance. + """ + + def __init__( + self, + url: bytes | str = "", + *, + scheme: bytes | str = b"", + host: bytes | str = b"", + port: int | None = None, + target: bytes | str = b"", + ) -> None: + """ + Parameters: + url: The complete URL as a string or bytes. + scheme: The URL scheme as a string or bytes. 
+ Typically either `"http"` or `"https"`. + host: The URL host as a string or bytes. Such as `"www.example.com"`. + port: The port to connect to. Either an integer or `None`. + target: The target of the HTTP request. Such as `"/items?search=red"`. + """ + if url: + parsed = urllib.parse.urlparse(enforce_bytes(url, name="url")) + self.scheme = parsed.scheme + self.host = parsed.hostname or b"" + self.port = parsed.port + self.target = (parsed.path or b"/") + ( + b"?" + parsed.query if parsed.query else b"" + ) + else: + self.scheme = enforce_bytes(scheme, name="scheme") + self.host = enforce_bytes(host, name="host") + self.port = port + self.target = enforce_bytes(target, name="target") + + @property + def origin(self) -> Origin: + default_port = { + b"http": 80, + b"https": 443, + b"ws": 80, + b"wss": 443, + b"socks5": 1080, + b"socks5h": 1080, + }[self.scheme] + return Origin( + scheme=self.scheme, host=self.host, port=self.port or default_port + ) + + def __eq__(self, other: typing.Any) -> bool: + return ( + isinstance(other, URL) + and other.scheme == self.scheme + and other.host == self.host + and other.port == self.port + and other.target == self.target + ) + + def __bytes__(self) -> bytes: + if self.port is None: + return b"%b://%b%b" % (self.scheme, self.host, self.target) + return b"%b://%b:%d%b" % (self.scheme, self.host, self.port, self.target) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}(scheme={self.scheme!r}, " + f"host={self.host!r}, port={self.port!r}, target={self.target!r})" + ) + + +class Request: + """ + An HTTP request. + """ + + def __init__( + self, + method: bytes | str, + url: URL | bytes | str, + *, + headers: HeaderTypes = None, + content: bytes + | typing.Iterable[bytes] + | typing.AsyncIterable[bytes] + | None = None, + extensions: Extensions | None = None, + ) -> None: + """ + Parameters: + method: The HTTP request method, either as a string or bytes. + For example: `GET`. + url: The request URL, either as a `URL` instance, or as a string or bytes. + For example: `"https://www.example.com".` + headers: The HTTP request headers. + content: The content of the request body. + extensions: A dictionary of optional extra information included on + the request. Possible keys include `"timeout"`, and `"trace"`. + """ + self.method: bytes = enforce_bytes(method, name="method") + self.url: URL = enforce_url(url, name="url") + self.headers: list[tuple[bytes, bytes]] = enforce_headers( + headers, name="headers" + ) + self.stream: typing.Iterable[bytes] | typing.AsyncIterable[bytes] = ( + enforce_stream(content, name="content") + ) + self.extensions = {} if extensions is None else extensions + + if "target" in self.extensions: + self.url = URL( + scheme=self.url.scheme, + host=self.url.host, + port=self.url.port, + target=self.extensions["target"], + ) + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.method!r}]>" + + +class Response: + """ + An HTTP response. + """ + + def __init__( + self, + status: int, + *, + headers: HeaderTypes = None, + content: bytes + | typing.Iterable[bytes] + | typing.AsyncIterable[bytes] + | None = None, + extensions: Extensions | None = None, + ) -> None: + """ + Parameters: + status: The HTTP status code of the response. For example `200`. + headers: The HTTP response headers. + content: The content of the response body. + extensions: A dictionary of optional extra information included on + the responseself.Possible keys include `"http_version"`, + `"reason_phrase"`, and `"network_stream"`. 
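A short sketch of constructing the `Response` model directly with in-memory content (public httpcore API; header names and values are normalized to bytes as described above):

```python
# Sketch: building a Response directly; non-streaming content is wrapped
# in a ByteStream, so read() returns it immediately.
import httpcore

response = httpcore.Response(
    200,
    headers={"Content-Type": "text/plain"},
    content=b"Hello, world!",
)
print(response.status)   # 200
print(response.headers)  # [(b'Content-Type', b'text/plain')]
print(response.read())   # b'Hello, world!'
```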
+ """ + self.status: int = status + self.headers: list[tuple[bytes, bytes]] = enforce_headers( + headers, name="headers" + ) + self.stream: typing.Iterable[bytes] | typing.AsyncIterable[bytes] = ( + enforce_stream(content, name="content") + ) + self.extensions = {} if extensions is None else extensions + + self._stream_consumed = False + + @property + def content(self) -> bytes: + if not hasattr(self, "_content"): + if isinstance(self.stream, typing.Iterable): + raise RuntimeError( + "Attempted to access 'response.content' on a streaming response. " + "Call 'response.read()' first." + ) + else: + raise RuntimeError( + "Attempted to access 'response.content' on a streaming response. " + "Call 'await response.aread()' first." + ) + return self._content + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.status}]>" + + # Sync interface... + + def read(self) -> bytes: + if not isinstance(self.stream, typing.Iterable): # pragma: nocover + raise RuntimeError( + "Attempted to read an asynchronous response using 'response.read()'. " + "You should use 'await response.aread()' instead." + ) + if not hasattr(self, "_content"): + self._content = b"".join([part for part in self.iter_stream()]) + return self._content + + def iter_stream(self) -> typing.Iterator[bytes]: + if not isinstance(self.stream, typing.Iterable): # pragma: nocover + raise RuntimeError( + "Attempted to stream an asynchronous response using 'for ... in " + "response.iter_stream()'. " + "You should use 'async for ... in response.aiter_stream()' instead." + ) + if self._stream_consumed: + raise RuntimeError( + "Attempted to call 'for ... in response.iter_stream()' more than once." + ) + self._stream_consumed = True + for chunk in self.stream: + yield chunk + + def close(self) -> None: + if not isinstance(self.stream, typing.Iterable): # pragma: nocover + raise RuntimeError( + "Attempted to close an asynchronous response using 'response.close()'. " + "You should use 'await response.aclose()' instead." + ) + if hasattr(self.stream, "close"): + self.stream.close() + + # Async interface... + + async def aread(self) -> bytes: + if not isinstance(self.stream, typing.AsyncIterable): # pragma: nocover + raise RuntimeError( + "Attempted to read an synchronous response using " + "'await response.aread()'. " + "You should use 'response.read()' instead." + ) + if not hasattr(self, "_content"): + self._content = b"".join([part async for part in self.aiter_stream()]) + return self._content + + async def aiter_stream(self) -> typing.AsyncIterator[bytes]: + if not isinstance(self.stream, typing.AsyncIterable): # pragma: nocover + raise RuntimeError( + "Attempted to stream an synchronous response using 'async for ... in " + "response.aiter_stream()'. " + "You should use 'for ... in response.iter_stream()' instead." + ) + if self._stream_consumed: + raise RuntimeError( + "Attempted to call 'async for ... in response.aiter_stream()' " + "more than once." + ) + self._stream_consumed = True + async for chunk in self.stream: + yield chunk + + async def aclose(self) -> None: + if not isinstance(self.stream, typing.AsyncIterable): # pragma: nocover + raise RuntimeError( + "Attempted to close a synchronous response using " + "'await response.aclose()'. " + "You should use 'response.close()' instead." 
+ ) + if hasattr(self.stream, "aclose"): + await self.stream.aclose() + + +class Proxy: + def __init__( + self, + url: URL | bytes | str, + auth: tuple[bytes | str, bytes | str] | None = None, + headers: HeadersAsMapping | HeadersAsSequence | None = None, + ssl_context: ssl.SSLContext | None = None, + ): + self.url = enforce_url(url, name="url") + self.headers = enforce_headers(headers, name="headers") + self.ssl_context = ssl_context + + if auth is not None: + username = enforce_bytes(auth[0], name="auth") + password = enforce_bytes(auth[1], name="auth") + userpass = username + b":" + password + authorization = b"Basic " + base64.b64encode(userpass) + self.auth: tuple[bytes, bytes] | None = (username, password) + self.headers = [(b"Proxy-Authorization", authorization)] + self.headers + else: + self.auth = None diff --git a/py311/lib/python3.11/site-packages/httpcore/_ssl.py b/py311/lib/python3.11/site-packages/httpcore/_ssl.py new file mode 100644 index 0000000000000000000000000000000000000000..c99c5a67945b8a3a3544d481e979c791ab45fe23 --- /dev/null +++ b/py311/lib/python3.11/site-packages/httpcore/_ssl.py @@ -0,0 +1,9 @@ +import ssl + +import certifi + + +def default_ssl_context() -> ssl.SSLContext: + context = ssl.create_default_context() + context.load_verify_locations(certifi.where()) + return context diff --git a/py311/lib/python3.11/site-packages/httpcore/_synchronization.py b/py311/lib/python3.11/site-packages/httpcore/_synchronization.py new file mode 100644 index 0000000000000000000000000000000000000000..2ecc9e9c363e2f16c4f934cf41cf871826d6a495 --- /dev/null +++ b/py311/lib/python3.11/site-packages/httpcore/_synchronization.py @@ -0,0 +1,318 @@ +from __future__ import annotations + +import threading +import types + +from ._exceptions import ExceptionMapping, PoolTimeout, map_exceptions + +# Our async synchronization primatives use either 'anyio' or 'trio' depending +# on if they're running under asyncio or trio. + +try: + import trio +except (ImportError, NotImplementedError): # pragma: nocover + trio = None # type: ignore + +try: + import anyio +except ImportError: # pragma: nocover + anyio = None # type: ignore + + +def current_async_library() -> str: + # Determine if we're running under trio or asyncio. + # See https://sniffio.readthedocs.io/en/latest/ + try: + import sniffio + except ImportError: # pragma: nocover + environment = "asyncio" + else: + environment = sniffio.current_async_library() + + if environment not in ("asyncio", "trio"): # pragma: nocover + raise RuntimeError("Running under an unsupported async environment.") + + if environment == "asyncio" and anyio is None: # pragma: nocover + raise RuntimeError( + "Running with asyncio requires installation of 'httpcore[asyncio]'." + ) + + if environment == "trio" and trio is None: # pragma: nocover + raise RuntimeError( + "Running with trio requires installation of 'httpcore[trio]'." + ) + + return environment + + +class AsyncLock: + """ + This is a standard lock. + + In the sync case `Lock` provides thread locking. + In the async case `AsyncLock` provides async locking. + """ + + def __init__(self) -> None: + self._backend = "" + + def setup(self) -> None: + """ + Detect if we're running under 'asyncio' or 'trio' and create + a lock with the correct implementation. 
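A sketch of the lazy backend detection described above; this assumes the `anyio` extra is installed (`httpcore[asyncio]`) and peeks at the private `_backend` attribute purely for illustration:

```python
# Sketch: AsyncLock defers backend detection until first use.
import asyncio

from httpcore._synchronization import AsyncLock


async def main() -> None:
    lock = AsyncLock()
    async with lock:  # first __aenter__ calls setup() and picks the backend
        print("backend detected:", lock._backend)  # 'asyncio' here


asyncio.run(main())
```

Deferring `setup()` to first acquisition matters because the lock may be created outside any event loop, before sniffio can tell which async library is running.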
+ """ + self._backend = current_async_library() + if self._backend == "trio": + self._trio_lock = trio.Lock() + elif self._backend == "asyncio": + self._anyio_lock = anyio.Lock() + + async def __aenter__(self) -> AsyncLock: + if not self._backend: + self.setup() + + if self._backend == "trio": + await self._trio_lock.acquire() + elif self._backend == "asyncio": + await self._anyio_lock.acquire() + + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + if self._backend == "trio": + self._trio_lock.release() + elif self._backend == "asyncio": + self._anyio_lock.release() + + +class AsyncThreadLock: + """ + This is a threading-only lock for no-I/O contexts. + + In the sync case `ThreadLock` provides thread locking. + In the async case `AsyncThreadLock` is a no-op. + """ + + def __enter__(self) -> AsyncThreadLock: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + pass + + +class AsyncEvent: + def __init__(self) -> None: + self._backend = "" + + def setup(self) -> None: + """ + Detect if we're running under 'asyncio' or 'trio' and create + a lock with the correct implementation. + """ + self._backend = current_async_library() + if self._backend == "trio": + self._trio_event = trio.Event() + elif self._backend == "asyncio": + self._anyio_event = anyio.Event() + + def set(self) -> None: + if not self._backend: + self.setup() + + if self._backend == "trio": + self._trio_event.set() + elif self._backend == "asyncio": + self._anyio_event.set() + + async def wait(self, timeout: float | None = None) -> None: + if not self._backend: + self.setup() + + if self._backend == "trio": + trio_exc_map: ExceptionMapping = {trio.TooSlowError: PoolTimeout} + timeout_or_inf = float("inf") if timeout is None else timeout + with map_exceptions(trio_exc_map): + with trio.fail_after(timeout_or_inf): + await self._trio_event.wait() + elif self._backend == "asyncio": + anyio_exc_map: ExceptionMapping = {TimeoutError: PoolTimeout} + with map_exceptions(anyio_exc_map): + with anyio.fail_after(timeout): + await self._anyio_event.wait() + + +class AsyncSemaphore: + def __init__(self, bound: int) -> None: + self._bound = bound + self._backend = "" + + def setup(self) -> None: + """ + Detect if we're running under 'asyncio' or 'trio' and create + a semaphore with the correct implementation. + """ + self._backend = current_async_library() + if self._backend == "trio": + self._trio_semaphore = trio.Semaphore( + initial_value=self._bound, max_value=self._bound + ) + elif self._backend == "asyncio": + self._anyio_semaphore = anyio.Semaphore( + initial_value=self._bound, max_value=self._bound + ) + + async def acquire(self) -> None: + if not self._backend: + self.setup() + + if self._backend == "trio": + await self._trio_semaphore.acquire() + elif self._backend == "asyncio": + await self._anyio_semaphore.acquire() + + async def release(self) -> None: + if self._backend == "trio": + self._trio_semaphore.release() + elif self._backend == "asyncio": + self._anyio_semaphore.release() + + +class AsyncShieldCancellation: + # For certain portions of our codebase where we're dealing with + # closing connections during exception handling we want to shield + # the operation from being cancelled. + # + # with AsyncShieldCancellation(): + # ... 
# clean-up operations, shielded from cancellation. + + def __init__(self) -> None: + """ + Detect if we're running under 'asyncio' or 'trio' and create + a shielded scope with the correct implementation. + """ + self._backend = current_async_library() + + if self._backend == "trio": + self._trio_shield = trio.CancelScope(shield=True) + elif self._backend == "asyncio": + self._anyio_shield = anyio.CancelScope(shield=True) + + def __enter__(self) -> AsyncShieldCancellation: + if self._backend == "trio": + self._trio_shield.__enter__() + elif self._backend == "asyncio": + self._anyio_shield.__enter__() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + if self._backend == "trio": + self._trio_shield.__exit__(exc_type, exc_value, traceback) + elif self._backend == "asyncio": + self._anyio_shield.__exit__(exc_type, exc_value, traceback) + + +# Our thread-based synchronization primitives... + + +class Lock: + """ + This is a standard lock. + + In the sync case `Lock` provides thread locking. + In the async case `AsyncLock` provides async locking. + """ + + def __init__(self) -> None: + self._lock = threading.Lock() + + def __enter__(self) -> Lock: + self._lock.acquire() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + self._lock.release() + + +class ThreadLock: + """ + This is a threading-only lock for no-I/O contexts. + + In the sync case `ThreadLock` provides thread locking. + In the async case `AsyncThreadLock` is a no-op. + """ + + def __init__(self) -> None: + self._lock = threading.Lock() + + def __enter__(self) -> ThreadLock: + self._lock.acquire() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + self._lock.release() + + +class Event: + def __init__(self) -> None: + self._event = threading.Event() + + def set(self) -> None: + self._event.set() + + def wait(self, timeout: float | None = None) -> None: + if timeout == float("inf"): # pragma: no cover + timeout = None + if not self._event.wait(timeout=timeout): + raise PoolTimeout() # pragma: nocover + + +class Semaphore: + def __init__(self, bound: int) -> None: + self._semaphore = threading.Semaphore(value=bound) + + def acquire(self) -> None: + self._semaphore.acquire() + + def release(self) -> None: + self._semaphore.release() + + +class ShieldCancellation: + # Thread-synchronous codebases don't support cancellation semantics. + # We have this class because we need to mirror the async and sync + # cases within our package, but it's just a no-op. 
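A sketch of the thread-based `Event` defined above: an event that is never set turns its wait timeout into `PoolTimeout` (private module import, shown for illustration):

```python
# Sketch: the thread-based Event raises PoolTimeout when the wait expires.
from httpcore import PoolTimeout
from httpcore._synchronization import Event

event = Event()  # never set() -- simulates a pool that stays exhausted
try:
    event.wait(timeout=0.01)
except PoolTimeout:
    print("gave up waiting for a pooled connection")
```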
+ def __enter__(self) -> ShieldCancellation: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + pass diff --git a/py311/lib/python3.11/site-packages/httpcore/_trace.py b/py311/lib/python3.11/site-packages/httpcore/_trace.py new file mode 100644 index 0000000000000000000000000000000000000000..5f1cd7c47829ce17dbcf651ab56b4ffdce04a485 --- /dev/null +++ b/py311/lib/python3.11/site-packages/httpcore/_trace.py @@ -0,0 +1,107 @@ +from __future__ import annotations + +import inspect +import logging +import types +import typing + +from ._models import Request + + +class Trace: + def __init__( + self, + name: str, + logger: logging.Logger, + request: Request | None = None, + kwargs: dict[str, typing.Any] | None = None, + ) -> None: + self.name = name + self.logger = logger + self.trace_extension = ( + None if request is None else request.extensions.get("trace") + ) + self.debug = self.logger.isEnabledFor(logging.DEBUG) + self.kwargs = kwargs or {} + self.return_value: typing.Any = None + self.should_trace = self.debug or self.trace_extension is not None + self.prefix = self.logger.name.split(".")[-1] + + def trace(self, name: str, info: dict[str, typing.Any]) -> None: + if self.trace_extension is not None: + prefix_and_name = f"{self.prefix}.{name}" + ret = self.trace_extension(prefix_and_name, info) + if inspect.iscoroutine(ret): # pragma: no cover + raise TypeError( + "If you are using a synchronous interface, " + "the callback of the `trace` extension should " + "be a normal function instead of an asynchronous function." + ) + + if self.debug: + if not info or "return_value" in info and info["return_value"] is None: + message = name + else: + args = " ".join([f"{key}={value!r}" for key, value in info.items()]) + message = f"{name} {args}" + self.logger.debug(message) + + def __enter__(self) -> Trace: + if self.should_trace: + info = self.kwargs + self.trace(f"{self.name}.started", info) + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + if self.should_trace: + if exc_value is None: + info = {"return_value": self.return_value} + self.trace(f"{self.name}.complete", info) + else: + info = {"exception": exc_value} + self.trace(f"{self.name}.failed", info) + + async def atrace(self, name: str, info: dict[str, typing.Any]) -> None: + if self.trace_extension is not None: + prefix_and_name = f"{self.prefix}.{name}" + coro = self.trace_extension(prefix_and_name, info) + if not inspect.iscoroutine(coro): # pragma: no cover + raise TypeError( + "If you're using an asynchronous interface, " + "the callback of the `trace` extension should " + "be an asynchronous function rather than a normal function." 
+ ) + await coro + + if self.debug: + if not info or "return_value" in info and info["return_value"] is None: + message = name + else: + args = " ".join([f"{key}={value!r}" for key, value in info.items()]) + message = f"{name} {args}" + self.logger.debug(message) + + async def __aenter__(self) -> Trace: + if self.should_trace: + info = self.kwargs + await self.atrace(f"{self.name}.started", info) + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + if self.should_trace: + if exc_value is None: + info = {"return_value": self.return_value} + await self.atrace(f"{self.name}.complete", info) + else: + info = {"exception": exc_value} + await self.atrace(f"{self.name}.failed", info) diff --git a/py311/lib/python3.11/site-packages/httpcore/_utils.py b/py311/lib/python3.11/site-packages/httpcore/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c44ff93cb2f572afc6e679308024b744b65c3b0a --- /dev/null +++ b/py311/lib/python3.11/site-packages/httpcore/_utils.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +import select +import socket +import sys + + +def is_socket_readable(sock: socket.socket | None) -> bool: + """ + Return whether a socket, as identifed by its file descriptor, is readable. + "A socket is readable" means that the read buffer isn't empty, i.e. that calling + .recv() on it would immediately return some data. + """ + # NOTE: we want check for readability without actually attempting to read, because + # we don't want to block forever if it's not readable. + + # In the case that the socket no longer exists, or cannot return a file + # descriptor, we treat it as being readable, as if it the next read operation + # on it is ready to return the terminating `b""`. + sock_fd = None if sock is None else sock.fileno() + if sock_fd is None or sock_fd < 0: # pragma: nocover + return True + + # The implementation below was stolen from: + # https://github.com/python-trio/trio/blob/20ee2b1b7376db637435d80e266212a35837ddcc/trio/_socket.py#L471-L478 + # See also: https://github.com/encode/httpcore/pull/193#issuecomment-703129316 + + # Use select.select on Windows, and when poll is unavailable and select.poll + # everywhere else. (E.g. When eventlet is in use. 
See #327) + if ( + sys.platform == "win32" or getattr(select, "poll", None) is None + ): # pragma: nocover + rready, _, _ = select.select([sock_fd], [], [], 0) + return bool(rready) + p = select.poll() + p.register(sock_fd, select.POLLIN) + return bool(p.poll(0)) diff --git a/py311/lib/python3.11/site-packages/httpcore/py.typed b/py311/lib/python3.11/site-packages/httpcore/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/identify/__init__.py b/py311/lib/python3.11/site-packages/identify/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/identify/cli.py b/py311/lib/python3.11/site-packages/identify/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..98c77bd6abde74c53c058bd5bfa9bec248d005b9 --- /dev/null +++ b/py311/lib/python3.11/site-packages/identify/cli.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +import argparse +import json +from collections.abc import Sequence + +from identify import identify + + +def main(argv: Sequence[str] | None = None) -> int: + parser = argparse.ArgumentParser() + parser.add_argument('--filename-only', action='store_true') + parser.add_argument('path') + args = parser.parse_args(argv) + + if args.filename_only: + func = identify.tags_from_filename + else: + func = identify.tags_from_path + + try: + tags = sorted(func(args.path)) + except ValueError as e: + print(e) + return 1 + + if not tags: + return 1 + else: + print(json.dumps(tags)) + return 0 + + +if __name__ == '__main__': + raise SystemExit(main()) diff --git a/py311/lib/python3.11/site-packages/identify/extensions.py b/py311/lib/python3.11/site-packages/identify/extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..3a509153019b33634145607fa683de9bcbefa43d --- /dev/null +++ b/py311/lib/python3.11/site-packages/identify/extensions.py @@ -0,0 +1,420 @@ +from __future__ import annotations +EXTENSIONS = { + 'adoc': {'text', 'asciidoc'}, + 'ai': {'binary', 'adobe-illustrator'}, + 'aj': {'text', 'aspectj'}, + 'asciidoc': {'text', 'asciidoc'}, + 'apinotes': {'text', 'apinotes'}, + 'asar': {'binary', 'asar'}, + 'asm': {'text', 'asm'}, + 'astro': {'text', 'astro'}, + 'avif': {'binary', 'image', 'avif'}, + 'avsc': {'text', 'avro-schema'}, + 'bash': {'text', 'shell', 'bash'}, + 'bat': {'text', 'batch'}, + 'bats': {'text', 'shell', 'bash', 'bats'}, + 'bazel': {'text', 'bazel'}, + 'bb': {'text', 'bitbake'}, + 'bbappend': {'text', 'bitbake'}, + 'bbclass': {'text', 'bitbake'}, + 'beancount': {'text', 'beancount'}, + 'bib': {'text', 'bib'}, + 'bmp': {'binary', 'image', 'bitmap'}, + 'bz2': {'binary', 'bzip2'}, + 'bz3': {'binary', 'bzip3'}, + 'bzl': {'text', 'bazel'}, + 'c': {'text', 'c'}, + 'c++': {'text', 'c++'}, + 'c++m': {'text', 'c++'}, + 'cc': {'text', 'c++'}, + 'ccm': {'text', 'c++'}, + 'cfg': {'text'}, + 'chs': {'text', 'c2hs'}, + 'cjs': {'text', 'javascript'}, + 'clj': {'text', 'clojure'}, + 'cljc': {'text', 'clojure'}, + 'cljs': {'text', 'clojure', 'clojurescript'}, + 'cmake': {'text', 'cmake'}, + 'cnf': {'text'}, + 'coffee': {'text', 'coffee'}, + 'conf': {'text'}, + 'cpp': {'text', 'c++'}, + 'cppm': {'text', 'c++'}, + 'cr': {'text', 'crystal'}, + 'crt': {'text', 'pem'}, + 'cs': {'text', 'c#'}, + 'csproj': {'text', 'xml', 'csproj', 'msbuild'}, + 'csh': {'text', 'shell', 'csh'}, + 'cson': {'text', 'cson'}, + 'css': 
{'text', 'css'}, + 'csv': {'text', 'csv'}, + 'csx': {'text', 'c#', 'c#script'}, + 'cu': {'text', 'cuda'}, + 'cue': {'text', 'cue'}, + 'cuh': {'text', 'cuda'}, + 'cxx': {'text', 'c++'}, + 'cxxm': {'text', 'c++'}, + 'cylc': {'text', 'cylc'}, + 'dart': {'text', 'dart'}, + 'dbc': {'text', 'dbc'}, + 'def': {'text', 'def'}, + 'dll': {'binary'}, + 'dtd': {'text', 'dtd'}, + 'ear': {'binary', 'zip', 'jar'}, + 'edn': {'text', 'clojure', 'edn'}, + 'ejs': {'text', 'ejs'}, + 'ejson': {'text', 'json', 'ejson'}, + 'elm': {'text', 'elm'}, + 'env': {'text', 'dotenv'}, + 'eot': {'binary', 'eot'}, + 'eps': {'binary', 'eps'}, + 'erb': {'text', 'erb'}, + 'erl': {'text', 'erlang'}, + 'ex': {'text', 'elixir'}, + 'exe': {'binary'}, + 'exs': {'text', 'elixir'}, + 'eyaml': {'text', 'yaml'}, + 'f03': {'text', 'fortran'}, + 'f08': {'text', 'fortran'}, + 'f90': {'text', 'fortran'}, + 'f95': {'text', 'fortran'}, + 'feature': {'text', 'gherkin'}, + 'fish': {'text', 'fish'}, + 'fits': {'binary', 'fits'}, + 'fs': {'text', 'f#'}, + 'fsproj': {'text', 'xml', 'fsproj', 'msbuild'}, + 'fsx': {'text', 'f#', 'f#script'}, + 'gd': {'text', 'gdscript'}, + 'gemspec': {'text', 'ruby'}, + 'geojson': {'text', 'geojson', 'json'}, + 'ggb': {'binary', 'zip', 'ggb'}, + 'gif': {'binary', 'image', 'gif'}, + 'gleam': {'text', 'gleam'}, + 'go': {'text', 'go'}, + 'gotmpl': {'text', 'gotmpl'}, + 'gpx': {'text', 'gpx', 'xml'}, + 'graphql': {'text', 'graphql'}, + 'gradle': {'text', 'groovy'}, + 'groovy': {'text', 'groovy'}, + 'gyb': {'text', 'gyb'}, + 'gyp': {'text', 'gyp', 'python'}, + 'gypi': {'text', 'gyp', 'python'}, + 'gz': {'binary', 'gzip'}, + 'h': {'text', 'header', 'c', 'c++'}, + 'hbs': {'text', 'handlebars'}, + 'hcl': {'text', 'hcl'}, + 'hh': {'text', 'header', 'c++'}, + 'hpp': {'text', 'header', 'c++'}, + 'hrl': {'text', 'erlang'}, + 'hs': {'text', 'haskell'}, + 'htm': {'text', 'html'}, + 'html': {'text', 'html'}, + 'hxx': {'text', 'header', 'c++'}, + 'icns': {'binary', 'icns'}, + 'ico': {'binary', 'icon'}, + 'ics': {'text', 'icalendar'}, + 'idl': {'text', 'idl'}, + 'idr': {'text', 'idris'}, + 'inc': {'text', 'inc'}, + 'ini': {'text', 'ini'}, + 'inl': {'text', 'inl', 'c++'}, + 'ino': {'text', 'ino', 'c++'}, + 'inx': {'text', 'xml', 'inx'}, + 'ipynb': {'text', 'jupyter', 'json'}, + 'ipp': {'text', 'c++'}, + 'ipxe': {'text', 'ipxe'}, + 'ixx': {'text', 'c++'}, + 'j2': {'text', 'jinja'}, + 'jade': {'text', 'jade'}, + 'jar': {'binary', 'zip', 'jar'}, + 'java': {'text', 'java'}, + 'jenkins': {'text', 'groovy', 'jenkins'}, + 'jenkinsfile': {'text', 'groovy', 'jenkins'}, + 'jinja': {'text', 'jinja'}, + 'jinja2': {'text', 'jinja'}, + 'jl': {'text', 'julia'}, + 'jpeg': {'binary', 'image', 'jpeg'}, + 'jpg': {'binary', 'image', 'jpeg'}, + 'js': {'text', 'javascript'}, + 'json': {'text', 'json'}, + 'jsonld': {'text', 'json', 'jsonld'}, + 'jsonnet': {'text', 'jsonnet'}, + 'json5': {'text', 'json5'}, + 'jsx': {'text', 'jsx'}, + 'key': {'text', 'pem'}, + 'kml': {'text', 'kml', 'xml'}, + 'kt': {'text', 'kotlin'}, + 'kts': {'text', 'kotlin'}, + 'lean': {'text', 'lean'}, + 'lektorproject': {'text', 'ini', 'lektorproject'}, + 'less': {'text', 'less'}, + 'lfm': {'text', 'lazarus', 'lazarus-form'}, + 'lhs': {'text', 'literate-haskell'}, + 'libsonnet': {'text', 'jsonnet'}, + 'lidr': {'text', 'idris'}, + 'liquid': {'text', 'liquid'}, + 'lpi': {'text', 'lazarus', 'xml'}, + 'lpr': {'text', 'lazarus', 'pascal'}, + 'lr': {'text', 'lektor'}, + 'lua': {'text', 'lua'}, + 'm': {'text', 'objective-c'}, + 'm4': {'text', 'm4'}, + 'magik': {'text', 'magik'}, + 'make': 
{'text', 'makefile'}, + 'manifest': {'text', 'manifest'}, + 'map': {'text', 'map'}, + 'markdown': {'text', 'markdown'}, + 'md': {'text', 'markdown'}, + 'mdx': {'text', 'mdx'}, + 'meson': {'text', 'meson'}, + 'metal': {'text', 'metal'}, + 'mib': {'text', 'mib'}, + 'mjs': {'text', 'javascript'}, + 'mk': {'text', 'makefile'}, + 'ml': {'text', 'ocaml'}, + 'mli': {'text', 'ocaml'}, + 'mm': {'text', 'c++', 'objective-c++'}, + 'modulemap': {'text', 'modulemap'}, + 'mscx': {'text', 'xml', 'musescore'}, + 'mscz': {'binary', 'zip', 'musescore'}, + 'mustache': {'text', 'mustache'}, + 'myst': {'text', 'myst'}, + 'ngdoc': {'text', 'ngdoc'}, + 'nim': {'text', 'nim'}, + 'nims': {'text', 'nim'}, + 'nimble': {'text', 'nimble'}, + 'nix': {'text', 'nix'}, + 'njk': {'text', 'nunjucks'}, + 'otf': {'binary', 'otf'}, + 'p12': {'binary', 'p12'}, + 'pas': {'text', 'pascal'}, + 'patch': {'text', 'diff'}, + 'pdf': {'binary', 'pdf'}, + 'pem': {'text', 'pem'}, + 'php': {'text', 'php'}, + 'php4': {'text', 'php'}, + 'php5': {'text', 'php'}, + 'phtml': {'text', 'php'}, + 'piskel': {'text', 'piskel', 'json'}, + 'pl': {'text', 'perl'}, + 'plantuml': {'text', 'plantuml'}, + 'pm': {'text', 'perl'}, + 'png': {'binary', 'image', 'png'}, + 'po': {'text', 'pofile'}, + 'pom': {'pom', 'text', 'xml'}, + 'pp': {'text', 'puppet'}, + 'prisma': {'text', 'prisma'}, + 'properties': {'text', 'java-properties'}, + 'props': {'text', 'xml', 'msbuild'}, + 'proto': {'text', 'proto'}, + 'ps1': {'text', 'powershell'}, + 'psd1': {'text', 'powershell'}, + 'psm1': {'text', 'powershell'}, + 'pug': {'text', 'pug'}, + 'puml': {'text', 'plantuml'}, + 'purs': {'text', 'purescript'}, + 'pxd': {'text', 'cython'}, + 'pxi': {'text', 'cython'}, + 'py': {'text', 'python'}, + 'pyi': {'text', 'pyi'}, + 'pyproj': {'text', 'xml', 'pyproj', 'msbuild'}, + 'pyt': {'text', 'python'}, + 'pyx': {'text', 'cython'}, + 'pyz': {'binary', 'pyz'}, + 'pyzw': {'binary', 'pyz'}, + 'qml': {'text', 'qml'}, + 'r': {'text', 'r'}, + 'rake': {'text', 'ruby'}, + 'rb': {'text', 'ruby'}, + 'resx': {'text', 'resx', 'xml'}, + 'rng': {'text', 'xml', 'relax-ng'}, + 'robot': {'text', 'robot'}, + 'rs': {'text', 'rust'}, + 'rst': {'text', 'rst'}, + 's': {'text', 'asm'}, + 'sas': {'text', 'sas'}, + 'sass': {'text', 'sass'}, + 'sbt': {'text', 'sbt', 'scala'}, + 'sc': {'text', 'scala'}, + 'scala': {'text', 'scala'}, + 'scm': {'text', 'scheme'}, + 'scss': {'text', 'scss'}, + 'sh': {'text', 'shell'}, + 'sln': {'text', 'sln'}, + 'sls': {'text', 'salt'}, + 'so': {'binary'}, + 'sol': {'text', 'solidity'}, + 'spec': {'text', 'spec'}, + 'sql': {'text', 'sql'}, + 'ss': {'text', 'scheme'}, + 'sty': {'text', 'tex'}, + 'styl': {'text', 'stylus'}, + 'sv': {'text', 'system-verilog'}, + 'svelte': {'text', 'svelte'}, + 'svg': {'text', 'image', 'svg', 'xml'}, + 'svh': {'text', 'system-verilog'}, + 'swf': {'binary', 'swf'}, + 'swift': {'text', 'swift'}, + 'swiftdeps': {'text', 'swiftdeps'}, + 'tac': {'text', 'twisted', 'python'}, + 'tar': {'binary', 'tar'}, + 'targets': {'text', 'xml', 'msbuild'}, + 'templ': {'text', 'templ'}, + 'tex': {'text', 'tex'}, + 'textproto': {'text', 'textproto'}, + 'tf': {'text', 'terraform'}, + 'tfvars': {'text', 'terraform'}, + 'tgz': {'binary', 'gzip'}, + 'thrift': {'text', 'thrift'}, + 'tiff': {'binary', 'image', 'tiff'}, + 'toml': {'text', 'toml'}, + 'tpp': {'text', 'c++'}, + 'ts': {'text', 'ts'}, + 'tsv': {'text', 'tsv'}, + 'tsx': {'text', 'tsx'}, + 'ttf': {'binary', 'ttf'}, + 'twig': {'text', 'twig'}, + 'txsprofile': {'text', 'ini', 'txsprofile'}, + 'txt': {'text', 'plain-text'}, 
+ 'txtpb': {'text', 'textproto'}, + 'urdf': {'text', 'xml', 'urdf'}, + 'v': {'text', 'verilog'}, + 'vb': {'text', 'vb'}, + 'vbproj': {'text', 'xml', 'vbproj', 'msbuild'}, + 'vcxproj': {'text', 'xml', 'vcxproj', 'msbuild'}, + 'vdx': {'text', 'vdx'}, + 'vh': {'text', 'verilog'}, + 'vhd': {'text', 'vhdl'}, + 'vim': {'text', 'vim'}, + 'vtl': {'text', 'vtl'}, + 'vue': {'text', 'vue'}, + 'war': {'binary', 'zip', 'jar'}, + 'wav': {'binary', 'audio', 'wav'}, + 'webp': {'binary', 'image', 'webp'}, + 'whl': {'binary', 'wheel', 'zip'}, + 'wkt': {'text', 'wkt'}, + 'woff': {'binary', 'woff'}, + 'woff2': {'binary', 'woff2'}, + 'wsdl': {'text', 'xml', 'wsdl'}, + 'wsgi': {'text', 'wsgi', 'python'}, + 'xhtml': {'text', 'xml', 'html', 'xhtml'}, + 'xacro': {'text', 'xml', 'urdf', 'xacro'}, + 'xctestplan': {'text', 'json'}, + 'xlf': {'text', 'xml', 'xliff'}, + 'xliff': {'text', 'xml', 'xliff'}, + 'xml': {'text', 'xml'}, + 'xq': {'text', 'xquery'}, + 'xql': {'text', 'xquery'}, + 'xqm': {'text', 'xquery'}, + 'xqu': {'text', 'xquery'}, + 'xquery': {'text', 'xquery'}, + 'xqy': {'text', 'xquery'}, + 'xsd': {'text', 'xml', 'xsd'}, + 'xsl': {'text', 'xml', 'xsl'}, + 'xslt': {'text', 'xml', 'xsl'}, + 'yaml': {'text', 'yaml'}, + 'yamlld': {'text', 'yaml', 'yamlld'}, + 'yang': {'text', 'yang'}, + 'yin': {'text', 'xml', 'yin'}, + 'yml': {'text', 'yaml'}, + 'zcml': {'text', 'xml', 'zcml'}, + 'zig': {'text', 'zig'}, + 'zip': {'binary', 'zip'}, + 'zpt': {'text', 'zpt'}, + 'zsh': {'text', 'shell', 'zsh'}, +} +EXTENSIONS_NEED_BINARY_CHECK = { + 'plist': {'plist'}, + 'ppm': {'image', 'ppm'}, +} + +NAMES = { + '.ansible-lint': EXTENSIONS['yaml'], + '.babelrc': EXTENSIONS['json'] | {'babelrc'}, + '.bash_aliases': EXTENSIONS['bash'], + '.bash_profile': EXTENSIONS['bash'], + '.bashrc': EXTENSIONS['bash'], + '.bazelrc': {'text', 'bazelrc'}, + '.bowerrc': EXTENSIONS['json'] | {'bowerrc'}, + '.browserslistrc': {'text', 'browserslistrc'}, + '.clang-format': EXTENSIONS['yaml'], + '.clang-tidy': EXTENSIONS['yaml'], + '.codespellrc': EXTENSIONS['ini'] | {'codespellrc'}, + '.coveragerc': EXTENSIONS['ini'] | {'coveragerc'}, + '.cshrc': EXTENSIONS['csh'], + '.csslintrc': EXTENSIONS['json'] | {'csslintrc'}, + '.dockerignore': {'text', 'dockerignore'}, + '.editorconfig': {'text', 'editorconfig'}, + '.envrc': EXTENSIONS['bash'], + '.flake8': EXTENSIONS['ini'] | {'flake8'}, + '.gitattributes': {'text', 'gitattributes'}, + '.gitconfig': EXTENSIONS['ini'] | {'gitconfig'}, + '.gitignore': {'text', 'gitignore'}, + '.gitlint': EXTENSIONS['ini'] | {'gitlint'}, + '.gitmodules': {'text', 'gitmodules'}, + '.hgrc': EXTENSIONS['ini'] | {'hgrc'}, + '.isort.cfg': EXTENSIONS['ini'] | {'isort'}, + '.jshintrc': EXTENSIONS['json'] | {'jshintrc'}, + '.mailmap': {'text', 'mailmap'}, + '.mention-bot': EXTENSIONS['json'] | {'mention-bot'}, + '.npmignore': {'text', 'npmignore'}, + '.pdbrc': EXTENSIONS['py'] | {'pdbrc'}, + '.prettierignore': {'text', 'gitignore', 'prettierignore'}, + '.pypirc': EXTENSIONS['ini'] | {'pypirc'}, + '.rstcheck.cfg': EXTENSIONS['ini'], + '.salt-lint': EXTENSIONS['yaml'] | {'salt-lint'}, + '.sqlfluff': EXTENSIONS['ini'], + '.yamllint': EXTENSIONS['yaml'] | {'yamllint'}, + '.zlogin': EXTENSIONS['zsh'], + '.zlogout': EXTENSIONS['zsh'], + '.zprofile': EXTENSIONS['zsh'], + '.zshrc': EXTENSIONS['zsh'], + '.zshenv': EXTENSIONS['zsh'], + 'AUTHORS': EXTENSIONS['txt'], + 'bblayers.conf': EXTENSIONS['bb'], + 'bitbake.conf': EXTENSIONS['bb'], + 'BUILD': EXTENSIONS['bzl'], + 'Cargo.toml': EXTENSIONS['toml'] | {'cargo'}, + 'Cargo.lock': 
EXTENSIONS['toml'] | {'cargo-lock'}, + 'CMakeLists.txt': EXTENSIONS['cmake'], + 'CHANGELOG': EXTENSIONS['txt'], + 'config.ru': EXTENSIONS['rb'], + 'Containerfile': {'text', 'dockerfile'}, + 'CONTRIBUTING': EXTENSIONS['txt'], + 'copy.bara.sky': EXTENSIONS['bzl'], + 'COPYING': EXTENSIONS['txt'], + 'Dockerfile': {'text', 'dockerfile'}, + 'direnvrc': EXTENSIONS['bash'], + 'Gemfile': EXTENSIONS['rb'], + 'Gemfile.lock': {'text'}, + 'GNUmakefile': EXTENSIONS['mk'], + 'go.mod': {'text', 'go-mod'}, + 'go.sum': {'text', 'go-sum'}, + 'Jenkinsfile': EXTENSIONS['jenkins'], + 'LICENSE': EXTENSIONS['txt'], + 'MAINTAINERS': EXTENSIONS['txt'], + 'Makefile': EXTENSIONS['mk'], + 'meson.build': EXTENSIONS['meson'], + 'meson.options': EXTENSIONS['meson'] | {'meson-options'}, + 'meson_options.txt': EXTENSIONS['meson'] | {'meson-options'}, + 'makefile': EXTENSIONS['mk'], + 'NEWS': EXTENSIONS['txt'], + 'NOTICE': EXTENSIONS['txt'], + 'PATENTS': EXTENSIONS['txt'], + 'Pipfile': EXTENSIONS['toml'], + 'Pipfile.lock': EXTENSIONS['json'], + 'PKGBUILD': EXTENSIONS['bash'] | {'pkgbuild', 'alpm'}, + 'poetry.lock': EXTENSIONS['toml'], + 'pom.xml': EXTENSIONS['pom'], + 'pylintrc': EXTENSIONS['ini'] | {'pylintrc'}, + 'README': EXTENSIONS['txt'], + 'Rakefile': EXTENSIONS['rb'], + 'rebar.config': EXTENSIONS['erl'], + 'setup.cfg': EXTENSIONS['ini'], + 'sys.config': EXTENSIONS['erl'], + 'sys.config.src': EXTENSIONS['erl'], + 'Tiltfile': {'text', 'tiltfile'}, + 'Vagrantfile': EXTENSIONS['rb'], + 'WORKSPACE': EXTENSIONS['bzl'], + 'wscript': EXTENSIONS['py'], +} diff --git a/py311/lib/python3.11/site-packages/identify/identify.py b/py311/lib/python3.11/site-packages/identify/identify.py new file mode 100644 index 0000000000000000000000000000000000000000..0279ba8e404de57dec05dede61650db6dadda9a1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/identify/identify.py @@ -0,0 +1,278 @@ +from __future__ import annotations + +import errno +import math +import os.path +import re +import shlex +import stat +import string +import sys +from typing import IO + +from identify import extensions +from identify import interpreters +from identify.vendor import licenses + + +printable = frozenset(string.printable) + +DIRECTORY = 'directory' +SYMLINK = 'symlink' +SOCKET = 'socket' +FILE = 'file' +EXECUTABLE = 'executable' +NON_EXECUTABLE = 'non-executable' +TEXT = 'text' +BINARY = 'binary' + +TYPE_TAGS = frozenset((DIRECTORY, FILE, SYMLINK, SOCKET)) +MODE_TAGS = frozenset((EXECUTABLE, NON_EXECUTABLE)) +ENCODING_TAGS = frozenset((BINARY, TEXT)) +_ALL_TAGS = {*TYPE_TAGS, *MODE_TAGS, *ENCODING_TAGS} +_ALL_TAGS.update(*extensions.EXTENSIONS.values()) +_ALL_TAGS.update(*extensions.EXTENSIONS_NEED_BINARY_CHECK.values()) +_ALL_TAGS.update(*extensions.NAMES.values()) +_ALL_TAGS.update(*interpreters.INTERPRETERS.values()) +ALL_TAGS = frozenset(_ALL_TAGS) + + +def tags_from_path(path: str) -> set[str]: + try: + sr = os.lstat(path) + except (OSError, ValueError): # same error-handling as `os.lexists()` + raise ValueError(f'{path} does not exist.') + + mode = sr.st_mode + if stat.S_ISDIR(mode): + return {DIRECTORY} + if stat.S_ISLNK(mode): + return {SYMLINK} + if stat.S_ISSOCK(mode): + return {SOCKET} + + tags = {FILE} + + executable = os.access(path, os.X_OK) + if executable: + tags.add(EXECUTABLE) + else: + tags.add(NON_EXECUTABLE) + + # As an optimization, if we're able to read tags from the filename, then we + # don't peek at the file contents. 
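+ # Illustrative, derived from the tables above: for a non-executable + # 'foo.py' this fast path alone yields + # {'file', 'non-executable', 'text', 'python'} without reading the file.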
+ t = tags_from_filename(os.path.basename(path)) + if len(t) > 0: + tags.update(t) + else: + if executable: + shebang = parse_shebang_from_file(path) + if len(shebang) > 0: + tags.update(tags_from_interpreter(shebang[0])) + + # some extensions can be both binary and text + # see EXTENSIONS_NEED_BINARY_CHECK + if not ENCODING_TAGS & tags: + if file_is_text(path): + tags.add(TEXT) + else: + tags.add(BINARY) + + assert ENCODING_TAGS & tags, tags + assert MODE_TAGS & tags, tags + return tags + + +def tags_from_filename(path: str) -> set[str]: + _, filename = os.path.split(path) + _, ext = os.path.splitext(filename) + + ret = set() + + # Allow e.g. "Dockerfile.xenial" to match "Dockerfile" + for part in [filename] + filename.split('.'): + if part in extensions.NAMES: + ret.update(extensions.NAMES[part]) + break + + if len(ext) > 0: + ext = ext[1:].lower() + if ext in extensions.EXTENSIONS: + ret.update(extensions.EXTENSIONS[ext]) + elif ext in extensions.EXTENSIONS_NEED_BINARY_CHECK: + ret.update(extensions.EXTENSIONS_NEED_BINARY_CHECK[ext]) + + return ret + + +def tags_from_interpreter(interpreter: str) -> set[str]: + _, _, interpreter = interpreter.rpartition('/') + + # Try "python3.5.2" => "python3.5" => "python3" until one matches. + while interpreter: + if interpreter in interpreters.INTERPRETERS: + return interpreters.INTERPRETERS[interpreter] + else: + interpreter, _, _ = interpreter.rpartition('.') + + return set() + + +def is_text(bytesio: IO[bytes]) -> bool: + """Return whether the first KB of contents seems to be binary. + + This is roughly based on libmagic's binary/text detection: + https://github.com/file/file/blob/df74b09b9027676088c797528edcaae5a9ce9ad0/src/encoding.c#L203-L228 + """ + text_chars = ( + bytearray([7, 8, 9, 10, 11, 12, 13, 27]) + + bytearray(range(0x20, 0x7F)) + + bytearray(range(0x80, 0X100)) + ) + return not bool(bytesio.read(1024).translate(None, text_chars)) + + +def file_is_text(path: str) -> bool: + if not os.path.lexists(path): + raise ValueError(f'{path} does not exist.') + with open(path, 'rb') as f: + return is_text(f) + + +def _shebang_split(line: str) -> list[str]: + try: + # shebangs aren't supposed to be quoted, though some tools such as + # setuptools will write them with quotes so we'll best-guess parse + # with shlex first + return shlex.split(line) + except ValueError: + # failing that, we'll do a more "traditional" shebang parsing which + # just involves splitting by whitespace + return line.split() + + +def _parse_nix_shebang( + bytesio: IO[bytes], + cmd: tuple[str, ...], +) -> tuple[str, ...]: + while bytesio.read(2) == b'#!': + next_line_b = bytesio.readline() + try: + next_line = next_line_b.decode('UTF-8') + except UnicodeDecodeError: + return cmd + + for c in next_line: + if c not in printable: + return cmd + + line_tokens = tuple(_shebang_split(next_line.strip())) + for i, token in enumerate(line_tokens[:-1]): + if token != '-i': + continue + # the argument to -i flag + cmd = (line_tokens[i + 1],) + return cmd + + +def parse_shebang(bytesio: IO[bytes]) -> tuple[str, ...]: + """Parse the shebang from a file opened for reading binary.""" + if bytesio.read(2) != b'#!': + return () + first_line_b = bytesio.readline() + try: + first_line = first_line_b.decode('UTF-8') + except UnicodeDecodeError: + return () + + # Require only printable ascii + for c in first_line: + if c not in printable: + return () + + cmd = tuple(_shebang_split(first_line.strip())) + if cmd[:2] == ('/usr/bin/env', '-S'): + cmd = cmd[2:] + elif cmd[:1] == ('/usr/bin/env',): 
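+ # plain 'env' indirection: drop it so the real interpreter comes first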
+ cmd = cmd[1:] + + if cmd == ('nix-shell',): + return _parse_nix_shebang(bytesio, cmd) + + return cmd + + +def parse_shebang_from_file(path: str) -> tuple[str, ...]: + """Parse the shebang given a file path.""" + if not os.path.lexists(path): + raise ValueError(f'{path} does not exist.') + if not os.access(path, os.X_OK): + return () + + try: + with open(path, 'rb') as f: + return parse_shebang(f) + except OSError as e: + if e.errno == errno.EINVAL: + return () + else: + raise + + +COPYRIGHT_RE = re.compile(r'^\s*(Copyright|\(C\)) .*$', re.I | re.MULTILINE) +WS_RE = re.compile(r'\s+') + + +def _norm_license(s: str) -> str: + s = COPYRIGHT_RE.sub('', s) + s = WS_RE.sub(' ', s) + return s.strip() + + +def license_id(filename: str) -> str | None: + """Return the spdx id for the license contained in `filename`. If no + license is detected, returns `None`. + + spdx: https://spdx.org/licenses/ + licenses from choosealicense.com: https://github.com/choosealicense.com + + Approximate algorithm: + + 1. strip copyright line + 2. normalize whitespace (replace all whitespace with a single space) + 3. check exact text match with existing licenses + 4. failing that use edit distance + """ + import ukkonen # `pip install identify[license]` + + with open(filename, encoding='UTF-8') as f: + contents = f.read() + + norm = _norm_license(contents) + + min_edit_dist = sys.maxsize + min_edit_dist_spdx = '' + + cutoff = math.ceil(.05 * len(norm)) + + # try exact matches + for spdx, text in licenses.LICENSES: + norm_license = _norm_license(text) + if norm == norm_license: + return spdx + + # skip the slow calculation if the lengths are very different + if norm and abs(len(norm) - len(norm_license)) / len(norm) > .05: + continue + + edit_dist = ukkonen.distance(norm, norm_license, cutoff) + if edit_dist < cutoff and edit_dist < min_edit_dist: + min_edit_dist = edit_dist + min_edit_dist_spdx = spdx + + # if there's less than 5% edited from the license, we found our match + if norm and min_edit_dist < cutoff: + return min_edit_dist_spdx + else: + # no matches :'( + return None diff --git a/py311/lib/python3.11/site-packages/identify/interpreters.py b/py311/lib/python3.11/site-packages/identify/interpreters.py new file mode 100644 index 0000000000000000000000000000000000000000..3022e009309e709d433b4481221ff6bcd0131841 --- /dev/null +++ b/py311/lib/python3.11/site-packages/identify/interpreters.py @@ -0,0 +1,25 @@ +from __future__ import annotations +INTERPRETERS = { + 'ash': {'shell', 'ash'}, + 'awk': {'awk'}, + 'bash': {'shell', 'bash'}, + 'bats': {'shell', 'bash', 'bats'}, + 'cbsd': {'shell', 'cbsd'}, + 'csh': {'shell', 'csh'}, + 'dash': {'shell', 'dash'}, + 'expect': {'expect'}, + 'ksh': {'shell', 'ksh'}, + 'node': {'javascript'}, + 'nodejs': {'javascript'}, + 'perl': {'perl'}, + 'php': {'php'}, + 'php7': {'php', 'php7'}, + 'php8': {'php', 'php8'}, + 'python': {'python'}, + 'python2': {'python', 'python2'}, + 'python3': {'python', 'python3'}, + 'ruby': {'ruby'}, + 'sh': {'shell', 'sh'}, + 'tcsh': {'shell', 'tcsh'}, + 'zsh': {'shell', 'zsh'}, +} diff --git a/py311/lib/python3.11/site-packages/identify/py.typed b/py311/lib/python3.11/site-packages/identify/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/idna/__init__.py b/py311/lib/python3.11/site-packages/idna/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cfdc030a751b089fc7e38fc88093b791605d501d --- /dev/null +++ 
b/py311/lib/python3.11/site-packages/idna/__init__.py @@ -0,0 +1,45 @@ +from .core import ( + IDNABidiError, + IDNAError, + InvalidCodepoint, + InvalidCodepointContext, + alabel, + check_bidi, + check_hyphen_ok, + check_initial_combiner, + check_label, + check_nfc, + decode, + encode, + ulabel, + uts46_remap, + valid_contextj, + valid_contexto, + valid_label_length, + valid_string_length, +) +from .intranges import intranges_contain +from .package_data import __version__ + +__all__ = [ + "__version__", + "IDNABidiError", + "IDNAError", + "InvalidCodepoint", + "InvalidCodepointContext", + "alabel", + "check_bidi", + "check_hyphen_ok", + "check_initial_combiner", + "check_label", + "check_nfc", + "decode", + "encode", + "intranges_contain", + "ulabel", + "uts46_remap", + "valid_contextj", + "valid_contexto", + "valid_label_length", + "valid_string_length", +] diff --git a/py311/lib/python3.11/site-packages/idna/codec.py b/py311/lib/python3.11/site-packages/idna/codec.py new file mode 100644 index 0000000000000000000000000000000000000000..cbc2e4ff4ec3e2318d47615bab44ea0ca3dba978 --- /dev/null +++ b/py311/lib/python3.11/site-packages/idna/codec.py @@ -0,0 +1,122 @@ +import codecs +import re +from typing import Any, Optional, Tuple + +from .core import IDNAError, alabel, decode, encode, ulabel + +_unicode_dots_re = re.compile("[\u002e\u3002\uff0e\uff61]") + + +class Codec(codecs.Codec): + def encode(self, data: str, errors: str = "strict") -> Tuple[bytes, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return b"", 0 + + return encode(data), len(data) + + def decode(self, data: bytes, errors: str = "strict") -> Tuple[str, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return "", 0 + + return decode(data), len(data) + + +class IncrementalEncoder(codecs.BufferedIncrementalEncoder): + def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[bytes, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return b"", 0 + + labels = _unicode_dots_re.split(data) + trailing_dot = b"" + if labels: + if not labels[-1]: + trailing_dot = b"." + del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = b"." + + result = [] + size = 0 + for label in labels: + result.append(alabel(label)) + if size: + size += 1 + size += len(label) + + # Join with U+002E + result_bytes = b".".join(result) + trailing_dot + size += len(trailing_dot) + return result_bytes, size + + +class IncrementalDecoder(codecs.BufferedIncrementalDecoder): + def _buffer_decode(self, data: Any, errors: str, final: bool) -> Tuple[str, int]: + if errors != "strict": + raise IDNAError('Unsupported error handling "{}"'.format(errors)) + + if not data: + return ("", 0) + + if not isinstance(data, str): + data = str(data, "ascii") + + labels = _unicode_dots_re.split(data) + trailing_dot = "" + if labels: + if not labels[-1]: + trailing_dot = "." + del labels[-1] + elif not final: + # Keep potentially unfinished label until the next call + del labels[-1] + if labels: + trailing_dot = "." 
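+ # e.g. _buffer_decode('a.b', 'strict', final=False) returns ('a.', 2), + # leaving 'b' buffered until it is known to be a complete label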
+ + result = [] + size = 0 + for label in labels: + result.append(ulabel(label)) + if size: + size += 1 + size += len(label) + + result_str = ".".join(result) + trailing_dot + size += len(trailing_dot) + return (result_str, size) + + +class StreamWriter(Codec, codecs.StreamWriter): + pass + + +class StreamReader(Codec, codecs.StreamReader): + pass + + +def search_function(name: str) -> Optional[codecs.CodecInfo]: + if name != "idna2008": + return None + return codecs.CodecInfo( + name=name, + encode=Codec().encode, + decode=Codec().decode, # type: ignore + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamwriter=StreamWriter, + streamreader=StreamReader, + ) + + +codecs.register(search_function) diff --git a/py311/lib/python3.11/site-packages/idna/compat.py b/py311/lib/python3.11/site-packages/idna/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..1df9f2a70e6815908f2784e88897a9a359eef84c --- /dev/null +++ b/py311/lib/python3.11/site-packages/idna/compat.py @@ -0,0 +1,15 @@ +from typing import Any, Union + +from .core import decode, encode + + +def ToASCII(label: str) -> bytes: + return encode(label) + + +def ToUnicode(label: Union[bytes, bytearray]) -> str: + return decode(label) + + +def nameprep(s: Any) -> None: + raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol") diff --git a/py311/lib/python3.11/site-packages/idna/core.py b/py311/lib/python3.11/site-packages/idna/core.py new file mode 100644 index 0000000000000000000000000000000000000000..8177bf7a324f9f54a29e41e867f5d56f2dd0a924 --- /dev/null +++ b/py311/lib/python3.11/site-packages/idna/core.py @@ -0,0 +1,437 @@ +import bisect +import re +import unicodedata +from typing import Optional, Union + +from . import idnadata +from .intranges import intranges_contain + +_virama_combining_class = 9 +_alabel_prefix = b"xn--" +_unicode_dots_re = re.compile("[\u002e\u3002\uff0e\uff61]") + + +class IDNAError(UnicodeError): + """Base exception for all IDNA-encoding related problems""" + + pass + + +class IDNABidiError(IDNAError): + """Exception when bidirectional requirements are not satisfied""" + + pass + + +class InvalidCodepoint(IDNAError): + """Exception when a disallowed or unallocated codepoint is used""" + + pass + + +class InvalidCodepointContext(IDNAError): + """Exception when the codepoint is not valid in the context it is used""" + + pass + + +def _combining_class(cp: int) -> int: + v = unicodedata.combining(chr(cp)) + if v == 0: + if not unicodedata.name(chr(cp)): + raise ValueError("Unknown character in unicodedata") + return v + + +def _is_script(cp: str, script: str) -> bool: + return intranges_contain(ord(cp), idnadata.scripts[script]) + + +def _punycode(s: str) -> bytes: + return s.encode("punycode") + + +def _unot(s: int) -> str: + return "U+{:04X}".format(s) + + +def valid_label_length(label: Union[bytes, str]) -> bool: + if len(label) > 63: + return False + return True + + +def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool: + if len(label) > (254 if trailing_dot else 253): + return False + return True + + +def check_bidi(label: str, check_ltr: bool = False) -> bool: + # Bidi rules should only be applied if string contains RTL characters + bidi_label = False + for idx, cp in enumerate(label, 1): + direction = unicodedata.bidirectional(cp) + if direction == "": + # String likely comes from a newer version of Unicode + raise IDNABidiError("Unknown directionality in label {} at position {}".format(repr(label), idx)) 
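+ # R/AL (right-to-left letters) and AN (Arabic-Indic digits) mark the + # label as bidi, triggering the RFC 5893 checks below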
+ if direction in ["R", "AL", "AN"]: + bidi_label = True + if not bidi_label and not check_ltr: + return True + + # Bidi rule 1 + direction = unicodedata.bidirectional(label[0]) + if direction in ["R", "AL"]: + rtl = True + elif direction == "L": + rtl = False + else: + raise IDNABidiError("First codepoint in label {} must be directionality L, R or AL".format(repr(label))) + + valid_ending = False + number_type: Optional[str] = None + for idx, cp in enumerate(label, 1): + direction = unicodedata.bidirectional(cp) + + if rtl: + # Bidi rule 2 + if direction not in [ + "R", + "AL", + "AN", + "EN", + "ES", + "CS", + "ET", + "ON", + "BN", + "NSM", + ]: + raise IDNABidiError("Invalid direction for codepoint at position {} in a right-to-left label".format(idx)) + # Bidi rule 3 + if direction in ["R", "AL", "EN", "AN"]: + valid_ending = True + elif direction != "NSM": + valid_ending = False + # Bidi rule 4 + if direction in ["AN", "EN"]: + if not number_type: + number_type = direction + else: + if number_type != direction: + raise IDNABidiError("Can not mix numeral types in a right-to-left label") + else: + # Bidi rule 5 + if direction not in ["L", "EN", "ES", "CS", "ET", "ON", "BN", "NSM"]: + raise IDNABidiError("Invalid direction for codepoint at position {} in a left-to-right label".format(idx)) + # Bidi rule 6 + if direction in ["L", "EN"]: + valid_ending = True + elif direction != "NSM": + valid_ending = False + + if not valid_ending: + raise IDNABidiError("Label ends with illegal codepoint directionality") + + return True + + +def check_initial_combiner(label: str) -> bool: + if unicodedata.category(label[0])[0] == "M": + raise IDNAError("Label begins with an illegal combining character") + return True + + +def check_hyphen_ok(label: str) -> bool: + if label[2:4] == "--": + raise IDNAError("Label has disallowed hyphens in 3rd and 4th position") + if label[0] == "-" or label[-1] == "-": + raise IDNAError("Label must not start or end with a hyphen") + return True + + +def check_nfc(label: str) -> None: + if unicodedata.normalize("NFC", label) != label: + raise IDNAError("Label must be in Normalization Form C") + + +def valid_contextj(label: str, pos: int) -> bool: + cp_value = ord(label[pos]) + + if cp_value == 0x200C: + if pos > 0: + if _combining_class(ord(label[pos - 1])) == _virama_combining_class: + return True + + ok = False + for i in range(pos - 1, -1, -1): + joining_type = idnadata.joining_types.get(ord(label[i])) + if joining_type == ord("T"): + continue + elif joining_type in [ord("L"), ord("D")]: + ok = True + break + else: + break + + if not ok: + return False + + ok = False + for i in range(pos + 1, len(label)): + joining_type = idnadata.joining_types.get(ord(label[i])) + if joining_type == ord("T"): + continue + elif joining_type in [ord("R"), ord("D")]: + ok = True + break + else: + break + return ok + + if cp_value == 0x200D: + if pos > 0: + if _combining_class(ord(label[pos - 1])) == _virama_combining_class: + return True + return False + + else: + return False + + +def valid_contexto(label: str, pos: int, exception: bool = False) -> bool: + cp_value = ord(label[pos]) + + if cp_value == 0x00B7: + if 0 < pos < len(label) - 1: + if ord(label[pos - 1]) == 0x006C and ord(label[pos + 1]) == 0x006C: + return True + return False + + elif cp_value == 0x0375: + if pos < len(label) - 1 and len(label) > 1: + return _is_script(label[pos + 1], "Greek") + return False + + elif cp_value == 0x05F3 or cp_value == 0x05F4: + if pos > 0: + return _is_script(label[pos - 1], "Hebrew") + return 
False + + elif cp_value == 0x30FB: + for cp in label: + if cp == "\u30fb": + continue + if _is_script(cp, "Hiragana") or _is_script(cp, "Katakana") or _is_script(cp, "Han"): + return True + return False + + elif 0x660 <= cp_value <= 0x669: + for cp in label: + if 0x6F0 <= ord(cp) <= 0x06F9: + return False + return True + + elif 0x6F0 <= cp_value <= 0x6F9: + for cp in label: + if 0x660 <= ord(cp) <= 0x0669: + return False + return True + + return False + + +def check_label(label: Union[str, bytes, bytearray]) -> None: + if isinstance(label, (bytes, bytearray)): + label = label.decode("utf-8") + if len(label) == 0: + raise IDNAError("Empty Label") + + check_nfc(label) + check_hyphen_ok(label) + check_initial_combiner(label) + + for pos, cp in enumerate(label): + cp_value = ord(cp) + if intranges_contain(cp_value, idnadata.codepoint_classes["PVALID"]): + continue + elif intranges_contain(cp_value, idnadata.codepoint_classes["CONTEXTJ"]): + try: + if not valid_contextj(label, pos): + raise InvalidCodepointContext( + "Joiner {} not allowed at position {} in {}".format(_unot(cp_value), pos + 1, repr(label)) + ) + except ValueError: + raise IDNAError( + "Unknown codepoint adjacent to joiner {} at position {} in {}".format( + _unot(cp_value), pos + 1, repr(label) + ) + ) + elif intranges_contain(cp_value, idnadata.codepoint_classes["CONTEXTO"]): + if not valid_contexto(label, pos): + raise InvalidCodepointContext( + "Codepoint {} not allowed at position {} in {}".format(_unot(cp_value), pos + 1, repr(label)) + ) + else: + raise InvalidCodepoint( + "Codepoint {} at position {} of {} not allowed".format(_unot(cp_value), pos + 1, repr(label)) + ) + + check_bidi(label) + + +def alabel(label: str) -> bytes: + try: + label_bytes = label.encode("ascii") + ulabel(label_bytes) + if not valid_label_length(label_bytes): + raise IDNAError("Label too long") + return label_bytes + except UnicodeEncodeError: + pass + + check_label(label) + label_bytes = _alabel_prefix + _punycode(label) + + if not valid_label_length(label_bytes): + raise IDNAError("Label too long") + + return label_bytes + + +def ulabel(label: Union[str, bytes, bytearray]) -> str: + if not isinstance(label, (bytes, bytearray)): + try: + label_bytes = label.encode("ascii") + except UnicodeEncodeError: + check_label(label) + return label + else: + label_bytes = bytes(label) + + label_bytes = label_bytes.lower() + if label_bytes.startswith(_alabel_prefix): + label_bytes = label_bytes[len(_alabel_prefix) :] + if not label_bytes: + raise IDNAError("Malformed A-label, no Punycode eligible content found") + if label_bytes.decode("ascii")[-1] == "-": + raise IDNAError("A-label must not end with a hyphen") + else: + check_label(label_bytes) + return label_bytes.decode("ascii") + + try: + label = label_bytes.decode("punycode") + except UnicodeError: + raise IDNAError("Invalid A-label") + check_label(label) + return label + + +def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str: + """Re-map the characters in the string according to UTS46 processing.""" + from .uts46data import uts46data + + output = "" + + for pos, char in enumerate(domain): + code_point = ord(char) + try: + uts46row = uts46data[code_point if code_point < 256 else bisect.bisect_left(uts46data, (code_point, "Z")) - 1] + status = uts46row[1] + replacement: Optional[str] = None + if len(uts46row) == 3: + replacement = uts46row[2] + if ( + status == "V" + or (status == "D" and not transitional) + or (status == "3" and not std3_rules and replacement is None) 
+ ): + output += char + elif replacement is not None and ( + status == "M" or (status == "3" and not std3_rules) or (status == "D" and transitional) + ): + output += replacement + elif status != "I": + raise IndexError() + except IndexError: + raise InvalidCodepoint( + "Codepoint {} not allowed at position {} in {}".format(_unot(code_point), pos + 1, repr(domain)) + ) + + return unicodedata.normalize("NFC", output) + + +def encode( + s: Union[str, bytes, bytearray], + strict: bool = False, + uts46: bool = False, + std3_rules: bool = False, + transitional: bool = False, +) -> bytes: + if not isinstance(s, str): + try: + s = str(s, "ascii") + except UnicodeDecodeError: + raise IDNAError("should pass a unicode string to the function rather than a byte string.") + if uts46: + s = uts46_remap(s, std3_rules, transitional) + trailing_dot = False + result = [] + if strict: + labels = s.split(".") + else: + labels = _unicode_dots_re.split(s) + if not labels or labels == [""]: + raise IDNAError("Empty domain") + if labels[-1] == "": + del labels[-1] + trailing_dot = True + for label in labels: + s = alabel(label) + if s: + result.append(s) + else: + raise IDNAError("Empty label") + if trailing_dot: + result.append(b"") + s = b".".join(result) + if not valid_string_length(s, trailing_dot): + raise IDNAError("Domain too long") + return s + + +def decode( + s: Union[str, bytes, bytearray], + strict: bool = False, + uts46: bool = False, + std3_rules: bool = False, +) -> str: + try: + if not isinstance(s, str): + s = str(s, "ascii") + except UnicodeDecodeError: + raise IDNAError("Invalid ASCII in A-label") + if uts46: + s = uts46_remap(s, std3_rules, False) + trailing_dot = False + result = [] + if not strict: + labels = _unicode_dots_re.split(s) + else: + labels = s.split(".") + if not labels or labels == [""]: + raise IDNAError("Empty domain") + if not labels[-1]: + del labels[-1] + trailing_dot = True + for label in labels: + s = ulabel(label) + if s: + result.append(s) + else: + raise IDNAError("Empty label") + if trailing_dot: + result.append("") + return ".".join(result) diff --git a/py311/lib/python3.11/site-packages/idna/idnadata.py b/py311/lib/python3.11/site-packages/idna/idnadata.py new file mode 100644 index 0000000000000000000000000000000000000000..ded47cae0b16977aae69f3895ecfe8b8980f58d0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/idna/idnadata.py @@ -0,0 +1,4309 @@ +# This file is automatically generated by tools/idna-data + +__version__ = "16.0.0" + +scripts = { + "Greek": ( + 0x37000000374, + 0x37500000378, + 0x37A0000037E, + 0x37F00000380, + 0x38400000385, + 0x38600000387, + 0x3880000038B, + 0x38C0000038D, + 0x38E000003A2, + 0x3A3000003E2, + 0x3F000000400, + 0x1D2600001D2B, + 0x1D5D00001D62, + 0x1D6600001D6B, + 0x1DBF00001DC0, + 0x1F0000001F16, + 0x1F1800001F1E, + 0x1F2000001F46, + 0x1F4800001F4E, + 0x1F5000001F58, + 0x1F5900001F5A, + 0x1F5B00001F5C, + 0x1F5D00001F5E, + 0x1F5F00001F7E, + 0x1F8000001FB5, + 0x1FB600001FC5, + 0x1FC600001FD4, + 0x1FD600001FDC, + 0x1FDD00001FF0, + 0x1FF200001FF5, + 0x1FF600001FFF, + 0x212600002127, + 0xAB650000AB66, + 0x101400001018F, + 0x101A0000101A1, + 0x1D2000001D246, + ), + "Han": ( + 0x2E8000002E9A, + 0x2E9B00002EF4, + 0x2F0000002FD6, + 0x300500003006, + 0x300700003008, + 0x30210000302A, + 0x30380000303C, + 0x340000004DC0, + 0x4E000000A000, + 0xF9000000FA6E, + 0xFA700000FADA, + 0x16FE200016FE4, + 0x16FF000016FF2, + 0x200000002A6E0, + 0x2A7000002B73A, + 0x2B7400002B81E, + 0x2B8200002CEA2, + 0x2CEB00002EBE1, + 0x2EBF00002EE5E, + 
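# each value packs a half-open codepoint range as (start << 32) | end; + # e.g. 0x340000004DC0 above is U+3400-U+4DBF (CJK Extension A); membership + # is tested via intranges_contain() +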
0x2F8000002FA1E, + 0x300000003134B, + 0x31350000323B0, + ), + "Hebrew": ( + 0x591000005C8, + 0x5D0000005EB, + 0x5EF000005F5, + 0xFB1D0000FB37, + 0xFB380000FB3D, + 0xFB3E0000FB3F, + 0xFB400000FB42, + 0xFB430000FB45, + 0xFB460000FB50, + ), + "Hiragana": ( + 0x304100003097, + 0x309D000030A0, + 0x1B0010001B120, + 0x1B1320001B133, + 0x1B1500001B153, + 0x1F2000001F201, + ), + "Katakana": ( + 0x30A1000030FB, + 0x30FD00003100, + 0x31F000003200, + 0x32D0000032FF, + 0x330000003358, + 0xFF660000FF70, + 0xFF710000FF9E, + 0x1AFF00001AFF4, + 0x1AFF50001AFFC, + 0x1AFFD0001AFFF, + 0x1B0000001B001, + 0x1B1200001B123, + 0x1B1550001B156, + 0x1B1640001B168, + ), +} +joining_types = { + 0xAD: 84, + 0x300: 84, + 0x301: 84, + 0x302: 84, + 0x303: 84, + 0x304: 84, + 0x305: 84, + 0x306: 84, + 0x307: 84, + 0x308: 84, + 0x309: 84, + 0x30A: 84, + 0x30B: 84, + 0x30C: 84, + 0x30D: 84, + 0x30E: 84, + 0x30F: 84, + 0x310: 84, + 0x311: 84, + 0x312: 84, + 0x313: 84, + 0x314: 84, + 0x315: 84, + 0x316: 84, + 0x317: 84, + 0x318: 84, + 0x319: 84, + 0x31A: 84, + 0x31B: 84, + 0x31C: 84, + 0x31D: 84, + 0x31E: 84, + 0x31F: 84, + 0x320: 84, + 0x321: 84, + 0x322: 84, + 0x323: 84, + 0x324: 84, + 0x325: 84, + 0x326: 84, + 0x327: 84, + 0x328: 84, + 0x329: 84, + 0x32A: 84, + 0x32B: 84, + 0x32C: 84, + 0x32D: 84, + 0x32E: 84, + 0x32F: 84, + 0x330: 84, + 0x331: 84, + 0x332: 84, + 0x333: 84, + 0x334: 84, + 0x335: 84, + 0x336: 84, + 0x337: 84, + 0x338: 84, + 0x339: 84, + 0x33A: 84, + 0x33B: 84, + 0x33C: 84, + 0x33D: 84, + 0x33E: 84, + 0x33F: 84, + 0x340: 84, + 0x341: 84, + 0x342: 84, + 0x343: 84, + 0x344: 84, + 0x345: 84, + 0x346: 84, + 0x347: 84, + 0x348: 84, + 0x349: 84, + 0x34A: 84, + 0x34B: 84, + 0x34C: 84, + 0x34D: 84, + 0x34E: 84, + 0x34F: 84, + 0x350: 84, + 0x351: 84, + 0x352: 84, + 0x353: 84, + 0x354: 84, + 0x355: 84, + 0x356: 84, + 0x357: 84, + 0x358: 84, + 0x359: 84, + 0x35A: 84, + 0x35B: 84, + 0x35C: 84, + 0x35D: 84, + 0x35E: 84, + 0x35F: 84, + 0x360: 84, + 0x361: 84, + 0x362: 84, + 0x363: 84, + 0x364: 84, + 0x365: 84, + 0x366: 84, + 0x367: 84, + 0x368: 84, + 0x369: 84, + 0x36A: 84, + 0x36B: 84, + 0x36C: 84, + 0x36D: 84, + 0x36E: 84, + 0x36F: 84, + 0x483: 84, + 0x484: 84, + 0x485: 84, + 0x486: 84, + 0x487: 84, + 0x488: 84, + 0x489: 84, + 0x591: 84, + 0x592: 84, + 0x593: 84, + 0x594: 84, + 0x595: 84, + 0x596: 84, + 0x597: 84, + 0x598: 84, + 0x599: 84, + 0x59A: 84, + 0x59B: 84, + 0x59C: 84, + 0x59D: 84, + 0x59E: 84, + 0x59F: 84, + 0x5A0: 84, + 0x5A1: 84, + 0x5A2: 84, + 0x5A3: 84, + 0x5A4: 84, + 0x5A5: 84, + 0x5A6: 84, + 0x5A7: 84, + 0x5A8: 84, + 0x5A9: 84, + 0x5AA: 84, + 0x5AB: 84, + 0x5AC: 84, + 0x5AD: 84, + 0x5AE: 84, + 0x5AF: 84, + 0x5B0: 84, + 0x5B1: 84, + 0x5B2: 84, + 0x5B3: 84, + 0x5B4: 84, + 0x5B5: 84, + 0x5B6: 84, + 0x5B7: 84, + 0x5B8: 84, + 0x5B9: 84, + 0x5BA: 84, + 0x5BB: 84, + 0x5BC: 84, + 0x5BD: 84, + 0x5BF: 84, + 0x5C1: 84, + 0x5C2: 84, + 0x5C4: 84, + 0x5C5: 84, + 0x5C7: 84, + 0x610: 84, + 0x611: 84, + 0x612: 84, + 0x613: 84, + 0x614: 84, + 0x615: 84, + 0x616: 84, + 0x617: 84, + 0x618: 84, + 0x619: 84, + 0x61A: 84, + 0x61C: 84, + 0x620: 68, + 0x622: 82, + 0x623: 82, + 0x624: 82, + 0x625: 82, + 0x626: 68, + 0x627: 82, + 0x628: 68, + 0x629: 82, + 0x62A: 68, + 0x62B: 68, + 0x62C: 68, + 0x62D: 68, + 0x62E: 68, + 0x62F: 82, + 0x630: 82, + 0x631: 82, + 0x632: 82, + 0x633: 68, + 0x634: 68, + 0x635: 68, + 0x636: 68, + 0x637: 68, + 0x638: 68, + 0x639: 68, + 0x63A: 68, + 0x63B: 68, + 0x63C: 68, + 0x63D: 68, + 0x63E: 68, + 0x63F: 68, + 0x640: 67, + 0x641: 68, + 0x642: 68, + 0x643: 68, + 0x644: 68, + 0x645: 68, + 0x646: 68, + 0x647: 
68, + 0x648: 82, + 0x649: 68, + 0x64A: 68, + 0x64B: 84, + 0x64C: 84, + 0x64D: 84, + 0x64E: 84, + 0x64F: 84, + 0x650: 84, + 0x651: 84, + 0x652: 84, + 0x653: 84, + 0x654: 84, + 0x655: 84, + 0x656: 84, + 0x657: 84, + 0x658: 84, + 0x659: 84, + 0x65A: 84, + 0x65B: 84, + 0x65C: 84, + 0x65D: 84, + 0x65E: 84, + 0x65F: 84, + 0x66E: 68, + 0x66F: 68, + 0x670: 84, + 0x671: 82, + 0x672: 82, + 0x673: 82, + 0x675: 82, + 0x676: 82, + 0x677: 82, + 0x678: 68, + 0x679: 68, + 0x67A: 68, + 0x67B: 68, + 0x67C: 68, + 0x67D: 68, + 0x67E: 68, + 0x67F: 68, + 0x680: 68, + 0x681: 68, + 0x682: 68, + 0x683: 68, + 0x684: 68, + 0x685: 68, + 0x686: 68, + 0x687: 68, + 0x688: 82, + 0x689: 82, + 0x68A: 82, + 0x68B: 82, + 0x68C: 82, + 0x68D: 82, + 0x68E: 82, + 0x68F: 82, + 0x690: 82, + 0x691: 82, + 0x692: 82, + 0x693: 82, + 0x694: 82, + 0x695: 82, + 0x696: 82, + 0x697: 82, + 0x698: 82, + 0x699: 82, + 0x69A: 68, + 0x69B: 68, + 0x69C: 68, + 0x69D: 68, + 0x69E: 68, + 0x69F: 68, + 0x6A0: 68, + 0x6A1: 68, + 0x6A2: 68, + 0x6A3: 68, + 0x6A4: 68, + 0x6A5: 68, + 0x6A6: 68, + 0x6A7: 68, + 0x6A8: 68, + 0x6A9: 68, + 0x6AA: 68, + 0x6AB: 68, + 0x6AC: 68, + 0x6AD: 68, + 0x6AE: 68, + 0x6AF: 68, + 0x6B0: 68, + 0x6B1: 68, + 0x6B2: 68, + 0x6B3: 68, + 0x6B4: 68, + 0x6B5: 68, + 0x6B6: 68, + 0x6B7: 68, + 0x6B8: 68, + 0x6B9: 68, + 0x6BA: 68, + 0x6BB: 68, + 0x6BC: 68, + 0x6BD: 68, + 0x6BE: 68, + 0x6BF: 68, + 0x6C0: 82, + 0x6C1: 68, + 0x6C2: 68, + 0x6C3: 82, + 0x6C4: 82, + 0x6C5: 82, + 0x6C6: 82, + 0x6C7: 82, + 0x6C8: 82, + 0x6C9: 82, + 0x6CA: 82, + 0x6CB: 82, + 0x6CC: 68, + 0x6CD: 82, + 0x6CE: 68, + 0x6CF: 82, + 0x6D0: 68, + 0x6D1: 68, + 0x6D2: 82, + 0x6D3: 82, + 0x6D5: 82, + 0x6D6: 84, + 0x6D7: 84, + 0x6D8: 84, + 0x6D9: 84, + 0x6DA: 84, + 0x6DB: 84, + 0x6DC: 84, + 0x6DF: 84, + 0x6E0: 84, + 0x6E1: 84, + 0x6E2: 84, + 0x6E3: 84, + 0x6E4: 84, + 0x6E7: 84, + 0x6E8: 84, + 0x6EA: 84, + 0x6EB: 84, + 0x6EC: 84, + 0x6ED: 84, + 0x6EE: 82, + 0x6EF: 82, + 0x6FA: 68, + 0x6FB: 68, + 0x6FC: 68, + 0x6FF: 68, + 0x70F: 84, + 0x710: 82, + 0x711: 84, + 0x712: 68, + 0x713: 68, + 0x714: 68, + 0x715: 82, + 0x716: 82, + 0x717: 82, + 0x718: 82, + 0x719: 82, + 0x71A: 68, + 0x71B: 68, + 0x71C: 68, + 0x71D: 68, + 0x71E: 82, + 0x71F: 68, + 0x720: 68, + 0x721: 68, + 0x722: 68, + 0x723: 68, + 0x724: 68, + 0x725: 68, + 0x726: 68, + 0x727: 68, + 0x728: 82, + 0x729: 68, + 0x72A: 82, + 0x72B: 68, + 0x72C: 82, + 0x72D: 68, + 0x72E: 68, + 0x72F: 82, + 0x730: 84, + 0x731: 84, + 0x732: 84, + 0x733: 84, + 0x734: 84, + 0x735: 84, + 0x736: 84, + 0x737: 84, + 0x738: 84, + 0x739: 84, + 0x73A: 84, + 0x73B: 84, + 0x73C: 84, + 0x73D: 84, + 0x73E: 84, + 0x73F: 84, + 0x740: 84, + 0x741: 84, + 0x742: 84, + 0x743: 84, + 0x744: 84, + 0x745: 84, + 0x746: 84, + 0x747: 84, + 0x748: 84, + 0x749: 84, + 0x74A: 84, + 0x74D: 82, + 0x74E: 68, + 0x74F: 68, + 0x750: 68, + 0x751: 68, + 0x752: 68, + 0x753: 68, + 0x754: 68, + 0x755: 68, + 0x756: 68, + 0x757: 68, + 0x758: 68, + 0x759: 82, + 0x75A: 82, + 0x75B: 82, + 0x75C: 68, + 0x75D: 68, + 0x75E: 68, + 0x75F: 68, + 0x760: 68, + 0x761: 68, + 0x762: 68, + 0x763: 68, + 0x764: 68, + 0x765: 68, + 0x766: 68, + 0x767: 68, + 0x768: 68, + 0x769: 68, + 0x76A: 68, + 0x76B: 82, + 0x76C: 82, + 0x76D: 68, + 0x76E: 68, + 0x76F: 68, + 0x770: 68, + 0x771: 82, + 0x772: 68, + 0x773: 82, + 0x774: 82, + 0x775: 68, + 0x776: 68, + 0x777: 68, + 0x778: 82, + 0x779: 82, + 0x77A: 68, + 0x77B: 68, + 0x77C: 68, + 0x77D: 68, + 0x77E: 68, + 0x77F: 68, + 0x7A6: 84, + 0x7A7: 84, + 0x7A8: 84, + 0x7A9: 84, + 0x7AA: 84, + 0x7AB: 84, + 0x7AC: 84, + 0x7AD: 84, + 0x7AE: 84, + 0x7AF: 84, + 0x7B0: 84, + 
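# values are ord() of the Unicode joining-type letter: 67 'C' (causing), + # 68 'D' (dual), 76 'L' (left), 82 'R' (right), 84 'T' (transparent), + # matching the ord('T')/ord('L')/ord('D')/ord('R') checks in valid_contextj() +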
0x7CA: 68, + 0x7CB: 68, + 0x7CC: 68, + 0x7CD: 68, + 0x7CE: 68, + 0x7CF: 68, + 0x7D0: 68, + 0x7D1: 68, + 0x7D2: 68, + 0x7D3: 68, + 0x7D4: 68, + 0x7D5: 68, + 0x7D6: 68, + 0x7D7: 68, + 0x7D8: 68, + 0x7D9: 68, + 0x7DA: 68, + 0x7DB: 68, + 0x7DC: 68, + 0x7DD: 68, + 0x7DE: 68, + 0x7DF: 68, + 0x7E0: 68, + 0x7E1: 68, + 0x7E2: 68, + 0x7E3: 68, + 0x7E4: 68, + 0x7E5: 68, + 0x7E6: 68, + 0x7E7: 68, + 0x7E8: 68, + 0x7E9: 68, + 0x7EA: 68, + 0x7EB: 84, + 0x7EC: 84, + 0x7ED: 84, + 0x7EE: 84, + 0x7EF: 84, + 0x7F0: 84, + 0x7F1: 84, + 0x7F2: 84, + 0x7F3: 84, + 0x7FA: 67, + 0x7FD: 84, + 0x816: 84, + 0x817: 84, + 0x818: 84, + 0x819: 84, + 0x81B: 84, + 0x81C: 84, + 0x81D: 84, + 0x81E: 84, + 0x81F: 84, + 0x820: 84, + 0x821: 84, + 0x822: 84, + 0x823: 84, + 0x825: 84, + 0x826: 84, + 0x827: 84, + 0x829: 84, + 0x82A: 84, + 0x82B: 84, + 0x82C: 84, + 0x82D: 84, + 0x840: 82, + 0x841: 68, + 0x842: 68, + 0x843: 68, + 0x844: 68, + 0x845: 68, + 0x846: 82, + 0x847: 82, + 0x848: 68, + 0x849: 82, + 0x84A: 68, + 0x84B: 68, + 0x84C: 68, + 0x84D: 68, + 0x84E: 68, + 0x84F: 68, + 0x850: 68, + 0x851: 68, + 0x852: 68, + 0x853: 68, + 0x854: 82, + 0x855: 68, + 0x856: 82, + 0x857: 82, + 0x858: 82, + 0x859: 84, + 0x85A: 84, + 0x85B: 84, + 0x860: 68, + 0x862: 68, + 0x863: 68, + 0x864: 68, + 0x865: 68, + 0x867: 82, + 0x868: 68, + 0x869: 82, + 0x86A: 82, + 0x870: 82, + 0x871: 82, + 0x872: 82, + 0x873: 82, + 0x874: 82, + 0x875: 82, + 0x876: 82, + 0x877: 82, + 0x878: 82, + 0x879: 82, + 0x87A: 82, + 0x87B: 82, + 0x87C: 82, + 0x87D: 82, + 0x87E: 82, + 0x87F: 82, + 0x880: 82, + 0x881: 82, + 0x882: 82, + 0x883: 67, + 0x884: 67, + 0x885: 67, + 0x886: 68, + 0x889: 68, + 0x88A: 68, + 0x88B: 68, + 0x88C: 68, + 0x88D: 68, + 0x88E: 82, + 0x897: 84, + 0x898: 84, + 0x899: 84, + 0x89A: 84, + 0x89B: 84, + 0x89C: 84, + 0x89D: 84, + 0x89E: 84, + 0x89F: 84, + 0x8A0: 68, + 0x8A1: 68, + 0x8A2: 68, + 0x8A3: 68, + 0x8A4: 68, + 0x8A5: 68, + 0x8A6: 68, + 0x8A7: 68, + 0x8A8: 68, + 0x8A9: 68, + 0x8AA: 82, + 0x8AB: 82, + 0x8AC: 82, + 0x8AE: 82, + 0x8AF: 68, + 0x8B0: 68, + 0x8B1: 82, + 0x8B2: 82, + 0x8B3: 68, + 0x8B4: 68, + 0x8B5: 68, + 0x8B6: 68, + 0x8B7: 68, + 0x8B8: 68, + 0x8B9: 82, + 0x8BA: 68, + 0x8BB: 68, + 0x8BC: 68, + 0x8BD: 68, + 0x8BE: 68, + 0x8BF: 68, + 0x8C0: 68, + 0x8C1: 68, + 0x8C2: 68, + 0x8C3: 68, + 0x8C4: 68, + 0x8C5: 68, + 0x8C6: 68, + 0x8C7: 68, + 0x8C8: 68, + 0x8CA: 84, + 0x8CB: 84, + 0x8CC: 84, + 0x8CD: 84, + 0x8CE: 84, + 0x8CF: 84, + 0x8D0: 84, + 0x8D1: 84, + 0x8D2: 84, + 0x8D3: 84, + 0x8D4: 84, + 0x8D5: 84, + 0x8D6: 84, + 0x8D7: 84, + 0x8D8: 84, + 0x8D9: 84, + 0x8DA: 84, + 0x8DB: 84, + 0x8DC: 84, + 0x8DD: 84, + 0x8DE: 84, + 0x8DF: 84, + 0x8E0: 84, + 0x8E1: 84, + 0x8E3: 84, + 0x8E4: 84, + 0x8E5: 84, + 0x8E6: 84, + 0x8E7: 84, + 0x8E8: 84, + 0x8E9: 84, + 0x8EA: 84, + 0x8EB: 84, + 0x8EC: 84, + 0x8ED: 84, + 0x8EE: 84, + 0x8EF: 84, + 0x8F0: 84, + 0x8F1: 84, + 0x8F2: 84, + 0x8F3: 84, + 0x8F4: 84, + 0x8F5: 84, + 0x8F6: 84, + 0x8F7: 84, + 0x8F8: 84, + 0x8F9: 84, + 0x8FA: 84, + 0x8FB: 84, + 0x8FC: 84, + 0x8FD: 84, + 0x8FE: 84, + 0x8FF: 84, + 0x900: 84, + 0x901: 84, + 0x902: 84, + 0x93A: 84, + 0x93C: 84, + 0x941: 84, + 0x942: 84, + 0x943: 84, + 0x944: 84, + 0x945: 84, + 0x946: 84, + 0x947: 84, + 0x948: 84, + 0x94D: 84, + 0x951: 84, + 0x952: 84, + 0x953: 84, + 0x954: 84, + 0x955: 84, + 0x956: 84, + 0x957: 84, + 0x962: 84, + 0x963: 84, + 0x981: 84, + 0x9BC: 84, + 0x9C1: 84, + 0x9C2: 84, + 0x9C3: 84, + 0x9C4: 84, + 0x9CD: 84, + 0x9E2: 84, + 0x9E3: 84, + 0x9FE: 84, + 0xA01: 84, + 0xA02: 84, + 0xA3C: 84, + 0xA41: 84, + 0xA42: 84, + 0xA47: 84, + 0xA48: 84, + 
0xA4B: 84, + 0xA4C: 84, + 0xA4D: 84, + 0xA51: 84, + 0xA70: 84, + 0xA71: 84, + 0xA75: 84, + 0xA81: 84, + 0xA82: 84, + 0xABC: 84, + 0xAC1: 84, + 0xAC2: 84, + 0xAC3: 84, + 0xAC4: 84, + 0xAC5: 84, + 0xAC7: 84, + 0xAC8: 84, + 0xACD: 84, + 0xAE2: 84, + 0xAE3: 84, + 0xAFA: 84, + 0xAFB: 84, + 0xAFC: 84, + 0xAFD: 84, + 0xAFE: 84, + 0xAFF: 84, + 0xB01: 84, + 0xB3C: 84, + 0xB3F: 84, + 0xB41: 84, + 0xB42: 84, + 0xB43: 84, + 0xB44: 84, + 0xB4D: 84, + 0xB55: 84, + 0xB56: 84, + 0xB62: 84, + 0xB63: 84, + 0xB82: 84, + 0xBC0: 84, + 0xBCD: 84, + 0xC00: 84, + 0xC04: 84, + 0xC3C: 84, + 0xC3E: 84, + 0xC3F: 84, + 0xC40: 84, + 0xC46: 84, + 0xC47: 84, + 0xC48: 84, + 0xC4A: 84, + 0xC4B: 84, + 0xC4C: 84, + 0xC4D: 84, + 0xC55: 84, + 0xC56: 84, + 0xC62: 84, + 0xC63: 84, + 0xC81: 84, + 0xCBC: 84, + 0xCBF: 84, + 0xCC6: 84, + 0xCCC: 84, + 0xCCD: 84, + 0xCE2: 84, + 0xCE3: 84, + 0xD00: 84, + 0xD01: 84, + 0xD3B: 84, + 0xD3C: 84, + 0xD41: 84, + 0xD42: 84, + 0xD43: 84, + 0xD44: 84, + 0xD4D: 84, + 0xD62: 84, + 0xD63: 84, + 0xD81: 84, + 0xDCA: 84, + 0xDD2: 84, + 0xDD3: 84, + 0xDD4: 84, + 0xDD6: 84, + 0xE31: 84, + 0xE34: 84, + 0xE35: 84, + 0xE36: 84, + 0xE37: 84, + 0xE38: 84, + 0xE39: 84, + 0xE3A: 84, + 0xE47: 84, + 0xE48: 84, + 0xE49: 84, + 0xE4A: 84, + 0xE4B: 84, + 0xE4C: 84, + 0xE4D: 84, + 0xE4E: 84, + 0xEB1: 84, + 0xEB4: 84, + 0xEB5: 84, + 0xEB6: 84, + 0xEB7: 84, + 0xEB8: 84, + 0xEB9: 84, + 0xEBA: 84, + 0xEBB: 84, + 0xEBC: 84, + 0xEC8: 84, + 0xEC9: 84, + 0xECA: 84, + 0xECB: 84, + 0xECC: 84, + 0xECD: 84, + 0xECE: 84, + 0xF18: 84, + 0xF19: 84, + 0xF35: 84, + 0xF37: 84, + 0xF39: 84, + 0xF71: 84, + 0xF72: 84, + 0xF73: 84, + 0xF74: 84, + 0xF75: 84, + 0xF76: 84, + 0xF77: 84, + 0xF78: 84, + 0xF79: 84, + 0xF7A: 84, + 0xF7B: 84, + 0xF7C: 84, + 0xF7D: 84, + 0xF7E: 84, + 0xF80: 84, + 0xF81: 84, + 0xF82: 84, + 0xF83: 84, + 0xF84: 84, + 0xF86: 84, + 0xF87: 84, + 0xF8D: 84, + 0xF8E: 84, + 0xF8F: 84, + 0xF90: 84, + 0xF91: 84, + 0xF92: 84, + 0xF93: 84, + 0xF94: 84, + 0xF95: 84, + 0xF96: 84, + 0xF97: 84, + 0xF99: 84, + 0xF9A: 84, + 0xF9B: 84, + 0xF9C: 84, + 0xF9D: 84, + 0xF9E: 84, + 0xF9F: 84, + 0xFA0: 84, + 0xFA1: 84, + 0xFA2: 84, + 0xFA3: 84, + 0xFA4: 84, + 0xFA5: 84, + 0xFA6: 84, + 0xFA7: 84, + 0xFA8: 84, + 0xFA9: 84, + 0xFAA: 84, + 0xFAB: 84, + 0xFAC: 84, + 0xFAD: 84, + 0xFAE: 84, + 0xFAF: 84, + 0xFB0: 84, + 0xFB1: 84, + 0xFB2: 84, + 0xFB3: 84, + 0xFB4: 84, + 0xFB5: 84, + 0xFB6: 84, + 0xFB7: 84, + 0xFB8: 84, + 0xFB9: 84, + 0xFBA: 84, + 0xFBB: 84, + 0xFBC: 84, + 0xFC6: 84, + 0x102D: 84, + 0x102E: 84, + 0x102F: 84, + 0x1030: 84, + 0x1032: 84, + 0x1033: 84, + 0x1034: 84, + 0x1035: 84, + 0x1036: 84, + 0x1037: 84, + 0x1039: 84, + 0x103A: 84, + 0x103D: 84, + 0x103E: 84, + 0x1058: 84, + 0x1059: 84, + 0x105E: 84, + 0x105F: 84, + 0x1060: 84, + 0x1071: 84, + 0x1072: 84, + 0x1073: 84, + 0x1074: 84, + 0x1082: 84, + 0x1085: 84, + 0x1086: 84, + 0x108D: 84, + 0x109D: 84, + 0x135D: 84, + 0x135E: 84, + 0x135F: 84, + 0x1712: 84, + 0x1713: 84, + 0x1714: 84, + 0x1732: 84, + 0x1733: 84, + 0x1752: 84, + 0x1753: 84, + 0x1772: 84, + 0x1773: 84, + 0x17B4: 84, + 0x17B5: 84, + 0x17B7: 84, + 0x17B8: 84, + 0x17B9: 84, + 0x17BA: 84, + 0x17BB: 84, + 0x17BC: 84, + 0x17BD: 84, + 0x17C6: 84, + 0x17C9: 84, + 0x17CA: 84, + 0x17CB: 84, + 0x17CC: 84, + 0x17CD: 84, + 0x17CE: 84, + 0x17CF: 84, + 0x17D0: 84, + 0x17D1: 84, + 0x17D2: 84, + 0x17D3: 84, + 0x17DD: 84, + 0x1807: 68, + 0x180A: 67, + 0x180B: 84, + 0x180C: 84, + 0x180D: 84, + 0x180F: 84, + 0x1820: 68, + 0x1821: 68, + 0x1822: 68, + 0x1823: 68, + 0x1824: 68, + 0x1825: 68, + 0x1826: 68, + 0x1827: 68, + 0x1828: 68, + 
0x1829: 68, + 0x182A: 68, + 0x182B: 68, + 0x182C: 68, + 0x182D: 68, + 0x182E: 68, + 0x182F: 68, + 0x1830: 68, + 0x1831: 68, + 0x1832: 68, + 0x1833: 68, + 0x1834: 68, + 0x1835: 68, + 0x1836: 68, + 0x1837: 68, + 0x1838: 68, + 0x1839: 68, + 0x183A: 68, + 0x183B: 68, + 0x183C: 68, + 0x183D: 68, + 0x183E: 68, + 0x183F: 68, + 0x1840: 68, + 0x1841: 68, + 0x1842: 68, + 0x1843: 68, + 0x1844: 68, + 0x1845: 68, + 0x1846: 68, + 0x1847: 68, + 0x1848: 68, + 0x1849: 68, + 0x184A: 68, + 0x184B: 68, + 0x184C: 68, + 0x184D: 68, + 0x184E: 68, + 0x184F: 68, + 0x1850: 68, + 0x1851: 68, + 0x1852: 68, + 0x1853: 68, + 0x1854: 68, + 0x1855: 68, + 0x1856: 68, + 0x1857: 68, + 0x1858: 68, + 0x1859: 68, + 0x185A: 68, + 0x185B: 68, + 0x185C: 68, + 0x185D: 68, + 0x185E: 68, + 0x185F: 68, + 0x1860: 68, + 0x1861: 68, + 0x1862: 68, + 0x1863: 68, + 0x1864: 68, + 0x1865: 68, + 0x1866: 68, + 0x1867: 68, + 0x1868: 68, + 0x1869: 68, + 0x186A: 68, + 0x186B: 68, + 0x186C: 68, + 0x186D: 68, + 0x186E: 68, + 0x186F: 68, + 0x1870: 68, + 0x1871: 68, + 0x1872: 68, + 0x1873: 68, + 0x1874: 68, + 0x1875: 68, + 0x1876: 68, + 0x1877: 68, + 0x1878: 68, + 0x1885: 84, + 0x1886: 84, + 0x1887: 68, + 0x1888: 68, + 0x1889: 68, + 0x188A: 68, + 0x188B: 68, + 0x188C: 68, + 0x188D: 68, + 0x188E: 68, + 0x188F: 68, + 0x1890: 68, + 0x1891: 68, + 0x1892: 68, + 0x1893: 68, + 0x1894: 68, + 0x1895: 68, + 0x1896: 68, + 0x1897: 68, + 0x1898: 68, + 0x1899: 68, + 0x189A: 68, + 0x189B: 68, + 0x189C: 68, + 0x189D: 68, + 0x189E: 68, + 0x189F: 68, + 0x18A0: 68, + 0x18A1: 68, + 0x18A2: 68, + 0x18A3: 68, + 0x18A4: 68, + 0x18A5: 68, + 0x18A6: 68, + 0x18A7: 68, + 0x18A8: 68, + 0x18A9: 84, + 0x18AA: 68, + 0x1920: 84, + 0x1921: 84, + 0x1922: 84, + 0x1927: 84, + 0x1928: 84, + 0x1932: 84, + 0x1939: 84, + 0x193A: 84, + 0x193B: 84, + 0x1A17: 84, + 0x1A18: 84, + 0x1A1B: 84, + 0x1A56: 84, + 0x1A58: 84, + 0x1A59: 84, + 0x1A5A: 84, + 0x1A5B: 84, + 0x1A5C: 84, + 0x1A5D: 84, + 0x1A5E: 84, + 0x1A60: 84, + 0x1A62: 84, + 0x1A65: 84, + 0x1A66: 84, + 0x1A67: 84, + 0x1A68: 84, + 0x1A69: 84, + 0x1A6A: 84, + 0x1A6B: 84, + 0x1A6C: 84, + 0x1A73: 84, + 0x1A74: 84, + 0x1A75: 84, + 0x1A76: 84, + 0x1A77: 84, + 0x1A78: 84, + 0x1A79: 84, + 0x1A7A: 84, + 0x1A7B: 84, + 0x1A7C: 84, + 0x1A7F: 84, + 0x1AB0: 84, + 0x1AB1: 84, + 0x1AB2: 84, + 0x1AB3: 84, + 0x1AB4: 84, + 0x1AB5: 84, + 0x1AB6: 84, + 0x1AB7: 84, + 0x1AB8: 84, + 0x1AB9: 84, + 0x1ABA: 84, + 0x1ABB: 84, + 0x1ABC: 84, + 0x1ABD: 84, + 0x1ABE: 84, + 0x1ABF: 84, + 0x1AC0: 84, + 0x1AC1: 84, + 0x1AC2: 84, + 0x1AC3: 84, + 0x1AC4: 84, + 0x1AC5: 84, + 0x1AC6: 84, + 0x1AC7: 84, + 0x1AC8: 84, + 0x1AC9: 84, + 0x1ACA: 84, + 0x1ACB: 84, + 0x1ACC: 84, + 0x1ACD: 84, + 0x1ACE: 84, + 0x1B00: 84, + 0x1B01: 84, + 0x1B02: 84, + 0x1B03: 84, + 0x1B34: 84, + 0x1B36: 84, + 0x1B37: 84, + 0x1B38: 84, + 0x1B39: 84, + 0x1B3A: 84, + 0x1B3C: 84, + 0x1B42: 84, + 0x1B6B: 84, + 0x1B6C: 84, + 0x1B6D: 84, + 0x1B6E: 84, + 0x1B6F: 84, + 0x1B70: 84, + 0x1B71: 84, + 0x1B72: 84, + 0x1B73: 84, + 0x1B80: 84, + 0x1B81: 84, + 0x1BA2: 84, + 0x1BA3: 84, + 0x1BA4: 84, + 0x1BA5: 84, + 0x1BA8: 84, + 0x1BA9: 84, + 0x1BAB: 84, + 0x1BAC: 84, + 0x1BAD: 84, + 0x1BE6: 84, + 0x1BE8: 84, + 0x1BE9: 84, + 0x1BED: 84, + 0x1BEF: 84, + 0x1BF0: 84, + 0x1BF1: 84, + 0x1C2C: 84, + 0x1C2D: 84, + 0x1C2E: 84, + 0x1C2F: 84, + 0x1C30: 84, + 0x1C31: 84, + 0x1C32: 84, + 0x1C33: 84, + 0x1C36: 84, + 0x1C37: 84, + 0x1CD0: 84, + 0x1CD1: 84, + 0x1CD2: 84, + 0x1CD4: 84, + 0x1CD5: 84, + 0x1CD6: 84, + 0x1CD7: 84, + 0x1CD8: 84, + 0x1CD9: 84, + 0x1CDA: 84, + 0x1CDB: 84, + 0x1CDC: 84, + 0x1CDD: 84, + 0x1CDE: 84, + 0x1CDF: 84, 
+ 0x1CE0: 84, + 0x1CE2: 84, + 0x1CE3: 84, + 0x1CE4: 84, + 0x1CE5: 84, + 0x1CE6: 84, + 0x1CE7: 84, + 0x1CE8: 84, + 0x1CED: 84, + 0x1CF4: 84, + 0x1CF8: 84, + 0x1CF9: 84, + 0x1DC0: 84, + 0x1DC1: 84, + 0x1DC2: 84, + 0x1DC3: 84, + 0x1DC4: 84, + 0x1DC5: 84, + 0x1DC6: 84, + 0x1DC7: 84, + 0x1DC8: 84, + 0x1DC9: 84, + 0x1DCA: 84, + 0x1DCB: 84, + 0x1DCC: 84, + 0x1DCD: 84, + 0x1DCE: 84, + 0x1DCF: 84, + 0x1DD0: 84, + 0x1DD1: 84, + 0x1DD2: 84, + 0x1DD3: 84, + 0x1DD4: 84, + 0x1DD5: 84, + 0x1DD6: 84, + 0x1DD7: 84, + 0x1DD8: 84, + 0x1DD9: 84, + 0x1DDA: 84, + 0x1DDB: 84, + 0x1DDC: 84, + 0x1DDD: 84, + 0x1DDE: 84, + 0x1DDF: 84, + 0x1DE0: 84, + 0x1DE1: 84, + 0x1DE2: 84, + 0x1DE3: 84, + 0x1DE4: 84, + 0x1DE5: 84, + 0x1DE6: 84, + 0x1DE7: 84, + 0x1DE8: 84, + 0x1DE9: 84, + 0x1DEA: 84, + 0x1DEB: 84, + 0x1DEC: 84, + 0x1DED: 84, + 0x1DEE: 84, + 0x1DEF: 84, + 0x1DF0: 84, + 0x1DF1: 84, + 0x1DF2: 84, + 0x1DF3: 84, + 0x1DF4: 84, + 0x1DF5: 84, + 0x1DF6: 84, + 0x1DF7: 84, + 0x1DF8: 84, + 0x1DF9: 84, + 0x1DFA: 84, + 0x1DFB: 84, + 0x1DFC: 84, + 0x1DFD: 84, + 0x1DFE: 84, + 0x1DFF: 84, + 0x200B: 84, + 0x200D: 67, + 0x200E: 84, + 0x200F: 84, + 0x202A: 84, + 0x202B: 84, + 0x202C: 84, + 0x202D: 84, + 0x202E: 84, + 0x2060: 84, + 0x2061: 84, + 0x2062: 84, + 0x2063: 84, + 0x2064: 84, + 0x206A: 84, + 0x206B: 84, + 0x206C: 84, + 0x206D: 84, + 0x206E: 84, + 0x206F: 84, + 0x20D0: 84, + 0x20D1: 84, + 0x20D2: 84, + 0x20D3: 84, + 0x20D4: 84, + 0x20D5: 84, + 0x20D6: 84, + 0x20D7: 84, + 0x20D8: 84, + 0x20D9: 84, + 0x20DA: 84, + 0x20DB: 84, + 0x20DC: 84, + 0x20DD: 84, + 0x20DE: 84, + 0x20DF: 84, + 0x20E0: 84, + 0x20E1: 84, + 0x20E2: 84, + 0x20E3: 84, + 0x20E4: 84, + 0x20E5: 84, + 0x20E6: 84, + 0x20E7: 84, + 0x20E8: 84, + 0x20E9: 84, + 0x20EA: 84, + 0x20EB: 84, + 0x20EC: 84, + 0x20ED: 84, + 0x20EE: 84, + 0x20EF: 84, + 0x20F0: 84, + 0x2CEF: 84, + 0x2CF0: 84, + 0x2CF1: 84, + 0x2D7F: 84, + 0x2DE0: 84, + 0x2DE1: 84, + 0x2DE2: 84, + 0x2DE3: 84, + 0x2DE4: 84, + 0x2DE5: 84, + 0x2DE6: 84, + 0x2DE7: 84, + 0x2DE8: 84, + 0x2DE9: 84, + 0x2DEA: 84, + 0x2DEB: 84, + 0x2DEC: 84, + 0x2DED: 84, + 0x2DEE: 84, + 0x2DEF: 84, + 0x2DF0: 84, + 0x2DF1: 84, + 0x2DF2: 84, + 0x2DF3: 84, + 0x2DF4: 84, + 0x2DF5: 84, + 0x2DF6: 84, + 0x2DF7: 84, + 0x2DF8: 84, + 0x2DF9: 84, + 0x2DFA: 84, + 0x2DFB: 84, + 0x2DFC: 84, + 0x2DFD: 84, + 0x2DFE: 84, + 0x2DFF: 84, + 0x302A: 84, + 0x302B: 84, + 0x302C: 84, + 0x302D: 84, + 0x3099: 84, + 0x309A: 84, + 0xA66F: 84, + 0xA670: 84, + 0xA671: 84, + 0xA672: 84, + 0xA674: 84, + 0xA675: 84, + 0xA676: 84, + 0xA677: 84, + 0xA678: 84, + 0xA679: 84, + 0xA67A: 84, + 0xA67B: 84, + 0xA67C: 84, + 0xA67D: 84, + 0xA69E: 84, + 0xA69F: 84, + 0xA6F0: 84, + 0xA6F1: 84, + 0xA802: 84, + 0xA806: 84, + 0xA80B: 84, + 0xA825: 84, + 0xA826: 84, + 0xA82C: 84, + 0xA840: 68, + 0xA841: 68, + 0xA842: 68, + 0xA843: 68, + 0xA844: 68, + 0xA845: 68, + 0xA846: 68, + 0xA847: 68, + 0xA848: 68, + 0xA849: 68, + 0xA84A: 68, + 0xA84B: 68, + 0xA84C: 68, + 0xA84D: 68, + 0xA84E: 68, + 0xA84F: 68, + 0xA850: 68, + 0xA851: 68, + 0xA852: 68, + 0xA853: 68, + 0xA854: 68, + 0xA855: 68, + 0xA856: 68, + 0xA857: 68, + 0xA858: 68, + 0xA859: 68, + 0xA85A: 68, + 0xA85B: 68, + 0xA85C: 68, + 0xA85D: 68, + 0xA85E: 68, + 0xA85F: 68, + 0xA860: 68, + 0xA861: 68, + 0xA862: 68, + 0xA863: 68, + 0xA864: 68, + 0xA865: 68, + 0xA866: 68, + 0xA867: 68, + 0xA868: 68, + 0xA869: 68, + 0xA86A: 68, + 0xA86B: 68, + 0xA86C: 68, + 0xA86D: 68, + 0xA86E: 68, + 0xA86F: 68, + 0xA870: 68, + 0xA871: 68, + 0xA872: 76, + 0xA8C4: 84, + 0xA8C5: 84, + 0xA8E0: 84, + 0xA8E1: 84, + 0xA8E2: 84, + 0xA8E3: 84, + 0xA8E4: 84, + 0xA8E5: 
84, + 0xA8E6: 84, + 0xA8E7: 84, + 0xA8E8: 84, + 0xA8E9: 84, + 0xA8EA: 84, + 0xA8EB: 84, + 0xA8EC: 84, + 0xA8ED: 84, + 0xA8EE: 84, + 0xA8EF: 84, + 0xA8F0: 84, + 0xA8F1: 84, + 0xA8FF: 84, + 0xA926: 84, + 0xA927: 84, + 0xA928: 84, + 0xA929: 84, + 0xA92A: 84, + 0xA92B: 84, + 0xA92C: 84, + 0xA92D: 84, + 0xA947: 84, + 0xA948: 84, + 0xA949: 84, + 0xA94A: 84, + 0xA94B: 84, + 0xA94C: 84, + 0xA94D: 84, + 0xA94E: 84, + 0xA94F: 84, + 0xA950: 84, + 0xA951: 84, + 0xA980: 84, + 0xA981: 84, + 0xA982: 84, + 0xA9B3: 84, + 0xA9B6: 84, + 0xA9B7: 84, + 0xA9B8: 84, + 0xA9B9: 84, + 0xA9BC: 84, + 0xA9BD: 84, + 0xA9E5: 84, + 0xAA29: 84, + 0xAA2A: 84, + 0xAA2B: 84, + 0xAA2C: 84, + 0xAA2D: 84, + 0xAA2E: 84, + 0xAA31: 84, + 0xAA32: 84, + 0xAA35: 84, + 0xAA36: 84, + 0xAA43: 84, + 0xAA4C: 84, + 0xAA7C: 84, + 0xAAB0: 84, + 0xAAB2: 84, + 0xAAB3: 84, + 0xAAB4: 84, + 0xAAB7: 84, + 0xAAB8: 84, + 0xAABE: 84, + 0xAABF: 84, + 0xAAC1: 84, + 0xAAEC: 84, + 0xAAED: 84, + 0xAAF6: 84, + 0xABE5: 84, + 0xABE8: 84, + 0xABED: 84, + 0xFB1E: 84, + 0xFE00: 84, + 0xFE01: 84, + 0xFE02: 84, + 0xFE03: 84, + 0xFE04: 84, + 0xFE05: 84, + 0xFE06: 84, + 0xFE07: 84, + 0xFE08: 84, + 0xFE09: 84, + 0xFE0A: 84, + 0xFE0B: 84, + 0xFE0C: 84, + 0xFE0D: 84, + 0xFE0E: 84, + 0xFE0F: 84, + 0xFE20: 84, + 0xFE21: 84, + 0xFE22: 84, + 0xFE23: 84, + 0xFE24: 84, + 0xFE25: 84, + 0xFE26: 84, + 0xFE27: 84, + 0xFE28: 84, + 0xFE29: 84, + 0xFE2A: 84, + 0xFE2B: 84, + 0xFE2C: 84, + 0xFE2D: 84, + 0xFE2E: 84, + 0xFE2F: 84, + 0xFEFF: 84, + 0xFFF9: 84, + 0xFFFA: 84, + 0xFFFB: 84, + 0x101FD: 84, + 0x102E0: 84, + 0x10376: 84, + 0x10377: 84, + 0x10378: 84, + 0x10379: 84, + 0x1037A: 84, + 0x10A01: 84, + 0x10A02: 84, + 0x10A03: 84, + 0x10A05: 84, + 0x10A06: 84, + 0x10A0C: 84, + 0x10A0D: 84, + 0x10A0E: 84, + 0x10A0F: 84, + 0x10A38: 84, + 0x10A39: 84, + 0x10A3A: 84, + 0x10A3F: 84, + 0x10AC0: 68, + 0x10AC1: 68, + 0x10AC2: 68, + 0x10AC3: 68, + 0x10AC4: 68, + 0x10AC5: 82, + 0x10AC7: 82, + 0x10AC9: 82, + 0x10ACA: 82, + 0x10ACD: 76, + 0x10ACE: 82, + 0x10ACF: 82, + 0x10AD0: 82, + 0x10AD1: 82, + 0x10AD2: 82, + 0x10AD3: 68, + 0x10AD4: 68, + 0x10AD5: 68, + 0x10AD6: 68, + 0x10AD7: 76, + 0x10AD8: 68, + 0x10AD9: 68, + 0x10ADA: 68, + 0x10ADB: 68, + 0x10ADC: 68, + 0x10ADD: 82, + 0x10ADE: 68, + 0x10ADF: 68, + 0x10AE0: 68, + 0x10AE1: 82, + 0x10AE4: 82, + 0x10AE5: 84, + 0x10AE6: 84, + 0x10AEB: 68, + 0x10AEC: 68, + 0x10AED: 68, + 0x10AEE: 68, + 0x10AEF: 82, + 0x10B80: 68, + 0x10B81: 82, + 0x10B82: 68, + 0x10B83: 82, + 0x10B84: 82, + 0x10B85: 82, + 0x10B86: 68, + 0x10B87: 68, + 0x10B88: 68, + 0x10B89: 82, + 0x10B8A: 68, + 0x10B8B: 68, + 0x10B8C: 82, + 0x10B8D: 68, + 0x10B8E: 82, + 0x10B8F: 82, + 0x10B90: 68, + 0x10B91: 82, + 0x10BA9: 82, + 0x10BAA: 82, + 0x10BAB: 82, + 0x10BAC: 82, + 0x10BAD: 68, + 0x10BAE: 68, + 0x10D00: 76, + 0x10D01: 68, + 0x10D02: 68, + 0x10D03: 68, + 0x10D04: 68, + 0x10D05: 68, + 0x10D06: 68, + 0x10D07: 68, + 0x10D08: 68, + 0x10D09: 68, + 0x10D0A: 68, + 0x10D0B: 68, + 0x10D0C: 68, + 0x10D0D: 68, + 0x10D0E: 68, + 0x10D0F: 68, + 0x10D10: 68, + 0x10D11: 68, + 0x10D12: 68, + 0x10D13: 68, + 0x10D14: 68, + 0x10D15: 68, + 0x10D16: 68, + 0x10D17: 68, + 0x10D18: 68, + 0x10D19: 68, + 0x10D1A: 68, + 0x10D1B: 68, + 0x10D1C: 68, + 0x10D1D: 68, + 0x10D1E: 68, + 0x10D1F: 68, + 0x10D20: 68, + 0x10D21: 68, + 0x10D22: 82, + 0x10D23: 68, + 0x10D24: 84, + 0x10D25: 84, + 0x10D26: 84, + 0x10D27: 84, + 0x10D69: 84, + 0x10D6A: 84, + 0x10D6B: 84, + 0x10D6C: 84, + 0x10D6D: 84, + 0x10EAB: 84, + 0x10EAC: 84, + 0x10EC2: 82, + 0x10EC3: 68, + 0x10EC4: 68, + 0x10EFC: 84, + 0x10EFD: 84, + 0x10EFE: 84, + 0x10EFF: 
84, + 0x10F30: 68, + 0x10F31: 68, + 0x10F32: 68, + 0x10F33: 82, + 0x10F34: 68, + 0x10F35: 68, + 0x10F36: 68, + 0x10F37: 68, + 0x10F38: 68, + 0x10F39: 68, + 0x10F3A: 68, + 0x10F3B: 68, + 0x10F3C: 68, + 0x10F3D: 68, + 0x10F3E: 68, + 0x10F3F: 68, + 0x10F40: 68, + 0x10F41: 68, + 0x10F42: 68, + 0x10F43: 68, + 0x10F44: 68, + 0x10F46: 84, + 0x10F47: 84, + 0x10F48: 84, + 0x10F49: 84, + 0x10F4A: 84, + 0x10F4B: 84, + 0x10F4C: 84, + 0x10F4D: 84, + 0x10F4E: 84, + 0x10F4F: 84, + 0x10F50: 84, + 0x10F51: 68, + 0x10F52: 68, + 0x10F53: 68, + 0x10F54: 82, + 0x10F70: 68, + 0x10F71: 68, + 0x10F72: 68, + 0x10F73: 68, + 0x10F74: 82, + 0x10F75: 82, + 0x10F76: 68, + 0x10F77: 68, + 0x10F78: 68, + 0x10F79: 68, + 0x10F7A: 68, + 0x10F7B: 68, + 0x10F7C: 68, + 0x10F7D: 68, + 0x10F7E: 68, + 0x10F7F: 68, + 0x10F80: 68, + 0x10F81: 68, + 0x10F82: 84, + 0x10F83: 84, + 0x10F84: 84, + 0x10F85: 84, + 0x10FB0: 68, + 0x10FB2: 68, + 0x10FB3: 68, + 0x10FB4: 82, + 0x10FB5: 82, + 0x10FB6: 82, + 0x10FB8: 68, + 0x10FB9: 82, + 0x10FBA: 82, + 0x10FBB: 68, + 0x10FBC: 68, + 0x10FBD: 82, + 0x10FBE: 68, + 0x10FBF: 68, + 0x10FC1: 68, + 0x10FC2: 82, + 0x10FC3: 82, + 0x10FC4: 68, + 0x10FC9: 82, + 0x10FCA: 68, + 0x10FCB: 76, + 0x11001: 84, + 0x11038: 84, + 0x11039: 84, + 0x1103A: 84, + 0x1103B: 84, + 0x1103C: 84, + 0x1103D: 84, + 0x1103E: 84, + 0x1103F: 84, + 0x11040: 84, + 0x11041: 84, + 0x11042: 84, + 0x11043: 84, + 0x11044: 84, + 0x11045: 84, + 0x11046: 84, + 0x11070: 84, + 0x11073: 84, + 0x11074: 84, + 0x1107F: 84, + 0x11080: 84, + 0x11081: 84, + 0x110B3: 84, + 0x110B4: 84, + 0x110B5: 84, + 0x110B6: 84, + 0x110B9: 84, + 0x110BA: 84, + 0x110C2: 84, + 0x11100: 84, + 0x11101: 84, + 0x11102: 84, + 0x11127: 84, + 0x11128: 84, + 0x11129: 84, + 0x1112A: 84, + 0x1112B: 84, + 0x1112D: 84, + 0x1112E: 84, + 0x1112F: 84, + 0x11130: 84, + 0x11131: 84, + 0x11132: 84, + 0x11133: 84, + 0x11134: 84, + 0x11173: 84, + 0x11180: 84, + 0x11181: 84, + 0x111B6: 84, + 0x111B7: 84, + 0x111B8: 84, + 0x111B9: 84, + 0x111BA: 84, + 0x111BB: 84, + 0x111BC: 84, + 0x111BD: 84, + 0x111BE: 84, + 0x111C9: 84, + 0x111CA: 84, + 0x111CB: 84, + 0x111CC: 84, + 0x111CF: 84, + 0x1122F: 84, + 0x11230: 84, + 0x11231: 84, + 0x11234: 84, + 0x11236: 84, + 0x11237: 84, + 0x1123E: 84, + 0x11241: 84, + 0x112DF: 84, + 0x112E3: 84, + 0x112E4: 84, + 0x112E5: 84, + 0x112E6: 84, + 0x112E7: 84, + 0x112E8: 84, + 0x112E9: 84, + 0x112EA: 84, + 0x11300: 84, + 0x11301: 84, + 0x1133B: 84, + 0x1133C: 84, + 0x11340: 84, + 0x11366: 84, + 0x11367: 84, + 0x11368: 84, + 0x11369: 84, + 0x1136A: 84, + 0x1136B: 84, + 0x1136C: 84, + 0x11370: 84, + 0x11371: 84, + 0x11372: 84, + 0x11373: 84, + 0x11374: 84, + 0x113BB: 84, + 0x113BC: 84, + 0x113BD: 84, + 0x113BE: 84, + 0x113BF: 84, + 0x113C0: 84, + 0x113CE: 84, + 0x113D0: 84, + 0x113D2: 84, + 0x113E1: 84, + 0x113E2: 84, + 0x11438: 84, + 0x11439: 84, + 0x1143A: 84, + 0x1143B: 84, + 0x1143C: 84, + 0x1143D: 84, + 0x1143E: 84, + 0x1143F: 84, + 0x11442: 84, + 0x11443: 84, + 0x11444: 84, + 0x11446: 84, + 0x1145E: 84, + 0x114B3: 84, + 0x114B4: 84, + 0x114B5: 84, + 0x114B6: 84, + 0x114B7: 84, + 0x114B8: 84, + 0x114BA: 84, + 0x114BF: 84, + 0x114C0: 84, + 0x114C2: 84, + 0x114C3: 84, + 0x115B2: 84, + 0x115B3: 84, + 0x115B4: 84, + 0x115B5: 84, + 0x115BC: 84, + 0x115BD: 84, + 0x115BF: 84, + 0x115C0: 84, + 0x115DC: 84, + 0x115DD: 84, + 0x11633: 84, + 0x11634: 84, + 0x11635: 84, + 0x11636: 84, + 0x11637: 84, + 0x11638: 84, + 0x11639: 84, + 0x1163A: 84, + 0x1163D: 84, + 0x1163F: 84, + 0x11640: 84, + 0x116AB: 84, + 0x116AD: 84, + 0x116B0: 84, + 0x116B1: 84, + 0x116B2: 84, + 0x116B3: 
84, + 0x116B4: 84, + 0x116B5: 84, + 0x116B7: 84, + 0x1171D: 84, + 0x1171F: 84, + 0x11722: 84, + 0x11723: 84, + 0x11724: 84, + 0x11725: 84, + 0x11727: 84, + 0x11728: 84, + 0x11729: 84, + 0x1172A: 84, + 0x1172B: 84, + 0x1182F: 84, + 0x11830: 84, + 0x11831: 84, + 0x11832: 84, + 0x11833: 84, + 0x11834: 84, + 0x11835: 84, + 0x11836: 84, + 0x11837: 84, + 0x11839: 84, + 0x1183A: 84, + 0x1193B: 84, + 0x1193C: 84, + 0x1193E: 84, + 0x11943: 84, + 0x119D4: 84, + 0x119D5: 84, + 0x119D6: 84, + 0x119D7: 84, + 0x119DA: 84, + 0x119DB: 84, + 0x119E0: 84, + 0x11A01: 84, + 0x11A02: 84, + 0x11A03: 84, + 0x11A04: 84, + 0x11A05: 84, + 0x11A06: 84, + 0x11A07: 84, + 0x11A08: 84, + 0x11A09: 84, + 0x11A0A: 84, + 0x11A33: 84, + 0x11A34: 84, + 0x11A35: 84, + 0x11A36: 84, + 0x11A37: 84, + 0x11A38: 84, + 0x11A3B: 84, + 0x11A3C: 84, + 0x11A3D: 84, + 0x11A3E: 84, + 0x11A47: 84, + 0x11A51: 84, + 0x11A52: 84, + 0x11A53: 84, + 0x11A54: 84, + 0x11A55: 84, + 0x11A56: 84, + 0x11A59: 84, + 0x11A5A: 84, + 0x11A5B: 84, + 0x11A8A: 84, + 0x11A8B: 84, + 0x11A8C: 84, + 0x11A8D: 84, + 0x11A8E: 84, + 0x11A8F: 84, + 0x11A90: 84, + 0x11A91: 84, + 0x11A92: 84, + 0x11A93: 84, + 0x11A94: 84, + 0x11A95: 84, + 0x11A96: 84, + 0x11A98: 84, + 0x11A99: 84, + 0x11C30: 84, + 0x11C31: 84, + 0x11C32: 84, + 0x11C33: 84, + 0x11C34: 84, + 0x11C35: 84, + 0x11C36: 84, + 0x11C38: 84, + 0x11C39: 84, + 0x11C3A: 84, + 0x11C3B: 84, + 0x11C3C: 84, + 0x11C3D: 84, + 0x11C3F: 84, + 0x11C92: 84, + 0x11C93: 84, + 0x11C94: 84, + 0x11C95: 84, + 0x11C96: 84, + 0x11C97: 84, + 0x11C98: 84, + 0x11C99: 84, + 0x11C9A: 84, + 0x11C9B: 84, + 0x11C9C: 84, + 0x11C9D: 84, + 0x11C9E: 84, + 0x11C9F: 84, + 0x11CA0: 84, + 0x11CA1: 84, + 0x11CA2: 84, + 0x11CA3: 84, + 0x11CA4: 84, + 0x11CA5: 84, + 0x11CA6: 84, + 0x11CA7: 84, + 0x11CAA: 84, + 0x11CAB: 84, + 0x11CAC: 84, + 0x11CAD: 84, + 0x11CAE: 84, + 0x11CAF: 84, + 0x11CB0: 84, + 0x11CB2: 84, + 0x11CB3: 84, + 0x11CB5: 84, + 0x11CB6: 84, + 0x11D31: 84, + 0x11D32: 84, + 0x11D33: 84, + 0x11D34: 84, + 0x11D35: 84, + 0x11D36: 84, + 0x11D3A: 84, + 0x11D3C: 84, + 0x11D3D: 84, + 0x11D3F: 84, + 0x11D40: 84, + 0x11D41: 84, + 0x11D42: 84, + 0x11D43: 84, + 0x11D44: 84, + 0x11D45: 84, + 0x11D47: 84, + 0x11D90: 84, + 0x11D91: 84, + 0x11D95: 84, + 0x11D97: 84, + 0x11EF3: 84, + 0x11EF4: 84, + 0x11F00: 84, + 0x11F01: 84, + 0x11F36: 84, + 0x11F37: 84, + 0x11F38: 84, + 0x11F39: 84, + 0x11F3A: 84, + 0x11F40: 84, + 0x11F42: 84, + 0x11F5A: 84, + 0x13430: 84, + 0x13431: 84, + 0x13432: 84, + 0x13433: 84, + 0x13434: 84, + 0x13435: 84, + 0x13436: 84, + 0x13437: 84, + 0x13438: 84, + 0x13439: 84, + 0x1343A: 84, + 0x1343B: 84, + 0x1343C: 84, + 0x1343D: 84, + 0x1343E: 84, + 0x1343F: 84, + 0x13440: 84, + 0x13447: 84, + 0x13448: 84, + 0x13449: 84, + 0x1344A: 84, + 0x1344B: 84, + 0x1344C: 84, + 0x1344D: 84, + 0x1344E: 84, + 0x1344F: 84, + 0x13450: 84, + 0x13451: 84, + 0x13452: 84, + 0x13453: 84, + 0x13454: 84, + 0x13455: 84, + 0x1611E: 84, + 0x1611F: 84, + 0x16120: 84, + 0x16121: 84, + 0x16122: 84, + 0x16123: 84, + 0x16124: 84, + 0x16125: 84, + 0x16126: 84, + 0x16127: 84, + 0x16128: 84, + 0x16129: 84, + 0x1612D: 84, + 0x1612E: 84, + 0x1612F: 84, + 0x16AF0: 84, + 0x16AF1: 84, + 0x16AF2: 84, + 0x16AF3: 84, + 0x16AF4: 84, + 0x16B30: 84, + 0x16B31: 84, + 0x16B32: 84, + 0x16B33: 84, + 0x16B34: 84, + 0x16B35: 84, + 0x16B36: 84, + 0x16F4F: 84, + 0x16F8F: 84, + 0x16F90: 84, + 0x16F91: 84, + 0x16F92: 84, + 0x16FE4: 84, + 0x1BC9D: 84, + 0x1BC9E: 84, + 0x1BCA0: 84, + 0x1BCA1: 84, + 0x1BCA2: 84, + 0x1BCA3: 84, + 0x1CF00: 84, + 0x1CF01: 84, + 0x1CF02: 84, + 0x1CF03: 84, + 0x1CF04: 
84, + 0x1CF05: 84, + 0x1CF06: 84, + 0x1CF07: 84, + 0x1CF08: 84, + 0x1CF09: 84, + 0x1CF0A: 84, + 0x1CF0B: 84, + 0x1CF0C: 84, + 0x1CF0D: 84, + 0x1CF0E: 84, + 0x1CF0F: 84, + 0x1CF10: 84, + 0x1CF11: 84, + 0x1CF12: 84, + 0x1CF13: 84, + 0x1CF14: 84, + 0x1CF15: 84, + 0x1CF16: 84, + 0x1CF17: 84, + 0x1CF18: 84, + 0x1CF19: 84, + 0x1CF1A: 84, + 0x1CF1B: 84, + 0x1CF1C: 84, + 0x1CF1D: 84, + 0x1CF1E: 84, + 0x1CF1F: 84, + 0x1CF20: 84, + 0x1CF21: 84, + 0x1CF22: 84, + 0x1CF23: 84, + 0x1CF24: 84, + 0x1CF25: 84, + 0x1CF26: 84, + 0x1CF27: 84, + 0x1CF28: 84, + 0x1CF29: 84, + 0x1CF2A: 84, + 0x1CF2B: 84, + 0x1CF2C: 84, + 0x1CF2D: 84, + 0x1CF30: 84, + 0x1CF31: 84, + 0x1CF32: 84, + 0x1CF33: 84, + 0x1CF34: 84, + 0x1CF35: 84, + 0x1CF36: 84, + 0x1CF37: 84, + 0x1CF38: 84, + 0x1CF39: 84, + 0x1CF3A: 84, + 0x1CF3B: 84, + 0x1CF3C: 84, + 0x1CF3D: 84, + 0x1CF3E: 84, + 0x1CF3F: 84, + 0x1CF40: 84, + 0x1CF41: 84, + 0x1CF42: 84, + 0x1CF43: 84, + 0x1CF44: 84, + 0x1CF45: 84, + 0x1CF46: 84, + 0x1D167: 84, + 0x1D168: 84, + 0x1D169: 84, + 0x1D173: 84, + 0x1D174: 84, + 0x1D175: 84, + 0x1D176: 84, + 0x1D177: 84, + 0x1D178: 84, + 0x1D179: 84, + 0x1D17A: 84, + 0x1D17B: 84, + 0x1D17C: 84, + 0x1D17D: 84, + 0x1D17E: 84, + 0x1D17F: 84, + 0x1D180: 84, + 0x1D181: 84, + 0x1D182: 84, + 0x1D185: 84, + 0x1D186: 84, + 0x1D187: 84, + 0x1D188: 84, + 0x1D189: 84, + 0x1D18A: 84, + 0x1D18B: 84, + 0x1D1AA: 84, + 0x1D1AB: 84, + 0x1D1AC: 84, + 0x1D1AD: 84, + 0x1D242: 84, + 0x1D243: 84, + 0x1D244: 84, + 0x1DA00: 84, + 0x1DA01: 84, + 0x1DA02: 84, + 0x1DA03: 84, + 0x1DA04: 84, + 0x1DA05: 84, + 0x1DA06: 84, + 0x1DA07: 84, + 0x1DA08: 84, + 0x1DA09: 84, + 0x1DA0A: 84, + 0x1DA0B: 84, + 0x1DA0C: 84, + 0x1DA0D: 84, + 0x1DA0E: 84, + 0x1DA0F: 84, + 0x1DA10: 84, + 0x1DA11: 84, + 0x1DA12: 84, + 0x1DA13: 84, + 0x1DA14: 84, + 0x1DA15: 84, + 0x1DA16: 84, + 0x1DA17: 84, + 0x1DA18: 84, + 0x1DA19: 84, + 0x1DA1A: 84, + 0x1DA1B: 84, + 0x1DA1C: 84, + 0x1DA1D: 84, + 0x1DA1E: 84, + 0x1DA1F: 84, + 0x1DA20: 84, + 0x1DA21: 84, + 0x1DA22: 84, + 0x1DA23: 84, + 0x1DA24: 84, + 0x1DA25: 84, + 0x1DA26: 84, + 0x1DA27: 84, + 0x1DA28: 84, + 0x1DA29: 84, + 0x1DA2A: 84, + 0x1DA2B: 84, + 0x1DA2C: 84, + 0x1DA2D: 84, + 0x1DA2E: 84, + 0x1DA2F: 84, + 0x1DA30: 84, + 0x1DA31: 84, + 0x1DA32: 84, + 0x1DA33: 84, + 0x1DA34: 84, + 0x1DA35: 84, + 0x1DA36: 84, + 0x1DA3B: 84, + 0x1DA3C: 84, + 0x1DA3D: 84, + 0x1DA3E: 84, + 0x1DA3F: 84, + 0x1DA40: 84, + 0x1DA41: 84, + 0x1DA42: 84, + 0x1DA43: 84, + 0x1DA44: 84, + 0x1DA45: 84, + 0x1DA46: 84, + 0x1DA47: 84, + 0x1DA48: 84, + 0x1DA49: 84, + 0x1DA4A: 84, + 0x1DA4B: 84, + 0x1DA4C: 84, + 0x1DA4D: 84, + 0x1DA4E: 84, + 0x1DA4F: 84, + 0x1DA50: 84, + 0x1DA51: 84, + 0x1DA52: 84, + 0x1DA53: 84, + 0x1DA54: 84, + 0x1DA55: 84, + 0x1DA56: 84, + 0x1DA57: 84, + 0x1DA58: 84, + 0x1DA59: 84, + 0x1DA5A: 84, + 0x1DA5B: 84, + 0x1DA5C: 84, + 0x1DA5D: 84, + 0x1DA5E: 84, + 0x1DA5F: 84, + 0x1DA60: 84, + 0x1DA61: 84, + 0x1DA62: 84, + 0x1DA63: 84, + 0x1DA64: 84, + 0x1DA65: 84, + 0x1DA66: 84, + 0x1DA67: 84, + 0x1DA68: 84, + 0x1DA69: 84, + 0x1DA6A: 84, + 0x1DA6B: 84, + 0x1DA6C: 84, + 0x1DA75: 84, + 0x1DA84: 84, + 0x1DA9B: 84, + 0x1DA9C: 84, + 0x1DA9D: 84, + 0x1DA9E: 84, + 0x1DA9F: 84, + 0x1DAA1: 84, + 0x1DAA2: 84, + 0x1DAA3: 84, + 0x1DAA4: 84, + 0x1DAA5: 84, + 0x1DAA6: 84, + 0x1DAA7: 84, + 0x1DAA8: 84, + 0x1DAA9: 84, + 0x1DAAA: 84, + 0x1DAAB: 84, + 0x1DAAC: 84, + 0x1DAAD: 84, + 0x1DAAE: 84, + 0x1DAAF: 84, + 0x1E000: 84, + 0x1E001: 84, + 0x1E002: 84, + 0x1E003: 84, + 0x1E004: 84, + 0x1E005: 84, + 0x1E006: 84, + 0x1E008: 84, + 0x1E009: 84, + 0x1E00A: 84, + 0x1E00B: 84, + 0x1E00C: 84, + 0x1E00D: 
84, + 0x1E00E: 84, + 0x1E00F: 84, + 0x1E010: 84, + 0x1E011: 84, + 0x1E012: 84, + 0x1E013: 84, + 0x1E014: 84, + 0x1E015: 84, + 0x1E016: 84, + 0x1E017: 84, + 0x1E018: 84, + 0x1E01B: 84, + 0x1E01C: 84, + 0x1E01D: 84, + 0x1E01E: 84, + 0x1E01F: 84, + 0x1E020: 84, + 0x1E021: 84, + 0x1E023: 84, + 0x1E024: 84, + 0x1E026: 84, + 0x1E027: 84, + 0x1E028: 84, + 0x1E029: 84, + 0x1E02A: 84, + 0x1E08F: 84, + 0x1E130: 84, + 0x1E131: 84, + 0x1E132: 84, + 0x1E133: 84, + 0x1E134: 84, + 0x1E135: 84, + 0x1E136: 84, + 0x1E2AE: 84, + 0x1E2EC: 84, + 0x1E2ED: 84, + 0x1E2EE: 84, + 0x1E2EF: 84, + 0x1E4EC: 84, + 0x1E4ED: 84, + 0x1E4EE: 84, + 0x1E4EF: 84, + 0x1E5EE: 84, + 0x1E5EF: 84, + 0x1E8D0: 84, + 0x1E8D1: 84, + 0x1E8D2: 84, + 0x1E8D3: 84, + 0x1E8D4: 84, + 0x1E8D5: 84, + 0x1E8D6: 84, + 0x1E900: 68, + 0x1E901: 68, + 0x1E902: 68, + 0x1E903: 68, + 0x1E904: 68, + 0x1E905: 68, + 0x1E906: 68, + 0x1E907: 68, + 0x1E908: 68, + 0x1E909: 68, + 0x1E90A: 68, + 0x1E90B: 68, + 0x1E90C: 68, + 0x1E90D: 68, + 0x1E90E: 68, + 0x1E90F: 68, + 0x1E910: 68, + 0x1E911: 68, + 0x1E912: 68, + 0x1E913: 68, + 0x1E914: 68, + 0x1E915: 68, + 0x1E916: 68, + 0x1E917: 68, + 0x1E918: 68, + 0x1E919: 68, + 0x1E91A: 68, + 0x1E91B: 68, + 0x1E91C: 68, + 0x1E91D: 68, + 0x1E91E: 68, + 0x1E91F: 68, + 0x1E920: 68, + 0x1E921: 68, + 0x1E922: 68, + 0x1E923: 68, + 0x1E924: 68, + 0x1E925: 68, + 0x1E926: 68, + 0x1E927: 68, + 0x1E928: 68, + 0x1E929: 68, + 0x1E92A: 68, + 0x1E92B: 68, + 0x1E92C: 68, + 0x1E92D: 68, + 0x1E92E: 68, + 0x1E92F: 68, + 0x1E930: 68, + 0x1E931: 68, + 0x1E932: 68, + 0x1E933: 68, + 0x1E934: 68, + 0x1E935: 68, + 0x1E936: 68, + 0x1E937: 68, + 0x1E938: 68, + 0x1E939: 68, + 0x1E93A: 68, + 0x1E93B: 68, + 0x1E93C: 68, + 0x1E93D: 68, + 0x1E93E: 68, + 0x1E93F: 68, + 0x1E940: 68, + 0x1E941: 68, + 0x1E942: 68, + 0x1E943: 68, + 0x1E944: 84, + 0x1E945: 84, + 0x1E946: 84, + 0x1E947: 84, + 0x1E948: 84, + 0x1E949: 84, + 0x1E94A: 84, + 0x1E94B: 84, + 0xE0001: 84, + 0xE0020: 84, + 0xE0021: 84, + 0xE0022: 84, + 0xE0023: 84, + 0xE0024: 84, + 0xE0025: 84, + 0xE0026: 84, + 0xE0027: 84, + 0xE0028: 84, + 0xE0029: 84, + 0xE002A: 84, + 0xE002B: 84, + 0xE002C: 84, + 0xE002D: 84, + 0xE002E: 84, + 0xE002F: 84, + 0xE0030: 84, + 0xE0031: 84, + 0xE0032: 84, + 0xE0033: 84, + 0xE0034: 84, + 0xE0035: 84, + 0xE0036: 84, + 0xE0037: 84, + 0xE0038: 84, + 0xE0039: 84, + 0xE003A: 84, + 0xE003B: 84, + 0xE003C: 84, + 0xE003D: 84, + 0xE003E: 84, + 0xE003F: 84, + 0xE0040: 84, + 0xE0041: 84, + 0xE0042: 84, + 0xE0043: 84, + 0xE0044: 84, + 0xE0045: 84, + 0xE0046: 84, + 0xE0047: 84, + 0xE0048: 84, + 0xE0049: 84, + 0xE004A: 84, + 0xE004B: 84, + 0xE004C: 84, + 0xE004D: 84, + 0xE004E: 84, + 0xE004F: 84, + 0xE0050: 84, + 0xE0051: 84, + 0xE0052: 84, + 0xE0053: 84, + 0xE0054: 84, + 0xE0055: 84, + 0xE0056: 84, + 0xE0057: 84, + 0xE0058: 84, + 0xE0059: 84, + 0xE005A: 84, + 0xE005B: 84, + 0xE005C: 84, + 0xE005D: 84, + 0xE005E: 84, + 0xE005F: 84, + 0xE0060: 84, + 0xE0061: 84, + 0xE0062: 84, + 0xE0063: 84, + 0xE0064: 84, + 0xE0065: 84, + 0xE0066: 84, + 0xE0067: 84, + 0xE0068: 84, + 0xE0069: 84, + 0xE006A: 84, + 0xE006B: 84, + 0xE006C: 84, + 0xE006D: 84, + 0xE006E: 84, + 0xE006F: 84, + 0xE0070: 84, + 0xE0071: 84, + 0xE0072: 84, + 0xE0073: 84, + 0xE0074: 84, + 0xE0075: 84, + 0xE0076: 84, + 0xE0077: 84, + 0xE0078: 84, + 0xE0079: 84, + 0xE007A: 84, + 0xE007B: 84, + 0xE007C: 84, + 0xE007D: 84, + 0xE007E: 84, + 0xE007F: 84, + 0xE0100: 84, + 0xE0101: 84, + 0xE0102: 84, + 0xE0103: 84, + 0xE0104: 84, + 0xE0105: 84, + 0xE0106: 84, + 0xE0107: 84, + 0xE0108: 84, + 0xE0109: 84, + 0xE010A: 84, + 0xE010B: 84, + 0xE010C: 
84, + 0xE010D: 84, + 0xE010E: 84, + 0xE010F: 84, + 0xE0110: 84, + 0xE0111: 84, + 0xE0112: 84, + 0xE0113: 84, + 0xE0114: 84, + 0xE0115: 84, + 0xE0116: 84, + 0xE0117: 84, + 0xE0118: 84, + 0xE0119: 84, + 0xE011A: 84, + 0xE011B: 84, + 0xE011C: 84, + 0xE011D: 84, + 0xE011E: 84, + 0xE011F: 84, + 0xE0120: 84, + 0xE0121: 84, + 0xE0122: 84, + 0xE0123: 84, + 0xE0124: 84, + 0xE0125: 84, + 0xE0126: 84, + 0xE0127: 84, + 0xE0128: 84, + 0xE0129: 84, + 0xE012A: 84, + 0xE012B: 84, + 0xE012C: 84, + 0xE012D: 84, + 0xE012E: 84, + 0xE012F: 84, + 0xE0130: 84, + 0xE0131: 84, + 0xE0132: 84, + 0xE0133: 84, + 0xE0134: 84, + 0xE0135: 84, + 0xE0136: 84, + 0xE0137: 84, + 0xE0138: 84, + 0xE0139: 84, + 0xE013A: 84, + 0xE013B: 84, + 0xE013C: 84, + 0xE013D: 84, + 0xE013E: 84, + 0xE013F: 84, + 0xE0140: 84, + 0xE0141: 84, + 0xE0142: 84, + 0xE0143: 84, + 0xE0144: 84, + 0xE0145: 84, + 0xE0146: 84, + 0xE0147: 84, + 0xE0148: 84, + 0xE0149: 84, + 0xE014A: 84, + 0xE014B: 84, + 0xE014C: 84, + 0xE014D: 84, + 0xE014E: 84, + 0xE014F: 84, + 0xE0150: 84, + 0xE0151: 84, + 0xE0152: 84, + 0xE0153: 84, + 0xE0154: 84, + 0xE0155: 84, + 0xE0156: 84, + 0xE0157: 84, + 0xE0158: 84, + 0xE0159: 84, + 0xE015A: 84, + 0xE015B: 84, + 0xE015C: 84, + 0xE015D: 84, + 0xE015E: 84, + 0xE015F: 84, + 0xE0160: 84, + 0xE0161: 84, + 0xE0162: 84, + 0xE0163: 84, + 0xE0164: 84, + 0xE0165: 84, + 0xE0166: 84, + 0xE0167: 84, + 0xE0168: 84, + 0xE0169: 84, + 0xE016A: 84, + 0xE016B: 84, + 0xE016C: 84, + 0xE016D: 84, + 0xE016E: 84, + 0xE016F: 84, + 0xE0170: 84, + 0xE0171: 84, + 0xE0172: 84, + 0xE0173: 84, + 0xE0174: 84, + 0xE0175: 84, + 0xE0176: 84, + 0xE0177: 84, + 0xE0178: 84, + 0xE0179: 84, + 0xE017A: 84, + 0xE017B: 84, + 0xE017C: 84, + 0xE017D: 84, + 0xE017E: 84, + 0xE017F: 84, + 0xE0180: 84, + 0xE0181: 84, + 0xE0182: 84, + 0xE0183: 84, + 0xE0184: 84, + 0xE0185: 84, + 0xE0186: 84, + 0xE0187: 84, + 0xE0188: 84, + 0xE0189: 84, + 0xE018A: 84, + 0xE018B: 84, + 0xE018C: 84, + 0xE018D: 84, + 0xE018E: 84, + 0xE018F: 84, + 0xE0190: 84, + 0xE0191: 84, + 0xE0192: 84, + 0xE0193: 84, + 0xE0194: 84, + 0xE0195: 84, + 0xE0196: 84, + 0xE0197: 84, + 0xE0198: 84, + 0xE0199: 84, + 0xE019A: 84, + 0xE019B: 84, + 0xE019C: 84, + 0xE019D: 84, + 0xE019E: 84, + 0xE019F: 84, + 0xE01A0: 84, + 0xE01A1: 84, + 0xE01A2: 84, + 0xE01A3: 84, + 0xE01A4: 84, + 0xE01A5: 84, + 0xE01A6: 84, + 0xE01A7: 84, + 0xE01A8: 84, + 0xE01A9: 84, + 0xE01AA: 84, + 0xE01AB: 84, + 0xE01AC: 84, + 0xE01AD: 84, + 0xE01AE: 84, + 0xE01AF: 84, + 0xE01B0: 84, + 0xE01B1: 84, + 0xE01B2: 84, + 0xE01B3: 84, + 0xE01B4: 84, + 0xE01B5: 84, + 0xE01B6: 84, + 0xE01B7: 84, + 0xE01B8: 84, + 0xE01B9: 84, + 0xE01BA: 84, + 0xE01BB: 84, + 0xE01BC: 84, + 0xE01BD: 84, + 0xE01BE: 84, + 0xE01BF: 84, + 0xE01C0: 84, + 0xE01C1: 84, + 0xE01C2: 84, + 0xE01C3: 84, + 0xE01C4: 84, + 0xE01C5: 84, + 0xE01C6: 84, + 0xE01C7: 84, + 0xE01C8: 84, + 0xE01C9: 84, + 0xE01CA: 84, + 0xE01CB: 84, + 0xE01CC: 84, + 0xE01CD: 84, + 0xE01CE: 84, + 0xE01CF: 84, + 0xE01D0: 84, + 0xE01D1: 84, + 0xE01D2: 84, + 0xE01D3: 84, + 0xE01D4: 84, + 0xE01D5: 84, + 0xE01D6: 84, + 0xE01D7: 84, + 0xE01D8: 84, + 0xE01D9: 84, + 0xE01DA: 84, + 0xE01DB: 84, + 0xE01DC: 84, + 0xE01DD: 84, + 0xE01DE: 84, + 0xE01DF: 84, + 0xE01E0: 84, + 0xE01E1: 84, + 0xE01E2: 84, + 0xE01E3: 84, + 0xE01E4: 84, + 0xE01E5: 84, + 0xE01E6: 84, + 0xE01E7: 84, + 0xE01E8: 84, + 0xE01E9: 84, + 0xE01EA: 84, + 0xE01EB: 84, + 0xE01EC: 84, + 0xE01ED: 84, + 0xE01EE: 84, + 0xE01EF: 84, +} +codepoint_classes = { + "PVALID": ( + 0x2D0000002E, + 0x300000003A, + 0x610000007B, + 0xDF000000F7, + 0xF800000100, + 0x10100000102, + 
0x10300000104, + 0x10500000106, + 0x10700000108, + 0x1090000010A, + 0x10B0000010C, + 0x10D0000010E, + 0x10F00000110, + 0x11100000112, + 0x11300000114, + 0x11500000116, + 0x11700000118, + 0x1190000011A, + 0x11B0000011C, + 0x11D0000011E, + 0x11F00000120, + 0x12100000122, + 0x12300000124, + 0x12500000126, + 0x12700000128, + 0x1290000012A, + 0x12B0000012C, + 0x12D0000012E, + 0x12F00000130, + 0x13100000132, + 0x13500000136, + 0x13700000139, + 0x13A0000013B, + 0x13C0000013D, + 0x13E0000013F, + 0x14200000143, + 0x14400000145, + 0x14600000147, + 0x14800000149, + 0x14B0000014C, + 0x14D0000014E, + 0x14F00000150, + 0x15100000152, + 0x15300000154, + 0x15500000156, + 0x15700000158, + 0x1590000015A, + 0x15B0000015C, + 0x15D0000015E, + 0x15F00000160, + 0x16100000162, + 0x16300000164, + 0x16500000166, + 0x16700000168, + 0x1690000016A, + 0x16B0000016C, + 0x16D0000016E, + 0x16F00000170, + 0x17100000172, + 0x17300000174, + 0x17500000176, + 0x17700000178, + 0x17A0000017B, + 0x17C0000017D, + 0x17E0000017F, + 0x18000000181, + 0x18300000184, + 0x18500000186, + 0x18800000189, + 0x18C0000018E, + 0x19200000193, + 0x19500000196, + 0x1990000019C, + 0x19E0000019F, + 0x1A1000001A2, + 0x1A3000001A4, + 0x1A5000001A6, + 0x1A8000001A9, + 0x1AA000001AC, + 0x1AD000001AE, + 0x1B0000001B1, + 0x1B4000001B5, + 0x1B6000001B7, + 0x1B9000001BC, + 0x1BD000001C4, + 0x1CE000001CF, + 0x1D0000001D1, + 0x1D2000001D3, + 0x1D4000001D5, + 0x1D6000001D7, + 0x1D8000001D9, + 0x1DA000001DB, + 0x1DC000001DE, + 0x1DF000001E0, + 0x1E1000001E2, + 0x1E3000001E4, + 0x1E5000001E6, + 0x1E7000001E8, + 0x1E9000001EA, + 0x1EB000001EC, + 0x1ED000001EE, + 0x1EF000001F1, + 0x1F5000001F6, + 0x1F9000001FA, + 0x1FB000001FC, + 0x1FD000001FE, + 0x1FF00000200, + 0x20100000202, + 0x20300000204, + 0x20500000206, + 0x20700000208, + 0x2090000020A, + 0x20B0000020C, + 0x20D0000020E, + 0x20F00000210, + 0x21100000212, + 0x21300000214, + 0x21500000216, + 0x21700000218, + 0x2190000021A, + 0x21B0000021C, + 0x21D0000021E, + 0x21F00000220, + 0x22100000222, + 0x22300000224, + 0x22500000226, + 0x22700000228, + 0x2290000022A, + 0x22B0000022C, + 0x22D0000022E, + 0x22F00000230, + 0x23100000232, + 0x2330000023A, + 0x23C0000023D, + 0x23F00000241, + 0x24200000243, + 0x24700000248, + 0x2490000024A, + 0x24B0000024C, + 0x24D0000024E, + 0x24F000002B0, + 0x2B9000002C2, + 0x2C6000002D2, + 0x2EC000002ED, + 0x2EE000002EF, + 0x30000000340, + 0x34200000343, + 0x3460000034F, + 0x35000000370, + 0x37100000372, + 0x37300000374, + 0x37700000378, + 0x37B0000037E, + 0x39000000391, + 0x3AC000003CF, + 0x3D7000003D8, + 0x3D9000003DA, + 0x3DB000003DC, + 0x3DD000003DE, + 0x3DF000003E0, + 0x3E1000003E2, + 0x3E3000003E4, + 0x3E5000003E6, + 0x3E7000003E8, + 0x3E9000003EA, + 0x3EB000003EC, + 0x3ED000003EE, + 0x3EF000003F0, + 0x3F3000003F4, + 0x3F8000003F9, + 0x3FB000003FD, + 0x43000000460, + 0x46100000462, + 0x46300000464, + 0x46500000466, + 0x46700000468, + 0x4690000046A, + 0x46B0000046C, + 0x46D0000046E, + 0x46F00000470, + 0x47100000472, + 0x47300000474, + 0x47500000476, + 0x47700000478, + 0x4790000047A, + 0x47B0000047C, + 0x47D0000047E, + 0x47F00000480, + 0x48100000482, + 0x48300000488, + 0x48B0000048C, + 0x48D0000048E, + 0x48F00000490, + 0x49100000492, + 0x49300000494, + 0x49500000496, + 0x49700000498, + 0x4990000049A, + 0x49B0000049C, + 0x49D0000049E, + 0x49F000004A0, + 0x4A1000004A2, + 0x4A3000004A4, + 0x4A5000004A6, + 0x4A7000004A8, + 0x4A9000004AA, + 0x4AB000004AC, + 0x4AD000004AE, + 0x4AF000004B0, + 0x4B1000004B2, + 0x4B3000004B4, + 0x4B5000004B6, + 0x4B7000004B8, + 0x4B9000004BA, + 0x4BB000004BC, + 
0x4BD000004BE, + 0x4BF000004C0, + 0x4C2000004C3, + 0x4C4000004C5, + 0x4C6000004C7, + 0x4C8000004C9, + 0x4CA000004CB, + 0x4CC000004CD, + 0x4CE000004D0, + 0x4D1000004D2, + 0x4D3000004D4, + 0x4D5000004D6, + 0x4D7000004D8, + 0x4D9000004DA, + 0x4DB000004DC, + 0x4DD000004DE, + 0x4DF000004E0, + 0x4E1000004E2, + 0x4E3000004E4, + 0x4E5000004E6, + 0x4E7000004E8, + 0x4E9000004EA, + 0x4EB000004EC, + 0x4ED000004EE, + 0x4EF000004F0, + 0x4F1000004F2, + 0x4F3000004F4, + 0x4F5000004F6, + 0x4F7000004F8, + 0x4F9000004FA, + 0x4FB000004FC, + 0x4FD000004FE, + 0x4FF00000500, + 0x50100000502, + 0x50300000504, + 0x50500000506, + 0x50700000508, + 0x5090000050A, + 0x50B0000050C, + 0x50D0000050E, + 0x50F00000510, + 0x51100000512, + 0x51300000514, + 0x51500000516, + 0x51700000518, + 0x5190000051A, + 0x51B0000051C, + 0x51D0000051E, + 0x51F00000520, + 0x52100000522, + 0x52300000524, + 0x52500000526, + 0x52700000528, + 0x5290000052A, + 0x52B0000052C, + 0x52D0000052E, + 0x52F00000530, + 0x5590000055A, + 0x56000000587, + 0x58800000589, + 0x591000005BE, + 0x5BF000005C0, + 0x5C1000005C3, + 0x5C4000005C6, + 0x5C7000005C8, + 0x5D0000005EB, + 0x5EF000005F3, + 0x6100000061B, + 0x62000000640, + 0x64100000660, + 0x66E00000675, + 0x679000006D4, + 0x6D5000006DD, + 0x6DF000006E9, + 0x6EA000006F0, + 0x6FA00000700, + 0x7100000074B, + 0x74D000007B2, + 0x7C0000007F6, + 0x7FD000007FE, + 0x8000000082E, + 0x8400000085C, + 0x8600000086B, + 0x87000000888, + 0x8890000088F, + 0x897000008E2, + 0x8E300000958, + 0x96000000964, + 0x96600000970, + 0x97100000984, + 0x9850000098D, + 0x98F00000991, + 0x993000009A9, + 0x9AA000009B1, + 0x9B2000009B3, + 0x9B6000009BA, + 0x9BC000009C5, + 0x9C7000009C9, + 0x9CB000009CF, + 0x9D7000009D8, + 0x9E0000009E4, + 0x9E6000009F2, + 0x9FC000009FD, + 0x9FE000009FF, + 0xA0100000A04, + 0xA0500000A0B, + 0xA0F00000A11, + 0xA1300000A29, + 0xA2A00000A31, + 0xA3200000A33, + 0xA3500000A36, + 0xA3800000A3A, + 0xA3C00000A3D, + 0xA3E00000A43, + 0xA4700000A49, + 0xA4B00000A4E, + 0xA5100000A52, + 0xA5C00000A5D, + 0xA6600000A76, + 0xA8100000A84, + 0xA8500000A8E, + 0xA8F00000A92, + 0xA9300000AA9, + 0xAAA00000AB1, + 0xAB200000AB4, + 0xAB500000ABA, + 0xABC00000AC6, + 0xAC700000ACA, + 0xACB00000ACE, + 0xAD000000AD1, + 0xAE000000AE4, + 0xAE600000AF0, + 0xAF900000B00, + 0xB0100000B04, + 0xB0500000B0D, + 0xB0F00000B11, + 0xB1300000B29, + 0xB2A00000B31, + 0xB3200000B34, + 0xB3500000B3A, + 0xB3C00000B45, + 0xB4700000B49, + 0xB4B00000B4E, + 0xB5500000B58, + 0xB5F00000B64, + 0xB6600000B70, + 0xB7100000B72, + 0xB8200000B84, + 0xB8500000B8B, + 0xB8E00000B91, + 0xB9200000B96, + 0xB9900000B9B, + 0xB9C00000B9D, + 0xB9E00000BA0, + 0xBA300000BA5, + 0xBA800000BAB, + 0xBAE00000BBA, + 0xBBE00000BC3, + 0xBC600000BC9, + 0xBCA00000BCE, + 0xBD000000BD1, + 0xBD700000BD8, + 0xBE600000BF0, + 0xC0000000C0D, + 0xC0E00000C11, + 0xC1200000C29, + 0xC2A00000C3A, + 0xC3C00000C45, + 0xC4600000C49, + 0xC4A00000C4E, + 0xC5500000C57, + 0xC5800000C5B, + 0xC5D00000C5E, + 0xC6000000C64, + 0xC6600000C70, + 0xC8000000C84, + 0xC8500000C8D, + 0xC8E00000C91, + 0xC9200000CA9, + 0xCAA00000CB4, + 0xCB500000CBA, + 0xCBC00000CC5, + 0xCC600000CC9, + 0xCCA00000CCE, + 0xCD500000CD7, + 0xCDD00000CDF, + 0xCE000000CE4, + 0xCE600000CF0, + 0xCF100000CF4, + 0xD0000000D0D, + 0xD0E00000D11, + 0xD1200000D45, + 0xD4600000D49, + 0xD4A00000D4F, + 0xD5400000D58, + 0xD5F00000D64, + 0xD6600000D70, + 0xD7A00000D80, + 0xD8100000D84, + 0xD8500000D97, + 0xD9A00000DB2, + 0xDB300000DBC, + 0xDBD00000DBE, + 0xDC000000DC7, + 0xDCA00000DCB, + 0xDCF00000DD5, + 0xDD600000DD7, + 0xDD800000DE0, + 0xDE600000DF0, + 
0xDF200000DF4, + 0xE0100000E33, + 0xE3400000E3B, + 0xE4000000E4F, + 0xE5000000E5A, + 0xE8100000E83, + 0xE8400000E85, + 0xE8600000E8B, + 0xE8C00000EA4, + 0xEA500000EA6, + 0xEA700000EB3, + 0xEB400000EBE, + 0xEC000000EC5, + 0xEC600000EC7, + 0xEC800000ECF, + 0xED000000EDA, + 0xEDE00000EE0, + 0xF0000000F01, + 0xF0B00000F0C, + 0xF1800000F1A, + 0xF2000000F2A, + 0xF3500000F36, + 0xF3700000F38, + 0xF3900000F3A, + 0xF3E00000F43, + 0xF4400000F48, + 0xF4900000F4D, + 0xF4E00000F52, + 0xF5300000F57, + 0xF5800000F5C, + 0xF5D00000F69, + 0xF6A00000F6D, + 0xF7100000F73, + 0xF7400000F75, + 0xF7A00000F81, + 0xF8200000F85, + 0xF8600000F93, + 0xF9400000F98, + 0xF9900000F9D, + 0xF9E00000FA2, + 0xFA300000FA7, + 0xFA800000FAC, + 0xFAD00000FB9, + 0xFBA00000FBD, + 0xFC600000FC7, + 0x10000000104A, + 0x10500000109E, + 0x10D0000010FB, + 0x10FD00001100, + 0x120000001249, + 0x124A0000124E, + 0x125000001257, + 0x125800001259, + 0x125A0000125E, + 0x126000001289, + 0x128A0000128E, + 0x1290000012B1, + 0x12B2000012B6, + 0x12B8000012BF, + 0x12C0000012C1, + 0x12C2000012C6, + 0x12C8000012D7, + 0x12D800001311, + 0x131200001316, + 0x13180000135B, + 0x135D00001360, + 0x138000001390, + 0x13A0000013F6, + 0x14010000166D, + 0x166F00001680, + 0x16810000169B, + 0x16A0000016EB, + 0x16F1000016F9, + 0x170000001716, + 0x171F00001735, + 0x174000001754, + 0x17600000176D, + 0x176E00001771, + 0x177200001774, + 0x1780000017B4, + 0x17B6000017D4, + 0x17D7000017D8, + 0x17DC000017DE, + 0x17E0000017EA, + 0x18100000181A, + 0x182000001879, + 0x1880000018AB, + 0x18B0000018F6, + 0x19000000191F, + 0x19200000192C, + 0x19300000193C, + 0x19460000196E, + 0x197000001975, + 0x1980000019AC, + 0x19B0000019CA, + 0x19D0000019DA, + 0x1A0000001A1C, + 0x1A2000001A5F, + 0x1A6000001A7D, + 0x1A7F00001A8A, + 0x1A9000001A9A, + 0x1AA700001AA8, + 0x1AB000001ABE, + 0x1ABF00001ACF, + 0x1B0000001B4D, + 0x1B5000001B5A, + 0x1B6B00001B74, + 0x1B8000001BF4, + 0x1C0000001C38, + 0x1C4000001C4A, + 0x1C4D00001C7E, + 0x1C8A00001C8B, + 0x1CD000001CD3, + 0x1CD400001CFB, + 0x1D0000001D2C, + 0x1D2F00001D30, + 0x1D3B00001D3C, + 0x1D4E00001D4F, + 0x1D6B00001D78, + 0x1D7900001D9B, + 0x1DC000001E00, + 0x1E0100001E02, + 0x1E0300001E04, + 0x1E0500001E06, + 0x1E0700001E08, + 0x1E0900001E0A, + 0x1E0B00001E0C, + 0x1E0D00001E0E, + 0x1E0F00001E10, + 0x1E1100001E12, + 0x1E1300001E14, + 0x1E1500001E16, + 0x1E1700001E18, + 0x1E1900001E1A, + 0x1E1B00001E1C, + 0x1E1D00001E1E, + 0x1E1F00001E20, + 0x1E2100001E22, + 0x1E2300001E24, + 0x1E2500001E26, + 0x1E2700001E28, + 0x1E2900001E2A, + 0x1E2B00001E2C, + 0x1E2D00001E2E, + 0x1E2F00001E30, + 0x1E3100001E32, + 0x1E3300001E34, + 0x1E3500001E36, + 0x1E3700001E38, + 0x1E3900001E3A, + 0x1E3B00001E3C, + 0x1E3D00001E3E, + 0x1E3F00001E40, + 0x1E4100001E42, + 0x1E4300001E44, + 0x1E4500001E46, + 0x1E4700001E48, + 0x1E4900001E4A, + 0x1E4B00001E4C, + 0x1E4D00001E4E, + 0x1E4F00001E50, + 0x1E5100001E52, + 0x1E5300001E54, + 0x1E5500001E56, + 0x1E5700001E58, + 0x1E5900001E5A, + 0x1E5B00001E5C, + 0x1E5D00001E5E, + 0x1E5F00001E60, + 0x1E6100001E62, + 0x1E6300001E64, + 0x1E6500001E66, + 0x1E6700001E68, + 0x1E6900001E6A, + 0x1E6B00001E6C, + 0x1E6D00001E6E, + 0x1E6F00001E70, + 0x1E7100001E72, + 0x1E7300001E74, + 0x1E7500001E76, + 0x1E7700001E78, + 0x1E7900001E7A, + 0x1E7B00001E7C, + 0x1E7D00001E7E, + 0x1E7F00001E80, + 0x1E8100001E82, + 0x1E8300001E84, + 0x1E8500001E86, + 0x1E8700001E88, + 0x1E8900001E8A, + 0x1E8B00001E8C, + 0x1E8D00001E8E, + 0x1E8F00001E90, + 0x1E9100001E92, + 0x1E9300001E94, + 0x1E9500001E9A, + 0x1E9C00001E9E, + 0x1E9F00001EA0, + 0x1EA100001EA2, + 0x1EA300001EA4, + 
0x1EA500001EA6, + 0x1EA700001EA8, + 0x1EA900001EAA, + 0x1EAB00001EAC, + 0x1EAD00001EAE, + 0x1EAF00001EB0, + 0x1EB100001EB2, + 0x1EB300001EB4, + 0x1EB500001EB6, + 0x1EB700001EB8, + 0x1EB900001EBA, + 0x1EBB00001EBC, + 0x1EBD00001EBE, + 0x1EBF00001EC0, + 0x1EC100001EC2, + 0x1EC300001EC4, + 0x1EC500001EC6, + 0x1EC700001EC8, + 0x1EC900001ECA, + 0x1ECB00001ECC, + 0x1ECD00001ECE, + 0x1ECF00001ED0, + 0x1ED100001ED2, + 0x1ED300001ED4, + 0x1ED500001ED6, + 0x1ED700001ED8, + 0x1ED900001EDA, + 0x1EDB00001EDC, + 0x1EDD00001EDE, + 0x1EDF00001EE0, + 0x1EE100001EE2, + 0x1EE300001EE4, + 0x1EE500001EE6, + 0x1EE700001EE8, + 0x1EE900001EEA, + 0x1EEB00001EEC, + 0x1EED00001EEE, + 0x1EEF00001EF0, + 0x1EF100001EF2, + 0x1EF300001EF4, + 0x1EF500001EF6, + 0x1EF700001EF8, + 0x1EF900001EFA, + 0x1EFB00001EFC, + 0x1EFD00001EFE, + 0x1EFF00001F08, + 0x1F1000001F16, + 0x1F2000001F28, + 0x1F3000001F38, + 0x1F4000001F46, + 0x1F5000001F58, + 0x1F6000001F68, + 0x1F7000001F71, + 0x1F7200001F73, + 0x1F7400001F75, + 0x1F7600001F77, + 0x1F7800001F79, + 0x1F7A00001F7B, + 0x1F7C00001F7D, + 0x1FB000001FB2, + 0x1FB600001FB7, + 0x1FC600001FC7, + 0x1FD000001FD3, + 0x1FD600001FD8, + 0x1FE000001FE3, + 0x1FE400001FE8, + 0x1FF600001FF7, + 0x214E0000214F, + 0x218400002185, + 0x2C3000002C60, + 0x2C6100002C62, + 0x2C6500002C67, + 0x2C6800002C69, + 0x2C6A00002C6B, + 0x2C6C00002C6D, + 0x2C7100002C72, + 0x2C7300002C75, + 0x2C7600002C7C, + 0x2C8100002C82, + 0x2C8300002C84, + 0x2C8500002C86, + 0x2C8700002C88, + 0x2C8900002C8A, + 0x2C8B00002C8C, + 0x2C8D00002C8E, + 0x2C8F00002C90, + 0x2C9100002C92, + 0x2C9300002C94, + 0x2C9500002C96, + 0x2C9700002C98, + 0x2C9900002C9A, + 0x2C9B00002C9C, + 0x2C9D00002C9E, + 0x2C9F00002CA0, + 0x2CA100002CA2, + 0x2CA300002CA4, + 0x2CA500002CA6, + 0x2CA700002CA8, + 0x2CA900002CAA, + 0x2CAB00002CAC, + 0x2CAD00002CAE, + 0x2CAF00002CB0, + 0x2CB100002CB2, + 0x2CB300002CB4, + 0x2CB500002CB6, + 0x2CB700002CB8, + 0x2CB900002CBA, + 0x2CBB00002CBC, + 0x2CBD00002CBE, + 0x2CBF00002CC0, + 0x2CC100002CC2, + 0x2CC300002CC4, + 0x2CC500002CC6, + 0x2CC700002CC8, + 0x2CC900002CCA, + 0x2CCB00002CCC, + 0x2CCD00002CCE, + 0x2CCF00002CD0, + 0x2CD100002CD2, + 0x2CD300002CD4, + 0x2CD500002CD6, + 0x2CD700002CD8, + 0x2CD900002CDA, + 0x2CDB00002CDC, + 0x2CDD00002CDE, + 0x2CDF00002CE0, + 0x2CE100002CE2, + 0x2CE300002CE5, + 0x2CEC00002CED, + 0x2CEE00002CF2, + 0x2CF300002CF4, + 0x2D0000002D26, + 0x2D2700002D28, + 0x2D2D00002D2E, + 0x2D3000002D68, + 0x2D7F00002D97, + 0x2DA000002DA7, + 0x2DA800002DAF, + 0x2DB000002DB7, + 0x2DB800002DBF, + 0x2DC000002DC7, + 0x2DC800002DCF, + 0x2DD000002DD7, + 0x2DD800002DDF, + 0x2DE000002E00, + 0x2E2F00002E30, + 0x300500003008, + 0x302A0000302E, + 0x303C0000303D, + 0x304100003097, + 0x30990000309B, + 0x309D0000309F, + 0x30A1000030FB, + 0x30FC000030FF, + 0x310500003130, + 0x31A0000031C0, + 0x31F000003200, + 0x340000004DC0, + 0x4E000000A48D, + 0xA4D00000A4FE, + 0xA5000000A60D, + 0xA6100000A62C, + 0xA6410000A642, + 0xA6430000A644, + 0xA6450000A646, + 0xA6470000A648, + 0xA6490000A64A, + 0xA64B0000A64C, + 0xA64D0000A64E, + 0xA64F0000A650, + 0xA6510000A652, + 0xA6530000A654, + 0xA6550000A656, + 0xA6570000A658, + 0xA6590000A65A, + 0xA65B0000A65C, + 0xA65D0000A65E, + 0xA65F0000A660, + 0xA6610000A662, + 0xA6630000A664, + 0xA6650000A666, + 0xA6670000A668, + 0xA6690000A66A, + 0xA66B0000A66C, + 0xA66D0000A670, + 0xA6740000A67E, + 0xA67F0000A680, + 0xA6810000A682, + 0xA6830000A684, + 0xA6850000A686, + 0xA6870000A688, + 0xA6890000A68A, + 0xA68B0000A68C, + 0xA68D0000A68E, + 0xA68F0000A690, + 0xA6910000A692, + 0xA6930000A694, + 
0xA6950000A696, + 0xA6970000A698, + 0xA6990000A69A, + 0xA69B0000A69C, + 0xA69E0000A6E6, + 0xA6F00000A6F2, + 0xA7170000A720, + 0xA7230000A724, + 0xA7250000A726, + 0xA7270000A728, + 0xA7290000A72A, + 0xA72B0000A72C, + 0xA72D0000A72E, + 0xA72F0000A732, + 0xA7330000A734, + 0xA7350000A736, + 0xA7370000A738, + 0xA7390000A73A, + 0xA73B0000A73C, + 0xA73D0000A73E, + 0xA73F0000A740, + 0xA7410000A742, + 0xA7430000A744, + 0xA7450000A746, + 0xA7470000A748, + 0xA7490000A74A, + 0xA74B0000A74C, + 0xA74D0000A74E, + 0xA74F0000A750, + 0xA7510000A752, + 0xA7530000A754, + 0xA7550000A756, + 0xA7570000A758, + 0xA7590000A75A, + 0xA75B0000A75C, + 0xA75D0000A75E, + 0xA75F0000A760, + 0xA7610000A762, + 0xA7630000A764, + 0xA7650000A766, + 0xA7670000A768, + 0xA7690000A76A, + 0xA76B0000A76C, + 0xA76D0000A76E, + 0xA76F0000A770, + 0xA7710000A779, + 0xA77A0000A77B, + 0xA77C0000A77D, + 0xA77F0000A780, + 0xA7810000A782, + 0xA7830000A784, + 0xA7850000A786, + 0xA7870000A789, + 0xA78C0000A78D, + 0xA78E0000A790, + 0xA7910000A792, + 0xA7930000A796, + 0xA7970000A798, + 0xA7990000A79A, + 0xA79B0000A79C, + 0xA79D0000A79E, + 0xA79F0000A7A0, + 0xA7A10000A7A2, + 0xA7A30000A7A4, + 0xA7A50000A7A6, + 0xA7A70000A7A8, + 0xA7A90000A7AA, + 0xA7AF0000A7B0, + 0xA7B50000A7B6, + 0xA7B70000A7B8, + 0xA7B90000A7BA, + 0xA7BB0000A7BC, + 0xA7BD0000A7BE, + 0xA7BF0000A7C0, + 0xA7C10000A7C2, + 0xA7C30000A7C4, + 0xA7C80000A7C9, + 0xA7CA0000A7CB, + 0xA7CD0000A7CE, + 0xA7D10000A7D2, + 0xA7D30000A7D4, + 0xA7D50000A7D6, + 0xA7D70000A7D8, + 0xA7D90000A7DA, + 0xA7DB0000A7DC, + 0xA7F60000A7F8, + 0xA7FA0000A828, + 0xA82C0000A82D, + 0xA8400000A874, + 0xA8800000A8C6, + 0xA8D00000A8DA, + 0xA8E00000A8F8, + 0xA8FB0000A8FC, + 0xA8FD0000A92E, + 0xA9300000A954, + 0xA9800000A9C1, + 0xA9CF0000A9DA, + 0xA9E00000A9FF, + 0xAA000000AA37, + 0xAA400000AA4E, + 0xAA500000AA5A, + 0xAA600000AA77, + 0xAA7A0000AAC3, + 0xAADB0000AADE, + 0xAAE00000AAF0, + 0xAAF20000AAF7, + 0xAB010000AB07, + 0xAB090000AB0F, + 0xAB110000AB17, + 0xAB200000AB27, + 0xAB280000AB2F, + 0xAB300000AB5B, + 0xAB600000AB69, + 0xABC00000ABEB, + 0xABEC0000ABEE, + 0xABF00000ABFA, + 0xAC000000D7A4, + 0xFA0E0000FA10, + 0xFA110000FA12, + 0xFA130000FA15, + 0xFA1F0000FA20, + 0xFA210000FA22, + 0xFA230000FA25, + 0xFA270000FA2A, + 0xFB1E0000FB1F, + 0xFE200000FE30, + 0xFE730000FE74, + 0x100000001000C, + 0x1000D00010027, + 0x100280001003B, + 0x1003C0001003E, + 0x1003F0001004E, + 0x100500001005E, + 0x10080000100FB, + 0x101FD000101FE, + 0x102800001029D, + 0x102A0000102D1, + 0x102E0000102E1, + 0x1030000010320, + 0x1032D00010341, + 0x103420001034A, + 0x103500001037B, + 0x103800001039E, + 0x103A0000103C4, + 0x103C8000103D0, + 0x104280001049E, + 0x104A0000104AA, + 0x104D8000104FC, + 0x1050000010528, + 0x1053000010564, + 0x10597000105A2, + 0x105A3000105B2, + 0x105B3000105BA, + 0x105BB000105BD, + 0x105C0000105F4, + 0x1060000010737, + 0x1074000010756, + 0x1076000010768, + 0x1078000010781, + 0x1080000010806, + 0x1080800010809, + 0x1080A00010836, + 0x1083700010839, + 0x1083C0001083D, + 0x1083F00010856, + 0x1086000010877, + 0x108800001089F, + 0x108E0000108F3, + 0x108F4000108F6, + 0x1090000010916, + 0x109200001093A, + 0x10980000109B8, + 0x109BE000109C0, + 0x10A0000010A04, + 0x10A0500010A07, + 0x10A0C00010A14, + 0x10A1500010A18, + 0x10A1900010A36, + 0x10A3800010A3B, + 0x10A3F00010A40, + 0x10A6000010A7D, + 0x10A8000010A9D, + 0x10AC000010AC8, + 0x10AC900010AE7, + 0x10B0000010B36, + 0x10B4000010B56, + 0x10B6000010B73, + 0x10B8000010B92, + 0x10C0000010C49, + 0x10CC000010CF3, + 0x10D0000010D28, + 0x10D3000010D3A, + 0x10D4000010D50, + 
0x10D6900010D6E, + 0x10D6F00010D86, + 0x10E8000010EAA, + 0x10EAB00010EAD, + 0x10EB000010EB2, + 0x10EC200010EC5, + 0x10EFC00010F1D, + 0x10F2700010F28, + 0x10F3000010F51, + 0x10F7000010F86, + 0x10FB000010FC5, + 0x10FE000010FF7, + 0x1100000011047, + 0x1106600011076, + 0x1107F000110BB, + 0x110C2000110C3, + 0x110D0000110E9, + 0x110F0000110FA, + 0x1110000011135, + 0x1113600011140, + 0x1114400011148, + 0x1115000011174, + 0x1117600011177, + 0x11180000111C5, + 0x111C9000111CD, + 0x111CE000111DB, + 0x111DC000111DD, + 0x1120000011212, + 0x1121300011238, + 0x1123E00011242, + 0x1128000011287, + 0x1128800011289, + 0x1128A0001128E, + 0x1128F0001129E, + 0x1129F000112A9, + 0x112B0000112EB, + 0x112F0000112FA, + 0x1130000011304, + 0x113050001130D, + 0x1130F00011311, + 0x1131300011329, + 0x1132A00011331, + 0x1133200011334, + 0x113350001133A, + 0x1133B00011345, + 0x1134700011349, + 0x1134B0001134E, + 0x1135000011351, + 0x1135700011358, + 0x1135D00011364, + 0x113660001136D, + 0x1137000011375, + 0x113800001138A, + 0x1138B0001138C, + 0x1138E0001138F, + 0x11390000113B6, + 0x113B7000113C1, + 0x113C2000113C3, + 0x113C5000113C6, + 0x113C7000113CB, + 0x113CC000113D4, + 0x113E1000113E3, + 0x114000001144B, + 0x114500001145A, + 0x1145E00011462, + 0x11480000114C6, + 0x114C7000114C8, + 0x114D0000114DA, + 0x11580000115B6, + 0x115B8000115C1, + 0x115D8000115DE, + 0x1160000011641, + 0x1164400011645, + 0x116500001165A, + 0x11680000116B9, + 0x116C0000116CA, + 0x116D0000116E4, + 0x117000001171B, + 0x1171D0001172C, + 0x117300001173A, + 0x1174000011747, + 0x118000001183B, + 0x118C0000118EA, + 0x118FF00011907, + 0x119090001190A, + 0x1190C00011914, + 0x1191500011917, + 0x1191800011936, + 0x1193700011939, + 0x1193B00011944, + 0x119500001195A, + 0x119A0000119A8, + 0x119AA000119D8, + 0x119DA000119E2, + 0x119E3000119E5, + 0x11A0000011A3F, + 0x11A4700011A48, + 0x11A5000011A9A, + 0x11A9D00011A9E, + 0x11AB000011AF9, + 0x11BC000011BE1, + 0x11BF000011BFA, + 0x11C0000011C09, + 0x11C0A00011C37, + 0x11C3800011C41, + 0x11C5000011C5A, + 0x11C7200011C90, + 0x11C9200011CA8, + 0x11CA900011CB7, + 0x11D0000011D07, + 0x11D0800011D0A, + 0x11D0B00011D37, + 0x11D3A00011D3B, + 0x11D3C00011D3E, + 0x11D3F00011D48, + 0x11D5000011D5A, + 0x11D6000011D66, + 0x11D6700011D69, + 0x11D6A00011D8F, + 0x11D9000011D92, + 0x11D9300011D99, + 0x11DA000011DAA, + 0x11EE000011EF7, + 0x11F0000011F11, + 0x11F1200011F3B, + 0x11F3E00011F43, + 0x11F5000011F5B, + 0x11FB000011FB1, + 0x120000001239A, + 0x1248000012544, + 0x12F9000012FF1, + 0x1300000013430, + 0x1344000013456, + 0x13460000143FB, + 0x1440000014647, + 0x161000001613A, + 0x1680000016A39, + 0x16A4000016A5F, + 0x16A6000016A6A, + 0x16A7000016ABF, + 0x16AC000016ACA, + 0x16AD000016AEE, + 0x16AF000016AF5, + 0x16B0000016B37, + 0x16B4000016B44, + 0x16B5000016B5A, + 0x16B6300016B78, + 0x16B7D00016B90, + 0x16D4000016D6D, + 0x16D7000016D7A, + 0x16E6000016E80, + 0x16F0000016F4B, + 0x16F4F00016F88, + 0x16F8F00016FA0, + 0x16FE000016FE2, + 0x16FE300016FE5, + 0x16FF000016FF2, + 0x17000000187F8, + 0x1880000018CD6, + 0x18CFF00018D09, + 0x1AFF00001AFF4, + 0x1AFF50001AFFC, + 0x1AFFD0001AFFF, + 0x1B0000001B123, + 0x1B1320001B133, + 0x1B1500001B153, + 0x1B1550001B156, + 0x1B1640001B168, + 0x1B1700001B2FC, + 0x1BC000001BC6B, + 0x1BC700001BC7D, + 0x1BC800001BC89, + 0x1BC900001BC9A, + 0x1BC9D0001BC9F, + 0x1CCF00001CCFA, + 0x1CF000001CF2E, + 0x1CF300001CF47, + 0x1DA000001DA37, + 0x1DA3B0001DA6D, + 0x1DA750001DA76, + 0x1DA840001DA85, + 0x1DA9B0001DAA0, + 0x1DAA10001DAB0, + 0x1DF000001DF1F, + 0x1DF250001DF2B, + 0x1E0000001E007, + 0x1E0080001E019, + 
0x1E01B0001E022, + 0x1E0230001E025, + 0x1E0260001E02B, + 0x1E08F0001E090, + 0x1E1000001E12D, + 0x1E1300001E13E, + 0x1E1400001E14A, + 0x1E14E0001E14F, + 0x1E2900001E2AF, + 0x1E2C00001E2FA, + 0x1E4D00001E4FA, + 0x1E5D00001E5FB, + 0x1E7E00001E7E7, + 0x1E7E80001E7EC, + 0x1E7ED0001E7EF, + 0x1E7F00001E7FF, + 0x1E8000001E8C5, + 0x1E8D00001E8D7, + 0x1E9220001E94C, + 0x1E9500001E95A, + 0x200000002A6E0, + 0x2A7000002B73A, + 0x2B7400002B81E, + 0x2B8200002CEA2, + 0x2CEB00002EBE1, + 0x2EBF00002EE5E, + 0x300000003134B, + 0x31350000323B0, + ), + "CONTEXTJ": (0x200C0000200E,), + "CONTEXTO": ( + 0xB7000000B8, + 0x37500000376, + 0x5F3000005F5, + 0x6600000066A, + 0x6F0000006FA, + 0x30FB000030FC, + ), +} diff --git a/py311/lib/python3.11/site-packages/idna/intranges.py b/py311/lib/python3.11/site-packages/idna/intranges.py new file mode 100644 index 0000000000000000000000000000000000000000..7bfaa8d80d7dc471d572db0f949460901126e8bd --- /dev/null +++ b/py311/lib/python3.11/site-packages/idna/intranges.py @@ -0,0 +1,57 @@ +""" +Given a list of integers, made up of (hopefully) a small number of long runs +of consecutive integers, compute a representation of the form +((start1, end1), (start2, end2) ...). Then answer the question "was x present +in the original list?" in time O(log(# runs)). +""" + +import bisect +from typing import List, Tuple + + +def intranges_from_list(list_: List[int]) -> Tuple[int, ...]: + """Represent a list of integers as a sequence of ranges: + ((start_0, end_0), (start_1, end_1), ...), such that the original + integers are exactly those x such that start_i <= x < end_i for some i. + + Ranges are encoded as single integers (start << 32 | end), not as tuples. + """ + + sorted_list = sorted(list_) + ranges = [] + last_write = -1 + for i in range(len(sorted_list)): + if i + 1 < len(sorted_list): + if sorted_list[i] == sorted_list[i + 1] - 1: + continue + current_range = sorted_list[last_write + 1 : i + 1] + ranges.append(_encode_range(current_range[0], current_range[-1] + 1)) + last_write = i + + return tuple(ranges) + + +def _encode_range(start: int, end: int) -> int: + return (start << 32) | end + + +def _decode_range(r: int) -> Tuple[int, int]: + return (r >> 32), (r & ((1 << 32) - 1)) + + +def intranges_contain(int_: int, ranges: Tuple[int, ...]) -> bool: + """Determine if `int_` falls into one of the ranges in `ranges`.""" + tuple_ = _encode_range(int_, 0) + pos = bisect.bisect_left(ranges, tuple_) + # we could be immediately ahead of a tuple (start, end) + # with start < int_ <= end + if pos > 0: + left, right = _decode_range(ranges[pos - 1]) + if left <= int_ < right: + return True + # or we could be immediately behind a tuple (int_, end) + if pos < len(ranges): + left, _ = _decode_range(ranges[pos]) + if left == int_: + return True + return False diff --git a/py311/lib/python3.11/site-packages/idna/package_data.py b/py311/lib/python3.11/site-packages/idna/package_data.py new file mode 100644 index 0000000000000000000000000000000000000000..7272c8d92364886c51fefd22837ed5ceab145606 --- /dev/null +++ b/py311/lib/python3.11/site-packages/idna/package_data.py @@ -0,0 +1 @@ +__version__ = "3.11" diff --git a/py311/lib/python3.11/site-packages/idna/py.typed b/py311/lib/python3.11/site-packages/idna/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/idna/uts46data.py b/py311/lib/python3.11/site-packages/idna/uts46data.py new file mode 100644 index 
0000000000000000000000000000000000000000..4610b71dad9196838d4e1e04e76d5e7c9baf8cd9 --- /dev/null +++ b/py311/lib/python3.11/site-packages/idna/uts46data.py @@ -0,0 +1,8841 @@ +# This file is automatically generated by tools/idna-data +# vim: set fileencoding=utf-8 : + +from typing import List, Tuple, Union + +"""IDNA Mapping Table from UTS46.""" + + +__version__ = "16.0.0" + + +def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x0, "V"), + (0x1, "V"), + (0x2, "V"), + (0x3, "V"), + (0x4, "V"), + (0x5, "V"), + (0x6, "V"), + (0x7, "V"), + (0x8, "V"), + (0x9, "V"), + (0xA, "V"), + (0xB, "V"), + (0xC, "V"), + (0xD, "V"), + (0xE, "V"), + (0xF, "V"), + (0x10, "V"), + (0x11, "V"), + (0x12, "V"), + (0x13, "V"), + (0x14, "V"), + (0x15, "V"), + (0x16, "V"), + (0x17, "V"), + (0x18, "V"), + (0x19, "V"), + (0x1A, "V"), + (0x1B, "V"), + (0x1C, "V"), + (0x1D, "V"), + (0x1E, "V"), + (0x1F, "V"), + (0x20, "V"), + (0x21, "V"), + (0x22, "V"), + (0x23, "V"), + (0x24, "V"), + (0x25, "V"), + (0x26, "V"), + (0x27, "V"), + (0x28, "V"), + (0x29, "V"), + (0x2A, "V"), + (0x2B, "V"), + (0x2C, "V"), + (0x2D, "V"), + (0x2E, "V"), + (0x2F, "V"), + (0x30, "V"), + (0x31, "V"), + (0x32, "V"), + (0x33, "V"), + (0x34, "V"), + (0x35, "V"), + (0x36, "V"), + (0x37, "V"), + (0x38, "V"), + (0x39, "V"), + (0x3A, "V"), + (0x3B, "V"), + (0x3C, "V"), + (0x3D, "V"), + (0x3E, "V"), + (0x3F, "V"), + (0x40, "V"), + (0x41, "M", "a"), + (0x42, "M", "b"), + (0x43, "M", "c"), + (0x44, "M", "d"), + (0x45, "M", "e"), + (0x46, "M", "f"), + (0x47, "M", "g"), + (0x48, "M", "h"), + (0x49, "M", "i"), + (0x4A, "M", "j"), + (0x4B, "M", "k"), + (0x4C, "M", "l"), + (0x4D, "M", "m"), + (0x4E, "M", "n"), + (0x4F, "M", "o"), + (0x50, "M", "p"), + (0x51, "M", "q"), + (0x52, "M", "r"), + (0x53, "M", "s"), + (0x54, "M", "t"), + (0x55, "M", "u"), + (0x56, "M", "v"), + (0x57, "M", "w"), + (0x58, "M", "x"), + (0x59, "M", "y"), + (0x5A, "M", "z"), + (0x5B, "V"), + (0x5C, "V"), + (0x5D, "V"), + (0x5E, "V"), + (0x5F, "V"), + (0x60, "V"), + (0x61, "V"), + (0x62, "V"), + (0x63, "V"), + ] + + +def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x64, "V"), + (0x65, "V"), + (0x66, "V"), + (0x67, "V"), + (0x68, "V"), + (0x69, "V"), + (0x6A, "V"), + (0x6B, "V"), + (0x6C, "V"), + (0x6D, "V"), + (0x6E, "V"), + (0x6F, "V"), + (0x70, "V"), + (0x71, "V"), + (0x72, "V"), + (0x73, "V"), + (0x74, "V"), + (0x75, "V"), + (0x76, "V"), + (0x77, "V"), + (0x78, "V"), + (0x79, "V"), + (0x7A, "V"), + (0x7B, "V"), + (0x7C, "V"), + (0x7D, "V"), + (0x7E, "V"), + (0x7F, "V"), + (0x80, "X"), + (0x81, "X"), + (0x82, "X"), + (0x83, "X"), + (0x84, "X"), + (0x85, "X"), + (0x86, "X"), + (0x87, "X"), + (0x88, "X"), + (0x89, "X"), + (0x8A, "X"), + (0x8B, "X"), + (0x8C, "X"), + (0x8D, "X"), + (0x8E, "X"), + (0x8F, "X"), + (0x90, "X"), + (0x91, "X"), + (0x92, "X"), + (0x93, "X"), + (0x94, "X"), + (0x95, "X"), + (0x96, "X"), + (0x97, "X"), + (0x98, "X"), + (0x99, "X"), + (0x9A, "X"), + (0x9B, "X"), + (0x9C, "X"), + (0x9D, "X"), + (0x9E, "X"), + (0x9F, "X"), + (0xA0, "M", " "), + (0xA1, "V"), + (0xA2, "V"), + (0xA3, "V"), + (0xA4, "V"), + (0xA5, "V"), + (0xA6, "V"), + (0xA7, "V"), + (0xA8, "M", " ̈"), + (0xA9, "V"), + (0xAA, "M", "a"), + (0xAB, "V"), + (0xAC, "V"), + (0xAD, "I"), + (0xAE, "V"), + (0xAF, "M", " ̄"), + (0xB0, "V"), + (0xB1, "V"), + (0xB2, "M", "2"), + (0xB3, "M", "3"), + (0xB4, "M", " ́"), + (0xB5, "M", "μ"), + (0xB6, "V"), + (0xB7, "V"), + (0xB8, "M", " ̧"), + (0xB9, "M", "1"), + (0xBA, "M", "o"), + (0xBB, "V"), + (0xBC, "M", 
"1⁄4"), + (0xBD, "M", "1⁄2"), + (0xBE, "M", "3⁄4"), + (0xBF, "V"), + (0xC0, "M", "à"), + (0xC1, "M", "á"), + (0xC2, "M", "â"), + (0xC3, "M", "ã"), + (0xC4, "M", "ä"), + (0xC5, "M", "å"), + (0xC6, "M", "æ"), + (0xC7, "M", "ç"), + ] + + +def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xC8, "M", "è"), + (0xC9, "M", "é"), + (0xCA, "M", "ê"), + (0xCB, "M", "ë"), + (0xCC, "M", "ì"), + (0xCD, "M", "í"), + (0xCE, "M", "î"), + (0xCF, "M", "ï"), + (0xD0, "M", "ð"), + (0xD1, "M", "ñ"), + (0xD2, "M", "ò"), + (0xD3, "M", "ó"), + (0xD4, "M", "ô"), + (0xD5, "M", "õ"), + (0xD6, "M", "ö"), + (0xD7, "V"), + (0xD8, "M", "ø"), + (0xD9, "M", "ù"), + (0xDA, "M", "ú"), + (0xDB, "M", "û"), + (0xDC, "M", "ü"), + (0xDD, "M", "ý"), + (0xDE, "M", "þ"), + (0xDF, "D", "ss"), + (0xE0, "V"), + (0xE1, "V"), + (0xE2, "V"), + (0xE3, "V"), + (0xE4, "V"), + (0xE5, "V"), + (0xE6, "V"), + (0xE7, "V"), + (0xE8, "V"), + (0xE9, "V"), + (0xEA, "V"), + (0xEB, "V"), + (0xEC, "V"), + (0xED, "V"), + (0xEE, "V"), + (0xEF, "V"), + (0xF0, "V"), + (0xF1, "V"), + (0xF2, "V"), + (0xF3, "V"), + (0xF4, "V"), + (0xF5, "V"), + (0xF6, "V"), + (0xF7, "V"), + (0xF8, "V"), + (0xF9, "V"), + (0xFA, "V"), + (0xFB, "V"), + (0xFC, "V"), + (0xFD, "V"), + (0xFE, "V"), + (0xFF, "V"), + (0x100, "M", "ā"), + (0x101, "V"), + (0x102, "M", "ă"), + (0x103, "V"), + (0x104, "M", "ą"), + (0x105, "V"), + (0x106, "M", "ć"), + (0x107, "V"), + (0x108, "M", "ĉ"), + (0x109, "V"), + (0x10A, "M", "ċ"), + (0x10B, "V"), + (0x10C, "M", "č"), + (0x10D, "V"), + (0x10E, "M", "ď"), + (0x10F, "V"), + (0x110, "M", "đ"), + (0x111, "V"), + (0x112, "M", "ē"), + (0x113, "V"), + (0x114, "M", "ĕ"), + (0x115, "V"), + (0x116, "M", "ė"), + (0x117, "V"), + (0x118, "M", "ę"), + (0x119, "V"), + (0x11A, "M", "ě"), + (0x11B, "V"), + (0x11C, "M", "ĝ"), + (0x11D, "V"), + (0x11E, "M", "ğ"), + (0x11F, "V"), + (0x120, "M", "ġ"), + (0x121, "V"), + (0x122, "M", "ģ"), + (0x123, "V"), + (0x124, "M", "ĥ"), + (0x125, "V"), + (0x126, "M", "ħ"), + (0x127, "V"), + (0x128, "M", "ĩ"), + (0x129, "V"), + (0x12A, "M", "ī"), + (0x12B, "V"), + ] + + +def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x12C, "M", "ĭ"), + (0x12D, "V"), + (0x12E, "M", "į"), + (0x12F, "V"), + (0x130, "M", "i̇"), + (0x131, "V"), + (0x132, "M", "ij"), + (0x134, "M", "ĵ"), + (0x135, "V"), + (0x136, "M", "ķ"), + (0x137, "V"), + (0x139, "M", "ĺ"), + (0x13A, "V"), + (0x13B, "M", "ļ"), + (0x13C, "V"), + (0x13D, "M", "ľ"), + (0x13E, "V"), + (0x13F, "M", "l·"), + (0x141, "M", "ł"), + (0x142, "V"), + (0x143, "M", "ń"), + (0x144, "V"), + (0x145, "M", "ņ"), + (0x146, "V"), + (0x147, "M", "ň"), + (0x148, "V"), + (0x149, "M", "ʼn"), + (0x14A, "M", "ŋ"), + (0x14B, "V"), + (0x14C, "M", "ō"), + (0x14D, "V"), + (0x14E, "M", "ŏ"), + (0x14F, "V"), + (0x150, "M", "ő"), + (0x151, "V"), + (0x152, "M", "œ"), + (0x153, "V"), + (0x154, "M", "ŕ"), + (0x155, "V"), + (0x156, "M", "ŗ"), + (0x157, "V"), + (0x158, "M", "ř"), + (0x159, "V"), + (0x15A, "M", "ś"), + (0x15B, "V"), + (0x15C, "M", "ŝ"), + (0x15D, "V"), + (0x15E, "M", "ş"), + (0x15F, "V"), + (0x160, "M", "š"), + (0x161, "V"), + (0x162, "M", "ţ"), + (0x163, "V"), + (0x164, "M", "ť"), + (0x165, "V"), + (0x166, "M", "ŧ"), + (0x167, "V"), + (0x168, "M", "ũ"), + (0x169, "V"), + (0x16A, "M", "ū"), + (0x16B, "V"), + (0x16C, "M", "ŭ"), + (0x16D, "V"), + (0x16E, "M", "ů"), + (0x16F, "V"), + (0x170, "M", "ű"), + (0x171, "V"), + (0x172, "M", "ų"), + (0x173, "V"), + (0x174, "M", "ŵ"), + (0x175, "V"), + (0x176, "M", "ŷ"), + (0x177, "V"), + (0x178, "M", 
"ÿ"), + (0x179, "M", "ź"), + (0x17A, "V"), + (0x17B, "M", "ż"), + (0x17C, "V"), + (0x17D, "M", "ž"), + (0x17E, "V"), + (0x17F, "M", "s"), + (0x180, "V"), + (0x181, "M", "ɓ"), + (0x182, "M", "ƃ"), + (0x183, "V"), + (0x184, "M", "ƅ"), + (0x185, "V"), + (0x186, "M", "ɔ"), + (0x187, "M", "ƈ"), + (0x188, "V"), + (0x189, "M", "ɖ"), + (0x18A, "M", "ɗ"), + (0x18B, "M", "ƌ"), + (0x18C, "V"), + (0x18E, "M", "ǝ"), + (0x18F, "M", "ə"), + (0x190, "M", "ɛ"), + (0x191, "M", "ƒ"), + (0x192, "V"), + (0x193, "M", "ɠ"), + ] + + +def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x194, "M", "ɣ"), + (0x195, "V"), + (0x196, "M", "ɩ"), + (0x197, "M", "ɨ"), + (0x198, "M", "ƙ"), + (0x199, "V"), + (0x19C, "M", "ɯ"), + (0x19D, "M", "ɲ"), + (0x19E, "V"), + (0x19F, "M", "ɵ"), + (0x1A0, "M", "ơ"), + (0x1A1, "V"), + (0x1A2, "M", "ƣ"), + (0x1A3, "V"), + (0x1A4, "M", "ƥ"), + (0x1A5, "V"), + (0x1A6, "M", "ʀ"), + (0x1A7, "M", "ƨ"), + (0x1A8, "V"), + (0x1A9, "M", "ʃ"), + (0x1AA, "V"), + (0x1AC, "M", "ƭ"), + (0x1AD, "V"), + (0x1AE, "M", "ʈ"), + (0x1AF, "M", "ư"), + (0x1B0, "V"), + (0x1B1, "M", "ʊ"), + (0x1B2, "M", "ʋ"), + (0x1B3, "M", "ƴ"), + (0x1B4, "V"), + (0x1B5, "M", "ƶ"), + (0x1B6, "V"), + (0x1B7, "M", "ʒ"), + (0x1B8, "M", "ƹ"), + (0x1B9, "V"), + (0x1BC, "M", "ƽ"), + (0x1BD, "V"), + (0x1C4, "M", "dž"), + (0x1C7, "M", "lj"), + (0x1CA, "M", "nj"), + (0x1CD, "M", "ǎ"), + (0x1CE, "V"), + (0x1CF, "M", "ǐ"), + (0x1D0, "V"), + (0x1D1, "M", "ǒ"), + (0x1D2, "V"), + (0x1D3, "M", "ǔ"), + (0x1D4, "V"), + (0x1D5, "M", "ǖ"), + (0x1D6, "V"), + (0x1D7, "M", "ǘ"), + (0x1D8, "V"), + (0x1D9, "M", "ǚ"), + (0x1DA, "V"), + (0x1DB, "M", "ǜ"), + (0x1DC, "V"), + (0x1DE, "M", "ǟ"), + (0x1DF, "V"), + (0x1E0, "M", "ǡ"), + (0x1E1, "V"), + (0x1E2, "M", "ǣ"), + (0x1E3, "V"), + (0x1E4, "M", "ǥ"), + (0x1E5, "V"), + (0x1E6, "M", "ǧ"), + (0x1E7, "V"), + (0x1E8, "M", "ǩ"), + (0x1E9, "V"), + (0x1EA, "M", "ǫ"), + (0x1EB, "V"), + (0x1EC, "M", "ǭ"), + (0x1ED, "V"), + (0x1EE, "M", "ǯ"), + (0x1EF, "V"), + (0x1F1, "M", "dz"), + (0x1F4, "M", "ǵ"), + (0x1F5, "V"), + (0x1F6, "M", "ƕ"), + (0x1F7, "M", "ƿ"), + (0x1F8, "M", "ǹ"), + (0x1F9, "V"), + (0x1FA, "M", "ǻ"), + (0x1FB, "V"), + (0x1FC, "M", "ǽ"), + (0x1FD, "V"), + (0x1FE, "M", "ǿ"), + (0x1FF, "V"), + (0x200, "M", "ȁ"), + (0x201, "V"), + (0x202, "M", "ȃ"), + (0x203, "V"), + (0x204, "M", "ȅ"), + (0x205, "V"), + (0x206, "M", "ȇ"), + (0x207, "V"), + (0x208, "M", "ȉ"), + (0x209, "V"), + (0x20A, "M", "ȋ"), + (0x20B, "V"), + (0x20C, "M", "ȍ"), + ] + + +def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x20D, "V"), + (0x20E, "M", "ȏ"), + (0x20F, "V"), + (0x210, "M", "ȑ"), + (0x211, "V"), + (0x212, "M", "ȓ"), + (0x213, "V"), + (0x214, "M", "ȕ"), + (0x215, "V"), + (0x216, "M", "ȗ"), + (0x217, "V"), + (0x218, "M", "ș"), + (0x219, "V"), + (0x21A, "M", "ț"), + (0x21B, "V"), + (0x21C, "M", "ȝ"), + (0x21D, "V"), + (0x21E, "M", "ȟ"), + (0x21F, "V"), + (0x220, "M", "ƞ"), + (0x221, "V"), + (0x222, "M", "ȣ"), + (0x223, "V"), + (0x224, "M", "ȥ"), + (0x225, "V"), + (0x226, "M", "ȧ"), + (0x227, "V"), + (0x228, "M", "ȩ"), + (0x229, "V"), + (0x22A, "M", "ȫ"), + (0x22B, "V"), + (0x22C, "M", "ȭ"), + (0x22D, "V"), + (0x22E, "M", "ȯ"), + (0x22F, "V"), + (0x230, "M", "ȱ"), + (0x231, "V"), + (0x232, "M", "ȳ"), + (0x233, "V"), + (0x23A, "M", "ⱥ"), + (0x23B, "M", "ȼ"), + (0x23C, "V"), + (0x23D, "M", "ƚ"), + (0x23E, "M", "ⱦ"), + (0x23F, "V"), + (0x241, "M", "ɂ"), + (0x242, "V"), + (0x243, "M", "ƀ"), + (0x244, "M", "ʉ"), + (0x245, "M", "ʌ"), + (0x246, "M", "ɇ"), + (0x247, "V"), + 
(0x248, "M", "ɉ"), + (0x249, "V"), + (0x24A, "M", "ɋ"), + (0x24B, "V"), + (0x24C, "M", "ɍ"), + (0x24D, "V"), + (0x24E, "M", "ɏ"), + (0x24F, "V"), + (0x2B0, "M", "h"), + (0x2B1, "M", "ɦ"), + (0x2B2, "M", "j"), + (0x2B3, "M", "r"), + (0x2B4, "M", "ɹ"), + (0x2B5, "M", "ɻ"), + (0x2B6, "M", "ʁ"), + (0x2B7, "M", "w"), + (0x2B8, "M", "y"), + (0x2B9, "V"), + (0x2D8, "M", " ̆"), + (0x2D9, "M", " ̇"), + (0x2DA, "M", " ̊"), + (0x2DB, "M", " ̨"), + (0x2DC, "M", " ̃"), + (0x2DD, "M", " ̋"), + (0x2DE, "V"), + (0x2E0, "M", "ɣ"), + (0x2E1, "M", "l"), + (0x2E2, "M", "s"), + (0x2E3, "M", "x"), + (0x2E4, "M", "ʕ"), + (0x2E5, "V"), + (0x340, "M", "̀"), + (0x341, "M", "́"), + (0x342, "V"), + (0x343, "M", "̓"), + (0x344, "M", "̈́"), + (0x345, "M", "ι"), + (0x346, "V"), + (0x34F, "I"), + (0x350, "V"), + (0x370, "M", "ͱ"), + (0x371, "V"), + (0x372, "M", "ͳ"), + (0x373, "V"), + (0x374, "M", "ʹ"), + (0x375, "V"), + (0x376, "M", "ͷ"), + (0x377, "V"), + ] + + +def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x378, "X"), + (0x37A, "M", " ι"), + (0x37B, "V"), + (0x37E, "M", ";"), + (0x37F, "M", "ϳ"), + (0x380, "X"), + (0x384, "M", " ́"), + (0x385, "M", " ̈́"), + (0x386, "M", "ά"), + (0x387, "M", "·"), + (0x388, "M", "έ"), + (0x389, "M", "ή"), + (0x38A, "M", "ί"), + (0x38B, "X"), + (0x38C, "M", "ό"), + (0x38D, "X"), + (0x38E, "M", "ύ"), + (0x38F, "M", "ώ"), + (0x390, "V"), + (0x391, "M", "α"), + (0x392, "M", "β"), + (0x393, "M", "γ"), + (0x394, "M", "δ"), + (0x395, "M", "ε"), + (0x396, "M", "ζ"), + (0x397, "M", "η"), + (0x398, "M", "θ"), + (0x399, "M", "ι"), + (0x39A, "M", "κ"), + (0x39B, "M", "λ"), + (0x39C, "M", "μ"), + (0x39D, "M", "ν"), + (0x39E, "M", "ξ"), + (0x39F, "M", "ο"), + (0x3A0, "M", "π"), + (0x3A1, "M", "ρ"), + (0x3A2, "X"), + (0x3A3, "M", "σ"), + (0x3A4, "M", "τ"), + (0x3A5, "M", "υ"), + (0x3A6, "M", "φ"), + (0x3A7, "M", "χ"), + (0x3A8, "M", "ψ"), + (0x3A9, "M", "ω"), + (0x3AA, "M", "ϊ"), + (0x3AB, "M", "ϋ"), + (0x3AC, "V"), + (0x3C2, "D", "σ"), + (0x3C3, "V"), + (0x3CF, "M", "ϗ"), + (0x3D0, "M", "β"), + (0x3D1, "M", "θ"), + (0x3D2, "M", "υ"), + (0x3D3, "M", "ύ"), + (0x3D4, "M", "ϋ"), + (0x3D5, "M", "φ"), + (0x3D6, "M", "π"), + (0x3D7, "V"), + (0x3D8, "M", "ϙ"), + (0x3D9, "V"), + (0x3DA, "M", "ϛ"), + (0x3DB, "V"), + (0x3DC, "M", "ϝ"), + (0x3DD, "V"), + (0x3DE, "M", "ϟ"), + (0x3DF, "V"), + (0x3E0, "M", "ϡ"), + (0x3E1, "V"), + (0x3E2, "M", "ϣ"), + (0x3E3, "V"), + (0x3E4, "M", "ϥ"), + (0x3E5, "V"), + (0x3E6, "M", "ϧ"), + (0x3E7, "V"), + (0x3E8, "M", "ϩ"), + (0x3E9, "V"), + (0x3EA, "M", "ϫ"), + (0x3EB, "V"), + (0x3EC, "M", "ϭ"), + (0x3ED, "V"), + (0x3EE, "M", "ϯ"), + (0x3EF, "V"), + (0x3F0, "M", "κ"), + (0x3F1, "M", "ρ"), + (0x3F2, "M", "σ"), + (0x3F3, "V"), + (0x3F4, "M", "θ"), + (0x3F5, "M", "ε"), + (0x3F6, "V"), + (0x3F7, "M", "ϸ"), + (0x3F8, "V"), + (0x3F9, "M", "σ"), + (0x3FA, "M", "ϻ"), + (0x3FB, "V"), + (0x3FD, "M", "ͻ"), + (0x3FE, "M", "ͼ"), + (0x3FF, "M", "ͽ"), + (0x400, "M", "ѐ"), + (0x401, "M", "ё"), + (0x402, "M", "ђ"), + ] + + +def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x403, "M", "ѓ"), + (0x404, "M", "є"), + (0x405, "M", "ѕ"), + (0x406, "M", "і"), + (0x407, "M", "ї"), + (0x408, "M", "ј"), + (0x409, "M", "љ"), + (0x40A, "M", "њ"), + (0x40B, "M", "ћ"), + (0x40C, "M", "ќ"), + (0x40D, "M", "ѝ"), + (0x40E, "M", "ў"), + (0x40F, "M", "џ"), + (0x410, "M", "а"), + (0x411, "M", "б"), + (0x412, "M", "в"), + (0x413, "M", "г"), + (0x414, "M", "д"), + (0x415, "M", "е"), + (0x416, "M", "ж"), + (0x417, "M", "з"), + (0x418, "M", "и"), + 
(0x419, "M", "й"), + (0x41A, "M", "к"), + (0x41B, "M", "л"), + (0x41C, "M", "м"), + (0x41D, "M", "н"), + (0x41E, "M", "о"), + (0x41F, "M", "п"), + (0x420, "M", "р"), + (0x421, "M", "с"), + (0x422, "M", "т"), + (0x423, "M", "у"), + (0x424, "M", "ф"), + (0x425, "M", "х"), + (0x426, "M", "ц"), + (0x427, "M", "ч"), + (0x428, "M", "ш"), + (0x429, "M", "щ"), + (0x42A, "M", "ъ"), + (0x42B, "M", "ы"), + (0x42C, "M", "ь"), + (0x42D, "M", "э"), + (0x42E, "M", "ю"), + (0x42F, "M", "я"), + (0x430, "V"), + (0x460, "M", "ѡ"), + (0x461, "V"), + (0x462, "M", "ѣ"), + (0x463, "V"), + (0x464, "M", "ѥ"), + (0x465, "V"), + (0x466, "M", "ѧ"), + (0x467, "V"), + (0x468, "M", "ѩ"), + (0x469, "V"), + (0x46A, "M", "ѫ"), + (0x46B, "V"), + (0x46C, "M", "ѭ"), + (0x46D, "V"), + (0x46E, "M", "ѯ"), + (0x46F, "V"), + (0x470, "M", "ѱ"), + (0x471, "V"), + (0x472, "M", "ѳ"), + (0x473, "V"), + (0x474, "M", "ѵ"), + (0x475, "V"), + (0x476, "M", "ѷ"), + (0x477, "V"), + (0x478, "M", "ѹ"), + (0x479, "V"), + (0x47A, "M", "ѻ"), + (0x47B, "V"), + (0x47C, "M", "ѽ"), + (0x47D, "V"), + (0x47E, "M", "ѿ"), + (0x47F, "V"), + (0x480, "M", "ҁ"), + (0x481, "V"), + (0x48A, "M", "ҋ"), + (0x48B, "V"), + (0x48C, "M", "ҍ"), + (0x48D, "V"), + (0x48E, "M", "ҏ"), + (0x48F, "V"), + (0x490, "M", "ґ"), + (0x491, "V"), + (0x492, "M", "ғ"), + (0x493, "V"), + (0x494, "M", "ҕ"), + (0x495, "V"), + (0x496, "M", "җ"), + (0x497, "V"), + (0x498, "M", "ҙ"), + (0x499, "V"), + (0x49A, "M", "қ"), + (0x49B, "V"), + (0x49C, "M", "ҝ"), + (0x49D, "V"), + ] + + +def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x49E, "M", "ҟ"), + (0x49F, "V"), + (0x4A0, "M", "ҡ"), + (0x4A1, "V"), + (0x4A2, "M", "ң"), + (0x4A3, "V"), + (0x4A4, "M", "ҥ"), + (0x4A5, "V"), + (0x4A6, "M", "ҧ"), + (0x4A7, "V"), + (0x4A8, "M", "ҩ"), + (0x4A9, "V"), + (0x4AA, "M", "ҫ"), + (0x4AB, "V"), + (0x4AC, "M", "ҭ"), + (0x4AD, "V"), + (0x4AE, "M", "ү"), + (0x4AF, "V"), + (0x4B0, "M", "ұ"), + (0x4B1, "V"), + (0x4B2, "M", "ҳ"), + (0x4B3, "V"), + (0x4B4, "M", "ҵ"), + (0x4B5, "V"), + (0x4B6, "M", "ҷ"), + (0x4B7, "V"), + (0x4B8, "M", "ҹ"), + (0x4B9, "V"), + (0x4BA, "M", "һ"), + (0x4BB, "V"), + (0x4BC, "M", "ҽ"), + (0x4BD, "V"), + (0x4BE, "M", "ҿ"), + (0x4BF, "V"), + (0x4C0, "M", "ӏ"), + (0x4C1, "M", "ӂ"), + (0x4C2, "V"), + (0x4C3, "M", "ӄ"), + (0x4C4, "V"), + (0x4C5, "M", "ӆ"), + (0x4C6, "V"), + (0x4C7, "M", "ӈ"), + (0x4C8, "V"), + (0x4C9, "M", "ӊ"), + (0x4CA, "V"), + (0x4CB, "M", "ӌ"), + (0x4CC, "V"), + (0x4CD, "M", "ӎ"), + (0x4CE, "V"), + (0x4D0, "M", "ӑ"), + (0x4D1, "V"), + (0x4D2, "M", "ӓ"), + (0x4D3, "V"), + (0x4D4, "M", "ӕ"), + (0x4D5, "V"), + (0x4D6, "M", "ӗ"), + (0x4D7, "V"), + (0x4D8, "M", "ә"), + (0x4D9, "V"), + (0x4DA, "M", "ӛ"), + (0x4DB, "V"), + (0x4DC, "M", "ӝ"), + (0x4DD, "V"), + (0x4DE, "M", "ӟ"), + (0x4DF, "V"), + (0x4E0, "M", "ӡ"), + (0x4E1, "V"), + (0x4E2, "M", "ӣ"), + (0x4E3, "V"), + (0x4E4, "M", "ӥ"), + (0x4E5, "V"), + (0x4E6, "M", "ӧ"), + (0x4E7, "V"), + (0x4E8, "M", "ө"), + (0x4E9, "V"), + (0x4EA, "M", "ӫ"), + (0x4EB, "V"), + (0x4EC, "M", "ӭ"), + (0x4ED, "V"), + (0x4EE, "M", "ӯ"), + (0x4EF, "V"), + (0x4F0, "M", "ӱ"), + (0x4F1, "V"), + (0x4F2, "M", "ӳ"), + (0x4F3, "V"), + (0x4F4, "M", "ӵ"), + (0x4F5, "V"), + (0x4F6, "M", "ӷ"), + (0x4F7, "V"), + (0x4F8, "M", "ӹ"), + (0x4F9, "V"), + (0x4FA, "M", "ӻ"), + (0x4FB, "V"), + (0x4FC, "M", "ӽ"), + (0x4FD, "V"), + (0x4FE, "M", "ӿ"), + (0x4FF, "V"), + (0x500, "M", "ԁ"), + (0x501, "V"), + (0x502, "M", "ԃ"), + ] + + +def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x503, "V"), + (0x504, 
"M", "ԅ"), + (0x505, "V"), + (0x506, "M", "ԇ"), + (0x507, "V"), + (0x508, "M", "ԉ"), + (0x509, "V"), + (0x50A, "M", "ԋ"), + (0x50B, "V"), + (0x50C, "M", "ԍ"), + (0x50D, "V"), + (0x50E, "M", "ԏ"), + (0x50F, "V"), + (0x510, "M", "ԑ"), + (0x511, "V"), + (0x512, "M", "ԓ"), + (0x513, "V"), + (0x514, "M", "ԕ"), + (0x515, "V"), + (0x516, "M", "ԗ"), + (0x517, "V"), + (0x518, "M", "ԙ"), + (0x519, "V"), + (0x51A, "M", "ԛ"), + (0x51B, "V"), + (0x51C, "M", "ԝ"), + (0x51D, "V"), + (0x51E, "M", "ԟ"), + (0x51F, "V"), + (0x520, "M", "ԡ"), + (0x521, "V"), + (0x522, "M", "ԣ"), + (0x523, "V"), + (0x524, "M", "ԥ"), + (0x525, "V"), + (0x526, "M", "ԧ"), + (0x527, "V"), + (0x528, "M", "ԩ"), + (0x529, "V"), + (0x52A, "M", "ԫ"), + (0x52B, "V"), + (0x52C, "M", "ԭ"), + (0x52D, "V"), + (0x52E, "M", "ԯ"), + (0x52F, "V"), + (0x530, "X"), + (0x531, "M", "ա"), + (0x532, "M", "բ"), + (0x533, "M", "գ"), + (0x534, "M", "դ"), + (0x535, "M", "ե"), + (0x536, "M", "զ"), + (0x537, "M", "է"), + (0x538, "M", "ը"), + (0x539, "M", "թ"), + (0x53A, "M", "ժ"), + (0x53B, "M", "ի"), + (0x53C, "M", "լ"), + (0x53D, "M", "խ"), + (0x53E, "M", "ծ"), + (0x53F, "M", "կ"), + (0x540, "M", "հ"), + (0x541, "M", "ձ"), + (0x542, "M", "ղ"), + (0x543, "M", "ճ"), + (0x544, "M", "մ"), + (0x545, "M", "յ"), + (0x546, "M", "ն"), + (0x547, "M", "շ"), + (0x548, "M", "ո"), + (0x549, "M", "չ"), + (0x54A, "M", "պ"), + (0x54B, "M", "ջ"), + (0x54C, "M", "ռ"), + (0x54D, "M", "ս"), + (0x54E, "M", "վ"), + (0x54F, "M", "տ"), + (0x550, "M", "ր"), + (0x551, "M", "ց"), + (0x552, "M", "ւ"), + (0x553, "M", "փ"), + (0x554, "M", "ք"), + (0x555, "M", "օ"), + (0x556, "M", "ֆ"), + (0x557, "X"), + (0x559, "V"), + (0x587, "M", "եւ"), + (0x588, "V"), + (0x58B, "X"), + (0x58D, "V"), + (0x590, "X"), + (0x591, "V"), + (0x5C8, "X"), + (0x5D0, "V"), + (0x5EB, "X"), + (0x5EF, "V"), + (0x5F5, "X"), + (0x606, "V"), + (0x61C, "X"), + (0x61D, "V"), + ] + + +def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x675, "M", "اٴ"), + (0x676, "M", "وٴ"), + (0x677, "M", "ۇٴ"), + (0x678, "M", "يٴ"), + (0x679, "V"), + (0x6DD, "X"), + (0x6DE, "V"), + (0x70E, "X"), + (0x710, "V"), + (0x74B, "X"), + (0x74D, "V"), + (0x7B2, "X"), + (0x7C0, "V"), + (0x7FB, "X"), + (0x7FD, "V"), + (0x82E, "X"), + (0x830, "V"), + (0x83F, "X"), + (0x840, "V"), + (0x85C, "X"), + (0x85E, "V"), + (0x85F, "X"), + (0x860, "V"), + (0x86B, "X"), + (0x870, "V"), + (0x88F, "X"), + (0x897, "V"), + (0x8E2, "X"), + (0x8E3, "V"), + (0x958, "M", "क़"), + (0x959, "M", "ख़"), + (0x95A, "M", "ग़"), + (0x95B, "M", "ज़"), + (0x95C, "M", "ड़"), + (0x95D, "M", "ढ़"), + (0x95E, "M", "फ़"), + (0x95F, "M", "य़"), + (0x960, "V"), + (0x984, "X"), + (0x985, "V"), + (0x98D, "X"), + (0x98F, "V"), + (0x991, "X"), + (0x993, "V"), + (0x9A9, "X"), + (0x9AA, "V"), + (0x9B1, "X"), + (0x9B2, "V"), + (0x9B3, "X"), + (0x9B6, "V"), + (0x9BA, "X"), + (0x9BC, "V"), + (0x9C5, "X"), + (0x9C7, "V"), + (0x9C9, "X"), + (0x9CB, "V"), + (0x9CF, "X"), + (0x9D7, "V"), + (0x9D8, "X"), + (0x9DC, "M", "ড়"), + (0x9DD, "M", "ঢ়"), + (0x9DE, "X"), + (0x9DF, "M", "য়"), + (0x9E0, "V"), + (0x9E4, "X"), + (0x9E6, "V"), + (0x9FF, "X"), + (0xA01, "V"), + (0xA04, "X"), + (0xA05, "V"), + (0xA0B, "X"), + (0xA0F, "V"), + (0xA11, "X"), + (0xA13, "V"), + (0xA29, "X"), + (0xA2A, "V"), + (0xA31, "X"), + (0xA32, "V"), + (0xA33, "M", "ਲ਼"), + (0xA34, "X"), + (0xA35, "V"), + (0xA36, "M", "ਸ਼"), + (0xA37, "X"), + (0xA38, "V"), + (0xA3A, "X"), + (0xA3C, "V"), + (0xA3D, "X"), + (0xA3E, "V"), + (0xA43, "X"), + (0xA47, "V"), + (0xA49, "X"), + (0xA4B, "V"), + (0xA4E, 
"X"), + (0xA51, "V"), + (0xA52, "X"), + (0xA59, "M", "ਖ਼"), + (0xA5A, "M", "ਗ਼"), + (0xA5B, "M", "ਜ਼"), + (0xA5C, "V"), + (0xA5D, "X"), + ] + + +def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA5E, "M", "ਫ਼"), + (0xA5F, "X"), + (0xA66, "V"), + (0xA77, "X"), + (0xA81, "V"), + (0xA84, "X"), + (0xA85, "V"), + (0xA8E, "X"), + (0xA8F, "V"), + (0xA92, "X"), + (0xA93, "V"), + (0xAA9, "X"), + (0xAAA, "V"), + (0xAB1, "X"), + (0xAB2, "V"), + (0xAB4, "X"), + (0xAB5, "V"), + (0xABA, "X"), + (0xABC, "V"), + (0xAC6, "X"), + (0xAC7, "V"), + (0xACA, "X"), + (0xACB, "V"), + (0xACE, "X"), + (0xAD0, "V"), + (0xAD1, "X"), + (0xAE0, "V"), + (0xAE4, "X"), + (0xAE6, "V"), + (0xAF2, "X"), + (0xAF9, "V"), + (0xB00, "X"), + (0xB01, "V"), + (0xB04, "X"), + (0xB05, "V"), + (0xB0D, "X"), + (0xB0F, "V"), + (0xB11, "X"), + (0xB13, "V"), + (0xB29, "X"), + (0xB2A, "V"), + (0xB31, "X"), + (0xB32, "V"), + (0xB34, "X"), + (0xB35, "V"), + (0xB3A, "X"), + (0xB3C, "V"), + (0xB45, "X"), + (0xB47, "V"), + (0xB49, "X"), + (0xB4B, "V"), + (0xB4E, "X"), + (0xB55, "V"), + (0xB58, "X"), + (0xB5C, "M", "ଡ଼"), + (0xB5D, "M", "ଢ଼"), + (0xB5E, "X"), + (0xB5F, "V"), + (0xB64, "X"), + (0xB66, "V"), + (0xB78, "X"), + (0xB82, "V"), + (0xB84, "X"), + (0xB85, "V"), + (0xB8B, "X"), + (0xB8E, "V"), + (0xB91, "X"), + (0xB92, "V"), + (0xB96, "X"), + (0xB99, "V"), + (0xB9B, "X"), + (0xB9C, "V"), + (0xB9D, "X"), + (0xB9E, "V"), + (0xBA0, "X"), + (0xBA3, "V"), + (0xBA5, "X"), + (0xBA8, "V"), + (0xBAB, "X"), + (0xBAE, "V"), + (0xBBA, "X"), + (0xBBE, "V"), + (0xBC3, "X"), + (0xBC6, "V"), + (0xBC9, "X"), + (0xBCA, "V"), + (0xBCE, "X"), + (0xBD0, "V"), + (0xBD1, "X"), + (0xBD7, "V"), + (0xBD8, "X"), + (0xBE6, "V"), + (0xBFB, "X"), + (0xC00, "V"), + (0xC0D, "X"), + (0xC0E, "V"), + (0xC11, "X"), + (0xC12, "V"), + (0xC29, "X"), + (0xC2A, "V"), + ] + + +def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xC3A, "X"), + (0xC3C, "V"), + (0xC45, "X"), + (0xC46, "V"), + (0xC49, "X"), + (0xC4A, "V"), + (0xC4E, "X"), + (0xC55, "V"), + (0xC57, "X"), + (0xC58, "V"), + (0xC5B, "X"), + (0xC5D, "V"), + (0xC5E, "X"), + (0xC60, "V"), + (0xC64, "X"), + (0xC66, "V"), + (0xC70, "X"), + (0xC77, "V"), + (0xC8D, "X"), + (0xC8E, "V"), + (0xC91, "X"), + (0xC92, "V"), + (0xCA9, "X"), + (0xCAA, "V"), + (0xCB4, "X"), + (0xCB5, "V"), + (0xCBA, "X"), + (0xCBC, "V"), + (0xCC5, "X"), + (0xCC6, "V"), + (0xCC9, "X"), + (0xCCA, "V"), + (0xCCE, "X"), + (0xCD5, "V"), + (0xCD7, "X"), + (0xCDD, "V"), + (0xCDF, "X"), + (0xCE0, "V"), + (0xCE4, "X"), + (0xCE6, "V"), + (0xCF0, "X"), + (0xCF1, "V"), + (0xCF4, "X"), + (0xD00, "V"), + (0xD0D, "X"), + (0xD0E, "V"), + (0xD11, "X"), + (0xD12, "V"), + (0xD45, "X"), + (0xD46, "V"), + (0xD49, "X"), + (0xD4A, "V"), + (0xD50, "X"), + (0xD54, "V"), + (0xD64, "X"), + (0xD66, "V"), + (0xD80, "X"), + (0xD81, "V"), + (0xD84, "X"), + (0xD85, "V"), + (0xD97, "X"), + (0xD9A, "V"), + (0xDB2, "X"), + (0xDB3, "V"), + (0xDBC, "X"), + (0xDBD, "V"), + (0xDBE, "X"), + (0xDC0, "V"), + (0xDC7, "X"), + (0xDCA, "V"), + (0xDCB, "X"), + (0xDCF, "V"), + (0xDD5, "X"), + (0xDD6, "V"), + (0xDD7, "X"), + (0xDD8, "V"), + (0xDE0, "X"), + (0xDE6, "V"), + (0xDF0, "X"), + (0xDF2, "V"), + (0xDF5, "X"), + (0xE01, "V"), + (0xE33, "M", "ํา"), + (0xE34, "V"), + (0xE3B, "X"), + (0xE3F, "V"), + (0xE5C, "X"), + (0xE81, "V"), + (0xE83, "X"), + (0xE84, "V"), + (0xE85, "X"), + (0xE86, "V"), + (0xE8B, "X"), + (0xE8C, "V"), + (0xEA4, "X"), + (0xEA5, "V"), + (0xEA6, "X"), + (0xEA7, "V"), + (0xEB3, "M", "ໍາ"), + (0xEB4, "V"), + ] + + 
+def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xEBE, "X"), + (0xEC0, "V"), + (0xEC5, "X"), + (0xEC6, "V"), + (0xEC7, "X"), + (0xEC8, "V"), + (0xECF, "X"), + (0xED0, "V"), + (0xEDA, "X"), + (0xEDC, "M", "ຫນ"), + (0xEDD, "M", "ຫມ"), + (0xEDE, "V"), + (0xEE0, "X"), + (0xF00, "V"), + (0xF0C, "M", "་"), + (0xF0D, "V"), + (0xF43, "M", "གྷ"), + (0xF44, "V"), + (0xF48, "X"), + (0xF49, "V"), + (0xF4D, "M", "ཌྷ"), + (0xF4E, "V"), + (0xF52, "M", "དྷ"), + (0xF53, "V"), + (0xF57, "M", "བྷ"), + (0xF58, "V"), + (0xF5C, "M", "ཛྷ"), + (0xF5D, "V"), + (0xF69, "M", "ཀྵ"), + (0xF6A, "V"), + (0xF6D, "X"), + (0xF71, "V"), + (0xF73, "M", "ཱི"), + (0xF74, "V"), + (0xF75, "M", "ཱུ"), + (0xF76, "M", "ྲྀ"), + (0xF77, "M", "ྲཱྀ"), + (0xF78, "M", "ླྀ"), + (0xF79, "M", "ླཱྀ"), + (0xF7A, "V"), + (0xF81, "M", "ཱྀ"), + (0xF82, "V"), + (0xF93, "M", "ྒྷ"), + (0xF94, "V"), + (0xF98, "X"), + (0xF99, "V"), + (0xF9D, "M", "ྜྷ"), + (0xF9E, "V"), + (0xFA2, "M", "ྡྷ"), + (0xFA3, "V"), + (0xFA7, "M", "ྦྷ"), + (0xFA8, "V"), + (0xFAC, "M", "ྫྷ"), + (0xFAD, "V"), + (0xFB9, "M", "ྐྵ"), + (0xFBA, "V"), + (0xFBD, "X"), + (0xFBE, "V"), + (0xFCD, "X"), + (0xFCE, "V"), + (0xFDB, "X"), + (0x1000, "V"), + (0x10A0, "M", "ⴀ"), + (0x10A1, "M", "ⴁ"), + (0x10A2, "M", "ⴂ"), + (0x10A3, "M", "ⴃ"), + (0x10A4, "M", "ⴄ"), + (0x10A5, "M", "ⴅ"), + (0x10A6, "M", "ⴆ"), + (0x10A7, "M", "ⴇ"), + (0x10A8, "M", "ⴈ"), + (0x10A9, "M", "ⴉ"), + (0x10AA, "M", "ⴊ"), + (0x10AB, "M", "ⴋ"), + (0x10AC, "M", "ⴌ"), + (0x10AD, "M", "ⴍ"), + (0x10AE, "M", "ⴎ"), + (0x10AF, "M", "ⴏ"), + (0x10B0, "M", "ⴐ"), + (0x10B1, "M", "ⴑ"), + (0x10B2, "M", "ⴒ"), + (0x10B3, "M", "ⴓ"), + (0x10B4, "M", "ⴔ"), + (0x10B5, "M", "ⴕ"), + (0x10B6, "M", "ⴖ"), + (0x10B7, "M", "ⴗ"), + (0x10B8, "M", "ⴘ"), + (0x10B9, "M", "ⴙ"), + (0x10BA, "M", "ⴚ"), + (0x10BB, "M", "ⴛ"), + (0x10BC, "M", "ⴜ"), + (0x10BD, "M", "ⴝ"), + (0x10BE, "M", "ⴞ"), + (0x10BF, "M", "ⴟ"), + (0x10C0, "M", "ⴠ"), + (0x10C1, "M", "ⴡ"), + (0x10C2, "M", "ⴢ"), + (0x10C3, "M", "ⴣ"), + (0x10C4, "M", "ⴤ"), + (0x10C5, "M", "ⴥ"), + ] + + +def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x10C6, "X"), + (0x10C7, "M", "ⴧ"), + (0x10C8, "X"), + (0x10CD, "M", "ⴭ"), + (0x10CE, "X"), + (0x10D0, "V"), + (0x10FC, "M", "ნ"), + (0x10FD, "V"), + (0x115F, "I"), + (0x1161, "V"), + (0x1249, "X"), + (0x124A, "V"), + (0x124E, "X"), + (0x1250, "V"), + (0x1257, "X"), + (0x1258, "V"), + (0x1259, "X"), + (0x125A, "V"), + (0x125E, "X"), + (0x1260, "V"), + (0x1289, "X"), + (0x128A, "V"), + (0x128E, "X"), + (0x1290, "V"), + (0x12B1, "X"), + (0x12B2, "V"), + (0x12B6, "X"), + (0x12B8, "V"), + (0x12BF, "X"), + (0x12C0, "V"), + (0x12C1, "X"), + (0x12C2, "V"), + (0x12C6, "X"), + (0x12C8, "V"), + (0x12D7, "X"), + (0x12D8, "V"), + (0x1311, "X"), + (0x1312, "V"), + (0x1316, "X"), + (0x1318, "V"), + (0x135B, "X"), + (0x135D, "V"), + (0x137D, "X"), + (0x1380, "V"), + (0x139A, "X"), + (0x13A0, "V"), + (0x13F6, "X"), + (0x13F8, "M", "Ᏸ"), + (0x13F9, "M", "Ᏹ"), + (0x13FA, "M", "Ᏺ"), + (0x13FB, "M", "Ᏻ"), + (0x13FC, "M", "Ᏼ"), + (0x13FD, "M", "Ᏽ"), + (0x13FE, "X"), + (0x1400, "V"), + (0x1680, "X"), + (0x1681, "V"), + (0x169D, "X"), + (0x16A0, "V"), + (0x16F9, "X"), + (0x1700, "V"), + (0x1716, "X"), + (0x171F, "V"), + (0x1737, "X"), + (0x1740, "V"), + (0x1754, "X"), + (0x1760, "V"), + (0x176D, "X"), + (0x176E, "V"), + (0x1771, "X"), + (0x1772, "V"), + (0x1774, "X"), + (0x1780, "V"), + (0x17B4, "I"), + (0x17B6, "V"), + (0x17DE, "X"), + (0x17E0, "V"), + (0x17EA, "X"), + (0x17F0, "V"), + (0x17FA, "X"), + (0x1800, "V"), 
+ (0x180B, "I"), + (0x1810, "V"), + (0x181A, "X"), + (0x1820, "V"), + (0x1879, "X"), + (0x1880, "V"), + (0x18AB, "X"), + (0x18B0, "V"), + (0x18F6, "X"), + (0x1900, "V"), + (0x191F, "X"), + (0x1920, "V"), + (0x192C, "X"), + (0x1930, "V"), + (0x193C, "X"), + (0x1940, "V"), + (0x1941, "X"), + (0x1944, "V"), + (0x196E, "X"), + ] + + +def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1970, "V"), + (0x1975, "X"), + (0x1980, "V"), + (0x19AC, "X"), + (0x19B0, "V"), + (0x19CA, "X"), + (0x19D0, "V"), + (0x19DB, "X"), + (0x19DE, "V"), + (0x1A1C, "X"), + (0x1A1E, "V"), + (0x1A5F, "X"), + (0x1A60, "V"), + (0x1A7D, "X"), + (0x1A7F, "V"), + (0x1A8A, "X"), + (0x1A90, "V"), + (0x1A9A, "X"), + (0x1AA0, "V"), + (0x1AAE, "X"), + (0x1AB0, "V"), + (0x1ACF, "X"), + (0x1B00, "V"), + (0x1B4D, "X"), + (0x1B4E, "V"), + (0x1BF4, "X"), + (0x1BFC, "V"), + (0x1C38, "X"), + (0x1C3B, "V"), + (0x1C4A, "X"), + (0x1C4D, "V"), + (0x1C80, "M", "в"), + (0x1C81, "M", "д"), + (0x1C82, "M", "о"), + (0x1C83, "M", "с"), + (0x1C84, "M", "т"), + (0x1C86, "M", "ъ"), + (0x1C87, "M", "ѣ"), + (0x1C88, "M", "ꙋ"), + (0x1C89, "M", "ᲊ"), + (0x1C8A, "V"), + (0x1C8B, "X"), + (0x1C90, "M", "ა"), + (0x1C91, "M", "ბ"), + (0x1C92, "M", "გ"), + (0x1C93, "M", "დ"), + (0x1C94, "M", "ე"), + (0x1C95, "M", "ვ"), + (0x1C96, "M", "ზ"), + (0x1C97, "M", "თ"), + (0x1C98, "M", "ი"), + (0x1C99, "M", "კ"), + (0x1C9A, "M", "ლ"), + (0x1C9B, "M", "მ"), + (0x1C9C, "M", "ნ"), + (0x1C9D, "M", "ო"), + (0x1C9E, "M", "პ"), + (0x1C9F, "M", "ჟ"), + (0x1CA0, "M", "რ"), + (0x1CA1, "M", "ს"), + (0x1CA2, "M", "ტ"), + (0x1CA3, "M", "უ"), + (0x1CA4, "M", "ფ"), + (0x1CA5, "M", "ქ"), + (0x1CA6, "M", "ღ"), + (0x1CA7, "M", "ყ"), + (0x1CA8, "M", "შ"), + (0x1CA9, "M", "ჩ"), + (0x1CAA, "M", "ც"), + (0x1CAB, "M", "ძ"), + (0x1CAC, "M", "წ"), + (0x1CAD, "M", "ჭ"), + (0x1CAE, "M", "ხ"), + (0x1CAF, "M", "ჯ"), + (0x1CB0, "M", "ჰ"), + (0x1CB1, "M", "ჱ"), + (0x1CB2, "M", "ჲ"), + (0x1CB3, "M", "ჳ"), + (0x1CB4, "M", "ჴ"), + (0x1CB5, "M", "ჵ"), + (0x1CB6, "M", "ჶ"), + (0x1CB7, "M", "ჷ"), + (0x1CB8, "M", "ჸ"), + (0x1CB9, "M", "ჹ"), + (0x1CBA, "M", "ჺ"), + (0x1CBB, "X"), + (0x1CBD, "M", "ჽ"), + (0x1CBE, "M", "ჾ"), + (0x1CBF, "M", "ჿ"), + (0x1CC0, "V"), + (0x1CC8, "X"), + (0x1CD0, "V"), + (0x1CFB, "X"), + (0x1D00, "V"), + (0x1D2C, "M", "a"), + (0x1D2D, "M", "æ"), + (0x1D2E, "M", "b"), + (0x1D2F, "V"), + (0x1D30, "M", "d"), + (0x1D31, "M", "e"), + ] + + +def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D32, "M", "ǝ"), + (0x1D33, "M", "g"), + (0x1D34, "M", "h"), + (0x1D35, "M", "i"), + (0x1D36, "M", "j"), + (0x1D37, "M", "k"), + (0x1D38, "M", "l"), + (0x1D39, "M", "m"), + (0x1D3A, "M", "n"), + (0x1D3B, "V"), + (0x1D3C, "M", "o"), + (0x1D3D, "M", "ȣ"), + (0x1D3E, "M", "p"), + (0x1D3F, "M", "r"), + (0x1D40, "M", "t"), + (0x1D41, "M", "u"), + (0x1D42, "M", "w"), + (0x1D43, "M", "a"), + (0x1D44, "M", "ɐ"), + (0x1D45, "M", "ɑ"), + (0x1D46, "M", "ᴂ"), + (0x1D47, "M", "b"), + (0x1D48, "M", "d"), + (0x1D49, "M", "e"), + (0x1D4A, "M", "ə"), + (0x1D4B, "M", "ɛ"), + (0x1D4C, "M", "ɜ"), + (0x1D4D, "M", "g"), + (0x1D4E, "V"), + (0x1D4F, "M", "k"), + (0x1D50, "M", "m"), + (0x1D51, "M", "ŋ"), + (0x1D52, "M", "o"), + (0x1D53, "M", "ɔ"), + (0x1D54, "M", "ᴖ"), + (0x1D55, "M", "ᴗ"), + (0x1D56, "M", "p"), + (0x1D57, "M", "t"), + (0x1D58, "M", "u"), + (0x1D59, "M", "ᴝ"), + (0x1D5A, "M", "ɯ"), + (0x1D5B, "M", "v"), + (0x1D5C, "M", "ᴥ"), + (0x1D5D, "M", "β"), + (0x1D5E, "M", "γ"), + (0x1D5F, "M", "δ"), + (0x1D60, "M", "φ"), + (0x1D61, "M", "χ"), + 
(0x1D62, "M", "i"), + (0x1D63, "M", "r"), + (0x1D64, "M", "u"), + (0x1D65, "M", "v"), + (0x1D66, "M", "β"), + (0x1D67, "M", "γ"), + (0x1D68, "M", "ρ"), + (0x1D69, "M", "φ"), + (0x1D6A, "M", "χ"), + (0x1D6B, "V"), + (0x1D78, "M", "н"), + (0x1D79, "V"), + (0x1D9B, "M", "ɒ"), + (0x1D9C, "M", "c"), + (0x1D9D, "M", "ɕ"), + (0x1D9E, "M", "ð"), + (0x1D9F, "M", "ɜ"), + (0x1DA0, "M", "f"), + (0x1DA1, "M", "ɟ"), + (0x1DA2, "M", "ɡ"), + (0x1DA3, "M", "ɥ"), + (0x1DA4, "M", "ɨ"), + (0x1DA5, "M", "ɩ"), + (0x1DA6, "M", "ɪ"), + (0x1DA7, "M", "ᵻ"), + (0x1DA8, "M", "ʝ"), + (0x1DA9, "M", "ɭ"), + (0x1DAA, "M", "ᶅ"), + (0x1DAB, "M", "ʟ"), + (0x1DAC, "M", "ɱ"), + (0x1DAD, "M", "ɰ"), + (0x1DAE, "M", "ɲ"), + (0x1DAF, "M", "ɳ"), + (0x1DB0, "M", "ɴ"), + (0x1DB1, "M", "ɵ"), + (0x1DB2, "M", "ɸ"), + (0x1DB3, "M", "ʂ"), + (0x1DB4, "M", "ʃ"), + (0x1DB5, "M", "ƫ"), + (0x1DB6, "M", "ʉ"), + (0x1DB7, "M", "ʊ"), + (0x1DB8, "M", "ᴜ"), + (0x1DB9, "M", "ʋ"), + (0x1DBA, "M", "ʌ"), + (0x1DBB, "M", "z"), + (0x1DBC, "M", "ʐ"), + (0x1DBD, "M", "ʑ"), + (0x1DBE, "M", "ʒ"), + (0x1DBF, "M", "θ"), + (0x1DC0, "V"), + (0x1E00, "M", "ḁ"), + (0x1E01, "V"), + ] + + +def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E02, "M", "ḃ"), + (0x1E03, "V"), + (0x1E04, "M", "ḅ"), + (0x1E05, "V"), + (0x1E06, "M", "ḇ"), + (0x1E07, "V"), + (0x1E08, "M", "ḉ"), + (0x1E09, "V"), + (0x1E0A, "M", "ḋ"), + (0x1E0B, "V"), + (0x1E0C, "M", "ḍ"), + (0x1E0D, "V"), + (0x1E0E, "M", "ḏ"), + (0x1E0F, "V"), + (0x1E10, "M", "ḑ"), + (0x1E11, "V"), + (0x1E12, "M", "ḓ"), + (0x1E13, "V"), + (0x1E14, "M", "ḕ"), + (0x1E15, "V"), + (0x1E16, "M", "ḗ"), + (0x1E17, "V"), + (0x1E18, "M", "ḙ"), + (0x1E19, "V"), + (0x1E1A, "M", "ḛ"), + (0x1E1B, "V"), + (0x1E1C, "M", "ḝ"), + (0x1E1D, "V"), + (0x1E1E, "M", "ḟ"), + (0x1E1F, "V"), + (0x1E20, "M", "ḡ"), + (0x1E21, "V"), + (0x1E22, "M", "ḣ"), + (0x1E23, "V"), + (0x1E24, "M", "ḥ"), + (0x1E25, "V"), + (0x1E26, "M", "ḧ"), + (0x1E27, "V"), + (0x1E28, "M", "ḩ"), + (0x1E29, "V"), + (0x1E2A, "M", "ḫ"), + (0x1E2B, "V"), + (0x1E2C, "M", "ḭ"), + (0x1E2D, "V"), + (0x1E2E, "M", "ḯ"), + (0x1E2F, "V"), + (0x1E30, "M", "ḱ"), + (0x1E31, "V"), + (0x1E32, "M", "ḳ"), + (0x1E33, "V"), + (0x1E34, "M", "ḵ"), + (0x1E35, "V"), + (0x1E36, "M", "ḷ"), + (0x1E37, "V"), + (0x1E38, "M", "ḹ"), + (0x1E39, "V"), + (0x1E3A, "M", "ḻ"), + (0x1E3B, "V"), + (0x1E3C, "M", "ḽ"), + (0x1E3D, "V"), + (0x1E3E, "M", "ḿ"), + (0x1E3F, "V"), + (0x1E40, "M", "ṁ"), + (0x1E41, "V"), + (0x1E42, "M", "ṃ"), + (0x1E43, "V"), + (0x1E44, "M", "ṅ"), + (0x1E45, "V"), + (0x1E46, "M", "ṇ"), + (0x1E47, "V"), + (0x1E48, "M", "ṉ"), + (0x1E49, "V"), + (0x1E4A, "M", "ṋ"), + (0x1E4B, "V"), + (0x1E4C, "M", "ṍ"), + (0x1E4D, "V"), + (0x1E4E, "M", "ṏ"), + (0x1E4F, "V"), + (0x1E50, "M", "ṑ"), + (0x1E51, "V"), + (0x1E52, "M", "ṓ"), + (0x1E53, "V"), + (0x1E54, "M", "ṕ"), + (0x1E55, "V"), + (0x1E56, "M", "ṗ"), + (0x1E57, "V"), + (0x1E58, "M", "ṙ"), + (0x1E59, "V"), + (0x1E5A, "M", "ṛ"), + (0x1E5B, "V"), + (0x1E5C, "M", "ṝ"), + (0x1E5D, "V"), + (0x1E5E, "M", "ṟ"), + (0x1E5F, "V"), + (0x1E60, "M", "ṡ"), + (0x1E61, "V"), + (0x1E62, "M", "ṣ"), + (0x1E63, "V"), + (0x1E64, "M", "ṥ"), + (0x1E65, "V"), + ] + + +def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E66, "M", "ṧ"), + (0x1E67, "V"), + (0x1E68, "M", "ṩ"), + (0x1E69, "V"), + (0x1E6A, "M", "ṫ"), + (0x1E6B, "V"), + (0x1E6C, "M", "ṭ"), + (0x1E6D, "V"), + (0x1E6E, "M", "ṯ"), + (0x1E6F, "V"), + (0x1E70, "M", "ṱ"), + (0x1E71, "V"), + (0x1E72, "M", "ṳ"), + (0x1E73, "V"), + (0x1E74, "M", "ṵ"), + 
(0x1E75, "V"), + (0x1E76, "M", "ṷ"), + (0x1E77, "V"), + (0x1E78, "M", "ṹ"), + (0x1E79, "V"), + (0x1E7A, "M", "ṻ"), + (0x1E7B, "V"), + (0x1E7C, "M", "ṽ"), + (0x1E7D, "V"), + (0x1E7E, "M", "ṿ"), + (0x1E7F, "V"), + (0x1E80, "M", "ẁ"), + (0x1E81, "V"), + (0x1E82, "M", "ẃ"), + (0x1E83, "V"), + (0x1E84, "M", "ẅ"), + (0x1E85, "V"), + (0x1E86, "M", "ẇ"), + (0x1E87, "V"), + (0x1E88, "M", "ẉ"), + (0x1E89, "V"), + (0x1E8A, "M", "ẋ"), + (0x1E8B, "V"), + (0x1E8C, "M", "ẍ"), + (0x1E8D, "V"), + (0x1E8E, "M", "ẏ"), + (0x1E8F, "V"), + (0x1E90, "M", "ẑ"), + (0x1E91, "V"), + (0x1E92, "M", "ẓ"), + (0x1E93, "V"), + (0x1E94, "M", "ẕ"), + (0x1E95, "V"), + (0x1E9A, "M", "aʾ"), + (0x1E9B, "M", "ṡ"), + (0x1E9C, "V"), + (0x1E9E, "M", "ß"), + (0x1E9F, "V"), + (0x1EA0, "M", "ạ"), + (0x1EA1, "V"), + (0x1EA2, "M", "ả"), + (0x1EA3, "V"), + (0x1EA4, "M", "ấ"), + (0x1EA5, "V"), + (0x1EA6, "M", "ầ"), + (0x1EA7, "V"), + (0x1EA8, "M", "ẩ"), + (0x1EA9, "V"), + (0x1EAA, "M", "ẫ"), + (0x1EAB, "V"), + (0x1EAC, "M", "ậ"), + (0x1EAD, "V"), + (0x1EAE, "M", "ắ"), + (0x1EAF, "V"), + (0x1EB0, "M", "ằ"), + (0x1EB1, "V"), + (0x1EB2, "M", "ẳ"), + (0x1EB3, "V"), + (0x1EB4, "M", "ẵ"), + (0x1EB5, "V"), + (0x1EB6, "M", "ặ"), + (0x1EB7, "V"), + (0x1EB8, "M", "ẹ"), + (0x1EB9, "V"), + (0x1EBA, "M", "ẻ"), + (0x1EBB, "V"), + (0x1EBC, "M", "ẽ"), + (0x1EBD, "V"), + (0x1EBE, "M", "ế"), + (0x1EBF, "V"), + (0x1EC0, "M", "ề"), + (0x1EC1, "V"), + (0x1EC2, "M", "ể"), + (0x1EC3, "V"), + (0x1EC4, "M", "ễ"), + (0x1EC5, "V"), + (0x1EC6, "M", "ệ"), + (0x1EC7, "V"), + (0x1EC8, "M", "ỉ"), + (0x1EC9, "V"), + (0x1ECA, "M", "ị"), + (0x1ECB, "V"), + (0x1ECC, "M", "ọ"), + (0x1ECD, "V"), + (0x1ECE, "M", "ỏ"), + ] + + +def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1ECF, "V"), + (0x1ED0, "M", "ố"), + (0x1ED1, "V"), + (0x1ED2, "M", "ồ"), + (0x1ED3, "V"), + (0x1ED4, "M", "ổ"), + (0x1ED5, "V"), + (0x1ED6, "M", "ỗ"), + (0x1ED7, "V"), + (0x1ED8, "M", "ộ"), + (0x1ED9, "V"), + (0x1EDA, "M", "ớ"), + (0x1EDB, "V"), + (0x1EDC, "M", "ờ"), + (0x1EDD, "V"), + (0x1EDE, "M", "ở"), + (0x1EDF, "V"), + (0x1EE0, "M", "ỡ"), + (0x1EE1, "V"), + (0x1EE2, "M", "ợ"), + (0x1EE3, "V"), + (0x1EE4, "M", "ụ"), + (0x1EE5, "V"), + (0x1EE6, "M", "ủ"), + (0x1EE7, "V"), + (0x1EE8, "M", "ứ"), + (0x1EE9, "V"), + (0x1EEA, "M", "ừ"), + (0x1EEB, "V"), + (0x1EEC, "M", "ử"), + (0x1EED, "V"), + (0x1EEE, "M", "ữ"), + (0x1EEF, "V"), + (0x1EF0, "M", "ự"), + (0x1EF1, "V"), + (0x1EF2, "M", "ỳ"), + (0x1EF3, "V"), + (0x1EF4, "M", "ỵ"), + (0x1EF5, "V"), + (0x1EF6, "M", "ỷ"), + (0x1EF7, "V"), + (0x1EF8, "M", "ỹ"), + (0x1EF9, "V"), + (0x1EFA, "M", "ỻ"), + (0x1EFB, "V"), + (0x1EFC, "M", "ỽ"), + (0x1EFD, "V"), + (0x1EFE, "M", "ỿ"), + (0x1EFF, "V"), + (0x1F08, "M", "ἀ"), + (0x1F09, "M", "ἁ"), + (0x1F0A, "M", "ἂ"), + (0x1F0B, "M", "ἃ"), + (0x1F0C, "M", "ἄ"), + (0x1F0D, "M", "ἅ"), + (0x1F0E, "M", "ἆ"), + (0x1F0F, "M", "ἇ"), + (0x1F10, "V"), + (0x1F16, "X"), + (0x1F18, "M", "ἐ"), + (0x1F19, "M", "ἑ"), + (0x1F1A, "M", "ἒ"), + (0x1F1B, "M", "ἓ"), + (0x1F1C, "M", "ἔ"), + (0x1F1D, "M", "ἕ"), + (0x1F1E, "X"), + (0x1F20, "V"), + (0x1F28, "M", "ἠ"), + (0x1F29, "M", "ἡ"), + (0x1F2A, "M", "ἢ"), + (0x1F2B, "M", "ἣ"), + (0x1F2C, "M", "ἤ"), + (0x1F2D, "M", "ἥ"), + (0x1F2E, "M", "ἦ"), + (0x1F2F, "M", "ἧ"), + (0x1F30, "V"), + (0x1F38, "M", "ἰ"), + (0x1F39, "M", "ἱ"), + (0x1F3A, "M", "ἲ"), + (0x1F3B, "M", "ἳ"), + (0x1F3C, "M", "ἴ"), + (0x1F3D, "M", "ἵ"), + (0x1F3E, "M", "ἶ"), + (0x1F3F, "M", "ἷ"), + (0x1F40, "V"), + (0x1F46, "X"), + (0x1F48, "M", "ὀ"), + (0x1F49, "M", "ὁ"), + (0x1F4A, "M", "ὂ"), + 
(0x1F4B, "M", "ὃ"), + (0x1F4C, "M", "ὄ"), + (0x1F4D, "M", "ὅ"), + (0x1F4E, "X"), + (0x1F50, "V"), + (0x1F58, "X"), + (0x1F59, "M", "ὑ"), + (0x1F5A, "X"), + (0x1F5B, "M", "ὓ"), + (0x1F5C, "X"), + (0x1F5D, "M", "ὕ"), + ] + + +def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1F5E, "X"), + (0x1F5F, "M", "ὗ"), + (0x1F60, "V"), + (0x1F68, "M", "ὠ"), + (0x1F69, "M", "ὡ"), + (0x1F6A, "M", "ὢ"), + (0x1F6B, "M", "ὣ"), + (0x1F6C, "M", "ὤ"), + (0x1F6D, "M", "ὥ"), + (0x1F6E, "M", "ὦ"), + (0x1F6F, "M", "ὧ"), + (0x1F70, "V"), + (0x1F71, "M", "ά"), + (0x1F72, "V"), + (0x1F73, "M", "έ"), + (0x1F74, "V"), + (0x1F75, "M", "ή"), + (0x1F76, "V"), + (0x1F77, "M", "ί"), + (0x1F78, "V"), + (0x1F79, "M", "ό"), + (0x1F7A, "V"), + (0x1F7B, "M", "ύ"), + (0x1F7C, "V"), + (0x1F7D, "M", "ώ"), + (0x1F7E, "X"), + (0x1F80, "M", "ἀι"), + (0x1F81, "M", "ἁι"), + (0x1F82, "M", "ἂι"), + (0x1F83, "M", "ἃι"), + (0x1F84, "M", "ἄι"), + (0x1F85, "M", "ἅι"), + (0x1F86, "M", "ἆι"), + (0x1F87, "M", "ἇι"), + (0x1F88, "M", "ἀι"), + (0x1F89, "M", "ἁι"), + (0x1F8A, "M", "ἂι"), + (0x1F8B, "M", "ἃι"), + (0x1F8C, "M", "ἄι"), + (0x1F8D, "M", "ἅι"), + (0x1F8E, "M", "ἆι"), + (0x1F8F, "M", "ἇι"), + (0x1F90, "M", "ἠι"), + (0x1F91, "M", "ἡι"), + (0x1F92, "M", "ἢι"), + (0x1F93, "M", "ἣι"), + (0x1F94, "M", "ἤι"), + (0x1F95, "M", "ἥι"), + (0x1F96, "M", "ἦι"), + (0x1F97, "M", "ἧι"), + (0x1F98, "M", "ἠι"), + (0x1F99, "M", "ἡι"), + (0x1F9A, "M", "ἢι"), + (0x1F9B, "M", "ἣι"), + (0x1F9C, "M", "ἤι"), + (0x1F9D, "M", "ἥι"), + (0x1F9E, "M", "ἦι"), + (0x1F9F, "M", "ἧι"), + (0x1FA0, "M", "ὠι"), + (0x1FA1, "M", "ὡι"), + (0x1FA2, "M", "ὢι"), + (0x1FA3, "M", "ὣι"), + (0x1FA4, "M", "ὤι"), + (0x1FA5, "M", "ὥι"), + (0x1FA6, "M", "ὦι"), + (0x1FA7, "M", "ὧι"), + (0x1FA8, "M", "ὠι"), + (0x1FA9, "M", "ὡι"), + (0x1FAA, "M", "ὢι"), + (0x1FAB, "M", "ὣι"), + (0x1FAC, "M", "ὤι"), + (0x1FAD, "M", "ὥι"), + (0x1FAE, "M", "ὦι"), + (0x1FAF, "M", "ὧι"), + (0x1FB0, "V"), + (0x1FB2, "M", "ὰι"), + (0x1FB3, "M", "αι"), + (0x1FB4, "M", "άι"), + (0x1FB5, "X"), + (0x1FB6, "V"), + (0x1FB7, "M", "ᾶι"), + (0x1FB8, "M", "ᾰ"), + (0x1FB9, "M", "ᾱ"), + (0x1FBA, "M", "ὰ"), + (0x1FBB, "M", "ά"), + (0x1FBC, "M", "αι"), + (0x1FBD, "M", " ̓"), + (0x1FBE, "M", "ι"), + (0x1FBF, "M", " ̓"), + (0x1FC0, "M", " ͂"), + (0x1FC1, "M", " ̈͂"), + (0x1FC2, "M", "ὴι"), + (0x1FC3, "M", "ηι"), + (0x1FC4, "M", "ήι"), + (0x1FC5, "X"), + (0x1FC6, "V"), + (0x1FC7, "M", "ῆι"), + (0x1FC8, "M", "ὲ"), + (0x1FC9, "M", "έ"), + (0x1FCA, "M", "ὴ"), + ] + + +def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1FCB, "M", "ή"), + (0x1FCC, "M", "ηι"), + (0x1FCD, "M", " ̓̀"), + (0x1FCE, "M", " ̓́"), + (0x1FCF, "M", " ̓͂"), + (0x1FD0, "V"), + (0x1FD3, "M", "ΐ"), + (0x1FD4, "X"), + (0x1FD6, "V"), + (0x1FD8, "M", "ῐ"), + (0x1FD9, "M", "ῑ"), + (0x1FDA, "M", "ὶ"), + (0x1FDB, "M", "ί"), + (0x1FDC, "X"), + (0x1FDD, "M", " ̔̀"), + (0x1FDE, "M", " ̔́"), + (0x1FDF, "M", " ̔͂"), + (0x1FE0, "V"), + (0x1FE3, "M", "ΰ"), + (0x1FE4, "V"), + (0x1FE8, "M", "ῠ"), + (0x1FE9, "M", "ῡ"), + (0x1FEA, "M", "ὺ"), + (0x1FEB, "M", "ύ"), + (0x1FEC, "M", "ῥ"), + (0x1FED, "M", " ̈̀"), + (0x1FEE, "M", " ̈́"), + (0x1FEF, "M", "`"), + (0x1FF0, "X"), + (0x1FF2, "M", "ὼι"), + (0x1FF3, "M", "ωι"), + (0x1FF4, "M", "ώι"), + (0x1FF5, "X"), + (0x1FF6, "V"), + (0x1FF7, "M", "ῶι"), + (0x1FF8, "M", "ὸ"), + (0x1FF9, "M", "ό"), + (0x1FFA, "M", "ὼ"), + (0x1FFB, "M", "ώ"), + (0x1FFC, "M", "ωι"), + (0x1FFD, "M", " ́"), + (0x1FFE, "M", " ̔"), + (0x1FFF, "X"), + (0x2000, "M", " "), + (0x200B, "I"), + (0x200C, "D", 
""), + (0x200E, "X"), + (0x2010, "V"), + (0x2011, "M", "‐"), + (0x2012, "V"), + (0x2017, "M", " ̳"), + (0x2018, "V"), + (0x2024, "X"), + (0x2027, "V"), + (0x2028, "X"), + (0x202F, "M", " "), + (0x2030, "V"), + (0x2033, "M", "′′"), + (0x2034, "M", "′′′"), + (0x2035, "V"), + (0x2036, "M", "‵‵"), + (0x2037, "M", "‵‵‵"), + (0x2038, "V"), + (0x203C, "M", "!!"), + (0x203D, "V"), + (0x203E, "M", " ̅"), + (0x203F, "V"), + (0x2047, "M", "??"), + (0x2048, "M", "?!"), + (0x2049, "M", "!?"), + (0x204A, "V"), + (0x2057, "M", "′′′′"), + (0x2058, "V"), + (0x205F, "M", " "), + (0x2060, "I"), + (0x2065, "X"), + (0x206A, "I"), + (0x2070, "M", "0"), + (0x2071, "M", "i"), + (0x2072, "X"), + (0x2074, "M", "4"), + (0x2075, "M", "5"), + (0x2076, "M", "6"), + (0x2077, "M", "7"), + (0x2078, "M", "8"), + (0x2079, "M", "9"), + (0x207A, "M", "+"), + (0x207B, "M", "−"), + (0x207C, "M", "="), + (0x207D, "M", "("), + (0x207E, "M", ")"), + (0x207F, "M", "n"), + (0x2080, "M", "0"), + (0x2081, "M", "1"), + (0x2082, "M", "2"), + (0x2083, "M", "3"), + (0x2084, "M", "4"), + (0x2085, "M", "5"), + (0x2086, "M", "6"), + (0x2087, "M", "7"), + ] + + +def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2088, "M", "8"), + (0x2089, "M", "9"), + (0x208A, "M", "+"), + (0x208B, "M", "−"), + (0x208C, "M", "="), + (0x208D, "M", "("), + (0x208E, "M", ")"), + (0x208F, "X"), + (0x2090, "M", "a"), + (0x2091, "M", "e"), + (0x2092, "M", "o"), + (0x2093, "M", "x"), + (0x2094, "M", "ə"), + (0x2095, "M", "h"), + (0x2096, "M", "k"), + (0x2097, "M", "l"), + (0x2098, "M", "m"), + (0x2099, "M", "n"), + (0x209A, "M", "p"), + (0x209B, "M", "s"), + (0x209C, "M", "t"), + (0x209D, "X"), + (0x20A0, "V"), + (0x20A8, "M", "rs"), + (0x20A9, "V"), + (0x20C1, "X"), + (0x20D0, "V"), + (0x20F1, "X"), + (0x2100, "M", "a/c"), + (0x2101, "M", "a/s"), + (0x2102, "M", "c"), + (0x2103, "M", "°c"), + (0x2104, "V"), + (0x2105, "M", "c/o"), + (0x2106, "M", "c/u"), + (0x2107, "M", "ɛ"), + (0x2108, "V"), + (0x2109, "M", "°f"), + (0x210A, "M", "g"), + (0x210B, "M", "h"), + (0x210F, "M", "ħ"), + (0x2110, "M", "i"), + (0x2112, "M", "l"), + (0x2114, "V"), + (0x2115, "M", "n"), + (0x2116, "M", "no"), + (0x2117, "V"), + (0x2119, "M", "p"), + (0x211A, "M", "q"), + (0x211B, "M", "r"), + (0x211E, "V"), + (0x2120, "M", "sm"), + (0x2121, "M", "tel"), + (0x2122, "M", "tm"), + (0x2123, "V"), + (0x2124, "M", "z"), + (0x2125, "V"), + (0x2126, "M", "ω"), + (0x2127, "V"), + (0x2128, "M", "z"), + (0x2129, "V"), + (0x212A, "M", "k"), + (0x212B, "M", "å"), + (0x212C, "M", "b"), + (0x212D, "M", "c"), + (0x212E, "V"), + (0x212F, "M", "e"), + (0x2131, "M", "f"), + (0x2132, "M", "ⅎ"), + (0x2133, "M", "m"), + (0x2134, "M", "o"), + (0x2135, "M", "א"), + (0x2136, "M", "ב"), + (0x2137, "M", "ג"), + (0x2138, "M", "ד"), + (0x2139, "M", "i"), + (0x213A, "V"), + (0x213B, "M", "fax"), + (0x213C, "M", "π"), + (0x213D, "M", "γ"), + (0x213F, "M", "π"), + (0x2140, "M", "∑"), + (0x2141, "V"), + (0x2145, "M", "d"), + (0x2147, "M", "e"), + (0x2148, "M", "i"), + (0x2149, "M", "j"), + (0x214A, "V"), + (0x2150, "M", "1⁄7"), + (0x2151, "M", "1⁄9"), + (0x2152, "M", "1⁄10"), + (0x2153, "M", "1⁄3"), + (0x2154, "M", "2⁄3"), + (0x2155, "M", "1⁄5"), + (0x2156, "M", "2⁄5"), + (0x2157, "M", "3⁄5"), + (0x2158, "M", "4⁄5"), + (0x2159, "M", "1⁄6"), + (0x215A, "M", "5⁄6"), + (0x215B, "M", "1⁄8"), + ] + + +def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x215C, "M", "3⁄8"), + (0x215D, "M", "5⁄8"), + (0x215E, "M", "7⁄8"), + (0x215F, "M", "1⁄"), + (0x2160, "M", 
"i"), + (0x2161, "M", "ii"), + (0x2162, "M", "iii"), + (0x2163, "M", "iv"), + (0x2164, "M", "v"), + (0x2165, "M", "vi"), + (0x2166, "M", "vii"), + (0x2167, "M", "viii"), + (0x2168, "M", "ix"), + (0x2169, "M", "x"), + (0x216A, "M", "xi"), + (0x216B, "M", "xii"), + (0x216C, "M", "l"), + (0x216D, "M", "c"), + (0x216E, "M", "d"), + (0x216F, "M", "m"), + (0x2170, "M", "i"), + (0x2171, "M", "ii"), + (0x2172, "M", "iii"), + (0x2173, "M", "iv"), + (0x2174, "M", "v"), + (0x2175, "M", "vi"), + (0x2176, "M", "vii"), + (0x2177, "M", "viii"), + (0x2178, "M", "ix"), + (0x2179, "M", "x"), + (0x217A, "M", "xi"), + (0x217B, "M", "xii"), + (0x217C, "M", "l"), + (0x217D, "M", "c"), + (0x217E, "M", "d"), + (0x217F, "M", "m"), + (0x2180, "V"), + (0x2183, "M", "ↄ"), + (0x2184, "V"), + (0x2189, "M", "0⁄3"), + (0x218A, "V"), + (0x218C, "X"), + (0x2190, "V"), + (0x222C, "M", "∫∫"), + (0x222D, "M", "∫∫∫"), + (0x222E, "V"), + (0x222F, "M", "∮∮"), + (0x2230, "M", "∮∮∮"), + (0x2231, "V"), + (0x2329, "M", "〈"), + (0x232A, "M", "〉"), + (0x232B, "V"), + (0x242A, "X"), + (0x2440, "V"), + (0x244B, "X"), + (0x2460, "M", "1"), + (0x2461, "M", "2"), + (0x2462, "M", "3"), + (0x2463, "M", "4"), + (0x2464, "M", "5"), + (0x2465, "M", "6"), + (0x2466, "M", "7"), + (0x2467, "M", "8"), + (0x2468, "M", "9"), + (0x2469, "M", "10"), + (0x246A, "M", "11"), + (0x246B, "M", "12"), + (0x246C, "M", "13"), + (0x246D, "M", "14"), + (0x246E, "M", "15"), + (0x246F, "M", "16"), + (0x2470, "M", "17"), + (0x2471, "M", "18"), + (0x2472, "M", "19"), + (0x2473, "M", "20"), + (0x2474, "M", "(1)"), + (0x2475, "M", "(2)"), + (0x2476, "M", "(3)"), + (0x2477, "M", "(4)"), + (0x2478, "M", "(5)"), + (0x2479, "M", "(6)"), + (0x247A, "M", "(7)"), + (0x247B, "M", "(8)"), + (0x247C, "M", "(9)"), + (0x247D, "M", "(10)"), + (0x247E, "M", "(11)"), + (0x247F, "M", "(12)"), + (0x2480, "M", "(13)"), + (0x2481, "M", "(14)"), + (0x2482, "M", "(15)"), + (0x2483, "M", "(16)"), + (0x2484, "M", "(17)"), + (0x2485, "M", "(18)"), + (0x2486, "M", "(19)"), + (0x2487, "M", "(20)"), + (0x2488, "X"), + (0x249C, "M", "(a)"), + (0x249D, "M", "(b)"), + (0x249E, "M", "(c)"), + (0x249F, "M", "(d)"), + ] + + +def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x24A0, "M", "(e)"), + (0x24A1, "M", "(f)"), + (0x24A2, "M", "(g)"), + (0x24A3, "M", "(h)"), + (0x24A4, "M", "(i)"), + (0x24A5, "M", "(j)"), + (0x24A6, "M", "(k)"), + (0x24A7, "M", "(l)"), + (0x24A8, "M", "(m)"), + (0x24A9, "M", "(n)"), + (0x24AA, "M", "(o)"), + (0x24AB, "M", "(p)"), + (0x24AC, "M", "(q)"), + (0x24AD, "M", "(r)"), + (0x24AE, "M", "(s)"), + (0x24AF, "M", "(t)"), + (0x24B0, "M", "(u)"), + (0x24B1, "M", "(v)"), + (0x24B2, "M", "(w)"), + (0x24B3, "M", "(x)"), + (0x24B4, "M", "(y)"), + (0x24B5, "M", "(z)"), + (0x24B6, "M", "a"), + (0x24B7, "M", "b"), + (0x24B8, "M", "c"), + (0x24B9, "M", "d"), + (0x24BA, "M", "e"), + (0x24BB, "M", "f"), + (0x24BC, "M", "g"), + (0x24BD, "M", "h"), + (0x24BE, "M", "i"), + (0x24BF, "M", "j"), + (0x24C0, "M", "k"), + (0x24C1, "M", "l"), + (0x24C2, "M", "m"), + (0x24C3, "M", "n"), + (0x24C4, "M", "o"), + (0x24C5, "M", "p"), + (0x24C6, "M", "q"), + (0x24C7, "M", "r"), + (0x24C8, "M", "s"), + (0x24C9, "M", "t"), + (0x24CA, "M", "u"), + (0x24CB, "M", "v"), + (0x24CC, "M", "w"), + (0x24CD, "M", "x"), + (0x24CE, "M", "y"), + (0x24CF, "M", "z"), + (0x24D0, "M", "a"), + (0x24D1, "M", "b"), + (0x24D2, "M", "c"), + (0x24D3, "M", "d"), + (0x24D4, "M", "e"), + (0x24D5, "M", "f"), + (0x24D6, "M", "g"), + (0x24D7, "M", "h"), + (0x24D8, "M", "i"), + (0x24D9, "M", "j"), + 
(0x24DA, "M", "k"), + (0x24DB, "M", "l"), + (0x24DC, "M", "m"), + (0x24DD, "M", "n"), + (0x24DE, "M", "o"), + (0x24DF, "M", "p"), + (0x24E0, "M", "q"), + (0x24E1, "M", "r"), + (0x24E2, "M", "s"), + (0x24E3, "M", "t"), + (0x24E4, "M", "u"), + (0x24E5, "M", "v"), + (0x24E6, "M", "w"), + (0x24E7, "M", "x"), + (0x24E8, "M", "y"), + (0x24E9, "M", "z"), + (0x24EA, "M", "0"), + (0x24EB, "V"), + (0x2A0C, "M", "∫∫∫∫"), + (0x2A0D, "V"), + (0x2A74, "M", "::="), + (0x2A75, "M", "=="), + (0x2A76, "M", "==="), + (0x2A77, "V"), + (0x2ADC, "M", "⫝̸"), + (0x2ADD, "V"), + (0x2B74, "X"), + (0x2B76, "V"), + (0x2B96, "X"), + (0x2B97, "V"), + (0x2C00, "M", "ⰰ"), + (0x2C01, "M", "ⰱ"), + (0x2C02, "M", "ⰲ"), + (0x2C03, "M", "ⰳ"), + (0x2C04, "M", "ⰴ"), + (0x2C05, "M", "ⰵ"), + (0x2C06, "M", "ⰶ"), + (0x2C07, "M", "ⰷ"), + (0x2C08, "M", "ⰸ"), + (0x2C09, "M", "ⰹ"), + (0x2C0A, "M", "ⰺ"), + (0x2C0B, "M", "ⰻ"), + ] + + +def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2C0C, "M", "ⰼ"), + (0x2C0D, "M", "ⰽ"), + (0x2C0E, "M", "ⰾ"), + (0x2C0F, "M", "ⰿ"), + (0x2C10, "M", "ⱀ"), + (0x2C11, "M", "ⱁ"), + (0x2C12, "M", "ⱂ"), + (0x2C13, "M", "ⱃ"), + (0x2C14, "M", "ⱄ"), + (0x2C15, "M", "ⱅ"), + (0x2C16, "M", "ⱆ"), + (0x2C17, "M", "ⱇ"), + (0x2C18, "M", "ⱈ"), + (0x2C19, "M", "ⱉ"), + (0x2C1A, "M", "ⱊ"), + (0x2C1B, "M", "ⱋ"), + (0x2C1C, "M", "ⱌ"), + (0x2C1D, "M", "ⱍ"), + (0x2C1E, "M", "ⱎ"), + (0x2C1F, "M", "ⱏ"), + (0x2C20, "M", "ⱐ"), + (0x2C21, "M", "ⱑ"), + (0x2C22, "M", "ⱒ"), + (0x2C23, "M", "ⱓ"), + (0x2C24, "M", "ⱔ"), + (0x2C25, "M", "ⱕ"), + (0x2C26, "M", "ⱖ"), + (0x2C27, "M", "ⱗ"), + (0x2C28, "M", "ⱘ"), + (0x2C29, "M", "ⱙ"), + (0x2C2A, "M", "ⱚ"), + (0x2C2B, "M", "ⱛ"), + (0x2C2C, "M", "ⱜ"), + (0x2C2D, "M", "ⱝ"), + (0x2C2E, "M", "ⱞ"), + (0x2C2F, "M", "ⱟ"), + (0x2C30, "V"), + (0x2C60, "M", "ⱡ"), + (0x2C61, "V"), + (0x2C62, "M", "ɫ"), + (0x2C63, "M", "ᵽ"), + (0x2C64, "M", "ɽ"), + (0x2C65, "V"), + (0x2C67, "M", "ⱨ"), + (0x2C68, "V"), + (0x2C69, "M", "ⱪ"), + (0x2C6A, "V"), + (0x2C6B, "M", "ⱬ"), + (0x2C6C, "V"), + (0x2C6D, "M", "ɑ"), + (0x2C6E, "M", "ɱ"), + (0x2C6F, "M", "ɐ"), + (0x2C70, "M", "ɒ"), + (0x2C71, "V"), + (0x2C72, "M", "ⱳ"), + (0x2C73, "V"), + (0x2C75, "M", "ⱶ"), + (0x2C76, "V"), + (0x2C7C, "M", "j"), + (0x2C7D, "M", "v"), + (0x2C7E, "M", "ȿ"), + (0x2C7F, "M", "ɀ"), + (0x2C80, "M", "ⲁ"), + (0x2C81, "V"), + (0x2C82, "M", "ⲃ"), + (0x2C83, "V"), + (0x2C84, "M", "ⲅ"), + (0x2C85, "V"), + (0x2C86, "M", "ⲇ"), + (0x2C87, "V"), + (0x2C88, "M", "ⲉ"), + (0x2C89, "V"), + (0x2C8A, "M", "ⲋ"), + (0x2C8B, "V"), + (0x2C8C, "M", "ⲍ"), + (0x2C8D, "V"), + (0x2C8E, "M", "ⲏ"), + (0x2C8F, "V"), + (0x2C90, "M", "ⲑ"), + (0x2C91, "V"), + (0x2C92, "M", "ⲓ"), + (0x2C93, "V"), + (0x2C94, "M", "ⲕ"), + (0x2C95, "V"), + (0x2C96, "M", "ⲗ"), + (0x2C97, "V"), + (0x2C98, "M", "ⲙ"), + (0x2C99, "V"), + (0x2C9A, "M", "ⲛ"), + (0x2C9B, "V"), + (0x2C9C, "M", "ⲝ"), + (0x2C9D, "V"), + (0x2C9E, "M", "ⲟ"), + (0x2C9F, "V"), + (0x2CA0, "M", "ⲡ"), + (0x2CA1, "V"), + (0x2CA2, "M", "ⲣ"), + (0x2CA3, "V"), + (0x2CA4, "M", "ⲥ"), + (0x2CA5, "V"), + ] + + +def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2CA6, "M", "ⲧ"), + (0x2CA7, "V"), + (0x2CA8, "M", "ⲩ"), + (0x2CA9, "V"), + (0x2CAA, "M", "ⲫ"), + (0x2CAB, "V"), + (0x2CAC, "M", "ⲭ"), + (0x2CAD, "V"), + (0x2CAE, "M", "ⲯ"), + (0x2CAF, "V"), + (0x2CB0, "M", "ⲱ"), + (0x2CB1, "V"), + (0x2CB2, "M", "ⲳ"), + (0x2CB3, "V"), + (0x2CB4, "M", "ⲵ"), + (0x2CB5, "V"), + (0x2CB6, "M", "ⲷ"), + (0x2CB7, "V"), + (0x2CB8, "M", "ⲹ"), + (0x2CB9, "V"), + (0x2CBA, "M", "ⲻ"), + (0x2CBB, 
"V"), + (0x2CBC, "M", "ⲽ"), + (0x2CBD, "V"), + (0x2CBE, "M", "ⲿ"), + (0x2CBF, "V"), + (0x2CC0, "M", "ⳁ"), + (0x2CC1, "V"), + (0x2CC2, "M", "ⳃ"), + (0x2CC3, "V"), + (0x2CC4, "M", "ⳅ"), + (0x2CC5, "V"), + (0x2CC6, "M", "ⳇ"), + (0x2CC7, "V"), + (0x2CC8, "M", "ⳉ"), + (0x2CC9, "V"), + (0x2CCA, "M", "ⳋ"), + (0x2CCB, "V"), + (0x2CCC, "M", "ⳍ"), + (0x2CCD, "V"), + (0x2CCE, "M", "ⳏ"), + (0x2CCF, "V"), + (0x2CD0, "M", "ⳑ"), + (0x2CD1, "V"), + (0x2CD2, "M", "ⳓ"), + (0x2CD3, "V"), + (0x2CD4, "M", "ⳕ"), + (0x2CD5, "V"), + (0x2CD6, "M", "ⳗ"), + (0x2CD7, "V"), + (0x2CD8, "M", "ⳙ"), + (0x2CD9, "V"), + (0x2CDA, "M", "ⳛ"), + (0x2CDB, "V"), + (0x2CDC, "M", "ⳝ"), + (0x2CDD, "V"), + (0x2CDE, "M", "ⳟ"), + (0x2CDF, "V"), + (0x2CE0, "M", "ⳡ"), + (0x2CE1, "V"), + (0x2CE2, "M", "ⳣ"), + (0x2CE3, "V"), + (0x2CEB, "M", "ⳬ"), + (0x2CEC, "V"), + (0x2CED, "M", "ⳮ"), + (0x2CEE, "V"), + (0x2CF2, "M", "ⳳ"), + (0x2CF3, "V"), + (0x2CF4, "X"), + (0x2CF9, "V"), + (0x2D26, "X"), + (0x2D27, "V"), + (0x2D28, "X"), + (0x2D2D, "V"), + (0x2D2E, "X"), + (0x2D30, "V"), + (0x2D68, "X"), + (0x2D6F, "M", "ⵡ"), + (0x2D70, "V"), + (0x2D71, "X"), + (0x2D7F, "V"), + (0x2D97, "X"), + (0x2DA0, "V"), + (0x2DA7, "X"), + (0x2DA8, "V"), + (0x2DAF, "X"), + (0x2DB0, "V"), + (0x2DB7, "X"), + (0x2DB8, "V"), + (0x2DBF, "X"), + (0x2DC0, "V"), + (0x2DC7, "X"), + (0x2DC8, "V"), + (0x2DCF, "X"), + (0x2DD0, "V"), + (0x2DD7, "X"), + (0x2DD8, "V"), + (0x2DDF, "X"), + (0x2DE0, "V"), + (0x2E5E, "X"), + ] + + +def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2E80, "V"), + (0x2E9A, "X"), + (0x2E9B, "V"), + (0x2E9F, "M", "母"), + (0x2EA0, "V"), + (0x2EF3, "M", "龟"), + (0x2EF4, "X"), + (0x2F00, "M", "一"), + (0x2F01, "M", "丨"), + (0x2F02, "M", "丶"), + (0x2F03, "M", "丿"), + (0x2F04, "M", "乙"), + (0x2F05, "M", "亅"), + (0x2F06, "M", "二"), + (0x2F07, "M", "亠"), + (0x2F08, "M", "人"), + (0x2F09, "M", "儿"), + (0x2F0A, "M", "入"), + (0x2F0B, "M", "八"), + (0x2F0C, "M", "冂"), + (0x2F0D, "M", "冖"), + (0x2F0E, "M", "冫"), + (0x2F0F, "M", "几"), + (0x2F10, "M", "凵"), + (0x2F11, "M", "刀"), + (0x2F12, "M", "力"), + (0x2F13, "M", "勹"), + (0x2F14, "M", "匕"), + (0x2F15, "M", "匚"), + (0x2F16, "M", "匸"), + (0x2F17, "M", "十"), + (0x2F18, "M", "卜"), + (0x2F19, "M", "卩"), + (0x2F1A, "M", "厂"), + (0x2F1B, "M", "厶"), + (0x2F1C, "M", "又"), + (0x2F1D, "M", "口"), + (0x2F1E, "M", "囗"), + (0x2F1F, "M", "土"), + (0x2F20, "M", "士"), + (0x2F21, "M", "夂"), + (0x2F22, "M", "夊"), + (0x2F23, "M", "夕"), + (0x2F24, "M", "大"), + (0x2F25, "M", "女"), + (0x2F26, "M", "子"), + (0x2F27, "M", "宀"), + (0x2F28, "M", "寸"), + (0x2F29, "M", "小"), + (0x2F2A, "M", "尢"), + (0x2F2B, "M", "尸"), + (0x2F2C, "M", "屮"), + (0x2F2D, "M", "山"), + (0x2F2E, "M", "巛"), + (0x2F2F, "M", "工"), + (0x2F30, "M", "己"), + (0x2F31, "M", "巾"), + (0x2F32, "M", "干"), + (0x2F33, "M", "幺"), + (0x2F34, "M", "广"), + (0x2F35, "M", "廴"), + (0x2F36, "M", "廾"), + (0x2F37, "M", "弋"), + (0x2F38, "M", "弓"), + (0x2F39, "M", "彐"), + (0x2F3A, "M", "彡"), + (0x2F3B, "M", "彳"), + (0x2F3C, "M", "心"), + (0x2F3D, "M", "戈"), + (0x2F3E, "M", "戶"), + (0x2F3F, "M", "手"), + (0x2F40, "M", "支"), + (0x2F41, "M", "攴"), + (0x2F42, "M", "文"), + (0x2F43, "M", "斗"), + (0x2F44, "M", "斤"), + (0x2F45, "M", "方"), + (0x2F46, "M", "无"), + (0x2F47, "M", "日"), + (0x2F48, "M", "曰"), + (0x2F49, "M", "月"), + (0x2F4A, "M", "木"), + (0x2F4B, "M", "欠"), + (0x2F4C, "M", "止"), + (0x2F4D, "M", "歹"), + (0x2F4E, "M", "殳"), + (0x2F4F, "M", "毋"), + (0x2F50, "M", "比"), + (0x2F51, "M", "毛"), + (0x2F52, "M", "氏"), + (0x2F53, "M", "气"), + (0x2F54, "M", "水"), + (0x2F55, 
"M", "火"), + (0x2F56, "M", "爪"), + (0x2F57, "M", "父"), + (0x2F58, "M", "爻"), + (0x2F59, "M", "爿"), + (0x2F5A, "M", "片"), + (0x2F5B, "M", "牙"), + (0x2F5C, "M", "牛"), + ] + + +def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F5D, "M", "犬"), + (0x2F5E, "M", "玄"), + (0x2F5F, "M", "玉"), + (0x2F60, "M", "瓜"), + (0x2F61, "M", "瓦"), + (0x2F62, "M", "甘"), + (0x2F63, "M", "生"), + (0x2F64, "M", "用"), + (0x2F65, "M", "田"), + (0x2F66, "M", "疋"), + (0x2F67, "M", "疒"), + (0x2F68, "M", "癶"), + (0x2F69, "M", "白"), + (0x2F6A, "M", "皮"), + (0x2F6B, "M", "皿"), + (0x2F6C, "M", "目"), + (0x2F6D, "M", "矛"), + (0x2F6E, "M", "矢"), + (0x2F6F, "M", "石"), + (0x2F70, "M", "示"), + (0x2F71, "M", "禸"), + (0x2F72, "M", "禾"), + (0x2F73, "M", "穴"), + (0x2F74, "M", "立"), + (0x2F75, "M", "竹"), + (0x2F76, "M", "米"), + (0x2F77, "M", "糸"), + (0x2F78, "M", "缶"), + (0x2F79, "M", "网"), + (0x2F7A, "M", "羊"), + (0x2F7B, "M", "羽"), + (0x2F7C, "M", "老"), + (0x2F7D, "M", "而"), + (0x2F7E, "M", "耒"), + (0x2F7F, "M", "耳"), + (0x2F80, "M", "聿"), + (0x2F81, "M", "肉"), + (0x2F82, "M", "臣"), + (0x2F83, "M", "自"), + (0x2F84, "M", "至"), + (0x2F85, "M", "臼"), + (0x2F86, "M", "舌"), + (0x2F87, "M", "舛"), + (0x2F88, "M", "舟"), + (0x2F89, "M", "艮"), + (0x2F8A, "M", "色"), + (0x2F8B, "M", "艸"), + (0x2F8C, "M", "虍"), + (0x2F8D, "M", "虫"), + (0x2F8E, "M", "血"), + (0x2F8F, "M", "行"), + (0x2F90, "M", "衣"), + (0x2F91, "M", "襾"), + (0x2F92, "M", "見"), + (0x2F93, "M", "角"), + (0x2F94, "M", "言"), + (0x2F95, "M", "谷"), + (0x2F96, "M", "豆"), + (0x2F97, "M", "豕"), + (0x2F98, "M", "豸"), + (0x2F99, "M", "貝"), + (0x2F9A, "M", "赤"), + (0x2F9B, "M", "走"), + (0x2F9C, "M", "足"), + (0x2F9D, "M", "身"), + (0x2F9E, "M", "車"), + (0x2F9F, "M", "辛"), + (0x2FA0, "M", "辰"), + (0x2FA1, "M", "辵"), + (0x2FA2, "M", "邑"), + (0x2FA3, "M", "酉"), + (0x2FA4, "M", "釆"), + (0x2FA5, "M", "里"), + (0x2FA6, "M", "金"), + (0x2FA7, "M", "長"), + (0x2FA8, "M", "門"), + (0x2FA9, "M", "阜"), + (0x2FAA, "M", "隶"), + (0x2FAB, "M", "隹"), + (0x2FAC, "M", "雨"), + (0x2FAD, "M", "靑"), + (0x2FAE, "M", "非"), + (0x2FAF, "M", "面"), + (0x2FB0, "M", "革"), + (0x2FB1, "M", "韋"), + (0x2FB2, "M", "韭"), + (0x2FB3, "M", "音"), + (0x2FB4, "M", "頁"), + (0x2FB5, "M", "風"), + (0x2FB6, "M", "飛"), + (0x2FB7, "M", "食"), + (0x2FB8, "M", "首"), + (0x2FB9, "M", "香"), + (0x2FBA, "M", "馬"), + (0x2FBB, "M", "骨"), + (0x2FBC, "M", "高"), + (0x2FBD, "M", "髟"), + (0x2FBE, "M", "鬥"), + (0x2FBF, "M", "鬯"), + (0x2FC0, "M", "鬲"), + ] + + +def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2FC1, "M", "鬼"), + (0x2FC2, "M", "魚"), + (0x2FC3, "M", "鳥"), + (0x2FC4, "M", "鹵"), + (0x2FC5, "M", "鹿"), + (0x2FC6, "M", "麥"), + (0x2FC7, "M", "麻"), + (0x2FC8, "M", "黃"), + (0x2FC9, "M", "黍"), + (0x2FCA, "M", "黑"), + (0x2FCB, "M", "黹"), + (0x2FCC, "M", "黽"), + (0x2FCD, "M", "鼎"), + (0x2FCE, "M", "鼓"), + (0x2FCF, "M", "鼠"), + (0x2FD0, "M", "鼻"), + (0x2FD1, "M", "齊"), + (0x2FD2, "M", "齒"), + (0x2FD3, "M", "龍"), + (0x2FD4, "M", "龜"), + (0x2FD5, "M", "龠"), + (0x2FD6, "X"), + (0x3000, "M", " "), + (0x3001, "V"), + (0x3002, "M", "."), + (0x3003, "V"), + (0x3036, "M", "〒"), + (0x3037, "V"), + (0x3038, "M", "十"), + (0x3039, "M", "卄"), + (0x303A, "M", "卅"), + (0x303B, "V"), + (0x3040, "X"), + (0x3041, "V"), + (0x3097, "X"), + (0x3099, "V"), + (0x309B, "M", " ゙"), + (0x309C, "M", " ゚"), + (0x309D, "V"), + (0x309F, "M", "より"), + (0x30A0, "V"), + (0x30FF, "M", "コト"), + (0x3100, "X"), + (0x3105, "V"), + (0x3130, "X"), + (0x3131, "M", "ᄀ"), + (0x3132, "M", "ᄁ"), + (0x3133, "M", "ᆪ"), + (0x3134, "M", "ᄂ"), 
+ (0x3135, "M", "ᆬ"), + (0x3136, "M", "ᆭ"), + (0x3137, "M", "ᄃ"), + (0x3138, "M", "ᄄ"), + (0x3139, "M", "ᄅ"), + (0x313A, "M", "ᆰ"), + (0x313B, "M", "ᆱ"), + (0x313C, "M", "ᆲ"), + (0x313D, "M", "ᆳ"), + (0x313E, "M", "ᆴ"), + (0x313F, "M", "ᆵ"), + (0x3140, "M", "ᄚ"), + (0x3141, "M", "ᄆ"), + (0x3142, "M", "ᄇ"), + (0x3143, "M", "ᄈ"), + (0x3144, "M", "ᄡ"), + (0x3145, "M", "ᄉ"), + (0x3146, "M", "ᄊ"), + (0x3147, "M", "ᄋ"), + (0x3148, "M", "ᄌ"), + (0x3149, "M", "ᄍ"), + (0x314A, "M", "ᄎ"), + (0x314B, "M", "ᄏ"), + (0x314C, "M", "ᄐ"), + (0x314D, "M", "ᄑ"), + (0x314E, "M", "ᄒ"), + (0x314F, "M", "ᅡ"), + (0x3150, "M", "ᅢ"), + (0x3151, "M", "ᅣ"), + (0x3152, "M", "ᅤ"), + (0x3153, "M", "ᅥ"), + (0x3154, "M", "ᅦ"), + (0x3155, "M", "ᅧ"), + (0x3156, "M", "ᅨ"), + (0x3157, "M", "ᅩ"), + (0x3158, "M", "ᅪ"), + (0x3159, "M", "ᅫ"), + (0x315A, "M", "ᅬ"), + (0x315B, "M", "ᅭ"), + (0x315C, "M", "ᅮ"), + (0x315D, "M", "ᅯ"), + (0x315E, "M", "ᅰ"), + (0x315F, "M", "ᅱ"), + (0x3160, "M", "ᅲ"), + (0x3161, "M", "ᅳ"), + (0x3162, "M", "ᅴ"), + (0x3163, "M", "ᅵ"), + (0x3164, "I"), + (0x3165, "M", "ᄔ"), + (0x3166, "M", "ᄕ"), + (0x3167, "M", "ᇇ"), + ] + + +def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3168, "M", "ᇈ"), + (0x3169, "M", "ᇌ"), + (0x316A, "M", "ᇎ"), + (0x316B, "M", "ᇓ"), + (0x316C, "M", "ᇗ"), + (0x316D, "M", "ᇙ"), + (0x316E, "M", "ᄜ"), + (0x316F, "M", "ᇝ"), + (0x3170, "M", "ᇟ"), + (0x3171, "M", "ᄝ"), + (0x3172, "M", "ᄞ"), + (0x3173, "M", "ᄠ"), + (0x3174, "M", "ᄢ"), + (0x3175, "M", "ᄣ"), + (0x3176, "M", "ᄧ"), + (0x3177, "M", "ᄩ"), + (0x3178, "M", "ᄫ"), + (0x3179, "M", "ᄬ"), + (0x317A, "M", "ᄭ"), + (0x317B, "M", "ᄮ"), + (0x317C, "M", "ᄯ"), + (0x317D, "M", "ᄲ"), + (0x317E, "M", "ᄶ"), + (0x317F, "M", "ᅀ"), + (0x3180, "M", "ᅇ"), + (0x3181, "M", "ᅌ"), + (0x3182, "M", "ᇱ"), + (0x3183, "M", "ᇲ"), + (0x3184, "M", "ᅗ"), + (0x3185, "M", "ᅘ"), + (0x3186, "M", "ᅙ"), + (0x3187, "M", "ᆄ"), + (0x3188, "M", "ᆅ"), + (0x3189, "M", "ᆈ"), + (0x318A, "M", "ᆑ"), + (0x318B, "M", "ᆒ"), + (0x318C, "M", "ᆔ"), + (0x318D, "M", "ᆞ"), + (0x318E, "M", "ᆡ"), + (0x318F, "X"), + (0x3190, "V"), + (0x3192, "M", "一"), + (0x3193, "M", "二"), + (0x3194, "M", "三"), + (0x3195, "M", "四"), + (0x3196, "M", "上"), + (0x3197, "M", "中"), + (0x3198, "M", "下"), + (0x3199, "M", "甲"), + (0x319A, "M", "乙"), + (0x319B, "M", "丙"), + (0x319C, "M", "丁"), + (0x319D, "M", "天"), + (0x319E, "M", "地"), + (0x319F, "M", "人"), + (0x31A0, "V"), + (0x31E6, "X"), + (0x31F0, "V"), + (0x3200, "M", "(ᄀ)"), + (0x3201, "M", "(ᄂ)"), + (0x3202, "M", "(ᄃ)"), + (0x3203, "M", "(ᄅ)"), + (0x3204, "M", "(ᄆ)"), + (0x3205, "M", "(ᄇ)"), + (0x3206, "M", "(ᄉ)"), + (0x3207, "M", "(ᄋ)"), + (0x3208, "M", "(ᄌ)"), + (0x3209, "M", "(ᄎ)"), + (0x320A, "M", "(ᄏ)"), + (0x320B, "M", "(ᄐ)"), + (0x320C, "M", "(ᄑ)"), + (0x320D, "M", "(ᄒ)"), + (0x320E, "M", "(가)"), + (0x320F, "M", "(나)"), + (0x3210, "M", "(다)"), + (0x3211, "M", "(라)"), + (0x3212, "M", "(마)"), + (0x3213, "M", "(바)"), + (0x3214, "M", "(사)"), + (0x3215, "M", "(아)"), + (0x3216, "M", "(자)"), + (0x3217, "M", "(차)"), + (0x3218, "M", "(카)"), + (0x3219, "M", "(타)"), + (0x321A, "M", "(파)"), + (0x321B, "M", "(하)"), + (0x321C, "M", "(주)"), + (0x321D, "M", "(오전)"), + (0x321E, "M", "(오후)"), + (0x321F, "X"), + (0x3220, "M", "(一)"), + (0x3221, "M", "(二)"), + (0x3222, "M", "(三)"), + (0x3223, "M", "(四)"), + (0x3224, "M", "(五)"), + (0x3225, "M", "(六)"), + (0x3226, "M", "(七)"), + (0x3227, "M", "(八)"), + (0x3228, "M", "(九)"), + (0x3229, "M", "(十)"), + ] + + +def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + 
(0x322A, "M", "(月)"), + (0x322B, "M", "(火)"), + (0x322C, "M", "(水)"), + (0x322D, "M", "(木)"), + (0x322E, "M", "(金)"), + (0x322F, "M", "(土)"), + (0x3230, "M", "(日)"), + (0x3231, "M", "(株)"), + (0x3232, "M", "(有)"), + (0x3233, "M", "(社)"), + (0x3234, "M", "(名)"), + (0x3235, "M", "(特)"), + (0x3236, "M", "(財)"), + (0x3237, "M", "(祝)"), + (0x3238, "M", "(労)"), + (0x3239, "M", "(代)"), + (0x323A, "M", "(呼)"), + (0x323B, "M", "(学)"), + (0x323C, "M", "(監)"), + (0x323D, "M", "(企)"), + (0x323E, "M", "(資)"), + (0x323F, "M", "(協)"), + (0x3240, "M", "(祭)"), + (0x3241, "M", "(休)"), + (0x3242, "M", "(自)"), + (0x3243, "M", "(至)"), + (0x3244, "M", "問"), + (0x3245, "M", "幼"), + (0x3246, "M", "文"), + (0x3247, "M", "箏"), + (0x3248, "V"), + (0x3250, "M", "pte"), + (0x3251, "M", "21"), + (0x3252, "M", "22"), + (0x3253, "M", "23"), + (0x3254, "M", "24"), + (0x3255, "M", "25"), + (0x3256, "M", "26"), + (0x3257, "M", "27"), + (0x3258, "M", "28"), + (0x3259, "M", "29"), + (0x325A, "M", "30"), + (0x325B, "M", "31"), + (0x325C, "M", "32"), + (0x325D, "M", "33"), + (0x325E, "M", "34"), + (0x325F, "M", "35"), + (0x3260, "M", "ᄀ"), + (0x3261, "M", "ᄂ"), + (0x3262, "M", "ᄃ"), + (0x3263, "M", "ᄅ"), + (0x3264, "M", "ᄆ"), + (0x3265, "M", "ᄇ"), + (0x3266, "M", "ᄉ"), + (0x3267, "M", "ᄋ"), + (0x3268, "M", "ᄌ"), + (0x3269, "M", "ᄎ"), + (0x326A, "M", "ᄏ"), + (0x326B, "M", "ᄐ"), + (0x326C, "M", "ᄑ"), + (0x326D, "M", "ᄒ"), + (0x326E, "M", "가"), + (0x326F, "M", "나"), + (0x3270, "M", "다"), + (0x3271, "M", "라"), + (0x3272, "M", "마"), + (0x3273, "M", "바"), + (0x3274, "M", "사"), + (0x3275, "M", "아"), + (0x3276, "M", "자"), + (0x3277, "M", "차"), + (0x3278, "M", "카"), + (0x3279, "M", "타"), + (0x327A, "M", "파"), + (0x327B, "M", "하"), + (0x327C, "M", "참고"), + (0x327D, "M", "주의"), + (0x327E, "M", "우"), + (0x327F, "V"), + (0x3280, "M", "一"), + (0x3281, "M", "二"), + (0x3282, "M", "三"), + (0x3283, "M", "四"), + (0x3284, "M", "五"), + (0x3285, "M", "六"), + (0x3286, "M", "七"), + (0x3287, "M", "八"), + (0x3288, "M", "九"), + (0x3289, "M", "十"), + (0x328A, "M", "月"), + (0x328B, "M", "火"), + (0x328C, "M", "水"), + (0x328D, "M", "木"), + (0x328E, "M", "金"), + (0x328F, "M", "土"), + (0x3290, "M", "日"), + (0x3291, "M", "株"), + (0x3292, "M", "有"), + (0x3293, "M", "社"), + (0x3294, "M", "名"), + ] + + +def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x3295, "M", "特"), + (0x3296, "M", "財"), + (0x3297, "M", "祝"), + (0x3298, "M", "労"), + (0x3299, "M", "秘"), + (0x329A, "M", "男"), + (0x329B, "M", "女"), + (0x329C, "M", "適"), + (0x329D, "M", "優"), + (0x329E, "M", "印"), + (0x329F, "M", "注"), + (0x32A0, "M", "項"), + (0x32A1, "M", "休"), + (0x32A2, "M", "写"), + (0x32A3, "M", "正"), + (0x32A4, "M", "上"), + (0x32A5, "M", "中"), + (0x32A6, "M", "下"), + (0x32A7, "M", "左"), + (0x32A8, "M", "右"), + (0x32A9, "M", "医"), + (0x32AA, "M", "宗"), + (0x32AB, "M", "学"), + (0x32AC, "M", "監"), + (0x32AD, "M", "企"), + (0x32AE, "M", "資"), + (0x32AF, "M", "協"), + (0x32B0, "M", "夜"), + (0x32B1, "M", "36"), + (0x32B2, "M", "37"), + (0x32B3, "M", "38"), + (0x32B4, "M", "39"), + (0x32B5, "M", "40"), + (0x32B6, "M", "41"), + (0x32B7, "M", "42"), + (0x32B8, "M", "43"), + (0x32B9, "M", "44"), + (0x32BA, "M", "45"), + (0x32BB, "M", "46"), + (0x32BC, "M", "47"), + (0x32BD, "M", "48"), + (0x32BE, "M", "49"), + (0x32BF, "M", "50"), + (0x32C0, "M", "1月"), + (0x32C1, "M", "2月"), + (0x32C2, "M", "3月"), + (0x32C3, "M", "4月"), + (0x32C4, "M", "5月"), + (0x32C5, "M", "6月"), + (0x32C6, "M", "7月"), + (0x32C7, "M", "8月"), + (0x32C8, "M", "9月"), + (0x32C9, "M", "10月"), + (0x32CA, 
"M", "11月"), + (0x32CB, "M", "12月"), + (0x32CC, "M", "hg"), + (0x32CD, "M", "erg"), + (0x32CE, "M", "ev"), + (0x32CF, "M", "ltd"), + (0x32D0, "M", "ア"), + (0x32D1, "M", "イ"), + (0x32D2, "M", "ウ"), + (0x32D3, "M", "エ"), + (0x32D4, "M", "オ"), + (0x32D5, "M", "カ"), + (0x32D6, "M", "キ"), + (0x32D7, "M", "ク"), + (0x32D8, "M", "ケ"), + (0x32D9, "M", "コ"), + (0x32DA, "M", "サ"), + (0x32DB, "M", "シ"), + (0x32DC, "M", "ス"), + (0x32DD, "M", "セ"), + (0x32DE, "M", "ソ"), + (0x32DF, "M", "タ"), + (0x32E0, "M", "チ"), + (0x32E1, "M", "ツ"), + (0x32E2, "M", "テ"), + (0x32E3, "M", "ト"), + (0x32E4, "M", "ナ"), + (0x32E5, "M", "ニ"), + (0x32E6, "M", "ヌ"), + (0x32E7, "M", "ネ"), + (0x32E8, "M", "ノ"), + (0x32E9, "M", "ハ"), + (0x32EA, "M", "ヒ"), + (0x32EB, "M", "フ"), + (0x32EC, "M", "ヘ"), + (0x32ED, "M", "ホ"), + (0x32EE, "M", "マ"), + (0x32EF, "M", "ミ"), + (0x32F0, "M", "ム"), + (0x32F1, "M", "メ"), + (0x32F2, "M", "モ"), + (0x32F3, "M", "ヤ"), + (0x32F4, "M", "ユ"), + (0x32F5, "M", "ヨ"), + (0x32F6, "M", "ラ"), + (0x32F7, "M", "リ"), + (0x32F8, "M", "ル"), + ] + + +def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x32F9, "M", "レ"), + (0x32FA, "M", "ロ"), + (0x32FB, "M", "ワ"), + (0x32FC, "M", "ヰ"), + (0x32FD, "M", "ヱ"), + (0x32FE, "M", "ヲ"), + (0x32FF, "M", "令和"), + (0x3300, "M", "アパート"), + (0x3301, "M", "アルファ"), + (0x3302, "M", "アンペア"), + (0x3303, "M", "アール"), + (0x3304, "M", "イニング"), + (0x3305, "M", "インチ"), + (0x3306, "M", "ウォン"), + (0x3307, "M", "エスクード"), + (0x3308, "M", "エーカー"), + (0x3309, "M", "オンス"), + (0x330A, "M", "オーム"), + (0x330B, "M", "カイリ"), + (0x330C, "M", "カラット"), + (0x330D, "M", "カロリー"), + (0x330E, "M", "ガロン"), + (0x330F, "M", "ガンマ"), + (0x3310, "M", "ギガ"), + (0x3311, "M", "ギニー"), + (0x3312, "M", "キュリー"), + (0x3313, "M", "ギルダー"), + (0x3314, "M", "キロ"), + (0x3315, "M", "キログラム"), + (0x3316, "M", "キロメートル"), + (0x3317, "M", "キロワット"), + (0x3318, "M", "グラム"), + (0x3319, "M", "グラムトン"), + (0x331A, "M", "クルゼイロ"), + (0x331B, "M", "クローネ"), + (0x331C, "M", "ケース"), + (0x331D, "M", "コルナ"), + (0x331E, "M", "コーポ"), + (0x331F, "M", "サイクル"), + (0x3320, "M", "サンチーム"), + (0x3321, "M", "シリング"), + (0x3322, "M", "センチ"), + (0x3323, "M", "セント"), + (0x3324, "M", "ダース"), + (0x3325, "M", "デシ"), + (0x3326, "M", "ドル"), + (0x3327, "M", "トン"), + (0x3328, "M", "ナノ"), + (0x3329, "M", "ノット"), + (0x332A, "M", "ハイツ"), + (0x332B, "M", "パーセント"), + (0x332C, "M", "パーツ"), + (0x332D, "M", "バーレル"), + (0x332E, "M", "ピアストル"), + (0x332F, "M", "ピクル"), + (0x3330, "M", "ピコ"), + (0x3331, "M", "ビル"), + (0x3332, "M", "ファラッド"), + (0x3333, "M", "フィート"), + (0x3334, "M", "ブッシェル"), + (0x3335, "M", "フラン"), + (0x3336, "M", "ヘクタール"), + (0x3337, "M", "ペソ"), + (0x3338, "M", "ペニヒ"), + (0x3339, "M", "ヘルツ"), + (0x333A, "M", "ペンス"), + (0x333B, "M", "ページ"), + (0x333C, "M", "ベータ"), + (0x333D, "M", "ポイント"), + (0x333E, "M", "ボルト"), + (0x333F, "M", "ホン"), + (0x3340, "M", "ポンド"), + (0x3341, "M", "ホール"), + (0x3342, "M", "ホーン"), + (0x3343, "M", "マイクロ"), + (0x3344, "M", "マイル"), + (0x3345, "M", "マッハ"), + (0x3346, "M", "マルク"), + (0x3347, "M", "マンション"), + (0x3348, "M", "ミクロン"), + (0x3349, "M", "ミリ"), + (0x334A, "M", "ミリバール"), + (0x334B, "M", "メガ"), + (0x334C, "M", "メガトン"), + (0x334D, "M", "メートル"), + (0x334E, "M", "ヤード"), + (0x334F, "M", "ヤール"), + (0x3350, "M", "ユアン"), + (0x3351, "M", "リットル"), + (0x3352, "M", "リラ"), + (0x3353, "M", "ルピー"), + (0x3354, "M", "ルーブル"), + (0x3355, "M", "レム"), + (0x3356, "M", "レントゲン"), + (0x3357, "M", "ワット"), + (0x3358, "M", "0点"), + (0x3359, "M", "1点"), + (0x335A, "M", "2点"), + (0x335B, "M", "3点"), + (0x335C, "M", "4点"), + ] + + +def 
_seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x335D, "M", "5点"), + (0x335E, "M", "6点"), + (0x335F, "M", "7点"), + (0x3360, "M", "8点"), + (0x3361, "M", "9点"), + (0x3362, "M", "10点"), + (0x3363, "M", "11点"), + (0x3364, "M", "12点"), + (0x3365, "M", "13点"), + (0x3366, "M", "14点"), + (0x3367, "M", "15点"), + (0x3368, "M", "16点"), + (0x3369, "M", "17点"), + (0x336A, "M", "18点"), + (0x336B, "M", "19点"), + (0x336C, "M", "20点"), + (0x336D, "M", "21点"), + (0x336E, "M", "22点"), + (0x336F, "M", "23点"), + (0x3370, "M", "24点"), + (0x3371, "M", "hpa"), + (0x3372, "M", "da"), + (0x3373, "M", "au"), + (0x3374, "M", "bar"), + (0x3375, "M", "ov"), + (0x3376, "M", "pc"), + (0x3377, "M", "dm"), + (0x3378, "M", "dm2"), + (0x3379, "M", "dm3"), + (0x337A, "M", "iu"), + (0x337B, "M", "平成"), + (0x337C, "M", "昭和"), + (0x337D, "M", "大正"), + (0x337E, "M", "明治"), + (0x337F, "M", "株式会社"), + (0x3380, "M", "pa"), + (0x3381, "M", "na"), + (0x3382, "M", "μa"), + (0x3383, "M", "ma"), + (0x3384, "M", "ka"), + (0x3385, "M", "kb"), + (0x3386, "M", "mb"), + (0x3387, "M", "gb"), + (0x3388, "M", "cal"), + (0x3389, "M", "kcal"), + (0x338A, "M", "pf"), + (0x338B, "M", "nf"), + (0x338C, "M", "μf"), + (0x338D, "M", "μg"), + (0x338E, "M", "mg"), + (0x338F, "M", "kg"), + (0x3390, "M", "hz"), + (0x3391, "M", "khz"), + (0x3392, "M", "mhz"), + (0x3393, "M", "ghz"), + (0x3394, "M", "thz"), + (0x3395, "M", "μl"), + (0x3396, "M", "ml"), + (0x3397, "M", "dl"), + (0x3398, "M", "kl"), + (0x3399, "M", "fm"), + (0x339A, "M", "nm"), + (0x339B, "M", "μm"), + (0x339C, "M", "mm"), + (0x339D, "M", "cm"), + (0x339E, "M", "km"), + (0x339F, "M", "mm2"), + (0x33A0, "M", "cm2"), + (0x33A1, "M", "m2"), + (0x33A2, "M", "km2"), + (0x33A3, "M", "mm3"), + (0x33A4, "M", "cm3"), + (0x33A5, "M", "m3"), + (0x33A6, "M", "km3"), + (0x33A7, "M", "m∕s"), + (0x33A8, "M", "m∕s2"), + (0x33A9, "M", "pa"), + (0x33AA, "M", "kpa"), + (0x33AB, "M", "mpa"), + (0x33AC, "M", "gpa"), + (0x33AD, "M", "rad"), + (0x33AE, "M", "rad∕s"), + (0x33AF, "M", "rad∕s2"), + (0x33B0, "M", "ps"), + (0x33B1, "M", "ns"), + (0x33B2, "M", "μs"), + (0x33B3, "M", "ms"), + (0x33B4, "M", "pv"), + (0x33B5, "M", "nv"), + (0x33B6, "M", "μv"), + (0x33B7, "M", "mv"), + (0x33B8, "M", "kv"), + (0x33B9, "M", "mv"), + (0x33BA, "M", "pw"), + (0x33BB, "M", "nw"), + (0x33BC, "M", "μw"), + (0x33BD, "M", "mw"), + (0x33BE, "M", "kw"), + (0x33BF, "M", "mw"), + (0x33C0, "M", "kω"), + ] + + +def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x33C1, "M", "mω"), + (0x33C2, "X"), + (0x33C3, "M", "bq"), + (0x33C4, "M", "cc"), + (0x33C5, "M", "cd"), + (0x33C6, "M", "c∕kg"), + (0x33C7, "X"), + (0x33C8, "M", "db"), + (0x33C9, "M", "gy"), + (0x33CA, "M", "ha"), + (0x33CB, "M", "hp"), + (0x33CC, "M", "in"), + (0x33CD, "M", "kk"), + (0x33CE, "M", "km"), + (0x33CF, "M", "kt"), + (0x33D0, "M", "lm"), + (0x33D1, "M", "ln"), + (0x33D2, "M", "log"), + (0x33D3, "M", "lx"), + (0x33D4, "M", "mb"), + (0x33D5, "M", "mil"), + (0x33D6, "M", "mol"), + (0x33D7, "M", "ph"), + (0x33D8, "X"), + (0x33D9, "M", "ppm"), + (0x33DA, "M", "pr"), + (0x33DB, "M", "sr"), + (0x33DC, "M", "sv"), + (0x33DD, "M", "wb"), + (0x33DE, "M", "v∕m"), + (0x33DF, "M", "a∕m"), + (0x33E0, "M", "1日"), + (0x33E1, "M", "2日"), + (0x33E2, "M", "3日"), + (0x33E3, "M", "4日"), + (0x33E4, "M", "5日"), + (0x33E5, "M", "6日"), + (0x33E6, "M", "7日"), + (0x33E7, "M", "8日"), + (0x33E8, "M", "9日"), + (0x33E9, "M", "10日"), + (0x33EA, "M", "11日"), + (0x33EB, "M", "12日"), + (0x33EC, "M", "13日"), + (0x33ED, "M", "14日"), + (0x33EE, 
"M", "15日"), + (0x33EF, "M", "16日"), + (0x33F0, "M", "17日"), + (0x33F1, "M", "18日"), + (0x33F2, "M", "19日"), + (0x33F3, "M", "20日"), + (0x33F4, "M", "21日"), + (0x33F5, "M", "22日"), + (0x33F6, "M", "23日"), + (0x33F7, "M", "24日"), + (0x33F8, "M", "25日"), + (0x33F9, "M", "26日"), + (0x33FA, "M", "27日"), + (0x33FB, "M", "28日"), + (0x33FC, "M", "29日"), + (0x33FD, "M", "30日"), + (0x33FE, "M", "31日"), + (0x33FF, "M", "gal"), + (0x3400, "V"), + (0xA48D, "X"), + (0xA490, "V"), + (0xA4C7, "X"), + (0xA4D0, "V"), + (0xA62C, "X"), + (0xA640, "M", "ꙁ"), + (0xA641, "V"), + (0xA642, "M", "ꙃ"), + (0xA643, "V"), + (0xA644, "M", "ꙅ"), + (0xA645, "V"), + (0xA646, "M", "ꙇ"), + (0xA647, "V"), + (0xA648, "M", "ꙉ"), + (0xA649, "V"), + (0xA64A, "M", "ꙋ"), + (0xA64B, "V"), + (0xA64C, "M", "ꙍ"), + (0xA64D, "V"), + (0xA64E, "M", "ꙏ"), + (0xA64F, "V"), + (0xA650, "M", "ꙑ"), + (0xA651, "V"), + (0xA652, "M", "ꙓ"), + (0xA653, "V"), + (0xA654, "M", "ꙕ"), + (0xA655, "V"), + (0xA656, "M", "ꙗ"), + (0xA657, "V"), + (0xA658, "M", "ꙙ"), + (0xA659, "V"), + (0xA65A, "M", "ꙛ"), + (0xA65B, "V"), + (0xA65C, "M", "ꙝ"), + (0xA65D, "V"), + (0xA65E, "M", "ꙟ"), + ] + + +def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA65F, "V"), + (0xA660, "M", "ꙡ"), + (0xA661, "V"), + (0xA662, "M", "ꙣ"), + (0xA663, "V"), + (0xA664, "M", "ꙥ"), + (0xA665, "V"), + (0xA666, "M", "ꙧ"), + (0xA667, "V"), + (0xA668, "M", "ꙩ"), + (0xA669, "V"), + (0xA66A, "M", "ꙫ"), + (0xA66B, "V"), + (0xA66C, "M", "ꙭ"), + (0xA66D, "V"), + (0xA680, "M", "ꚁ"), + (0xA681, "V"), + (0xA682, "M", "ꚃ"), + (0xA683, "V"), + (0xA684, "M", "ꚅ"), + (0xA685, "V"), + (0xA686, "M", "ꚇ"), + (0xA687, "V"), + (0xA688, "M", "ꚉ"), + (0xA689, "V"), + (0xA68A, "M", "ꚋ"), + (0xA68B, "V"), + (0xA68C, "M", "ꚍ"), + (0xA68D, "V"), + (0xA68E, "M", "ꚏ"), + (0xA68F, "V"), + (0xA690, "M", "ꚑ"), + (0xA691, "V"), + (0xA692, "M", "ꚓ"), + (0xA693, "V"), + (0xA694, "M", "ꚕ"), + (0xA695, "V"), + (0xA696, "M", "ꚗ"), + (0xA697, "V"), + (0xA698, "M", "ꚙ"), + (0xA699, "V"), + (0xA69A, "M", "ꚛ"), + (0xA69B, "V"), + (0xA69C, "M", "ъ"), + (0xA69D, "M", "ь"), + (0xA69E, "V"), + (0xA6F8, "X"), + (0xA700, "V"), + (0xA722, "M", "ꜣ"), + (0xA723, "V"), + (0xA724, "M", "ꜥ"), + (0xA725, "V"), + (0xA726, "M", "ꜧ"), + (0xA727, "V"), + (0xA728, "M", "ꜩ"), + (0xA729, "V"), + (0xA72A, "M", "ꜫ"), + (0xA72B, "V"), + (0xA72C, "M", "ꜭ"), + (0xA72D, "V"), + (0xA72E, "M", "ꜯ"), + (0xA72F, "V"), + (0xA732, "M", "ꜳ"), + (0xA733, "V"), + (0xA734, "M", "ꜵ"), + (0xA735, "V"), + (0xA736, "M", "ꜷ"), + (0xA737, "V"), + (0xA738, "M", "ꜹ"), + (0xA739, "V"), + (0xA73A, "M", "ꜻ"), + (0xA73B, "V"), + (0xA73C, "M", "ꜽ"), + (0xA73D, "V"), + (0xA73E, "M", "ꜿ"), + (0xA73F, "V"), + (0xA740, "M", "ꝁ"), + (0xA741, "V"), + (0xA742, "M", "ꝃ"), + (0xA743, "V"), + (0xA744, "M", "ꝅ"), + (0xA745, "V"), + (0xA746, "M", "ꝇ"), + (0xA747, "V"), + (0xA748, "M", "ꝉ"), + (0xA749, "V"), + (0xA74A, "M", "ꝋ"), + (0xA74B, "V"), + (0xA74C, "M", "ꝍ"), + (0xA74D, "V"), + (0xA74E, "M", "ꝏ"), + (0xA74F, "V"), + (0xA750, "M", "ꝑ"), + (0xA751, "V"), + (0xA752, "M", "ꝓ"), + (0xA753, "V"), + (0xA754, "M", "ꝕ"), + (0xA755, "V"), + (0xA756, "M", "ꝗ"), + (0xA757, "V"), + ] + + +def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA758, "M", "ꝙ"), + (0xA759, "V"), + (0xA75A, "M", "ꝛ"), + (0xA75B, "V"), + (0xA75C, "M", "ꝝ"), + (0xA75D, "V"), + (0xA75E, "M", "ꝟ"), + (0xA75F, "V"), + (0xA760, "M", "ꝡ"), + (0xA761, "V"), + (0xA762, "M", "ꝣ"), + (0xA763, "V"), + (0xA764, "M", "ꝥ"), + (0xA765, "V"), + (0xA766, "M", "ꝧ"), + 
(0xA767, "V"), + (0xA768, "M", "ꝩ"), + (0xA769, "V"), + (0xA76A, "M", "ꝫ"), + (0xA76B, "V"), + (0xA76C, "M", "ꝭ"), + (0xA76D, "V"), + (0xA76E, "M", "ꝯ"), + (0xA76F, "V"), + (0xA770, "M", "ꝯ"), + (0xA771, "V"), + (0xA779, "M", "ꝺ"), + (0xA77A, "V"), + (0xA77B, "M", "ꝼ"), + (0xA77C, "V"), + (0xA77D, "M", "ᵹ"), + (0xA77E, "M", "ꝿ"), + (0xA77F, "V"), + (0xA780, "M", "ꞁ"), + (0xA781, "V"), + (0xA782, "M", "ꞃ"), + (0xA783, "V"), + (0xA784, "M", "ꞅ"), + (0xA785, "V"), + (0xA786, "M", "ꞇ"), + (0xA787, "V"), + (0xA78B, "M", "ꞌ"), + (0xA78C, "V"), + (0xA78D, "M", "ɥ"), + (0xA78E, "V"), + (0xA790, "M", "ꞑ"), + (0xA791, "V"), + (0xA792, "M", "ꞓ"), + (0xA793, "V"), + (0xA796, "M", "ꞗ"), + (0xA797, "V"), + (0xA798, "M", "ꞙ"), + (0xA799, "V"), + (0xA79A, "M", "ꞛ"), + (0xA79B, "V"), + (0xA79C, "M", "ꞝ"), + (0xA79D, "V"), + (0xA79E, "M", "ꞟ"), + (0xA79F, "V"), + (0xA7A0, "M", "ꞡ"), + (0xA7A1, "V"), + (0xA7A2, "M", "ꞣ"), + (0xA7A3, "V"), + (0xA7A4, "M", "ꞥ"), + (0xA7A5, "V"), + (0xA7A6, "M", "ꞧ"), + (0xA7A7, "V"), + (0xA7A8, "M", "ꞩ"), + (0xA7A9, "V"), + (0xA7AA, "M", "ɦ"), + (0xA7AB, "M", "ɜ"), + (0xA7AC, "M", "ɡ"), + (0xA7AD, "M", "ɬ"), + (0xA7AE, "M", "ɪ"), + (0xA7AF, "V"), + (0xA7B0, "M", "ʞ"), + (0xA7B1, "M", "ʇ"), + (0xA7B2, "M", "ʝ"), + (0xA7B3, "M", "ꭓ"), + (0xA7B4, "M", "ꞵ"), + (0xA7B5, "V"), + (0xA7B6, "M", "ꞷ"), + (0xA7B7, "V"), + (0xA7B8, "M", "ꞹ"), + (0xA7B9, "V"), + (0xA7BA, "M", "ꞻ"), + (0xA7BB, "V"), + (0xA7BC, "M", "ꞽ"), + (0xA7BD, "V"), + (0xA7BE, "M", "ꞿ"), + (0xA7BF, "V"), + (0xA7C0, "M", "ꟁ"), + (0xA7C1, "V"), + (0xA7C2, "M", "ꟃ"), + (0xA7C3, "V"), + (0xA7C4, "M", "ꞔ"), + (0xA7C5, "M", "ʂ"), + (0xA7C6, "M", "ᶎ"), + (0xA7C7, "M", "ꟈ"), + (0xA7C8, "V"), + ] + + +def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xA7C9, "M", "ꟊ"), + (0xA7CA, "V"), + (0xA7CB, "M", "ɤ"), + (0xA7CC, "M", "ꟍ"), + (0xA7CD, "V"), + (0xA7CE, "X"), + (0xA7D0, "M", "ꟑ"), + (0xA7D1, "V"), + (0xA7D2, "X"), + (0xA7D3, "V"), + (0xA7D4, "X"), + (0xA7D5, "V"), + (0xA7D6, "M", "ꟗ"), + (0xA7D7, "V"), + (0xA7D8, "M", "ꟙ"), + (0xA7D9, "V"), + (0xA7DA, "M", "ꟛ"), + (0xA7DB, "V"), + (0xA7DC, "M", "ƛ"), + (0xA7DD, "X"), + (0xA7F2, "M", "c"), + (0xA7F3, "M", "f"), + (0xA7F4, "M", "q"), + (0xA7F5, "M", "ꟶ"), + (0xA7F6, "V"), + (0xA7F8, "M", "ħ"), + (0xA7F9, "M", "œ"), + (0xA7FA, "V"), + (0xA82D, "X"), + (0xA830, "V"), + (0xA83A, "X"), + (0xA840, "V"), + (0xA878, "X"), + (0xA880, "V"), + (0xA8C6, "X"), + (0xA8CE, "V"), + (0xA8DA, "X"), + (0xA8E0, "V"), + (0xA954, "X"), + (0xA95F, "V"), + (0xA97D, "X"), + (0xA980, "V"), + (0xA9CE, "X"), + (0xA9CF, "V"), + (0xA9DA, "X"), + (0xA9DE, "V"), + (0xA9FF, "X"), + (0xAA00, "V"), + (0xAA37, "X"), + (0xAA40, "V"), + (0xAA4E, "X"), + (0xAA50, "V"), + (0xAA5A, "X"), + (0xAA5C, "V"), + (0xAAC3, "X"), + (0xAADB, "V"), + (0xAAF7, "X"), + (0xAB01, "V"), + (0xAB07, "X"), + (0xAB09, "V"), + (0xAB0F, "X"), + (0xAB11, "V"), + (0xAB17, "X"), + (0xAB20, "V"), + (0xAB27, "X"), + (0xAB28, "V"), + (0xAB2F, "X"), + (0xAB30, "V"), + (0xAB5C, "M", "ꜧ"), + (0xAB5D, "M", "ꬷ"), + (0xAB5E, "M", "ɫ"), + (0xAB5F, "M", "ꭒ"), + (0xAB60, "V"), + (0xAB69, "M", "ʍ"), + (0xAB6A, "V"), + (0xAB6C, "X"), + (0xAB70, "M", "Ꭰ"), + (0xAB71, "M", "Ꭱ"), + (0xAB72, "M", "Ꭲ"), + (0xAB73, "M", "Ꭳ"), + (0xAB74, "M", "Ꭴ"), + (0xAB75, "M", "Ꭵ"), + (0xAB76, "M", "Ꭶ"), + (0xAB77, "M", "Ꭷ"), + (0xAB78, "M", "Ꭸ"), + (0xAB79, "M", "Ꭹ"), + (0xAB7A, "M", "Ꭺ"), + (0xAB7B, "M", "Ꭻ"), + (0xAB7C, "M", "Ꭼ"), + (0xAB7D, "M", "Ꭽ"), + (0xAB7E, "M", "Ꭾ"), + (0xAB7F, "M", "Ꭿ"), + (0xAB80, "M", "Ꮀ"), + (0xAB81, "M", "Ꮁ"), 
+ (0xAB82, "M", "Ꮂ"),
+ (0xAB83, "M", "Ꮃ"),
+ (0xAB84, "M", "Ꮄ"),
+ (0xAB85, "M", "Ꮅ"),
+ (0xAB86, "M", "Ꮆ"),
+ (0xAB87, "M", "Ꮇ"),
+ ]
+
+
+def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xAB88, "M", "Ꮈ"),
+ (0xAB89, "M", "Ꮉ"),
+ (0xAB8A, "M", "Ꮊ"),
+ (0xAB8B, "M", "Ꮋ"),
+ (0xAB8C, "M", "Ꮌ"),
+ (0xAB8D, "M", "Ꮍ"),
+ (0xAB8E, "M", "Ꮎ"),
+ (0xAB8F, "M", "Ꮏ"),
+ (0xAB90, "M", "Ꮐ"),
+ (0xAB91, "M", "Ꮑ"),
+ (0xAB92, "M", "Ꮒ"),
+ (0xAB93, "M", "Ꮓ"),
+ (0xAB94, "M", "Ꮔ"),
+ (0xAB95, "M", "Ꮕ"),
+ (0xAB96, "M", "Ꮖ"),
+ (0xAB97, "M", "Ꮗ"),
+ (0xAB98, "M", "Ꮘ"),
+ (0xAB99, "M", "Ꮙ"),
+ (0xAB9A, "M", "Ꮚ"),
+ (0xAB9B, "M", "Ꮛ"),
+ (0xAB9C, "M", "Ꮜ"),
+ (0xAB9D, "M", "Ꮝ"),
+ (0xAB9E, "M", "Ꮞ"),
+ (0xAB9F, "M", "Ꮟ"),
+ (0xABA0, "M", "Ꮠ"),
+ (0xABA1, "M", "Ꮡ"),
+ (0xABA2, "M", "Ꮢ"),
+ (0xABA3, "M", "Ꮣ"),
+ (0xABA4, "M", "Ꮤ"),
+ (0xABA5, "M", "Ꮥ"),
+ (0xABA6, "M", "Ꮦ"),
+ (0xABA7, "M", "Ꮧ"),
+ (0xABA8, "M", "Ꮨ"),
+ (0xABA9, "M", "Ꮩ"),
+ (0xABAA, "M", "Ꮪ"),
+ (0xABAB, "M", "Ꮫ"),
+ (0xABAC, "M", "Ꮬ"),
+ (0xABAD, "M", "Ꮭ"),
+ (0xABAE, "M", "Ꮮ"),
+ (0xABAF, "M", "Ꮯ"),
+ (0xABB0, "M", "Ꮰ"),
+ (0xABB1, "M", "Ꮱ"),
+ (0xABB2, "M", "Ꮲ"),
+ (0xABB3, "M", "Ꮳ"),
+ (0xABB4, "M", "Ꮴ"),
+ (0xABB5, "M", "Ꮵ"),
+ (0xABB6, "M", "Ꮶ"),
+ (0xABB7, "M", "Ꮷ"),
+ (0xABB8, "M", "Ꮸ"),
+ (0xABB9, "M", "Ꮹ"),
+ (0xABBA, "M", "Ꮺ"),
+ (0xABBB, "M", "Ꮻ"),
+ (0xABBC, "M", "Ꮼ"),
+ (0xABBD, "M", "Ꮽ"),
+ (0xABBE, "M", "Ꮾ"),
+ (0xABBF, "M", "Ꮿ"),
+ (0xABC0, "V"),
+ (0xABEE, "X"),
+ (0xABF0, "V"),
+ (0xABFA, "X"),
+ (0xAC00, "V"),
+ (0xD7A4, "X"),
+ (0xD7B0, "V"),
+ (0xD7C7, "X"),
+ (0xD7CB, "V"),
+ (0xD7FC, "X"),
+ (0xF900, "M", "豈"),
+ (0xF901, "M", "更"),
+ (0xF902, "M", "車"),
+ (0xF903, "M", "賈"),
+ (0xF904, "M", "滑"),
+ (0xF905, "M", "串"),
+ (0xF906, "M", "句"),
+ (0xF907, "M", "龜"),
+ (0xF909, "M", "契"),
+ (0xF90A, "M", "金"),
+ (0xF90B, "M", "喇"),
+ (0xF90C, "M", "奈"),
+ (0xF90D, "M", "懶"),
+ (0xF90E, "M", "癩"),
+ (0xF90F, "M", "羅"),
+ (0xF910, "M", "蘿"),
+ (0xF911, "M", "螺"),
+ (0xF912, "M", "裸"),
+ (0xF913, "M", "邏"),
+ (0xF914, "M", "樂"),
+ (0xF915, "M", "洛"),
+ (0xF916, "M", "烙"),
+ (0xF917, "M", "珞"),
+ (0xF918, "M", "落"),
+ (0xF919, "M", "酪"),
+ (0xF91A, "M", "駱"),
+ (0xF91B, "M", "亂"),
+ (0xF91C, "M", "卵"),
+ (0xF91D, "M", "欄"),
+ (0xF91E, "M", "爛"),
+ (0xF91F, "M", "蘭"),
+ (0xF920, "M", "鸞"),
+ (0xF921, "M", "嵐"),
+ (0xF922, "M", "濫"),
+ ]
+
+
+def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xF923, "M", "藍"),
+ (0xF924, "M", "襤"),
+ (0xF925, "M", "拉"),
+ (0xF926, "M", "臘"),
+ (0xF927, "M", "蠟"),
+ (0xF928, "M", "廊"),
+ (0xF929, "M", "朗"),
+ (0xF92A, "M", "浪"),
+ (0xF92B, "M", "狼"),
+ (0xF92C, "M", "郎"),
+ (0xF92D, "M", "來"),
+ (0xF92E, "M", "冷"),
+ (0xF92F, "M", "勞"),
+ (0xF930, "M", "擄"),
+ (0xF931, "M", "櫓"),
+ (0xF932, "M", "爐"),
+ (0xF933, "M", "盧"),
+ (0xF934, "M", "老"),
+ (0xF935, "M", "蘆"),
+ (0xF936, "M", "虜"),
+ (0xF937, "M", "路"),
+ (0xF938, "M", "露"),
+ (0xF939, "M", "魯"),
+ (0xF93A, "M", "鷺"),
+ (0xF93B, "M", "碌"),
+ (0xF93C, "M", "祿"),
+ (0xF93D, "M", "綠"),
+ (0xF93E, "M", "菉"),
+ (0xF93F, "M", "錄"),
+ (0xF940, "M", "鹿"),
+ (0xF941, "M", "論"),
+ (0xF942, "M", "壟"),
+ (0xF943, "M", "弄"),
+ (0xF944, "M", "籠"),
+ (0xF945, "M", "聾"),
+ (0xF946, "M", "牢"),
+ (0xF947, "M", "磊"),
+ (0xF948, "M", "賂"),
+ (0xF949, "M", "雷"),
+ (0xF94A, "M", "壘"),
+ (0xF94B, "M", "屢"),
+ (0xF94C, "M", "樓"),
+ (0xF94D, "M", "淚"),
+ (0xF94E, "M", "漏"),
+ (0xF94F, "M", "累"),
+ (0xF950, "M", "縷"),
+ (0xF951, "M", "陋"),
+ (0xF952, "M", "勒"),
+ (0xF953, "M", "肋"),
+ (0xF954, "M", "凜"),
"凜"), + (0xF955, "M", "凌"), + (0xF956, "M", "稜"), + (0xF957, "M", "綾"), + (0xF958, "M", "菱"), + (0xF959, "M", "陵"), + (0xF95A, "M", "讀"), + (0xF95B, "M", "拏"), + (0xF95C, "M", "樂"), + (0xF95D, "M", "諾"), + (0xF95E, "M", "丹"), + (0xF95F, "M", "寧"), + (0xF960, "M", "怒"), + (0xF961, "M", "率"), + (0xF962, "M", "異"), + (0xF963, "M", "北"), + (0xF964, "M", "磻"), + (0xF965, "M", "便"), + (0xF966, "M", "復"), + (0xF967, "M", "不"), + (0xF968, "M", "泌"), + (0xF969, "M", "數"), + (0xF96A, "M", "索"), + (0xF96B, "M", "參"), + (0xF96C, "M", "塞"), + (0xF96D, "M", "省"), + (0xF96E, "M", "葉"), + (0xF96F, "M", "說"), + (0xF970, "M", "殺"), + (0xF971, "M", "辰"), + (0xF972, "M", "沈"), + (0xF973, "M", "拾"), + (0xF974, "M", "若"), + (0xF975, "M", "掠"), + (0xF976, "M", "略"), + (0xF977, "M", "亮"), + (0xF978, "M", "兩"), + (0xF979, "M", "凉"), + (0xF97A, "M", "梁"), + (0xF97B, "M", "糧"), + (0xF97C, "M", "良"), + (0xF97D, "M", "諒"), + (0xF97E, "M", "量"), + (0xF97F, "M", "勵"), + (0xF980, "M", "呂"), + (0xF981, "M", "女"), + (0xF982, "M", "廬"), + (0xF983, "M", "旅"), + (0xF984, "M", "濾"), + (0xF985, "M", "礪"), + (0xF986, "M", "閭"), + ] + + +def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xF987, "M", "驪"), + (0xF988, "M", "麗"), + (0xF989, "M", "黎"), + (0xF98A, "M", "力"), + (0xF98B, "M", "曆"), + (0xF98C, "M", "歷"), + (0xF98D, "M", "轢"), + (0xF98E, "M", "年"), + (0xF98F, "M", "憐"), + (0xF990, "M", "戀"), + (0xF991, "M", "撚"), + (0xF992, "M", "漣"), + (0xF993, "M", "煉"), + (0xF994, "M", "璉"), + (0xF995, "M", "秊"), + (0xF996, "M", "練"), + (0xF997, "M", "聯"), + (0xF998, "M", "輦"), + (0xF999, "M", "蓮"), + (0xF99A, "M", "連"), + (0xF99B, "M", "鍊"), + (0xF99C, "M", "列"), + (0xF99D, "M", "劣"), + (0xF99E, "M", "咽"), + (0xF99F, "M", "烈"), + (0xF9A0, "M", "裂"), + (0xF9A1, "M", "說"), + (0xF9A2, "M", "廉"), + (0xF9A3, "M", "念"), + (0xF9A4, "M", "捻"), + (0xF9A5, "M", "殮"), + (0xF9A6, "M", "簾"), + (0xF9A7, "M", "獵"), + (0xF9A8, "M", "令"), + (0xF9A9, "M", "囹"), + (0xF9AA, "M", "寧"), + (0xF9AB, "M", "嶺"), + (0xF9AC, "M", "怜"), + (0xF9AD, "M", "玲"), + (0xF9AE, "M", "瑩"), + (0xF9AF, "M", "羚"), + (0xF9B0, "M", "聆"), + (0xF9B1, "M", "鈴"), + (0xF9B2, "M", "零"), + (0xF9B3, "M", "靈"), + (0xF9B4, "M", "領"), + (0xF9B5, "M", "例"), + (0xF9B6, "M", "禮"), + (0xF9B7, "M", "醴"), + (0xF9B8, "M", "隸"), + (0xF9B9, "M", "惡"), + (0xF9BA, "M", "了"), + (0xF9BB, "M", "僚"), + (0xF9BC, "M", "寮"), + (0xF9BD, "M", "尿"), + (0xF9BE, "M", "料"), + (0xF9BF, "M", "樂"), + (0xF9C0, "M", "燎"), + (0xF9C1, "M", "療"), + (0xF9C2, "M", "蓼"), + (0xF9C3, "M", "遼"), + (0xF9C4, "M", "龍"), + (0xF9C5, "M", "暈"), + (0xF9C6, "M", "阮"), + (0xF9C7, "M", "劉"), + (0xF9C8, "M", "杻"), + (0xF9C9, "M", "柳"), + (0xF9CA, "M", "流"), + (0xF9CB, "M", "溜"), + (0xF9CC, "M", "琉"), + (0xF9CD, "M", "留"), + (0xF9CE, "M", "硫"), + (0xF9CF, "M", "紐"), + (0xF9D0, "M", "類"), + (0xF9D1, "M", "六"), + (0xF9D2, "M", "戮"), + (0xF9D3, "M", "陸"), + (0xF9D4, "M", "倫"), + (0xF9D5, "M", "崙"), + (0xF9D6, "M", "淪"), + (0xF9D7, "M", "輪"), + (0xF9D8, "M", "律"), + (0xF9D9, "M", "慄"), + (0xF9DA, "M", "栗"), + (0xF9DB, "M", "率"), + (0xF9DC, "M", "隆"), + (0xF9DD, "M", "利"), + (0xF9DE, "M", "吏"), + (0xF9DF, "M", "履"), + (0xF9E0, "M", "易"), + (0xF9E1, "M", "李"), + (0xF9E2, "M", "梨"), + (0xF9E3, "M", "泥"), + (0xF9E4, "M", "理"), + (0xF9E5, "M", "痢"), + (0xF9E6, "M", "罹"), + (0xF9E7, "M", "裏"), + (0xF9E8, "M", "裡"), + (0xF9E9, "M", "里"), + (0xF9EA, "M", "離"), + ] + + +def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xF9EB, "M", "匿"), + (0xF9EC, "M", "溺"), + (0xF9ED, "M", "吝"), + 
+ (0xF9EE, "M", "燐"),
+ (0xF9EF, "M", "璘"),
+ (0xF9F0, "M", "藺"),
+ (0xF9F1, "M", "隣"),
+ (0xF9F2, "M", "鱗"),
+ (0xF9F3, "M", "麟"),
+ (0xF9F4, "M", "林"),
+ (0xF9F5, "M", "淋"),
+ (0xF9F6, "M", "臨"),
+ (0xF9F7, "M", "立"),
+ (0xF9F8, "M", "笠"),
+ (0xF9F9, "M", "粒"),
+ (0xF9FA, "M", "狀"),
+ (0xF9FB, "M", "炙"),
+ (0xF9FC, "M", "識"),
+ (0xF9FD, "M", "什"),
+ (0xF9FE, "M", "茶"),
+ (0xF9FF, "M", "刺"),
+ (0xFA00, "M", "切"),
+ (0xFA01, "M", "度"),
+ (0xFA02, "M", "拓"),
+ (0xFA03, "M", "糖"),
+ (0xFA04, "M", "宅"),
+ (0xFA05, "M", "洞"),
+ (0xFA06, "M", "暴"),
+ (0xFA07, "M", "輻"),
+ (0xFA08, "M", "行"),
+ (0xFA09, "M", "降"),
+ (0xFA0A, "M", "見"),
+ (0xFA0B, "M", "廓"),
+ (0xFA0C, "M", "兀"),
+ (0xFA0D, "M", "嗀"),
+ (0xFA0E, "V"),
+ (0xFA10, "M", "塚"),
+ (0xFA11, "V"),
+ (0xFA12, "M", "晴"),
+ (0xFA13, "V"),
+ (0xFA15, "M", "凞"),
+ (0xFA16, "M", "猪"),
+ (0xFA17, "M", "益"),
+ (0xFA18, "M", "礼"),
+ (0xFA19, "M", "神"),
+ (0xFA1A, "M", "祥"),
+ (0xFA1B, "M", "福"),
+ (0xFA1C, "M", "靖"),
+ (0xFA1D, "M", "精"),
+ (0xFA1E, "M", "羽"),
+ (0xFA1F, "V"),
+ (0xFA20, "M", "蘒"),
+ (0xFA21, "V"),
+ (0xFA22, "M", "諸"),
+ (0xFA23, "V"),
+ (0xFA25, "M", "逸"),
+ (0xFA26, "M", "都"),
+ (0xFA27, "V"),
+ (0xFA2A, "M", "飯"),
+ (0xFA2B, "M", "飼"),
+ (0xFA2C, "M", "館"),
+ (0xFA2D, "M", "鶴"),
+ (0xFA2E, "M", "郞"),
+ (0xFA2F, "M", "隷"),
+ (0xFA30, "M", "侮"),
+ (0xFA31, "M", "僧"),
+ (0xFA32, "M", "免"),
+ (0xFA33, "M", "勉"),
+ (0xFA34, "M", "勤"),
+ (0xFA35, "M", "卑"),
+ (0xFA36, "M", "喝"),
+ (0xFA37, "M", "嘆"),
+ (0xFA38, "M", "器"),
+ (0xFA39, "M", "塀"),
+ (0xFA3A, "M", "墨"),
+ (0xFA3B, "M", "層"),
+ (0xFA3C, "M", "屮"),
+ (0xFA3D, "M", "悔"),
+ (0xFA3E, "M", "慨"),
+ (0xFA3F, "M", "憎"),
+ (0xFA40, "M", "懲"),
+ (0xFA41, "M", "敏"),
+ (0xFA42, "M", "既"),
+ (0xFA43, "M", "暑"),
+ (0xFA44, "M", "梅"),
+ (0xFA45, "M", "海"),
+ (0xFA46, "M", "渚"),
+ (0xFA47, "M", "漢"),
+ (0xFA48, "M", "煮"),
+ (0xFA49, "M", "爫"),
+ (0xFA4A, "M", "琢"),
+ (0xFA4B, "M", "碑"),
+ (0xFA4C, "M", "社"),
+ (0xFA4D, "M", "祉"),
+ (0xFA4E, "M", "祈"),
+ (0xFA4F, "M", "祐"),
+ (0xFA50, "M", "祖"),
+ (0xFA51, "M", "祝"),
+ (0xFA52, "M", "禍"),
+ (0xFA53, "M", "禎"),
+ ]
+
+
+def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFA54, "M", "穀"),
+ (0xFA55, "M", "突"),
+ (0xFA56, "M", "節"),
+ (0xFA57, "M", "練"),
+ (0xFA58, "M", "縉"),
+ (0xFA59, "M", "繁"),
+ (0xFA5A, "M", "署"),
+ (0xFA5B, "M", "者"),
+ (0xFA5C, "M", "臭"),
+ (0xFA5D, "M", "艹"),
+ (0xFA5F, "M", "著"),
+ (0xFA60, "M", "褐"),
+ (0xFA61, "M", "視"),
+ (0xFA62, "M", "謁"),
+ (0xFA63, "M", "謹"),
+ (0xFA64, "M", "賓"),
+ (0xFA65, "M", "贈"),
+ (0xFA66, "M", "辶"),
+ (0xFA67, "M", "逸"),
+ (0xFA68, "M", "難"),
+ (0xFA69, "M", "響"),
+ (0xFA6A, "M", "頻"),
+ (0xFA6B, "M", "恵"),
+ (0xFA6C, "M", "𤋮"),
+ (0xFA6D, "M", "舘"),
+ (0xFA6E, "X"),
+ (0xFA70, "M", "並"),
+ (0xFA71, "M", "况"),
+ (0xFA72, "M", "全"),
+ (0xFA73, "M", "侀"),
+ (0xFA74, "M", "充"),
+ (0xFA75, "M", "冀"),
+ (0xFA76, "M", "勇"),
+ (0xFA77, "M", "勺"),
+ (0xFA78, "M", "喝"),
+ (0xFA79, "M", "啕"),
+ (0xFA7A, "M", "喙"),
+ (0xFA7B, "M", "嗢"),
+ (0xFA7C, "M", "塚"),
+ (0xFA7D, "M", "墳"),
+ (0xFA7E, "M", "奄"),
+ (0xFA7F, "M", "奔"),
+ (0xFA80, "M", "婢"),
+ (0xFA81, "M", "嬨"),
+ (0xFA82, "M", "廒"),
+ (0xFA83, "M", "廙"),
+ (0xFA84, "M", "彩"),
+ (0xFA85, "M", "徭"),
+ (0xFA86, "M", "惘"),
+ (0xFA87, "M", "慎"),
+ (0xFA88, "M", "愈"),
+ (0xFA89, "M", "憎"),
+ (0xFA8A, "M", "慠"),
+ (0xFA8B, "M", "懲"),
+ (0xFA8C, "M", "戴"),
+ (0xFA8D, "M", "揄"),
+ (0xFA8E, "M", "搜"),
+ (0xFA8F, "M", "摒"),
+ (0xFA90, "M", "敖"),
+ (0xFA91, "M", "晴"),
+ (0xFA92, "M", "朗"),
+ (0xFA93, "M", "望"),
+ (0xFA94, "M", "杖"),
+ (0xFA95, "M", "歹"),
+ (0xFA96, "M", "殺"),
+ (0xFA97, "M", "流"),
+ (0xFA98, "M", "滛"),
+ (0xFA99, "M", "滋"),
+ (0xFA9A, "M", "漢"),
+ (0xFA9B, "M", "瀞"),
+ (0xFA9C, "M", "煮"),
+ (0xFA9D, "M", "瞧"),
+ (0xFA9E, "M", "爵"),
+ (0xFA9F, "M", "犯"),
+ (0xFAA0, "M", "猪"),
+ (0xFAA1, "M", "瑱"),
+ (0xFAA2, "M", "甆"),
+ (0xFAA3, "M", "画"),
+ (0xFAA4, "M", "瘝"),
+ (0xFAA5, "M", "瘟"),
+ (0xFAA6, "M", "益"),
+ (0xFAA7, "M", "盛"),
+ (0xFAA8, "M", "直"),
+ (0xFAA9, "M", "睊"),
+ (0xFAAA, "M", "着"),
+ (0xFAAB, "M", "磌"),
+ (0xFAAC, "M", "窱"),
+ (0xFAAD, "M", "節"),
+ (0xFAAE, "M", "类"),
+ (0xFAAF, "M", "絛"),
+ (0xFAB0, "M", "練"),
+ (0xFAB1, "M", "缾"),
+ (0xFAB2, "M", "者"),
+ (0xFAB3, "M", "荒"),
+ (0xFAB4, "M", "華"),
+ (0xFAB5, "M", "蝹"),
+ (0xFAB6, "M", "襁"),
+ (0xFAB7, "M", "覆"),
+ (0xFAB8, "M", "視"),
+ (0xFAB9, "M", "調"),
+ ]
+
+
+def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFABA, "M", "諸"),
+ (0xFABB, "M", "請"),
+ (0xFABC, "M", "謁"),
+ (0xFABD, "M", "諾"),
+ (0xFABE, "M", "諭"),
+ (0xFABF, "M", "謹"),
+ (0xFAC0, "M", "變"),
+ (0xFAC1, "M", "贈"),
+ (0xFAC2, "M", "輸"),
+ (0xFAC3, "M", "遲"),
+ (0xFAC4, "M", "醙"),
+ (0xFAC5, "M", "鉶"),
+ (0xFAC6, "M", "陼"),
+ (0xFAC7, "M", "難"),
+ (0xFAC8, "M", "靖"),
+ (0xFAC9, "M", "韛"),
+ (0xFACA, "M", "響"),
+ (0xFACB, "M", "頋"),
+ (0xFACC, "M", "頻"),
+ (0xFACD, "M", "鬒"),
+ (0xFACE, "M", "龜"),
+ (0xFACF, "M", "𢡊"),
+ (0xFAD0, "M", "𢡄"),
+ (0xFAD1, "M", "𣏕"),
+ (0xFAD2, "M", "㮝"),
+ (0xFAD3, "M", "䀘"),
+ (0xFAD4, "M", "䀹"),
+ (0xFAD5, "M", "𥉉"),
+ (0xFAD6, "M", "𥳐"),
+ (0xFAD7, "M", "𧻓"),
+ (0xFAD8, "M", "齃"),
+ (0xFAD9, "M", "龎"),
+ (0xFADA, "X"),
+ (0xFB00, "M", "ff"),
+ (0xFB01, "M", "fi"),
+ (0xFB02, "M", "fl"),
+ (0xFB03, "M", "ffi"),
+ (0xFB04, "M", "ffl"),
+ (0xFB05, "M", "st"),
+ (0xFB07, "X"),
+ (0xFB13, "M", "մն"),
+ (0xFB14, "M", "մե"),
+ (0xFB15, "M", "մի"),
+ (0xFB16, "M", "վն"),
+ (0xFB17, "M", "մխ"),
+ (0xFB18, "X"),
+ (0xFB1D, "M", "יִ"),
+ (0xFB1E, "V"),
+ (0xFB1F, "M", "ײַ"),
+ (0xFB20, "M", "ע"),
+ (0xFB21, "M", "א"),
+ (0xFB22, "M", "ד"),
+ (0xFB23, "M", "ה"),
+ (0xFB24, "M", "כ"),
+ (0xFB25, "M", "ל"),
+ (0xFB26, "M", "ם"),
+ (0xFB27, "M", "ר"),
+ (0xFB28, "M", "ת"),
+ (0xFB29, "M", "+"),
+ (0xFB2A, "M", "שׁ"),
+ (0xFB2B, "M", "שׂ"),
+ (0xFB2C, "M", "שּׁ"),
+ (0xFB2D, "M", "שּׂ"),
+ (0xFB2E, "M", "אַ"),
+ (0xFB2F, "M", "אָ"),
+ (0xFB30, "M", "אּ"),
+ (0xFB31, "M", "בּ"),
+ (0xFB32, "M", "גּ"),
+ (0xFB33, "M", "דּ"),
+ (0xFB34, "M", "הּ"),
+ (0xFB35, "M", "וּ"),
+ (0xFB36, "M", "זּ"),
+ (0xFB37, "X"),
+ (0xFB38, "M", "טּ"),
+ (0xFB39, "M", "יּ"),
+ (0xFB3A, "M", "ךּ"),
+ (0xFB3B, "M", "כּ"),
+ (0xFB3C, "M", "לּ"),
+ (0xFB3D, "X"),
+ (0xFB3E, "M", "מּ"),
+ (0xFB3F, "X"),
+ (0xFB40, "M", "נּ"),
+ (0xFB41, "M", "סּ"),
+ (0xFB42, "X"),
+ (0xFB43, "M", "ףּ"),
+ (0xFB44, "M", "פּ"),
+ (0xFB45, "X"),
+ (0xFB46, "M", "צּ"),
+ (0xFB47, "M", "קּ"),
+ (0xFB48, "M", "רּ"),
+ (0xFB49, "M", "שּ"),
+ (0xFB4A, "M", "תּ"),
+ (0xFB4B, "M", "וֹ"),
+ (0xFB4C, "M", "בֿ"),
+ (0xFB4D, "M", "כֿ"),
+ (0xFB4E, "M", "פֿ"),
+ (0xFB4F, "M", "אל"),
+ (0xFB50, "M", "ٱ"),
+ (0xFB52, "M", "ٻ"),
+ (0xFB56, "M", "پ"),
+ ]
+
+
+def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFB5A, "M", "ڀ"),
+ (0xFB5E, "M", "ٺ"),
+ (0xFB62, "M", "ٿ"),
+ (0xFB66, "M", "ٹ"),
+ (0xFB6A, "M", "ڤ"),
+ (0xFB6E, "M", "ڦ"),
+ (0xFB72, "M", "ڄ"),
+ (0xFB76, "M", "ڃ"),
+ (0xFB7A, "M", "چ"),
+ (0xFB7E, "M", "ڇ"),
+ (0xFB82, "M", "ڍ"),
+ (0xFB84, "M", "ڌ"),
+ (0xFB86, "M", "ڎ"),
+ (0xFB88, "M", "ڈ"),
+ (0xFB8A, "M", "ژ"),
+ (0xFB8C, "M", "ڑ"),
+ (0xFB8E, "M", "ک"),
+ (0xFB92, "M", "گ"),
+ (0xFB96, "M", "ڳ"),
+ (0xFB9A, "M", "ڱ"),
+ (0xFB9E, "M", "ں"),
+ (0xFBA0, "M", "ڻ"),
+ (0xFBA4, "M", "ۀ"),
+ (0xFBA6, "M", "ہ"),
+ (0xFBAA, "M", "ھ"),
+ (0xFBAE, "M", "ے"),
+ (0xFBB0, "M", "ۓ"),
+ (0xFBB2, "V"),
+ (0xFBC3, "X"),
+ (0xFBD3, "M", "ڭ"),
+ (0xFBD7, "M", "ۇ"),
+ (0xFBD9, "M", "ۆ"),
+ (0xFBDB, "M", "ۈ"),
+ (0xFBDD, "M", "ۇٴ"),
+ (0xFBDE, "M", "ۋ"),
+ (0xFBE0, "M", "ۅ"),
+ (0xFBE2, "M", "ۉ"),
+ (0xFBE4, "M", "ې"),
+ (0xFBE8, "M", "ى"),
+ (0xFBEA, "M", "ئا"),
+ (0xFBEC, "M", "ئە"),
+ (0xFBEE, "M", "ئو"),
+ (0xFBF0, "M", "ئۇ"),
+ (0xFBF2, "M", "ئۆ"),
+ (0xFBF4, "M", "ئۈ"),
+ (0xFBF6, "M", "ئې"),
+ (0xFBF9, "M", "ئى"),
+ (0xFBFC, "M", "ی"),
+ (0xFC00, "M", "ئج"),
+ (0xFC01, "M", "ئح"),
+ (0xFC02, "M", "ئم"),
+ (0xFC03, "M", "ئى"),
+ (0xFC04, "M", "ئي"),
+ (0xFC05, "M", "بج"),
+ (0xFC06, "M", "بح"),
+ (0xFC07, "M", "بخ"),
+ (0xFC08, "M", "بم"),
+ (0xFC09, "M", "بى"),
+ (0xFC0A, "M", "بي"),
+ (0xFC0B, "M", "تج"),
+ (0xFC0C, "M", "تح"),
+ (0xFC0D, "M", "تخ"),
+ (0xFC0E, "M", "تم"),
+ (0xFC0F, "M", "تى"),
+ (0xFC10, "M", "تي"),
+ (0xFC11, "M", "ثج"),
+ (0xFC12, "M", "ثم"),
+ (0xFC13, "M", "ثى"),
+ (0xFC14, "M", "ثي"),
+ (0xFC15, "M", "جح"),
+ (0xFC16, "M", "جم"),
+ (0xFC17, "M", "حج"),
+ (0xFC18, "M", "حم"),
+ (0xFC19, "M", "خج"),
+ (0xFC1A, "M", "خح"),
+ (0xFC1B, "M", "خم"),
+ (0xFC1C, "M", "سج"),
+ (0xFC1D, "M", "سح"),
+ (0xFC1E, "M", "سخ"),
+ (0xFC1F, "M", "سم"),
+ (0xFC20, "M", "صح"),
+ (0xFC21, "M", "صم"),
+ (0xFC22, "M", "ضج"),
+ (0xFC23, "M", "ضح"),
+ (0xFC24, "M", "ضخ"),
+ (0xFC25, "M", "ضم"),
+ (0xFC26, "M", "طح"),
+ (0xFC27, "M", "طم"),
+ (0xFC28, "M", "ظم"),
+ (0xFC29, "M", "عج"),
+ (0xFC2A, "M", "عم"),
+ (0xFC2B, "M", "غج"),
+ (0xFC2C, "M", "غم"),
+ (0xFC2D, "M", "فج"),
+ (0xFC2E, "M", "فح"),
+ (0xFC2F, "M", "فخ"),
+ (0xFC30, "M", "فم"),
+ (0xFC31, "M", "فى"),
+ (0xFC32, "M", "في"),
+ (0xFC33, "M", "قح"),
+ ]
+
+
+def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFC34, "M", "قم"),
+ (0xFC35, "M", "قى"),
+ (0xFC36, "M", "قي"),
+ (0xFC37, "M", "كا"),
+ (0xFC38, "M", "كج"),
+ (0xFC39, "M", "كح"),
+ (0xFC3A, "M", "كخ"),
+ (0xFC3B, "M", "كل"),
+ (0xFC3C, "M", "كم"),
+ (0xFC3D, "M", "كى"),
+ (0xFC3E, "M", "كي"),
+ (0xFC3F, "M", "لج"),
+ (0xFC40, "M", "لح"),
+ (0xFC41, "M", "لخ"),
+ (0xFC42, "M", "لم"),
+ (0xFC43, "M", "لى"),
+ (0xFC44, "M", "لي"),
+ (0xFC45, "M", "مج"),
+ (0xFC46, "M", "مح"),
+ (0xFC47, "M", "مخ"),
+ (0xFC48, "M", "مم"),
+ (0xFC49, "M", "مى"),
+ (0xFC4A, "M", "مي"),
+ (0xFC4B, "M", "نج"),
+ (0xFC4C, "M", "نح"),
+ (0xFC4D, "M", "نخ"),
+ (0xFC4E, "M", "نم"),
+ (0xFC4F, "M", "نى"),
+ (0xFC50, "M", "ني"),
+ (0xFC51, "M", "هج"),
+ (0xFC52, "M", "هم"),
+ (0xFC53, "M", "هى"),
+ (0xFC54, "M", "هي"),
+ (0xFC55, "M", "يج"),
+ (0xFC56, "M", "يح"),
+ (0xFC57, "M", "يخ"),
+ (0xFC58, "M", "يم"),
+ (0xFC59, "M", "يى"),
+ (0xFC5A, "M", "يي"),
+ (0xFC5B, "M", "ذٰ"),
+ (0xFC5C, "M", "رٰ"),
+ (0xFC5D, "M", "ىٰ"),
+ (0xFC5E, "M", " ٌّ"),
+ (0xFC5F, "M", " ٍّ"),
+ (0xFC60, "M", " َّ"),
+ (0xFC61, "M", " ُّ"),
+ (0xFC62, "M", " ِّ"),
+ (0xFC63, "M", " ّٰ"),
+ (0xFC64, "M", "ئر"),
+ (0xFC65, "M", "ئز"),
+ (0xFC66, "M", "ئم"),
+ (0xFC67, "M", "ئن"),
+ (0xFC68, "M", "ئى"),
+ (0xFC69, "M", "ئي"),
+ (0xFC6A, "M", "بر"),
+ (0xFC6B, "M", "بز"),
+ (0xFC6C, "M", "بم"),
+ (0xFC6D, "M", "بن"),
+ (0xFC6E, "M", "بى"),
+ (0xFC6F, "M", "بي"),
+ (0xFC70, "M", "تر"),
+ (0xFC71, "M", "تز"),
+ (0xFC72, "M", "تم"),
+ (0xFC73, "M", "تن"),
+ (0xFC74, "M", "تى"),
+ (0xFC75, "M", "تي"),
+ (0xFC76, "M", "ثر"),
+ (0xFC77, "M", "ثز"),
+ (0xFC78, "M", "ثم"),
+ (0xFC79, "M", "ثن"),
+ (0xFC7A, "M", "ثى"),
+ (0xFC7B, "M", "ثي"),
+ (0xFC7C, "M", "فى"),
+ (0xFC7D, "M", "في"),
+ (0xFC7E, "M", "قى"),
+ (0xFC7F, "M", "قي"),
+ (0xFC80, "M", "كا"),
+ (0xFC81, "M", "كل"),
+ (0xFC82, "M", "كم"),
+ (0xFC83, "M", "كى"),
+ (0xFC84, "M", "كي"),
+ (0xFC85, "M", "لم"),
+ (0xFC86, "M", "لى"),
+ (0xFC87, "M", "لي"),
+ (0xFC88, "M", "ما"),
+ (0xFC89, "M", "مم"),
+ (0xFC8A, "M", "نر"),
+ (0xFC8B, "M", "نز"),
+ (0xFC8C, "M", "نم"),
+ (0xFC8D, "M", "نن"),
+ (0xFC8E, "M", "نى"),
+ (0xFC8F, "M", "ني"),
+ (0xFC90, "M", "ىٰ"),
+ (0xFC91, "M", "ير"),
+ (0xFC92, "M", "يز"),
+ (0xFC93, "M", "يم"),
+ (0xFC94, "M", "ين"),
+ (0xFC95, "M", "يى"),
+ (0xFC96, "M", "يي"),
+ (0xFC97, "M", "ئج"),
+ ]
+
+
+def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFC98, "M", "ئح"),
+ (0xFC99, "M", "ئخ"),
+ (0xFC9A, "M", "ئم"),
+ (0xFC9B, "M", "ئه"),
+ (0xFC9C, "M", "بج"),
+ (0xFC9D, "M", "بح"),
+ (0xFC9E, "M", "بخ"),
+ (0xFC9F, "M", "بم"),
+ (0xFCA0, "M", "به"),
+ (0xFCA1, "M", "تج"),
+ (0xFCA2, "M", "تح"),
+ (0xFCA3, "M", "تخ"),
+ (0xFCA4, "M", "تم"),
+ (0xFCA5, "M", "ته"),
+ (0xFCA6, "M", "ثم"),
+ (0xFCA7, "M", "جح"),
+ (0xFCA8, "M", "جم"),
+ (0xFCA9, "M", "حج"),
+ (0xFCAA, "M", "حم"),
+ (0xFCAB, "M", "خج"),
+ (0xFCAC, "M", "خم"),
+ (0xFCAD, "M", "سج"),
+ (0xFCAE, "M", "سح"),
+ (0xFCAF, "M", "سخ"),
+ (0xFCB0, "M", "سم"),
+ (0xFCB1, "M", "صح"),
+ (0xFCB2, "M", "صخ"),
+ (0xFCB3, "M", "صم"),
+ (0xFCB4, "M", "ضج"),
+ (0xFCB5, "M", "ضح"),
+ (0xFCB6, "M", "ضخ"),
+ (0xFCB7, "M", "ضم"),
+ (0xFCB8, "M", "طح"),
+ (0xFCB9, "M", "ظم"),
+ (0xFCBA, "M", "عج"),
+ (0xFCBB, "M", "عم"),
+ (0xFCBC, "M", "غج"),
+ (0xFCBD, "M", "غم"),
+ (0xFCBE, "M", "فج"),
+ (0xFCBF, "M", "فح"),
+ (0xFCC0, "M", "فخ"),
+ (0xFCC1, "M", "فم"),
+ (0xFCC2, "M", "قح"),
+ (0xFCC3, "M", "قم"),
+ (0xFCC4, "M", "كج"),
+ (0xFCC5, "M", "كح"),
+ (0xFCC6, "M", "كخ"),
+ (0xFCC7, "M", "كل"),
+ (0xFCC8, "M", "كم"),
+ (0xFCC9, "M", "لج"),
+ (0xFCCA, "M", "لح"),
+ (0xFCCB, "M", "لخ"),
+ (0xFCCC, "M", "لم"),
+ (0xFCCD, "M", "له"),
+ (0xFCCE, "M", "مج"),
+ (0xFCCF, "M", "مح"),
+ (0xFCD0, "M", "مخ"),
+ (0xFCD1, "M", "مم"),
+ (0xFCD2, "M", "نج"),
+ (0xFCD3, "M", "نح"),
+ (0xFCD4, "M", "نخ"),
+ (0xFCD5, "M", "نم"),
+ (0xFCD6, "M", "نه"),
+ (0xFCD7, "M", "هج"),
+ (0xFCD8, "M", "هم"),
+ (0xFCD9, "M", "هٰ"),
+ (0xFCDA, "M", "يج"),
+ (0xFCDB, "M", "يح"),
+ (0xFCDC, "M", "يخ"),
+ (0xFCDD, "M", "يم"),
+ (0xFCDE, "M", "يه"),
+ (0xFCDF, "M", "ئم"),
+ (0xFCE0, "M", "ئه"),
+ (0xFCE1, "M", "بم"),
+ (0xFCE2, "M", "به"),
+ (0xFCE3, "M", "تم"),
+ (0xFCE4, "M", "ته"),
+ (0xFCE5, "M", "ثم"),
+ (0xFCE6, "M", "ثه"),
+ (0xFCE7, "M", "سم"),
+ (0xFCE8, "M", "سه"),
+ (0xFCE9, "M", "شم"),
+ (0xFCEA, "M", "شه"),
+ (0xFCEB, "M", "كل"),
+ (0xFCEC, "M", "كم"),
+ (0xFCED, "M", "لم"),
+ (0xFCEE, "M", "نم"),
+ (0xFCEF, "M", "نه"),
+ (0xFCF0, "M", "يم"),
+ (0xFCF1, "M", "يه"),
+ (0xFCF2, "M", "ـَّ"),
+ (0xFCF3, "M", "ـُّ"),
+ (0xFCF4, "M", "ـِّ"),
+ (0xFCF5, "M", "طى"),
+ (0xFCF6, "M", "طي"),
+ (0xFCF7, "M", "عى"),
+ (0xFCF8, "M", "عي"),
+ (0xFCF9, "M", "غى"),
+ (0xFCFA, "M", "غي"),
+ (0xFCFB, "M", "سى"),
+ ]
+
+
+def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFCFC, "M", "سي"),
+ (0xFCFD, "M", "شى"),
+ (0xFCFE, "M", "شي"),
+ (0xFCFF, "M", "حى"),
+ (0xFD00, "M", "حي"),
+ (0xFD01, "M", "جى"),
+ (0xFD02, "M", "جي"),
+ (0xFD03, "M", "خى"),
+ (0xFD04, "M", "خي"),
+ (0xFD05, "M", "صى"),
+ (0xFD06, "M", "صي"),
+ (0xFD07, "M", "ضى"),
+ (0xFD08, "M", "ضي"),
+ (0xFD09, "M", "شج"),
"شج"), + (0xFD0A, "M", "شح"), + (0xFD0B, "M", "شخ"), + (0xFD0C, "M", "شم"), + (0xFD0D, "M", "شر"), + (0xFD0E, "M", "سر"), + (0xFD0F, "M", "صر"), + (0xFD10, "M", "ضر"), + (0xFD11, "M", "طى"), + (0xFD12, "M", "طي"), + (0xFD13, "M", "عى"), + (0xFD14, "M", "عي"), + (0xFD15, "M", "غى"), + (0xFD16, "M", "غي"), + (0xFD17, "M", "سى"), + (0xFD18, "M", "سي"), + (0xFD19, "M", "شى"), + (0xFD1A, "M", "شي"), + (0xFD1B, "M", "حى"), + (0xFD1C, "M", "حي"), + (0xFD1D, "M", "جى"), + (0xFD1E, "M", "جي"), + (0xFD1F, "M", "خى"), + (0xFD20, "M", "خي"), + (0xFD21, "M", "صى"), + (0xFD22, "M", "صي"), + (0xFD23, "M", "ضى"), + (0xFD24, "M", "ضي"), + (0xFD25, "M", "شج"), + (0xFD26, "M", "شح"), + (0xFD27, "M", "شخ"), + (0xFD28, "M", "شم"), + (0xFD29, "M", "شر"), + (0xFD2A, "M", "سر"), + (0xFD2B, "M", "صر"), + (0xFD2C, "M", "ضر"), + (0xFD2D, "M", "شج"), + (0xFD2E, "M", "شح"), + (0xFD2F, "M", "شخ"), + (0xFD30, "M", "شم"), + (0xFD31, "M", "سه"), + (0xFD32, "M", "شه"), + (0xFD33, "M", "طم"), + (0xFD34, "M", "سج"), + (0xFD35, "M", "سح"), + (0xFD36, "M", "سخ"), + (0xFD37, "M", "شج"), + (0xFD38, "M", "شح"), + (0xFD39, "M", "شخ"), + (0xFD3A, "M", "طم"), + (0xFD3B, "M", "ظم"), + (0xFD3C, "M", "اً"), + (0xFD3E, "V"), + (0xFD50, "M", "تجم"), + (0xFD51, "M", "تحج"), + (0xFD53, "M", "تحم"), + (0xFD54, "M", "تخم"), + (0xFD55, "M", "تمج"), + (0xFD56, "M", "تمح"), + (0xFD57, "M", "تمخ"), + (0xFD58, "M", "جمح"), + (0xFD5A, "M", "حمي"), + (0xFD5B, "M", "حمى"), + (0xFD5C, "M", "سحج"), + (0xFD5D, "M", "سجح"), + (0xFD5E, "M", "سجى"), + (0xFD5F, "M", "سمح"), + (0xFD61, "M", "سمج"), + (0xFD62, "M", "سمم"), + (0xFD64, "M", "صحح"), + (0xFD66, "M", "صمم"), + (0xFD67, "M", "شحم"), + (0xFD69, "M", "شجي"), + (0xFD6A, "M", "شمخ"), + (0xFD6C, "M", "شمم"), + (0xFD6E, "M", "ضحى"), + (0xFD6F, "M", "ضخم"), + (0xFD71, "M", "طمح"), + (0xFD73, "M", "طمم"), + (0xFD74, "M", "طمي"), + (0xFD75, "M", "عجم"), + (0xFD76, "M", "عمم"), + (0xFD78, "M", "عمى"), + (0xFD79, "M", "غمم"), + (0xFD7A, "M", "غمي"), + (0xFD7B, "M", "غمى"), + (0xFD7C, "M", "فخم"), + ] + + +def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFD7E, "M", "قمح"), + (0xFD7F, "M", "قمم"), + (0xFD80, "M", "لحم"), + (0xFD81, "M", "لحي"), + (0xFD82, "M", "لحى"), + (0xFD83, "M", "لجج"), + (0xFD85, "M", "لخم"), + (0xFD87, "M", "لمح"), + (0xFD89, "M", "محج"), + (0xFD8A, "M", "محم"), + (0xFD8B, "M", "محي"), + (0xFD8C, "M", "مجح"), + (0xFD8D, "M", "مجم"), + (0xFD8E, "M", "مخج"), + (0xFD8F, "M", "مخم"), + (0xFD90, "X"), + (0xFD92, "M", "مجخ"), + (0xFD93, "M", "همج"), + (0xFD94, "M", "همم"), + (0xFD95, "M", "نحم"), + (0xFD96, "M", "نحى"), + (0xFD97, "M", "نجم"), + (0xFD99, "M", "نجى"), + (0xFD9A, "M", "نمي"), + (0xFD9B, "M", "نمى"), + (0xFD9C, "M", "يمم"), + (0xFD9E, "M", "بخي"), + (0xFD9F, "M", "تجي"), + (0xFDA0, "M", "تجى"), + (0xFDA1, "M", "تخي"), + (0xFDA2, "M", "تخى"), + (0xFDA3, "M", "تمي"), + (0xFDA4, "M", "تمى"), + (0xFDA5, "M", "جمي"), + (0xFDA6, "M", "جحى"), + (0xFDA7, "M", "جمى"), + (0xFDA8, "M", "سخى"), + (0xFDA9, "M", "صحي"), + (0xFDAA, "M", "شحي"), + (0xFDAB, "M", "ضحي"), + (0xFDAC, "M", "لجي"), + (0xFDAD, "M", "لمي"), + (0xFDAE, "M", "يحي"), + (0xFDAF, "M", "يجي"), + (0xFDB0, "M", "يمي"), + (0xFDB1, "M", "ممي"), + (0xFDB2, "M", "قمي"), + (0xFDB3, "M", "نحي"), + (0xFDB4, "M", "قمح"), + (0xFDB5, "M", "لحم"), + (0xFDB6, "M", "عمي"), + (0xFDB7, "M", "كمي"), + (0xFDB8, "M", "نجح"), + (0xFDB9, "M", "مخي"), + (0xFDBA, "M", "لجم"), + (0xFDBB, "M", "كمم"), + (0xFDBC, "M", "لجم"), + (0xFDBD, "M", "نجح"), + (0xFDBE, "M", "جحي"), + (0xFDBF, "M", "حجي"), + (0xFDC0, "M", 
"مجي"), + (0xFDC1, "M", "فمي"), + (0xFDC2, "M", "بحي"), + (0xFDC3, "M", "كمم"), + (0xFDC4, "M", "عجم"), + (0xFDC5, "M", "صمم"), + (0xFDC6, "M", "سخي"), + (0xFDC7, "M", "نجي"), + (0xFDC8, "X"), + (0xFDCF, "V"), + (0xFDD0, "X"), + (0xFDF0, "M", "صلے"), + (0xFDF1, "M", "قلے"), + (0xFDF2, "M", "الله"), + (0xFDF3, "M", "اكبر"), + (0xFDF4, "M", "محمد"), + (0xFDF5, "M", "صلعم"), + (0xFDF6, "M", "رسول"), + (0xFDF7, "M", "عليه"), + (0xFDF8, "M", "وسلم"), + (0xFDF9, "M", "صلى"), + (0xFDFA, "M", "صلى الله عليه وسلم"), + (0xFDFB, "M", "جل جلاله"), + (0xFDFC, "M", "ریال"), + (0xFDFD, "V"), + (0xFE00, "I"), + (0xFE10, "M", ","), + (0xFE11, "M", "、"), + (0xFE12, "X"), + (0xFE13, "M", ":"), + (0xFE14, "M", ";"), + (0xFE15, "M", "!"), + (0xFE16, "M", "?"), + (0xFE17, "M", "〖"), + (0xFE18, "M", "〗"), + (0xFE19, "X"), + (0xFE20, "V"), + (0xFE30, "X"), + (0xFE31, "M", "—"), + (0xFE32, "M", "–"), + ] + + +def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFE33, "M", "_"), + (0xFE35, "M", "("), + (0xFE36, "M", ")"), + (0xFE37, "M", "{"), + (0xFE38, "M", "}"), + (0xFE39, "M", "〔"), + (0xFE3A, "M", "〕"), + (0xFE3B, "M", "【"), + (0xFE3C, "M", "】"), + (0xFE3D, "M", "《"), + (0xFE3E, "M", "》"), + (0xFE3F, "M", "〈"), + (0xFE40, "M", "〉"), + (0xFE41, "M", "「"), + (0xFE42, "M", "」"), + (0xFE43, "M", "『"), + (0xFE44, "M", "』"), + (0xFE45, "V"), + (0xFE47, "M", "["), + (0xFE48, "M", "]"), + (0xFE49, "M", " ̅"), + (0xFE4D, "M", "_"), + (0xFE50, "M", ","), + (0xFE51, "M", "、"), + (0xFE52, "X"), + (0xFE54, "M", ";"), + (0xFE55, "M", ":"), + (0xFE56, "M", "?"), + (0xFE57, "M", "!"), + (0xFE58, "M", "—"), + (0xFE59, "M", "("), + (0xFE5A, "M", ")"), + (0xFE5B, "M", "{"), + (0xFE5C, "M", "}"), + (0xFE5D, "M", "〔"), + (0xFE5E, "M", "〕"), + (0xFE5F, "M", "#"), + (0xFE60, "M", "&"), + (0xFE61, "M", "*"), + (0xFE62, "M", "+"), + (0xFE63, "M", "-"), + (0xFE64, "M", "<"), + (0xFE65, "M", ">"), + (0xFE66, "M", "="), + (0xFE67, "X"), + (0xFE68, "M", "\\"), + (0xFE69, "M", "$"), + (0xFE6A, "M", "%"), + (0xFE6B, "M", "@"), + (0xFE6C, "X"), + (0xFE70, "M", " ً"), + (0xFE71, "M", "ـً"), + (0xFE72, "M", " ٌ"), + (0xFE73, "V"), + (0xFE74, "M", " ٍ"), + (0xFE75, "X"), + (0xFE76, "M", " َ"), + (0xFE77, "M", "ـَ"), + (0xFE78, "M", " ُ"), + (0xFE79, "M", "ـُ"), + (0xFE7A, "M", " ِ"), + (0xFE7B, "M", "ـِ"), + (0xFE7C, "M", " ّ"), + (0xFE7D, "M", "ـّ"), + (0xFE7E, "M", " ْ"), + (0xFE7F, "M", "ـْ"), + (0xFE80, "M", "ء"), + (0xFE81, "M", "آ"), + (0xFE83, "M", "أ"), + (0xFE85, "M", "ؤ"), + (0xFE87, "M", "إ"), + (0xFE89, "M", "ئ"), + (0xFE8D, "M", "ا"), + (0xFE8F, "M", "ب"), + (0xFE93, "M", "ة"), + (0xFE95, "M", "ت"), + (0xFE99, "M", "ث"), + (0xFE9D, "M", "ج"), + (0xFEA1, "M", "ح"), + (0xFEA5, "M", "خ"), + (0xFEA9, "M", "د"), + (0xFEAB, "M", "ذ"), + (0xFEAD, "M", "ر"), + (0xFEAF, "M", "ز"), + (0xFEB1, "M", "س"), + (0xFEB5, "M", "ش"), + (0xFEB9, "M", "ص"), + (0xFEBD, "M", "ض"), + (0xFEC1, "M", "ط"), + (0xFEC5, "M", "ظ"), + (0xFEC9, "M", "ع"), + (0xFECD, "M", "غ"), + (0xFED1, "M", "ف"), + (0xFED5, "M", "ق"), + (0xFED9, "M", "ك"), + (0xFEDD, "M", "ل"), + (0xFEE1, "M", "م"), + (0xFEE5, "M", "ن"), + (0xFEE9, "M", "ه"), + (0xFEED, "M", "و"), + ] + + +def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFEEF, "M", "ى"), + (0xFEF1, "M", "ي"), + (0xFEF5, "M", "لآ"), + (0xFEF7, "M", "لأ"), + (0xFEF9, "M", "لإ"), + (0xFEFB, "M", "لا"), + (0xFEFD, "X"), + (0xFEFF, "I"), + (0xFF00, "X"), + (0xFF01, "M", "!"), + (0xFF02, "M", '"'), + (0xFF03, "M", "#"), + (0xFF04, "M", "$"), + (0xFF05, "M", "%"), + 
+ (0xFF06, "M", "&"),
+ (0xFF07, "M", "'"),
+ (0xFF08, "M", "("),
+ (0xFF09, "M", ")"),
+ (0xFF0A, "M", "*"),
+ (0xFF0B, "M", "+"),
+ (0xFF0C, "M", ","),
+ (0xFF0D, "M", "-"),
+ (0xFF0E, "M", "."),
+ (0xFF0F, "M", "/"),
+ (0xFF10, "M", "0"),
+ (0xFF11, "M", "1"),
+ (0xFF12, "M", "2"),
+ (0xFF13, "M", "3"),
+ (0xFF14, "M", "4"),
+ (0xFF15, "M", "5"),
+ (0xFF16, "M", "6"),
+ (0xFF17, "M", "7"),
+ (0xFF18, "M", "8"),
+ (0xFF19, "M", "9"),
+ (0xFF1A, "M", ":"),
+ (0xFF1B, "M", ";"),
+ (0xFF1C, "M", "<"),
+ (0xFF1D, "M", "="),
+ (0xFF1E, "M", ">"),
+ (0xFF1F, "M", "?"),
+ (0xFF20, "M", "@"),
+ (0xFF21, "M", "a"),
+ (0xFF22, "M", "b"),
+ (0xFF23, "M", "c"),
+ (0xFF24, "M", "d"),
+ (0xFF25, "M", "e"),
+ (0xFF26, "M", "f"),
+ (0xFF27, "M", "g"),
+ (0xFF28, "M", "h"),
+ (0xFF29, "M", "i"),
+ (0xFF2A, "M", "j"),
+ (0xFF2B, "M", "k"),
+ (0xFF2C, "M", "l"),
+ (0xFF2D, "M", "m"),
+ (0xFF2E, "M", "n"),
+ (0xFF2F, "M", "o"),
+ (0xFF30, "M", "p"),
+ (0xFF31, "M", "q"),
+ (0xFF32, "M", "r"),
+ (0xFF33, "M", "s"),
+ (0xFF34, "M", "t"),
+ (0xFF35, "M", "u"),
+ (0xFF36, "M", "v"),
+ (0xFF37, "M", "w"),
+ (0xFF38, "M", "x"),
+ (0xFF39, "M", "y"),
+ (0xFF3A, "M", "z"),
+ (0xFF3B, "M", "["),
+ (0xFF3C, "M", "\\"),
+ (0xFF3D, "M", "]"),
+ (0xFF3E, "M", "^"),
+ (0xFF3F, "M", "_"),
+ (0xFF40, "M", "`"),
+ (0xFF41, "M", "a"),
+ (0xFF42, "M", "b"),
+ (0xFF43, "M", "c"),
+ (0xFF44, "M", "d"),
+ (0xFF45, "M", "e"),
+ (0xFF46, "M", "f"),
+ (0xFF47, "M", "g"),
+ (0xFF48, "M", "h"),
+ (0xFF49, "M", "i"),
+ (0xFF4A, "M", "j"),
+ (0xFF4B, "M", "k"),
+ (0xFF4C, "M", "l"),
+ (0xFF4D, "M", "m"),
+ (0xFF4E, "M", "n"),
+ (0xFF4F, "M", "o"),
+ (0xFF50, "M", "p"),
+ (0xFF51, "M", "q"),
+ (0xFF52, "M", "r"),
+ (0xFF53, "M", "s"),
+ (0xFF54, "M", "t"),
+ (0xFF55, "M", "u"),
+ (0xFF56, "M", "v"),
+ (0xFF57, "M", "w"),
+ (0xFF58, "M", "x"),
+ (0xFF59, "M", "y"),
+ (0xFF5A, "M", "z"),
+ (0xFF5B, "M", "{"),
+ ]
+
+
+def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFF5C, "M", "|"),
+ (0xFF5D, "M", "}"),
+ (0xFF5E, "M", "~"),
+ (0xFF5F, "M", "⦅"),
+ (0xFF60, "M", "⦆"),
+ (0xFF61, "M", "."),
+ (0xFF62, "M", "「"),
+ (0xFF63, "M", "」"),
+ (0xFF64, "M", "、"),
+ (0xFF65, "M", "・"),
+ (0xFF66, "M", "ヲ"),
+ (0xFF67, "M", "ァ"),
+ (0xFF68, "M", "ィ"),
+ (0xFF69, "M", "ゥ"),
+ (0xFF6A, "M", "ェ"),
+ (0xFF6B, "M", "ォ"),
+ (0xFF6C, "M", "ャ"),
+ (0xFF6D, "M", "ュ"),
+ (0xFF6E, "M", "ョ"),
+ (0xFF6F, "M", "ッ"),
+ (0xFF70, "M", "ー"),
+ (0xFF71, "M", "ア"),
+ (0xFF72, "M", "イ"),
+ (0xFF73, "M", "ウ"),
+ (0xFF74, "M", "エ"),
+ (0xFF75, "M", "オ"),
+ (0xFF76, "M", "カ"),
+ (0xFF77, "M", "キ"),
+ (0xFF78, "M", "ク"),
+ (0xFF79, "M", "ケ"),
+ (0xFF7A, "M", "コ"),
+ (0xFF7B, "M", "サ"),
+ (0xFF7C, "M", "シ"),
+ (0xFF7D, "M", "ス"),
+ (0xFF7E, "M", "セ"),
+ (0xFF7F, "M", "ソ"),
+ (0xFF80, "M", "タ"),
+ (0xFF81, "M", "チ"),
+ (0xFF82, "M", "ツ"),
+ (0xFF83, "M", "テ"),
+ (0xFF84, "M", "ト"),
+ (0xFF85, "M", "ナ"),
+ (0xFF86, "M", "ニ"),
+ (0xFF87, "M", "ヌ"),
+ (0xFF88, "M", "ネ"),
+ (0xFF89, "M", "ノ"),
+ (0xFF8A, "M", "ハ"),
+ (0xFF8B, "M", "ヒ"),
+ (0xFF8C, "M", "フ"),
+ (0xFF8D, "M", "ヘ"),
+ (0xFF8E, "M", "ホ"),
+ (0xFF8F, "M", "マ"),
+ (0xFF90, "M", "ミ"),
+ (0xFF91, "M", "ム"),
+ (0xFF92, "M", "メ"),
+ (0xFF93, "M", "モ"),
+ (0xFF94, "M", "ヤ"),
+ (0xFF95, "M", "ユ"),
+ (0xFF96, "M", "ヨ"),
+ (0xFF97, "M", "ラ"),
+ (0xFF98, "M", "リ"),
+ (0xFF99, "M", "ル"),
+ (0xFF9A, "M", "レ"),
+ (0xFF9B, "M", "ロ"),
+ (0xFF9C, "M", "ワ"),
+ (0xFF9D, "M", "ン"),
+ (0xFF9E, "M", "゙"),
+ (0xFF9F, "M", "゚"),
+ (0xFFA0, "I"),
+ (0xFFA1, "M", "ᄀ"),
+ (0xFFA2, "M", "ᄁ"),
+ (0xFFA3, "M", "ᆪ"),
"ᆪ"), + (0xFFA4, "M", "ᄂ"), + (0xFFA5, "M", "ᆬ"), + (0xFFA6, "M", "ᆭ"), + (0xFFA7, "M", "ᄃ"), + (0xFFA8, "M", "ᄄ"), + (0xFFA9, "M", "ᄅ"), + (0xFFAA, "M", "ᆰ"), + (0xFFAB, "M", "ᆱ"), + (0xFFAC, "M", "ᆲ"), + (0xFFAD, "M", "ᆳ"), + (0xFFAE, "M", "ᆴ"), + (0xFFAF, "M", "ᆵ"), + (0xFFB0, "M", "ᄚ"), + (0xFFB1, "M", "ᄆ"), + (0xFFB2, "M", "ᄇ"), + (0xFFB3, "M", "ᄈ"), + (0xFFB4, "M", "ᄡ"), + (0xFFB5, "M", "ᄉ"), + (0xFFB6, "M", "ᄊ"), + (0xFFB7, "M", "ᄋ"), + (0xFFB8, "M", "ᄌ"), + (0xFFB9, "M", "ᄍ"), + (0xFFBA, "M", "ᄎ"), + (0xFFBB, "M", "ᄏ"), + (0xFFBC, "M", "ᄐ"), + (0xFFBD, "M", "ᄑ"), + (0xFFBE, "M", "ᄒ"), + (0xFFBF, "X"), + ] + + +def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0xFFC2, "M", "ᅡ"), + (0xFFC3, "M", "ᅢ"), + (0xFFC4, "M", "ᅣ"), + (0xFFC5, "M", "ᅤ"), + (0xFFC6, "M", "ᅥ"), + (0xFFC7, "M", "ᅦ"), + (0xFFC8, "X"), + (0xFFCA, "M", "ᅧ"), + (0xFFCB, "M", "ᅨ"), + (0xFFCC, "M", "ᅩ"), + (0xFFCD, "M", "ᅪ"), + (0xFFCE, "M", "ᅫ"), + (0xFFCF, "M", "ᅬ"), + (0xFFD0, "X"), + (0xFFD2, "M", "ᅭ"), + (0xFFD3, "M", "ᅮ"), + (0xFFD4, "M", "ᅯ"), + (0xFFD5, "M", "ᅰ"), + (0xFFD6, "M", "ᅱ"), + (0xFFD7, "M", "ᅲ"), + (0xFFD8, "X"), + (0xFFDA, "M", "ᅳ"), + (0xFFDB, "M", "ᅴ"), + (0xFFDC, "M", "ᅵ"), + (0xFFDD, "X"), + (0xFFE0, "M", "¢"), + (0xFFE1, "M", "£"), + (0xFFE2, "M", "¬"), + (0xFFE3, "M", " ̄"), + (0xFFE4, "M", "¦"), + (0xFFE5, "M", "¥"), + (0xFFE6, "M", "₩"), + (0xFFE7, "X"), + (0xFFE8, "M", "│"), + (0xFFE9, "M", "←"), + (0xFFEA, "M", "↑"), + (0xFFEB, "M", "→"), + (0xFFEC, "M", "↓"), + (0xFFED, "M", "■"), + (0xFFEE, "M", "○"), + (0xFFEF, "X"), + (0x10000, "V"), + (0x1000C, "X"), + (0x1000D, "V"), + (0x10027, "X"), + (0x10028, "V"), + (0x1003B, "X"), + (0x1003C, "V"), + (0x1003E, "X"), + (0x1003F, "V"), + (0x1004E, "X"), + (0x10050, "V"), + (0x1005E, "X"), + (0x10080, "V"), + (0x100FB, "X"), + (0x10100, "V"), + (0x10103, "X"), + (0x10107, "V"), + (0x10134, "X"), + (0x10137, "V"), + (0x1018F, "X"), + (0x10190, "V"), + (0x1019D, "X"), + (0x101A0, "V"), + (0x101A1, "X"), + (0x101D0, "V"), + (0x101FE, "X"), + (0x10280, "V"), + (0x1029D, "X"), + (0x102A0, "V"), + (0x102D1, "X"), + (0x102E0, "V"), + (0x102FC, "X"), + (0x10300, "V"), + (0x10324, "X"), + (0x1032D, "V"), + (0x1034B, "X"), + (0x10350, "V"), + (0x1037B, "X"), + (0x10380, "V"), + (0x1039E, "X"), + (0x1039F, "V"), + (0x103C4, "X"), + (0x103C8, "V"), + (0x103D6, "X"), + (0x10400, "M", "𐐨"), + (0x10401, "M", "𐐩"), + (0x10402, "M", "𐐪"), + (0x10403, "M", "𐐫"), + (0x10404, "M", "𐐬"), + (0x10405, "M", "𐐭"), + (0x10406, "M", "𐐮"), + (0x10407, "M", "𐐯"), + (0x10408, "M", "𐐰"), + (0x10409, "M", "𐐱"), + (0x1040A, "M", "𐐲"), + (0x1040B, "M", "𐐳"), + (0x1040C, "M", "𐐴"), + (0x1040D, "M", "𐐵"), + (0x1040E, "M", "𐐶"), + ] + + +def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1040F, "M", "𐐷"), + (0x10410, "M", "𐐸"), + (0x10411, "M", "𐐹"), + (0x10412, "M", "𐐺"), + (0x10413, "M", "𐐻"), + (0x10414, "M", "𐐼"), + (0x10415, "M", "𐐽"), + (0x10416, "M", "𐐾"), + (0x10417, "M", "𐐿"), + (0x10418, "M", "𐑀"), + (0x10419, "M", "𐑁"), + (0x1041A, "M", "𐑂"), + (0x1041B, "M", "𐑃"), + (0x1041C, "M", "𐑄"), + (0x1041D, "M", "𐑅"), + (0x1041E, "M", "𐑆"), + (0x1041F, "M", "𐑇"), + (0x10420, "M", "𐑈"), + (0x10421, "M", "𐑉"), + (0x10422, "M", "𐑊"), + (0x10423, "M", "𐑋"), + (0x10424, "M", "𐑌"), + (0x10425, "M", "𐑍"), + (0x10426, "M", "𐑎"), + (0x10427, "M", "𐑏"), + (0x10428, "V"), + (0x1049E, "X"), + (0x104A0, "V"), + (0x104AA, "X"), + (0x104B0, "M", "𐓘"), + (0x104B1, "M", "𐓙"), + (0x104B2, "M", "𐓚"), + (0x104B3, "M", "𐓛"), + 
+ (0x104B4, "M", "𐓜"),
+ (0x104B5, "M", "𐓝"),
+ (0x104B6, "M", "𐓞"),
+ (0x104B7, "M", "𐓟"),
+ (0x104B8, "M", "𐓠"),
+ (0x104B9, "M", "𐓡"),
+ (0x104BA, "M", "𐓢"),
+ (0x104BB, "M", "𐓣"),
+ (0x104BC, "M", "𐓤"),
+ (0x104BD, "M", "𐓥"),
+ (0x104BE, "M", "𐓦"),
+ (0x104BF, "M", "𐓧"),
+ (0x104C0, "M", "𐓨"),
+ (0x104C1, "M", "𐓩"),
+ (0x104C2, "M", "𐓪"),
+ (0x104C3, "M", "𐓫"),
+ (0x104C4, "M", "𐓬"),
+ (0x104C5, "M", "𐓭"),
+ (0x104C6, "M", "𐓮"),
+ (0x104C7, "M", "𐓯"),
+ (0x104C8, "M", "𐓰"),
+ (0x104C9, "M", "𐓱"),
+ (0x104CA, "M", "𐓲"),
+ (0x104CB, "M", "𐓳"),
+ (0x104CC, "M", "𐓴"),
+ (0x104CD, "M", "𐓵"),
+ (0x104CE, "M", "𐓶"),
+ (0x104CF, "M", "𐓷"),
+ (0x104D0, "M", "𐓸"),
+ (0x104D1, "M", "𐓹"),
+ (0x104D2, "M", "𐓺"),
+ (0x104D3, "M", "𐓻"),
+ (0x104D4, "X"),
+ (0x104D8, "V"),
+ (0x104FC, "X"),
+ (0x10500, "V"),
+ (0x10528, "X"),
+ (0x10530, "V"),
+ (0x10564, "X"),
+ (0x1056F, "V"),
+ (0x10570, "M", "𐖗"),
+ (0x10571, "M", "𐖘"),
+ (0x10572, "M", "𐖙"),
+ (0x10573, "M", "𐖚"),
+ (0x10574, "M", "𐖛"),
+ (0x10575, "M", "𐖜"),
+ (0x10576, "M", "𐖝"),
+ (0x10577, "M", "𐖞"),
+ (0x10578, "M", "𐖟"),
+ (0x10579, "M", "𐖠"),
+ (0x1057A, "M", "𐖡"),
+ (0x1057B, "X"),
+ (0x1057C, "M", "𐖣"),
+ (0x1057D, "M", "𐖤"),
+ (0x1057E, "M", "𐖥"),
+ (0x1057F, "M", "𐖦"),
+ (0x10580, "M", "𐖧"),
+ (0x10581, "M", "𐖨"),
+ (0x10582, "M", "𐖩"),
+ (0x10583, "M", "𐖪"),
+ (0x10584, "M", "𐖫"),
+ (0x10585, "M", "𐖬"),
+ (0x10586, "M", "𐖭"),
+ (0x10587, "M", "𐖮"),
+ (0x10588, "M", "𐖯"),
+ (0x10589, "M", "𐖰"),
+ (0x1058A, "M", "𐖱"),
+ ]
+
+
+def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1058B, "X"),
+ (0x1058C, "M", "𐖳"),
+ (0x1058D, "M", "𐖴"),
+ (0x1058E, "M", "𐖵"),
+ (0x1058F, "M", "𐖶"),
+ (0x10590, "M", "𐖷"),
+ (0x10591, "M", "𐖸"),
+ (0x10592, "M", "𐖹"),
+ (0x10593, "X"),
+ (0x10594, "M", "𐖻"),
+ (0x10595, "M", "𐖼"),
+ (0x10596, "X"),
+ (0x10597, "V"),
+ (0x105A2, "X"),
+ (0x105A3, "V"),
+ (0x105B2, "X"),
+ (0x105B3, "V"),
+ (0x105BA, "X"),
+ (0x105BB, "V"),
+ (0x105BD, "X"),
+ (0x105C0, "V"),
+ (0x105F4, "X"),
+ (0x10600, "V"),
+ (0x10737, "X"),
+ (0x10740, "V"),
+ (0x10756, "X"),
+ (0x10760, "V"),
+ (0x10768, "X"),
+ (0x10780, "V"),
+ (0x10781, "M", "ː"),
+ (0x10782, "M", "ˑ"),
+ (0x10783, "M", "æ"),
+ (0x10784, "M", "ʙ"),
+ (0x10785, "M", "ɓ"),
+ (0x10786, "X"),
+ (0x10787, "M", "ʣ"),
+ (0x10788, "M", "ꭦ"),
+ (0x10789, "M", "ʥ"),
+ (0x1078A, "M", "ʤ"),
+ (0x1078B, "M", "ɖ"),
+ (0x1078C, "M", "ɗ"),
+ (0x1078D, "M", "ᶑ"),
+ (0x1078E, "M", "ɘ"),
+ (0x1078F, "M", "ɞ"),
+ (0x10790, "M", "ʩ"),
+ (0x10791, "M", "ɤ"),
+ (0x10792, "M", "ɢ"),
+ (0x10793, "M", "ɠ"),
+ (0x10794, "M", "ʛ"),
+ (0x10795, "M", "ħ"),
+ (0x10796, "M", "ʜ"),
+ (0x10797, "M", "ɧ"),
+ (0x10798, "M", "ʄ"),
+ (0x10799, "M", "ʪ"),
+ (0x1079A, "M", "ʫ"),
+ (0x1079B, "M", "ɬ"),
+ (0x1079C, "M", "𝼄"),
+ (0x1079D, "M", "ꞎ"),
+ (0x1079E, "M", "ɮ"),
+ (0x1079F, "M", "𝼅"),
+ (0x107A0, "M", "ʎ"),
+ (0x107A1, "M", "𝼆"),
+ (0x107A2, "M", "ø"),
+ (0x107A3, "M", "ɶ"),
+ (0x107A4, "M", "ɷ"),
+ (0x107A5, "M", "q"),
+ (0x107A6, "M", "ɺ"),
+ (0x107A7, "M", "𝼈"),
+ (0x107A8, "M", "ɽ"),
+ (0x107A9, "M", "ɾ"),
+ (0x107AA, "M", "ʀ"),
+ (0x107AB, "M", "ʨ"),
+ (0x107AC, "M", "ʦ"),
+ (0x107AD, "M", "ꭧ"),
+ (0x107AE, "M", "ʧ"),
+ (0x107AF, "M", "ʈ"),
+ (0x107B0, "M", "ⱱ"),
+ (0x107B1, "X"),
+ (0x107B2, "M", "ʏ"),
+ (0x107B3, "M", "ʡ"),
+ (0x107B4, "M", "ʢ"),
+ (0x107B5, "M", "ʘ"),
+ (0x107B6, "M", "ǀ"),
+ (0x107B7, "M", "ǁ"),
+ (0x107B8, "M", "ǂ"),
+ (0x107B9, "M", "𝼊"),
+ (0x107BA, "M", "𝼞"),
+ (0x107BB, "X"),
+ (0x10800, "V"),
+ (0x10806, "X"),
+ (0x10808, "V"),
+ (0x10809, "X"),
+ (0x1080A, "V"),
+ (0x10836, "X"),
+ (0x10837, "V"),
+ (0x10839, "X"),
+ (0x1083C, "V"),
+ (0x1083D, "X"),
+ (0x1083F, "V"),
+ (0x10856, "X"),
+ ]
+
+
+def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x10857, "V"),
+ (0x1089F, "X"),
+ (0x108A7, "V"),
+ (0x108B0, "X"),
+ (0x108E0, "V"),
+ (0x108F3, "X"),
+ (0x108F4, "V"),
+ (0x108F6, "X"),
+ (0x108FB, "V"),
+ (0x1091C, "X"),
+ (0x1091F, "V"),
+ (0x1093A, "X"),
+ (0x1093F, "V"),
+ (0x10940, "X"),
+ (0x10980, "V"),
+ (0x109B8, "X"),
+ (0x109BC, "V"),
+ (0x109D0, "X"),
+ (0x109D2, "V"),
+ (0x10A04, "X"),
+ (0x10A05, "V"),
+ (0x10A07, "X"),
+ (0x10A0C, "V"),
+ (0x10A14, "X"),
+ (0x10A15, "V"),
+ (0x10A18, "X"),
+ (0x10A19, "V"),
+ (0x10A36, "X"),
+ (0x10A38, "V"),
+ (0x10A3B, "X"),
+ (0x10A3F, "V"),
+ (0x10A49, "X"),
+ (0x10A50, "V"),
+ (0x10A59, "X"),
+ (0x10A60, "V"),
+ (0x10AA0, "X"),
+ (0x10AC0, "V"),
+ (0x10AE7, "X"),
+ (0x10AEB, "V"),
+ (0x10AF7, "X"),
+ (0x10B00, "V"),
+ (0x10B36, "X"),
+ (0x10B39, "V"),
+ (0x10B56, "X"),
+ (0x10B58, "V"),
+ (0x10B73, "X"),
+ (0x10B78, "V"),
+ (0x10B92, "X"),
+ (0x10B99, "V"),
+ (0x10B9D, "X"),
+ (0x10BA9, "V"),
+ (0x10BB0, "X"),
+ (0x10C00, "V"),
+ (0x10C49, "X"),
+ (0x10C80, "M", "𐳀"),
+ (0x10C81, "M", "𐳁"),
+ (0x10C82, "M", "𐳂"),
+ (0x10C83, "M", "𐳃"),
+ (0x10C84, "M", "𐳄"),
+ (0x10C85, "M", "𐳅"),
+ (0x10C86, "M", "𐳆"),
+ (0x10C87, "M", "𐳇"),
+ (0x10C88, "M", "𐳈"),
+ (0x10C89, "M", "𐳉"),
+ (0x10C8A, "M", "𐳊"),
+ (0x10C8B, "M", "𐳋"),
+ (0x10C8C, "M", "𐳌"),
+ (0x10C8D, "M", "𐳍"),
+ (0x10C8E, "M", "𐳎"),
+ (0x10C8F, "M", "𐳏"),
+ (0x10C90, "M", "𐳐"),
+ (0x10C91, "M", "𐳑"),
+ (0x10C92, "M", "𐳒"),
+ (0x10C93, "M", "𐳓"),
+ (0x10C94, "M", "𐳔"),
+ (0x10C95, "M", "𐳕"),
+ (0x10C96, "M", "𐳖"),
+ (0x10C97, "M", "𐳗"),
+ (0x10C98, "M", "𐳘"),
+ (0x10C99, "M", "𐳙"),
+ (0x10C9A, "M", "𐳚"),
+ (0x10C9B, "M", "𐳛"),
+ (0x10C9C, "M", "𐳜"),
+ (0x10C9D, "M", "𐳝"),
+ (0x10C9E, "M", "𐳞"),
+ (0x10C9F, "M", "𐳟"),
+ (0x10CA0, "M", "𐳠"),
+ (0x10CA1, "M", "𐳡"),
+ (0x10CA2, "M", "𐳢"),
+ (0x10CA3, "M", "𐳣"),
+ (0x10CA4, "M", "𐳤"),
+ (0x10CA5, "M", "𐳥"),
+ (0x10CA6, "M", "𐳦"),
+ (0x10CA7, "M", "𐳧"),
+ (0x10CA8, "M", "𐳨"),
+ (0x10CA9, "M", "𐳩"),
+ (0x10CAA, "M", "𐳪"),
+ (0x10CAB, "M", "𐳫"),
+ (0x10CAC, "M", "𐳬"),
+ (0x10CAD, "M", "𐳭"),
+ ]
+
+
+def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x10CAE, "M", "𐳮"),
+ (0x10CAF, "M", "𐳯"),
+ (0x10CB0, "M", "𐳰"),
+ (0x10CB1, "M", "𐳱"),
+ (0x10CB2, "M", "𐳲"),
+ (0x10CB3, "X"),
+ (0x10CC0, "V"),
+ (0x10CF3, "X"),
+ (0x10CFA, "V"),
+ (0x10D28, "X"),
+ (0x10D30, "V"),
+ (0x10D3A, "X"),
+ (0x10D40, "V"),
+ (0x10D50, "M", "𐵰"),
+ (0x10D51, "M", "𐵱"),
+ (0x10D52, "M", "𐵲"),
+ (0x10D53, "M", "𐵳"),
+ (0x10D54, "M", "𐵴"),
+ (0x10D55, "M", "𐵵"),
+ (0x10D56, "M", "𐵶"),
+ (0x10D57, "M", "𐵷"),
+ (0x10D58, "M", "𐵸"),
+ (0x10D59, "M", "𐵹"),
+ (0x10D5A, "M", "𐵺"),
+ (0x10D5B, "M", "𐵻"),
+ (0x10D5C, "M", "𐵼"),
+ (0x10D5D, "M", "𐵽"),
+ (0x10D5E, "M", "𐵾"),
+ (0x10D5F, "M", "𐵿"),
+ (0x10D60, "M", "𐶀"),
+ (0x10D61, "M", "𐶁"),
+ (0x10D62, "M", "𐶂"),
+ (0x10D63, "M", "𐶃"),
+ (0x10D64, "M", "𐶄"),
+ (0x10D65, "M", "𐶅"),
+ (0x10D66, "X"),
+ (0x10D69, "V"),
+ (0x10D86, "X"),
+ (0x10D8E, "V"),
+ (0x10D90, "X"),
+ (0x10E60, "V"),
+ (0x10E7F, "X"),
+ (0x10E80, "V"),
+ (0x10EAA, "X"),
+ (0x10EAB, "V"),
+ (0x10EAE, "X"),
+ (0x10EB0, "V"),
+ (0x10EB2, "X"),
+ (0x10EC2, "V"),
+ (0x10EC5, "X"),
+ (0x10EFC, "V"),
+ (0x10F28, "X"),
+ (0x10F30, "V"),
+ (0x10F5A, "X"),
+ (0x10F70, "V"),
+ (0x10F8A, "X"),
+ (0x10FB0, "V"),
+ (0x10FCC, "X"),
+ (0x10FE0, "V"),
+ (0x10FF7, "X"),
+ (0x11000, "V"),
+ (0x1104E, "X"),
+ (0x11052, "V"),
+ (0x11076, "X"),
+ (0x1107F, "V"),
+ (0x110BD, "X"),
+ (0x110BE, "V"),
+ (0x110C3, "X"),
+ (0x110D0, "V"),
+ (0x110E9, "X"),
+ (0x110F0, "V"),
+ (0x110FA, "X"),
+ (0x11100, "V"),
+ (0x11135, "X"),
+ (0x11136, "V"),
+ (0x11148, "X"),
+ (0x11150, "V"),
+ (0x11177, "X"),
+ (0x11180, "V"),
+ (0x111E0, "X"),
+ (0x111E1, "V"),
+ (0x111F5, "X"),
+ (0x11200, "V"),
+ (0x11212, "X"),
+ (0x11213, "V"),
+ (0x11242, "X"),
+ (0x11280, "V"),
+ (0x11287, "X"),
+ (0x11288, "V"),
+ (0x11289, "X"),
+ (0x1128A, "V"),
+ (0x1128E, "X"),
+ (0x1128F, "V"),
+ (0x1129E, "X"),
+ (0x1129F, "V"),
+ (0x112AA, "X"),
+ (0x112B0, "V"),
+ (0x112EB, "X"),
+ (0x112F0, "V"),
+ (0x112FA, "X"),
+ ]
+
+
+def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x11300, "V"),
+ (0x11304, "X"),
+ (0x11305, "V"),
+ (0x1130D, "X"),
+ (0x1130F, "V"),
+ (0x11311, "X"),
+ (0x11313, "V"),
+ (0x11329, "X"),
+ (0x1132A, "V"),
+ (0x11331, "X"),
+ (0x11332, "V"),
+ (0x11334, "X"),
+ (0x11335, "V"),
+ (0x1133A, "X"),
+ (0x1133B, "V"),
+ (0x11345, "X"),
+ (0x11347, "V"),
+ (0x11349, "X"),
+ (0x1134B, "V"),
+ (0x1134E, "X"),
+ (0x11350, "V"),
+ (0x11351, "X"),
+ (0x11357, "V"),
+ (0x11358, "X"),
+ (0x1135D, "V"),
+ (0x11364, "X"),
+ (0x11366, "V"),
+ (0x1136D, "X"),
+ (0x11370, "V"),
+ (0x11375, "X"),
+ (0x11380, "V"),
+ (0x1138A, "X"),
+ (0x1138B, "V"),
+ (0x1138C, "X"),
+ (0x1138E, "V"),
+ (0x1138F, "X"),
+ (0x11390, "V"),
+ (0x113B6, "X"),
+ (0x113B7, "V"),
+ (0x113C1, "X"),
+ (0x113C2, "V"),
+ (0x113C3, "X"),
+ (0x113C5, "V"),
+ (0x113C6, "X"),
+ (0x113C7, "V"),
+ (0x113CB, "X"),
+ (0x113CC, "V"),
+ (0x113D6, "X"),
+ (0x113D7, "V"),
+ (0x113D9, "X"),
+ (0x113E1, "V"),
+ (0x113E3, "X"),
+ (0x11400, "V"),
+ (0x1145C, "X"),
+ (0x1145D, "V"),
+ (0x11462, "X"),
+ (0x11480, "V"),
+ (0x114C8, "X"),
+ (0x114D0, "V"),
+ (0x114DA, "X"),
+ (0x11580, "V"),
+ (0x115B6, "X"),
+ (0x115B8, "V"),
+ (0x115DE, "X"),
+ (0x11600, "V"),
+ (0x11645, "X"),
+ (0x11650, "V"),
+ (0x1165A, "X"),
+ (0x11660, "V"),
+ (0x1166D, "X"),
+ (0x11680, "V"),
+ (0x116BA, "X"),
+ (0x116C0, "V"),
+ (0x116CA, "X"),
+ (0x116D0, "V"),
+ (0x116E4, "X"),
+ (0x11700, "V"),
+ (0x1171B, "X"),
+ (0x1171D, "V"),
+ (0x1172C, "X"),
+ (0x11730, "V"),
+ (0x11747, "X"),
+ (0x11800, "V"),
+ (0x1183C, "X"),
+ (0x118A0, "M", "𑣀"),
+ (0x118A1, "M", "𑣁"),
+ (0x118A2, "M", "𑣂"),
+ (0x118A3, "M", "𑣃"),
+ (0x118A4, "M", "𑣄"),
+ (0x118A5, "M", "𑣅"),
+ (0x118A6, "M", "𑣆"),
+ (0x118A7, "M", "𑣇"),
+ (0x118A8, "M", "𑣈"),
+ (0x118A9, "M", "𑣉"),
+ (0x118AA, "M", "𑣊"),
+ (0x118AB, "M", "𑣋"),
+ (0x118AC, "M", "𑣌"),
+ (0x118AD, "M", "𑣍"),
+ (0x118AE, "M", "𑣎"),
+ (0x118AF, "M", "𑣏"),
+ ]
+
+
+def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x118B0, "M", "𑣐"),
+ (0x118B1, "M", "𑣑"),
+ (0x118B2, "M", "𑣒"),
+ (0x118B3, "M", "𑣓"),
+ (0x118B4, "M", "𑣔"),
+ (0x118B5, "M", "𑣕"),
+ (0x118B6, "M", "𑣖"),
+ (0x118B7, "M", "𑣗"),
+ (0x118B8, "M", "𑣘"),
+ (0x118B9, "M", "𑣙"),
+ (0x118BA, "M", "𑣚"),
+ (0x118BB, "M", "𑣛"),
+ (0x118BC, "M", "𑣜"),
+ (0x118BD, "M", "𑣝"),
+ (0x118BE, "M", "𑣞"),
+ (0x118BF, "M", "𑣟"),
+ (0x118C0, "V"),
+ (0x118F3, "X"),
+ (0x118FF, "V"),
+ (0x11907, "X"),
+ (0x11909, "V"),
+ (0x1190A, "X"),
+ (0x1190C, "V"),
+ (0x11914, "X"),
+ (0x11915, "V"),
+ (0x11917, "X"),
+ (0x11918, "V"),
+ (0x11936, "X"),
+ (0x11937, "V"),
+ (0x11939, "X"),
+ (0x1193B, "V"),
+ (0x11947, "X"),
+ (0x11950, "V"),
+ (0x1195A, "X"),
+ (0x119A0, "V"),
+ (0x119A8, "X"),
+ (0x119AA, "V"),
"V"), + (0x119D8, "X"), + (0x119DA, "V"), + (0x119E5, "X"), + (0x11A00, "V"), + (0x11A48, "X"), + (0x11A50, "V"), + (0x11AA3, "X"), + (0x11AB0, "V"), + (0x11AF9, "X"), + (0x11B00, "V"), + (0x11B0A, "X"), + (0x11BC0, "V"), + (0x11BE2, "X"), + (0x11BF0, "V"), + (0x11BFA, "X"), + (0x11C00, "V"), + (0x11C09, "X"), + (0x11C0A, "V"), + (0x11C37, "X"), + (0x11C38, "V"), + (0x11C46, "X"), + (0x11C50, "V"), + (0x11C6D, "X"), + (0x11C70, "V"), + (0x11C90, "X"), + (0x11C92, "V"), + (0x11CA8, "X"), + (0x11CA9, "V"), + (0x11CB7, "X"), + (0x11D00, "V"), + (0x11D07, "X"), + (0x11D08, "V"), + (0x11D0A, "X"), + (0x11D0B, "V"), + (0x11D37, "X"), + (0x11D3A, "V"), + (0x11D3B, "X"), + (0x11D3C, "V"), + (0x11D3E, "X"), + (0x11D3F, "V"), + (0x11D48, "X"), + (0x11D50, "V"), + (0x11D5A, "X"), + (0x11D60, "V"), + (0x11D66, "X"), + (0x11D67, "V"), + (0x11D69, "X"), + (0x11D6A, "V"), + (0x11D8F, "X"), + (0x11D90, "V"), + (0x11D92, "X"), + (0x11D93, "V"), + (0x11D99, "X"), + (0x11DA0, "V"), + (0x11DAA, "X"), + (0x11EE0, "V"), + (0x11EF9, "X"), + (0x11F00, "V"), + (0x11F11, "X"), + (0x11F12, "V"), + (0x11F3B, "X"), + (0x11F3E, "V"), + (0x11F5B, "X"), + ] + + +def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x11FB0, "V"), + (0x11FB1, "X"), + (0x11FC0, "V"), + (0x11FF2, "X"), + (0x11FFF, "V"), + (0x1239A, "X"), + (0x12400, "V"), + (0x1246F, "X"), + (0x12470, "V"), + (0x12475, "X"), + (0x12480, "V"), + (0x12544, "X"), + (0x12F90, "V"), + (0x12FF3, "X"), + (0x13000, "V"), + (0x13430, "X"), + (0x13440, "V"), + (0x13456, "X"), + (0x13460, "V"), + (0x143FB, "X"), + (0x14400, "V"), + (0x14647, "X"), + (0x16100, "V"), + (0x1613A, "X"), + (0x16800, "V"), + (0x16A39, "X"), + (0x16A40, "V"), + (0x16A5F, "X"), + (0x16A60, "V"), + (0x16A6A, "X"), + (0x16A6E, "V"), + (0x16ABF, "X"), + (0x16AC0, "V"), + (0x16ACA, "X"), + (0x16AD0, "V"), + (0x16AEE, "X"), + (0x16AF0, "V"), + (0x16AF6, "X"), + (0x16B00, "V"), + (0x16B46, "X"), + (0x16B50, "V"), + (0x16B5A, "X"), + (0x16B5B, "V"), + (0x16B62, "X"), + (0x16B63, "V"), + (0x16B78, "X"), + (0x16B7D, "V"), + (0x16B90, "X"), + (0x16D40, "V"), + (0x16D7A, "X"), + (0x16E40, "M", "𖹠"), + (0x16E41, "M", "𖹡"), + (0x16E42, "M", "𖹢"), + (0x16E43, "M", "𖹣"), + (0x16E44, "M", "𖹤"), + (0x16E45, "M", "𖹥"), + (0x16E46, "M", "𖹦"), + (0x16E47, "M", "𖹧"), + (0x16E48, "M", "𖹨"), + (0x16E49, "M", "𖹩"), + (0x16E4A, "M", "𖹪"), + (0x16E4B, "M", "𖹫"), + (0x16E4C, "M", "𖹬"), + (0x16E4D, "M", "𖹭"), + (0x16E4E, "M", "𖹮"), + (0x16E4F, "M", "𖹯"), + (0x16E50, "M", "𖹰"), + (0x16E51, "M", "𖹱"), + (0x16E52, "M", "𖹲"), + (0x16E53, "M", "𖹳"), + (0x16E54, "M", "𖹴"), + (0x16E55, "M", "𖹵"), + (0x16E56, "M", "𖹶"), + (0x16E57, "M", "𖹷"), + (0x16E58, "M", "𖹸"), + (0x16E59, "M", "𖹹"), + (0x16E5A, "M", "𖹺"), + (0x16E5B, "M", "𖹻"), + (0x16E5C, "M", "𖹼"), + (0x16E5D, "M", "𖹽"), + (0x16E5E, "M", "𖹾"), + (0x16E5F, "M", "𖹿"), + (0x16E60, "V"), + (0x16E9B, "X"), + (0x16F00, "V"), + (0x16F4B, "X"), + (0x16F4F, "V"), + (0x16F88, "X"), + (0x16F8F, "V"), + (0x16FA0, "X"), + (0x16FE0, "V"), + (0x16FE5, "X"), + (0x16FF0, "V"), + (0x16FF2, "X"), + (0x17000, "V"), + (0x187F8, "X"), + (0x18800, "V"), + (0x18CD6, "X"), + (0x18CFF, "V"), + (0x18D09, "X"), + ] + + +def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1AFF0, "V"), + (0x1AFF4, "X"), + (0x1AFF5, "V"), + (0x1AFFC, "X"), + (0x1AFFD, "V"), + (0x1AFFF, "X"), + (0x1B000, "V"), + (0x1B123, "X"), + (0x1B132, "V"), + (0x1B133, "X"), + (0x1B150, "V"), + (0x1B153, "X"), + (0x1B155, "V"), + (0x1B156, "X"), + (0x1B164, "V"), + 
+ (0x1B168, "X"),
+ (0x1B170, "V"),
+ (0x1B2FC, "X"),
+ (0x1BC00, "V"),
+ (0x1BC6B, "X"),
+ (0x1BC70, "V"),
+ (0x1BC7D, "X"),
+ (0x1BC80, "V"),
+ (0x1BC89, "X"),
+ (0x1BC90, "V"),
+ (0x1BC9A, "X"),
+ (0x1BC9C, "V"),
+ (0x1BCA0, "I"),
+ (0x1BCA4, "X"),
+ (0x1CC00, "V"),
+ (0x1CCD6, "M", "a"),
+ (0x1CCD7, "M", "b"),
+ (0x1CCD8, "M", "c"),
+ (0x1CCD9, "M", "d"),
+ (0x1CCDA, "M", "e"),
+ (0x1CCDB, "M", "f"),
+ (0x1CCDC, "M", "g"),
+ (0x1CCDD, "M", "h"),
+ (0x1CCDE, "M", "i"),
+ (0x1CCDF, "M", "j"),
+ (0x1CCE0, "M", "k"),
+ (0x1CCE1, "M", "l"),
+ (0x1CCE2, "M", "m"),
+ (0x1CCE3, "M", "n"),
+ (0x1CCE4, "M", "o"),
+ (0x1CCE5, "M", "p"),
+ (0x1CCE6, "M", "q"),
+ (0x1CCE7, "M", "r"),
+ (0x1CCE8, "M", "s"),
+ (0x1CCE9, "M", "t"),
+ (0x1CCEA, "M", "u"),
+ (0x1CCEB, "M", "v"),
+ (0x1CCEC, "M", "w"),
+ (0x1CCED, "M", "x"),
+ (0x1CCEE, "M", "y"),
+ (0x1CCEF, "M", "z"),
+ (0x1CCF0, "M", "0"),
+ (0x1CCF1, "M", "1"),
+ (0x1CCF2, "M", "2"),
+ (0x1CCF3, "M", "3"),
+ (0x1CCF4, "M", "4"),
+ (0x1CCF5, "M", "5"),
+ (0x1CCF6, "M", "6"),
+ (0x1CCF7, "M", "7"),
+ (0x1CCF8, "M", "8"),
+ (0x1CCF9, "M", "9"),
+ (0x1CCFA, "X"),
+ (0x1CD00, "V"),
+ (0x1CEB4, "X"),
+ (0x1CF00, "V"),
+ (0x1CF2E, "X"),
+ (0x1CF30, "V"),
+ (0x1CF47, "X"),
+ (0x1CF50, "V"),
+ (0x1CFC4, "X"),
+ (0x1D000, "V"),
+ (0x1D0F6, "X"),
+ (0x1D100, "V"),
+ (0x1D127, "X"),
+ (0x1D129, "V"),
+ (0x1D15E, "M", "𝅗𝅥"),
+ (0x1D15F, "M", "𝅘𝅥"),
+ (0x1D160, "M", "𝅘𝅥𝅮"),
+ (0x1D161, "M", "𝅘𝅥𝅯"),
+ (0x1D162, "M", "𝅘𝅥𝅰"),
+ (0x1D163, "M", "𝅘𝅥𝅱"),
+ (0x1D164, "M", "𝅘𝅥𝅲"),
+ (0x1D165, "V"),
+ (0x1D173, "I"),
+ (0x1D17B, "V"),
+ (0x1D1BB, "M", "𝆹𝅥"),
+ (0x1D1BC, "M", "𝆺𝅥"),
+ (0x1D1BD, "M", "𝆹𝅥𝅮"),
+ (0x1D1BE, "M", "𝆺𝅥𝅮"),
+ (0x1D1BF, "M", "𝆹𝅥𝅯"),
+ (0x1D1C0, "M", "𝆺𝅥𝅯"),
+ (0x1D1C1, "V"),
+ (0x1D1EB, "X"),
+ (0x1D200, "V"),
+ (0x1D246, "X"),
+ ]
+
+
+def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D2C0, "V"),
+ (0x1D2D4, "X"),
+ (0x1D2E0, "V"),
+ (0x1D2F4, "X"),
+ (0x1D300, "V"),
+ (0x1D357, "X"),
+ (0x1D360, "V"),
+ (0x1D379, "X"),
+ (0x1D400, "M", "a"),
+ (0x1D401, "M", "b"),
+ (0x1D402, "M", "c"),
+ (0x1D403, "M", "d"),
+ (0x1D404, "M", "e"),
+ (0x1D405, "M", "f"),
+ (0x1D406, "M", "g"),
+ (0x1D407, "M", "h"),
+ (0x1D408, "M", "i"),
+ (0x1D409, "M", "j"),
+ (0x1D40A, "M", "k"),
+ (0x1D40B, "M", "l"),
+ (0x1D40C, "M", "m"),
+ (0x1D40D, "M", "n"),
+ (0x1D40E, "M", "o"),
+ (0x1D40F, "M", "p"),
+ (0x1D410, "M", "q"),
+ (0x1D411, "M", "r"),
+ (0x1D412, "M", "s"),
+ (0x1D413, "M", "t"),
+ (0x1D414, "M", "u"),
+ (0x1D415, "M", "v"),
+ (0x1D416, "M", "w"),
+ (0x1D417, "M", "x"),
+ (0x1D418, "M", "y"),
+ (0x1D419, "M", "z"),
+ (0x1D41A, "M", "a"),
+ (0x1D41B, "M", "b"),
+ (0x1D41C, "M", "c"),
+ (0x1D41D, "M", "d"),
+ (0x1D41E, "M", "e"),
+ (0x1D41F, "M", "f"),
+ (0x1D420, "M", "g"),
+ (0x1D421, "M", "h"),
+ (0x1D422, "M", "i"),
+ (0x1D423, "M", "j"),
+ (0x1D424, "M", "k"),
+ (0x1D425, "M", "l"),
+ (0x1D426, "M", "m"),
+ (0x1D427, "M", "n"),
+ (0x1D428, "M", "o"),
+ (0x1D429, "M", "p"),
+ (0x1D42A, "M", "q"),
+ (0x1D42B, "M", "r"),
+ (0x1D42C, "M", "s"),
+ (0x1D42D, "M", "t"),
+ (0x1D42E, "M", "u"),
+ (0x1D42F, "M", "v"),
+ (0x1D430, "M", "w"),
+ (0x1D431, "M", "x"),
+ (0x1D432, "M", "y"),
+ (0x1D433, "M", "z"),
+ (0x1D434, "M", "a"),
+ (0x1D435, "M", "b"),
+ (0x1D436, "M", "c"),
+ (0x1D437, "M", "d"),
+ (0x1D438, "M", "e"),
+ (0x1D439, "M", "f"),
+ (0x1D43A, "M", "g"),
+ (0x1D43B, "M", "h"),
+ (0x1D43C, "M", "i"),
+ (0x1D43D, "M", "j"),
+ (0x1D43E, "M", "k"),
+ (0x1D43F, "M", "l"),
+ (0x1D440, "M", "m"),
+ (0x1D441, "M", "n"),
+ (0x1D442, "M", "o"),
+ (0x1D443, "M", "p"),
+ (0x1D444, "M", "q"),
+ (0x1D445, "M", "r"),
+ (0x1D446, "M", "s"),
+ (0x1D447, "M", "t"),
+ (0x1D448, "M", "u"),
+ (0x1D449, "M", "v"),
+ (0x1D44A, "M", "w"),
+ (0x1D44B, "M", "x"),
+ (0x1D44C, "M", "y"),
+ (0x1D44D, "M", "z"),
+ (0x1D44E, "M", "a"),
+ (0x1D44F, "M", "b"),
+ (0x1D450, "M", "c"),
+ (0x1D451, "M", "d"),
+ (0x1D452, "M", "e"),
+ (0x1D453, "M", "f"),
+ (0x1D454, "M", "g"),
+ (0x1D455, "X"),
+ (0x1D456, "M", "i"),
+ (0x1D457, "M", "j"),
+ (0x1D458, "M", "k"),
+ (0x1D459, "M", "l"),
+ (0x1D45A, "M", "m"),
+ (0x1D45B, "M", "n"),
+ ]
+
+
+def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D45C, "M", "o"),
+ (0x1D45D, "M", "p"),
+ (0x1D45E, "M", "q"),
+ (0x1D45F, "M", "r"),
+ (0x1D460, "M", "s"),
+ (0x1D461, "M", "t"),
+ (0x1D462, "M", "u"),
+ (0x1D463, "M", "v"),
+ (0x1D464, "M", "w"),
+ (0x1D465, "M", "x"),
+ (0x1D466, "M", "y"),
+ (0x1D467, "M", "z"),
+ (0x1D468, "M", "a"),
+ (0x1D469, "M", "b"),
+ (0x1D46A, "M", "c"),
+ (0x1D46B, "M", "d"),
+ (0x1D46C, "M", "e"),
+ (0x1D46D, "M", "f"),
+ (0x1D46E, "M", "g"),
+ (0x1D46F, "M", "h"),
+ (0x1D470, "M", "i"),
+ (0x1D471, "M", "j"),
+ (0x1D472, "M", "k"),
+ (0x1D473, "M", "l"),
+ (0x1D474, "M", "m"),
+ (0x1D475, "M", "n"),
+ (0x1D476, "M", "o"),
+ (0x1D477, "M", "p"),
+ (0x1D478, "M", "q"),
+ (0x1D479, "M", "r"),
+ (0x1D47A, "M", "s"),
+ (0x1D47B, "M", "t"),
+ (0x1D47C, "M", "u"),
+ (0x1D47D, "M", "v"),
+ (0x1D47E, "M", "w"),
+ (0x1D47F, "M", "x"),
+ (0x1D480, "M", "y"),
+ (0x1D481, "M", "z"),
+ (0x1D482, "M", "a"),
+ (0x1D483, "M", "b"),
+ (0x1D484, "M", "c"),
+ (0x1D485, "M", "d"),
+ (0x1D486, "M", "e"),
+ (0x1D487, "M", "f"),
+ (0x1D488, "M", "g"),
+ (0x1D489, "M", "h"),
+ (0x1D48A, "M", "i"),
+ (0x1D48B, "M", "j"),
+ (0x1D48C, "M", "k"),
+ (0x1D48D, "M", "l"),
+ (0x1D48E, "M", "m"),
+ (0x1D48F, "M", "n"),
+ (0x1D490, "M", "o"),
+ (0x1D491, "M", "p"),
+ (0x1D492, "M", "q"),
+ (0x1D493, "M", "r"),
+ (0x1D494, "M", "s"),
+ (0x1D495, "M", "t"),
+ (0x1D496, "M", "u"),
+ (0x1D497, "M", "v"),
+ (0x1D498, "M", "w"),
+ (0x1D499, "M", "x"),
+ (0x1D49A, "M", "y"),
+ (0x1D49B, "M", "z"),
+ (0x1D49C, "M", "a"),
+ (0x1D49D, "X"),
+ (0x1D49E, "M", "c"),
+ (0x1D49F, "M", "d"),
+ (0x1D4A0, "X"),
+ (0x1D4A2, "M", "g"),
+ (0x1D4A3, "X"),
+ (0x1D4A5, "M", "j"),
+ (0x1D4A6, "M", "k"),
+ (0x1D4A7, "X"),
+ (0x1D4A9, "M", "n"),
+ (0x1D4AA, "M", "o"),
+ (0x1D4AB, "M", "p"),
+ (0x1D4AC, "M", "q"),
+ (0x1D4AD, "X"),
+ (0x1D4AE, "M", "s"),
+ (0x1D4AF, "M", "t"),
+ (0x1D4B0, "M", "u"),
+ (0x1D4B1, "M", "v"),
+ (0x1D4B2, "M", "w"),
+ (0x1D4B3, "M", "x"),
+ (0x1D4B4, "M", "y"),
+ (0x1D4B5, "M", "z"),
+ (0x1D4B6, "M", "a"),
+ (0x1D4B7, "M", "b"),
+ (0x1D4B8, "M", "c"),
+ (0x1D4B9, "M", "d"),
+ (0x1D4BA, "X"),
+ (0x1D4BB, "M", "f"),
+ (0x1D4BC, "X"),
+ (0x1D4BD, "M", "h"),
+ (0x1D4BE, "M", "i"),
+ (0x1D4BF, "M", "j"),
+ (0x1D4C0, "M", "k"),
+ (0x1D4C1, "M", "l"),
+ (0x1D4C2, "M", "m"),
+ ]
+
+
+def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D4C3, "M", "n"),
+ (0x1D4C4, "X"),
+ (0x1D4C5, "M", "p"),
+ (0x1D4C6, "M", "q"),
+ (0x1D4C7, "M", "r"),
+ (0x1D4C8, "M", "s"),
+ (0x1D4C9, "M", "t"),
+ (0x1D4CA, "M", "u"),
+ (0x1D4CB, "M", "v"),
+ (0x1D4CC, "M", "w"),
+ (0x1D4CD, "M", "x"),
+ (0x1D4CE, "M", "y"),
+ (0x1D4CF, "M", "z"),
+ (0x1D4D0, "M", "a"),
+ (0x1D4D1, "M", "b"),
+ (0x1D4D2, "M", "c"),
+ (0x1D4D3, "M", "d"),
+ (0x1D4D4, "M", "e"),
+ (0x1D4D5, "M", "f"),
+ (0x1D4D6, "M", "g"),
+ (0x1D4D7, "M", "h"),
+ (0x1D4D8, "M", "i"),
+ (0x1D4D9, "M", "j"),
"j"), + (0x1D4DA, "M", "k"), + (0x1D4DB, "M", "l"), + (0x1D4DC, "M", "m"), + (0x1D4DD, "M", "n"), + (0x1D4DE, "M", "o"), + (0x1D4DF, "M", "p"), + (0x1D4E0, "M", "q"), + (0x1D4E1, "M", "r"), + (0x1D4E2, "M", "s"), + (0x1D4E3, "M", "t"), + (0x1D4E4, "M", "u"), + (0x1D4E5, "M", "v"), + (0x1D4E6, "M", "w"), + (0x1D4E7, "M", "x"), + (0x1D4E8, "M", "y"), + (0x1D4E9, "M", "z"), + (0x1D4EA, "M", "a"), + (0x1D4EB, "M", "b"), + (0x1D4EC, "M", "c"), + (0x1D4ED, "M", "d"), + (0x1D4EE, "M", "e"), + (0x1D4EF, "M", "f"), + (0x1D4F0, "M", "g"), + (0x1D4F1, "M", "h"), + (0x1D4F2, "M", "i"), + (0x1D4F3, "M", "j"), + (0x1D4F4, "M", "k"), + (0x1D4F5, "M", "l"), + (0x1D4F6, "M", "m"), + (0x1D4F7, "M", "n"), + (0x1D4F8, "M", "o"), + (0x1D4F9, "M", "p"), + (0x1D4FA, "M", "q"), + (0x1D4FB, "M", "r"), + (0x1D4FC, "M", "s"), + (0x1D4FD, "M", "t"), + (0x1D4FE, "M", "u"), + (0x1D4FF, "M", "v"), + (0x1D500, "M", "w"), + (0x1D501, "M", "x"), + (0x1D502, "M", "y"), + (0x1D503, "M", "z"), + (0x1D504, "M", "a"), + (0x1D505, "M", "b"), + (0x1D506, "X"), + (0x1D507, "M", "d"), + (0x1D508, "M", "e"), + (0x1D509, "M", "f"), + (0x1D50A, "M", "g"), + (0x1D50B, "X"), + (0x1D50D, "M", "j"), + (0x1D50E, "M", "k"), + (0x1D50F, "M", "l"), + (0x1D510, "M", "m"), + (0x1D511, "M", "n"), + (0x1D512, "M", "o"), + (0x1D513, "M", "p"), + (0x1D514, "M", "q"), + (0x1D515, "X"), + (0x1D516, "M", "s"), + (0x1D517, "M", "t"), + (0x1D518, "M", "u"), + (0x1D519, "M", "v"), + (0x1D51A, "M", "w"), + (0x1D51B, "M", "x"), + (0x1D51C, "M", "y"), + (0x1D51D, "X"), + (0x1D51E, "M", "a"), + (0x1D51F, "M", "b"), + (0x1D520, "M", "c"), + (0x1D521, "M", "d"), + (0x1D522, "M", "e"), + (0x1D523, "M", "f"), + (0x1D524, "M", "g"), + (0x1D525, "M", "h"), + (0x1D526, "M", "i"), + (0x1D527, "M", "j"), + ] + + +def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D528, "M", "k"), + (0x1D529, "M", "l"), + (0x1D52A, "M", "m"), + (0x1D52B, "M", "n"), + (0x1D52C, "M", "o"), + (0x1D52D, "M", "p"), + (0x1D52E, "M", "q"), + (0x1D52F, "M", "r"), + (0x1D530, "M", "s"), + (0x1D531, "M", "t"), + (0x1D532, "M", "u"), + (0x1D533, "M", "v"), + (0x1D534, "M", "w"), + (0x1D535, "M", "x"), + (0x1D536, "M", "y"), + (0x1D537, "M", "z"), + (0x1D538, "M", "a"), + (0x1D539, "M", "b"), + (0x1D53A, "X"), + (0x1D53B, "M", "d"), + (0x1D53C, "M", "e"), + (0x1D53D, "M", "f"), + (0x1D53E, "M", "g"), + (0x1D53F, "X"), + (0x1D540, "M", "i"), + (0x1D541, "M", "j"), + (0x1D542, "M", "k"), + (0x1D543, "M", "l"), + (0x1D544, "M", "m"), + (0x1D545, "X"), + (0x1D546, "M", "o"), + (0x1D547, "X"), + (0x1D54A, "M", "s"), + (0x1D54B, "M", "t"), + (0x1D54C, "M", "u"), + (0x1D54D, "M", "v"), + (0x1D54E, "M", "w"), + (0x1D54F, "M", "x"), + (0x1D550, "M", "y"), + (0x1D551, "X"), + (0x1D552, "M", "a"), + (0x1D553, "M", "b"), + (0x1D554, "M", "c"), + (0x1D555, "M", "d"), + (0x1D556, "M", "e"), + (0x1D557, "M", "f"), + (0x1D558, "M", "g"), + (0x1D559, "M", "h"), + (0x1D55A, "M", "i"), + (0x1D55B, "M", "j"), + (0x1D55C, "M", "k"), + (0x1D55D, "M", "l"), + (0x1D55E, "M", "m"), + (0x1D55F, "M", "n"), + (0x1D560, "M", "o"), + (0x1D561, "M", "p"), + (0x1D562, "M", "q"), + (0x1D563, "M", "r"), + (0x1D564, "M", "s"), + (0x1D565, "M", "t"), + (0x1D566, "M", "u"), + (0x1D567, "M", "v"), + (0x1D568, "M", "w"), + (0x1D569, "M", "x"), + (0x1D56A, "M", "y"), + (0x1D56B, "M", "z"), + (0x1D56C, "M", "a"), + (0x1D56D, "M", "b"), + (0x1D56E, "M", "c"), + (0x1D56F, "M", "d"), + (0x1D570, "M", "e"), + (0x1D571, "M", "f"), + (0x1D572, "M", "g"), + (0x1D573, "M", "h"), + (0x1D574, "M", "i"), + 
(0x1D575, "M", "j"), + (0x1D576, "M", "k"), + (0x1D577, "M", "l"), + (0x1D578, "M", "m"), + (0x1D579, "M", "n"), + (0x1D57A, "M", "o"), + (0x1D57B, "M", "p"), + (0x1D57C, "M", "q"), + (0x1D57D, "M", "r"), + (0x1D57E, "M", "s"), + (0x1D57F, "M", "t"), + (0x1D580, "M", "u"), + (0x1D581, "M", "v"), + (0x1D582, "M", "w"), + (0x1D583, "M", "x"), + (0x1D584, "M", "y"), + (0x1D585, "M", "z"), + (0x1D586, "M", "a"), + (0x1D587, "M", "b"), + (0x1D588, "M", "c"), + (0x1D589, "M", "d"), + (0x1D58A, "M", "e"), + (0x1D58B, "M", "f"), + (0x1D58C, "M", "g"), + (0x1D58D, "M", "h"), + ] + + +def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D58E, "M", "i"), + (0x1D58F, "M", "j"), + (0x1D590, "M", "k"), + (0x1D591, "M", "l"), + (0x1D592, "M", "m"), + (0x1D593, "M", "n"), + (0x1D594, "M", "o"), + (0x1D595, "M", "p"), + (0x1D596, "M", "q"), + (0x1D597, "M", "r"), + (0x1D598, "M", "s"), + (0x1D599, "M", "t"), + (0x1D59A, "M", "u"), + (0x1D59B, "M", "v"), + (0x1D59C, "M", "w"), + (0x1D59D, "M", "x"), + (0x1D59E, "M", "y"), + (0x1D59F, "M", "z"), + (0x1D5A0, "M", "a"), + (0x1D5A1, "M", "b"), + (0x1D5A2, "M", "c"), + (0x1D5A3, "M", "d"), + (0x1D5A4, "M", "e"), + (0x1D5A5, "M", "f"), + (0x1D5A6, "M", "g"), + (0x1D5A7, "M", "h"), + (0x1D5A8, "M", "i"), + (0x1D5A9, "M", "j"), + (0x1D5AA, "M", "k"), + (0x1D5AB, "M", "l"), + (0x1D5AC, "M", "m"), + (0x1D5AD, "M", "n"), + (0x1D5AE, "M", "o"), + (0x1D5AF, "M", "p"), + (0x1D5B0, "M", "q"), + (0x1D5B1, "M", "r"), + (0x1D5B2, "M", "s"), + (0x1D5B3, "M", "t"), + (0x1D5B4, "M", "u"), + (0x1D5B5, "M", "v"), + (0x1D5B6, "M", "w"), + (0x1D5B7, "M", "x"), + (0x1D5B8, "M", "y"), + (0x1D5B9, "M", "z"), + (0x1D5BA, "M", "a"), + (0x1D5BB, "M", "b"), + (0x1D5BC, "M", "c"), + (0x1D5BD, "M", "d"), + (0x1D5BE, "M", "e"), + (0x1D5BF, "M", "f"), + (0x1D5C0, "M", "g"), + (0x1D5C1, "M", "h"), + (0x1D5C2, "M", "i"), + (0x1D5C3, "M", "j"), + (0x1D5C4, "M", "k"), + (0x1D5C5, "M", "l"), + (0x1D5C6, "M", "m"), + (0x1D5C7, "M", "n"), + (0x1D5C8, "M", "o"), + (0x1D5C9, "M", "p"), + (0x1D5CA, "M", "q"), + (0x1D5CB, "M", "r"), + (0x1D5CC, "M", "s"), + (0x1D5CD, "M", "t"), + (0x1D5CE, "M", "u"), + (0x1D5CF, "M", "v"), + (0x1D5D0, "M", "w"), + (0x1D5D1, "M", "x"), + (0x1D5D2, "M", "y"), + (0x1D5D3, "M", "z"), + (0x1D5D4, "M", "a"), + (0x1D5D5, "M", "b"), + (0x1D5D6, "M", "c"), + (0x1D5D7, "M", "d"), + (0x1D5D8, "M", "e"), + (0x1D5D9, "M", "f"), + (0x1D5DA, "M", "g"), + (0x1D5DB, "M", "h"), + (0x1D5DC, "M", "i"), + (0x1D5DD, "M", "j"), + (0x1D5DE, "M", "k"), + (0x1D5DF, "M", "l"), + (0x1D5E0, "M", "m"), + (0x1D5E1, "M", "n"), + (0x1D5E2, "M", "o"), + (0x1D5E3, "M", "p"), + (0x1D5E4, "M", "q"), + (0x1D5E5, "M", "r"), + (0x1D5E6, "M", "s"), + (0x1D5E7, "M", "t"), + (0x1D5E8, "M", "u"), + (0x1D5E9, "M", "v"), + (0x1D5EA, "M", "w"), + (0x1D5EB, "M", "x"), + (0x1D5EC, "M", "y"), + (0x1D5ED, "M", "z"), + (0x1D5EE, "M", "a"), + (0x1D5EF, "M", "b"), + (0x1D5F0, "M", "c"), + (0x1D5F1, "M", "d"), + ] + + +def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D5F2, "M", "e"), + (0x1D5F3, "M", "f"), + (0x1D5F4, "M", "g"), + (0x1D5F5, "M", "h"), + (0x1D5F6, "M", "i"), + (0x1D5F7, "M", "j"), + (0x1D5F8, "M", "k"), + (0x1D5F9, "M", "l"), + (0x1D5FA, "M", "m"), + (0x1D5FB, "M", "n"), + (0x1D5FC, "M", "o"), + (0x1D5FD, "M", "p"), + (0x1D5FE, "M", "q"), + (0x1D5FF, "M", "r"), + (0x1D600, "M", "s"), + (0x1D601, "M", "t"), + (0x1D602, "M", "u"), + (0x1D603, "M", "v"), + (0x1D604, "M", "w"), + (0x1D605, "M", "x"), + (0x1D606, "M", "y"), + (0x1D607, "M", 
"z"), + (0x1D608, "M", "a"), + (0x1D609, "M", "b"), + (0x1D60A, "M", "c"), + (0x1D60B, "M", "d"), + (0x1D60C, "M", "e"), + (0x1D60D, "M", "f"), + (0x1D60E, "M", "g"), + (0x1D60F, "M", "h"), + (0x1D610, "M", "i"), + (0x1D611, "M", "j"), + (0x1D612, "M", "k"), + (0x1D613, "M", "l"), + (0x1D614, "M", "m"), + (0x1D615, "M", "n"), + (0x1D616, "M", "o"), + (0x1D617, "M", "p"), + (0x1D618, "M", "q"), + (0x1D619, "M", "r"), + (0x1D61A, "M", "s"), + (0x1D61B, "M", "t"), + (0x1D61C, "M", "u"), + (0x1D61D, "M", "v"), + (0x1D61E, "M", "w"), + (0x1D61F, "M", "x"), + (0x1D620, "M", "y"), + (0x1D621, "M", "z"), + (0x1D622, "M", "a"), + (0x1D623, "M", "b"), + (0x1D624, "M", "c"), + (0x1D625, "M", "d"), + (0x1D626, "M", "e"), + (0x1D627, "M", "f"), + (0x1D628, "M", "g"), + (0x1D629, "M", "h"), + (0x1D62A, "M", "i"), + (0x1D62B, "M", "j"), + (0x1D62C, "M", "k"), + (0x1D62D, "M", "l"), + (0x1D62E, "M", "m"), + (0x1D62F, "M", "n"), + (0x1D630, "M", "o"), + (0x1D631, "M", "p"), + (0x1D632, "M", "q"), + (0x1D633, "M", "r"), + (0x1D634, "M", "s"), + (0x1D635, "M", "t"), + (0x1D636, "M", "u"), + (0x1D637, "M", "v"), + (0x1D638, "M", "w"), + (0x1D639, "M", "x"), + (0x1D63A, "M", "y"), + (0x1D63B, "M", "z"), + (0x1D63C, "M", "a"), + (0x1D63D, "M", "b"), + (0x1D63E, "M", "c"), + (0x1D63F, "M", "d"), + (0x1D640, "M", "e"), + (0x1D641, "M", "f"), + (0x1D642, "M", "g"), + (0x1D643, "M", "h"), + (0x1D644, "M", "i"), + (0x1D645, "M", "j"), + (0x1D646, "M", "k"), + (0x1D647, "M", "l"), + (0x1D648, "M", "m"), + (0x1D649, "M", "n"), + (0x1D64A, "M", "o"), + (0x1D64B, "M", "p"), + (0x1D64C, "M", "q"), + (0x1D64D, "M", "r"), + (0x1D64E, "M", "s"), + (0x1D64F, "M", "t"), + (0x1D650, "M", "u"), + (0x1D651, "M", "v"), + (0x1D652, "M", "w"), + (0x1D653, "M", "x"), + (0x1D654, "M", "y"), + (0x1D655, "M", "z"), + ] + + +def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D656, "M", "a"), + (0x1D657, "M", "b"), + (0x1D658, "M", "c"), + (0x1D659, "M", "d"), + (0x1D65A, "M", "e"), + (0x1D65B, "M", "f"), + (0x1D65C, "M", "g"), + (0x1D65D, "M", "h"), + (0x1D65E, "M", "i"), + (0x1D65F, "M", "j"), + (0x1D660, "M", "k"), + (0x1D661, "M", "l"), + (0x1D662, "M", "m"), + (0x1D663, "M", "n"), + (0x1D664, "M", "o"), + (0x1D665, "M", "p"), + (0x1D666, "M", "q"), + (0x1D667, "M", "r"), + (0x1D668, "M", "s"), + (0x1D669, "M", "t"), + (0x1D66A, "M", "u"), + (0x1D66B, "M", "v"), + (0x1D66C, "M", "w"), + (0x1D66D, "M", "x"), + (0x1D66E, "M", "y"), + (0x1D66F, "M", "z"), + (0x1D670, "M", "a"), + (0x1D671, "M", "b"), + (0x1D672, "M", "c"), + (0x1D673, "M", "d"), + (0x1D674, "M", "e"), + (0x1D675, "M", "f"), + (0x1D676, "M", "g"), + (0x1D677, "M", "h"), + (0x1D678, "M", "i"), + (0x1D679, "M", "j"), + (0x1D67A, "M", "k"), + (0x1D67B, "M", "l"), + (0x1D67C, "M", "m"), + (0x1D67D, "M", "n"), + (0x1D67E, "M", "o"), + (0x1D67F, "M", "p"), + (0x1D680, "M", "q"), + (0x1D681, "M", "r"), + (0x1D682, "M", "s"), + (0x1D683, "M", "t"), + (0x1D684, "M", "u"), + (0x1D685, "M", "v"), + (0x1D686, "M", "w"), + (0x1D687, "M", "x"), + (0x1D688, "M", "y"), + (0x1D689, "M", "z"), + (0x1D68A, "M", "a"), + (0x1D68B, "M", "b"), + (0x1D68C, "M", "c"), + (0x1D68D, "M", "d"), + (0x1D68E, "M", "e"), + (0x1D68F, "M", "f"), + (0x1D690, "M", "g"), + (0x1D691, "M", "h"), + (0x1D692, "M", "i"), + (0x1D693, "M", "j"), + (0x1D694, "M", "k"), + (0x1D695, "M", "l"), + (0x1D696, "M", "m"), + (0x1D697, "M", "n"), + (0x1D698, "M", "o"), + (0x1D699, "M", "p"), + (0x1D69A, "M", "q"), + (0x1D69B, "M", "r"), + (0x1D69C, "M", "s"), + (0x1D69D, "M", "t"), + 
(0x1D69E, "M", "u"), + (0x1D69F, "M", "v"), + (0x1D6A0, "M", "w"), + (0x1D6A1, "M", "x"), + (0x1D6A2, "M", "y"), + (0x1D6A3, "M", "z"), + (0x1D6A4, "M", "ı"), + (0x1D6A5, "M", "ȷ"), + (0x1D6A6, "X"), + (0x1D6A8, "M", "α"), + (0x1D6A9, "M", "β"), + (0x1D6AA, "M", "γ"), + (0x1D6AB, "M", "δ"), + (0x1D6AC, "M", "ε"), + (0x1D6AD, "M", "ζ"), + (0x1D6AE, "M", "η"), + (0x1D6AF, "M", "θ"), + (0x1D6B0, "M", "ι"), + (0x1D6B1, "M", "κ"), + (0x1D6B2, "M", "λ"), + (0x1D6B3, "M", "μ"), + (0x1D6B4, "M", "ν"), + (0x1D6B5, "M", "ξ"), + (0x1D6B6, "M", "ο"), + (0x1D6B7, "M", "π"), + (0x1D6B8, "M", "ρ"), + (0x1D6B9, "M", "θ"), + (0x1D6BA, "M", "σ"), + ] + + +def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D6BB, "M", "τ"), + (0x1D6BC, "M", "υ"), + (0x1D6BD, "M", "φ"), + (0x1D6BE, "M", "χ"), + (0x1D6BF, "M", "ψ"), + (0x1D6C0, "M", "ω"), + (0x1D6C1, "M", "∇"), + (0x1D6C2, "M", "α"), + (0x1D6C3, "M", "β"), + (0x1D6C4, "M", "γ"), + (0x1D6C5, "M", "δ"), + (0x1D6C6, "M", "ε"), + (0x1D6C7, "M", "ζ"), + (0x1D6C8, "M", "η"), + (0x1D6C9, "M", "θ"), + (0x1D6CA, "M", "ι"), + (0x1D6CB, "M", "κ"), + (0x1D6CC, "M", "λ"), + (0x1D6CD, "M", "μ"), + (0x1D6CE, "M", "ν"), + (0x1D6CF, "M", "ξ"), + (0x1D6D0, "M", "ο"), + (0x1D6D1, "M", "π"), + (0x1D6D2, "M", "ρ"), + (0x1D6D3, "M", "σ"), + (0x1D6D5, "M", "τ"), + (0x1D6D6, "M", "υ"), + (0x1D6D7, "M", "φ"), + (0x1D6D8, "M", "χ"), + (0x1D6D9, "M", "ψ"), + (0x1D6DA, "M", "ω"), + (0x1D6DB, "M", "∂"), + (0x1D6DC, "M", "ε"), + (0x1D6DD, "M", "θ"), + (0x1D6DE, "M", "κ"), + (0x1D6DF, "M", "φ"), + (0x1D6E0, "M", "ρ"), + (0x1D6E1, "M", "π"), + (0x1D6E2, "M", "α"), + (0x1D6E3, "M", "β"), + (0x1D6E4, "M", "γ"), + (0x1D6E5, "M", "δ"), + (0x1D6E6, "M", "ε"), + (0x1D6E7, "M", "ζ"), + (0x1D6E8, "M", "η"), + (0x1D6E9, "M", "θ"), + (0x1D6EA, "M", "ι"), + (0x1D6EB, "M", "κ"), + (0x1D6EC, "M", "λ"), + (0x1D6ED, "M", "μ"), + (0x1D6EE, "M", "ν"), + (0x1D6EF, "M", "ξ"), + (0x1D6F0, "M", "ο"), + (0x1D6F1, "M", "π"), + (0x1D6F2, "M", "ρ"), + (0x1D6F3, "M", "θ"), + (0x1D6F4, "M", "σ"), + (0x1D6F5, "M", "τ"), + (0x1D6F6, "M", "υ"), + (0x1D6F7, "M", "φ"), + (0x1D6F8, "M", "χ"), + (0x1D6F9, "M", "ψ"), + (0x1D6FA, "M", "ω"), + (0x1D6FB, "M", "∇"), + (0x1D6FC, "M", "α"), + (0x1D6FD, "M", "β"), + (0x1D6FE, "M", "γ"), + (0x1D6FF, "M", "δ"), + (0x1D700, "M", "ε"), + (0x1D701, "M", "ζ"), + (0x1D702, "M", "η"), + (0x1D703, "M", "θ"), + (0x1D704, "M", "ι"), + (0x1D705, "M", "κ"), + (0x1D706, "M", "λ"), + (0x1D707, "M", "μ"), + (0x1D708, "M", "ν"), + (0x1D709, "M", "ξ"), + (0x1D70A, "M", "ο"), + (0x1D70B, "M", "π"), + (0x1D70C, "M", "ρ"), + (0x1D70D, "M", "σ"), + (0x1D70F, "M", "τ"), + (0x1D710, "M", "υ"), + (0x1D711, "M", "φ"), + (0x1D712, "M", "χ"), + (0x1D713, "M", "ψ"), + (0x1D714, "M", "ω"), + (0x1D715, "M", "∂"), + (0x1D716, "M", "ε"), + (0x1D717, "M", "θ"), + (0x1D718, "M", "κ"), + (0x1D719, "M", "φ"), + (0x1D71A, "M", "ρ"), + (0x1D71B, "M", "π"), + (0x1D71C, "M", "α"), + (0x1D71D, "M", "β"), + (0x1D71E, "M", "γ"), + (0x1D71F, "M", "δ"), + (0x1D720, "M", "ε"), + ] + + +def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D721, "M", "ζ"), + (0x1D722, "M", "η"), + (0x1D723, "M", "θ"), + (0x1D724, "M", "ι"), + (0x1D725, "M", "κ"), + (0x1D726, "M", "λ"), + (0x1D727, "M", "μ"), + (0x1D728, "M", "ν"), + (0x1D729, "M", "ξ"), + (0x1D72A, "M", "ο"), + (0x1D72B, "M", "π"), + (0x1D72C, "M", "ρ"), + (0x1D72D, "M", "θ"), + (0x1D72E, "M", "σ"), + (0x1D72F, "M", "τ"), + (0x1D730, "M", "υ"), + (0x1D731, "M", "φ"), + (0x1D732, "M", "χ"), + (0x1D733, "M", "ψ"), + 
(0x1D734, "M", "ω"), + (0x1D735, "M", "∇"), + (0x1D736, "M", "α"), + (0x1D737, "M", "β"), + (0x1D738, "M", "γ"), + (0x1D739, "M", "δ"), + (0x1D73A, "M", "ε"), + (0x1D73B, "M", "ζ"), + (0x1D73C, "M", "η"), + (0x1D73D, "M", "θ"), + (0x1D73E, "M", "ι"), + (0x1D73F, "M", "κ"), + (0x1D740, "M", "λ"), + (0x1D741, "M", "μ"), + (0x1D742, "M", "ν"), + (0x1D743, "M", "ξ"), + (0x1D744, "M", "ο"), + (0x1D745, "M", "π"), + (0x1D746, "M", "ρ"), + (0x1D747, "M", "σ"), + (0x1D749, "M", "τ"), + (0x1D74A, "M", "υ"), + (0x1D74B, "M", "φ"), + (0x1D74C, "M", "χ"), + (0x1D74D, "M", "ψ"), + (0x1D74E, "M", "ω"), + (0x1D74F, "M", "∂"), + (0x1D750, "M", "ε"), + (0x1D751, "M", "θ"), + (0x1D752, "M", "κ"), + (0x1D753, "M", "φ"), + (0x1D754, "M", "ρ"), + (0x1D755, "M", "π"), + (0x1D756, "M", "α"), + (0x1D757, "M", "β"), + (0x1D758, "M", "γ"), + (0x1D759, "M", "δ"), + (0x1D75A, "M", "ε"), + (0x1D75B, "M", "ζ"), + (0x1D75C, "M", "η"), + (0x1D75D, "M", "θ"), + (0x1D75E, "M", "ι"), + (0x1D75F, "M", "κ"), + (0x1D760, "M", "λ"), + (0x1D761, "M", "μ"), + (0x1D762, "M", "ν"), + (0x1D763, "M", "ξ"), + (0x1D764, "M", "ο"), + (0x1D765, "M", "π"), + (0x1D766, "M", "ρ"), + (0x1D767, "M", "θ"), + (0x1D768, "M", "σ"), + (0x1D769, "M", "τ"), + (0x1D76A, "M", "υ"), + (0x1D76B, "M", "φ"), + (0x1D76C, "M", "χ"), + (0x1D76D, "M", "ψ"), + (0x1D76E, "M", "ω"), + (0x1D76F, "M", "∇"), + (0x1D770, "M", "α"), + (0x1D771, "M", "β"), + (0x1D772, "M", "γ"), + (0x1D773, "M", "δ"), + (0x1D774, "M", "ε"), + (0x1D775, "M", "ζ"), + (0x1D776, "M", "η"), + (0x1D777, "M", "θ"), + (0x1D778, "M", "ι"), + (0x1D779, "M", "κ"), + (0x1D77A, "M", "λ"), + (0x1D77B, "M", "μ"), + (0x1D77C, "M", "ν"), + (0x1D77D, "M", "ξ"), + (0x1D77E, "M", "ο"), + (0x1D77F, "M", "π"), + (0x1D780, "M", "ρ"), + (0x1D781, "M", "σ"), + (0x1D783, "M", "τ"), + (0x1D784, "M", "υ"), + (0x1D785, "M", "φ"), + (0x1D786, "M", "χ"), + ] + + +def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D787, "M", "ψ"), + (0x1D788, "M", "ω"), + (0x1D789, "M", "∂"), + (0x1D78A, "M", "ε"), + (0x1D78B, "M", "θ"), + (0x1D78C, "M", "κ"), + (0x1D78D, "M", "φ"), + (0x1D78E, "M", "ρ"), + (0x1D78F, "M", "π"), + (0x1D790, "M", "α"), + (0x1D791, "M", "β"), + (0x1D792, "M", "γ"), + (0x1D793, "M", "δ"), + (0x1D794, "M", "ε"), + (0x1D795, "M", "ζ"), + (0x1D796, "M", "η"), + (0x1D797, "M", "θ"), + (0x1D798, "M", "ι"), + (0x1D799, "M", "κ"), + (0x1D79A, "M", "λ"), + (0x1D79B, "M", "μ"), + (0x1D79C, "M", "ν"), + (0x1D79D, "M", "ξ"), + (0x1D79E, "M", "ο"), + (0x1D79F, "M", "π"), + (0x1D7A0, "M", "ρ"), + (0x1D7A1, "M", "θ"), + (0x1D7A2, "M", "σ"), + (0x1D7A3, "M", "τ"), + (0x1D7A4, "M", "υ"), + (0x1D7A5, "M", "φ"), + (0x1D7A6, "M", "χ"), + (0x1D7A7, "M", "ψ"), + (0x1D7A8, "M", "ω"), + (0x1D7A9, "M", "∇"), + (0x1D7AA, "M", "α"), + (0x1D7AB, "M", "β"), + (0x1D7AC, "M", "γ"), + (0x1D7AD, "M", "δ"), + (0x1D7AE, "M", "ε"), + (0x1D7AF, "M", "ζ"), + (0x1D7B0, "M", "η"), + (0x1D7B1, "M", "θ"), + (0x1D7B2, "M", "ι"), + (0x1D7B3, "M", "κ"), + (0x1D7B4, "M", "λ"), + (0x1D7B5, "M", "μ"), + (0x1D7B6, "M", "ν"), + (0x1D7B7, "M", "ξ"), + (0x1D7B8, "M", "ο"), + (0x1D7B9, "M", "π"), + (0x1D7BA, "M", "ρ"), + (0x1D7BB, "M", "σ"), + (0x1D7BD, "M", "τ"), + (0x1D7BE, "M", "υ"), + (0x1D7BF, "M", "φ"), + (0x1D7C0, "M", "χ"), + (0x1D7C1, "M", "ψ"), + (0x1D7C2, "M", "ω"), + (0x1D7C3, "M", "∂"), + (0x1D7C4, "M", "ε"), + (0x1D7C5, "M", "θ"), + (0x1D7C6, "M", "κ"), + (0x1D7C7, "M", "φ"), + (0x1D7C8, "M", "ρ"), + (0x1D7C9, "M", "π"), + (0x1D7CA, "M", "ϝ"), + (0x1D7CC, "X"), + (0x1D7CE, "M", "0"), + (0x1D7CF, "M", "1"), 
+ (0x1D7D0, "M", "2"), + (0x1D7D1, "M", "3"), + (0x1D7D2, "M", "4"), + (0x1D7D3, "M", "5"), + (0x1D7D4, "M", "6"), + (0x1D7D5, "M", "7"), + (0x1D7D6, "M", "8"), + (0x1D7D7, "M", "9"), + (0x1D7D8, "M", "0"), + (0x1D7D9, "M", "1"), + (0x1D7DA, "M", "2"), + (0x1D7DB, "M", "3"), + (0x1D7DC, "M", "4"), + (0x1D7DD, "M", "5"), + (0x1D7DE, "M", "6"), + (0x1D7DF, "M", "7"), + (0x1D7E0, "M", "8"), + (0x1D7E1, "M", "9"), + (0x1D7E2, "M", "0"), + (0x1D7E3, "M", "1"), + (0x1D7E4, "M", "2"), + (0x1D7E5, "M", "3"), + (0x1D7E6, "M", "4"), + (0x1D7E7, "M", "5"), + (0x1D7E8, "M", "6"), + (0x1D7E9, "M", "7"), + (0x1D7EA, "M", "8"), + (0x1D7EB, "M", "9"), + (0x1D7EC, "M", "0"), + (0x1D7ED, "M", "1"), + ] + + +def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1D7EE, "M", "2"), + (0x1D7EF, "M", "3"), + (0x1D7F0, "M", "4"), + (0x1D7F1, "M", "5"), + (0x1D7F2, "M", "6"), + (0x1D7F3, "M", "7"), + (0x1D7F4, "M", "8"), + (0x1D7F5, "M", "9"), + (0x1D7F6, "M", "0"), + (0x1D7F7, "M", "1"), + (0x1D7F8, "M", "2"), + (0x1D7F9, "M", "3"), + (0x1D7FA, "M", "4"), + (0x1D7FB, "M", "5"), + (0x1D7FC, "M", "6"), + (0x1D7FD, "M", "7"), + (0x1D7FE, "M", "8"), + (0x1D7FF, "M", "9"), + (0x1D800, "V"), + (0x1DA8C, "X"), + (0x1DA9B, "V"), + (0x1DAA0, "X"), + (0x1DAA1, "V"), + (0x1DAB0, "X"), + (0x1DF00, "V"), + (0x1DF1F, "X"), + (0x1DF25, "V"), + (0x1DF2B, "X"), + (0x1E000, "V"), + (0x1E007, "X"), + (0x1E008, "V"), + (0x1E019, "X"), + (0x1E01B, "V"), + (0x1E022, "X"), + (0x1E023, "V"), + (0x1E025, "X"), + (0x1E026, "V"), + (0x1E02B, "X"), + (0x1E030, "M", "а"), + (0x1E031, "M", "б"), + (0x1E032, "M", "в"), + (0x1E033, "M", "г"), + (0x1E034, "M", "д"), + (0x1E035, "M", "е"), + (0x1E036, "M", "ж"), + (0x1E037, "M", "з"), + (0x1E038, "M", "и"), + (0x1E039, "M", "к"), + (0x1E03A, "M", "л"), + (0x1E03B, "M", "м"), + (0x1E03C, "M", "о"), + (0x1E03D, "M", "п"), + (0x1E03E, "M", "р"), + (0x1E03F, "M", "с"), + (0x1E040, "M", "т"), + (0x1E041, "M", "у"), + (0x1E042, "M", "ф"), + (0x1E043, "M", "х"), + (0x1E044, "M", "ц"), + (0x1E045, "M", "ч"), + (0x1E046, "M", "ш"), + (0x1E047, "M", "ы"), + (0x1E048, "M", "э"), + (0x1E049, "M", "ю"), + (0x1E04A, "M", "ꚉ"), + (0x1E04B, "M", "ә"), + (0x1E04C, "M", "і"), + (0x1E04D, "M", "ј"), + (0x1E04E, "M", "ө"), + (0x1E04F, "M", "ү"), + (0x1E050, "M", "ӏ"), + (0x1E051, "M", "а"), + (0x1E052, "M", "б"), + (0x1E053, "M", "в"), + (0x1E054, "M", "г"), + (0x1E055, "M", "д"), + (0x1E056, "M", "е"), + (0x1E057, "M", "ж"), + (0x1E058, "M", "з"), + (0x1E059, "M", "и"), + (0x1E05A, "M", "к"), + (0x1E05B, "M", "л"), + (0x1E05C, "M", "о"), + (0x1E05D, "M", "п"), + (0x1E05E, "M", "с"), + (0x1E05F, "M", "у"), + (0x1E060, "M", "ф"), + (0x1E061, "M", "х"), + (0x1E062, "M", "ц"), + (0x1E063, "M", "ч"), + (0x1E064, "M", "ш"), + (0x1E065, "M", "ъ"), + (0x1E066, "M", "ы"), + (0x1E067, "M", "ґ"), + (0x1E068, "M", "і"), + (0x1E069, "M", "ѕ"), + (0x1E06A, "M", "џ"), + (0x1E06B, "M", "ҫ"), + (0x1E06C, "M", "ꙑ"), + (0x1E06D, "M", "ұ"), + ] + + +def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1E06E, "X"), + (0x1E08F, "V"), + (0x1E090, "X"), + (0x1E100, "V"), + (0x1E12D, "X"), + (0x1E130, "V"), + (0x1E13E, "X"), + (0x1E140, "V"), + (0x1E14A, "X"), + (0x1E14E, "V"), + (0x1E150, "X"), + (0x1E290, "V"), + (0x1E2AF, "X"), + (0x1E2C0, "V"), + (0x1E2FA, "X"), + (0x1E2FF, "V"), + (0x1E300, "X"), + (0x1E4D0, "V"), + (0x1E4FA, "X"), + (0x1E5D0, "V"), + (0x1E5FB, "X"), + (0x1E5FF, "V"), + (0x1E600, "X"), + (0x1E7E0, "V"), + (0x1E7E7, "X"), + (0x1E7E8, "V"), + (0x1E7EC, "X"), 
+ (0x1E7ED, "V"), + (0x1E7EF, "X"), + (0x1E7F0, "V"), + (0x1E7FF, "X"), + (0x1E800, "V"), + (0x1E8C5, "X"), + (0x1E8C7, "V"), + (0x1E8D7, "X"), + (0x1E900, "M", "𞤢"), + (0x1E901, "M", "𞤣"), + (0x1E902, "M", "𞤤"), + (0x1E903, "M", "𞤥"), + (0x1E904, "M", "𞤦"), + (0x1E905, "M", "𞤧"), + (0x1E906, "M", "𞤨"), + (0x1E907, "M", "𞤩"), + (0x1E908, "M", "𞤪"), + (0x1E909, "M", "𞤫"), + (0x1E90A, "M", "𞤬"), + (0x1E90B, "M", "𞤭"), + (0x1E90C, "M", "𞤮"), + (0x1E90D, "M", "𞤯"), + (0x1E90E, "M", "𞤰"), + (0x1E90F, "M", "𞤱"), + (0x1E910, "M", "𞤲"), + (0x1E911, "M", "𞤳"), + (0x1E912, "M", "𞤴"), + (0x1E913, "M", "𞤵"), + (0x1E914, "M", "𞤶"), + (0x1E915, "M", "𞤷"), + (0x1E916, "M", "𞤸"), + (0x1E917, "M", "𞤹"), + (0x1E918, "M", "𞤺"), + (0x1E919, "M", "𞤻"), + (0x1E91A, "M", "𞤼"), + (0x1E91B, "M", "𞤽"), + (0x1E91C, "M", "𞤾"), + (0x1E91D, "M", "𞤿"), + (0x1E91E, "M", "𞥀"), + (0x1E91F, "M", "𞥁"), + (0x1E920, "M", "𞥂"), + (0x1E921, "M", "𞥃"), + (0x1E922, "V"), + (0x1E94C, "X"), + (0x1E950, "V"), + (0x1E95A, "X"), + (0x1E95E, "V"), + (0x1E960, "X"), + (0x1EC71, "V"), + (0x1ECB5, "X"), + (0x1ED01, "V"), + (0x1ED3E, "X"), + (0x1EE00, "M", "ا"), + (0x1EE01, "M", "ب"), + (0x1EE02, "M", "ج"), + (0x1EE03, "M", "د"), + (0x1EE04, "X"), + (0x1EE05, "M", "و"), + (0x1EE06, "M", "ز"), + (0x1EE07, "M", "ح"), + (0x1EE08, "M", "ط"), + (0x1EE09, "M", "ي"), + (0x1EE0A, "M", "ك"), + (0x1EE0B, "M", "ل"), + (0x1EE0C, "M", "م"), + (0x1EE0D, "M", "ن"), + (0x1EE0E, "M", "س"), + (0x1EE0F, "M", "ع"), + (0x1EE10, "M", "ف"), + (0x1EE11, "M", "ص"), + (0x1EE12, "M", "ق"), + (0x1EE13, "M", "ر"), + (0x1EE14, "M", "ش"), + ] + + +def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EE15, "M", "ت"), + (0x1EE16, "M", "ث"), + (0x1EE17, "M", "خ"), + (0x1EE18, "M", "ذ"), + (0x1EE19, "M", "ض"), + (0x1EE1A, "M", "ظ"), + (0x1EE1B, "M", "غ"), + (0x1EE1C, "M", "ٮ"), + (0x1EE1D, "M", "ں"), + (0x1EE1E, "M", "ڡ"), + (0x1EE1F, "M", "ٯ"), + (0x1EE20, "X"), + (0x1EE21, "M", "ب"), + (0x1EE22, "M", "ج"), + (0x1EE23, "X"), + (0x1EE24, "M", "ه"), + (0x1EE25, "X"), + (0x1EE27, "M", "ح"), + (0x1EE28, "X"), + (0x1EE29, "M", "ي"), + (0x1EE2A, "M", "ك"), + (0x1EE2B, "M", "ل"), + (0x1EE2C, "M", "م"), + (0x1EE2D, "M", "ن"), + (0x1EE2E, "M", "س"), + (0x1EE2F, "M", "ع"), + (0x1EE30, "M", "ف"), + (0x1EE31, "M", "ص"), + (0x1EE32, "M", "ق"), + (0x1EE33, "X"), + (0x1EE34, "M", "ش"), + (0x1EE35, "M", "ت"), + (0x1EE36, "M", "ث"), + (0x1EE37, "M", "خ"), + (0x1EE38, "X"), + (0x1EE39, "M", "ض"), + (0x1EE3A, "X"), + (0x1EE3B, "M", "غ"), + (0x1EE3C, "X"), + (0x1EE42, "M", "ج"), + (0x1EE43, "X"), + (0x1EE47, "M", "ح"), + (0x1EE48, "X"), + (0x1EE49, "M", "ي"), + (0x1EE4A, "X"), + (0x1EE4B, "M", "ل"), + (0x1EE4C, "X"), + (0x1EE4D, "M", "ن"), + (0x1EE4E, "M", "س"), + (0x1EE4F, "M", "ع"), + (0x1EE50, "X"), + (0x1EE51, "M", "ص"), + (0x1EE52, "M", "ق"), + (0x1EE53, "X"), + (0x1EE54, "M", "ش"), + (0x1EE55, "X"), + (0x1EE57, "M", "خ"), + (0x1EE58, "X"), + (0x1EE59, "M", "ض"), + (0x1EE5A, "X"), + (0x1EE5B, "M", "غ"), + (0x1EE5C, "X"), + (0x1EE5D, "M", "ں"), + (0x1EE5E, "X"), + (0x1EE5F, "M", "ٯ"), + (0x1EE60, "X"), + (0x1EE61, "M", "ب"), + (0x1EE62, "M", "ج"), + (0x1EE63, "X"), + (0x1EE64, "M", "ه"), + (0x1EE65, "X"), + (0x1EE67, "M", "ح"), + (0x1EE68, "M", "ط"), + (0x1EE69, "M", "ي"), + (0x1EE6A, "M", "ك"), + (0x1EE6B, "X"), + (0x1EE6C, "M", "م"), + (0x1EE6D, "M", "ن"), + (0x1EE6E, "M", "س"), + (0x1EE6F, "M", "ع"), + (0x1EE70, "M", "ف"), + (0x1EE71, "M", "ص"), + (0x1EE72, "M", "ق"), + (0x1EE73, "X"), + (0x1EE74, "M", "ش"), + (0x1EE75, "M", "ت"), + (0x1EE76, "M", "ث"), 
+ (0x1EE77, "M", "خ"), + (0x1EE78, "X"), + (0x1EE79, "M", "ض"), + (0x1EE7A, "M", "ظ"), + (0x1EE7B, "M", "غ"), + (0x1EE7C, "M", "ٮ"), + (0x1EE7D, "X"), + (0x1EE7E, "M", "ڡ"), + (0x1EE7F, "X"), + (0x1EE80, "M", "ا"), + (0x1EE81, "M", "ب"), + (0x1EE82, "M", "ج"), + (0x1EE83, "M", "د"), + ] + + +def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1EE84, "M", "ه"), + (0x1EE85, "M", "و"), + (0x1EE86, "M", "ز"), + (0x1EE87, "M", "ح"), + (0x1EE88, "M", "ط"), + (0x1EE89, "M", "ي"), + (0x1EE8A, "X"), + (0x1EE8B, "M", "ل"), + (0x1EE8C, "M", "م"), + (0x1EE8D, "M", "ن"), + (0x1EE8E, "M", "س"), + (0x1EE8F, "M", "ع"), + (0x1EE90, "M", "ف"), + (0x1EE91, "M", "ص"), + (0x1EE92, "M", "ق"), + (0x1EE93, "M", "ر"), + (0x1EE94, "M", "ش"), + (0x1EE95, "M", "ت"), + (0x1EE96, "M", "ث"), + (0x1EE97, "M", "خ"), + (0x1EE98, "M", "ذ"), + (0x1EE99, "M", "ض"), + (0x1EE9A, "M", "ظ"), + (0x1EE9B, "M", "غ"), + (0x1EE9C, "X"), + (0x1EEA1, "M", "ب"), + (0x1EEA2, "M", "ج"), + (0x1EEA3, "M", "د"), + (0x1EEA4, "X"), + (0x1EEA5, "M", "و"), + (0x1EEA6, "M", "ز"), + (0x1EEA7, "M", "ح"), + (0x1EEA8, "M", "ط"), + (0x1EEA9, "M", "ي"), + (0x1EEAA, "X"), + (0x1EEAB, "M", "ل"), + (0x1EEAC, "M", "م"), + (0x1EEAD, "M", "ن"), + (0x1EEAE, "M", "س"), + (0x1EEAF, "M", "ع"), + (0x1EEB0, "M", "ف"), + (0x1EEB1, "M", "ص"), + (0x1EEB2, "M", "ق"), + (0x1EEB3, "M", "ر"), + (0x1EEB4, "M", "ش"), + (0x1EEB5, "M", "ت"), + (0x1EEB6, "M", "ث"), + (0x1EEB7, "M", "خ"), + (0x1EEB8, "M", "ذ"), + (0x1EEB9, "M", "ض"), + (0x1EEBA, "M", "ظ"), + (0x1EEBB, "M", "غ"), + (0x1EEBC, "X"), + (0x1EEF0, "V"), + (0x1EEF2, "X"), + (0x1F000, "V"), + (0x1F02C, "X"), + (0x1F030, "V"), + (0x1F094, "X"), + (0x1F0A0, "V"), + (0x1F0AF, "X"), + (0x1F0B1, "V"), + (0x1F0C0, "X"), + (0x1F0C1, "V"), + (0x1F0D0, "X"), + (0x1F0D1, "V"), + (0x1F0F6, "X"), + (0x1F101, "M", "0,"), + (0x1F102, "M", "1,"), + (0x1F103, "M", "2,"), + (0x1F104, "M", "3,"), + (0x1F105, "M", "4,"), + (0x1F106, "M", "5,"), + (0x1F107, "M", "6,"), + (0x1F108, "M", "7,"), + (0x1F109, "M", "8,"), + (0x1F10A, "M", "9,"), + (0x1F10B, "V"), + (0x1F110, "M", "(a)"), + (0x1F111, "M", "(b)"), + (0x1F112, "M", "(c)"), + (0x1F113, "M", "(d)"), + (0x1F114, "M", "(e)"), + (0x1F115, "M", "(f)"), + (0x1F116, "M", "(g)"), + (0x1F117, "M", "(h)"), + (0x1F118, "M", "(i)"), + (0x1F119, "M", "(j)"), + (0x1F11A, "M", "(k)"), + (0x1F11B, "M", "(l)"), + (0x1F11C, "M", "(m)"), + (0x1F11D, "M", "(n)"), + (0x1F11E, "M", "(o)"), + (0x1F11F, "M", "(p)"), + (0x1F120, "M", "(q)"), + (0x1F121, "M", "(r)"), + (0x1F122, "M", "(s)"), + (0x1F123, "M", "(t)"), + (0x1F124, "M", "(u)"), + (0x1F125, "M", "(v)"), + ] + + +def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1F126, "M", "(w)"), + (0x1F127, "M", "(x)"), + (0x1F128, "M", "(y)"), + (0x1F129, "M", "(z)"), + (0x1F12A, "M", "〔s〕"), + (0x1F12B, "M", "c"), + (0x1F12C, "M", "r"), + (0x1F12D, "M", "cd"), + (0x1F12E, "M", "wz"), + (0x1F12F, "V"), + (0x1F130, "M", "a"), + (0x1F131, "M", "b"), + (0x1F132, "M", "c"), + (0x1F133, "M", "d"), + (0x1F134, "M", "e"), + (0x1F135, "M", "f"), + (0x1F136, "M", "g"), + (0x1F137, "M", "h"), + (0x1F138, "M", "i"), + (0x1F139, "M", "j"), + (0x1F13A, "M", "k"), + (0x1F13B, "M", "l"), + (0x1F13C, "M", "m"), + (0x1F13D, "M", "n"), + (0x1F13E, "M", "o"), + (0x1F13F, "M", "p"), + (0x1F140, "M", "q"), + (0x1F141, "M", "r"), + (0x1F142, "M", "s"), + (0x1F143, "M", "t"), + (0x1F144, "M", "u"), + (0x1F145, "M", "v"), + (0x1F146, "M", "w"), + (0x1F147, "M", "x"), + (0x1F148, "M", "y"), + (0x1F149, "M", "z"), + 
(0x1F14A, "M", "hv"), + (0x1F14B, "M", "mv"), + (0x1F14C, "M", "sd"), + (0x1F14D, "M", "ss"), + (0x1F14E, "M", "ppv"), + (0x1F14F, "M", "wc"), + (0x1F150, "V"), + (0x1F16A, "M", "mc"), + (0x1F16B, "M", "md"), + (0x1F16C, "M", "mr"), + (0x1F16D, "V"), + (0x1F190, "M", "dj"), + (0x1F191, "V"), + (0x1F1AE, "X"), + (0x1F1E6, "V"), + (0x1F200, "M", "ほか"), + (0x1F201, "M", "ココ"), + (0x1F202, "M", "サ"), + (0x1F203, "X"), + (0x1F210, "M", "手"), + (0x1F211, "M", "字"), + (0x1F212, "M", "双"), + (0x1F213, "M", "デ"), + (0x1F214, "M", "二"), + (0x1F215, "M", "多"), + (0x1F216, "M", "解"), + (0x1F217, "M", "天"), + (0x1F218, "M", "交"), + (0x1F219, "M", "映"), + (0x1F21A, "M", "無"), + (0x1F21B, "M", "料"), + (0x1F21C, "M", "前"), + (0x1F21D, "M", "後"), + (0x1F21E, "M", "再"), + (0x1F21F, "M", "新"), + (0x1F220, "M", "初"), + (0x1F221, "M", "終"), + (0x1F222, "M", "生"), + (0x1F223, "M", "販"), + (0x1F224, "M", "声"), + (0x1F225, "M", "吹"), + (0x1F226, "M", "演"), + (0x1F227, "M", "投"), + (0x1F228, "M", "捕"), + (0x1F229, "M", "一"), + (0x1F22A, "M", "三"), + (0x1F22B, "M", "遊"), + (0x1F22C, "M", "左"), + (0x1F22D, "M", "中"), + (0x1F22E, "M", "右"), + (0x1F22F, "M", "指"), + (0x1F230, "M", "走"), + (0x1F231, "M", "打"), + (0x1F232, "M", "禁"), + (0x1F233, "M", "空"), + (0x1F234, "M", "合"), + (0x1F235, "M", "満"), + (0x1F236, "M", "有"), + (0x1F237, "M", "月"), + (0x1F238, "M", "申"), + (0x1F239, "M", "割"), + (0x1F23A, "M", "営"), + (0x1F23B, "M", "配"), + (0x1F23C, "X"), + ] + + +def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x1F240, "M", "〔本〕"), + (0x1F241, "M", "〔三〕"), + (0x1F242, "M", "〔二〕"), + (0x1F243, "M", "〔安〕"), + (0x1F244, "M", "〔点〕"), + (0x1F245, "M", "〔打〕"), + (0x1F246, "M", "〔盗〕"), + (0x1F247, "M", "〔勝〕"), + (0x1F248, "M", "〔敗〕"), + (0x1F249, "X"), + (0x1F250, "M", "得"), + (0x1F251, "M", "可"), + (0x1F252, "X"), + (0x1F260, "V"), + (0x1F266, "X"), + (0x1F300, "V"), + (0x1F6D8, "X"), + (0x1F6DC, "V"), + (0x1F6ED, "X"), + (0x1F6F0, "V"), + (0x1F6FD, "X"), + (0x1F700, "V"), + (0x1F777, "X"), + (0x1F77B, "V"), + (0x1F7DA, "X"), + (0x1F7E0, "V"), + (0x1F7EC, "X"), + (0x1F7F0, "V"), + (0x1F7F1, "X"), + (0x1F800, "V"), + (0x1F80C, "X"), + (0x1F810, "V"), + (0x1F848, "X"), + (0x1F850, "V"), + (0x1F85A, "X"), + (0x1F860, "V"), + (0x1F888, "X"), + (0x1F890, "V"), + (0x1F8AE, "X"), + (0x1F8B0, "V"), + (0x1F8BC, "X"), + (0x1F8C0, "V"), + (0x1F8C2, "X"), + (0x1F900, "V"), + (0x1FA54, "X"), + (0x1FA60, "V"), + (0x1FA6E, "X"), + (0x1FA70, "V"), + (0x1FA7D, "X"), + (0x1FA80, "V"), + (0x1FA8A, "X"), + (0x1FA8F, "V"), + (0x1FAC7, "X"), + (0x1FACE, "V"), + (0x1FADD, "X"), + (0x1FADF, "V"), + (0x1FAEA, "X"), + (0x1FAF0, "V"), + (0x1FAF9, "X"), + (0x1FB00, "V"), + (0x1FB93, "X"), + (0x1FB94, "V"), + (0x1FBF0, "M", "0"), + (0x1FBF1, "M", "1"), + (0x1FBF2, "M", "2"), + (0x1FBF3, "M", "3"), + (0x1FBF4, "M", "4"), + (0x1FBF5, "M", "5"), + (0x1FBF6, "M", "6"), + (0x1FBF7, "M", "7"), + (0x1FBF8, "M", "8"), + (0x1FBF9, "M", "9"), + (0x1FBFA, "X"), + (0x20000, "V"), + (0x2A6E0, "X"), + (0x2A700, "V"), + (0x2B73A, "X"), + (0x2B740, "V"), + (0x2B81E, "X"), + (0x2B820, "V"), + (0x2CEA2, "X"), + (0x2CEB0, "V"), + (0x2EBE1, "X"), + (0x2EBF0, "V"), + (0x2EE5E, "X"), + (0x2F800, "M", "丽"), + (0x2F801, "M", "丸"), + (0x2F802, "M", "乁"), + (0x2F803, "M", "𠄢"), + (0x2F804, "M", "你"), + (0x2F805, "M", "侮"), + (0x2F806, "M", "侻"), + (0x2F807, "M", "倂"), + (0x2F808, "M", "偺"), + (0x2F809, "M", "備"), + (0x2F80A, "M", "僧"), + (0x2F80B, "M", "像"), + (0x2F80C, "M", "㒞"), + (0x2F80D, "M", "𠘺"), + (0x2F80E, "M", "免"), + ] + + +def 
_seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F80F, "M", "兔"), + (0x2F810, "M", "兤"), + (0x2F811, "M", "具"), + (0x2F812, "M", "𠔜"), + (0x2F813, "M", "㒹"), + (0x2F814, "M", "內"), + (0x2F815, "M", "再"), + (0x2F816, "M", "𠕋"), + (0x2F817, "M", "冗"), + (0x2F818, "M", "冤"), + (0x2F819, "M", "仌"), + (0x2F81A, "M", "冬"), + (0x2F81B, "M", "况"), + (0x2F81C, "M", "𩇟"), + (0x2F81D, "M", "凵"), + (0x2F81E, "M", "刃"), + (0x2F81F, "M", "㓟"), + (0x2F820, "M", "刻"), + (0x2F821, "M", "剆"), + (0x2F822, "M", "割"), + (0x2F823, "M", "剷"), + (0x2F824, "M", "㔕"), + (0x2F825, "M", "勇"), + (0x2F826, "M", "勉"), + (0x2F827, "M", "勤"), + (0x2F828, "M", "勺"), + (0x2F829, "M", "包"), + (0x2F82A, "M", "匆"), + (0x2F82B, "M", "北"), + (0x2F82C, "M", "卉"), + (0x2F82D, "M", "卑"), + (0x2F82E, "M", "博"), + (0x2F82F, "M", "即"), + (0x2F830, "M", "卽"), + (0x2F831, "M", "卿"), + (0x2F834, "M", "𠨬"), + (0x2F835, "M", "灰"), + (0x2F836, "M", "及"), + (0x2F837, "M", "叟"), + (0x2F838, "M", "𠭣"), + (0x2F839, "M", "叫"), + (0x2F83A, "M", "叱"), + (0x2F83B, "M", "吆"), + (0x2F83C, "M", "咞"), + (0x2F83D, "M", "吸"), + (0x2F83E, "M", "呈"), + (0x2F83F, "M", "周"), + (0x2F840, "M", "咢"), + (0x2F841, "M", "哶"), + (0x2F842, "M", "唐"), + (0x2F843, "M", "啓"), + (0x2F844, "M", "啣"), + (0x2F845, "M", "善"), + (0x2F847, "M", "喙"), + (0x2F848, "M", "喫"), + (0x2F849, "M", "喳"), + (0x2F84A, "M", "嗂"), + (0x2F84B, "M", "圖"), + (0x2F84C, "M", "嘆"), + (0x2F84D, "M", "圗"), + (0x2F84E, "M", "噑"), + (0x2F84F, "M", "噴"), + (0x2F850, "M", "切"), + (0x2F851, "M", "壮"), + (0x2F852, "M", "城"), + (0x2F853, "M", "埴"), + (0x2F854, "M", "堍"), + (0x2F855, "M", "型"), + (0x2F856, "M", "堲"), + (0x2F857, "M", "報"), + (0x2F858, "M", "墬"), + (0x2F859, "M", "𡓤"), + (0x2F85A, "M", "売"), + (0x2F85B, "M", "壷"), + (0x2F85C, "M", "夆"), + (0x2F85D, "M", "多"), + (0x2F85E, "M", "夢"), + (0x2F85F, "M", "奢"), + (0x2F860, "M", "𡚨"), + (0x2F861, "M", "𡛪"), + (0x2F862, "M", "姬"), + (0x2F863, "M", "娛"), + (0x2F864, "M", "娧"), + (0x2F865, "M", "姘"), + (0x2F866, "M", "婦"), + (0x2F867, "M", "㛮"), + (0x2F868, "M", "㛼"), + (0x2F869, "M", "嬈"), + (0x2F86A, "M", "嬾"), + (0x2F86C, "M", "𡧈"), + (0x2F86D, "M", "寃"), + (0x2F86E, "M", "寘"), + (0x2F86F, "M", "寧"), + (0x2F870, "M", "寳"), + (0x2F871, "M", "𡬘"), + (0x2F872, "M", "寿"), + (0x2F873, "M", "将"), + (0x2F874, "M", "当"), + (0x2F875, "M", "尢"), + (0x2F876, "M", "㞁"), + ] + + +def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F877, "M", "屠"), + (0x2F878, "M", "屮"), + (0x2F879, "M", "峀"), + (0x2F87A, "M", "岍"), + (0x2F87B, "M", "𡷤"), + (0x2F87C, "M", "嵃"), + (0x2F87D, "M", "𡷦"), + (0x2F87E, "M", "嵮"), + (0x2F87F, "M", "嵫"), + (0x2F880, "M", "嵼"), + (0x2F881, "M", "巡"), + (0x2F882, "M", "巢"), + (0x2F883, "M", "㠯"), + (0x2F884, "M", "巽"), + (0x2F885, "M", "帨"), + (0x2F886, "M", "帽"), + (0x2F887, "M", "幩"), + (0x2F888, "M", "㡢"), + (0x2F889, "M", "𢆃"), + (0x2F88A, "M", "㡼"), + (0x2F88B, "M", "庰"), + (0x2F88C, "M", "庳"), + (0x2F88D, "M", "庶"), + (0x2F88E, "M", "廊"), + (0x2F88F, "M", "𪎒"), + (0x2F890, "M", "廾"), + (0x2F891, "M", "𢌱"), + (0x2F893, "M", "舁"), + (0x2F894, "M", "弢"), + (0x2F896, "M", "㣇"), + (0x2F897, "M", "𣊸"), + (0x2F898, "M", "𦇚"), + (0x2F899, "M", "形"), + (0x2F89A, "M", "彫"), + (0x2F89B, "M", "㣣"), + (0x2F89C, "M", "徚"), + (0x2F89D, "M", "忍"), + (0x2F89E, "M", "志"), + (0x2F89F, "M", "忹"), + (0x2F8A0, "M", "悁"), + (0x2F8A1, "M", "㤺"), + (0x2F8A2, "M", "㤜"), + (0x2F8A3, "M", "悔"), + (0x2F8A4, "M", "𢛔"), + (0x2F8A5, "M", "惇"), + (0x2F8A6, "M", "慈"), + (0x2F8A7, "M", "慌"), + 
(0x2F8A8, "M", "慎"), + (0x2F8A9, "M", "慌"), + (0x2F8AA, "M", "慺"), + (0x2F8AB, "M", "憎"), + (0x2F8AC, "M", "憲"), + (0x2F8AD, "M", "憤"), + (0x2F8AE, "M", "憯"), + (0x2F8AF, "M", "懞"), + (0x2F8B0, "M", "懲"), + (0x2F8B1, "M", "懶"), + (0x2F8B2, "M", "成"), + (0x2F8B3, "M", "戛"), + (0x2F8B4, "M", "扝"), + (0x2F8B5, "M", "抱"), + (0x2F8B6, "M", "拔"), + (0x2F8B7, "M", "捐"), + (0x2F8B8, "M", "𢬌"), + (0x2F8B9, "M", "挽"), + (0x2F8BA, "M", "拼"), + (0x2F8BB, "M", "捨"), + (0x2F8BC, "M", "掃"), + (0x2F8BD, "M", "揤"), + (0x2F8BE, "M", "𢯱"), + (0x2F8BF, "M", "搢"), + (0x2F8C0, "M", "揅"), + (0x2F8C1, "M", "掩"), + (0x2F8C2, "M", "㨮"), + (0x2F8C3, "M", "摩"), + (0x2F8C4, "M", "摾"), + (0x2F8C5, "M", "撝"), + (0x2F8C6, "M", "摷"), + (0x2F8C7, "M", "㩬"), + (0x2F8C8, "M", "敏"), + (0x2F8C9, "M", "敬"), + (0x2F8CA, "M", "𣀊"), + (0x2F8CB, "M", "旣"), + (0x2F8CC, "M", "書"), + (0x2F8CD, "M", "晉"), + (0x2F8CE, "M", "㬙"), + (0x2F8CF, "M", "暑"), + (0x2F8D0, "M", "㬈"), + (0x2F8D1, "M", "㫤"), + (0x2F8D2, "M", "冒"), + (0x2F8D3, "M", "冕"), + (0x2F8D4, "M", "最"), + (0x2F8D5, "M", "暜"), + (0x2F8D6, "M", "肭"), + (0x2F8D7, "M", "䏙"), + (0x2F8D8, "M", "朗"), + (0x2F8D9, "M", "望"), + (0x2F8DA, "M", "朡"), + (0x2F8DB, "M", "杞"), + (0x2F8DC, "M", "杓"), + ] + + +def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F8DD, "M", "𣏃"), + (0x2F8DE, "M", "㭉"), + (0x2F8DF, "M", "柺"), + (0x2F8E0, "M", "枅"), + (0x2F8E1, "M", "桒"), + (0x2F8E2, "M", "梅"), + (0x2F8E3, "M", "𣑭"), + (0x2F8E4, "M", "梎"), + (0x2F8E5, "M", "栟"), + (0x2F8E6, "M", "椔"), + (0x2F8E7, "M", "㮝"), + (0x2F8E8, "M", "楂"), + (0x2F8E9, "M", "榣"), + (0x2F8EA, "M", "槪"), + (0x2F8EB, "M", "檨"), + (0x2F8EC, "M", "𣚣"), + (0x2F8ED, "M", "櫛"), + (0x2F8EE, "M", "㰘"), + (0x2F8EF, "M", "次"), + (0x2F8F0, "M", "𣢧"), + (0x2F8F1, "M", "歔"), + (0x2F8F2, "M", "㱎"), + (0x2F8F3, "M", "歲"), + (0x2F8F4, "M", "殟"), + (0x2F8F5, "M", "殺"), + (0x2F8F6, "M", "殻"), + (0x2F8F7, "M", "𣪍"), + (0x2F8F8, "M", "𡴋"), + (0x2F8F9, "M", "𣫺"), + (0x2F8FA, "M", "汎"), + (0x2F8FB, "M", "𣲼"), + (0x2F8FC, "M", "沿"), + (0x2F8FD, "M", "泍"), + (0x2F8FE, "M", "汧"), + (0x2F8FF, "M", "洖"), + (0x2F900, "M", "派"), + (0x2F901, "M", "海"), + (0x2F902, "M", "流"), + (0x2F903, "M", "浩"), + (0x2F904, "M", "浸"), + (0x2F905, "M", "涅"), + (0x2F906, "M", "𣴞"), + (0x2F907, "M", "洴"), + (0x2F908, "M", "港"), + (0x2F909, "M", "湮"), + (0x2F90A, "M", "㴳"), + (0x2F90B, "M", "滋"), + (0x2F90C, "M", "滇"), + (0x2F90D, "M", "𣻑"), + (0x2F90E, "M", "淹"), + (0x2F90F, "M", "潮"), + (0x2F910, "M", "𣽞"), + (0x2F911, "M", "𣾎"), + (0x2F912, "M", "濆"), + (0x2F913, "M", "瀹"), + (0x2F914, "M", "瀞"), + (0x2F915, "M", "瀛"), + (0x2F916, "M", "㶖"), + (0x2F917, "M", "灊"), + (0x2F918, "M", "災"), + (0x2F919, "M", "灷"), + (0x2F91A, "M", "炭"), + (0x2F91B, "M", "𠔥"), + (0x2F91C, "M", "煅"), + (0x2F91D, "M", "𤉣"), + (0x2F91E, "M", "熜"), + (0x2F91F, "M", "𤎫"), + (0x2F920, "M", "爨"), + (0x2F921, "M", "爵"), + (0x2F922, "M", "牐"), + (0x2F923, "M", "𤘈"), + (0x2F924, "M", "犀"), + (0x2F925, "M", "犕"), + (0x2F926, "M", "𤜵"), + (0x2F927, "M", "𤠔"), + (0x2F928, "M", "獺"), + (0x2F929, "M", "王"), + (0x2F92A, "M", "㺬"), + (0x2F92B, "M", "玥"), + (0x2F92C, "M", "㺸"), + (0x2F92E, "M", "瑇"), + (0x2F92F, "M", "瑜"), + (0x2F930, "M", "瑱"), + (0x2F931, "M", "璅"), + (0x2F932, "M", "瓊"), + (0x2F933, "M", "㼛"), + (0x2F934, "M", "甤"), + (0x2F935, "M", "𤰶"), + (0x2F936, "M", "甾"), + (0x2F937, "M", "𤲒"), + (0x2F938, "M", "異"), + (0x2F939, "M", "𢆟"), + (0x2F93A, "M", "瘐"), + (0x2F93B, "M", "𤾡"), + (0x2F93C, "M", "𤾸"), + (0x2F93D, "M", "𥁄"), + (0x2F93E, "M", "㿼"), + (0x2F93F, "M", 
"䀈"), + (0x2F940, "M", "直"), + (0x2F941, "M", "𥃳"), + ] + + +def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F942, "M", "𥃲"), + (0x2F943, "M", "𥄙"), + (0x2F944, "M", "𥄳"), + (0x2F945, "M", "眞"), + (0x2F946, "M", "真"), + (0x2F948, "M", "睊"), + (0x2F949, "M", "䀹"), + (0x2F94A, "M", "瞋"), + (0x2F94B, "M", "䁆"), + (0x2F94C, "M", "䂖"), + (0x2F94D, "M", "𥐝"), + (0x2F94E, "M", "硎"), + (0x2F94F, "M", "碌"), + (0x2F950, "M", "磌"), + (0x2F951, "M", "䃣"), + (0x2F952, "M", "𥘦"), + (0x2F953, "M", "祖"), + (0x2F954, "M", "𥚚"), + (0x2F955, "M", "𥛅"), + (0x2F956, "M", "福"), + (0x2F957, "M", "秫"), + (0x2F958, "M", "䄯"), + (0x2F959, "M", "穀"), + (0x2F95A, "M", "穊"), + (0x2F95B, "M", "穏"), + (0x2F95C, "M", "𥥼"), + (0x2F95D, "M", "𥪧"), + (0x2F95F, "M", "竮"), + (0x2F960, "M", "䈂"), + (0x2F961, "M", "𥮫"), + (0x2F962, "M", "篆"), + (0x2F963, "M", "築"), + (0x2F964, "M", "䈧"), + (0x2F965, "M", "𥲀"), + (0x2F966, "M", "糒"), + (0x2F967, "M", "䊠"), + (0x2F968, "M", "糨"), + (0x2F969, "M", "糣"), + (0x2F96A, "M", "紀"), + (0x2F96B, "M", "𥾆"), + (0x2F96C, "M", "絣"), + (0x2F96D, "M", "䌁"), + (0x2F96E, "M", "緇"), + (0x2F96F, "M", "縂"), + (0x2F970, "M", "繅"), + (0x2F971, "M", "䌴"), + (0x2F972, "M", "𦈨"), + (0x2F973, "M", "𦉇"), + (0x2F974, "M", "䍙"), + (0x2F975, "M", "𦋙"), + (0x2F976, "M", "罺"), + (0x2F977, "M", "𦌾"), + (0x2F978, "M", "羕"), + (0x2F979, "M", "翺"), + (0x2F97A, "M", "者"), + (0x2F97B, "M", "𦓚"), + (0x2F97C, "M", "𦔣"), + (0x2F97D, "M", "聠"), + (0x2F97E, "M", "𦖨"), + (0x2F97F, "M", "聰"), + (0x2F980, "M", "𣍟"), + (0x2F981, "M", "䏕"), + (0x2F982, "M", "育"), + (0x2F983, "M", "脃"), + (0x2F984, "M", "䐋"), + (0x2F985, "M", "脾"), + (0x2F986, "M", "媵"), + (0x2F987, "M", "𦞧"), + (0x2F988, "M", "𦞵"), + (0x2F989, "M", "𣎓"), + (0x2F98A, "M", "𣎜"), + (0x2F98B, "M", "舁"), + (0x2F98C, "M", "舄"), + (0x2F98D, "M", "辞"), + (0x2F98E, "M", "䑫"), + (0x2F98F, "M", "芑"), + (0x2F990, "M", "芋"), + (0x2F991, "M", "芝"), + (0x2F992, "M", "劳"), + (0x2F993, "M", "花"), + (0x2F994, "M", "芳"), + (0x2F995, "M", "芽"), + (0x2F996, "M", "苦"), + (0x2F997, "M", "𦬼"), + (0x2F998, "M", "若"), + (0x2F999, "M", "茝"), + (0x2F99A, "M", "荣"), + (0x2F99B, "M", "莭"), + (0x2F99C, "M", "茣"), + (0x2F99D, "M", "莽"), + (0x2F99E, "M", "菧"), + (0x2F99F, "M", "著"), + (0x2F9A0, "M", "荓"), + (0x2F9A1, "M", "菊"), + (0x2F9A2, "M", "菌"), + (0x2F9A3, "M", "菜"), + (0x2F9A4, "M", "𦰶"), + (0x2F9A5, "M", "𦵫"), + (0x2F9A6, "M", "𦳕"), + (0x2F9A7, "M", "䔫"), + ] + + +def _seg_82() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2F9A8, "M", "蓱"), + (0x2F9A9, "M", "蓳"), + (0x2F9AA, "M", "蔖"), + (0x2F9AB, "M", "𧏊"), + (0x2F9AC, "M", "蕤"), + (0x2F9AD, "M", "𦼬"), + (0x2F9AE, "M", "䕝"), + (0x2F9AF, "M", "䕡"), + (0x2F9B0, "M", "𦾱"), + (0x2F9B1, "M", "𧃒"), + (0x2F9B2, "M", "䕫"), + (0x2F9B3, "M", "虐"), + (0x2F9B4, "M", "虜"), + (0x2F9B5, "M", "虧"), + (0x2F9B6, "M", "虩"), + (0x2F9B7, "M", "蚩"), + (0x2F9B8, "M", "蚈"), + (0x2F9B9, "M", "蜎"), + (0x2F9BA, "M", "蛢"), + (0x2F9BB, "M", "蝹"), + (0x2F9BC, "M", "蜨"), + (0x2F9BD, "M", "蝫"), + (0x2F9BE, "M", "螆"), + (0x2F9BF, "M", "䗗"), + (0x2F9C0, "M", "蟡"), + (0x2F9C1, "M", "蠁"), + (0x2F9C2, "M", "䗹"), + (0x2F9C3, "M", "衠"), + (0x2F9C4, "M", "衣"), + (0x2F9C5, "M", "𧙧"), + (0x2F9C6, "M", "裗"), + (0x2F9C7, "M", "裞"), + (0x2F9C8, "M", "䘵"), + (0x2F9C9, "M", "裺"), + (0x2F9CA, "M", "㒻"), + (0x2F9CB, "M", "𧢮"), + (0x2F9CC, "M", "𧥦"), + (0x2F9CD, "M", "䚾"), + (0x2F9CE, "M", "䛇"), + (0x2F9CF, "M", "誠"), + (0x2F9D0, "M", "諭"), + (0x2F9D1, "M", "變"), + (0x2F9D2, "M", "豕"), + (0x2F9D3, "M", "𧲨"), + (0x2F9D4, 
"M", "貫"), + (0x2F9D5, "M", "賁"), + (0x2F9D6, "M", "贛"), + (0x2F9D7, "M", "起"), + (0x2F9D8, "M", "𧼯"), + (0x2F9D9, "M", "𠠄"), + (0x2F9DA, "M", "跋"), + (0x2F9DB, "M", "趼"), + (0x2F9DC, "M", "跰"), + (0x2F9DD, "M", "𠣞"), + (0x2F9DE, "M", "軔"), + (0x2F9DF, "M", "輸"), + (0x2F9E0, "M", "𨗒"), + (0x2F9E1, "M", "𨗭"), + (0x2F9E2, "M", "邔"), + (0x2F9E3, "M", "郱"), + (0x2F9E4, "M", "鄑"), + (0x2F9E5, "M", "𨜮"), + (0x2F9E6, "M", "鄛"), + (0x2F9E7, "M", "鈸"), + (0x2F9E8, "M", "鋗"), + (0x2F9E9, "M", "鋘"), + (0x2F9EA, "M", "鉼"), + (0x2F9EB, "M", "鏹"), + (0x2F9EC, "M", "鐕"), + (0x2F9ED, "M", "𨯺"), + (0x2F9EE, "M", "開"), + (0x2F9EF, "M", "䦕"), + (0x2F9F0, "M", "閷"), + (0x2F9F1, "M", "𨵷"), + (0x2F9F2, "M", "䧦"), + (0x2F9F3, "M", "雃"), + (0x2F9F4, "M", "嶲"), + (0x2F9F5, "M", "霣"), + (0x2F9F6, "M", "𩅅"), + (0x2F9F7, "M", "𩈚"), + (0x2F9F8, "M", "䩮"), + (0x2F9F9, "M", "䩶"), + (0x2F9FA, "M", "韠"), + (0x2F9FB, "M", "𩐊"), + (0x2F9FC, "M", "䪲"), + (0x2F9FD, "M", "𩒖"), + (0x2F9FE, "M", "頋"), + (0x2FA00, "M", "頩"), + (0x2FA01, "M", "𩖶"), + (0x2FA02, "M", "飢"), + (0x2FA03, "M", "䬳"), + (0x2FA04, "M", "餩"), + (0x2FA05, "M", "馧"), + (0x2FA06, "M", "駂"), + (0x2FA07, "M", "駾"), + (0x2FA08, "M", "䯎"), + (0x2FA09, "M", "𩬰"), + (0x2FA0A, "M", "鬒"), + (0x2FA0B, "M", "鱀"), + (0x2FA0C, "M", "鳽"), + ] + + +def _seg_83() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]: + return [ + (0x2FA0D, "M", "䳎"), + (0x2FA0E, "M", "䳭"), + (0x2FA0F, "M", "鵧"), + (0x2FA10, "M", "𪃎"), + (0x2FA11, "M", "䳸"), + (0x2FA12, "M", "𪄅"), + (0x2FA13, "M", "𪈎"), + (0x2FA14, "M", "𪊑"), + (0x2FA15, "M", "麻"), + (0x2FA16, "M", "䵖"), + (0x2FA17, "M", "黹"), + (0x2FA18, "M", "黾"), + (0x2FA19, "M", "鼅"), + (0x2FA1A, "M", "鼏"), + (0x2FA1B, "M", "鼖"), + (0x2FA1C, "M", "鼻"), + (0x2FA1D, "M", "𪘀"), + (0x2FA1E, "X"), + (0x30000, "V"), + (0x3134B, "X"), + (0x31350, "V"), + (0x323B0, "X"), + (0xE0100, "I"), + (0xE01F0, "X"), + ] + + +uts46data = tuple( + _seg_0() + + _seg_1() + + _seg_2() + + _seg_3() + + _seg_4() + + _seg_5() + + _seg_6() + + _seg_7() + + _seg_8() + + _seg_9() + + _seg_10() + + _seg_11() + + _seg_12() + + _seg_13() + + _seg_14() + + _seg_15() + + _seg_16() + + _seg_17() + + _seg_18() + + _seg_19() + + _seg_20() + + _seg_21() + + _seg_22() + + _seg_23() + + _seg_24() + + _seg_25() + + _seg_26() + + _seg_27() + + _seg_28() + + _seg_29() + + _seg_30() + + _seg_31() + + _seg_32() + + _seg_33() + + _seg_34() + + _seg_35() + + _seg_36() + + _seg_37() + + _seg_38() + + _seg_39() + + _seg_40() + + _seg_41() + + _seg_42() + + _seg_43() + + _seg_44() + + _seg_45() + + _seg_46() + + _seg_47() + + _seg_48() + + _seg_49() + + _seg_50() + + _seg_51() + + _seg_52() + + _seg_53() + + _seg_54() + + _seg_55() + + _seg_56() + + _seg_57() + + _seg_58() + + _seg_59() + + _seg_60() + + _seg_61() + + _seg_62() + + _seg_63() + + _seg_64() + + _seg_65() + + _seg_66() + + _seg_67() + + _seg_68() + + _seg_69() + + _seg_70() + + _seg_71() + + _seg_72() + + _seg_73() + + _seg_74() + + _seg_75() + + _seg_76() + + _seg_77() + + _seg_78() + + _seg_79() + + _seg_80() + + _seg_81() + + _seg_82() + + _seg_83() +) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...] diff --git a/py311/lib/python3.11/site-packages/imageio_ffmpeg/__init__.py b/py311/lib/python3.11/site-packages/imageio_ffmpeg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b79d867e2e6508a97ea4d27d8481afd561b1dd69 --- /dev/null +++ b/py311/lib/python3.11/site-packages/imageio_ffmpeg/__init__.py @@ -0,0 +1,8 @@ +""" imageio_ffmpeg, FFMPEG wrapper for Python. 
+""" + +# flake8: noqa + +from ._definitions import __version__ +from ._io import count_frames_and_secs, read_frames, write_frames +from ._utils import get_ffmpeg_exe, get_ffmpeg_version diff --git a/py311/lib/python3.11/site-packages/imageio_ffmpeg/_definitions.py b/py311/lib/python3.11/site-packages/imageio_ffmpeg/_definitions.py new file mode 100644 index 0000000000000000000000000000000000000000..d93d7587a1b5a1845e1a0632c6823a96ab54d28b --- /dev/null +++ b/py311/lib/python3.11/site-packages/imageio_ffmpeg/_definitions.py @@ -0,0 +1,69 @@ +import sys +import platform + + +__version__ = "0.6.0" + + +def get_platform(): + # get_os_string and get_arch are taken from wgpu-py + return _get_os_string() + "-" + _get_arch() + + +def _get_os_string(): + if sys.platform.startswith("win"): + return "windows" + elif sys.platform.startswith("darwin"): + return "macos" + elif sys.platform.startswith("linux"): + return "linux" + else: + return sys.platform + + +def _get_arch(): + # See e.g.: https://stackoverflow.com/questions/45124888 + is_64_bit = sys.maxsize > 2**32 + machine = platform.machine() + + if machine == "armv7l": + # Raspberry pi + detected_arch = "armv7" + elif is_64_bit and machine.startswith(("arm", "aarch64")): + # Includes MacOS M1, arm linux, ... + detected_arch = "aarch64" + elif is_64_bit: + detected_arch = "x86_64" + else: + detected_arch = "i686" + return detected_arch + + +# The Linux static builds (https://johnvansickle.com/ffmpeg/) are build +# for Linux kernels 3.2.0 and up (at the time of writing, ffmpeg v7.0.2). +# This corresponds to Ubuntu 12.04 / Debian 7. I'm not entirely sure' +# what manylinux matches that, but I think manylinux2014 should be safe. + + +# Platform string -> ffmpeg filename +FNAME_PER_PLATFORM = { + "macos-aarch64": "ffmpeg-macos-aarch64-v7.1", + "macos-x86_64": "ffmpeg-macos-x86_64-v7.1", # 10.9+ + "windows-x86_64": "ffmpeg-win-x86_64-v7.1.exe", + "windows-i686": "ffmpeg-win32-v4.2.2.exe", # Windows 7+ + "linux-aarch64": "ffmpeg-linux-aarch64-v7.0.2", # Kernel 3.2.0+ + "linux-x86_64": "ffmpeg-linux-x86_64-v7.0.2", +} + +osxplats = "macosx_10_9_intel.macosx_10_9_x86_64" +osxarmplats = "macosx_11_0_arm64" + +# Wheel tag -> platform string +WHEEL_BUILDS = { + "py3-none-manylinux2014_x86_64": "linux-x86_64", + "py3-none-manylinux2014_aarch64": "linux-aarch64", + "py3-none-" + osxplats: "macos-x86_64", + "py3-none-" + osxarmplats: "macos-aarch64", + "py3-none-win32": "windows-i686", + "py3-none-win_amd64": "windows-x86_64", +} diff --git a/py311/lib/python3.11/site-packages/imageio_ffmpeg/_io.py b/py311/lib/python3.11/site-packages/imageio_ffmpeg/_io.py new file mode 100644 index 0000000000000000000000000000000000000000..faf1ee89ea5bca973e946114a18ef5701a82f42f --- /dev/null +++ b/py311/lib/python3.11/site-packages/imageio_ffmpeg/_io.py @@ -0,0 +1,693 @@ +import pathlib +import subprocess +import sys +import time +from collections import defaultdict +from functools import lru_cache + +from ._parsing import LogCatcher, cvsecs, parse_ffmpeg_header +from ._utils import _popen_kwargs, get_ffmpeg_exe, logger + +ISWIN = sys.platform.startswith("win") + +h264_encoder_preference = defaultdict(lambda: -1) +# The libx264 was the default encoder for a longe time with imageio +h264_encoder_preference["libx264"] = 100 + +# Encoder with the nvidia graphics card dedicated hardware +h264_encoder_preference["h264_nvenc"] = 90 +# Deprecated names for the same encoder +h264_encoder_preference["nvenc_h264"] = 90 +h264_encoder_preference["nvenc"] = 90 + +# vaapi provides 
+
+def ffmpeg_test_encoder(encoder):
+    # Use the null streams to check whether we can encode anything
+    # https://trac.ffmpeg.org/wiki/Null
+    cmd = [
+        get_ffmpeg_exe(),
+        "-hide_banner",
+        "-f",
+        "lavfi",
+        "-i",
+        "nullsrc=s=256x256:d=8",
+        "-vcodec",
+        encoder,
+        "-f",
+        "null",
+        "-",
+    ]
+    p = subprocess.run(
+        cmd,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
+    return p.returncode == 0
+
+
+def get_compiled_h264_encoders():
+    cmd = [get_ffmpeg_exe(), "-hide_banner", "-encoders"]
+    p = subprocess.run(
+        cmd,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
+    stdout = p.stdout.decode().replace("\r", "")
+    # 2022/04/08: hmaarrfk
+    # I couldn't find a good way to get the list of available encoders from
+    # the ffmpeg command, so we parse the table it prints.
+    # The ffmpeg command returns a table that looks like the one below.
+    # Notice the leading space at the very beginning.
+    # On Ubuntu with libffmpeg-nvenc-dev we get:
+    # $ ffmpeg -hide_banner -encoders | grep -i h.264
+    #
+    #  Encoders:
+    #  V..... = Video
+    #  A..... = Audio
+    #  S..... = Subtitle
+    #  .F.... = Frame-level multithreading
+    #  ..S... = Slice-level multithreading
+    #  ...X.. = Codec is experimental
+    #  ....B. = Supports draw_horiz_band
+    #  .....D = Supports direct rendering method 1
+    #  ------
+    #  V..... libx264 libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (codec h264)
+    #  V..... libx264rgb libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 RGB (codec h264)
+    #  V....D h264_nvenc NVIDIA NVENC H.264 encoder (codec h264)
+    #  V..... h264_omx OpenMAX IL H.264 video encoder (codec h264)
+    #  V..... h264_qsv H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (Intel Quick Sync Video acceleration) (codec h264)
+    #  V..... h264_v4l2m2m V4L2 mem2mem H.264 encoder wrapper (codec h264)
+    #  V....D h264_vaapi H.264/AVC (VAAPI) (codec h264)
+    #  V..... nvenc NVIDIA NVENC H.264 encoder (codec h264)
+    #  V..... nvenc_h264 NVIDIA NVENC H.264 encoder (codec h264)
+    #
+    # However, just because ffmpeg was compiled with these options enabled,
+    # it doesn't mean that encoding with them will actually succeed.
+    header_footer = stdout.split("------")
+    footer = header_footer[1].strip("\n")
+    encoders = []
+    for line in footer.split("\n"):
+        # Strip to remove any leading spaces
+        line = line.strip()
+        encoder = line.split(" ")[1]
+
+        if encoder in h264_encoder_preference:
+            # These encoders are known to support H.264.
+            # We forcibly include them in case their description changes to
+            # not include the string "H.264".
+            encoders.append(encoder)
+        elif (line[0] == "V") and ("H.264" in line):
+            encoders.append(encoder)
+
+    encoders.sort(reverse=True, key=lambda x: h264_encoder_preference[x])
+    if "h264_nvenc" in encoders:
+        # Remove deprecated names for the same encoder
+        for encoder in ["nvenc", "nvenc_h264"]:
+            if encoder in encoders:
+                encoders.remove(encoder)
+    # Return an immutable tuple to avoid users corrupting the lru_cache
+    return tuple(encoders)
+
+
+@lru_cache()
+def get_first_available_h264_encoder():
+    compiled_encoders = get_compiled_h264_encoders()
+    for encoder in compiled_encoders:
+        if ffmpeg_test_encoder(encoder):
+            return encoder
+    else:
+        raise RuntimeError(
+            "No valid H.264 encoder was found with the ffmpeg installation"
+        )
+
+
+def count_frames_and_secs(path):
+    """
+    Get the number of frames and number of seconds for the given video
+    file. Note that this operation can be quite slow for large files.
+
+    Disclaimer: I've seen this produce different results from actually reading
+    the frames with older versions of ffmpeg (2.x). Therefore I cannot say
+    with 100% certainty that the returned values are always exact.
+    """
+    # https://stackoverflow.com/questions/2017843/fetch-frame-count-with-ffmpeg
+
+    if isinstance(path, pathlib.PurePath):
+        path = str(path)
+    if not isinstance(path, str):
+        raise TypeError("Video path must be a string or pathlib.Path.")
+
+    cmd = [
+        get_ffmpeg_exe(),
+        "-i",
+        path,
+        "-map",
+        "0:v:0",
+        "-vf",
+        "null",
+        "-f",
+        "null",
+        "-",
+    ]
+    try:
+        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, **_popen_kwargs())
+    except subprocess.CalledProcessError as err:
+        out = err.output.decode(errors="ignore")
+        raise RuntimeError(
+            "FFMPEG call failed with {}:\n{}".format(err.returncode, out)
+        )
+
+    # Note that, other than with the subprocess calls below, ffmpeg won't hang here.
+    # Worst case, Python will stop/crash and ffmpeg will continue running until done.
+
+    nframes = nsecs = None
+    for line in reversed(out.splitlines()):
+        if line.startswith(b"frame="):
+            line = line.decode(errors="ignore")
+            i = line.find("frame=")
+            if i >= 0:
+                s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip()
+                nframes = int(s)
+            i = line.find("time=")
+            if i >= 0:
+                s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip()
+                nsecs = cvsecs(*s.split(":"))
+            return nframes, nsecs
+
+    raise RuntimeError("Could not get number of frames")  # pragma: no cover
+
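+
+# A hedged usage sketch of the two helpers above; the file name "movie.mp4"
+# is hypothetical:
+#
+#     nframes, nsecs = count_frames_and_secs("movie.mp4")
+#     encoder = get_first_available_h264_encoder()
+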
+
+def read_frames(
+    path,
+    pix_fmt="rgb24",
+    bpp=None,
+    input_params=None,
+    output_params=None,
+    bits_per_pixel=None,
+):
+    """
+    Create a generator to iterate over the frames in a video file.
+
+    It first yields a small metadata dictionary that contains:
+
+    * ffmpeg_version: the ffmpeg version in use (as a string).
+    * codec: a hint about the codec used to encode the video, e.g. "h264".
+    * source_size: the width and height of the encoded video frames.
+    * size: the width and height of the frames that will be produced.
+    * fps: the frames per second. Can be zero if it could not be detected.
+    * duration: duration in seconds. Can be zero if it could not be detected.
+
+    After that, it yields frames until the end of the video is reached. Each
+    frame is a bytes object.
+
+    This function makes no assumptions about the number of frames in
+    the data. For one thing, this is hard to predict exactly; it may also
+    depend on the provided output_params. If you want to know the number
+    of frames in a video file, use count_frames_and_secs(). It is also
+    possible to estimate the number of frames from the fps and duration,
+    but note that even if both numbers are present, the resulting value
+    is not always correct.
+
+    Example:
+
+        gen = read_frames(path)
+        meta = gen.__next__()
+        for frame in gen:
+            print(len(frame))
+
+    Parameters:
+        path (str): the filename of the file to read from.
+        pix_fmt (str): the pixel format of the frames to be read.
+            The default is "rgb24" (frames are uint8 RGB images).
+        input_params (list): Additional ffmpeg input command line parameters.
+        output_params (list): Additional ffmpeg output command line parameters.
+        bits_per_pixel (int): The number of bits per pixel in the output frames.
+            This depends on the given pix_fmt. Default is 24 (RGB).
+        bpp (int): DEPRECATED, USE bits_per_pixel INSTEAD. The number of bytes
+            per pixel in the output frames. This depends on the given pix_fmt.
+            Some pixel formats, like yuv420p, have 12 bits per pixel, which
+            cannot be expressed as an integer number of bytes. For this reason
+            the bpp argument is deprecated.
+    """
+
+    # ----- Input args
+
+    if isinstance(path, pathlib.PurePath):
+        path = str(path)
+    if not isinstance(path, str):
+        raise TypeError("Video path must be a string or pathlib.Path.")
+    # Note: don't check whether it exists. The source could be e.g. a camera.
+
+    pix_fmt = pix_fmt or "rgb24"
+    bpp = bpp or 3
+    bits_per_pixel = bits_per_pixel or bpp * 8
+    input_params = input_params or []
+    output_params = output_params or []
+
+    assert isinstance(pix_fmt, str), "pix_fmt must be a string"
+    assert isinstance(bits_per_pixel, int), "bpp and bits_per_pixel must be an int"
+    assert isinstance(input_params, list), "input_params must be a list"
+    assert isinstance(output_params, list), "output_params must be a list"
+
+    # ----- Prepare
+
+    pre_output_params = ["-pix_fmt", pix_fmt, "-vcodec", "rawvideo", "-f", "image2pipe"]
+
+    cmd = [get_ffmpeg_exe()]
+    cmd += input_params + ["-i", path]
+    cmd += pre_output_params + output_params + ["-"]
+
+    process = subprocess.Popen(
+        cmd,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        **_popen_kwargs(prevent_sigint=True)
+    )
+
+    log_catcher = LogCatcher(process.stderr)
+
+    # Init policy by which to terminate ffmpeg. May be set to "kill" later.
+    stop_policy = "timeout"  # not "wait"; ffmpeg should be able to quit quickly
+
+    # Enter the try block directly after opening the process.
+    # We terminate ffmpeg in the finally clause.
+    # Generators are automatically closed when they get deleted,
+    # so the finally block is guaranteed to run.
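+    # (A hedged illustration: if a caller does
+    #     for frame in read_frames(path):
+    #         break
+    # the generator gets closed, GeneratorExit is raised at the yield below,
+    # and the finally clause still runs and cleans up the ffmpeg process.)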
+    try:
+        # ----- Load meta data
+
+        # Wait for the log catcher to get the meta information
+        etime = time.time() + 10.0
+        while log_catcher.is_alive() and not log_catcher.header and time.time() < etime:
+            time.sleep(0.01)
+
+        # Check whether we have the information
+        if not log_catcher.header:
+            err2 = log_catcher.get_text(0.2)
+            fmt = "Could not load meta information\n=== stderr ===\n{}"
+            raise IOError(fmt.format(err2))
+        elif "No such file or directory" in log_catcher.header:
+            raise IOError("{} not found! Wrong path?".format(path))
+
+        meta = parse_ffmpeg_header(log_catcher.header)
+        yield meta
+
+        # ----- Read frames
+
+        width, height = meta["size"]
+        framesize_bits = width * height * bits_per_pixel
+        framesize_bytes = framesize_bits / 8
+        assert (
+            framesize_bytes.is_integer()
+        ), "incorrect bits_per_pixel, framesize in bytes must be an int"
+        framesize_bytes = int(framesize_bytes)
+        framenr = 0
+
+        while True:
+            framenr += 1
+            try:
+                bb = bytes()
+                while len(bb) < framesize_bytes:
+                    extra_bytes = process.stdout.read(framesize_bytes - len(bb))
+                    if not extra_bytes:
+                        if len(bb) == 0:
+                            return
+                        else:
+                            raise RuntimeError(
+                                "End of file reached before full frame could be read."
+                            )
+                    bb += extra_bytes
+                yield bb
+            except Exception as err:
+                err1 = str(err)
+                err2 = log_catcher.get_text(0.4)
+                fmt = "Could not read frame {}:\n{}\n=== stderr ===\n{}"
+                raise RuntimeError(fmt.format(framenr, err1, err2))
+
+    except GeneratorExit:
+        # Note that GeneratorExit does not inherit from Exception, but from BaseException
+        pass
+
+    except Exception:
+        # Normal exceptions fall through
+        raise
+
+    except BaseException:
+        # Detect KeyboardInterrupt / SystemExit: don't wait for ffmpeg to quit
+        stop_policy = "kill"
+        raise
+
+    finally:
+        # Stop the LogCatcher thread, which reads from stderr.
+        log_catcher.stop_me()
+
+        # Make sure that ffmpeg is terminated.
+        if process.poll() is None:
+            # Ask ffmpeg to quit
+            try:
+                # I read somewhere that modern ffmpeg on Linux prefers a
+                # "ctrl-c", but tests so far suggest that sending q is more robust.
+                # > p.send_signal(signal.SIGINT)
+                # Sending q via communicate works, but can hang (see #17)
+                # > p.communicate(b"q")
+                # So let's do something similar to what communicate does, but
+                # without reading stdout (which may block). It looks like only
+                # closing stdout is enough (tried Windows+Linux), but let's play
+                # safe. Found that writing to stdin can cause "Invalid argument"
+                # on Windows and "Broken Pipe" on Unix.
+                # p.stdin.write(b"q")  # commented out in v0.4.1
+                process.stdout.close()
+                process.stdin.close()
+                # p.stderr.close() -> not here, the log_catcher closes it
+            except Exception as err:  # pragma: no cover
+                logger.warning("Error while attempting to stop ffmpeg (r): " + str(err))
+
+        if stop_policy == "timeout":
+            # Wait until the timeout, produce a warning, and kill if it still exists
+            try:
+                etime = time.time() + 1.5
+                while time.time() < etime and process.poll() is None:
+                    time.sleep(0.01)
+            finally:
+                if process.poll() is None:  # pragma: no cover
+                    logger.warning("We had to kill ffmpeg to stop it.")
+                    process.kill()
+
+        else:  # stop_policy == "kill"
+            # Just kill it
+            process.kill()
+
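+
+# A hedged sketch (not part of this module) of turning the raw bytes that
+# read_frames() yields into numpy arrays; it assumes the default "rgb24"
+# pixel format, and the file name "movie.mp4" is hypothetical:
+#
+#     import numpy as np
+#
+#     gen = read_frames("movie.mp4")
+#     meta = gen.__next__()
+#     width, height = meta["size"]
+#     for frame in gen:
+#         arr = np.frombuffer(frame, dtype=np.uint8).reshape(height, width, 3)
+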
+
+    The frames are written by using the generator's `send()` method. Frames
+    can be anything that can be written to a file. Typically these are
+    bytes objects, but c-contiguous Numpy arrays also work.
+
+    Example:
+
+        gen = write_frames(path, size)
+        gen.send(None)  # seed the generator
+        for frame in frames:
+            gen.send(frame)
+        gen.close()  # don't forget this
+
+    Parameters:
+        path (str): the filename to write to.
+        size (tuple): the width and height of the frames.
+        pix_fmt_in (str): the pixel format of incoming frames.
+            E.g. "gray", "gray8a", "rgb24", or "rgba". Default "rgb24".
+        pix_fmt_out (str): the pixel format to store frames. Default "yuv420p".
+        fps (float): The frames per second. Default 16.
+        quality (float): A measure for quality between 1 and 10. Default 5.
+            Ignored if bitrate is given.
+        bitrate (str): The bitrate, e.g. "192k". The defaults are pretty good.
+        codec (str): The codec. Default "libx264" for .mp4 (if available from
+            the ffmpeg executable) or "msmpeg4" for .wmv.
+        macro_block_size (int): You probably want to align the size of frames
+            to this value to avoid image resizing. Default 16. Can be set
+            to 1 to avoid block alignment, though this is not recommended.
+        ffmpeg_log_level (str): The ffmpeg logging level. Default "warning".
+        ffmpeg_timeout (float): Timeout in seconds to wait for the ffmpeg
+            process to finish. A value of 0 or None will wait forever (default).
+            The time that ffmpeg needs depends on CPU speed, compression,
+            and frame size.
+        input_params (list): Additional ffmpeg input command line parameters.
+        output_params (list): Additional ffmpeg output command line parameters.
+        audio_path (str): An input file path for encoding with an audio stream.
+            Default None, no audio.
+        audio_codec (str): The audio codec to use if audio_path is provided.
+            "copy" will try to use audio_path's audio codec without re-encoding.
+            Default None, but some formats must have certain codecs specified.
+    """
+
+    # ----- Input args
+
+    if isinstance(path, pathlib.PurePath):
+        path = str(path)
+    if not isinstance(path, str):
+        raise TypeError("Video path must be a string or pathlib.Path.")
+
+    # The pix_fmt_out yuv420p is the best for the output to work in
+    # QuickTime and most other players. These players only support
+    # the YUV planar color space with 4:2:0 chroma subsampling for
+    # H.264 video. Otherwise, depending on the source, ffmpeg may
+    # output to a pixel format that may be incompatible with these
+    # players. See https://trac.ffmpeg.org/wiki/Encode/H.264#Encodingfordumbplayers
+
+    pix_fmt_in = pix_fmt_in or "rgb24"
+    pix_fmt_out = pix_fmt_out or "yuv420p"
+    fps = fps or 16
+    # bitrate, codec, macro_block_size can all be None or ...
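+    #
+    # Editor's sketch (illustrative, not part of the library): feeding numpy
+    # frames into this generator. Any c-contiguous uint8 array of shape
+    # (height, width, 3) matches the default pix_fmt_in="rgb24". Assumes
+    # numpy is installed and a hypothetical writable "out.mp4":
+    # > import numpy as np
+    # > gen = write_frames("out.mp4", size=(640, 480), fps=30)
+    # > gen.send(None)  # seed the generator
+    # > for _ in range(90):
+    # >     gen.send(np.zeros((480, 640, 3), np.uint8))
+    # > gen.close()  # flush and finalize the file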
+    macro_block_size = macro_block_size or 16
+    ffmpeg_log_level = ffmpeg_log_level or "warning"
+    input_params = input_params or []
+    output_params = output_params or []
+    ffmpeg_timeout = ffmpeg_timeout or 0
+
+    floatish = float, int
+    if isinstance(size, (tuple, list)):
+        assert len(size) == 2, "size must be a 2-tuple"
+        assert isinstance(size[0], int) and isinstance(
+            size[1], int
+        ), "size must be ints"
+        sizestr = "{:d}x{:d}".format(*size)
+    # elif isinstance(size, str):
+    #     assert "x" in size, "size as string must have format NxM"
+    #     sizestr = size
+    else:
+        assert False, "size must be str or tuple"
+    assert isinstance(pix_fmt_in, str), "pix_fmt_in must be str"
+    assert isinstance(pix_fmt_out, str), "pix_fmt_out must be str"
+    assert isinstance(fps, floatish), "fps must be float"
+    if quality is not None:
+        assert isinstance(quality, floatish), "quality must be float"
+        assert 1 <= quality <= 10, "quality must be between 1 and 10 inclusive"
+    assert isinstance(macro_block_size, int), "macro_block_size must be int"
+    assert isinstance(ffmpeg_log_level, str), "ffmpeg_log_level must be str"
+    assert isinstance(ffmpeg_timeout, floatish), "ffmpeg_timeout must be float"
+    assert isinstance(input_params, list), "input_params must be a list"
+    assert isinstance(output_params, list), "output_params must be a list"
+
+    # ----- Prepare
+
+    # Get parameters
+    if not codec:
+        if path.lower().endswith(".wmv"):
+            # This is a safer default codec on Windows to get videos that
+            # will play in PowerPoint and other apps. H264 is not always
+            # available on Windows.
+            codec = "msmpeg4"
+        else:
+            codec = get_first_available_h264_encoder()
+
+    audio_params = ["-an"]
+    if audio_path is not None and not path.lower().endswith(".gif"):
+        audio_params = ["-i", audio_path]
+        if audio_codec is not None:
+            output_params += ["-acodec", audio_codec]
+        output_params += ["-map", "0:v:0", "-map", "1:a:0"]
+
+    # Get command
+    cmd = [
+        get_ffmpeg_exe(),
+        "-y",
+        "-f",
+        "rawvideo",
+        "-vcodec",
+        "rawvideo",
+        "-s",
+        sizestr,
+    ]
+    cmd += ["-pix_fmt", pix_fmt_in, "-r", "{:.02f}".format(fps)] + input_params
+    cmd += ["-i", "-"] + audio_params
+    cmd += ["-vcodec", codec, "-pix_fmt", pix_fmt_out]
+
+    # Add fixed bitrate or variable bitrate compression flags
+    if bitrate is not None:
+        cmd += ["-b:v", str(bitrate)]
+    elif quality is not None:  # If None, then we don't add anything
+        quality = 1 - quality / 10.0
+        if codec == "libx264":
+            # crf ranges 0 to 51, 51 being worst.
+            quality = int(quality * 51)
+            cmd += ["-crf", str(quality)]  # for h264
+        else:  # Many codecs accept q:v
+            # The q:v range can vary per codec; commonly 1-31, 31 being worst.
+            # May need a way to find the range for any codec.
+            quality = int(quality * 30) + 1
+            cmd += ["-qscale:v", str(quality)]  # for others
+
+    # Note: for most codecs the image dimensions must be divisible by 16,
+    # which is also the default for macro_block_size. Check whether the image
+    # is divisible; if not, have ffmpeg upsize it to the nearest valid size
+    # and warn the user that they should correct the input image if this is
+    # not desired.
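+    # For example (editor's illustration): with size=(641, 480) and
+    # macro_block_size=16, 641 % 16 == 1, so out_w becomes 641 + (16 - 1)
+    # = 656, while 480 % 16 == 0 leaves out_h at 480; ffmpeg then scales
+    # each frame to 656x480.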
+ if macro_block_size > 1: + if size[0] % macro_block_size > 0 or size[1] % macro_block_size > 0: + out_w = size[0] + out_h = size[1] + if size[0] % macro_block_size > 0: + out_w += macro_block_size - (size[0] % macro_block_size) + if size[1] % macro_block_size > 0: + out_h += macro_block_size - (size[1] % macro_block_size) + cmd += ["-vf", "scale={}:{}".format(out_w, out_h)] + logger.warning( + "IMAGEIO FFMPEG_WRITER WARNING: input image is not" + " divisible by macro_block_size={}, resizing from {} " + "to {} to ensure video compatibility with most codecs " + "and players. To prevent resizing, make your input " + "image divisible by the macro_block_size or set the " + "macro_block_size to 1 (risking incompatibility).".format( + macro_block_size, size[:2], (out_w, out_h) + ) + ) + + # Rather than redirect stderr to a pipe, just set minimal + # output from ffmpeg by default. That way if there are warnings + # the user will see them. + cmd += ["-v", ffmpeg_log_level] + cmd += output_params + cmd.append(path) + cmd_str = " ".join(cmd) + if any( + [level in ffmpeg_log_level for level in ("info", "verbose", "debug", "trace")] + ): + logger.info("RUNNING FFMPEG COMMAND: " + cmd_str) + + # Launch process + p = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=None, + **_popen_kwargs(prevent_sigint=True) + ) + + # Note that directing stderr to a pipe on windows will cause ffmpeg + # to hang if the buffer is not periodically cleared using + # StreamCatcher or other means. + # Setting bufsize to 0 or a small value does not seem to have much effect + # (tried on Windows and Linux). I suspect that ffmpeg buffers + # multiple frames (before encoding in a batch). + + # Init policy by which to terminate ffmpeg. May be set to "kill" later. + stop_policy = "timeout" + if not ffmpeg_timeout: + stop_policy = "wait" + + # ----- Write frames + + # Enter try block directly after opening the process. + # We terminate ffmpeg in the finally clause. + # Generators are automatically closed when they get deleted, + # so the finally block is guaranteed to run. + try: + # Just keep going until the generator.close() is called (raises GeneratorExit). + # This could also happen when the generator is deleted somehow. + nframes = 0 + while True: + # Get frame + bb = yield + + # framesize = size[0] * size[1] * depth * bpp + # assert isinstance(bb, bytes), "Frame must be send as bytes" + # assert len(bb) == framesize, "Frame must have width*height*depth*bpp bytes" + # Actually, we accept anything that can be written to file. + # This e.g. allows writing numpy arrays without having to make a copy ... + + # Write + try: + p.stdin.write(bb) + except Exception as err: + # Show the command and stderr from pipe + msg = ( + "{0:}\n\nFFMPEG COMMAND:\n{1:}\n\nFFMPEG STDERR " + "OUTPUT:\n".format(err, cmd_str) + ) + raise IOError(msg) + + nframes += 1 + + except GeneratorExit: + # Note that GeneratorExit does not inherit from Exception but BaseException + # Detect premature closing + if nframes == 0: + logger.warning("No frames have been written; the written video is invalid.") + + except Exception: + # Normal exceptions fall through + raise + + except BaseException: + # Detect KeyboardInterrupt / SystemExit: don't wait for ffmpeg to quit + stop_policy = "kill" + raise + + finally: + # Make sure that ffmpeg is terminated. 
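+        # Editor's note: three stop policies are handled below. "timeout"
+        # (the default when ffmpeg_timeout > 0) waits up to ffmpeg_timeout
+        # seconds before killing; "wait" (ffmpeg_timeout of 0/None) waits
+        # indefinitely; "kill" (set on KeyboardInterrupt/SystemExit) kills
+        # the process immediately.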
+        if p.poll() is None:
+            # Tell ffmpeg that we're done
+            try:
+                p.stdin.close()
+            except Exception as err:  # pragma: no cover
+                logger.warning("Error while attempting to stop ffmpeg (w): " + str(err))
+
+        if stop_policy == "timeout":
+            # Wait until timeout, produce a warning and kill if it still exists
+            try:
+                etime = time.time() + ffmpeg_timeout
+                while (time.time() < etime) and p.poll() is None:
+                    time.sleep(0.01)
+            finally:
+                if p.poll() is None:  # pragma: no cover
+                    logger.warning(
+                        "We had to kill ffmpeg to stop it. "
+                        + "Consider increasing ffmpeg_timeout, "
+                        + "or setting it to zero (no timeout)."
+                    )
+                    p.kill()
+
+        elif stop_policy == "wait":
+            # Wait forever, kill it if we're interrupted
+            try:
+                while p.poll() is None:
+                    time.sleep(0.01)
+            finally:  # the above can raise, e.g. via ctrl-c or SystemExit
+                if p.poll() is None:  # pragma: no cover
+                    p.kill()
+
+        else:  # stop_policy == "kill":
+            # Just kill it
+            p.kill()
+
+        # Just to be safe, wrap in try/except
+        try:
+            p.stdout.close()
+        except Exception:
+            pass
diff --git a/py311/lib/python3.11/site-packages/imageio_ffmpeg/_parsing.py b/py311/lib/python3.11/site-packages/imageio_ffmpeg/_parsing.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dd71b82e73dc54c3f1b575f403d632f85e8aa52
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/imageio_ffmpeg/_parsing.py
@@ -0,0 +1,208 @@
+import re
+import threading
+import time
+
+from ._utils import logger
+
+
+class LogCatcher(threading.Thread):
+    """Thread that keeps reading from stderr so that the buffer does not
+    fill up and stall the ffmpeg process. On stderr a message is sent
+    every few frames with some meta information. We only keep the
+    last ones.
+    """
+
+    def __init__(self, file):
+        self._file = file
+        self._header = ""
+        self._lines = []
+        self._remainder = b""
+        threading.Thread.__init__(self)
+        self.daemon = True  # do not let this thread hold up Python shutdown
+        self._should_stop = False
+        self.start()
+
+    def stop_me(self):
+        self._should_stop = True
+
+    @property
+    def header(self):
+        """Get header text. Empty string if the header is not yet parsed."""
+        return self._header
+
+    def get_text(self, timeout=0):
+        """Get the whole text written to stderr so far. To preserve
+        memory, only the last 50 to 100 frames are kept.
+
+        If a timeout is given, wait for this thread to finish. When
+        something goes wrong, we stop ffmpeg and want a full report of
+        stderr, but this thread might need a tiny bit more time.
+        """
+
+        # Wait?
+        if timeout > 0:
+            etime = time.time() + timeout
+            while self.is_alive() and time.time() < etime:  # pragma: no cover
+                time.sleep(0.01)
+        # Return str
+        lines = b"\n".join(self._lines)
+        return self._header + "\n" + lines.decode("utf-8", "ignore")
+
+    def run(self):
+        # Create ref here so it still exists even if Py is shutting down
+        limit_lines_local = limit_lines
+
+        while not self._should_stop:
+            time.sleep(0)
+            # Read one line. Detect when closed, and exit
+            try:
+                line = self._file.read(20)
+            except ValueError:  # pragma: no cover
+                break
+            if not line:
+                break
+            # Process to divide into lines
+            line = line.replace(b"\r", b"\n").replace(b"\n\n", b"\n")
+            lines = line.split(b"\n")
+            lines[0] = self._remainder + lines[0]
+            self._remainder = lines.pop(-1)
+            # Process each line
+            self._lines.extend(lines)
+            if not self._header:
+                if get_output_video_line(self._lines):
+                    header = b"\n".join(self._lines)
+                    self._header += header.decode("utf-8", "ignore")
+            elif self._lines:
+                self._lines = limit_lines_local(self._lines)
+
+        # Close the file when we're done
+        # See #61 and #69
+        try:
+            self._file.close()
+        except Exception:
+            pass
+
+
+def get_output_video_line(lines):
+    """Get the line that defines the video stream that ffmpeg outputs,
+    and which we read.
+    """
+    in_output = False
+    for line in lines:
+        sline = line.lstrip()
+        if sline.startswith(b"Output "):
+            in_output = True
+        elif in_output:
+            if sline.startswith(b"Stream ") and b" Video:" in sline:
+                return line
+
+
+def limit_lines(lines, N=32):
+    """When number of lines > 2*N, reduce to N."""
+    if len(lines) > 2 * N:
+        lines = [b"... showing only last few lines ..."] + lines[-N:]
+    return lines
+
+
+def cvsecs(*args):
+    """Convert a time to seconds. Accepts cvsecs(secs), cvsecs(mins, secs)
+    or cvsecs(hours, mins, secs).
+    """
+    if len(args) == 1:
+        return float(args[0])
+    elif len(args) == 2:
+        return 60 * float(args[0]) + float(args[1])
+    elif len(args) == 3:
+        return 3600 * float(args[0]) + 60 * float(args[1]) + float(args[2])
+
+
+def parse_ffmpeg_header(text):
+    lines = text.splitlines()
+    meta = {}
+
+    # meta["header"] = text  # Can enable this for debugging
+
+    # Get version
+    ver = lines[0].split("version", 1)[-1].split("Copyright")[0]
+    meta["ffmpeg_version"] = ver.strip() + " " + lines[1].strip()
+
+    # Get the output line that speaks about video
+    videolines = [
+        l for l in lines if l.lstrip().startswith("Stream ") and " Video: " in l
+    ]
+
+    # Codec and pix_fmt hint
+    line = videolines[0]
+    meta["codec"] = line.split("Video: ", 1)[-1].lstrip().split(" ", 1)[0].strip()
+    meta["pix_fmt"] = re.split(
+        # Use a negative lookahead regexp to ignore commas that are contained
+        # within a parenthesis. This helps consider a pix_fmt of the kind
+        #   yuv420p(tv, progressive)
+        # as what it is, instead of erroneously reporting it as
+        #   yuv420p(tv
+        r",\s*(?![^()]*\))",
+        line.split("Video: ", 1)[-1],
+    )[1].strip()
+
+    # Get the output line that speaks about audio
+    audiolines = [
+        l for l in lines if l.lstrip().startswith("Stream ") and " Audio: " in l
+    ]
+
+    if len(audiolines) > 0:
+        audio_line = audiolines[0]
+        meta["audio_codec"] = (
+            audio_line.split("Audio: ", 1)[-1].lstrip().split(" ", 1)[0].strip()
+        )
+
+    # get the frame rate.
+ # matches can be empty, see #171, assume nframes = inf + # the regexp omits values of "1k tbr" which seems a specific edge-case #262 + # it seems that tbr is generally to be preferred #262 + fps = 0 + for line in [videolines[0]]: + matches = re.findall(r" ([0-9]+\.?[0-9]*) (fps)", line) + if matches: + fps = float(matches[0][0].strip()) + meta["fps"] = fps + + # get the size of the original stream, of the form 460x320 (w x h) + line = videolines[0] + match = re.search(" [0-9]*x[0-9]*(,| )", line) + parts = line[match.start() : match.end() - 1].split("x") + meta["source_size"] = tuple(map(int, parts)) + + # get the size of what we receive, of the form 460x320 (w x h) + line = videolines[-1] # Pipe output + match = re.search(" [0-9]*x[0-9]*(,| )", line) + parts = line[match.start() : match.end() - 1].split("x") + meta["size"] = tuple(map(int, parts)) + + # Check the two sizes + if meta["source_size"] != meta["size"]: + logger.warning( + "The frame size for reading {} is " + "different from the source frame size {}.".format( + meta["size"], meta["source_size"] + ) + ) + + # get the rotate metadata + reo_rotate = re.compile(r"rotate\s+:\s([0-9]+)") + match = reo_rotate.search(text) + rotate = 0 + if match is not None: + rotate = match.groups()[0] + meta["rotate"] = int(rotate) + + # get duration (in seconds) + line = [l for l in lines if "Duration: " in l][0] + match = re.search(" [0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9]", line) + duration = 0 + if match is not None: + hms = line[match.start() + 1 : match.end()].split(":") + duration = cvsecs(*hms) + meta["duration"] = duration + + return meta diff --git a/py311/lib/python3.11/site-packages/imageio_ffmpeg/_utils.py b/py311/lib/python3.11/site-packages/imageio_ffmpeg/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..be6f916ab5739aec1e2ebf25a82a1fe993011d85 --- /dev/null +++ b/py311/lib/python3.11/site-packages/imageio_ffmpeg/_utils.py @@ -0,0 +1,127 @@ +import logging +import os +import subprocess +import sys +from functools import lru_cache +import importlib.resources + +from ._definitions import FNAME_PER_PLATFORM, get_platform + +logger = logging.getLogger("imageio_ffmpeg") + + +def get_ffmpeg_exe(): + """ + Get the ffmpeg executable file. This can be the binary defined by + the IMAGEIO_FFMPEG_EXE environment variable, the binary distributed + with imageio-ffmpeg, an ffmpeg binary installed with conda, or the + system ffmpeg (in that order). A RuntimeError is raised if no valid + ffmpeg could be found. + """ + + # 1. Try environment variable. - Dont test it: the user is explicit here! + exe = os.getenv("IMAGEIO_FFMPEG_EXE", None) + if exe: + return exe + + # Auto-detect + exe = _get_ffmpeg_exe() + if exe: + return exe + + # Nothing was found + raise RuntimeError( + "No ffmpeg exe could be found. Install ffmpeg on your system, " + "or set the IMAGEIO_FFMPEG_EXE environment variable." + ) + + +@lru_cache() +def _get_ffmpeg_exe(): + plat = get_platform() + + # 2. Try from here + exe = os.path.join(_get_bin_dir(), FNAME_PER_PLATFORM.get(plat, "")) + if exe and os.path.isfile(exe) and _is_valid_exe(exe): + return exe + + # 3. Try binary from conda package + # (installed e.g. via `conda install ffmpeg -c conda-forge`) + if plat.startswith("win"): + exe = os.path.join(sys.prefix, "Library", "bin", "ffmpeg.exe") + else: + exe = os.path.join(sys.prefix, "bin", "ffmpeg") + if exe and os.path.isfile(exe) and _is_valid_exe(exe): + return exe + + # 4. 
Try system ffmpeg command + exe = "ffmpeg" + if _is_valid_exe(exe): + return exe + + return None + + +def _get_bin_dir(): + if sys.version_info < (3, 9): + context = importlib.resources.path("imageio_ffmpeg.binaries", "__init__.py") + else: + ref = importlib.resources.files("imageio_ffmpeg.binaries") / "__init__.py" + context = importlib.resources.as_file(ref) + with context as path: + pass + # Return the dir. We assume that the data files are on a normal dir on the fs. + return str(path.parent) + + +def _popen_kwargs(prevent_sigint=False): + startupinfo = None + preexec_fn = None + creationflags = 0 + if sys.platform.startswith("win"): + # Stops executable from flashing on Windows (see #22) + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + if prevent_sigint: + # Prevent propagation of sigint (see #4) + # https://stackoverflow.com/questions/5045771 + if sys.platform.startswith("win"): + creationflags = 0x00000200 + else: + preexec_fn = os.setpgrp # the _pre_exec does not seem to work + + falsy = ("", "0", "false", "no") + if os.getenv("IMAGEIO_FFMPEG_NO_PREVENT_SIGINT", "").lower() not in falsy: + # Unset preexec_fn to work around a strange hang on fork() (see #58) + preexec_fn = None + + return { + "startupinfo": startupinfo, + "creationflags": creationflags, + "preexec_fn": preexec_fn, + } + + +def _is_valid_exe(exe): + cmd = [exe, "-version"] + try: + with open(os.devnull, "w") as null: + subprocess.check_call( + cmd, stdout=null, stderr=subprocess.STDOUT, **_popen_kwargs() + ) + return True + except (OSError, ValueError, subprocess.CalledProcessError): + return False + + +def get_ffmpeg_version(): + """ + Get the version of the used ffmpeg executable (as a string). + """ + exe = get_ffmpeg_exe() + line = subprocess.check_output([exe, "-version"], **_popen_kwargs()).split( + b"\n", 1 + )[0] + line = line.decode(errors="ignore").strip() + version = line.split("version", 1)[-1].lstrip().split(" ", 1)[0].strip() + return version diff --git a/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/METADATA b/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..070cab5406c516a471462e445a2af0f0d99470da --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/METADATA @@ -0,0 +1,172 @@ +Metadata-Version: 2.4 +Name: joblib +Version: 1.5.3 +Summary: Lightweight pipelining with Python functions +Author-email: Gael Varoquaux +License-Expression: BSD-3-Clause +Project-URL: Homepage, https://joblib.readthedocs.io +Project-URL: Source, https://github.com/joblib/joblib +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Education +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 
+Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Utilities +Classifier: Topic :: Software Development :: Libraries +Requires-Python: >=3.9 +Description-Content-Type: text/x-rst +License-File: LICENSE.txt +Dynamic: license-file + +|PyPi| |CIStatus| |ReadTheDocs| |Codecov| + +.. |PyPi| image:: https://badge.fury.io/py/joblib.svg + :target: https://badge.fury.io/py/joblib + :alt: Joblib version + +.. |CIStatus| image:: https://github.com/joblib/joblib/actions/workflows/test.yml/badge.svg + :target: https://github.com/joblib/joblib/actions/workflows/test.yml?query=branch%3Amain + :alt: CI status + +.. |ReadTheDocs| image:: https://readthedocs.org/projects/joblib/badge/?version=latest + :target: https://joblib.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +.. |Codecov| image:: https://codecov.io/gh/joblib/joblib/branch/main/graph/badge.svg + :target: https://codecov.io/gh/joblib/joblib + :alt: Codecov coverage + + +The homepage of joblib with user documentation is located on: + +https://joblib.readthedocs.io + +Getting the latest code +======================= + +To get the latest code using git, simply type:: + + git clone https://github.com/joblib/joblib.git + +If you don't have git installed, you can download a zip +of the latest code: https://github.com/joblib/joblib/archive/refs/heads/main.zip + +Installing +========== + +You can use `pip` to install joblib from any directory:: + + pip install joblib + +or install it in editable mode from the source directory:: + + pip install -e . + +Dependencies +============ + +- Joblib has no mandatory dependencies besides Python (supported versions are + 3.9+). +- Joblib has an optional dependency on Numpy (at least version 1.6.1) for array + manipulation. +- Joblib includes its own vendored copy of + `loky `_ for process management. +- Joblib can efficiently dump and load numpy arrays but does not require numpy + to be installed. +- Joblib has an optional dependency on + `python-lz4 `_ as a faster alternative to + zlib and gzip for compressed serialization. +- Joblib has an optional dependency on psutil to mitigate memory leaks in + parallel worker processes. +- Some examples require external dependencies such as pandas. See the + instructions in the `Building the docs`_ section for details. + +Workflow to contribute +====================== + +To contribute to joblib, first create an account on `github +`_. Once this is done, fork the `joblib repository +`_ to have your own repository, +clone it using ``git clone``. Make your changes in a branch of your clone, push +them to your github account, test them locally, and when you are happy with +them, send a pull request to the main repository. + +You can use `pre-commit `_ to run code style checks +before each commit:: + + pip install pre-commit + pre-commit install + +pre-commit checks can be disabled for a single commit with:: + + git commit -n + +Running the test suite +====================== + +To run the test suite, you need the pytest (version >= 3) and coverage modules. +Run the test suite using:: + + pytest joblib + +from the root of the project. 
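+
+To run only a subset of the tests during development, any standard pytest
+selection option can be used, for instance::
+
+    pytest joblib/test/test_memory.py -k cache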
+
+Building the docs
+=================
+
+To build the docs you need to have sphinx (>=1.4) and some dependencies
+installed::
+
+    pip install -U -r .readthedocs-requirements.txt
+
+The docs can then be built with the following command::
+
+    make doc
+
+The html docs are located in the ``doc/_build/html`` directory.
+
+
+Making a source tarball
+=======================
+
+To create a source tarball, e.g. for packaging or distributing, run the
+following command::
+
+    pip install build
+    python -m build --sdist
+
+The tarball will be created in the `dist` directory; it can be installed
+with no dependencies other than the Python standard library.
+
+Making a release and uploading it to PyPI
+=========================================
+
+These commands are run only by the project manager, to make a release and
+upload it to PyPI::
+
+    pip install build
+    python -m build --sdist --wheel
+    twine upload dist/*
+
+
+Note that the documentation should automatically get updated at each git
+push. If that is not the case, try building the docs locally and resolve
+any doc build errors (in particular when running the examples).
+
+Updating the changelog
+======================
+
+Changes are listed in the CHANGES.rst file. They must be updated manually,
+but the following git command may be used to generate the lines::
+
+    git log --abbrev-commit --date=short --no-merges --sparse
diff --git a/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/RECORD b/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..14c54f6f7b2893791e8b5fb0dbf2c8c0470ec8ea
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/RECORD
@@ -0,0 +1,145 @@
+joblib-1.5.3.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
+joblib-1.5.3.dist-info/METADATA,sha256=zunGCJauTnqWWoQPveUNER0vgBj2EgHR7S6fw91Ig-Y,5542
+joblib-1.5.3.dist-info/RECORD,,
+joblib-1.5.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+joblib-1.5.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+joblib-1.5.3.dist-info/licenses/LICENSE.txt,sha256=QmEpEcGHLF5LQ_auDo7llGfNNQMyJBz3LOkGQCZPrmo,1527
+joblib-1.5.3.dist-info/top_level.txt,sha256=P0LsoZ45gBL7ckL4lqQt7tdbrHD4xlVYhffmhHeeT_U,7
+joblib/__init__.py,sha256=W-0Q8UNWm7rSJmZA9eq-ihckKM6JRKvOm1l8SN44piQ,5337
+joblib/_cloudpickle_wrapper.py,sha256=HSFxIio3jiGnwVCstAa6obbxs4-5aRAIMDDUAA-cDPc,416
+joblib/_dask.py,sha256=xUYA_2VVc0LvPavSiFy8M7TZc6KF0lIxcQhng5kPaXU,13217
+joblib/_memmapping_reducer.py,sha256=AZ6dqA6fXlm4-ehBCf9m1nq43jUPKman4_2whrOButc,28553
+joblib/_multiprocessing_helpers.py,sha256=f8-Vf_8ildmdg991eLz8xk4DJJFTS_bcrhj6CgQ4lxU,1878
+joblib/_parallel_backends.py,sha256=fgy_FgZiKeNvTWr4wKbSX4kUNx2YD6m7p5O1J96xhb4,28766
+joblib/_store_backends.py,sha256=xJWN4xvM1LU8kZXk3LEJwIjeEoj6zOmESsPHWmlUkPw,17918
+joblib/_utils.py,sha256=bE4abMGL08HTLtVeisrww8QjvCllMLHVT2qbmYtAGaA,3248
+joblib/backports.py,sha256=mITpG-yuEADimg89_LCdUY9QH9a5xQHsRNJnd7BmAMo,5450
+joblib/compressor.py,sha256=GDDVJmeOBqftc6tMkDupryojHhk_jIV8WrNoMiTxTdQ,19281
+joblib/disk.py,sha256=1J5hhMsCP5LDW65luTtArUxsMAJRrPB6wxSWf6GeBns,4332
+joblib/executor.py,sha256=fbVmE_KKywjJcIKmHO9k8M3VkaMqZXEP4YXBRz_p6xU,5229
+joblib/externals/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+joblib/externals/cloudpickle/__init__.py,sha256=ErKyvqqAErOkbCowrizhGpN9DN-TaD22QtqI9CQ9sDw,308
+joblib/externals/cloudpickle/cloudpickle.py,sha256=KcLA_uAxp2KuMJb9vaTVve7FLFUQu7mYkD3dCscxeRc,58874 +joblib/externals/cloudpickle/cloudpickle_fast.py,sha256=AI5ZKf2AbLNxD8lXyLDpKZyzeZ2ofFtdK1ZWFq_ec1c,323 +joblib/externals/loky/__init__.py,sha256=8LzBTFpYfRFrjD1loIQpRF9QQ_8wwEkssJI6hYcGbfE,1105 +joblib/externals/loky/_base.py,sha256=LsQnEoKWKGhdeqGhMc68Aqwz4MrTnEs20KAYbFiUHzo,1057 +joblib/externals/loky/backend/__init__.py,sha256=Ix9KThV1CYk7-M5OQnJ_A_JrrrWJ-Jowa-HMMeGbp18,312 +joblib/externals/loky/backend/_posix_reduction.py,sha256=xgCSrIaLI0k_MI0XNOBSp5e1ox1WN9idgrWbkWpMUr4,1776 +joblib/externals/loky/backend/_win_reduction.py,sha256=WmNB0NXtyJ_o_WzfPUEGh5dPhXIeI6FkEnFNXUxO2ws,683 +joblib/externals/loky/backend/context.py,sha256=RPdZvzkEk7iA0rtdAILSHNzl6wsHpm6XD6IL30owAPE,14284 +joblib/externals/loky/backend/fork_exec.py,sha256=4DZ1iLBB-21rlg3Z4Kh9DTVZj35JPaWFE5rzWZaSDxk,2319 +joblib/externals/loky/backend/popen_loky_posix.py,sha256=3G-2_-ovZtjWcHI-xSyW5zQjAZ-_Z9IGjzY1RrZH4nc,5541 +joblib/externals/loky/backend/popen_loky_win32.py,sha256=bYkhRA0w8qUcYFwoezeGwcnlCocEdheWXc6SZ-_rVxo,5325 +joblib/externals/loky/backend/process.py,sha256=4-Y94EoIrg4btsjTNxUBHAHhR96Nrugn_7_PGL6aU50,2018 +joblib/externals/loky/backend/queues.py,sha256=eETFvbPHwKfdoYyOgNQCyKq_Zlm-lzH3fwwpUIh-_4U,7322 +joblib/externals/loky/backend/reduction.py,sha256=861drQAefXTJjfFWAEWmYAS315d8lAyqWx0RgyxFw_0,6926 +joblib/externals/loky/backend/resource_tracker.py,sha256=Jzbmb8otLR7etqhefKuZxAs1VvT1jV8d5Zev8vUcV6s,15403 +joblib/externals/loky/backend/spawn.py,sha256=t4PzEJ3tjwoF9t8qnQUF9R7Q-LmBpDBIcHURWNznz8M,8626 +joblib/externals/loky/backend/synchronize.py,sha256=nlDwBoLZB93m_l55qfZM_Ql-4L84PSYimoQqt5TzpDk,11768 +joblib/externals/loky/backend/utils.py,sha256=RVsxqyET4TJdbjc9uUHJmfhlQ2v4Uq-fiT_5b5rfC0s,5757 +joblib/externals/loky/cloudpickle_wrapper.py,sha256=jUnfhXI3qMXTlCeTUzpABQlv0VOLMJL1V7fpRlq2LgU,3609 +joblib/externals/loky/initializers.py,sha256=dtKtRsJUmVwiJu0yZ-Ih0m8PvW_MxmouG7mShEcsStc,2567 +joblib/externals/loky/process_executor.py,sha256=QPSKet0OCAWr6g_2fHwPt4yjQaAJsjfeJYFPiKhS9RE,52348 +joblib/externals/loky/reusable_executor.py,sha256=d9ksrTnJS8549Oq50iG08u5pEhuMbhQ3oSYUSq0twNQ,10863 +joblib/func_inspect.py,sha256=bhm_GpBe3H_Dmw5ripzP5BalA6wbq7ZFI3SEuAQbfek,14017 +joblib/hashing.py,sha256=38MM0zRl0Ebk78Ij6cMdrQ8ibYZP0pCJxu3L4Yrw1sc,10694 +joblib/logger.py,sha256=HK06qwNWJYInYIIXFYINAKCxjYxi0hoX45ckNKkogHQ,5342 +joblib/memory.py,sha256=va7zWG9s_X6eE-Cm1junrH-QwKTnguin5cEJIhUXo98,45404 +joblib/numpy_pickle.py,sha256=N_wQMf6_vgI71nRYLne0dc2kO6dfh0lkTaOZn8Tq5Hc,28791 +joblib/numpy_pickle_compat.py,sha256=JOlSfMT1uDIztOyQ3dzYgp5fGVnzPVWBCqXjdIZsjLQ,8451 +joblib/numpy_pickle_utils.py,sha256=j3GlI25QFvo-DTPn7uRptu-NtW16ztHM0DuglyQyEDI,9497 +joblib/parallel.py,sha256=SkJYk-cTHC8oMvZU79SDXV61IZ10YIHbBYhrHB47yM8,86989 +joblib/pool.py,sha256=JTc00PEAyPayo8mHdktmburp5OBsnNxwSQI4zzvtKYs,14134 +joblib/test/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +joblib/test/common.py,sha256=vpjpcJgMbmr8H3skc3qsr_KC-u-ZnhVFRk2vAxmJqvA,2102 +joblib/test/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +joblib/test/data/create_numpy_pickle.py,sha256=vZE7JNye9o0gYaxrn1555av6Igee0KeXacAWKNRhsu8,3334 +joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np16.gz,sha256=QYRH6Q2DSGVorjCSqWCxjTWCMOJKyew4Nl2qmfQVvQ8,769 +joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np17.gz,sha256=ofTozM_KlPJa50TR8FCwc09mMmO6OO0GQhgUBLNIsXs,757 
+joblib/test/data/joblib_0.10.0_compressed_pickle_py33_np18.gz,sha256=2eIVeA-XjOaT5IEQ6tI2UuHG3hwhiRciMmkBmPcIh4g,792 +joblib/test/data/joblib_0.10.0_compressed_pickle_py34_np19.gz,sha256=Gr2z_1tVWDH1H3_wCVHmakknf8KqeHKT8Yz4d1vmUCM,794 +joblib/test/data/joblib_0.10.0_compressed_pickle_py35_np19.gz,sha256=pWw_xuDbOkECqu1KGf1OFU7s2VbzC2v5F5iXhE7TwB4,790 +joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl,sha256=icRQjj374B-AHk5znxre0T9oWUHokoHIBQ8MqKo8l-U,986 +joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.bz2,sha256=oYQVIyMiUxyRgWSuBBSOvCWKzToA-kUpcoQWdV4UoV4,997 +joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.gzip,sha256=Jpv3iGcDgKTv-O4nZsUreIbUK7qnt2cugZ-VMgNeEDQ,798 +joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.lzma,sha256=c0wu0x8pPv4BcStj7pE61rZpf68FLG_pNzQZ4e82zH8,660 +joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz,sha256=77FG1FDG0GHQav-1bxc4Tn9ky6ubUW_MbE0_iGmz5wc,712 +joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl,sha256=4GTC7s_cWNVShERn2nvVbspZYJgyK_0man4TEqvdVzU,1068 +joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.bz2,sha256=6G1vbs_iYmz2kYJ6w4qB1k7D67UnxUMus0S4SWeBtFo,1000 +joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.gzip,sha256=tlRUWeJS1BXmcwtLNSNK9L0hDHekFl07CqWxTShinmY,831 +joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.lzma,sha256=CorPwnfv3rR5hjNtJI01-sEBMOnkSxNlRVaWTszMopA,694 +joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.xz,sha256=Dppj3MffOKsKETeptEtDaxPOv6MA6xnbpK5LzlDQ-oE,752 +joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl,sha256=HL5Fb1uR9aPLjjhoOPJ2wwM1Qyo1FCZoYYd2HVw0Fos,1068 +joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.bz2,sha256=Pyr2fqZnwfUxXdyrBr-kRwBYY8HA_Yi7fgSguKy5pUs,1021 +joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.gzip,sha256=os8NJjQI9FhnlZM-Ay9dX_Uo35gZnoJCgQSIVvcBPfE,831 +joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.lzma,sha256=Q_0y43qU7_GqAabJ8y3PWVhOisurnCAq3GzuCu04V58,697 +joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.xz,sha256=BNfmiQfpeLVpdfkwlJK4hJ5Cpgl0vreVyekyc5d_PNM,752 +joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl,sha256=l7nvLolhBDIdPFznOz3lBHiMOPBPCMi1bXop1tFSCpY,1068 +joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.bz2,sha256=pqGpuIS-ZU4uP8mkglHs8MaSDiVcPy7l3XHYJSppRgY,1005 +joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.gzip,sha256=YRFXE6LEb6qK72yPqnXdqQVY8Ts8xKUS9PWQKhLxWvk,833 +joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.lzma,sha256=Bf7gCUeTuTjCkbcIdyZYz69irblX4SAVQEzxCnMQhNU,701 +joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz,sha256=As8w2LGWwwNmKy3QNdKljK63Yq46gjRf_RJ0lh5_WqA,752 +joblib/test/data/joblib_0.11.0_compressed_pickle_py36_np111.gz,sha256=1WrnXDqDoNEPYOZX1Q5Wr2463b8vVV6fw4Wm5S4bMt4,800 +joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl,sha256=XmsOFxeC1f1aYdGETclG6yfF9rLoB11DayOAhDMULrw,1068 +joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.bz2,sha256=vI2yWb50LKL_NgZyd_XkoD5teIg93uI42mWnx9ee-AQ,991 +joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.gzip,sha256=1WrnXDqDoNEPYOZX1Q5Wr2463b8vVV6fw4Wm5S4bMt4,800 +joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.lzma,sha256=IWA0JlZG2ur53HgTUDl1m7q79dcVq6b0VOq33gKoJU0,715 +joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.xz,sha256=3Xh_NbMZdBjYx7ynfJ3Fyke28izSRSSzzNB0z5D4k9Y,752 +joblib/test/data/joblib_0.8.4_compressed_pickle_py27_np17.gz,sha256=Sp-ZT7i6pj5on2gbptszu7RarzJpOmHJ67UKOmCPQMg,659 +joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np16.gz,sha256=NLtDrvo2XIH0KvUUAvhOqMeoXEjGW0IuTk_osu5XiDw,658 
+joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np17.gz,sha256=NLtDrvo2XIH0KvUUAvhOqMeoXEjGW0IuTk_osu5XiDw,658 +joblib/test/data/joblib_0.9.2_compressed_pickle_py34_np19.gz,sha256=nzO9iiGkG3KbBdrF3usOho8higkrDj_lmICUzxZyF_Y,673 +joblib/test/data/joblib_0.9.2_compressed_pickle_py35_np19.gz,sha256=nzO9iiGkG3KbBdrF3usOho8higkrDj_lmICUzxZyF_Y,673 +joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl,sha256=naijdk2xIeKdIa3mfJw0JlmOdtiN6uRM1yOJg6-M73M,670 +joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_01.npy,sha256=DvvX2c5-7DpuCg20HnleA5bMo9awN9rWxhtGSEPSiAk,120 +joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_02.npy,sha256=HBzzbLeB-8whuVO7CgtF3wktoOrg52WILlljzNcBBbE,120 +joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_03.npy,sha256=oMRa4qKJhBy-uiRDt-uqOzHAqencxzKUrKVynaAJJAU,236 +joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_04.npy,sha256=PsviRClLqT4IR5sWwbmpQR41af9mDtBFncodJBOB3wU,104 +joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl,sha256=LynX8dLOygfxDfFywOgm7wgWOhSxLG7z-oDsU6X83Dw,670 +joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_01.npy,sha256=DvvX2c5-7DpuCg20HnleA5bMo9awN9rWxhtGSEPSiAk,120 +joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_02.npy,sha256=HBzzbLeB-8whuVO7CgtF3wktoOrg52WILlljzNcBBbE,120 +joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_03.npy,sha256=oMRa4qKJhBy-uiRDt-uqOzHAqencxzKUrKVynaAJJAU,236 +joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_04.npy,sha256=PsviRClLqT4IR5sWwbmpQR41af9mDtBFncodJBOB3wU,104 +joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl,sha256=w9TLxpDTzp5TI6cU6lRvMsAasXEChcQgGE9s30sm_CU,691 +joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_01.npy,sha256=DvvX2c5-7DpuCg20HnleA5bMo9awN9rWxhtGSEPSiAk,120 +joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_02.npy,sha256=HBzzbLeB-8whuVO7CgtF3wktoOrg52WILlljzNcBBbE,120 +joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_03.npy,sha256=jt6aZKUrJdfbMJUJVsl47As5MrfRSs1avGMhbmS6vec,307 +joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_04.npy,sha256=PsviRClLqT4IR5sWwbmpQR41af9mDtBFncodJBOB3wU,104 +joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl,sha256=ilOBAOaulLFvKrD32S1NfnpiK-LfzA9rC3O2I7xROuI,691 +joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_01.npy,sha256=DvvX2c5-7DpuCg20HnleA5bMo9awN9rWxhtGSEPSiAk,120 +joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_02.npy,sha256=HBzzbLeB-8whuVO7CgtF3wktoOrg52WILlljzNcBBbE,120 +joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_03.npy,sha256=jt6aZKUrJdfbMJUJVsl47As5MrfRSs1avGMhbmS6vec,307 +joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_04.npy,sha256=PsviRClLqT4IR5sWwbmpQR41af9mDtBFncodJBOB3wU,104 +joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl,sha256=WfDVIqKcMzzh1gSAshIfzBoIpdLdZQuG79yYf5kfpOo,691 +joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_01.npy,sha256=DvvX2c5-7DpuCg20HnleA5bMo9awN9rWxhtGSEPSiAk,120 +joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_02.npy,sha256=HBzzbLeB-8whuVO7CgtF3wktoOrg52WILlljzNcBBbE,120 +joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_03.npy,sha256=jt6aZKUrJdfbMJUJVsl47As5MrfRSs1avGMhbmS6vec,307 +joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_04.npy,sha256=PsviRClLqT4IR5sWwbmpQR41af9mDtBFncodJBOB3wU,104 +joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz,sha256=8jYfWJsx0oY2J-3LlmEigK5cClnJSW2J2rfeSTZw-Ts,802 +joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_01.npy.z,sha256=YT9VvT3sEl2uWlOyvH2CkyE9Sok4od9O3kWtgeuUUqE,43 
+joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_02.npy.z,sha256=txA5RDI0PRuiU_UNKY8pGp-zQgQQ9vaVvMi60hOPaVs,43 +joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_03.npy.z,sha256=d3AwICvU2MpSNjh2aPIsdJeGZLlDjANAF1Soa6uM0Po,37 +joblib/test/test_backports.py,sha256=ONt0JUPV1etZCO9DTLur1h84XmgHZYK_k73qmp4kRgg,1175 +joblib/test/test_cloudpickle_wrapper.py,sha256=9jx3hqNVO9GXdVHCxr9mN-GiLR0XK-O5d6YPaaG8Y14,729 +joblib/test/test_config.py,sha256=1Z102AO7Gb8Z8mHYahnZy2fxBA-9_vY0ZtWyNNk1cf4,5255 +joblib/test/test_dask.py,sha256=X2MBEYvz5WQwzGZRN04JNgk_75iIHF96yA1F1t1sK_Y,22932 +joblib/test/test_disk.py,sha256=0EaWGENlosrqwrSZvquPQw3jhqay1KD1NRlQ6YLHOOM,2223 +joblib/test/test_func_inspect.py,sha256=RsORR-j48SfXrNBQbb5i-SdmfU7zk2Mr0IKvcu8m1tw,9314 +joblib/test/test_func_inspect_special_encoding.py,sha256=5xILDjSO-xtjQAMLvMeVD-L7IG4ZURb2gvBiShaDE78,145 +joblib/test/test_hashing.py,sha256=wZeTJMX8C8ua3fJsKAI7MKtperUfZf1fLt0ZaOjvSKw,15820 +joblib/test/test_init.py,sha256=Y6y6Hcqa_cqwQ8S8ozUQ180y_RfkRajfZ_fDp2UXgbw,423 +joblib/test/test_logger.py,sha256=FA9ohTNcqIFViQK60_rwZ5PEGL2zoYN5qBOrDwFqVzI,941 +joblib/test/test_memmapping.py,sha256=z0aanbEs3yCDKShyW3IYlLkTARwdvqVTb4beTPRFmjk,43731 +joblib/test/test_memory.py,sha256=yHoTaoYsWEaDo0CQLNHrBwY5cCpYtIFc3Hi5PArB0iM,51011 +joblib/test/test_memory_async.py,sha256=tUoCI9dngR2AuJjAAKXElJIiz2Qm4AJGdXKn9c8lWaM,5245 +joblib/test/test_missing_multiprocessing.py,sha256=FVoS91krFZogIoDFScyZuJPpaeiq6O-aLAxug0qCQyY,1171 +joblib/test/test_module.py,sha256=IABzz5JmdeY_Adk_vZ0776JN94Ra7tWxDA7DPDNdJKI,1942 +joblib/test/test_numpy_pickle.py,sha256=QExCnBSG-EXdVKnoDkJjNFk6kbX0FDeGeR50wtLHiso,42130 +joblib/test/test_numpy_pickle_compat.py,sha256=paMz1G3Fr9SHdjFmKcG1ec6B5h_S-XE6WRtfHmX9r50,609 +joblib/test/test_numpy_pickle_utils.py,sha256=iB2Ve1TYYUEN3DQiNB5qUxk_QxeIXl7Jpgv4TwkFWTY,382 +joblib/test/test_parallel.py,sha256=_13kli8GYyclwh2QsxysXrRJa44o3gb3FEpSY61ag94,78095 +joblib/test/test_store_backends.py,sha256=DyK1f7PTSPErzhk27gaRoMe2UQrstIz6fnvZh4hKIf0,3057 +joblib/test/test_testing.py,sha256=jL-Ph5pzUJSXOgY2rqbjMRp2y3i3CCWmEi-Lbw4Wzr8,2520 +joblib/test/test_utils.py,sha256=qSD3IJnIGyLiJlUSbEQ0ILVqwG18LW6u50BqTJ3Cn3o,1015 +joblib/test/testutils.py,sha256=A1bm-A5Ydis2iZJVI2-r3aFKUufWR42NZ8Yttrp8mzg,252 +joblib/testing.py,sha256=lK8HOBvrpXcTYUCSvpE-M2ede_dTVJzcmyw-9BrBsOc,3029 diff --git a/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/WHEEL b/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..e7fa31b6f3f78deb1022c1f7927f07d4d16da822 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca4af27e2b6e9917d9600060588a18cc9e3cc78c --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib-1.5.3.dist-info/top_level.txt @@ -0,0 +1 @@ +joblib diff --git a/py311/lib/python3.11/site-packages/joblib/__init__.py 
b/py311/lib/python3.11/site-packages/joblib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7dac90813941921a848659b3810b34b364ac3596 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/__init__.py @@ -0,0 +1,163 @@ +"""Joblib is a set of tools to provide **lightweight pipelining in +Python**. In particular: + +1. transparent disk-caching of functions and lazy re-evaluation + (memoize pattern) + +2. easy simple parallel computing + +Joblib is optimized to be **fast** and **robust** on large +data in particular and has specific optimizations for `numpy` arrays. It is +**BSD-licensed**. + + + ==================== =============================================== + **Documentation:** https://joblib.readthedocs.io + + **Download:** https://pypi.python.org/pypi/joblib#downloads + + **Source code:** https://github.com/joblib/joblib + + **Report issues:** https://github.com/joblib/joblib/issues + ==================== =============================================== + + +Vision +-------- + +The vision is to provide tools to easily achieve better performance and +reproducibility when working with long running jobs. + + * **Avoid computing the same thing twice**: code is often rerun again and + again, for instance when prototyping computational-heavy jobs (as in + scientific development), but hand-crafted solutions to alleviate this + issue are error-prone and often lead to unreproducible results. + + * **Persist to disk transparently**: efficiently persisting + arbitrary objects containing large data is hard. Using + joblib's caching mechanism avoids hand-written persistence and + implicitly links the file on disk to the execution context of + the original Python object. As a result, joblib's persistence is + good for resuming an application status or computational job, eg + after a crash. + +Joblib addresses these problems while **leaving your code and your flow +control as unmodified as possible** (no framework, no new paradigms). + +Main features +------------------ + +1) **Transparent and fast disk-caching of output value:** a memoize or + make-like functionality for Python functions that works well for + arbitrary Python objects, including very large numpy arrays. Separate + persistence and flow-execution logic from domain logic or algorithmic + code by writing the operations as a set of steps with well-defined + inputs and outputs: Python functions. Joblib can save their + computation to disk and rerun it only if necessary:: + + >>> from joblib import Memory + >>> location = 'your_cache_dir_goes_here' + >>> mem = Memory(location, verbose=1) + >>> import numpy as np + >>> a = np.vander(np.arange(3)).astype(float) + >>> square = mem.cache(np.square) + >>> b = square(a) # doctest: +ELLIPSIS + ______________________________________________________________________... + [Memory] Calling ...square... 
+ square(array([[0., 0., 1.], + [1., 1., 1.], + [4., 2., 1.]])) + _________________________________________________...square - ...s, 0.0min + + >>> c = square(a) + >>> # The above call did not trigger an evaluation + +2) **Embarrassingly parallel helper:** to make it easy to write readable + parallel code and debug it quickly:: + + >>> from joblib import Parallel, delayed + >>> from math import sqrt + >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10)) + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] + + +3) **Fast compressed Persistence**: a replacement for pickle to work + efficiently on Python objects containing large data ( + *joblib.dump* & *joblib.load* ). + +.. + >>> import shutil ; shutil.rmtree(location) + +""" + +# PEP0440 compatible formatted version, see: +# https://www.python.org/dev/peps/pep-0440/ +# +# Generic release markers: +# X.Y +# X.Y.Z # For bugfix releases +# +# Admissible pre-release markers: +# X.YaN # Alpha release +# X.YbN # Beta release +# X.YrcN # Release Candidate +# X.Y # Final release +# +# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. +# 'X.Y.dev0' is the canonical version of 'X.Y.dev' +# +__version__ = "1.5.3" + + +import os + +from ._cloudpickle_wrapper import wrap_non_picklable_objects +from ._parallel_backends import ParallelBackendBase +from ._store_backends import StoreBackendBase +from .compressor import register_compressor +from .hashing import hash +from .logger import Logger, PrintTime +from .memory import MemorizedResult, Memory, expires_after, register_store_backend +from .numpy_pickle import dump, load +from .parallel import ( + Parallel, + cpu_count, + delayed, + effective_n_jobs, + parallel_backend, + parallel_config, + register_parallel_backend, +) + +__all__ = [ + # On-disk result caching + "Memory", + "MemorizedResult", + "expires_after", + # Parallel code execution + "Parallel", + "delayed", + "cpu_count", + "effective_n_jobs", + "wrap_non_picklable_objects", + # Context to change the backend globally + "parallel_config", + "parallel_backend", + # Helpers to define and register store/parallel backends + "ParallelBackendBase", + "StoreBackendBase", + "register_compressor", + "register_parallel_backend", + "register_store_backend", + # Helpers kept for backward compatibility + "PrintTime", + "Logger", + "hash", + "dump", + "load", +] + + +# Workaround issue discovered in intel-openmp 2019.5: +# https://github.com/ContinuumIO/anaconda-issues/issues/11294 +os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE") diff --git a/py311/lib/python3.11/site-packages/joblib/_cloudpickle_wrapper.py b/py311/lib/python3.11/site-packages/joblib/_cloudpickle_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..b09ea068e80f9a64f111e93835db4d4f8cd93694 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/_cloudpickle_wrapper.py @@ -0,0 +1,18 @@ +""" +Small shim of loky's cloudpickle_wrapper to avoid failure when +multiprocessing is not available. 
+""" + +from ._multiprocessing_helpers import mp + + +def _my_wrap_non_picklable_objects(obj, keep_wrapper=True): + return obj + + +if mp is not None: + from .externals.loky import wrap_non_picklable_objects +else: + wrap_non_picklable_objects = _my_wrap_non_picklable_objects + +__all__ = ["wrap_non_picklable_objects"] diff --git a/py311/lib/python3.11/site-packages/joblib/_dask.py b/py311/lib/python3.11/site-packages/joblib/_dask.py new file mode 100644 index 0000000000000000000000000000000000000000..fa2fea2d4029f2e429e3cdd6a4ff1401777b45c5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/_dask.py @@ -0,0 +1,381 @@ +from __future__ import absolute_import, division, print_function + +import asyncio +import concurrent.futures +import contextlib +import time +import weakref +from uuid import uuid4 + +from ._utils import ( + _retrieve_traceback_capturing_wrapped_call, + _TracebackCapturingWrapper, +) +from .parallel import AutoBatchingMixin, ParallelBackendBase, parallel_config + +try: + import dask + import distributed +except ImportError: + dask = None + distributed = None + +if dask is not None and distributed is not None: + from dask.distributed import ( + Client, + as_completed, + get_client, + rejoin, + secede, + ) + from dask.sizeof import sizeof + from dask.utils import funcname + from distributed.utils import thread_state + + try: + # asyncio.TimeoutError, Python3-only error thrown by recent versions of + # distributed + from distributed.utils import TimeoutError as _TimeoutError + except ImportError: + from tornado.gen import TimeoutError as _TimeoutError + + +def is_weakrefable(obj): + try: + weakref.ref(obj) + return True + except TypeError: + return False + + +class _WeakKeyDictionary: + """A variant of weakref.WeakKeyDictionary for unhashable objects. + + This datastructure is used to store futures for broadcasted data objects + such as large numpy arrays or pandas dataframes that are not hashable and + therefore cannot be used as keys of traditional python dicts. + + Furthermore using a dict with id(array) as key is not safe because the + Python is likely to reuse id of recently collected arrays. + """ + + def __init__(self): + self._data = {} + + def __getitem__(self, obj): + ref, val = self._data[id(obj)] + if ref() is not obj: + # In case of a race condition with on_destroy. + raise KeyError(obj) + return val + + def __setitem__(self, obj, value): + key = id(obj) + try: + ref, _ = self._data[key] + if ref() is not obj: + # In case of race condition with on_destroy. + raise KeyError(obj) + except KeyError: + # Insert the new entry in the mapping along with a weakref + # callback to automatically delete the entry from the mapping + # as soon as the object used as key is garbage collected. 
+ def on_destroy(_): + del self._data[key] + + ref = weakref.ref(obj, on_destroy) + self._data[key] = ref, value + + def __len__(self): + return len(self._data) + + def clear(self): + self._data.clear() + + +def _funcname(x): + try: + if isinstance(x, list): + x = x[0][0] + except Exception: + pass + return funcname(x) + + +def _make_tasks_summary(tasks): + """Summarize of list of (func, args, kwargs) function calls""" + unique_funcs = {func for func, args, kwargs in tasks} + + if len(unique_funcs) == 1: + mixed = False + else: + mixed = True + return len(tasks), mixed, _funcname(tasks) + + +class Batch: + """dask-compatible wrapper that executes a batch of tasks""" + + def __init__(self, tasks): + # collect some metadata from the tasks to ease Batch calls + # introspection when debugging + self._num_tasks, self._mixed, self._funcname = _make_tasks_summary(tasks) + + def __call__(self, tasks=None): + results = [] + with parallel_config(backend="dask"): + for func, args, kwargs in tasks: + results.append(func(*args, **kwargs)) + return results + + def __repr__(self): + descr = f"batch_of_{self._funcname}_{self._num_tasks}_calls" + if self._mixed: + descr = "mixed_" + descr + return descr + + +def _joblib_probe_task(): + # Noop used by the joblib connector to probe when workers are ready. + pass + + +class DaskDistributedBackend(AutoBatchingMixin, ParallelBackendBase): + MIN_IDEAL_BATCH_DURATION = 0.2 + MAX_IDEAL_BATCH_DURATION = 1.0 + supports_retrieve_callback = True + default_n_jobs = -1 + + def __init__( + self, + scheduler_host=None, + scatter=None, + client=None, + loop=None, + wait_for_workers_timeout=10, + **submit_kwargs, + ): + super().__init__() + + if distributed is None: + msg = ( + "You are trying to use 'dask' as a joblib parallel backend " + "but dask is not installed. Please install dask " + "to fix this error." 
+ ) + raise ValueError(msg) + + if client is None: + if scheduler_host: + client = Client(scheduler_host, loop=loop, set_as_default=False) + else: + try: + client = get_client() + except ValueError as e: + msg = ( + "To use Joblib with Dask first create a Dask Client" + "\n\n" + " from dask.distributed import Client\n" + " client = Client()\n" + "or\n" + " client = Client('scheduler-address:8786')" + ) + raise ValueError(msg) from e + + self.client = client + + if scatter is not None and not isinstance(scatter, (list, tuple)): + raise TypeError( + "scatter must be a list/tuple, got `%s`" % type(scatter).__name__ + ) + + if scatter is not None and len(scatter) > 0: + # Keep a reference to the scattered data to keep the ids the same + self._scatter = list(scatter) + scattered = self.client.scatter(scatter, broadcast=True) + self.data_futures = {id(x): f for x, f in zip(scatter, scattered)} + else: + self._scatter = [] + self.data_futures = {} + self.wait_for_workers_timeout = wait_for_workers_timeout + self.submit_kwargs = submit_kwargs + self.waiting_futures = as_completed( + [], loop=client.loop, with_results=True, raise_errors=False + ) + self._results = {} + self._callbacks = {} + + async def _collect(self): + while self._continue: + async for future, result in self.waiting_futures: + cf_future = self._results.pop(future) + callback = self._callbacks.pop(future) + if future.status == "error": + typ, exc, tb = result + cf_future.set_exception(exc) + else: + cf_future.set_result(result) + callback(result) + await asyncio.sleep(0.01) + + def __reduce__(self): + return (DaskDistributedBackend, ()) + + def get_nested_backend(self): + return DaskDistributedBackend(client=self.client), -1 + + def configure(self, n_jobs=1, parallel=None, **backend_args): + self.parallel = parallel + return self.effective_n_jobs(n_jobs) + + def start_call(self): + self._continue = True + self.client.loop.add_callback(self._collect) + self.call_data_futures = _WeakKeyDictionary() + + def stop_call(self): + # The explicit call to clear is required to break a cycling reference + # to the futures. + self._continue = False + # wait for the future collection routine (self._backend._collect) to + # finish in order to limit asyncio warnings due to aborting _collect + # during a following backend termination call + time.sleep(0.01) + self.call_data_futures.clear() + + def effective_n_jobs(self, n_jobs): + effective_n_jobs = sum(self.client.ncores().values()) + if effective_n_jobs != 0 or not self.wait_for_workers_timeout: + return effective_n_jobs + + # If there is no worker, schedule a probe task to wait for the workers + # to come up and be available. If the dask cluster is in adaptive mode + # task might cause the cluster to provision some workers. + try: + self.client.submit(_joblib_probe_task).result( + timeout=self.wait_for_workers_timeout + ) + except _TimeoutError as e: + error_msg = ( + "DaskDistributedBackend has no worker after {} seconds. " + "Make sure that workers are started and can properly connect " + "to the scheduler and increase the joblib/dask connection " + "timeout with:\n\n" + "parallel_config(backend='dask', wait_for_workers_timeout={})" + ).format( + self.wait_for_workers_timeout, + max(10, 2 * self.wait_for_workers_timeout), + ) + raise TimeoutError(error_msg) from e + return sum(self.client.ncores().values()) + + async def _to_func_args(self, func): + itemgetters = dict() + + # Futures that are dynamically generated during a single call to + # Parallel.__call__. 
+ call_data_futures = getattr(self, "call_data_futures", None) + + async def maybe_to_futures(args): + out = [] + for arg in args: + arg_id = id(arg) + if arg_id in itemgetters: + out.append(itemgetters[arg_id]) + continue + + f = self.data_futures.get(arg_id, None) + if f is None and call_data_futures is not None: + try: + f = await call_data_futures[arg] + except KeyError: + pass + if f is None: + if is_weakrefable(arg) and sizeof(arg) > 1e3: + # Automatically scatter large objects to some of + # the workers to avoid duplicated data transfers. + # Rely on automated inter-worker data stealing if + # more workers need to reuse this data + # concurrently. + # set hash=False - nested scatter calls (i.e + # calling client.scatter inside a dask worker) + # using hash=True often raise CancelledError, + # see dask/distributed#3703 + _coro = self.client.scatter( + arg, asynchronous=True, hash=False + ) + # Centralize the scattering of identical arguments + # between concurrent apply_async callbacks by + # exposing the running coroutine in + # call_data_futures before it completes. + t = asyncio.Task(_coro) + call_data_futures[arg] = t + + f = await t + + if f is not None: + out.append(f) + else: + out.append(arg) + return out + + tasks = [] + for f, args, kwargs in func.items: + args = list(await maybe_to_futures(args)) + kwargs = dict(zip(kwargs.keys(), await maybe_to_futures(kwargs.values()))) + tasks.append((f, args, kwargs)) + + return (Batch(tasks), tasks) + + def apply_async(self, func, callback=None): + cf_future = concurrent.futures.Future() + cf_future.get = cf_future.result # achieve AsyncResult API + + async def f(func, callback): + batch, tasks = await self._to_func_args(func) + key = f"{repr(batch)}-{uuid4().hex}" + + dask_future = self.client.submit( + _TracebackCapturingWrapper(batch), + tasks=tasks, + key=key, + **self.submit_kwargs, + ) + self.waiting_futures.add(dask_future) + self._callbacks[dask_future] = callback + self._results[dask_future] = cf_future + + self.client.loop.add_callback(f, func, callback) + + return cf_future + + def retrieve_result_callback(self, out): + return _retrieve_traceback_capturing_wrapped_call(out) + + def abort_everything(self, ensure_ready=True): + """Tell the client to cancel any task submitted via this instance + + joblib.Parallel will never access those results + """ + with self.waiting_futures.lock: + self.waiting_futures.futures.clear() + while not self.waiting_futures.queue.empty(): + self.waiting_futures.queue.get() + + @contextlib.contextmanager + def retrieval_context(self): + """Override ParallelBackendBase.retrieval_context to avoid deadlocks. + + This removes thread from the worker's thread pool (using 'secede'). + Seceding avoids deadlock in nested parallelism settings. + """ + # See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how + # this is used. + if hasattr(thread_state, "execution_state"): + # we are in a worker. Secede to avoid deadlock. 
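+            # Editor's note: secede() releases this thread's slot in the
+            # worker's thread pool so other tasks can run while we block on
+            # results; rejoin() below re-registers the thread with the pool.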
+ secede() + + yield + + if hasattr(thread_state, "execution_state"): + rejoin() diff --git a/py311/lib/python3.11/site-packages/joblib/_memmapping_reducer.py b/py311/lib/python3.11/site-packages/joblib/_memmapping_reducer.py new file mode 100644 index 0000000000000000000000000000000000000000..d11ec581de8d997162397c92690ef080ace2bb33 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/_memmapping_reducer.py @@ -0,0 +1,715 @@ +""" +Reducer using memory mapping for numpy arrays +""" +# Author: Thomas Moreau +# Copyright: 2017, Thomas Moreau +# License: BSD 3 clause + +import atexit +import errno +import os +import stat +import tempfile +import threading +import time +import warnings +import weakref +from mmap import mmap +from multiprocessing import util +from pickle import HIGHEST_PROTOCOL, PicklingError, dumps, loads, whichmodule +from uuid import uuid4 + +try: + WindowsError +except NameError: + WindowsError = type(None) + +try: + import numpy as np + from numpy.lib.stride_tricks import as_strided +except ImportError: + np = None + +from .backports import make_memmap +from .disk import delete_folder +from .externals.loky.backend import resource_tracker +from .numpy_pickle import dump, load, load_temporary_memmap + +# Some system have a ramdisk mounted by default, we can use it instead of /tmp +# as the default folder to dump big arrays to share with subprocesses. +SYSTEM_SHARED_MEM_FS = "/dev/shm" + +# Minimal number of bytes available on SYSTEM_SHARED_MEM_FS to consider using +# it as the default folder to dump big arrays to share with subprocesses. +SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(2e9) + +# Folder and file permissions to chmod temporary files generated by the +# memmapping pool. Only the owner of the Python process can access the +# temporary files and folder. +FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR +FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR + +# Set used in joblib workers, referencing the filenames of temporary memmaps +# created by joblib to speed up data communication. In child processes, we add +# a finalizer to these memmaps that sends a maybe_unlink call to the +# resource_tracker, in order to free main memory as fast as possible. +JOBLIB_MMAPS = set() + + +def _log_and_unlink(filename): + from .externals.loky.backend.resource_tracker import _resource_tracker + + util.debug( + "[FINALIZER CALL] object mapping to {} about to be deleted," + " decrementing the refcount of the file (pid: {})".format( + os.path.basename(filename), os.getpid() + ) + ) + _resource_tracker.maybe_unlink(filename, "file") + + +def add_maybe_unlink_finalizer(memmap): + util.debug( + "[FINALIZER ADD] adding finalizer to {} (id {}, filename {}, pid {})".format( + type(memmap), id(memmap), os.path.basename(memmap.filename), os.getpid() + ) + ) + weakref.finalize(memmap, _log_and_unlink, memmap.filename) + + +def unlink_file(filename): + """Wrapper around os.unlink with a retry mechanism. + + The retry mechanism has been implemented primarily to overcome a race + condition happening during the finalizer of a np.memmap: when a process + holding the last reference to a mmap-backed np.memmap/np.array is about to + delete this array (and close the reference), it sends a maybe_unlink + request to the resource_tracker. This request can be processed faster than + it takes for the last reference of the memmap to be closed, yielding (on + Windows) a PermissionError in the resource_tracker loop. 
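+
+    With NUM_RETRIES = 10 and a 0.2 second pause between attempts, the
+    tracker retries for roughly two seconds before giving up and re-raising
+    the PermissionError.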
+ """ + NUM_RETRIES = 10 + for retry_no in range(1, NUM_RETRIES + 1): + try: + os.unlink(filename) + break + except PermissionError: + util.debug( + "[ResourceTracker] tried to unlink {}, got PermissionError".format( + filename + ) + ) + if retry_no == NUM_RETRIES: + raise + else: + time.sleep(0.2) + except FileNotFoundError: + # In case of a race condition when deleting the temporary folder, + # avoid noisy FileNotFoundError exception in the resource tracker. + pass + + +resource_tracker._CLEANUP_FUNCS["file"] = unlink_file + + +class _WeakArrayKeyMap: + """A variant of weakref.WeakKeyDictionary for unhashable numpy arrays. + + This datastructure will be used with numpy arrays as obj keys, therefore we + do not use the __get__ / __set__ methods to avoid any conflict with the + numpy fancy indexing syntax. + """ + + def __init__(self): + self._data = {} + + def get(self, obj): + ref, val = self._data[id(obj)] + if ref() is not obj: + # In case of race condition with on_destroy: could never be + # triggered by the joblib tests with CPython. + raise KeyError(obj) + return val + + def set(self, obj, value): + key = id(obj) + try: + ref, _ = self._data[key] + if ref() is not obj: + # In case of race condition with on_destroy: could never be + # triggered by the joblib tests with CPython. + raise KeyError(obj) + except KeyError: + # Insert the new entry in the mapping along with a weakref + # callback to automatically delete the entry from the mapping + # as soon as the object used as key is garbage collected. + def on_destroy(_): + del self._data[key] + + ref = weakref.ref(obj, on_destroy) + self._data[key] = ref, value + + def __getstate__(self): + raise PicklingError("_WeakArrayKeyMap is not pickleable") + + +############################################################################### +# Support for efficient transient pickling of numpy data structures + + +def _get_backing_memmap(a): + """Recursively look up the original np.memmap instance base if any.""" + b = getattr(a, "base", None) + if b is None: + # TODO: check scipy sparse datastructure if scipy is installed + # a nor its descendants do not have a memmap base + return None + + elif isinstance(b, mmap): + # a is already a real memmap instance. + return a + + else: + # Recursive exploration of the base ancestry + return _get_backing_memmap(b) + + +def _get_temp_dir(pool_folder_name, temp_folder=None): + """Get the full path to a subfolder inside the temporary folder. + + Parameters + ---------- + pool_folder_name : str + Sub-folder name used for the serialization of a pool instance. + + temp_folder: str, optional + Folder to be used by the pool for memmapping large arrays + for sharing memory with worker processes. If None, this will try in + order: + + - a folder pointed by the JOBLIB_TEMP_FOLDER environment + variable, + - /dev/shm if the folder exists and is writable: this is a + RAMdisk filesystem available by default on modern Linux + distributions, + - the default system temporary folder that can be + overridden with TMP, TMPDIR or TEMP environment + variables, typically /tmp under Unix operating systems. + + Returns + ------- + pool_folder : str + full path to the temporary folder + use_shared_mem : bool + whether the temporary folder is written to the system shared memory + folder or some other temporary folder. 
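+
+    For instance, on a Linux machine with a large enough /dev/shm and no
+    JOBLIB_TEMP_FOLDER override, this typically returns something like
+    ``('/dev/shm/<pool_folder_name>', True)``.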
+ """ + use_shared_mem = False + if temp_folder is None: + temp_folder = os.environ.get("JOBLIB_TEMP_FOLDER", None) + if temp_folder is None: + if os.path.exists(SYSTEM_SHARED_MEM_FS) and hasattr(os, "statvfs"): + try: + shm_stats = os.statvfs(SYSTEM_SHARED_MEM_FS) + available_nbytes = shm_stats.f_bsize * shm_stats.f_bavail + if available_nbytes > SYSTEM_SHARED_MEM_FS_MIN_SIZE: + # Try to see if we have write access to the shared mem + # folder only if it is reasonably large (that is 2GB or + # more). + temp_folder = SYSTEM_SHARED_MEM_FS + pool_folder = os.path.join(temp_folder, pool_folder_name) + if not os.path.exists(pool_folder): + os.makedirs(pool_folder) + use_shared_mem = True + except (IOError, OSError): + # Missing rights in the /dev/shm partition, fallback to regular + # temp folder. + temp_folder = None + if temp_folder is None: + # Fallback to the default tmp folder, typically /tmp + temp_folder = tempfile.gettempdir() + temp_folder = os.path.abspath(os.path.expanduser(temp_folder)) + pool_folder = os.path.join(temp_folder, pool_folder_name) + return pool_folder, use_shared_mem + + +def has_shareable_memory(a): + """Return True if a is backed by some mmap buffer directly or not.""" + return _get_backing_memmap(a) is not None + + +def _strided_from_memmap( + filename, + dtype, + mode, + offset, + order, + shape, + strides, + total_buffer_len, + unlink_on_gc_collect, +): + """Reconstruct an array view on a memory mapped file.""" + if mode == "w+": + # Do not zero the original data when unpickling + mode = "r+" + + if strides is None: + # Simple, contiguous memmap + return make_memmap( + filename, + dtype=dtype, + shape=shape, + mode=mode, + offset=offset, + order=order, + unlink_on_gc_collect=unlink_on_gc_collect, + ) + else: + # For non-contiguous data, memmap the total enclosing buffer and then + # extract the non-contiguous view with the stride-tricks API + base = make_memmap( + filename, + dtype=dtype, + shape=total_buffer_len, + offset=offset, + mode=mode, + order=order, + unlink_on_gc_collect=unlink_on_gc_collect, + ) + return as_strided(base, shape=shape, strides=strides) + + +def _reduce_memmap_backed(a, m): + """Pickling reduction for memmap backed arrays. + + a is expected to be an instance of np.ndarray (or np.memmap) + m is expected to be an instance of np.memmap on the top of the ``base`` + attribute ancestry of a. ``m.base`` should be the real python mmap object. + """ + # offset that comes from the striding differences between a and m + util.debug( + "[MEMMAP REDUCE] reducing a memmap-backed array (shape, {}, pid: {})".format( + a.shape, os.getpid() + ) + ) + try: + from numpy.lib.array_utils import byte_bounds + except (ModuleNotFoundError, ImportError): + # Backward-compat for numpy < 2.0 + from numpy import byte_bounds + a_start, a_end = byte_bounds(a) + m_start = byte_bounds(m)[0] + offset = a_start - m_start + + # offset from the backing memmap + offset += m.offset + + # 1D arrays are both F and C contiguous, so only set the flag in + # higher dimensions. See https://github.com/joblib/joblib/pull/1704. + if m.ndim > 1 and m.flags["F_CONTIGUOUS"]: + order = "F" + else: + # The backing memmap buffer is necessarily contiguous hence C if not + # Fortran + order = "C" + + if a.flags["F_CONTIGUOUS"] or a.flags["C_CONTIGUOUS"]: + # If the array is a contiguous view, no need to pass the strides + strides = None + total_buffer_len = None + else: + # Compute the total number of items to map from which the strided + # view will be extracted. 
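+        # ``a_end - a_start`` is the byte span covered by the strided view, so
+        # dividing by the item size gives the length of the enclosing flat
+        # buffer that ``_strided_from_memmap`` will map before re-striding it
+        # with ``as_strided``.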
+            strides = a.strides
+            total_buffer_len = (a_end - a_start) // a.itemsize
+
+        return (
+            _strided_from_memmap,
+            (
+                m.filename,
+                a.dtype,
+                m.mode,
+                offset,
+                order,
+                a.shape,
+                strides,
+                total_buffer_len,
+                False,
+            ),
+        )
+
+
+def reduce_array_memmap_backward(a):
+    """reduce a np.array or a np.memmap from a child process"""
+    m = _get_backing_memmap(a)
+    if isinstance(m, np.memmap) and m.filename not in JOBLIB_MMAPS:
+        # if a is backed by a memmapped file, reconstruct a using the
+        # memmapped file.
+        return _reduce_memmap_backed(a, m)
+    else:
+        # a is either a regular (not memmap-backed) numpy array, or an array
+        # backed by a shared temporary file created by joblib. In the latter
+        # case, in order to limit the lifespan of these temporary files, we
+        # serialize the memmap as a regular numpy array, and decref the
+        # file backing the memmap (done implicitly in a previously registered
+        # finalizer, see ``unlink_on_gc_collect`` for more details)
+        return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),))
+
+
+class ArrayMemmapForwardReducer(object):
+    """Reducer callable to dump large arrays to memmap files.
+
+    Parameters
+    ----------
+    max_nbytes: int
+        Threshold to trigger memmapping of large arrays to files created in
+        a folder.
+    temp_folder_resolver: callable
+        A callable in charge of resolving a temporary folder name where files
+        for backing memmapped arrays are created.
+    mmap_mode: 'r', 'r+' or 'c'
+        Mode for the created memmap datastructure. See the documentation of
+        numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
+        automatically to avoid zeroing the data on unpickling.
+    verbose: int, optional, 0 by default
+        If verbose > 0, memmap creations are logged.
+        If verbose > 1, memmap creations, reuse and array pickling are all
+        logged.
+    prewarm: bool, optional, True by default.
+        Force a read on newly memmapped arrays to make sure that the OS
+        pre-caches them in memory. This can be useful to avoid concurrent
+        disk access when the same data array is passed to different worker
+        processes.
+    """
+
+    def __init__(
+        self,
+        max_nbytes,
+        temp_folder_resolver,
+        mmap_mode,
+        unlink_on_gc_collect,
+        verbose=0,
+        prewarm=True,
+    ):
+        self._max_nbytes = max_nbytes
+        self._temp_folder_resolver = temp_folder_resolver
+        self._mmap_mode = mmap_mode
+        self.verbose = int(verbose)
+        if prewarm == "auto":
+            self._prewarm = not self._temp_folder.startswith(SYSTEM_SHARED_MEM_FS)
+        else:
+            self._prewarm = prewarm
+        self._memmaped_arrays = _WeakArrayKeyMap()
+        self._temporary_memmaped_filenames = set()
+        self._unlink_on_gc_collect = unlink_on_gc_collect
+
+    @property
+    def _temp_folder(self):
+        return self._temp_folder_resolver()
+
+    def __reduce__(self):
+        # The ArrayMemmapForwardReducer is passed to the children processes: it
+        # needs to be pickled but the _WeakArrayKeyMap needs to be skipped as
+        # it's only guaranteed to be consistent with the parent process memory
+        # garbage collection.
+        # Although this reducer is pickled, it is not needed in its destination
+        # process (child processes), as we only use this reducer to send
+        # memmaps from the parent process to the children processes. For this
+        # reason, we can afford skipping the resolver (which would otherwise
+        # be unpicklable) and pass it as None instead.
+ args = (self._max_nbytes, None, self._mmap_mode, self._unlink_on_gc_collect) + kwargs = { + "verbose": self.verbose, + "prewarm": self._prewarm, + } + return ArrayMemmapForwardReducer, args, kwargs + + def __call__(self, a): + m = _get_backing_memmap(a) + if m is not None and isinstance(m, np.memmap): + # a is already backed by a memmap file, let's reuse it directly + return _reduce_memmap_backed(a, m) + + if ( + not a.dtype.hasobject + and self._max_nbytes is not None + and a.nbytes > self._max_nbytes + ): + # check that the folder exists (lazily create the pool temp folder + # if required) + try: + os.makedirs(self._temp_folder) + os.chmod(self._temp_folder, FOLDER_PERMISSIONS) + except OSError as e: + if e.errno != errno.EEXIST: + raise e + + try: + basename = self._memmaped_arrays.get(a) + except KeyError: + # Generate a new unique random filename. The process and thread + # ids are only useful for debugging purpose and to make it + # easier to cleanup orphaned files in case of hard process + # kill (e.g. by "kill -9" or segfault). + basename = "{}-{}-{}.pkl".format( + os.getpid(), id(threading.current_thread()), uuid4().hex + ) + self._memmaped_arrays.set(a, basename) + filename = os.path.join(self._temp_folder, basename) + + # In case the same array with the same content is passed several + # times to the pool subprocess children, serialize it only once + + is_new_memmap = filename not in self._temporary_memmaped_filenames + + # add the memmap to the list of temporary memmaps created by joblib + self._temporary_memmaped_filenames.add(filename) + + if self._unlink_on_gc_collect: + # Bump reference count of the memmap by 1 to account for + # shared usage of the memmap by a child process. The + # corresponding decref call will be executed upon calling + # resource_tracker.maybe_unlink, registered as a finalizer in + # the child. + # the incref/decref calls here are only possible when the child + # and the parent share the same resource_tracker. It is not the + # case for the multiprocessing backend, but it does not matter + # because unlinking a memmap from a child process is only + # useful to control the memory usage of long-lasting child + # processes, while the multiprocessing-based pools terminate + # their workers at the end of a map() call. + resource_tracker.register(filename, "file") + + if is_new_memmap: + # Incref each temporary memmap created by joblib one extra + # time. This means that these memmaps will only be deleted + # once an extra maybe_unlink() is called, which is done once + # all the jobs have completed (or been canceled) in the + # Parallel._terminate_backend() method. + resource_tracker.register(filename, "file") + + if not os.path.exists(filename): + util.debug( + "[ARRAY DUMP] Pickling new array (shape={}, dtype={}) " + "creating a new memmap at {}".format(a.shape, a.dtype, filename) + ) + for dumped_filename in dump(a, filename): + os.chmod(dumped_filename, FILE_PERMISSIONS) + + if self._prewarm: + # Warm up the data by accessing it. This operation ensures + # that the disk access required to create the memmapping + # file are performed in the reducing process and avoids + # concurrent memmap creation in multiple children + # processes. 
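+                    # Taking the maximum reads every element, hence every page
+                    # of the freshly dumped file, so the OS page cache is
+                    # populated here in the parent process.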
+                    load(filename, mmap_mode=self._mmap_mode).max()
+
+            else:
+                util.debug(
+                    "[ARRAY DUMP] Pickling known array (shape={}, dtype={}) "
+                    "reusing memmap file: {}".format(
+                        a.shape, a.dtype, os.path.basename(filename)
+                    )
+                )
+
+            # The worker process will use joblib.load to memmap the data
+            return (
+                load_temporary_memmap,
+                (filename, self._mmap_mode, self._unlink_on_gc_collect),
+            )
+        else:
+            # do not convert a into memmap, let pickler do its usual copy with
+            # the default system pickler
+            util.debug(
+                "[ARRAY DUMP] Pickling array (NO MEMMAPPING) (shape={}, "
+                " dtype={}).".format(a.shape, a.dtype)
+            )
+            return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
+
+
+def get_memmapping_reducers(
+    forward_reducers=None,
+    backward_reducers=None,
+    temp_folder_resolver=None,
+    max_nbytes=1e6,
+    mmap_mode="r",
+    verbose=0,
+    prewarm=False,
+    unlink_on_gc_collect=True,
+    **kwargs,
+):
+    """Construct a pair of memmapping reducers linked to a tmpdir.
+
+    This function manages the creation and the clean-up of the temporary
+    folders underlying the memory maps and should be used to get the reducers
+    necessary to construct a joblib pool or executor.
+    """
+    if forward_reducers is None:
+        forward_reducers = dict()
+    if backward_reducers is None:
+        backward_reducers = dict()
+
+    if np is not None:
+        # Register smart numpy.ndarray reducers that detect memmap-backed
+        # arrays and that are also able to dump large in-memory arrays over
+        # the max_nbytes threshold to memmap files.
+        forward_reduce_ndarray = ArrayMemmapForwardReducer(
+            max_nbytes,
+            temp_folder_resolver,
+            mmap_mode,
+            unlink_on_gc_collect,
+            verbose,
+            prewarm=prewarm,
+        )
+        forward_reducers[np.ndarray] = forward_reduce_ndarray
+        forward_reducers[np.memmap] = forward_reduce_ndarray
+
+        # Communication from child process to the parent process always
+        # pickles in-memory numpy.ndarray without dumping them as memmap
+        # to avoid confusing the caller and making it tricky to collect the
+        # temporary folder
+        backward_reducers[np.ndarray] = reduce_array_memmap_backward
+        backward_reducers[np.memmap] = reduce_array_memmap_backward
+
+    return forward_reducers, backward_reducers
+
+
+class TemporaryResourcesManager(object):
+    """Stateful object able to manage temporary folder and pickles
+
+    It exposes:
+    - a per-context folder name resolving API that memmap-based reducers will
+      rely on to know where to pickle the temporary memmaps
+    - a temporary file/folder management API that internally uses the
+      resource_tracker.
+    """
+
+    def __init__(self, temp_folder_root=None, context_id=None):
+        self._current_temp_folder = None
+        self._temp_folder_root = temp_folder_root
+        self._use_shared_mem = None
+        self._cached_temp_folders = dict()
+        self._id = uuid4().hex
+        self._finalizers = {}
+        if context_id is None:
+            # It would be safer to not assign a default context id (less silent
+            # bugs), but doing this while maintaining backward compatibility
+            # with the previous, context-unaware version of
+            # get_memmapping_executor exposes too many low-level details.
+            context_id = uuid4().hex
+        self.set_current_context(context_id)
+
+    def set_current_context(self, context_id):
+        self._current_context_id = context_id
+        self.register_new_context(context_id)
+
+    def register_new_context(self, context_id):
+        # Prepare a sub-folder name specific to a context (usually a unique id
+        # generated by each instance of the Parallel class). Do not create it
+        # in advance, to spare FS write access if no array is to be dumped.
+ if context_id in self._cached_temp_folders: + return + else: + # During its lifecycle, one Parallel object can have several + # executors associated to it (for instance, if a loky worker raises + # an exception, joblib shutdowns the executor and instantly + # recreates a new one before raising the error - see + # ``ensure_ready``. Because we don't want two executors tied to + # the same Parallel object (and thus the same context id) to + # register/use/delete the same folder, we also add an id specific + # to the current Manager (and thus specific to its associated + # executor) to the folder name. + new_folder_name = "joblib_memmapping_folder_{}_{}_{}".format( + os.getpid(), self._id, context_id + ) + new_folder_path, _ = _get_temp_dir(new_folder_name, self._temp_folder_root) + self.register_folder_finalizer(new_folder_path, context_id) + self._cached_temp_folders[context_id] = new_folder_path + + def resolve_temp_folder_name(self): + """Return a folder name specific to the currently activated context""" + return self._cached_temp_folders[self._current_context_id] + + # resource management API + + def register_folder_finalizer(self, pool_subfolder, context_id): + # Register the garbage collector at program exit in case caller forgets + # to call terminate explicitly: note we do not pass any reference to + # ensure that this callback won't prevent garbage collection of + # parallel instance and related file handler resources such as POSIX + # semaphores and pipes + pool_module_name = whichmodule(delete_folder, "delete_folder") + resource_tracker.register(pool_subfolder, "folder") + + def _cleanup(): + # In some cases the Python runtime seems to set delete_folder to + # None just before exiting when accessing the delete_folder + # function from the closure namespace. So instead we reimport + # the delete_folder function explicitly. + # https://github.com/joblib/joblib/issues/328 + # We cannot just use from 'joblib.pool import delete_folder' + # because joblib should only use relative imports to allow + # easy vendoring. + delete_folder = __import__( + pool_module_name, fromlist=["delete_folder"] + ).delete_folder + try: + delete_folder(pool_subfolder, allow_non_empty=True) + resource_tracker.unregister(pool_subfolder, "folder") + except OSError: + warnings.warn( + "Failed to delete temporary folder: {}".format(pool_subfolder) + ) + + self._finalizers[context_id] = atexit.register(_cleanup) + + def _clean_temporary_resources( + self, context_id=None, force=False, allow_non_empty=False + ): + """Clean temporary resources created by a process-based pool""" + if context_id is None: + # Iterates over a copy of the cache keys to avoid Error due to + # iterating over a changing size dictionary. + for context_id in list(self._cached_temp_folders): + self._clean_temporary_resources( + context_id, force=force, allow_non_empty=allow_non_empty + ) + else: + temp_folder = self._cached_temp_folders.get(context_id) + if temp_folder and os.path.exists(temp_folder): + for filename in os.listdir(temp_folder): + if force: + # Some workers have failed and the ref counted might + # be off. The workers should have shut down by this + # time so forcefully clean up the files. + resource_tracker.unregister( + os.path.join(temp_folder, filename), "file" + ) + else: + resource_tracker.maybe_unlink( + os.path.join(temp_folder, filename), "file" + ) + + # When forcing clean-up, try to delete the folder even if some + # files are still in it. 
Otherwise, try to delete the folder + allow_non_empty |= force + + # Clean up the folder if possible, either if it is empty or + # if none of the files in it are in used and allow_non_empty. + try: + delete_folder(temp_folder, allow_non_empty=allow_non_empty) + # Forget the folder once it has been deleted + self._cached_temp_folders.pop(context_id, None) + resource_tracker.unregister(temp_folder, "folder") + + # Also cancel the finalizers that gets triggered at gc. + finalizer = self._finalizers.pop(context_id, None) + if finalizer is not None: + atexit.unregister(finalizer) + + except OSError: + # Temporary folder cannot be deleted right now. + # This folder will be cleaned up by an atexit + # finalizer registered by the memmapping_reducer. + pass diff --git a/py311/lib/python3.11/site-packages/joblib/_multiprocessing_helpers.py b/py311/lib/python3.11/site-packages/joblib/_multiprocessing_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..1b4e7d20e9280cf20befbc737ac019a0ec66f4ba --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/_multiprocessing_helpers.py @@ -0,0 +1,51 @@ +"""Helper module to factorize the conditional multiprocessing import logic + +We use a distinct module to simplify import statements and avoid introducing +circular dependencies (for instance for the assert_spawning name). +""" + +import os +import warnings + +# Obtain possible configuration from the environment, assuming 1 (on) +# by default, upon 0 set to None. Should instructively fail if some non +# 0/1 value is set. +mp = int(os.environ.get("JOBLIB_MULTIPROCESSING", 1)) or None +if mp: + try: + import _multiprocessing # noqa + import multiprocessing as mp + except ImportError: + mp = None + +# 2nd stage: validate that locking is available on the system and +# issue a warning if not +if mp is not None: + try: + # try to create a named semaphore using SemLock to make sure they are + # available on this platform. We use the low level object + # _multiprocessing.SemLock to avoid spawning a resource tracker on + # Unix system or changing the default backend. + import tempfile + from _multiprocessing import SemLock + + _rand = tempfile._RandomNameSequence() + for i in range(100): + try: + name = "/joblib-{}-{}".format(os.getpid(), next(_rand)) + _sem = SemLock(0, 0, 1, name=name, unlink=True) + del _sem # cleanup + break + except FileExistsError as e: # pragma: no cover + if i >= 99: + raise FileExistsError("cannot find name for semaphore") from e + except (FileExistsError, AttributeError, ImportError, OSError) as e: + mp = None + warnings.warn("%s. joblib will operate in serial mode" % (e,)) + + +# 3rd stage: backward compat for the assert_spawning helper +if mp is not None: + from multiprocessing.context import assert_spawning +else: + assert_spawning = None diff --git a/py311/lib/python3.11/site-packages/joblib/_parallel_backends.py b/py311/lib/python3.11/site-packages/joblib/_parallel_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..53114a8512fcbcd320f2d62cebe06f1cb9102fa8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/_parallel_backends.py @@ -0,0 +1,753 @@ +""" +Backends for embarrassingly parallel code. 
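+
+The concrete backends defined below are SequentialBackend, ThreadingBackend,
+MultiprocessingBackend and LokyBackend, together with the mixins and helpers
+they share.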
+""" + +import contextlib +import gc +import os +import threading +import warnings +from abc import ABCMeta, abstractmethod + +from ._multiprocessing_helpers import mp +from ._utils import ( + _retrieve_traceback_capturing_wrapped_call, + _TracebackCapturingWrapper, +) + +if mp is not None: + from multiprocessing.pool import ThreadPool + + from .executor import get_memmapping_executor + + # Import loky only if multiprocessing is present + from .externals.loky import cpu_count, process_executor + from .externals.loky.process_executor import ShutdownExecutorError + from .pool import MemmappingPool + + +class ParallelBackendBase(metaclass=ABCMeta): + """Helper abc which defines all methods a ParallelBackend must implement""" + + default_n_jobs = 1 + + supports_inner_max_num_threads = False + + # This flag was introduced for backward compatibility reasons. + # New backends should always set it to True and implement the + # `retrieve_result_callback` method. + supports_retrieve_callback = False + + @property + def supports_return_generator(self): + return self.supports_retrieve_callback + + @property + def supports_timeout(self): + return self.supports_retrieve_callback + + nesting_level = None + + def __init__( + self, nesting_level=None, inner_max_num_threads=None, **backend_kwargs + ): + super().__init__() + self.nesting_level = nesting_level + self.inner_max_num_threads = inner_max_num_threads + self.backend_kwargs = backend_kwargs + + MAX_NUM_THREADS_VARS = [ + "OMP_NUM_THREADS", + "OPENBLAS_NUM_THREADS", + "MKL_NUM_THREADS", + "BLIS_NUM_THREADS", + "VECLIB_MAXIMUM_THREADS", + "NUMBA_NUM_THREADS", + "NUMEXPR_NUM_THREADS", + ] + + TBB_ENABLE_IPC_VAR = "ENABLE_IPC" + + @abstractmethod + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs that can actually run in parallel + + n_jobs is the number of workers requested by the callers. Passing + n_jobs=-1 means requesting all available workers for instance matching + the number of CPU cores on the worker host(s). + + This method should return a guesstimate of the number of workers that + can actually perform work concurrently. The primary use case is to make + it possible for the caller to know in how many chunks to slice the + work. + + In general working on larger data chunks is more efficient (less + scheduling overhead and better use of CPU cache prefetching heuristics) + as long as all the workers have enough work to do. + """ + + def apply_async(self, func, callback=None): + """Deprecated: implement `submit` instead.""" + raise NotImplementedError("Implement `submit` instead.") + + def submit(self, func, callback=None): + """Schedule a function to be run and return a future-like object. + + This method should return a future-like object that allow tracking + the progress of the task. + + If ``supports_retrieve_callback`` is False, the return value of this + method is passed to ``retrieve_result`` instead of calling + ``retrieve_result_callback``. + + Parameters + ---------- + func: callable + The function to be run in parallel. + + callback: callable + A callable that will be called when the task is completed. This callable + is a wrapper around ``retrieve_result_callback``. This should be added + to the future-like object returned by this method, so that the callback + is called when the task is completed. + + For future-like backends, this can be achieved with something like + ``future.add_done_callback(callback)``. 
+ + Returns + ------- + future: future-like + A future-like object to track the execution of the submitted function. + """ + warnings.warn( + "`apply_async` is deprecated, implement and use `submit` instead.", + DeprecationWarning, + ) + return self.apply_async(func, callback) + + def retrieve_result_callback(self, out): + """Called within the callback function passed to `submit`. + + This method can customise how the result of the function is retrieved + from the future-like object. + + Parameters + ---------- + future: future-like + The future-like object returned by the `submit` method. + + Returns + ------- + result: object + The result of the function executed in parallel. + """ + + def retrieve_result(self, out, timeout=None): + """Hook to retrieve the result when support_retrieve_callback=False. + + The argument `out` is the result of the `submit` call. This method + should return the result of the computation or raise an exception if + the computation failed. + """ + if self.supports_timeout: + return out.get(timeout=timeout) + else: + return out.get() + + def configure( + self, n_jobs=1, parallel=None, prefer=None, require=None, **backend_kwargs + ): + """Reconfigure the backend and return the number of workers. + + This makes it possible to reuse an existing backend instance for + successive independent calls to Parallel with different parameters. + """ + self.parallel = parallel + return self.effective_n_jobs(n_jobs) + + def start_call(self): + """Call-back method called at the beginning of a Parallel call""" + + def stop_call(self): + """Call-back method called at the end of a Parallel call""" + + def terminate(self): + """Shutdown the workers and free the shared memory.""" + + def compute_batch_size(self): + """Determine the optimal batch size""" + return 1 + + def batch_completed(self, batch_size, duration): + """Callback indicate how long it took to run a batch""" + + def abort_everything(self, ensure_ready=True): + """Abort any running tasks + + This is called when an exception has been raised when executing a task + and all the remaining tasks will be ignored and can therefore be + aborted to spare computation resources. + + If ensure_ready is True, the backend should be left in an operating + state as future tasks might be re-submitted via that same backend + instance. + + If ensure_ready is False, the implementer of this method can decide + to leave the backend in a closed / terminated state as no new task + are expected to be submitted to this backend. + + Setting ensure_ready to False is an optimization that can be leveraged + when aborting tasks via killing processes from a local process pool + managed by the backend it-self: if we expect no new tasks, there is no + point in re-creating new workers. + """ + # Does nothing by default: to be overridden in subclasses when + # canceling tasks is possible. + pass + + def get_nested_backend(self): + """Backend instance to be used by nested Parallel calls. + + By default a thread-based backend is used for the first level of + nesting. Beyond, switch to sequential backend to avoid spawning too + many threads on the host. + """ + nesting_level = getattr(self, "nesting_level", 0) + 1 + if nesting_level > 1: + return SequentialBackend(nesting_level=nesting_level), None + else: + return ThreadingBackend(nesting_level=nesting_level), None + + def _prepare_worker_env(self, n_jobs): + """Return environment variables limiting threadpools in external libs. 
+ + This function return a dict containing environment variables to pass + when creating a pool of process. These environment variables limit the + number of threads to `n_threads` for OpenMP, MKL, Accelerated and + OpenBLAS libraries in the child processes. + """ + explicit_n_threads = self.inner_max_num_threads + default_n_threads = max(cpu_count() // n_jobs, 1) + + # Set the inner environment variables to self.inner_max_num_threads if + # it is given. Else, default to cpu_count // n_jobs unless the variable + # is already present in the parent process environment. + env = {} + for var in self.MAX_NUM_THREADS_VARS: + if explicit_n_threads is None: + var_value = os.environ.get(var, default_n_threads) + else: + var_value = explicit_n_threads + + env[var] = str(var_value) + + if self.TBB_ENABLE_IPC_VAR not in os.environ: + # To avoid over-subscription when using TBB, let the TBB schedulers + # use Inter Process Communication to coordinate: + env[self.TBB_ENABLE_IPC_VAR] = "1" + return env + + @contextlib.contextmanager + def retrieval_context(self): + """Context manager to manage an execution context. + + Calls to Parallel.retrieve will be made inside this context. + + By default, this does nothing. It may be useful for subclasses to + handle nested parallelism. In particular, it may be required to avoid + deadlocks if a backend manages a fixed number of workers, when those + workers may be asked to do nested Parallel calls. Without + 'retrieval_context' this could lead to deadlock, as all the workers + managed by the backend may be "busy" waiting for the nested parallel + calls to finish, but the backend has no free workers to execute those + tasks. + """ + yield + + @staticmethod + def in_main_thread(): + return isinstance(threading.current_thread(), threading._MainThread) + + +class SequentialBackend(ParallelBackendBase): + """A ParallelBackend which will execute all batches sequentially. + + Does not use/create any threading objects, and hence has minimal + overhead. Used when n_jobs == 1. + """ + + uses_threads = True + supports_timeout = False + supports_retrieve_callback = False + supports_sharedmem = True + + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs which are going to run in parallel""" + if n_jobs == 0: + raise ValueError("n_jobs == 0 in Parallel has no meaning") + return 1 + + def submit(self, func, callback=None): + """Schedule a func to be run""" + raise RuntimeError("Should never be called for SequentialBackend.") + + def retrieve_result_callback(self, out): + raise RuntimeError("Should never be called for SequentialBackend.") + + def get_nested_backend(self): + # import is not top level to avoid cyclic import errors. + from .parallel import get_active_backend + + # SequentialBackend should neither change the nesting level, the + # default backend or the number of jobs. Just return the current one. 
+        return get_active_backend()
+
+
+class PoolManagerMixin(object):
+    """A helper class for managing a pool of workers."""
+
+    _pool = None
+
+    def effective_n_jobs(self, n_jobs):
+        """Determine the number of jobs which are going to run in parallel"""
+        if n_jobs == 0:
+            raise ValueError("n_jobs == 0 in Parallel has no meaning")
+        elif mp is None or n_jobs is None:
+            # multiprocessing is not available or disabled, fallback
+            # to sequential mode
+            return 1
+        elif n_jobs < 0:
+            n_jobs = max(cpu_count() + 1 + n_jobs, 1)
+        return n_jobs
+
+    def terminate(self):
+        """Shutdown the process or thread pool"""
+        if self._pool is not None:
+            self._pool.close()
+            self._pool.terminate()  # terminate does a join()
+            self._pool = None
+
+    def _get_pool(self):
+        """Used by `submit` to make it possible to implement lazy init"""
+        return self._pool
+
+    def submit(self, func, callback=None):
+        """Schedule a func to be run"""
+        # Here, we need a wrapper to avoid crashes on KeyboardInterruptErrors.
+        # We also call the callback on error, to make sure the pool does not
+        # wait on crashed jobs.
+        return self._get_pool().apply_async(
+            _TracebackCapturingWrapper(func),
+            (),
+            callback=callback,
+            error_callback=callback,
+        )
+
+    def retrieve_result_callback(self, result):
+        """Mimic concurrent.futures results, raising an error if needed."""
+        # In the multiprocessing Pool API, the callbacks are called with the
+        # result value as an argument, so `result` (`out`) is the output of
+        # job.get(). It's either the result or the exception raised while
+        # collecting the result.
+        return _retrieve_traceback_capturing_wrapped_call(result)
+
+    def abort_everything(self, ensure_ready=True):
+        """Shutdown the pool and restart a new one with the same parameters"""
+        self.terminate()
+        if ensure_ready:
+            self.configure(
+                n_jobs=self.parallel.n_jobs,
+                parallel=self.parallel,
+                **self.parallel._backend_kwargs,
+            )
+
+
+class AutoBatchingMixin(object):
+    """A helper class for automagically batching jobs."""
+
+    # In seconds, should be big enough to hide multiprocessing dispatching
+    # overhead.
+    # This setting was found by running benchmarks/bench_auto_batching.py
+    # with various parameters on various platforms.
+    MIN_IDEAL_BATCH_DURATION = 0.2
+
+    # Should not be too high to avoid stragglers: long jobs running alone
+    # on a single worker while other workers have no work to process any more.
+    MAX_IDEAL_BATCH_DURATION = 2
+
+    # Batching counters default values
+    _DEFAULT_EFFECTIVE_BATCH_SIZE = 1
+    _DEFAULT_SMOOTHED_BATCH_DURATION = 0.0
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
+        self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
+
+    def compute_batch_size(self):
+        """Determine the optimal batch size"""
+        old_batch_size = self._effective_batch_size
+        batch_duration = self._smoothed_batch_duration
+        if batch_duration > 0 and batch_duration < self.MIN_IDEAL_BATCH_DURATION:
+            # The current batch size is too small: the duration of the
+            # processing of a batch of tasks is not large enough to hide
+            # the scheduling overhead.
+            ideal_batch_size = int(
+                old_batch_size * self.MIN_IDEAL_BATCH_DURATION / batch_duration
+            )
+            # Multiply by two to limit oscillations between min and max.
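+            # For instance, with old_batch_size=4 and a smoothed duration of
+            # 0.05s: ideal = int(4 * 0.2 / 0.05) = 16, doubled to 32 below,
+            # then capped to 2 * 4 = 8 by the min() that follows.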
+            ideal_batch_size *= 2
+
+            # don't increase the batch size too fast to limit huge batch sizes
+            # potentially leading to starving workers
+            batch_size = min(2 * old_batch_size, ideal_batch_size)
+
+            batch_size = max(batch_size, 1)
+
+            self._effective_batch_size = batch_size
+            if self.parallel.verbose >= 10:
+                self.parallel._print(
+                    f"Batch computation too fast ({batch_duration}s.) "
+                    f"Setting batch_size={batch_size}."
+                )
+        elif batch_duration > self.MAX_IDEAL_BATCH_DURATION and old_batch_size >= 2:
+            # The current batch size is too big. If we schedule overly long
+            # running batches some CPUs might wait with nothing left to do
+            # while a couple of CPUs are left processing a few long running
+            # batches. Better reduce the batch size a bit to limit the
+            # likelihood of scheduling such stragglers.
+
+            # decrease the batch size quickly to limit potential starving
+            ideal_batch_size = int(
+                old_batch_size * self.MIN_IDEAL_BATCH_DURATION / batch_duration
+            )
+            # Multiply by two to limit oscillations between min and max.
+            batch_size = max(2 * ideal_batch_size, 1)
+            self._effective_batch_size = batch_size
+            if self.parallel.verbose >= 10:
+                self.parallel._print(
+                    f"Batch computation too slow ({batch_duration}s.) "
+                    f"Setting batch_size={batch_size}."
+                )
+        else:
+            # No batch size adjustment
+            batch_size = old_batch_size
+
+        if batch_size != old_batch_size:
+            # Reset estimation of the smoothed mean batch duration: this
+            # estimate is updated in the multiprocessing apply_async
+            # CallBack as long as the batch_size is constant. Therefore
+            # we need to reset the estimate whenever we re-tune the batch
+            # size.
+            self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
+
+        return batch_size
+
+    def batch_completed(self, batch_size, duration):
+        """Callback to indicate how long it took to run a batch"""
+        if batch_size == self._effective_batch_size:
+            # Update the smoothed streaming estimate of the duration of a batch
+            # from dispatch to completion
+            old_duration = self._smoothed_batch_duration
+            if old_duration == self._DEFAULT_SMOOTHED_BATCH_DURATION:
+                # First record of duration for this batch size after the last
+                # reset.
+                new_duration = duration
+            else:
+                # Update the exponentially weighted average of the duration of
+                # batches for the current effective size.
+                new_duration = 0.8 * old_duration + 0.2 * duration
+            self._smoothed_batch_duration = new_duration
+
+    def reset_batch_stats(self):
+        """Reset batch statistics to default values.
+
+        This avoids interference with future jobs.
+        """
+        self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
+        self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
+
+
+class ThreadingBackend(PoolManagerMixin, ParallelBackendBase):
+    """A ParallelBackend which will use a thread pool to execute batches in.
+
+    This is a low-overhead backend but it suffers from the Python Global
+    Interpreter Lock if the called function relies a lot on Python objects.
+    Mostly useful when the execution bottleneck is a compiled extension that
+    explicitly releases the GIL (for instance a Cython loop wrapped in a "with
+    nogil" block or an expensive call to a library such as NumPy).
+
+    The actual thread pool is lazily initialized: the actual thread pool
+    construction is delayed to the first call to apply_async.
+
+    ThreadingBackend is used as the default backend for nested calls.
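+
+    A minimal usage sketch (illustrative):
+
+        from joblib import Parallel, delayed, parallel_config
+
+        with parallel_config(backend="threading"):
+            results = Parallel(n_jobs=2)(delayed(abs)(-i) for i in range(8))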
+    """
+
+    supports_retrieve_callback = True
+    uses_threads = True
+    supports_sharedmem = True
+
+    def configure(self, n_jobs=1, parallel=None, **backend_kwargs):
+        """Build a process or thread pool and return the number of workers"""
+        n_jobs = self.effective_n_jobs(n_jobs)
+        if n_jobs == 1:
+            # Avoid unnecessary overhead and use sequential backend instead.
+            raise FallbackToBackend(SequentialBackend(nesting_level=self.nesting_level))
+        self.parallel = parallel
+        self._n_jobs = n_jobs
+        return n_jobs
+
+    def _get_pool(self):
+        """Lazily initialize the thread pool
+
+        The actual pool of worker threads is only initialized at the first
+        call to apply_async.
+        """
+        if self._pool is None:
+            self._pool = ThreadPool(self._n_jobs)
+        return self._pool
+
+
+class MultiprocessingBackend(PoolManagerMixin, AutoBatchingMixin, ParallelBackendBase):
+    """A ParallelBackend which will use a multiprocessing.Pool.
+
+    Will introduce some communication and memory overhead when exchanging
+    input and output data with the worker Python processes. However, it does
+    not suffer from the Python Global Interpreter Lock.
+    """
+
+    supports_retrieve_callback = True
+    supports_return_generator = False
+
+    def effective_n_jobs(self, n_jobs):
+        """Determine the number of jobs which are going to run in parallel.
+
+        This also checks if we are attempting to create a nested parallel
+        loop.
+        """
+        if mp is None:
+            return 1
+
+        if mp.current_process().daemon:
+            # Daemonic processes cannot have children
+            if n_jobs != 1:
+                if inside_dask_worker():
+                    msg = (
+                        "Inside a Dask worker with daemon=True, "
+                        "setting n_jobs=1.\nPossible work-arounds:\n"
+                        "- dask.config.set("
+                        "{'distributed.worker.daemon': False})\n"
+                        "- set the environment variable "
+                        "DASK_DISTRIBUTED__WORKER__DAEMON=False\n"
+                        "before creating your Dask cluster."
+ ) + else: + msg = ( + "Multiprocessing-backed parallel loops " + "cannot be nested, setting n_jobs=1" + ) + warnings.warn(msg, stacklevel=3) + return 1 + + if process_executor._CURRENT_DEPTH > 0: + # Mixing loky and multiprocessing in nested loop is not supported + if n_jobs != 1: + warnings.warn( + "Multiprocessing-backed parallel loops cannot be nested," + " below loky, setting n_jobs=1", + stacklevel=3, + ) + return 1 + + elif not (self.in_main_thread() or self.nesting_level == 0): + # Prevent posix fork inside in non-main posix threads + if n_jobs != 1: + warnings.warn( + "Multiprocessing-backed parallel loops cannot be nested" + " below threads, setting n_jobs=1", + stacklevel=3, + ) + return 1 + + return super(MultiprocessingBackend, self).effective_n_jobs(n_jobs) + + def configure( + self, + n_jobs=1, + parallel=None, + prefer=None, + require=None, + **memmapping_pool_kwargs, + ): + """Build a process or thread pool and return the number of workers""" + n_jobs = self.effective_n_jobs(n_jobs) + if n_jobs == 1: + raise FallbackToBackend(SequentialBackend(nesting_level=self.nesting_level)) + + memmapping_pool_kwargs = { + **self.backend_kwargs, + **memmapping_pool_kwargs, + } + + # Make sure to free as much memory as possible before forking + gc.collect() + self._pool = MemmappingPool(n_jobs, **memmapping_pool_kwargs) + self.parallel = parallel + return n_jobs + + def terminate(self): + """Shutdown the process or thread pool""" + super(MultiprocessingBackend, self).terminate() + self.reset_batch_stats() + + +class LokyBackend(AutoBatchingMixin, ParallelBackendBase): + """Managing pool of workers with loky instead of multiprocessing.""" + + supports_retrieve_callback = True + supports_inner_max_num_threads = True + + def configure( + self, + n_jobs=1, + parallel=None, + prefer=None, + require=None, + idle_worker_timeout=None, + **memmapping_executor_kwargs, + ): + """Build a process executor and return the number of workers""" + n_jobs = self.effective_n_jobs(n_jobs) + if n_jobs == 1: + raise FallbackToBackend(SequentialBackend(nesting_level=self.nesting_level)) + + memmapping_executor_kwargs = { + **self.backend_kwargs, + **memmapping_executor_kwargs, + } + + # Prohibit the use of 'timeout' in the LokyBackend, as 'idle_worker_timeout' + # better describes the backend's behavior. + if "timeout" in memmapping_executor_kwargs: + raise ValueError( + "The 'timeout' parameter is not supported by the LokyBackend. " + "Please use the `idle_worker_timeout` parameter instead." 
+ ) + if idle_worker_timeout is None: + idle_worker_timeout = self.backend_kwargs.get("idle_worker_timeout", 300) + + self._workers = get_memmapping_executor( + n_jobs, + timeout=idle_worker_timeout, + env=self._prepare_worker_env(n_jobs=n_jobs), + context_id=parallel._id, + **memmapping_executor_kwargs, + ) + self.parallel = parallel + return n_jobs + + def effective_n_jobs(self, n_jobs): + """Determine the number of jobs which are going to run in parallel""" + if n_jobs == 0: + raise ValueError("n_jobs == 0 in Parallel has no meaning") + elif mp is None or n_jobs is None: + # multiprocessing is not available or disabled, fallback + # to sequential mode + return 1 + elif mp.current_process().daemon: + # Daemonic processes cannot have children + if n_jobs != 1: + if inside_dask_worker(): + msg = ( + "Inside a Dask worker with daemon=True, " + "setting n_jobs=1.\nPossible work-arounds:\n" + "- dask.config.set(" + "{'distributed.worker.daemon': False})\n" + "- set the environment variable " + "DASK_DISTRIBUTED__WORKER__DAEMON=False\n" + "before creating your Dask cluster." + ) + else: + msg = ( + "Loky-backed parallel loops cannot be called in a" + " multiprocessing, setting n_jobs=1" + ) + warnings.warn(msg, stacklevel=3) + + return 1 + elif not (self.in_main_thread() or self.nesting_level == 0): + # Prevent posix fork inside in non-main posix threads + if n_jobs != 1: + warnings.warn( + "Loky-backed parallel loops cannot be nested below " + "threads, setting n_jobs=1", + stacklevel=3, + ) + return 1 + elif n_jobs < 0: + n_jobs = max(cpu_count() + 1 + n_jobs, 1) + return n_jobs + + def submit(self, func, callback=None): + """Schedule a func to be run""" + future = self._workers.submit(func) + if callback is not None: + future.add_done_callback(callback) + return future + + def retrieve_result_callback(self, future): + """Retrieve the result, here out is the future given by submit""" + try: + return future.result() + except ShutdownExecutorError: + raise RuntimeError( + "The executor underlying Parallel has been shutdown. " + "This is likely due to the garbage collection of a previous " + "generator from a call to Parallel with return_as='generator'." + " Make sure the generator is not garbage collected when " + "submitting a new job or that it is first properly exhausted." + ) + + def terminate(self): + if self._workers is not None: + # Don't terminate the workers as we want to reuse them in later + # calls, but cleanup the temporary resources that the Parallel call + # created. This 'hack' requires a private, low-level operation. + self._workers._temp_folder_manager._clean_temporary_resources( + context_id=self.parallel._id, force=False + ) + self._workers = None + + self.reset_batch_stats() + + def abort_everything(self, ensure_ready=True): + """Shutdown the workers and restart a new one with the same parameters""" + self._workers.terminate(kill_workers=True) + self._workers = None + + if ensure_ready: + self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel) + + +class FallbackToBackend(Exception): + """Raised when configuration should fallback to another backend""" + + def __init__(self, backend): + self.backend = backend + + +def inside_dask_worker(): + """Check whether the current function is executed inside a Dask worker.""" + # This function can not be in joblib._dask because there would be a + # circular import: + # _dask imports _parallel_backend that imports _dask ... 
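+    # ``get_worker`` raises a ValueError when called from outside a Dask
+    # worker thread, which is the signal used below.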
+ try: + from distributed import get_worker + except ImportError: + return False + + try: + get_worker() + return True + except ValueError: + return False diff --git a/py311/lib/python3.11/site-packages/joblib/_store_backends.py b/py311/lib/python3.11/site-packages/joblib/_store_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..170867b6c6316658a87e2f38a8344c3ba8504366 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/_store_backends.py @@ -0,0 +1,500 @@ +"""Storage providers backends for Memory caching.""" + +import collections +import datetime +import json +import operator +import os +import os.path +import re +import shutil +import threading +import time +import uuid +import warnings +from abc import ABCMeta, abstractmethod +from pickle import PicklingError + +from . import numpy_pickle +from .backports import concurrency_safe_rename +from .disk import memstr_to_bytes, mkdirp, rm_subdirs +from .logger import format_time + +CacheItemInfo = collections.namedtuple("CacheItemInfo", "path size last_access") + + +class CacheWarning(Warning): + """Warning to capture dump failures except for PicklingError.""" + + pass + + +def concurrency_safe_write(object_to_write, filename, write_func): + """Writes an object into a unique file in a concurrency-safe way.""" + # Temporary name is composed of UUID, process_id and thread_id to avoid + # collisions due to concurrent write. + # UUID is unique across nodes and time and help avoid collisions, even if + # the cache folder is shared by several Python processes with the same pid and + # thread id on different nodes of a cluster for instance. + thread_id = id(threading.current_thread()) + temporary_filename = f"{filename}.{uuid.uuid4().hex}-{os.getpid()}-{thread_id}" + + write_func(object_to_write, temporary_filename) + + return temporary_filename + + +class StoreBackendBase(metaclass=ABCMeta): + """Helper Abstract Base Class which defines all methods that + a StorageBackend must implement.""" + + location = None + + @abstractmethod + def _open_item(self, f, mode): + """Opens an item on the store and return a file-like object. + + This method is private and only used by the StoreBackendMixin object. + + Parameters + ---------- + f: a file-like object + The file-like object where an item is stored and retrieved + mode: string, optional + the mode in which the file-like object is opened allowed valued are + 'rb', 'wb' + + Returns + ------- + a file-like object + """ + + @abstractmethod + def _item_exists(self, location): + """Checks if an item location exists in the store. + + This method is private and only used by the StoreBackendMixin object. + + Parameters + ---------- + location: string + The location of an item. On a filesystem, this corresponds to the + absolute path, including the filename, of a file. + + Returns + ------- + True if the item exists, False otherwise + """ + + @abstractmethod + def _move_item(self, src, dst): + """Moves an item from src to dst in the store. + + This method is private and only used by the StoreBackendMixin object. + + Parameters + ---------- + src: string + The source location of an item + dst: string + The destination location of an item + """ + + @abstractmethod + def create_location(self, location): + """Creates a location on the store. + + Parameters + ---------- + location: string + The location in the store. On a filesystem, this corresponds to a + directory. + """ + + @abstractmethod + def clear_location(self, location): + """Clears a location on the store. 
+ + Parameters + ---------- + location: string + The location in the store. On a filesystem, this corresponds to a + directory or a filename absolute path + """ + + @abstractmethod + def get_items(self): + """Returns the whole list of items available in the store. + + Returns + ------- + The list of items identified by their ids (e.g filename in a + filesystem). + """ + + @abstractmethod + def configure(self, location, verbose=0, backend_options=dict()): + """Configures the store. + + Parameters + ---------- + location: string + The base location used by the store. On a filesystem, this + corresponds to a directory. + verbose: int + The level of verbosity of the store + backend_options: dict + Contains a dictionary of named parameters used to configure the + store backend. + """ + + +class StoreBackendMixin(object): + """Class providing all logic for managing the store in a generic way. + + The StoreBackend subclass has to implement 3 methods: create_location, + clear_location and configure. The StoreBackend also has to provide + a private _open_item, _item_exists and _move_item methods. The _open_item + method has to have the same signature as the builtin open and return a + file-like object. + """ + + def load_item(self, call_id, verbose=1, timestamp=None, metadata=None): + """Load an item from the store given its id as a list of str.""" + full_path = os.path.join(self.location, *call_id) + + if verbose > 1: + ts_string = ( + "{: <16}".format(format_time(time.time() - timestamp)) + if timestamp is not None + else "" + ) + signature = os.path.basename(call_id[0]) + if metadata is not None and "input_args" in metadata: + kwargs = ", ".join( + "{}={}".format(*item) for item in metadata["input_args"].items() + ) + signature += "({})".format(kwargs) + msg = "[Memory]{}: Loading {}".format(ts_string, signature) + if verbose < 10: + print("{0}...".format(msg)) + else: + print("{0} from {1}".format(msg, full_path)) + + mmap_mode = None if not hasattr(self, "mmap_mode") else self.mmap_mode + + filename = os.path.join(full_path, "output.pkl") + if not self._item_exists(filename): + raise KeyError( + "Non-existing item (may have been " + "cleared).\nFile %s does not exist" % filename + ) + + # file-like object cannot be used when mmap_mode is set + if mmap_mode is None: + with self._open_item(filename, "rb") as f: + item = numpy_pickle.load(f) + else: + item = numpy_pickle.load(filename, mmap_mode=mmap_mode) + return item + + def dump_item(self, call_id, item, verbose=1): + """Dump an item in the store at the id given as a list of str.""" + try: + item_path = os.path.join(self.location, *call_id) + if not self._item_exists(item_path): + self.create_location(item_path) + filename = os.path.join(item_path, "output.pkl") + if verbose > 10: + print("Persisting in %s" % item_path) + + def write_func(to_write, dest_filename): + with self._open_item(dest_filename, "wb") as f: + try: + numpy_pickle.dump(to_write, f, compress=self.compress) + except PicklingError as e: + # TODO(1.5) turn into error + warnings.warn( + "Unable to cache to disk: failed to pickle " + "output. In version 1.5 this will raise an " + f"exception. Exception: {e}.", + FutureWarning, + ) + + self._concurrency_safe_write(item, filename, write_func) + except Exception as e: # noqa: E722 + warnings.warn( + "Unable to cache to disk. Possibly a race condition in the " + f"creation of the directory. 
Exception: {e}.",
+                CacheWarning,
+            )
+
+    def clear_item(self, call_id):
+        """Clear the item at the id, given as a list of str."""
+        item_path = os.path.join(self.location, *call_id)
+        if self._item_exists(item_path):
+            self.clear_location(item_path)
+
+    def contains_item(self, call_id):
+        """Check if there is an item at the id, given as a list of str."""
+        item_path = os.path.join(self.location, *call_id)
+        filename = os.path.join(item_path, "output.pkl")
+
+        return self._item_exists(filename)
+
+    def get_item_info(self, call_id):
+        """Return information about an item."""
+        return {"location": os.path.join(self.location, *call_id)}
+
+    def get_metadata(self, call_id):
+        """Return the actual metadata of an item."""
+        try:
+            item_path = os.path.join(self.location, *call_id)
+            filename = os.path.join(item_path, "metadata.json")
+            with self._open_item(filename, "rb") as f:
+                return json.loads(f.read().decode("utf-8"))
+        except:  # noqa: E722
+            return {}
+
+    def store_metadata(self, call_id, metadata):
+        """Store the metadata of a computation."""
+        try:
+            item_path = os.path.join(self.location, *call_id)
+            self.create_location(item_path)
+            filename = os.path.join(item_path, "metadata.json")
+
+            def write_func(to_write, dest_filename):
+                with self._open_item(dest_filename, "wb") as f:
+                    f.write(json.dumps(to_write).encode("utf-8"))
+
+            self._concurrency_safe_write(metadata, filename, write_func)
+        except:  # noqa: E722
+            pass
+
+    def contains_path(self, call_id):
+        """Check if the cached function is available in the store."""
+        func_path = os.path.join(self.location, *call_id)
+        return self._item_exists(func_path)
+
+    def clear_path(self, call_id):
+        """Clear all items with a common path in the store."""
+        func_path = os.path.join(self.location, *call_id)
+        if self._item_exists(func_path):
+            self.clear_location(func_path)
+
+    def store_cached_func_code(self, call_id, func_code=None):
+        """Store the code of the cached function."""
+        func_path = os.path.join(self.location, *call_id)
+        if not self._item_exists(func_path):
+            self.create_location(func_path)
+
+        if func_code is not None:
+            filename = os.path.join(func_path, "func_code.py")
+            with self._open_item(filename, "wb") as f:
+                f.write(func_code.encode("utf-8"))
+
+    def get_cached_func_code(self, call_id):
+        """Get the code of the cached function."""
+        filename = os.path.join(self.location, *call_id, "func_code.py")
+        with self._open_item(filename, "rb") as f:
+            return f.read().decode("utf-8")
+
+    def get_cached_func_info(self, call_id):
+        """Return information related to the cached function if it exists."""
+        return {"location": os.path.join(self.location, *call_id)}
+
+    def clear(self):
+        """Clear the whole store content."""
+        self.clear_location(self.location)
+
+    def enforce_store_limits(self, bytes_limit, items_limit=None, age_limit=None):
+        """
+        Remove the store's oldest files to enforce item, byte, and age limits.
+        """
+        items_to_delete = self._get_items_to_delete(bytes_limit, items_limit, age_limit)
+
+        for item in items_to_delete:
+            if self.verbose > 10:
+                print("Deleting item {0}".format(item))
+            try:
+                self.clear_location(item.path)
+            except OSError:
+                # Even with ignore_errors=True shutil.rmtree can raise OSError
+                # with:
+                # [Errno 116] Stale file handle if another process has deleted
+                # the folder already.
+                pass
+
+    def _get_items_to_delete(self, bytes_limit, items_limit=None, age_limit=None):
+        """
+        Get items to delete to keep the store under size, file, & age limits.
+ """ + if isinstance(bytes_limit, str): + bytes_limit = memstr_to_bytes(bytes_limit) + + items = self.get_items() + if not items: + return [] + + size = sum(item.size for item in items) + + if bytes_limit is not None: + to_delete_size = size - bytes_limit + else: + to_delete_size = 0 + + if items_limit is not None: + to_delete_items = len(items) - items_limit + else: + to_delete_items = 0 + + if age_limit is not None: + older_item = min(item.last_access for item in items) + if age_limit.total_seconds() < 0: + raise ValueError("age_limit has to be a positive timedelta") + deadline = datetime.datetime.now() - age_limit + else: + deadline = None + + if ( + to_delete_size <= 0 + and to_delete_items <= 0 + and (deadline is None or older_item > deadline) + ): + return [] + + # We want to delete first the cache items that were accessed a + # long time ago + items.sort(key=operator.attrgetter("last_access")) + + items_to_delete = [] + size_so_far = 0 + items_so_far = 0 + + for item in items: + if ( + (size_so_far >= to_delete_size) + and items_so_far >= to_delete_items + and (deadline is None or deadline < item.last_access) + ): + break + + items_to_delete.append(item) + size_so_far += item.size + items_so_far += 1 + + return items_to_delete + + def _concurrency_safe_write(self, to_write, filename, write_func): + """Writes an object into a file in a concurrency-safe way.""" + temporary_filename = concurrency_safe_write(to_write, filename, write_func) + self._move_item(temporary_filename, filename) + + def __repr__(self): + """Printable representation of the store location.""" + return '{class_name}(location="{location}")'.format( + class_name=self.__class__.__name__, location=self.location + ) + + +class FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin): + """A StoreBackend used with local or network file systems.""" + + _open_item = staticmethod(open) + _item_exists = staticmethod(os.path.exists) + _move_item = staticmethod(concurrency_safe_rename) + + def clear_location(self, location): + """Delete location on store.""" + if location == self.location: + rm_subdirs(location) + else: + shutil.rmtree(location, ignore_errors=True) + + def create_location(self, location): + """Create object location on store""" + mkdirp(location) + + def get_items(self): + """Returns the whole list of items available in the store.""" + items = [] + + for dirpath, _, filenames in os.walk(self.location): + is_cache_hash_dir = re.match("[a-f0-9]{32}", os.path.basename(dirpath)) + + if is_cache_hash_dir: + output_filename = os.path.join(dirpath, "output.pkl") + try: + last_access = os.path.getatime(output_filename) + except OSError: + try: + last_access = os.path.getatime(dirpath) + except OSError: + # The directory has already been deleted + continue + + last_access = datetime.datetime.fromtimestamp(last_access) + try: + full_filenames = [os.path.join(dirpath, fn) for fn in filenames] + dirsize = sum(os.path.getsize(fn) for fn in full_filenames) + except OSError: + # Either output_filename or one of the files in + # dirpath does not exist any more. We assume this + # directory is being cleaned by another process already + continue + + items.append(CacheItemInfo(dirpath, dirsize, last_access)) + + return items + + def configure(self, location, verbose=1, backend_options=None): + """Configure the store backend. 
+ + For this backend, valid store options are 'compress' and 'mmap_mode' + """ + if backend_options is None: + backend_options = {} + + # setup location directory + self.location = location + if not os.path.exists(self.location): + mkdirp(self.location) + + # Automatically add `.gitignore` file to the cache folder. + # XXX: the condition is necessary because in `Memory.__init__`, the user + # passed `location` param is modified to be either `{location}` or + # `{location}/joblib` depending on input type (`pathlib.Path` vs `str`). + # The proper resolution of this inconsistency is tracked in: + # https://github.com/joblib/joblib/issues/1684 + cache_directory = ( + os.path.dirname(location) + if os.path.dirname(location) and os.path.basename(location) == "joblib" + else location + ) + gitignore = os.path.join(cache_directory, ".gitignore") + if not os.path.exists(gitignore): + try: + with open(gitignore, "w") as file: + file.write("# Created by joblib automatically.\n") + file.write("*\n") + except OSError as e: + warnings.warn(f"Unable to write {gitignore}. Exception: {e}.") + + # item can be stored compressed for faster I/O + self.compress = backend_options.get("compress", False) + + # FileSystemStoreBackend can be used with mmap_mode options under + # certain conditions. + mmap_mode = backend_options.get("mmap_mode") + if self.compress and mmap_mode is not None: + warnings.warn( + "Compressed items cannot be memmapped in a " + "filesystem store. Option will be ignored.", + stacklevel=2, + ) + + self.mmap_mode = mmap_mode + self.verbose = verbose diff --git a/py311/lib/python3.11/site-packages/joblib/_utils.py b/py311/lib/python3.11/site-packages/joblib/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5e82173e7de40598b4dae37d42d25d977af4e1a9 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/_utils.py @@ -0,0 +1,120 @@ +# Adapted from https://stackoverflow.com/a/9558001/2536294 + +import ast +import functools +import operator as op +from dataclasses import dataclass + +from ._multiprocessing_helpers import mp + +if mp is not None: + from .externals.loky.process_executor import _ExceptionWithTraceback + + +# supported operators +operators = { + ast.Add: op.add, + ast.Sub: op.sub, + ast.Mult: op.mul, + ast.Div: op.truediv, + ast.FloorDiv: op.floordiv, + ast.Mod: op.mod, + ast.Pow: op.pow, + ast.USub: op.neg, +} + + +def eval_expr(expr): + """Somewhat safely evaluate an arithmetic expression. + + >>> eval_expr('2*6') + 12 + >>> eval_expr('2**6') + 64 + >>> eval_expr('1 + 2*3**(4) / (6 + -7)') + -161.0 + + Raises ValueError if the expression is invalid, too long + or its computation involves too large values. + """ + # Restrict the length of the expression to avoid potential Python crashes + # as per the documentation of ast.parse. + max_length = 30 + if len(expr) > max_length: + raise ValueError( + f"Expression {expr[:max_length]!r}... is too long. " + f"Max length is {max_length}, got {len(expr)}." + ) + try: + return eval_(ast.parse(expr, mode="eval").body) + except (TypeError, SyntaxError, OverflowError, KeyError) as e: + raise ValueError( + f"{expr!r} is not a valid or supported arithmetic expression." 
+ ) from e + + +def limit(max_=None): + """Return decorator that limits allowed returned values.""" + + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + ret = func(*args, **kwargs) + try: + mag = abs(ret) + except TypeError: + pass # not applicable + else: + if mag > max_: + raise ValueError( + f"Numeric literal {ret} is too large, max is {max_}." + ) + return ret + + return wrapper + + return decorator + + +@limit(max_=10**6) +def eval_(node): + if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)): + return node.value + elif isinstance(node, ast.BinOp): # + return operators[type(node.op)](eval_(node.left), eval_(node.right)) + elif isinstance(node, ast.UnaryOp): # e.g., -1 + return operators[type(node.op)](eval_(node.operand)) + else: + raise TypeError(node) + + +@dataclass(frozen=True) +class _Sentinel: + """A sentinel to mark a parameter as not explicitly set""" + + default_value: object + + def __repr__(self): + return f"default({self.default_value!r})" + + +class _TracebackCapturingWrapper: + """Protect function call and return error with traceback.""" + + def __init__(self, func): + self.func = func + + def __call__(self, **kwargs): + try: + return self.func(**kwargs) + except BaseException as e: + return _ExceptionWithTraceback(e) + + +def _retrieve_traceback_capturing_wrapped_call(out): + if isinstance(out, _ExceptionWithTraceback): + rebuild, args = out.__reduce__() + out = rebuild(*args) + if isinstance(out, BaseException): + raise out + return out diff --git a/py311/lib/python3.11/site-packages/joblib/backports.py b/py311/lib/python3.11/site-packages/joblib/backports.py new file mode 100644 index 0000000000000000000000000000000000000000..495e2acb8b38e6676dd4cb0a219d9f3bb7c4bff5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/backports.py @@ -0,0 +1,195 @@ +""" +Backports of fixes for joblib dependencies +""" + +import os +import re +import time +from multiprocessing import util +from os.path import basename + + +class Version: + """Backport from deprecated distutils + + We maintain this backport to avoid introducing a new dependency on + `packaging`. + + We might rexplore this choice in the future if all major Python projects + introduce a dependency on packaging anyway. + """ + + def __init__(self, vstring=None): + if vstring: + self.parse(vstring) + + def __repr__(self): + return "%s ('%s')" % (self.__class__.__name__, str(self)) + + def __eq__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c == 0 + + def __lt__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c < 0 + + def __le__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c <= 0 + + def __gt__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c > 0 + + def __ge__(self, other): + c = self._cmp(other) + if c is NotImplemented: + return c + return c >= 0 + + +class LooseVersion(Version): + """Backport from deprecated distutils + + We maintain this backport to avoid introducing a new dependency on + `packaging`. + + We might rexplore this choice in the future if all major Python projects + introduce a dependency on packaging anyway. 
+ """ + + component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE) + + def __init__(self, vstring=None): + if vstring: + self.parse(vstring) + + def parse(self, vstring): + # I've given up on thinking I can reconstruct the version string + # from the parsed tuple -- so I just store the string here for + # use by __str__ + self.vstring = vstring + components = [x for x in self.component_re.split(vstring) if x and x != "."] + for i, obj in enumerate(components): + try: + components[i] = int(obj) + except ValueError: + pass + + self.version = components + + def __str__(self): + return self.vstring + + def __repr__(self): + return "LooseVersion ('%s')" % str(self) + + def _cmp(self, other): + if isinstance(other, str): + other = LooseVersion(other) + elif not isinstance(other, LooseVersion): + return NotImplemented + + if self.version == other.version: + return 0 + if self.version < other.version: + return -1 + if self.version > other.version: + return 1 + + +try: + import numpy as np + + def make_memmap( + filename, + dtype="uint8", + mode="r+", + offset=0, + shape=None, + order="C", + unlink_on_gc_collect=False, + ): + """Custom memmap constructor compatible with numpy.memmap. + + This function: + - is a backport the numpy memmap offset fix (See + https://github.com/numpy/numpy/pull/8443 for more details. + The numpy fix is available starting numpy 1.13) + - adds ``unlink_on_gc_collect``, which specifies explicitly whether + the process re-constructing the memmap owns a reference to the + underlying file. If set to True, it adds a finalizer to the + newly-created memmap that sends a maybe_unlink request for the + memmaped file to resource_tracker. + """ + util.debug( + "[MEMMAP READ] creating a memmap (shape {}, filename {}, pid {})".format( + shape, basename(filename), os.getpid() + ) + ) + + mm = np.memmap( + filename, dtype=dtype, mode=mode, offset=offset, shape=shape, order=order + ) + if LooseVersion(np.__version__) < "1.13": + mm.offset = offset + if unlink_on_gc_collect: + from ._memmapping_reducer import add_maybe_unlink_finalizer + + add_maybe_unlink_finalizer(mm) + return mm +except ImportError: + + def make_memmap( + filename, + dtype="uint8", + mode="r+", + offset=0, + shape=None, + order="C", + unlink_on_gc_collect=False, + ): + raise NotImplementedError( + "'joblib.backports.make_memmap' should not be used " + "if numpy is not installed." + ) + + +if os.name == "nt": + # https://github.com/joblib/joblib/issues/540 + access_denied_errors = (5, 13) + from os import replace + + def concurrency_safe_rename(src, dst): + """Renames ``src`` into ``dst`` overwriting ``dst`` if it exists. + + On Windows os.replace can yield permission errors if executed by two + different processes. 
+ """ + max_sleep_time = 1 + total_sleep_time = 0 + sleep_time = 0.001 + while total_sleep_time < max_sleep_time: + try: + replace(src, dst) + break + except Exception as exc: + if getattr(exc, "winerror", None) in access_denied_errors: + time.sleep(sleep_time) + total_sleep_time += sleep_time + sleep_time *= 2 + else: + raise + else: + raise +else: + from os import replace as concurrency_safe_rename # noqa diff --git a/py311/lib/python3.11/site-packages/joblib/compressor.py b/py311/lib/python3.11/site-packages/joblib/compressor.py new file mode 100644 index 0000000000000000000000000000000000000000..55bc86c4855574e9b0eec8fad29a2cdde614fbdd --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/compressor.py @@ -0,0 +1,572 @@ +"""Classes and functions for managing compressors.""" + +import io +import zlib + +from joblib.backports import LooseVersion + +try: + from threading import RLock +except ImportError: + from dummy_threading import RLock + +try: + import bz2 +except ImportError: + bz2 = None + +try: + import lz4 + from lz4.frame import LZ4FrameFile +except ImportError: + lz4 = None + +try: + import lzma +except ImportError: + lzma = None + + +LZ4_NOT_INSTALLED_ERROR = ( + "LZ4 is not installed. Install it with pip: https://python-lz4.readthedocs.io/" +) + +# Registered compressors +_COMPRESSORS = {} + +# Magic numbers of supported compression file formats. +_ZFILE_PREFIX = b"ZF" # used with pickle files created before 0.9.3. +_ZLIB_PREFIX = b"\x78" +_GZIP_PREFIX = b"\x1f\x8b" +_BZ2_PREFIX = b"BZ" +_XZ_PREFIX = b"\xfd\x37\x7a\x58\x5a" +_LZMA_PREFIX = b"\x5d\x00" +_LZ4_PREFIX = b"\x04\x22\x4d\x18" + + +def register_compressor(compressor_name, compressor, force=False): + """Register a new compressor. + + Parameters + ---------- + compressor_name: str. + The name of the compressor. + compressor: CompressorWrapper + An instance of a 'CompressorWrapper'. + """ + global _COMPRESSORS + if not isinstance(compressor_name, str): + raise ValueError( + "Compressor name should be a string, '{}' given.".format(compressor_name) + ) + + if not isinstance(compressor, CompressorWrapper): + raise ValueError( + "Compressor should implement the CompressorWrapper " + "interface, '{}' given.".format(compressor) + ) + + if compressor.fileobj_factory is not None and ( + not hasattr(compressor.fileobj_factory, "read") + or not hasattr(compressor.fileobj_factory, "write") + or not hasattr(compressor.fileobj_factory, "seek") + or not hasattr(compressor.fileobj_factory, "tell") + ): + raise ValueError( + "Compressor 'fileobj_factory' attribute should " + "implement the file object interface, '{}' given.".format( + compressor.fileobj_factory + ) + ) + + if compressor_name in _COMPRESSORS and not force: + raise ValueError("Compressor '{}' already registered.".format(compressor_name)) + + _COMPRESSORS[compressor_name] = compressor + + +class CompressorWrapper: + """A wrapper around a compressor file object. + + Attributes + ---------- + obj: a file-like object + The object must implement the buffer interface and will be used + internally to compress/decompress the data. + prefix: bytestring + A bytestring corresponding to the magic number that identifies the + file format associated to the compressor. + extension: str + The file extension used to automatically select this compressor during + a dump to a file. 
+ """ + + def __init__(self, obj, prefix=b"", extension=""): + self.fileobj_factory = obj + self.prefix = prefix + self.extension = extension + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + if compresslevel is None: + return self.fileobj_factory(fileobj, "wb") + else: + return self.fileobj_factory(fileobj, "wb", compresslevel=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + return self.fileobj_factory(fileobj, "rb") + + +class BZ2CompressorWrapper(CompressorWrapper): + prefix = _BZ2_PREFIX + extension = ".bz2" + + def __init__(self): + if bz2 is not None: + self.fileobj_factory = bz2.BZ2File + else: + self.fileobj_factory = None + + def _check_versions(self): + if bz2 is None: + raise ValueError( + "bz2 module is not compiled on your python standard library." + ) + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + self._check_versions() + if compresslevel is None: + return self.fileobj_factory(fileobj, "wb") + else: + return self.fileobj_factory(fileobj, "wb", compresslevel=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + self._check_versions() + fileobj = self.fileobj_factory(fileobj, "rb") + return fileobj + + +class LZMACompressorWrapper(CompressorWrapper): + prefix = _LZMA_PREFIX + extension = ".lzma" + _lzma_format_name = "FORMAT_ALONE" + + def __init__(self): + if lzma is not None: + self.fileobj_factory = lzma.LZMAFile + self._lzma_format = getattr(lzma, self._lzma_format_name) + else: + self.fileobj_factory = None + + def _check_versions(self): + if lzma is None: + raise ValueError( + "lzma module is not compiled on your python standard library." 
+ ) + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + if compresslevel is None: + return self.fileobj_factory(fileobj, "wb", format=self._lzma_format) + else: + return self.fileobj_factory( + fileobj, "wb", format=self._lzma_format, preset=compresslevel + ) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + return lzma.LZMAFile(fileobj, "rb") + + +class XZCompressorWrapper(LZMACompressorWrapper): + prefix = _XZ_PREFIX + extension = ".xz" + _lzma_format_name = "FORMAT_XZ" + + +class LZ4CompressorWrapper(CompressorWrapper): + prefix = _LZ4_PREFIX + extension = ".lz4" + + def __init__(self): + if lz4 is not None: + self.fileobj_factory = LZ4FrameFile + else: + self.fileobj_factory = None + + def _check_versions(self): + if lz4 is None: + raise ValueError(LZ4_NOT_INSTALLED_ERROR) + lz4_version = lz4.__version__ + if lz4_version.startswith("v"): + lz4_version = lz4_version[1:] + if LooseVersion(lz4_version) < LooseVersion("0.19"): + raise ValueError(LZ4_NOT_INSTALLED_ERROR) + + def compressor_file(self, fileobj, compresslevel=None): + """Returns an instance of a compressor file object.""" + self._check_versions() + if compresslevel is None: + return self.fileobj_factory(fileobj, "wb") + else: + return self.fileobj_factory(fileobj, "wb", compression_level=compresslevel) + + def decompressor_file(self, fileobj): + """Returns an instance of a decompressor file object.""" + self._check_versions() + return self.fileobj_factory(fileobj, "rb") + + +############################################################################### +# base file compression/decompression object definition +_MODE_CLOSED = 0 +_MODE_READ = 1 +_MODE_READ_EOF = 2 +_MODE_WRITE = 3 +_BUFFER_SIZE = 8192 + + +class BinaryZlibFile(io.BufferedIOBase): + """A file object providing transparent zlib (de)compression. + + TODO python2_drop: is it still needed since we dropped Python 2 support A + BinaryZlibFile can act as a wrapper for an existing file object, or refer + directly to a named file on disk. + + Note that BinaryZlibFile provides only a *binary* file interface: data read + is returned as bytes, and data to be written should be given as bytes. + + This object is an adaptation of the BZ2File object and is compatible with + versions of python >= 2.7. + + If filename is a str or bytes object, it gives the name + of the file to be opened. Otherwise, it should be a file object, + which will be used to read or write the compressed data. + + mode can be 'rb' for reading (default) or 'wb' for (over)writing + + If mode is 'wb', compresslevel can be a number between 1 + and 9 specifying the level of compression: 1 produces the least + compression, and 9 produces the most compression. 3 is the default. + """ + + wbits = zlib.MAX_WBITS + + def __init__(self, filename, mode="rb", compresslevel=3): + # This lock must be recursive, so that BufferedIOBase's + # readline(), readlines() and writelines() don't deadlock. + self._lock = RLock() + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + self._pos = 0 + self._size = -1 + self.compresslevel = compresslevel + + if not isinstance(compresslevel, int) or not (1 <= compresslevel <= 9): + raise ValueError( + "'compresslevel' must be an integer " + "between 1 and 9. 
You provided 'compresslevel={}'".format(compresslevel) + ) + + if mode == "rb": + self._mode = _MODE_READ + self._decompressor = zlib.decompressobj(self.wbits) + self._buffer = b"" + self._buffer_offset = 0 + elif mode == "wb": + self._mode = _MODE_WRITE + self._compressor = zlib.compressobj( + self.compresslevel, zlib.DEFLATED, self.wbits, zlib.DEF_MEM_LEVEL, 0 + ) + else: + raise ValueError("Invalid mode: %r" % (mode,)) + + if isinstance(filename, str): + self._fp = io.open(filename, mode) + self._closefp = True + elif hasattr(filename, "read") or hasattr(filename, "write"): + self._fp = filename + else: + raise TypeError("filename must be a str or bytes object, or a file") + + def close(self): + """Flush and close the file. + + May be called more than once without error. Once the file is + closed, any other operation on it will raise a ValueError. + """ + with self._lock: + if self._mode == _MODE_CLOSED: + return + try: + if self._mode in (_MODE_READ, _MODE_READ_EOF): + self._decompressor = None + elif self._mode == _MODE_WRITE: + self._fp.write(self._compressor.flush()) + self._compressor = None + finally: + try: + if self._closefp: + self._fp.close() + finally: + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + self._buffer = b"" + self._buffer_offset = 0 + + @property + def closed(self): + """True if this file is closed.""" + return self._mode == _MODE_CLOSED + + def fileno(self): + """Return the file descriptor for the underlying file.""" + self._check_not_closed() + return self._fp.fileno() + + def seekable(self): + """Return whether the file supports seeking.""" + return self.readable() and self._fp.seekable() + + def readable(self): + """Return whether the file was opened for reading.""" + self._check_not_closed() + return self._mode in (_MODE_READ, _MODE_READ_EOF) + + def writable(self): + """Return whether the file was opened for writing.""" + self._check_not_closed() + return self._mode == _MODE_WRITE + + # Mode-checking helper functions. + + def _check_not_closed(self): + if self.closed: + fname = getattr(self._fp, "name", None) + msg = "I/O operation on closed file" + if fname is not None: + msg += " {}".format(fname) + msg += "." + raise ValueError(msg) + + def _check_can_read(self): + if self._mode not in (_MODE_READ, _MODE_READ_EOF): + self._check_not_closed() + raise io.UnsupportedOperation("File not open for reading") + + def _check_can_write(self): + if self._mode != _MODE_WRITE: + self._check_not_closed() + raise io.UnsupportedOperation("File not open for writing") + + def _check_can_seek(self): + if self._mode not in (_MODE_READ, _MODE_READ_EOF): + self._check_not_closed() + raise io.UnsupportedOperation( + "Seeking is only supported on files open for reading" + ) + if not self._fp.seekable(): + raise io.UnsupportedOperation( + "The underlying file object does not support seeking" + ) + + # Fill the readahead buffer if it is empty. Returns False on EOF. + def _fill_buffer(self): + if self._mode == _MODE_READ_EOF: + return False + # Depending on the input data, our call to the decompressor may not + # return any data. In this case, try again after reading another block. + while self._buffer_offset == len(self._buffer): + try: + rawblock = self._decompressor.unused_data or self._fp.read(_BUFFER_SIZE) + if not rawblock: + raise EOFError + except EOFError: + # End-of-stream marker and end of file. We're good. 
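+                # Remembering the final logical size here also lets
+                # end-relative seeks (seek(..., whence=2)) resolve their
+                # target offset without another full decompression pass.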
+ self._mode = _MODE_READ_EOF + self._size = self._pos + return False + else: + self._buffer = self._decompressor.decompress(rawblock) + self._buffer_offset = 0 + return True + + # Read data until EOF. + # If return_data is false, consume the data without returning it. + def _read_all(self, return_data=True): + # The loop assumes that _buffer_offset is 0. Ensure that this is true. + self._buffer = self._buffer[self._buffer_offset :] + self._buffer_offset = 0 + + blocks = [] + while self._fill_buffer(): + if return_data: + blocks.append(self._buffer) + self._pos += len(self._buffer) + self._buffer = b"" + if return_data: + return b"".join(blocks) + + # Read a block of up to n bytes. + # If return_data is false, consume the data without returning it. + def _read_block(self, n_bytes, return_data=True): + # If we have enough data buffered, return immediately. + end = self._buffer_offset + n_bytes + if end <= len(self._buffer): + data = self._buffer[self._buffer_offset : end] + self._buffer_offset = end + self._pos += len(data) + return data if return_data else None + + # The loop assumes that _buffer_offset is 0. Ensure that this is true. + self._buffer = self._buffer[self._buffer_offset :] + self._buffer_offset = 0 + + blocks = [] + while n_bytes > 0 and self._fill_buffer(): + if n_bytes < len(self._buffer): + data = self._buffer[:n_bytes] + self._buffer_offset = n_bytes + else: + data = self._buffer + self._buffer = b"" + if return_data: + blocks.append(data) + self._pos += len(data) + n_bytes -= len(data) + if return_data: + return b"".join(blocks) + + def read(self, size=-1): + """Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b'' if the file is already at EOF. + """ + with self._lock: + self._check_can_read() + if size == 0: + return b"" + elif size < 0: + return self._read_all() + else: + return self._read_block(size) + + def readinto(self, b): + """Read up to len(b) bytes into b. + + Returns the number of bytes read (0 for EOF). + """ + with self._lock: + return io.BufferedIOBase.readinto(self, b) + + def write(self, data): + """Write a byte string to the file. + + Returns the number of uncompressed bytes written, which is + always len(data). Note that due to buffering, the file on disk + may not reflect the data written until close() is called. + """ + with self._lock: + self._check_can_write() + # Convert data type if called by io.BufferedWriter. + if isinstance(data, memoryview): + data = data.tobytes() + + compressed = self._compressor.compress(data) + self._fp.write(compressed) + self._pos += len(data) + return len(data) + + # Rewind the file to the beginning of the data stream. + def _rewind(self): + self._fp.seek(0, 0) + self._mode = _MODE_READ + self._pos = 0 + self._decompressor = zlib.decompressobj(self.wbits) + self._buffer = b"" + self._buffer_offset = 0 + + def seek(self, offset, whence=0): + """Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Values for whence are: + + 0: start of stream (default); offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + """ + with self._lock: + self._check_can_seek() + + # Recalculate offset as an absolute file position. 
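+            # (The whence semantics mirror io.IOBase.seek: 0 = from the
+            # start, 1 = from the current position, 2 = from the end.)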
+            if whence == 0:
+                pass
+            elif whence == 1:
+                offset = self._pos + offset
+            elif whence == 2:
+                # Seeking relative to EOF - we need to know the file's size.
+                if self._size < 0:
+                    self._read_all(return_data=False)
+                offset = self._size + offset
+            else:
+                raise ValueError("Invalid value for whence: %s" % (whence,))
+
+            # Make it so that offset is the number of bytes to skip forward.
+            if offset < self._pos:
+                self._rewind()
+            else:
+                offset -= self._pos
+
+            # Read and discard data until we reach the desired position.
+            self._read_block(offset, return_data=False)
+
+            return self._pos
+
+    def tell(self):
+        """Return the current file position."""
+        with self._lock:
+            self._check_not_closed()
+            return self._pos
+
+
+class ZlibCompressorWrapper(CompressorWrapper):
+    def __init__(self):
+        CompressorWrapper.__init__(
+            self, obj=BinaryZlibFile, prefix=_ZLIB_PREFIX, extension=".z"
+        )
+
+
+class BinaryGzipFile(BinaryZlibFile):
+    """A file object providing transparent gzip (de)compression.
+
+    If filename is a str or bytes object, it gives the name
+    of the file to be opened. Otherwise, it should be a file object,
+    which will be used to read or write the compressed data.
+
+    mode can be 'rb' for reading (default) or 'wb' for (over)writing
+
+    If mode is 'wb', compresslevel can be a number between 1
+    and 9 specifying the level of compression: 1 produces the least
+    compression, and 9 produces the most compression. 3 is the default.
+    """
+
+    wbits = 31  # zlib compressor/decompressor wbits value for gzip format.
+
+
+class GzipCompressorWrapper(CompressorWrapper):
+    def __init__(self):
+        CompressorWrapper.__init__(
+            self, obj=BinaryGzipFile, prefix=_GZIP_PREFIX, extension=".gz"
+        )
diff --git a/py311/lib/python3.11/site-packages/joblib/disk.py b/py311/lib/python3.11/site-packages/joblib/disk.py
new file mode 100644
index 0000000000000000000000000000000000000000..61222e2bb066b70ae92e621841c6f4f01309cca7
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/joblib/disk.py
@@ -0,0 +1,131 @@
+"""
+Disk management utilities.
+"""
+
+# Authors: Gael Varoquaux
+#          Lars Buitinck
+# Copyright (c) 2010 Gael Varoquaux
+# License: BSD Style, 3 clauses.
+
+import errno
+import os
+import shutil
+import sys
+import time
+from multiprocessing import util
+
+try:
+    WindowsError
+except NameError:
+    WindowsError = OSError
+
+
+def disk_used(path):
+    """Return the disk usage in a directory."""
+    size = 0
+    for file in os.listdir(path) + ["."]:
+        stat = os.stat(os.path.join(path, file))
+        if hasattr(stat, "st_blocks"):
+            size += stat.st_blocks * 512
+        else:
+            # on some platforms st_blocks is not available (e.g., Windows);
+            # approximate by rounding to the next multiple of 512
+            size += (stat.st_size // 512 + 1) * 512
+    # We need to convert to int to avoid having longs on some systems (we
+    # don't want longs, to avoid problems with SQLite)
+    return int(size / 1024.0)
+
+
+def memstr_to_bytes(text):
+    """Convert a memory text to its value in bytes."""
+    kilo = 1024
+    units = dict(K=kilo, M=kilo**2, G=kilo**3)
+    try:
+        size = int(units[text[-1]] * float(text[:-1]))
+    except (KeyError, ValueError) as e:
+        raise ValueError(
+            "Invalid size literal given: %s (type %s); should be "
+            "like '10G', '500M', '50K'." % (text, type(text))
+        ) from e
+    return size
+
+
+def mkdirp(d):
+    """Ensure directory d exists (like mkdir -p on Unix).
+    No guarantee that the directory is writable.
+ """ + try: + os.makedirs(d) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + +# if a rmtree operation fails in rm_subdirs, wait for this much time (in secs), +# then retry up to RM_SUBDIRS_N_RETRY times. If it still fails, raise the +# exception. this mechanism ensures that the sub-process gc have the time to +# collect and close the memmaps before we fail. +RM_SUBDIRS_RETRY_TIME = 0.1 +RM_SUBDIRS_N_RETRY = 10 + + +def rm_subdirs(path, onerror=None): + """Remove all subdirectories in this path. + + The directory indicated by `path` is left in place, and its subdirectories + are erased. + + If onerror is set, it is called to handle the error with arguments (func, + path, exc_info) where func is os.listdir, os.remove, or os.rmdir; + path is the argument to that function that caused it to fail; and + exc_info is a tuple returned by sys.exc_info(). If onerror is None, + an exception is raised. + """ + + # NOTE this code is adapted from the one in shutil.rmtree, and is + # just as fast + + names = [] + try: + names = os.listdir(path) + except os.error: + if onerror is not None: + onerror(os.listdir, path, sys.exc_info()) + else: + raise + + for name in names: + fullname = os.path.join(path, name) + delete_folder(fullname, onerror=onerror) + + +def delete_folder(folder_path, onerror=None, allow_non_empty=True): + """Utility function to cleanup a temporary folder if it still exists.""" + if os.path.isdir(folder_path): + if onerror is not None: + shutil.rmtree(folder_path, False, onerror) + else: + # allow the rmtree to fail once, wait and re-try. + # if the error is raised again, fail + err_count = 0 + while True: + files = os.listdir(folder_path) + try: + if len(files) == 0 or allow_non_empty: + shutil.rmtree(folder_path, ignore_errors=False, onerror=None) + util.debug("Successfully deleted {}".format(folder_path)) + break + else: + raise OSError( + "Expected empty folder {} but got {} files.".format( + folder_path, len(files) + ) + ) + except (OSError, WindowsError): + err_count += 1 + if err_count > RM_SUBDIRS_N_RETRY: + # the folder cannot be deleted right now. It maybe + # because some temporary files have not been deleted + # yet. + raise + time.sleep(RM_SUBDIRS_RETRY_TIME) diff --git a/py311/lib/python3.11/site-packages/joblib/executor.py b/py311/lib/python3.11/site-packages/joblib/executor.py new file mode 100644 index 0000000000000000000000000000000000000000..60aae8f7a8ed4d48116addfdeb9bd213152eed27 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/executor.py @@ -0,0 +1,131 @@ +"""Utility function to construct a loky.ReusableExecutor with custom pickler. + +This module provides efficient ways of working with data stored in +shared memory with numpy.memmap arrays without inducing any memory +copy between the parent and child processes. +""" +# Author: Thomas Moreau +# Copyright: 2017, Thomas Moreau +# License: BSD 3 clause + +from ._memmapping_reducer import TemporaryResourcesManager, get_memmapping_reducers +from .externals.loky.reusable_executor import _ReusablePoolExecutor + +_executor_args = None + + +def get_memmapping_executor(n_jobs, **kwargs): + return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs) + + +class MemmappingExecutor(_ReusablePoolExecutor): + @classmethod + def get_memmapping_executor( + cls, + n_jobs, + timeout=300, + initializer=None, + initargs=(), + env=None, + temp_folder=None, + context_id=None, + **backend_args, + ): + """Factory for ReusableExecutor with automatic memmapping for large + numpy arrays. 
+ """ + global _executor_args + # Check if we can reuse the executor here instead of deferring the test + # to loky as the reducers are objects that changes at each call. + executor_args = backend_args.copy() + executor_args.update(env if env else {}) + executor_args.update( + dict(timeout=timeout, initializer=initializer, initargs=initargs) + ) + reuse = _executor_args is None or _executor_args == executor_args + _executor_args = executor_args + + manager = TemporaryResourcesManager(temp_folder) + + # reducers access the temporary folder in which to store temporary + # pickles through a call to manager.resolve_temp_folder_name. resolving + # the folder name dynamically is useful to use different folders across + # calls of a same reusable executor + job_reducers, result_reducers = get_memmapping_reducers( + unlink_on_gc_collect=True, + temp_folder_resolver=manager.resolve_temp_folder_name, + **backend_args, + ) + _executor, executor_is_reused = super().get_reusable_executor( + n_jobs, + job_reducers=job_reducers, + result_reducers=result_reducers, + reuse=reuse, + timeout=timeout, + initializer=initializer, + initargs=initargs, + env=env, + ) + + if not executor_is_reused: + # Only set a _temp_folder_manager for new executors. Reused + # executors already have a _temporary_folder_manager that must not + # be re-assigned like that because it is referenced in various + # places in the reducing machinery of the executor. + _executor._temp_folder_manager = manager + + if context_id is not None: + # Only register the specified context once we know which manager + # the current executor is using, in order to not register an atexit + # finalizer twice for the same folder. + _executor._temp_folder_manager.register_new_context(context_id) + + return _executor + + def terminate(self, kill_workers=False): + self.shutdown(kill_workers=kill_workers) + + # When workers are killed in a brutal manner, they cannot execute the + # finalizer of their shared memmaps. The refcount of those memmaps may + # be off by an unknown number, so instead of decref'ing them, we force + # delete the whole temporary folder, and unregister them. There is no + # risk of PermissionError at folder deletion because at this + # point, all child processes are dead, so all references to temporary + # memmaps are closed. Otherwise, just try to delete as much as possible + # with allow_non_empty=True but if we can't, it will be clean up later + # on by the resource_tracker. + with self._submit_resize_lock: + self._temp_folder_manager._clean_temporary_resources( + force=kill_workers, allow_non_empty=True + ) + + @property + def _temp_folder(self): + # Legacy property in tests. could be removed if we refactored the + # memmapping tests. SHOULD ONLY BE USED IN TESTS! + # We cache this property because it is called late in the tests - at + # this point, all context have been unregistered, and + # resolve_temp_folder_name raises an error. + if getattr(self, "_cached_temp_folder", None) is not None: + return self._cached_temp_folder + else: + self._cached_temp_folder = ( + self._temp_folder_manager.resolve_temp_folder_name() + ) # noqa + return self._cached_temp_folder + + +class _TestingMemmappingExecutor(MemmappingExecutor): + """Wrapper around ReusableExecutor to ease memmapping testing with Pool + and Executor. This is only for testing purposes. 
+
+    """
+
+    def apply_async(self, func, args):
+        """Schedule a func to be run"""
+        future = self.submit(func, *args)
+        future.get = future.result
+        return future
+
+    def map(self, f, *args):
+        return list(super().map(f, *args))
diff --git a/py311/lib/python3.11/site-packages/joblib/func_inspect.py b/py311/lib/python3.11/site-packages/joblib/func_inspect.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f28f88cd0a06052672f1ccd8c110fed2537d905
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/joblib/func_inspect.py
@@ -0,0 +1,379 @@
+"""
+My own variation on function-specific inspect-like features.
+"""
+
+# Author: Gael Varoquaux
+# Copyright (c) 2009 Gael Varoquaux
+# License: BSD Style, 3 clauses.
+
+import collections
+import inspect
+import os
+import re
+import warnings
+from itertools import islice
+from tokenize import open as open_py_source
+
+from .logger import pformat
+
+full_argspec_fields = (
+    "args varargs varkw defaults kwonlyargs kwonlydefaults annotations"
+)
+full_argspec_type = collections.namedtuple("FullArgSpec", full_argspec_fields)
+
+
+def get_func_code(func):
+    """Attempts to retrieve a reliable function code hash.
+
+    The reason we don't use inspect.getsource is that it caches the
+    source, whereas we want this to be modified on the fly when the
+    function is modified.
+
+    Returns
+    -------
+    func_code: string
+        The function code
+    source_file: string
+        The path to the file in which the function is defined.
+    first_line: int
+        The first line of the code in the source file.
+
+    Notes
+    ------
+    This function does a bit more magic than inspect, and is thus
+    more robust.
+    """
+    source_file = None
+    try:
+        code = func.__code__
+        source_file = code.co_filename
+        if not os.path.exists(source_file):
+            # Use inspect for lambda functions and functions defined in an
+            # interactive shell, or in doctests
+            source_code = "".join(inspect.getsourcelines(func)[0])
+            line_no = 1
+            if source_file.startswith("<doctest "):
+                source_file, line_no = re.match(
+                    r"\<doctest (.*\.rst)\[(.*)\]\>", source_file
+                ).groups()
+                line_no = int(line_no)
+                source_file = "<doctest %s>" % source_file
+            return source_code, source_file, line_no
+        # Try to retrieve the source code.
+        with open_py_source(source_file) as source_file_obj:
+            first_line = code.co_firstlineno
+            # All the lines after the function definition:
+            source_lines = list(islice(source_file_obj, first_line - 1, None))
+        return "".join(inspect.getblock(source_lines)), source_file, first_line
+    except:  # noqa: E722
+        # If reading the source code fails, we use the hash. This is fragile
+        # and might change from one session to another.
+        if hasattr(func, "__code__"):
+            # Python 3.X
+            return str(func.__code__.__hash__()), source_file, -1
+        else:
+            # Weird objects like numpy ufunc don't have __code__
+            # This is fragile, as quite often the id of the object is
+            # in the repr, so it might not persist across sessions,
+            # however it will work for ufuncs.
+            return repr(func), source_file, -1
+
+
+def _clean_win_chars(string):
+    """Windows cannot encode some characters in filenames."""
+    import urllib
+
+    if hasattr(urllib, "quote"):
+        quote = urllib.quote
+    else:
+        # In Python 3, quote is elsewhere
+        import urllib.parse
+
+        quote = urllib.parse.quote
+    for char in ("<", ">", "!", ":", "\\"):
+        string = string.replace(char, quote(char))
+    return string
+
+
+def get_func_name(func, resolv_alias=True, win_characters=True):
+    """Return the function import path (as a list of module names), and
+    a name for the function.
+
+    Parameters
+    ----------
+    func: callable
+        The func to inspect
+    resolv_alias: boolean, optional
+        If true, possible local aliases are indicated.
+    win_characters: boolean, optional
+        If true, substitute special characters using urllib.quote.
+        This is useful on Windows, as it cannot encode some filenames.
+    """
+    if hasattr(func, "__module__"):
+        module = func.__module__
+    else:
+        try:
+            module = inspect.getmodule(func)
+        except TypeError:
+            if hasattr(func, "__class__"):
+                module = func.__class__.__module__
+            else:
+                module = "unknown"
+    if module is None:
+        # Happens in doctests, e.g.
+        module = ""
+    if module == "__main__":
+        try:
+            filename = os.path.abspath(inspect.getsourcefile(func))
+        except:  # noqa: E722
+            filename = None
+        if filename is not None:
+            # mangling of full path to filename
+            parts = filename.split(os.sep)
+            if parts[-1].startswith("<ipython-input"):
+                # We're in an IPython (or notebook) session. parts[-1] comes
+                # from func.__code__.co_filename and is of the form
+                # "<ipython-input-N-XYZ>", where:
+                # - N is the cell number where the function was defined
+                # - XYZ is a hash representing the function's code (and name).
+                #   It will be consistent across sessions and kernel restarts,
+                #   and will change if the function's code/name changes
+                # We remove N so that cache is properly hit if the cell where
+                # the func is defined is re-executed.
+                # The XYZ hash should avoid collisions between functions with
+                # the same name, both within the same notebook but also across
+                # notebooks
+                split = parts[-1].split("-")
+                parts[-1] = "-".join(split[:2] + split[3:])
+            elif len(parts) > 2 and parts[-2].startswith("ipykernel_"):
+                # In a notebook session (ipykernel). Filename seems to be 'xyz'
+                # of above. parts[-2] has the structure ipykernel_XXXXXX where
+                # XXXXXX is a six-digit number identifying the current run (?).
+                # If we split it off, the function again has the same
+                # identifier across runs.
+                parts[-2] = "ipykernel"
+            filename = "-".join(parts)
+            if filename.endswith(".py"):
+                filename = filename[:-3]
+            module = module + "-" + filename
+    module = module.split(".")
+    if hasattr(func, "func_name"):
+        name = func.func_name
+    elif hasattr(func, "__name__"):
+        name = func.__name__
+    else:
+        name = "unknown"
+    # Hack to detect functions not defined at the module-level
+    if resolv_alias:
+        # TODO: Maybe add a warning here?
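+        # A module-level name that no longer points at this very object means
+        # the function was re-bound after definition; tag it as an alias so
+        # that distinct callables do not share a cache location.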
+ if hasattr(func, "func_globals") and name in func.func_globals: + if func.func_globals[name] is not func: + name = "%s-alias" % name + if hasattr(func, "__qualname__") and func.__qualname__ != name: + # Extend the module name in case of nested functions to avoid + # (module, name) collisions + module.extend(func.__qualname__.split(".")[:-1]) + if inspect.ismethod(func): + # We need to add the name of the class + if hasattr(func, "im_class"): + klass = func.im_class + module.append(klass.__name__) + if os.name == "nt" and win_characters: + # Windows can't encode certain characters in filenames + name = _clean_win_chars(name) + module = [_clean_win_chars(s) for s in module] + return module, name + + +def _signature_str(function_name, arg_sig): + """Helper function to output a function signature""" + return "{}{}".format(function_name, arg_sig) + + +def _function_called_str(function_name, args, kwargs): + """Helper function to output a function call""" + template_str = "{0}({1}, {2})" + + args_str = repr(args)[1:-1] + kwargs_str = ", ".join("%s=%s" % (k, v) for k, v in kwargs.items()) + return template_str.format(function_name, args_str, kwargs_str) + + +def filter_args(func, ignore_lst, args=(), kwargs=dict()): + """Filters the given args and kwargs using a list of arguments to + ignore, and a function specification. + + Parameters + ---------- + func: callable + Function giving the argument specification + ignore_lst: list of strings + List of arguments to ignore (either a name of an argument + in the function spec, or '*', or '**') + *args: list + Positional arguments passed to the function. + **kwargs: dict + Keyword arguments passed to the function + + Returns + ------- + filtered_args: list + List of filtered positional and keyword arguments. + """ + args = list(args) + if isinstance(ignore_lst, str): + # Catch a common mistake + raise ValueError( + "ignore_lst must be a list of parameters to ignore " + "%s (type %s) was given" % (ignore_lst, type(ignore_lst)) + ) + # Special case for functools.partial objects + if not inspect.ismethod(func) and not inspect.isfunction(func): + if ignore_lst: + warnings.warn( + "Cannot inspect object %s, ignore list will not work." % func, + stacklevel=2, + ) + return {"*": args, "**": kwargs} + arg_sig = inspect.signature(func) + arg_names = [] + arg_defaults = [] + arg_kwonlyargs = [] + arg_varargs = None + arg_varkw = None + for param in arg_sig.parameters.values(): + if param.kind is param.POSITIONAL_OR_KEYWORD: + arg_names.append(param.name) + elif param.kind is param.KEYWORD_ONLY: + arg_names.append(param.name) + arg_kwonlyargs.append(param.name) + elif param.kind is param.VAR_POSITIONAL: + arg_varargs = param.name + elif param.kind is param.VAR_KEYWORD: + arg_varkw = param.name + if param.default is not param.empty: + arg_defaults.append(param.default) + if inspect.ismethod(func): + # First argument is 'self', it has been removed by Python + # we need to add it back: + args = [ + func.__self__, + ] + args + # func is an instance method, inspect.signature(func) does not + # include self, we need to fetch it from the class method, i.e + # func.__func__ + class_method_sig = inspect.signature(func.__func__) + self_name = next(iter(class_method_sig.parameters)) + arg_names = [self_name] + arg_names + # XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such + # as on ndarrays. 
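+    # Illustrative behaviour sketch: for ``def f(x, y=2): ...``,
+    # ``filter_args(f, ["y"], (1,))`` returns ``{'x': 1}``; 'y' is resolved
+    # to its default and then dropped, so it never reaches the cache key.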
+ + _, name = get_func_name(func, resolv_alias=False) + arg_dict = dict() + arg_position = -1 + for arg_position, arg_name in enumerate(arg_names): + if arg_position < len(args): + # Positional argument or keyword argument given as positional + if arg_name not in arg_kwonlyargs: + arg_dict[arg_name] = args[arg_position] + else: + raise ValueError( + "Keyword-only parameter '%s' was passed as " + "positional parameter for %s:\n" + " %s was called." + % ( + arg_name, + _signature_str(name, arg_sig), + _function_called_str(name, args, kwargs), + ) + ) + + else: + position = arg_position - len(arg_names) + if arg_name in kwargs: + arg_dict[arg_name] = kwargs[arg_name] + else: + try: + arg_dict[arg_name] = arg_defaults[position] + except (IndexError, KeyError) as e: + # Missing argument + raise ValueError( + "Wrong number of arguments for %s:\n" + " %s was called." + % ( + _signature_str(name, arg_sig), + _function_called_str(name, args, kwargs), + ) + ) from e + + varkwargs = dict() + for arg_name, arg_value in sorted(kwargs.items()): + if arg_name in arg_dict: + arg_dict[arg_name] = arg_value + elif arg_varkw is not None: + varkwargs[arg_name] = arg_value + else: + raise TypeError( + "Ignore list for %s() contains an unexpected " + "keyword argument '%s'" % (name, arg_name) + ) + + if arg_varkw is not None: + arg_dict["**"] = varkwargs + if arg_varargs is not None: + varargs = args[arg_position + 1 :] + arg_dict["*"] = varargs + + # Now remove the arguments to be ignored + for item in ignore_lst: + if item in arg_dict: + arg_dict.pop(item) + else: + raise ValueError( + "Ignore list: argument '%s' is not defined for " + "function %s" % (item, _signature_str(name, arg_sig)) + ) + # XXX: Return a sorted list of pairs? + return arg_dict + + +def _format_arg(arg): + formatted_arg = pformat(arg, indent=2) + if len(formatted_arg) > 1500: + formatted_arg = "%s..." % formatted_arg[:700] + return formatted_arg + + +def format_signature(func, *args, **kwargs): + # XXX: Should this use inspect.formatargvalues/formatargspec? + module, name = get_func_name(func) + module = [m for m in module if m] + if module: + module.append(name) + module_path = ".".join(module) + else: + module_path = name + arg_str = list() + previous_length = 0 + for arg in args: + formatted_arg = _format_arg(arg) + if previous_length > 80: + formatted_arg = "\n%s" % formatted_arg + previous_length = len(formatted_arg) + arg_str.append(formatted_arg) + arg_str.extend(["%s=%s" % (v, _format_arg(i)) for v, i in kwargs.items()]) + arg_str = ", ".join(arg_str) + + signature = "%s(%s)" % (name, arg_str) + return module_path, signature + + +def format_call(func, args, kwargs, object_name="Memory"): + """Returns a nicely formatted statement displaying the function + call with the given arguments. + """ + path, signature = format_signature(func, *args, **kwargs) + msg = "%s\n[%s] Calling %s...\n%s" % (80 * "_", object_name, path, signature) + return msg + # XXX: Not using logging framework + # self.debug(msg) diff --git a/py311/lib/python3.11/site-packages/joblib/hashing.py b/py311/lib/python3.11/site-packages/joblib/hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..2055acf85cbd50a81a265c02f6d9dada554424fa --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/hashing.py @@ -0,0 +1,270 @@ +""" +Fast cryptographic hash of Python objects, with a special case for fast +hashing of numpy arrays. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. 
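+
+# Usage sketch (illustrative): the module-level ``hash`` helper defined at
+# the bottom of this file returns content-based digests, e.g.:
+#
+#     >>> import numpy as np
+#     >>> from joblib.hashing import hash
+#     >>> hash(np.arange(3)) == hash(np.arange(3))
+#     True
+#
+# Equal content gives equal digests; a different dtype or shape changes them.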
+ +import decimal +import hashlib +import io +import pickle +import struct +import sys +import types + +Pickler = pickle._Pickler + + +class _ConsistentSet(object): + """Class used to ensure the hash of Sets is preserved + whatever the order of its items. + """ + + def __init__(self, set_sequence): + # Forces order of elements in set to ensure consistent hash. + try: + # Trying first to order the set assuming the type of elements is + # consistent and orderable. + # This fails on python 3 when elements are unorderable + # but we keep it in a try as it's faster. + self._sequence = sorted(set_sequence) + except (TypeError, decimal.InvalidOperation): + # If elements are unorderable, sorting them using their hash. + # This is slower but works in any case. + self._sequence = sorted((hash(e) for e in set_sequence)) + + +class _MyHash(object): + """Class used to hash objects that won't normally pickle""" + + def __init__(self, *args): + self.args = args + + +class Hasher(Pickler): + """A subclass of pickler, to do cryptographic hashing, rather than + pickling. This is used to produce a unique hash of the given + Python object that is not necessarily cryptographically secure. + """ + + def __init__(self, hash_name="md5"): + self.stream = io.BytesIO() + # By default we want a pickle protocol that only changes with + # the major python version and not the minor one + protocol = 3 + Pickler.__init__(self, self.stream, protocol=protocol) + # Initialise the hash obj + self._hash = hashlib.new(hash_name, usedforsecurity=False) + + def hash(self, obj, return_digest=True): + try: + self.dump(obj) + except pickle.PicklingError as e: + e.args += ("PicklingError while hashing %r: %r" % (obj, e),) + raise + dumps = self.stream.getvalue() + self._hash.update(dumps) + if return_digest: + return self._hash.hexdigest() + + def save(self, obj): + if isinstance(obj, (types.MethodType, type({}.pop))): + # the Pickler cannot pickle instance methods; here we decompose + # them into components that make them uniquely identifiable + if hasattr(obj, "__func__"): + func_name = obj.__func__.__name__ + else: + func_name = obj.__name__ + inst = obj.__self__ + if type(inst) is type(pickle): + obj = _MyHash(func_name, inst.__name__) + elif inst is None: + # type(None) or type(module) do not pickle + obj = _MyHash(func_name, inst) + else: + cls = obj.__self__.__class__ + obj = _MyHash(func_name, inst, cls) + Pickler.save(self, obj) + + def memoize(self, obj): + # We want hashing to be sensitive to value instead of reference. + # For example we want ['aa', 'aa'] and ['aa', 'aaZ'[:2]] + # to hash to the same value and that's why we disable memoization + # for strings + if isinstance(obj, (bytes, str)): + return + Pickler.memoize(self, obj) + + # The dispatch table of the pickler is not accessible in Python + # 3, as these lines are only bugware for IPython, we skip them. 
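+    # (The dispatch registrations a few lines below route builtins, types,
+    # classes and plain functions through this save_global override.)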
+ def save_global(self, obj, name=None, pack=struct.pack): + # We have to override this method in order to deal with objects + # defined interactively in IPython that are not injected in + # __main__ + kwargs = dict(name=name, pack=pack) + del kwargs["pack"] + try: + Pickler.save_global(self, obj, **kwargs) + except pickle.PicklingError: + Pickler.save_global(self, obj, **kwargs) + module = getattr(obj, "__module__", None) + if module == "__main__": + my_name = name + if my_name is None: + my_name = obj.__name__ + mod = sys.modules[module] + if not hasattr(mod, my_name): + # IPython doesn't inject the variables define + # interactively in __main__ + setattr(mod, my_name, obj) + + dispatch = Pickler.dispatch.copy() + # builtin + dispatch[type(len)] = save_global + # type + dispatch[type(object)] = save_global + # classobj + dispatch[type(Pickler)] = save_global + # function + dispatch[type(pickle.dump)] = save_global + + # We use *args in _batch_setitems signature because _batch_setitems has an + # additional 'obj' argument in Python 3.14 + def _batch_setitems(self, items, *args): + # forces order of keys in dict to ensure consistent hash. + try: + # Trying first to compare dict assuming the type of keys is + # consistent and orderable. + # This fails on python 3 when keys are unorderable + # but we keep it in a try as it's faster. + Pickler._batch_setitems(self, iter(sorted(items)), *args) + except TypeError: + # If keys are unorderable, sorting them using their hash. This is + # slower but works in any case. + Pickler._batch_setitems( + self, iter(sorted((hash(k), v) for k, v in items)), *args + ) + + def save_set(self, set_items): + # forces order of items in Set to ensure consistent hash + Pickler.save(self, _ConsistentSet(set_items)) + + dispatch[type(set())] = save_set + + +class NumpyHasher(Hasher): + """Special case the hasher for when numpy is loaded.""" + + def __init__(self, hash_name="md5", coerce_mmap=False): + """ + Parameters + ---------- + hash_name: string + The hash algorithm to be used + coerce_mmap: boolean + Make no difference between np.memmap and np.ndarray + objects. + """ + self.coerce_mmap = coerce_mmap + Hasher.__init__(self, hash_name=hash_name) + # delayed import of numpy, to avoid tight coupling + import numpy as np + + self.np = np + if hasattr(np, "getbuffer"): + self._getbuffer = np.getbuffer + else: + self._getbuffer = memoryview + + def save(self, obj): + """Subclass the save method, to hash ndarray subclass, rather + than pickling them. Off course, this is a total abuse of + the Pickler class. + """ + if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject: + # Compute a hash of the object + # The update function of the hash requires a c_contiguous buffer. + if obj.shape == (): + # 0d arrays need to be flattened because viewing them as bytes + # raises a ValueError exception. + obj_c_contiguous = obj.flatten() + elif obj.flags.c_contiguous: + obj_c_contiguous = obj + elif obj.flags.f_contiguous: + obj_c_contiguous = obj.T + else: + # Cater for non-single-segment arrays: this creates a + # copy, and thus alleviates this issue. + # XXX: There might be a more efficient way of doing this + obj_c_contiguous = obj.flatten() + + # memoryview is not supported for some dtypes, e.g. datetime64, see + # https://github.com/numpy/numpy/issues/4983. The + # workaround is to view the array as bytes before + # taking the memoryview. 
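+            # For instance, on affected numpy versions
+            #     memoryview(np.array(0, dtype="datetime64[D]"))
+            # raises, whereas obj.view(np.uint8) always exposes a plain byte
+            # buffer that the hash update below can consume.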
+ self._hash.update(self._getbuffer(obj_c_contiguous.view(self.np.uint8))) + + # We store the class, to be able to distinguish between + # Objects with the same binary content, but different + # classes. + if self.coerce_mmap and isinstance(obj, self.np.memmap): + # We don't make the difference between memmap and + # normal ndarrays, to be able to reload previously + # computed results with memmap. + klass = self.np.ndarray + else: + klass = obj.__class__ + # We also return the dtype and the shape, to distinguish + # different views on the same data with different dtypes. + + # The object will be pickled by the pickler hashed at the end. + obj = (klass, ("HASHED", obj.dtype, obj.shape, obj.strides)) + elif isinstance(obj, self.np.dtype): + # numpy.dtype consistent hashing is tricky to get right. This comes + # from the fact that atomic np.dtype objects are interned: + # ``np.dtype('f4') is np.dtype('f4')``. The situation is + # complicated by the fact that this interning does not resist a + # simple pickle.load/dump roundtrip: + # ``pickle.loads(pickle.dumps(np.dtype('f4'))) is not + # np.dtype('f4') Because pickle relies on memoization during + # pickling, it is easy to + # produce different hashes for seemingly identical objects, such as + # ``[np.dtype('f4'), np.dtype('f4')]`` + # and ``[np.dtype('f4'), pickle.loads(pickle.dumps('f4'))]``. + # To prevent memoization from interfering with hashing, we isolate + # the serialization (and thus the pickle memoization) of each dtype + # using each time a different ``pickle.dumps`` call unrelated to + # the current Hasher instance. + self._hash.update("_HASHED_DTYPE".encode("utf-8")) + self._hash.update(pickle.dumps(obj)) + return + Hasher.save(self, obj) + + +def hash(obj, hash_name="md5", coerce_mmap=False): + """Quick calculation of a hash to identify uniquely Python objects + containing numpy arrays. + + Parameters + ---------- + hash_name: 'md5' or 'sha1' + Hashing algorithm used. sha1 is supposedly safer, but md5 is + faster. + coerce_mmap: boolean + Make no difference between np.memmap and np.ndarray + """ + valid_hash_names = ("md5", "sha1") + if hash_name not in valid_hash_names: + raise ValueError( + "Valid options for 'hash_name' are {}. Got hash_name={!r} instead.".format( + valid_hash_names, hash_name + ) + ) + if "numpy" in sys.modules: + hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap) + else: + hasher = Hasher(hash_name=hash_name) + return hasher.hash(obj) diff --git a/py311/lib/python3.11/site-packages/joblib/logger.py b/py311/lib/python3.11/site-packages/joblib/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..ed250fd1f2d15e519faf62f6b4392f944b70bb94 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/logger.py @@ -0,0 +1,159 @@ +""" +Helpers for logging. + +This module needs much love to become useful. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2008 Gael Varoquaux +# License: BSD Style, 3 clauses. + +from __future__ import print_function + +import logging +import os +import pprint +import shutil +import sys +import time + +from .disk import mkdirp + + +def _squeeze_time(t): + """Remove .1s to the time under Windows: this is the time it take to + stat files. 
This is needed to make results similar to timings under + Unix, for tests + """ + if sys.platform.startswith("win"): + return max(0, t - 0.1) + else: + return t + + +def format_time(t): + t = _squeeze_time(t) + return "%.1fs, %.1fmin" % (t, t / 60.0) + + +def short_format_time(t): + t = _squeeze_time(t) + if t > 60: + return "%4.1fmin" % (t / 60.0) + else: + return " %5.1fs" % (t) + + +def pformat(obj, indent=0, depth=3): + if "numpy" in sys.modules: + import numpy as np + + print_options = np.get_printoptions() + np.set_printoptions(precision=6, threshold=64, edgeitems=1) + else: + print_options = None + out = pprint.pformat(obj, depth=depth, indent=indent) + if print_options: + np.set_printoptions(**print_options) + return out + + +############################################################################### +# class `Logger` +############################################################################### +class Logger(object): + """Base class for logging messages.""" + + def __init__(self, depth=3, name=None): + """ + Parameters + ---------- + depth: int, optional + The depth of objects printed. + name: str, optional + The namespace to log to. If None, defaults to joblib. + """ + self.depth = depth + self._name = name if name else "joblib" + + def warn(self, msg): + logging.getLogger(self._name).warning("[%s]: %s" % (self, msg)) + + def info(self, msg): + logging.info("[%s]: %s" % (self, msg)) + + def debug(self, msg): + # XXX: This conflicts with the debug flag used in children class + logging.getLogger(self._name).debug("[%s]: %s" % (self, msg)) + + def format(self, obj, indent=0): + """Return the formatted representation of the object.""" + return pformat(obj, indent=indent, depth=self.depth) + + +############################################################################### +# class `PrintTime` +############################################################################### +class PrintTime(object): + """Print and log messages while keeping track of time.""" + + def __init__(self, logfile=None, logdir=None): + if logfile is not None and logdir is not None: + raise ValueError("Cannot specify both logfile and logdir") + # XXX: Need argument docstring + self.last_time = time.time() + self.start_time = self.last_time + if logdir is not None: + logfile = os.path.join(logdir, "joblib.log") + self.logfile = logfile + if logfile is not None: + mkdirp(os.path.dirname(logfile)) + if os.path.exists(logfile): + # Rotate the logs + for i in range(1, 9): + try: + shutil.move(logfile + ".%i" % i, logfile + ".%i" % (i + 1)) + except: # noqa: E722 + "No reason failing here" + # Use a copy rather than a move, so that a process + # monitoring this file does not get lost. + try: + shutil.copy(logfile, logfile + ".1") + except: # noqa: E722 + "No reason failing here" + try: + with open(logfile, "w") as logfile: + logfile.write("\nLogging joblib python script\n") + logfile.write("\n---%s---\n" % time.ctime(self.last_time)) + except: # noqa: E722 + """ Multiprocessing writing to files can create race + conditions. Rather fail silently than crash the + computation. + """ + # XXX: We actually need a debug flag to disable this + # silent failure. + + def __call__(self, msg="", total=False): + """Print the time elapsed between the last call and the current + call, with an optional message. 
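+
+        A sketch of typical use (timings shown are illustrative)::
+
+            pt = PrintTime(logdir="/tmp/logs")  # path illustrative
+            pt("loading data")          # prints "loading data: 0.5s, 0.0min"
+            pt("all done", total=True)  # elapsed since construction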
+ """ + if not total: + time_lapse = time.time() - self.last_time + full_msg = "%s: %s" % (msg, format_time(time_lapse)) + else: + # FIXME: Too much logic duplicated + time_lapse = time.time() - self.start_time + full_msg = "%s: %.2fs, %.1f min" % (msg, time_lapse, time_lapse / 60) + print(full_msg, file=sys.stderr) + if self.logfile is not None: + try: + with open(self.logfile, "a") as f: + print(full_msg, file=f) + except: # noqa: E722 + """ Multiprocessing writing to files can create race + conditions. Rather fail silently than crash the + calculation. + """ + # XXX: We actually need a debug flag to disable this + # silent failure. + self.last_time = time.time() diff --git a/py311/lib/python3.11/site-packages/joblib/memory.py b/py311/lib/python3.11/site-packages/joblib/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..c4670c9121f9c1b1668b633d7ada9425cfb4aed3 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/memory.py @@ -0,0 +1,1242 @@ +""" +A context object for caching a function's return value each time it +is called with the same input arguments. + +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import asyncio +import datetime +import functools +import inspect +import logging +import os +import pathlib +import pydoc +import re +import textwrap +import time +import tokenize +import traceback +import warnings +import weakref + +from . import hashing +from ._store_backends import ( + CacheWarning, # noqa + FileSystemStoreBackend, + StoreBackendBase, +) +from .func_inspect import ( + filter_args, + format_call, + format_signature, + get_func_code, + get_func_name, +) +from .logger import Logger, format_time, pformat + +FIRST_LINE_TEXT = "# first line:" + +# TODO: The following object should have a data store object as a sub +# object, and the interface to persist and query should be separated in +# the data store. +# +# This would enable creating 'Memory' objects with a different logic for +# pickling that would simply span a MemorizedFunc with the same +# store (or do we want to copy it to avoid cross-talks?), for instance to +# implement HDF5 pickling. + +# TODO: Same remark for the logger, and probably use the Python logging +# mechanism. + + +def extract_first_line(func_code): + """Extract the first line information from the function code + text if available. + """ + if func_code.startswith(FIRST_LINE_TEXT): + func_code = func_code.split("\n") + first_line = int(func_code[0][len(FIRST_LINE_TEXT) :]) + func_code = "\n".join(func_code[1:]) + else: + first_line = -1 + return func_code, first_line + + +class JobLibCollisionWarning(UserWarning): + """Warn that there might be a collision between names of functions.""" + + +_STORE_BACKENDS = {"local": FileSystemStoreBackend} + + +def register_store_backend(backend_name, backend): + """Extend available store backends. + + The Memory, MemorizeResult and MemorizeFunc objects are designed to be + agnostic to the type of store used behind. By default, the local file + system is used but this function gives the possibility to extend joblib's + memory pattern with other types of storage such as cloud storage (S3, GCS, + OpenStack, HadoopFS, etc) or blob DBs. + + Parameters + ---------- + backend_name: str + The name identifying the store backend being registered. For example, + 'local' is used with FileSystemStoreBackend. + backend: StoreBackendBase subclass + The name of a class that implements the StoreBackendBase interface. 
+ + """ + if not isinstance(backend_name, str): + raise ValueError( + "Store backend name should be a string, '{0}' given.".format(backend_name) + ) + if backend is None or not issubclass(backend, StoreBackendBase): + raise ValueError( + "Store backend should inherit StoreBackendBase, '{0}' given.".format( + backend + ) + ) + + _STORE_BACKENDS[backend_name] = backend + + +def _store_backend_factory(backend, location, verbose=0, backend_options=None): + """Return the correct store object for the given location.""" + if backend_options is None: + backend_options = {} + + if isinstance(location, pathlib.Path): + location = str(location) + + if isinstance(location, StoreBackendBase): + return location + elif isinstance(location, str): + obj = None + location = os.path.expanduser(location) + # The location is not a local file system, we look in the + # registered backends if there's one matching the given backend + # name. + for backend_key, backend_obj in _STORE_BACKENDS.items(): + if backend == backend_key: + obj = backend_obj() + + # By default, we assume the FileSystemStoreBackend can be used if no + # matching backend could be found. + if obj is None: + raise TypeError( + "Unknown location {0} or backend {1}".format(location, backend) + ) + + # The store backend is configured with the extra named parameters, + # some of them are specific to the underlying store backend. + obj.configure(location, verbose=verbose, backend_options=backend_options) + return obj + elif location is not None: + warnings.warn( + "Instantiating a backend using a {} as a location is not " + "supported by joblib. Returning None instead.".format( + location.__class__.__name__ + ), + UserWarning, + ) + + return None + + +def _build_func_identifier(func): + """Build a roughly unique identifier for the cached function.""" + modules, funcname = get_func_name(func) + # We reuse historical fs-like way of building a function identifier + return os.path.join(*modules, funcname) + + +# An in-memory store to avoid looking at the disk-based function +# source code to check if a function definition has changed +_FUNCTION_HASHES = weakref.WeakKeyDictionary() + + +############################################################################### +# class `MemorizedResult` +############################################################################### +class MemorizedResult(Logger): + """Object representing a cached value. + + Attributes + ---------- + location: str + The location of joblib cache. Depends on the store backend used. + + func: function or str + function whose output is cached. The string case is intended only for + instantiation based on the output of repr() on another instance. + (namely eval(repr(memorized_instance)) works). + + argument_hash: str + hash of the function arguments. + + backend: str + Type of store backend for reading/writing cache files. + Default is 'local'. + + mmap_mode: {None, 'r+', 'r', 'w+', 'c'} + The memmapping mode used when loading from cache numpy arrays. See + numpy.load for the meaning of the different values. + + verbose: int + verbosity level (0 means no message). + + timestamp, metadata: string + for internal use only. 
+ """ + + def __init__( + self, + location, + call_id, + backend="local", + mmap_mode=None, + verbose=0, + timestamp=None, + metadata=None, + ): + Logger.__init__(self) + self._call_id = call_id + self.store_backend = _store_backend_factory(backend, location, verbose=verbose) + self.mmap_mode = mmap_mode + + if metadata is not None: + self.metadata = metadata + else: + self.metadata = self.store_backend.get_metadata(self._call_id) + + self.duration = self.metadata.get("duration", None) + self.verbose = verbose + self.timestamp = timestamp + + @property + def func(self): + return self.func_id + + @property + def func_id(self): + return self._call_id[0] + + @property + def args_id(self): + return self._call_id[1] + + def get(self): + """Read value from cache and return it.""" + try: + return self.store_backend.load_item( + self._call_id, + timestamp=self.timestamp, + metadata=self.metadata, + verbose=self.verbose, + ) + except ValueError as exc: + new_exc = KeyError( + "Error while trying to load a MemorizedResult's value. " + "It seems that this folder is corrupted : {}".format( + os.path.join(self.store_backend.location, *self._call_id) + ) + ) + raise new_exc from exc + + def clear(self): + """Clear value from cache""" + self.store_backend.clear_item(self._call_id) + + def __repr__(self): + return '{}(location="{}", func="{}", args_id="{}")'.format( + self.__class__.__name__, self.store_backend.location, *self._call_id + ) + + def __getstate__(self): + state = self.__dict__.copy() + state["timestamp"] = None + return state + + +class NotMemorizedResult(object): + """Class representing an arbitrary value. + + This class is a replacement for MemorizedResult when there is no cache. + """ + + __slots__ = ("value", "valid") + + def __init__(self, value): + self.value = value + self.valid = True + + def get(self): + if self.valid: + return self.value + else: + raise KeyError("No value stored.") + + def clear(self): + self.valid = False + self.value = None + + def __repr__(self): + if self.valid: + return "{class_name}({value})".format( + class_name=self.__class__.__name__, value=pformat(self.value) + ) + else: + return self.__class__.__name__ + " with no value" + + # __getstate__ and __setstate__ are required because of __slots__ + def __getstate__(self): + return {"valid": self.valid, "value": self.value} + + def __setstate__(self, state): + self.valid = state["valid"] + self.value = state["value"] + + +############################################################################### +# class `NotMemorizedFunc` +############################################################################### +class NotMemorizedFunc(object): + """No-op object decorating a function. + + This class replaces MemorizedFunc when there is no cache. It provides an + identical API but does not write anything on disk. + + Attributes + ---------- + func: callable + Original undecorated function. 
+ """ + + # Should be a light as possible (for speed) + def __init__(self, func): + self.func = func + + def __call__(self, *args, **kwargs): + return self.func(*args, **kwargs) + + def call_and_shelve(self, *args, **kwargs): + return NotMemorizedResult(self.func(*args, **kwargs)) + + def __repr__(self): + return "{0}(func={1})".format(self.__class__.__name__, self.func) + + def clear(self, warn=True): + # Argument "warn" is for compatibility with MemorizedFunc.clear + pass + + def call(self, *args, **kwargs): + return self.func(*args, **kwargs), {} + + def check_call_in_cache(self, *args, **kwargs): + return False + + +############################################################################### +# class `AsyncNotMemorizedFunc` +############################################################################### +class AsyncNotMemorizedFunc(NotMemorizedFunc): + async def call_and_shelve(self, *args, **kwargs): + return NotMemorizedResult(await self.func(*args, **kwargs)) + + +############################################################################### +# class `MemorizedFunc` +############################################################################### +class MemorizedFunc(Logger): + """Callable object decorating a function for caching its return value + each time it is called. + + Methods are provided to inspect the cache or clean it. + + Attributes + ---------- + func: callable + The original, undecorated, function. + + location: string + The location of joblib cache. Depends on the store backend used. + + backend: str + Type of store backend for reading/writing cache files. + Default is 'local', in which case the location is the path to a + disk storage. + + ignore: list or None + List of variable names to ignore when choosing whether to + recompute. + + mmap_mode: {None, 'r+', 'r', 'w+', 'c'} + The memmapping mode used when loading from cache + numpy arrays. See numpy.load for the meaning of the different + values. + + compress: boolean, or integer + Whether to zip the stored data on disk. If an integer is + given, it should be between 1 and 9, and sets the amount + of compression. Note that compressed arrays cannot be + read by memmapping. + + verbose: int, optional + The verbosity flag, controls messages that are issued as + the function is evaluated. + + cache_validation_callback: callable, optional + Callable to check if a result in cache is valid or is to be recomputed. + When the function is called with arguments for which a cache exists, + the callback is called with the cache entry's metadata as its sole + argument. If it returns True, the cached result is returned, else the + cache for these arguments is cleared and the result is recomputed. + """ + + # ------------------------------------------------------------------------ + # Public interface + # ------------------------------------------------------------------------ + + def __init__( + self, + func, + location, + backend="local", + ignore=None, + mmap_mode=None, + compress=False, + verbose=1, + timestamp=None, + cache_validation_callback=None, + ): + Logger.__init__(self) + self.mmap_mode = mmap_mode + self.compress = compress + self.func = func + self.cache_validation_callback = cache_validation_callback + self.func_id = _build_func_identifier(func) + self.ignore = ignore if ignore is not None else [] + self._verbose = verbose + + # retrieve store object from backend type and location. 
+ self.store_backend = _store_backend_factory( + backend, + location, + verbose=verbose, + backend_options=dict(compress=compress, mmap_mode=mmap_mode), + ) + if self.store_backend is not None: + # Create func directory on demand. + self.store_backend.store_cached_func_code([self.func_id]) + + self.timestamp = timestamp if timestamp is not None else time.time() + try: + functools.update_wrapper(self, func) + except Exception: + pass # Objects like ufunc don't like that + if inspect.isfunction(func): + doc = pydoc.TextDoc().document(func) + # Remove blank line + doc = doc.replace("\n", "\n\n", 1) + # Strip backspace-overprints for compatibility with autodoc + doc = re.sub("\x08.", "", doc) + else: + # Pydoc does a poor job on other objects + doc = func.__doc__ + self.__doc__ = "Memoized version of %s" % doc + + self._func_code_info = None + self._func_code_id = None + + def _is_in_cache_and_valid(self, call_id): + """Check if the function call is cached and valid for given arguments. + + - Compare the function code with the one from the cached function, + asserting if it has changed. + - Check if the function call is present in the cache. + - Call `cache_validation_callback` for user define cache validation. + + Returns True if the function call is in cache and can be used, and + returns False otherwise. + """ + # Check if the code of the function has changed + if not self._check_previous_func_code(stacklevel=4): + return False + + # Check if this specific call is in the cache + if not self.store_backend.contains_item(call_id): + return False + + # Call the user defined cache validation callback + metadata = self.store_backend.get_metadata(call_id) + if ( + self.cache_validation_callback is not None + and not self.cache_validation_callback(metadata) + ): + self.store_backend.clear_item(call_id) + return False + + return True + + def _cached_call(self, args, kwargs, shelving): + """Call wrapped function and cache result, or read cache if available. + + This function returns the wrapped function output or a reference to + the cached result. + + Arguments: + ---------- + + args, kwargs: list and dict + input arguments for wrapped function + + shelving: bool + True when called via the call_and_shelve function. + + + Returns + ------- + output: Output of the wrapped function if shelving is false, or a + MemorizedResult reference to the value if shelving is true. + metadata: dict containing the metadata associated with the call. + """ + args_id = self._get_args_id(*args, **kwargs) + call_id = (self.func_id, args_id) + _, func_name = get_func_name(self.func) + func_info = self.store_backend.get_cached_func_info([self.func_id]) + location = func_info["location"] + + if self._verbose >= 20: + logging.basicConfig(level=logging.INFO) + _, signature = format_signature(self.func, *args, **kwargs) + self.info( + textwrap.dedent( + f""" + Querying {func_name} with signature + {signature}. + + (argument hash {args_id}) + + The store location is {location}. + """ + ) + ) + + # Compare the function code with the previous to see if the + # function code has changed and check if the results are present in + # the cache. 
+ if self._is_in_cache_and_valid(call_id): + if shelving: + return self._get_memorized_result(call_id), {} + + try: + start_time = time.time() + output = self._load_item(call_id) + if self._verbose > 4: + self._print_duration( + time.time() - start_time, context="cache loaded " + ) + return output, {} + except Exception: + # XXX: Should use an exception logger + _, signature = format_signature(self.func, *args, **kwargs) + self.warn( + "Exception while loading results for {}\n {}".format( + signature, traceback.format_exc() + ) + ) + + if self._verbose > 10: + self.warn( + f"Computing func {func_name}, argument hash {args_id} " + f"in location {location}" + ) + + # Returns the output but not the metadata + return self._call(call_id, args, kwargs, shelving) + + @property + def func_code_info(self): + # 3-tuple property containing: the function source code, source file, + # and first line of the code inside the source file + if hasattr(self.func, "__code__"): + if self._func_code_id is None: + self._func_code_id = id(self.func.__code__) + elif id(self.func.__code__) != self._func_code_id: + # Be robust to dynamic reassignments of self.func.__code__ + self._func_code_info = None + + if self._func_code_info is None: + # Cache the source code of self.func . Provided that get_func_code + # (which should be called once on self) gets called in the process + # in which self.func was defined, this caching mechanism prevents + # undesired cache clearing when the cached function is called in + # an environment where the introspection utilities get_func_code + # relies on do not work (typically, in joblib child processes). + # See #1035 for more info + # TODO (pierreglaser): do the same with get_func_name? + self._func_code_info = get_func_code(self.func) + return self._func_code_info + + def call_and_shelve(self, *args, **kwargs): + """Call wrapped function, cache result and return a reference. + + This method returns a reference to the cached result instead of the + result itself. The reference object is small and picklable, allowing + to send or store it easily. Call .get() on reference object to get + result. + + Returns + ------- + cached_result: MemorizedResult or NotMemorizedResult + reference to the value returned by the wrapped function. The + class "NotMemorizedResult" is used when there is no cache + activated (e.g. location=None in Memory). + """ + # Return the wrapped output, without the metadata + return self._cached_call(args, kwargs, shelving=True)[0] + + def __call__(self, *args, **kwargs): + # Return the output, without the metadata + return self._cached_call(args, kwargs, shelving=False)[0] + + def __getstate__(self): + # Make sure self.func's source is introspected prior to being pickled - + # code introspection utilities typically do not work inside child + # processes + _ = self.func_code_info + + # We don't store the timestamp when pickling, to avoid the hash + # depending from it. + state = self.__dict__.copy() + state["timestamp"] = None + + # Invalidate the code id as id(obj) will be different in the child + state["_func_code_id"] = None + + return state + + def check_call_in_cache(self, *args, **kwargs): + """Check if the function call is cached and valid for given arguments. + + Does not call the function or do any work besides function inspection + and argument hashing. + + - Compare the function code with the one from the cached function, + asserting if it has changed. + - Check if the function call is present in the cache. 
+        - Call `cache_validation_callback` for user-defined cache validation.
+
+        Returns
+        -------
+        is_call_in_cache: bool
+            Whether or not the function call is in cache and can be used.
+        """
+        call_id = (self.func_id, self._get_args_id(*args, **kwargs))
+        return self._is_in_cache_and_valid(call_id)
+
+    # ------------------------------------------------------------------------
+    # Private interface
+    # ------------------------------------------------------------------------
+
+    def _get_args_id(self, *args, **kwargs):
+        """Return the input parameter hash of a result."""
+        return hashing.hash(
+            filter_args(self.func, self.ignore, args, kwargs),
+            coerce_mmap=self.mmap_mode is not None,
+        )
+
+    def _hash_func(self):
+        """Hash a function to key the online cache"""
+        func_code_h = hash(getattr(self.func, "__code__", None))
+        return id(self.func), hash(self.func), func_code_h
+
+    def _write_func_code(self, func_code, first_line):
+        """Write the function code and the filename to a file."""
+        # We store the first line because the filename and the function
+        # name is not always enough to identify a function: people
+        # sometimes have several functions named the same way in a
+        # file. This is bad practice, but joblib should be robust to bad
+        # practice.
+        func_code = "%s %i\n%s" % (FIRST_LINE_TEXT, first_line, func_code)
+        self.store_backend.store_cached_func_code([self.func_id], func_code)
+
+        # Also store in the in-memory store of function hashes
+        is_named_callable = (
+            hasattr(self.func, "__name__") and self.func.__name__ != "<lambda>"
+        )
+        if is_named_callable:
+            # Don't do this for lambda functions or strange callable
+            # objects, as it ends up being too fragile
+            func_hash = self._hash_func()
+            try:
+                _FUNCTION_HASHES[self.func] = func_hash
+            except TypeError:
+                # Some callables are not hashable
+                pass
+
+    def _check_previous_func_code(self, stacklevel=2):
+        """
+        stacklevel is the depth at which this function is called, to
+        issue useful warnings to the user.
+        """
+        # First check if our function is in the in-memory store.
+        # Using the in-memory store not only makes things faster, but it
+        # also renders us robust to variations of the files when the
+        # in-memory version of the code does not vary
+        try:
+            if self.func in _FUNCTION_HASHES:
+                # We use as an identifier the id of the function and its
+                # hash. This is more likely to falsely change than have hash
+                # collisions, thus we are on the safe side.
+                func_hash = self._hash_func()
+                if func_hash == _FUNCTION_HASHES[self.func]:
+                    return True
+        except TypeError:
+            # Some callables are not hashable
+            pass
+
+        # Here, we go through some effort to be robust to dynamically
+        # changing code and collision. We cannot inspect.getsource
+        # because it is not reliable when using IPython's magic "%run".
+        func_code, source_file, first_line = self.func_code_info
+        try:
+            old_func_code, old_first_line = extract_first_line(
+                self.store_backend.get_cached_func_code([self.func_id])
+            )
+        except (IOError, OSError):  # some backend can also raise OSError
+            self._write_func_code(func_code, first_line)
+            return False
+        if old_func_code == func_code:
+            return True
+
+        # We have differing code, is this because we are referring to
+        # different functions, or because the function we are referring to has
+        # changed?
+
+        _, func_name = get_func_name(
+            self.func, resolv_alias=False, win_characters=False
+        )
+        if old_first_line == first_line == -1 or func_name == "<lambda>":
+            if not first_line == -1:
+                func_description = "{0} ({1}:{2})".format(
+                    func_name, source_file, first_line
+                )
+            else:
+                func_description = func_name
+            warnings.warn(
+                JobLibCollisionWarning(
+                    "Cannot detect name collisions for function '{0}'".format(
+                        func_description
+                    )
+                ),
+                stacklevel=stacklevel,
+            )
+
+        # Fetch the code at the old location and compare it. If it is the
+        # same as the code stored, we have a collision: the code in the
+        # file has not changed, but the name we have is pointing to a new
+        # code block.
+        if not old_first_line == first_line and source_file is not None:
+            if os.path.exists(source_file):
+                _, func_name = get_func_name(self.func, resolv_alias=False)
+                num_lines = len(func_code.split("\n"))
+                with tokenize.open(source_file) as f:
+                    on_disk_func_code = f.readlines()[
+                        old_first_line - 1 : old_first_line - 1 + num_lines - 1
+                    ]
+                on_disk_func_code = "".join(on_disk_func_code)
+                possible_collision = (
+                    on_disk_func_code.rstrip() == old_func_code.rstrip()
+                )
+            else:
+                possible_collision = source_file.startswith("<doctest ")
+            if possible_collision:
+                warnings.warn(
+                    JobLibCollisionWarning(
+                        "Possible name collisions between functions "
+                        "'%s' (%s:%i) and '%s' (%s:%i)"
+                        % (
+                            func_name,
+                            source_file,
+                            old_first_line,
+                            func_name,
+                            source_file,
+                            first_line,
+                        )
+                    ),
+                    stacklevel=stacklevel,
+                )
+
+        # The function has changed, wipe the cache directory.
+        # XXX: Should be using warnings, and giving stacklevel
+        if self._verbose > 10:
+            _, func_name = get_func_name(self.func, resolv_alias=False)
+            self.warn(
+                "Function {0} (identified by {1}) has changed.".format(
+                    func_name, self.func_id
+                )
+            )
+        self.clear(warn=True)
+        return False
+
+    def clear(self, warn=True):
+        """Empty the function's cache."""
+        func_id = self.func_id
+        if self._verbose > 0 and warn:
+            self.warn("Clearing function cache identified by %s" % func_id)
+        self.store_backend.clear_path(
+            [
+                func_id,
+            ]
+        )
+
+        func_code, _, first_line = self.func_code_info
+        self._write_func_code(func_code, first_line)
+
+    def call(self, *args, **kwargs):
+        """Force the execution of the function with the given arguments.
+
+        The output values will be persisted, i.e., the cache will be updated
+        with any new values.
+
+        Parameters
+        ----------
+        *args: arguments
+            The arguments.
+        **kwargs: keyword arguments
+            Keyword arguments.
+
+        Returns
+        -------
+        output : object
+            The output of the function call.
+        metadata : dict
+            The metadata associated with the call.
+        """
+        call_id = (self.func_id, self._get_args_id(*args, **kwargs))
+
+        # Return the output and the metadata
+        return self._call(call_id, args, kwargs)
+
+    def _call(self, call_id, args, kwargs, shelving=False):
+        # Return the output and the metadata
+        self._before_call(args, kwargs)
+        start_time = time.time()
+        output = self.func(*args, **kwargs)
+        return self._after_call(call_id, args, kwargs, shelving, output, start_time)
+
+    def _before_call(self, args, kwargs):
+        if self._verbose > 0:
+            print(format_call(self.func, args, kwargs))
+
+    def _after_call(self, call_id, args, kwargs, shelving, output, start_time):
+        self.store_backend.dump_item(call_id, output, verbose=self._verbose)
+        duration = time.time() - start_time
+        if self._verbose > 0:
+            self._print_duration(duration)
+        metadata = self._persist_input(duration, call_id, args, kwargs)
+        if shelving:
+            return self._get_memorized_result(call_id, metadata), metadata
+
+        if self.mmap_mode is not None:
+            # Memmap the output at the first call to be consistent with
+            # later calls
+            output = self._load_item(call_id, metadata)
+        return output, metadata
+
+    def _persist_input(self, duration, call_id, args, kwargs, this_duration_limit=0.5):
+        """Save a small summary of the call using json format in the
+        output directory.
+
+        output_dir: string
+            directory where to write metadata.
+
+        duration: float
+            time taken by hashing input arguments, calling the wrapped
+            function and persisting its output.
+
+        args, kwargs: list and dict
+            input arguments for wrapped function
+
+        this_duration_limit: float
+            Max execution time for this function before issuing a warning.
+        """
+        start_time = time.time()
+        argument_dict = filter_args(self.func, self.ignore, args, kwargs)
+
+        input_repr = dict((k, repr(v)) for k, v in argument_dict.items())
+        # This can fail due to race-conditions with multiple
+        # concurrent joblibs removing the file or the directory
+        metadata = {
+            "duration": duration,
+            "input_args": input_repr,
+            "time": start_time,
+        }
+
+        self.store_backend.store_metadata(call_id, metadata)
+
+        this_duration = time.time() - start_time
+        if this_duration > this_duration_limit:
+            # This persistence should be fast. It will not be if repr() takes
+            # time and its output is large, because json.dump will have to
+            # write a large file. This should not be an issue with numpy arrays
+            # for which repr() always outputs a short representation, but can
+            # be with complex dictionaries. Fixing the problem should be a
+            # matter of replacing repr() above by something smarter.
+            warnings.warn(
+                "Persisting input arguments took %.2fs to run. "
+                "If this happens often in your code, it can cause "
+                "performance problems "
+                "(results will be correct in all cases). "
+                "The reason for this is probably some large input "
+                "arguments for a wrapped function." % this_duration,
+                stacklevel=5,
+            )
+        return metadata
+
+    def _get_memorized_result(self, call_id, metadata=None):
+        return MemorizedResult(
+            self.store_backend,
+            call_id,
+            metadata=metadata,
+            timestamp=self.timestamp,
+            verbose=self._verbose - 1,
+        )
+
+    def _load_item(self, call_id, metadata=None):
+        return self.store_backend.load_item(
+            call_id, metadata=metadata, timestamp=self.timestamp, verbose=self._verbose
+        )
+
+    def _print_duration(self, duration, context=""):
+        _, name = get_func_name(self.func)
+        msg = f"{name} {context}- {format_time(duration)}"
+        print(max(0, (80 - len(msg))) * "_" + msg)
+
+    # ------------------------------------------------------------------------
+    # Private `object` interface
+    # ------------------------------------------------------------------------
+
+    def __repr__(self):
+        return "{class_name}(func={func}, location={location})".format(
+            class_name=self.__class__.__name__,
+            func=self.func,
+            location=self.store_backend.location,
+        )
+
+
+###############################################################################
+# class `AsyncMemorizedFunc`
+###############################################################################
+class AsyncMemorizedFunc(MemorizedFunc):
+    async def __call__(self, *args, **kwargs):
+        out = self._cached_call(args, kwargs, shelving=False)
+        out = await out if asyncio.iscoroutine(out) else out
+        return out[0]  # Don't return metadata
+
+    async def call_and_shelve(self, *args, **kwargs):
+        out = self._cached_call(args, kwargs, shelving=True)
+        out = await out if asyncio.iscoroutine(out) else out
+        return out[0]  # Don't return metadata
+
+    async def call(self, *args, **kwargs):
+        out = super().call(*args, **kwargs)
+        return await out if asyncio.iscoroutine(out) else out
+
+    async def _call(self, call_id, args, kwargs, shelving=False):
+        self._before_call(args, kwargs)
+        start_time = time.time()
+        output = await self.func(*args, **kwargs)
+        return self._after_call(
+            call_id, args, kwargs, shelving, output, start_time
+        )
+
+
+###############################################################################
+# class `Memory`
+###############################################################################
+class Memory(Logger):
+    """A context object for caching a function's return value each time it
+    is called with the same input arguments.
+
+    All values are cached on the filesystem, in a deep directory
+    structure.
+
+    Read more in the :ref:`User Guide <memory>`.
+
+    Parameters
+    ----------
+    location: str, pathlib.Path or None
+        The path of the base directory to use as a data store
+        or None. If None is given, no caching is done and
+        the Memory object is completely transparent. This option
+        replaces cachedir since version 0.12.
+
+    backend: str, optional, default='local'
+        Type of store backend for reading/writing cache files.
+        The 'local' backend uses regular filesystem operations to
+        manipulate data (open, mv, etc) in the backend.
+
+    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
+        The memmapping mode used when loading from cache
+        numpy arrays. See numpy.load for the meaning of the
+        arguments.
+
+    compress: boolean, or integer, optional
+        Whether to zip the stored data on disk. If an integer is
+        given, it should be between 1 and 9, and sets the amount
+        of compression. Note that compressed arrays cannot be
+        read by memmapping.
+
+    verbose: int, optional
+        Verbosity flag, controls the debug messages that are issued
+        as functions are evaluated.
+
+    backend_options: dict, optional
+        Contains a dictionary of named parameters used to configure
+        the store backend.
+    """
+
+    # ------------------------------------------------------------------------
+    # Public interface
+    # ------------------------------------------------------------------------
+
+    def __init__(
+        self,
+        location=None,
+        backend="local",
+        mmap_mode=None,
+        compress=False,
+        verbose=1,
+        backend_options=None,
+    ):
+        Logger.__init__(self)
+        self._verbose = verbose
+        self.mmap_mode = mmap_mode
+        self.timestamp = time.time()
+        self.backend = backend
+        self.compress = compress
+        if backend_options is None:
+            backend_options = {}
+        self.backend_options = backend_options
+
+        if compress and mmap_mode is not None:
+            warnings.warn("Compressed results cannot be memmapped", stacklevel=2)
+
+        self.location = location
+        if isinstance(location, str):
+            location = os.path.join(location, "joblib")
+
+        self.store_backend = _store_backend_factory(
+            backend,
+            location,
+            verbose=self._verbose,
+            backend_options=dict(
+                compress=compress, mmap_mode=mmap_mode, **backend_options
+            ),
+        )
+
+    def cache(
+        self,
+        func=None,
+        ignore=None,
+        verbose=None,
+        mmap_mode=False,
+        cache_validation_callback=None,
+    ):
+        """Decorates the given function func to only compute its return
+        value for input arguments not cached on disk.
+
+        Parameters
+        ----------
+        func: callable, optional
+            The function to be decorated
+        ignore: list of strings
+            A list of argument names to ignore in the hashing
+        verbose: integer, optional
+            The verbosity mode of the function. By default that
+            of the memory object is used.
+        mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
+            The memmapping mode used when loading from cache
+            numpy arrays. See numpy.load for the meaning of the
+            arguments. By default that of the memory object is used.
+        cache_validation_callback: callable, optional
+            Callable to validate whether or not the cache is valid.
When + the cached function is called with arguments for which a cache + exists, this callable is called with the metadata of the cached + result as its sole argument. If it returns True, then the + cached result is returned, else the cache for these arguments + is cleared and recomputed. + + Returns + ------- + decorated_func: MemorizedFunc object + The returned object is a MemorizedFunc object, that is + callable (behaves like a function), but offers extra + methods for cache lookup and management. See the + documentation for :class:`joblib.memory.MemorizedFunc`. + """ + if cache_validation_callback is not None and not callable( + cache_validation_callback + ): + raise ValueError( + "cache_validation_callback needs to be callable. " + f"Got {cache_validation_callback}." + ) + if func is None: + # Partial application, to be able to specify extra keyword + # arguments in decorators + return functools.partial( + self.cache, + ignore=ignore, + mmap_mode=mmap_mode, + verbose=verbose, + cache_validation_callback=cache_validation_callback, + ) + if self.store_backend is None: + cls = ( + AsyncNotMemorizedFunc + if inspect.iscoroutinefunction(func) + else NotMemorizedFunc + ) + return cls(func) + if verbose is None: + verbose = self._verbose + if mmap_mode is False: + mmap_mode = self.mmap_mode + if isinstance(func, MemorizedFunc): + func = func.func + cls = AsyncMemorizedFunc if inspect.iscoroutinefunction(func) else MemorizedFunc + return cls( + func, + location=self.store_backend, + backend=self.backend, + ignore=ignore, + mmap_mode=mmap_mode, + compress=self.compress, + verbose=verbose, + timestamp=self.timestamp, + cache_validation_callback=cache_validation_callback, + ) + + def clear(self, warn=True): + """Erase the complete cache directory.""" + if warn: + self.warn("Flushing completely the cache") + if self.store_backend is not None: + self.store_backend.clear() + + # As the cache is completely clear, make sure the _FUNCTION_HASHES + # cache is also reset. Else, for a function that is present in this + # table, results cached after this clear will be have cache miss + # as the function code is not re-written. + _FUNCTION_HASHES.clear() + + def reduce_size(self, bytes_limit=None, items_limit=None, age_limit=None): + """Remove cache elements to make the cache fit its limits. + + The limitation can impose that the cache size fits in ``bytes_limit``, + that the number of cache items is no more than ``items_limit``, and + that all files in cache are not older than ``age_limit``. + + Parameters + ---------- + bytes_limit: int | str, optional + Limit in bytes of the size of the cache. By default, the size of + the cache is unlimited. When reducing the size of the cache, + ``joblib`` keeps the most recently accessed items first. If a + str is passed, it is converted to a number of bytes using units + { K | M | G} for kilo, mega, giga. + + items_limit: int, optional + Number of items to limit the cache to. By default, the number of + items in the cache is unlimited. When reducing the size of the + cache, ``joblib`` keeps the most recently accessed items first. + + age_limit: datetime.timedelta, optional + Maximum age of items to limit the cache to. When reducing the size + of the cache, any items last accessed more than the given length of + time ago are deleted. Example: to remove files older than 5 days, + use datetime.timedelta(days=5). Negative timedelta are not + accepted. + """ + if self.store_backend is None: + # No cached results, this function does nothing. 
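+            # (Illustrative call, with a hypothetical cache directory:
+            #     Memory("/tmp/joblib_cache").reduce_size(
+            #         bytes_limit="1G", age_limit=datetime.timedelta(days=5))
+            # trims the store; with location=None there is nothing to trim.)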
+ return + + if bytes_limit is None and items_limit is None and age_limit is None: + # No limitation to impose, returning + return + + # Defers the actual limits enforcing to the store backend. + self.store_backend.enforce_store_limits(bytes_limit, items_limit, age_limit) + + def eval(self, func, *args, **kwargs): + """Eval function func with arguments `*args` and `**kwargs`, + in the context of the memory. + + This method works similarly to the builtin `apply`, except + that the function is called only if the cache is not + up to date. + + """ + if self.store_backend is None: + return func(*args, **kwargs) + return self.cache(func)(*args, **kwargs) + + # ------------------------------------------------------------------------ + # Private `object` interface + # ------------------------------------------------------------------------ + + def __repr__(self): + return "{class_name}(location={location})".format( + class_name=self.__class__.__name__, + location=( + None if self.store_backend is None else self.store_backend.location + ), + ) + + def __getstate__(self): + """We don't store the timestamp when pickling, to avoid the hash + depending from it. + """ + state = self.__dict__.copy() + state["timestamp"] = None + return state + + +############################################################################### +# cache_validation_callback helpers +############################################################################### + + +def expires_after( + days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0 +): + """Helper cache_validation_callback to force recompute after a duration. + + Parameters + ---------- + days, seconds, microseconds, milliseconds, minutes, hours, weeks: numbers + argument passed to a timedelta. + """ + delta = datetime.timedelta( + days=days, + seconds=seconds, + microseconds=microseconds, + milliseconds=milliseconds, + minutes=minutes, + hours=hours, + weeks=weeks, + ) + + def cache_validation_callback(metadata): + computation_age = time.time() - metadata["time"] + return computation_age < delta.total_seconds() + + return cache_validation_callback diff --git a/py311/lib/python3.11/site-packages/joblib/numpy_pickle.py b/py311/lib/python3.11/site-packages/joblib/numpy_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..169016d818102f9045f71a67d5f9b40b882f031c --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/numpy_pickle.py @@ -0,0 +1,756 @@ +"""Utilities for fast persistence of big data, with optional compression.""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import io +import os +import pickle +import warnings +from pathlib import Path + +from .backports import make_memmap +from .compressor import ( + _COMPRESSORS, + LZ4_NOT_INSTALLED_ERROR, + BinaryZlibFile, + BZ2CompressorWrapper, + GzipCompressorWrapper, + LZ4CompressorWrapper, + LZMACompressorWrapper, + XZCompressorWrapper, + ZlibCompressorWrapper, + lz4, + register_compressor, +) + +# For compatibility with old versions of joblib, we need ZNDArrayWrapper +# to be visible in the current namespace. 
+from .numpy_pickle_compat import ( + NDArrayWrapper, + ZNDArrayWrapper, # noqa: F401 + load_compatibility, +) +from .numpy_pickle_utils import ( + BUFFER_SIZE, + Pickler, + Unpickler, + _ensure_native_byte_order, + _read_bytes, + _reconstruct, + _validate_fileobject_and_memmap, + _write_fileobject, +) + +# Register supported compressors +register_compressor("zlib", ZlibCompressorWrapper()) +register_compressor("gzip", GzipCompressorWrapper()) +register_compressor("bz2", BZ2CompressorWrapper()) +register_compressor("lzma", LZMACompressorWrapper()) +register_compressor("xz", XZCompressorWrapper()) +register_compressor("lz4", LZ4CompressorWrapper()) + + +############################################################################### +# Utility objects for persistence. + +# For convenience, 16 bytes are used to be sure to cover all the possible +# dtypes' alignments. For reference, see: +# https://numpy.org/devdocs/dev/alignment.html +NUMPY_ARRAY_ALIGNMENT_BYTES = 16 + + +class NumpyArrayWrapper(object): + """An object to be persisted instead of numpy arrays. + + This object is used to hack into the pickle machinery and read numpy + array data from our custom persistence format. + More precisely, this object is used for: + * carrying the information of the persisted array: subclass, shape, order, + dtype. Those ndarray metadata are used to correctly reconstruct the array + with low level numpy functions. + * determining if memmap is allowed on the array. + * reading the array bytes from a file. + * reading the array using memorymap from a file. + * writing the array bytes to a file. + + Attributes + ---------- + subclass: numpy.ndarray subclass + Determine the subclass of the wrapped array. + shape: numpy.ndarray shape + Determine the shape of the wrapped array. + order: {'C', 'F'} + Determine the order of wrapped array data. 'C' is for C order, 'F' is + for fortran order. + dtype: numpy.ndarray dtype + Determine the data type of the wrapped array. + allow_mmap: bool + Determine if memory mapping is allowed on the wrapped array. + Default: False. + """ + + def __init__( + self, + subclass, + shape, + order, + dtype, + allow_mmap=False, + numpy_array_alignment_bytes=NUMPY_ARRAY_ALIGNMENT_BYTES, + ): + """Constructor. Store the useful information for later.""" + self.subclass = subclass + self.shape = shape + self.order = order + self.dtype = dtype + self.allow_mmap = allow_mmap + # We make numpy_array_alignment_bytes an instance attribute to allow us + # to change our mind about the default alignment and still load the old + # pickles (with the previous alignment) correctly + self.numpy_array_alignment_bytes = numpy_array_alignment_bytes + + def safe_get_numpy_array_alignment_bytes(self): + # NumpyArrayWrapper instances loaded from joblib <= 1.1 pickles don't + # have an numpy_array_alignment_bytes attribute + return getattr(self, "numpy_array_alignment_bytes", None) + + def write_array(self, array, pickler): + """Write array bytes to pickler file handle. + + This function is an adaptation of the numpy write_array function + available in version 1.10.1 in numpy/lib/format.py. + """ + # Set buffer size to 16 MiB to hide the Python loop overhead. + buffersize = max(16 * 1024**2 // array.itemsize, 1) + if array.dtype.hasobject: + # We contain Python objects so we cannot write out the data + # directly. Instead, we will pickle it out with version 5 of the + # pickle protocol. 
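+            # (Illustration: np.array([{"a": 1}], dtype=object) has
+            # dtype.hasobject set, so its buffer cannot be written out raw
+            # and must go through pickle below.)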
+ pickle.dump(array, pickler.file_handle, protocol=5) + else: + numpy_array_alignment_bytes = self.safe_get_numpy_array_alignment_bytes() + if numpy_array_alignment_bytes is not None: + current_pos = pickler.file_handle.tell() + pos_after_padding_byte = current_pos + 1 + padding_length = numpy_array_alignment_bytes - ( + pos_after_padding_byte % numpy_array_alignment_bytes + ) + # A single byte is written that contains the padding length in + # bytes + padding_length_byte = int.to_bytes( + padding_length, length=1, byteorder="little" + ) + pickler.file_handle.write(padding_length_byte) + + if padding_length != 0: + padding = b"\xff" * padding_length + pickler.file_handle.write(padding) + + for chunk in pickler.np.nditer( + array, + flags=["external_loop", "buffered", "zerosize_ok"], + buffersize=buffersize, + order=self.order, + ): + pickler.file_handle.write(chunk.tobytes("C")) + + def read_array(self, unpickler, ensure_native_byte_order): + """Read array from unpickler file handle. + + This function is an adaptation of the numpy read_array function + available in version 1.10.1 in numpy/lib/format.py. + """ + if len(self.shape) == 0: + count = 1 + else: + # joblib issue #859: we cast the elements of self.shape to int64 to + # prevent a potential overflow when computing their product. + shape_int64 = [unpickler.np.int64(x) for x in self.shape] + count = unpickler.np.multiply.reduce(shape_int64) + # Now read the actual data. + if self.dtype.hasobject: + # The array contained Python objects. We need to unpickle the data. + array = pickle.load(unpickler.file_handle) + else: + numpy_array_alignment_bytes = self.safe_get_numpy_array_alignment_bytes() + if numpy_array_alignment_bytes is not None: + padding_byte = unpickler.file_handle.read(1) + padding_length = int.from_bytes(padding_byte, byteorder="little") + if padding_length != 0: + unpickler.file_handle.read(padding_length) + + # This is not a real file. We have to read it the + # memory-intensive way. + # crc32 module fails on reads greater than 2 ** 32 bytes, + # breaking large reads from gzip streams. Chunk reads to + # BUFFER_SIZE bytes to avoid issue and reduce memory overhead + # of the read. In non-chunked case count < max_read_count, so + # only one read is performed. + max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, self.dtype.itemsize) + + array = unpickler.np.empty(count, dtype=self.dtype) + for i in range(0, count, max_read_count): + read_count = min(max_read_count, count - i) + read_size = int(read_count * self.dtype.itemsize) + data = _read_bytes(unpickler.file_handle, read_size, "array data") + array[i : i + read_count] = unpickler.np.frombuffer( + data, dtype=self.dtype, count=read_count + ) + del data + + if self.order == "F": + array.shape = self.shape[::-1] + array = array.transpose() + else: + array.shape = self.shape + + if ensure_native_byte_order: + # Detect byte order mismatch and swap as needed. 
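+            # (E.g. data saved with a big-endian ">f8" dtype on a
+            # little-endian host comes back swapped to the native "<f8".)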
+ array = _ensure_native_byte_order(array) + + return array + + def read_mmap(self, unpickler): + """Read an array using numpy memmap.""" + current_pos = unpickler.file_handle.tell() + offset = current_pos + numpy_array_alignment_bytes = self.safe_get_numpy_array_alignment_bytes() + + if numpy_array_alignment_bytes is not None: + padding_byte = unpickler.file_handle.read(1) + padding_length = int.from_bytes(padding_byte, byteorder="little") + # + 1 is for the padding byte + offset += padding_length + 1 + + if unpickler.mmap_mode == "w+": + unpickler.mmap_mode = "r+" + + marray = make_memmap( + unpickler.filename, + dtype=self.dtype, + shape=self.shape, + order=self.order, + mode=unpickler.mmap_mode, + offset=offset, + ) + # update the offset so that it corresponds to the end of the read array + unpickler.file_handle.seek(offset + marray.nbytes) + + if ( + numpy_array_alignment_bytes is None + and current_pos % NUMPY_ARRAY_ALIGNMENT_BYTES != 0 + ): + message = ( + f"The memmapped array {marray} loaded from the file " + f"{unpickler.file_handle.name} is not byte aligned. " + "This may cause segmentation faults if this memmapped array " + "is used in some libraries like BLAS or PyTorch. " + "To get rid of this warning, regenerate your pickle file " + "with joblib >= 1.2.0. " + "See https://github.com/joblib/joblib/issues/563 " + "for more details" + ) + warnings.warn(message) + + return marray + + def read(self, unpickler, ensure_native_byte_order): + """Read the array corresponding to this wrapper. + + Use the unpickler to get all information to correctly read the array. + + Parameters + ---------- + unpickler: NumpyUnpickler + ensure_native_byte_order: bool + If true, coerce the array to use the native endianness of the + host system. + + Returns + ------- + array: numpy.ndarray + + """ + # When requested, only use memmap mode if allowed. + if unpickler.mmap_mode is not None and self.allow_mmap: + assert not ensure_native_byte_order, ( + "Memmaps cannot be coerced to a given byte order, " + "this code path is impossible." + ) + array = self.read_mmap(unpickler) + else: + array = self.read_array(unpickler, ensure_native_byte_order) + + # Manage array subclass case + if hasattr(array, "__array_prepare__") and self.subclass not in ( + unpickler.np.ndarray, + unpickler.np.memmap, + ): + # We need to reconstruct another subclass + new_array = _reconstruct(self.subclass, (0,), "b") + return new_array.__array_prepare__(array) + else: + return array + + +############################################################################### +# Pickler classes + + +class NumpyPickler(Pickler): + """A pickler to persist big data efficiently. + + The main features of this object are: + * persistence of numpy arrays in a single file. + * optional compression with a special care on avoiding memory copies. + + Attributes + ---------- + fp: file + File object handle used for serializing the input object. + protocol: int, optional + Pickle protocol used. Default is pickle.DEFAULT_PROTOCOL. 
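+
+    A direct-use sketch (most code should go through ``joblib.dump``, and
+    the resulting file is readable with ``joblib.load``, not ``pickle``)::
+
+        import numpy as np
+        with open("arr.pkl", "wb") as f:
+            NumpyPickler(f).dump(np.arange(10))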
+ """ + + dispatch = Pickler.dispatch.copy() + + def __init__(self, fp, protocol=None): + self.file_handle = fp + self.buffered = isinstance(self.file_handle, BinaryZlibFile) + + # By default we want a pickle protocol that only changes with + # the major python version and not the minor one + if protocol is None: + protocol = pickle.DEFAULT_PROTOCOL + + Pickler.__init__(self, self.file_handle, protocol=protocol) + # delayed import of numpy, to avoid tight coupling + try: + import numpy as np + except ImportError: + np = None + self.np = np + + def _create_array_wrapper(self, array): + """Create and returns a numpy array wrapper from a numpy array.""" + order = ( + "F" if (array.flags.f_contiguous and not array.flags.c_contiguous) else "C" + ) + allow_mmap = not self.buffered and not array.dtype.hasobject + + kwargs = {} + try: + self.file_handle.tell() + except io.UnsupportedOperation: + kwargs = {"numpy_array_alignment_bytes": None} + + wrapper = NumpyArrayWrapper( + type(array), + array.shape, + order, + array.dtype, + allow_mmap=allow_mmap, + **kwargs, + ) + + return wrapper + + def save(self, obj): + """Subclass the Pickler `save` method. + + This is a total abuse of the Pickler class in order to use the numpy + persistence function `save` instead of the default pickle + implementation. The numpy array is replaced by a custom wrapper in the + pickle persistence stack and the serialized array is written right + after in the file. Warning: the file produced does not follow the + pickle format. As such it can not be read with `pickle.load`. + """ + if self.np is not None and type(obj) in ( + self.np.ndarray, + self.np.matrix, + self.np.memmap, + ): + if type(obj) is self.np.memmap: + # Pickling doesn't work with memmapped arrays + obj = self.np.asanyarray(obj) + + # The array wrapper is pickled instead of the real array. + wrapper = self._create_array_wrapper(obj) + Pickler.save(self, wrapper) + + # A framer was introduced with pickle protocol 4 and we want to + # ensure the wrapper object is written before the numpy array + # buffer in the pickle file. + # See https://www.python.org/dev/peps/pep-3154/#framing to get + # more information on the framer behavior. + if self.proto >= 4: + self.framer.commit_frame(force=True) + + # And then array bytes are written right after the wrapper. + wrapper.write_array(obj, self) + return + + return Pickler.save(self, obj) + + +class NumpyUnpickler(Unpickler): + """A subclass of the Unpickler to unpickle our numpy pickles. + + Attributes + ---------- + mmap_mode: str + The memorymap mode to use for reading numpy arrays. + file_handle: file_like + File object to unpickle from. + ensure_native_byte_order: bool + If True, coerce the array to use the native endianness of the + host system. + filename: str + Name of the file to unpickle from. It should correspond to file_handle. + This parameter is required when using mmap_mode. + np: module + Reference to numpy module if numpy is installed else None. + + """ + + dispatch = Unpickler.dispatch.copy() + + def __init__(self, filename, file_handle, ensure_native_byte_order, mmap_mode=None): + # The next line is for backward compatibility with pickle generated + # with joblib versions less than 0.10. + self._dirname = os.path.dirname(filename) + + self.mmap_mode = mmap_mode + self.file_handle = file_handle + # filename is required for numpy mmap mode. 
+ self.filename = filename + self.compat_mode = False + self.ensure_native_byte_order = ensure_native_byte_order + Unpickler.__init__(self, self.file_handle) + try: + import numpy as np + except ImportError: + np = None + self.np = np + + def load_build(self): + """Called to set the state of a newly created object. + + We capture it to replace our place-holder objects, NDArrayWrapper or + NumpyArrayWrapper, by the array we are interested in. We + replace them directly in the stack of pickler. + NDArrayWrapper is used for backward compatibility with joblib <= 0.9. + """ + Unpickler.load_build(self) + + # For backward compatibility, we support NDArrayWrapper objects. + if isinstance(self.stack[-1], (NDArrayWrapper, NumpyArrayWrapper)): + if self.np is None: + raise ImportError( + "Trying to unpickle an ndarray, but numpy didn't import correctly" + ) + array_wrapper = self.stack.pop() + # If any NDArrayWrapper is found, we switch to compatibility mode, + # this will be used to raise a DeprecationWarning to the user at + # the end of the unpickling. + if isinstance(array_wrapper, NDArrayWrapper): + self.compat_mode = True + _array_payload = array_wrapper.read(self) + else: + _array_payload = array_wrapper.read(self, self.ensure_native_byte_order) + + self.stack.append(_array_payload) + + # Be careful to register our new method. + dispatch[pickle.BUILD[0]] = load_build + + +############################################################################### +# Utility functions + + +def dump(value, filename, compress=0, protocol=None): + """Persist an arbitrary Python object into one file. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + value: any Python object + The object to store to disk. + filename: str, pathlib.Path, or file object. + The file object or path of the file in which it is to be stored. + The compression method corresponding to one of the supported filename + extensions ('.z', '.gz', '.bz2', '.xz' or '.lzma') will be used + automatically. + compress: int from 0 to 9 or bool or 2-tuple, optional + Optional compression level for the data. 0 or False is no compression. + Higher value means more compression, but also slower read and + write times. Using a value of 3 is often a good compromise. + See the notes for more details. + If compress is True, the compression level used is 3. + If compress is a 2-tuple, the first element must correspond to a string + between supported compressors (e.g 'zlib', 'gzip', 'bz2', 'lzma' + 'xz'), the second element must be an integer from 0 to 9, corresponding + to the compression level. + protocol: int, optional + Pickle protocol, see pickle.dump documentation for more details. + + Returns + ------- + filenames: list of strings + The list of file names in which the data is stored. If + compress is false, each array is stored in a different file. + + See Also + -------- + joblib.load : corresponding loader + + Notes + ----- + Memmapping on load cannot be used for compressed files. Thus + using compression can significantly slow down loading. In + addition, compressed files take up extra memory during + dump and load. + + """ + + if Path is not None and isinstance(filename, Path): + filename = str(filename) + + is_filename = isinstance(filename, str) + is_fileobj = hasattr(filename, "write") + + compress_method = "zlib" # zlib is the default compression method. + if compress is True: + # By default, if compress is enabled, we want the default compress + # level of the compressor. 
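+        # In other words, compress=True behaves like compress=("zlib", None),
+        # where None means "use the compressor's default level".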
+ compress_level = None + elif isinstance(compress, tuple): + # a 2-tuple was set in compress + if len(compress) != 2: + raise ValueError( + "Compress argument tuple should contain exactly 2 elements: " + "(compress method, compress level), you passed {}".format(compress) + ) + compress_method, compress_level = compress + elif isinstance(compress, str): + compress_method = compress + compress_level = None # Use default compress level + compress = (compress_method, compress_level) + else: + compress_level = compress + + if compress_method == "lz4" and lz4 is None: + raise ValueError(LZ4_NOT_INSTALLED_ERROR) + + if ( + compress_level is not None + and compress_level is not False + and compress_level not in range(10) + ): + # Raising an error if a non valid compress level is given. + raise ValueError( + 'Non valid compress level given: "{}". Possible values are {}.'.format( + compress_level, list(range(10)) + ) + ) + + if compress_method not in _COMPRESSORS: + # Raising an error if an unsupported compression method is given. + raise ValueError( + 'Non valid compression method given: "{}". Possible values are {}.'.format( + compress_method, _COMPRESSORS + ) + ) + + if not is_filename and not is_fileobj: + # People keep inverting arguments, and the resulting error is + # incomprehensible + raise ValueError( + "Second argument should be a filename or a file-like object, " + "%s (type %s) was given." % (filename, type(filename)) + ) + + if is_filename and not isinstance(compress, tuple): + # In case no explicit compression was requested using both compression + # method and level in a tuple and the filename has an explicit + # extension, we select the corresponding compressor. + + # unset the variable to be sure no compression level is set afterwards. + compress_method = None + for name, compressor in _COMPRESSORS.items(): + if filename.endswith(compressor.extension): + compress_method = name + + if compress_method in _COMPRESSORS and compress_level == 0: + # we choose the default compress_level in case it was not given + # as an argument (using compress). + compress_level = None + + if compress_level != 0: + with _write_fileobject( + filename, compress=(compress_method, compress_level) + ) as f: + NumpyPickler(f, protocol=protocol).dump(value) + elif is_filename: + with open(filename, "wb") as f: + NumpyPickler(f, protocol=protocol).dump(value) + else: + NumpyPickler(filename, protocol=protocol).dump(value) + + # If the target container is a file object, nothing is returned. + if is_fileobj: + return + + # For compatibility, the list of created filenames (e.g with one element + # after 0.10.0) is returned by default. + return [filename] + + +def _unpickle(fobj, ensure_native_byte_order, filename="", mmap_mode=None): + """Internal unpickling function.""" + # We are careful to open the file handle early and keep it open to + # avoid race-conditions on renames. + # That said, if data is stored in companion files, which can be + # the case with the old persistence format, moving the directory + # will create a race when joblib tries to access the companion + # files. + unpickler = NumpyUnpickler( + filename, fobj, ensure_native_byte_order, mmap_mode=mmap_mode + ) + obj = None + try: + obj = unpickler.load() + if unpickler.compat_mode: + warnings.warn( + "The file '%s' has been generated with a " + "joblib version less than 0.10. " + "Please regenerate this pickle file." 
% filename, + DeprecationWarning, + stacklevel=3, + ) + except UnicodeDecodeError as exc: + # More user-friendly error message + new_exc = ValueError( + "You may be trying to read with " + "python 3 a joblib pickle generated with python 2. " + "This feature is not supported by joblib." + ) + new_exc.__cause__ = exc + raise new_exc + return obj + + +def load_temporary_memmap(filename, mmap_mode, unlink_on_gc_collect): + from ._memmapping_reducer import JOBLIB_MMAPS, add_maybe_unlink_finalizer + + with open(filename, "rb") as f: + with _validate_fileobject_and_memmap(f, filename, mmap_mode) as ( + fobj, + validated_mmap_mode, + ): + # Memmap are used for interprocess communication, which should + # keep the objects untouched. We pass `ensure_native_byte_order=False` + # to remain consistent with the loading behavior of non-memmaped arrays + # in workers, where the byte order is preserved. + # Note that we do not implement endianness change for memmaps, as this + # would result in inconsistent behavior. + obj = _unpickle( + fobj, + ensure_native_byte_order=False, + filename=filename, + mmap_mode=validated_mmap_mode, + ) + + JOBLIB_MMAPS.add(obj.filename) + if unlink_on_gc_collect: + add_maybe_unlink_finalizer(obj) + return obj + + +def load(filename, mmap_mode=None, ensure_native_byte_order="auto"): + """Reconstruct a Python object from a file persisted with joblib.dump. + + Read more in the :ref:`User Guide `. + + WARNING: joblib.load relies on the pickle module and can therefore + execute arbitrary Python code. It should therefore never be used + to load files from untrusted sources. + + Parameters + ---------- + filename: str, pathlib.Path, or file object. + The file object or path of the file from which to load the object + mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional + If not None, the arrays are memory-mapped from the disk. This + mode has no effect for compressed files. Note that in this + case the reconstructed object might no longer match exactly + the originally pickled object. + ensure_native_byte_order: bool, or 'auto', default=='auto' + If True, ensures that the byte order of the loaded arrays matches the + native byte ordering (or _endianness_) of the host system. This is not + compatible with memory-mapped arrays and using non-null `mmap_mode` + parameter at the same time will raise an error. The default 'auto' + parameter is equivalent to True if `mmap_mode` is None, else False. + + Returns + ------- + result: any Python object + The object stored in the file. + + See Also + -------- + joblib.dump : function to save an object + + Notes + ----- + + This function can load numpy array files saved separately during the + dump. If the mmap_mode argument is given, it is passed to np.load and + arrays are loaded as memmaps. As a consequence, the reconstructed + object might not match the original pickled object. Note that if the + file was saved with compression, the arrays cannot be memmapped. + """ + if ensure_native_byte_order == "auto": + ensure_native_byte_order = mmap_mode is None + + if ensure_native_byte_order and mmap_mode is not None: + raise ValueError( + "Native byte ordering can only be enforced if 'mmap_mode' parameter " + f"is set to None, but got 'mmap_mode={mmap_mode}' instead." 
+    )
+
+    if Path is not None and isinstance(filename, Path):
+        filename = str(filename)
+
+    if hasattr(filename, "read"):
+        fobj = filename
+        filename = getattr(fobj, "name", "")
+        with _validate_fileobject_and_memmap(fobj, filename, mmap_mode) as (fobj, _):
+            obj = _unpickle(fobj, ensure_native_byte_order=ensure_native_byte_order)
+    else:
+        with open(filename, "rb") as f:
+            with _validate_fileobject_and_memmap(f, filename, mmap_mode) as (
+                fobj,
+                validated_mmap_mode,
+            ):
+                if isinstance(fobj, str):
+                    # if the returned file object is a string, this means we
+                    # are trying to load a pickle file generated with an old
+                    # version of joblib, so we load it with the joblib
+                    # compatibility function.
+                    return load_compatibility(fobj)
+
+                # A memory-mapped array has to be mapped with the endianness
+                # it has been written with. Other arrays are coerced to the
+                # native endianness of the host system.
+                obj = _unpickle(
+                    fobj,
+                    ensure_native_byte_order=ensure_native_byte_order,
+                    filename=filename,
+                    mmap_mode=validated_mmap_mode,
+                )
+
+    return obj
diff --git a/py311/lib/python3.11/site-packages/joblib/numpy_pickle_compat.py b/py311/lib/python3.11/site-packages/joblib/numpy_pickle_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e26c13e2feaa53b62b29e6b6690c0349898189f
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/joblib/numpy_pickle_compat.py
@@ -0,0 +1,250 @@
+"""Numpy pickle compatibility functions."""
+
+import inspect
+import os
+import pickle
+import zlib
+from io import BytesIO
+
+from .numpy_pickle_utils import (
+    _ZFILE_PREFIX,
+    Unpickler,
+    _ensure_native_byte_order,
+    _reconstruct,
+)
+
+
+def hex_str(an_int):
+    """Convert an int to a hexadecimal string."""
+    return "{:#x}".format(an_int)
+
+
+def asbytes(s):
+    if isinstance(s, bytes):
+        return s
+    return s.encode("latin1")
+
+
+_MAX_LEN = len(hex_str(2**64))
+_CHUNK_SIZE = 64 * 1024
+
+
+def read_zfile(file_handle):
+    """Read the z-file and return the content as a string.
+
+    Z-files are raw data compressed with zlib used internally by joblib
+    for persistence. Backward compatibility is not guaranteed. Do not
+    use for external purposes.
+    """
+    file_handle.seek(0)
+    header_length = len(_ZFILE_PREFIX) + _MAX_LEN
+    length = file_handle.read(header_length)
+    length = length[len(_ZFILE_PREFIX) :]
+    length = int(length, 16)
+
+    # With python2 and joblib version <= 0.8.4 the compressed pickle header
+    # is one character wider, so we need to ignore an additional space if
+    # present.
+    # Note: the first byte of the zlib data is guaranteed not to be a
+    # space according to
+    # https://tools.ietf.org/html/rfc6713#section-2.1
+    next_byte = file_handle.read(1)
+    if next_byte != b" ":
+        # The zlib compressed data has started and we need to go back
+        # one byte
+        file_handle.seek(header_length)
+
+    # We use the known length of the data to tell zlib the size of the
+    # buffer to allocate.
+    data = zlib.decompress(file_handle.read(), 15, length)
+    assert len(data) == length, (
+        "Incorrect data length while decompressing %s. "
+        "The file could be corrupted." % file_handle
+    )
+    return data
+
+
+def write_zfile(file_handle, data, compress=1):
+    """Write the data in the given file as a Z-file.
+
+    Z-files are raw data compressed with zlib used internally by joblib
+    for persistence. Backward compatibility is not guaranteed. Do not
+    use for external purposes.
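+
+    A self-contained round-trip sketch of this internal format
+    (illustrative only):
+
+    >>> buf = BytesIO()
+    >>> write_zfile(buf, b"payload")
+    >>> read_zfile(buf)
+    b'payload'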
+ """ + file_handle.write(_ZFILE_PREFIX) + length = hex_str(len(data)) + # Store the length of the data + file_handle.write(asbytes(length.ljust(_MAX_LEN))) + file_handle.write(zlib.compress(asbytes(data), compress)) + + +############################################################################### +# Utility objects for persistence. + + +class NDArrayWrapper(object): + """An object to be persisted instead of numpy arrays. + + The only thing this object does, is to carry the filename in which + the array has been persisted, and the array subclass. + """ + + def __init__(self, filename, subclass, allow_mmap=True): + """Constructor. Store the useful information for later.""" + self.filename = filename + self.subclass = subclass + self.allow_mmap = allow_mmap + + def read(self, unpickler): + """Reconstruct the array.""" + filename = os.path.join(unpickler._dirname, self.filename) + # Load the array from the disk + # use getattr instead of self.allow_mmap to ensure backward compat + # with NDArrayWrapper instances pickled with joblib < 0.9.0 + allow_mmap = getattr(self, "allow_mmap", True) + kwargs = {} + if allow_mmap: + kwargs["mmap_mode"] = unpickler.mmap_mode + if "allow_pickle" in inspect.signature(unpickler.np.load).parameters: + # Required in numpy 1.16.3 and later to acknowledge the security + # risk. + kwargs["allow_pickle"] = True + array = unpickler.np.load(filename, **kwargs) + + # Detect byte order mismatch and swap as needed. + array = _ensure_native_byte_order(array) + + # Reconstruct subclasses. This does not work with old + # versions of numpy + if hasattr(array, "__array_prepare__") and self.subclass not in ( + unpickler.np.ndarray, + unpickler.np.memmap, + ): + # We need to reconstruct another subclass + new_array = _reconstruct(self.subclass, (0,), "b") + return new_array.__array_prepare__(array) + else: + return array + + +class ZNDArrayWrapper(NDArrayWrapper): + """An object to be persisted instead of numpy arrays. + + This object store the Zfile filename in which + the data array has been persisted, and the meta information to + retrieve it. + The reason that we store the raw buffer data of the array and + the meta information, rather than array representation routine + (tobytes) is that it enables us to use completely the strided + model to avoid memory copies (a and a.T store as fast). In + addition saving the heavy information separately can avoid + creating large temporary buffers when unpickling data with + large arrays. + """ + + def __init__(self, filename, init_args, state): + """Constructor. 
Store the useful information for later.""" + self.filename = filename + self.state = state + self.init_args = init_args + + def read(self, unpickler): + """Reconstruct the array from the meta-information and the z-file.""" + # Here we a simply reproducing the unpickling mechanism for numpy + # arrays + filename = os.path.join(unpickler._dirname, self.filename) + array = _reconstruct(*self.init_args) + with open(filename, "rb") as f: + data = read_zfile(f) + state = self.state + (data,) + array.__setstate__(state) + return array + + +class ZipNumpyUnpickler(Unpickler): + """A subclass of the Unpickler to unpickle our numpy pickles.""" + + dispatch = Unpickler.dispatch.copy() + + def __init__(self, filename, file_handle, mmap_mode=None): + """Constructor.""" + self._filename = os.path.basename(filename) + self._dirname = os.path.dirname(filename) + self.mmap_mode = mmap_mode + self.file_handle = self._open_pickle(file_handle) + Unpickler.__init__(self, self.file_handle) + try: + import numpy as np + except ImportError: + np = None + self.np = np + + def _open_pickle(self, file_handle): + return BytesIO(read_zfile(file_handle)) + + def load_build(self): + """Set the state of a newly created object. + + We capture it to replace our place-holder objects, + NDArrayWrapper, by the array we are interested in. We + replace them directly in the stack of pickler. + """ + Unpickler.load_build(self) + if isinstance(self.stack[-1], NDArrayWrapper): + if self.np is None: + raise ImportError( + "Trying to unpickle an ndarray, but numpy didn't import correctly" + ) + nd_array_wrapper = self.stack.pop() + array = nd_array_wrapper.read(self) + self.stack.append(array) + + dispatch[pickle.BUILD[0]] = load_build + + +def load_compatibility(filename): + """Reconstruct a Python object from a file persisted with joblib.dump. + + This function ensures the compatibility with joblib old persistence format + (<= 0.9.3). + + Parameters + ---------- + filename: string + The name of the file from which to load the object + + Returns + ------- + result: any Python object + The object stored in the file. + + See Also + -------- + joblib.dump : function to save an object + + Notes + ----- + + This function can load numpy array files saved separately during the + dump. + """ + with open(filename, "rb") as file_handle: + # We are careful to open the file handle early and keep it open to + # avoid race-conditions on renames. That said, if data is stored in + # companion files, moving the directory will create a race when + # joblib tries to access the companion files. + unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle) + try: + obj = unpickler.load() + except UnicodeDecodeError as exc: + # More user-friendly error message + new_exc = ValueError( + "You may be trying to read with " + "python 3 a joblib pickle generated with python 2. " + "This feature is not supported by joblib." 
+            )
+            new_exc.__cause__ = exc
+            raise new_exc
+        finally:
+            if hasattr(unpickler, "file_handle"):
+                unpickler.file_handle.close()
+        return obj
diff --git a/py311/lib/python3.11/site-packages/joblib/numpy_pickle_utils.py b/py311/lib/python3.11/site-packages/joblib/numpy_pickle_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f7840c78d6e5b36dc5adb8f2a2acbeb40effa1d
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/joblib/numpy_pickle_utils.py
@@ -0,0 +1,291 @@
+"""Utilities for fast persistence of big data, with optional compression."""
+
+# Author: Gael Varoquaux
+# Copyright (c) 2009 Gael Varoquaux
+# License: BSD Style, 3 clauses.
+
+import contextlib
+import io
+import pickle
+import sys
+import warnings
+
+from .compressor import _COMPRESSORS, _ZFILE_PREFIX
+
+try:
+    import numpy as np
+except ImportError:
+    np = None
+
+Unpickler = pickle._Unpickler
+Pickler = pickle._Pickler
+xrange = range
+
+
+try:
+    # The python standard library can be built without bz2, so we make bz2
+    # usage optional.
+    # See https://github.com/scikit-learn/scikit-learn/issues/7526 for more
+    # details.
+    import bz2
+except ImportError:
+    bz2 = None
+
+# Buffer size used in io.BufferedReader and io.BufferedWriter
+_IO_BUFFER_SIZE = 1024**2
+
+
+def _is_raw_file(fileobj):
+    """Check if fileobj is a raw file object, e.g. created with open."""
+    fileobj = getattr(fileobj, "raw", fileobj)
+    return isinstance(fileobj, io.FileIO)
+
+
+def _get_prefixes_max_len():
+    # Compute the max prefix len of registered compressors.
+    prefixes = [len(compressor.prefix) for compressor in _COMPRESSORS.values()]
+    prefixes += [len(_ZFILE_PREFIX)]
+    return max(prefixes)
+
+
+def _is_numpy_array_byte_order_mismatch(array):
+    """Check if a numpy array's byte order differs from the host's."""
+    return (
+        sys.byteorder == "big"
+        and (
+            array.dtype.byteorder == "<"
+            or (
+                array.dtype.byteorder == "|"
+                and array.dtype.fields
+                and all(e[0].byteorder == "<" for e in array.dtype.fields.values())
+            )
+        )
+    ) or (
+        sys.byteorder == "little"
+        and (
+            array.dtype.byteorder == ">"
+            or (
+                array.dtype.byteorder == "|"
+                and array.dtype.fields
+                and all(e[0].byteorder == ">" for e in array.dtype.fields.values())
+            )
+        )
+    )
+
+
+def _ensure_native_byte_order(array):
+    """Use the byte order of the host while preserving values.
+
+    Does nothing if the array already uses the system byte order.
+    """
+    if _is_numpy_array_byte_order_mismatch(array):
+        array = array.byteswap().view(array.dtype.newbyteorder("="))
+    return array
+
+
+###############################################################################
+# Cache file utilities
+def _detect_compressor(fileobj):
+    """Return the compressor matching fileobj.
+
+    Parameters
+    ----------
+    fileobj: file object
+
+    Returns
+    -------
+    str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat', 'not-compressed'}
+    """
+    # Read the magic number in the first bytes of the file.
+    max_prefix_len = _get_prefixes_max_len()
+    if hasattr(fileobj, "peek"):
+        # peek() lets us read those bytes without moving the file cursor.
+        first_bytes = fileobj.peek(max_prefix_len)
+    else:
+        # Fallback to seek if the fileobject is not peekable.
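+        # A read() followed by seek(0) leaves the stream where it started,
+        # mirroring what peek() achieves above without moving the cursor.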
+        first_bytes = fileobj.read(max_prefix_len)
+        fileobj.seek(0)
+
+    if first_bytes.startswith(_ZFILE_PREFIX):
+        return "compat"
+    else:
+        for name, compressor in _COMPRESSORS.items():
+            if first_bytes.startswith(compressor.prefix):
+                return name
+
+    return "not-compressed"
+
+
+def _buffered_read_file(fobj):
+    """Return a buffered version of a read file object."""
+    return io.BufferedReader(fobj, buffer_size=_IO_BUFFER_SIZE)
+
+
+def _buffered_write_file(fobj):
+    """Return a buffered version of a write file object."""
+    return io.BufferedWriter(fobj, buffer_size=_IO_BUFFER_SIZE)
+
+
+@contextlib.contextmanager
+def _validate_fileobject_and_memmap(fileobj, filename, mmap_mode=None):
+    """Utility function opening the right fileobject from a filename.
+
+    The magic number is used to choose the type of file object to open:
+    * regular file object (default)
+    * zlib file object
+    * gzip file object
+    * bz2 file object
+    * lzma file object (for xz and lzma compressor)
+
+    Parameters
+    ----------
+    fileobj: file object
+    filename: str
+        filename path corresponding to the fileobj parameter.
+    mmap_mode: str
+        memory map mode that should be used to open the pickle file. This
+        parameter is useful to verify that the user is not trying to
+        memory-map a compressed file. Default: None.
+
+    Returns
+    -------
+    a tuple with a file like object, and the validated mmap_mode.
+
+    """
+    # Detect if the fileobj contains compressed data.
+    compressor = _detect_compressor(fileobj)
+    validated_mmap_mode = mmap_mode
+
+    if compressor == "compat":
+        # Compatibility with old pickle mode: simply return the input
+        # filename "as-is" and let the compatibility function be called by the
+        # caller.
+        warnings.warn(
+            "The file '%s' has been generated with a joblib "
+            "version less than 0.10. "
+            "Please regenerate this pickle file." % filename,
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        yield filename, validated_mmap_mode
+    else:
+        if compressor in _COMPRESSORS:
+            # Based on the compressor detected in the file, we open the
+            # correct decompressor file object, wrapped in a buffer.
+            compressor_wrapper = _COMPRESSORS[compressor]
+            inst = compressor_wrapper.decompressor_file(fileobj)
+            fileobj = _buffered_read_file(inst)
+
+        # Check for load parameters that are incompatible with the type of
+        # file: mmap_mode cannot be used with compressed files or in-memory
+        # buffers such as io.BytesIO.
+        if mmap_mode is not None:
+            validated_mmap_mode = None
+            if isinstance(fileobj, io.BytesIO):
+                warnings.warn(
+                    "In memory persistence is not compatible with "
+                    'mmap_mode "%(mmap_mode)s" flag passed. '
+                    "mmap_mode option will be ignored." % locals(),
+                    stacklevel=2,
+                )
+            elif compressor != "not-compressed":
+                warnings.warn(
+                    'mmap_mode "%(mmap_mode)s" is not compatible '
+                    "with compressed file %(filename)s. "
+                    '"%(mmap_mode)s" flag will be ignored.' % locals(),
+                    stacklevel=2,
+                )
+            elif not _is_raw_file(fileobj):
+                warnings.warn(
+                    '"%(fileobj)r" is not a raw file, mmap_mode '
+                    '"%(mmap_mode)s" flag will be ignored.'
% locals(), + stacklevel=2, + ) + else: + validated_mmap_mode = mmap_mode + + yield fileobj, validated_mmap_mode + + +def _write_fileobject(filename, compress=("zlib", 3)): + """Return the right compressor file object in write mode.""" + compressmethod = compress[0] + compresslevel = compress[1] + + if compressmethod in _COMPRESSORS.keys(): + file_instance = _COMPRESSORS[compressmethod].compressor_file( + filename, compresslevel=compresslevel + ) + return _buffered_write_file(file_instance) + else: + file_instance = _COMPRESSORS["zlib"].compressor_file( + filename, compresslevel=compresslevel + ) + return _buffered_write_file(file_instance) + + +# Utility functions/variables from numpy required for writing arrays. +# We need at least the functions introduced in version 1.9 of numpy. Here, +# we use the ones from numpy 1.10.2. +BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes + + +def _read_bytes(fp, size, error_template="ran out of data"): + """Read from file-like object until size bytes are read. + + TODO python2_drop: is it still needed? The docstring mentions python 2.6 + and it looks like this can be at least simplified ... + + Raises ValueError if not EOF is encountered before size bytes are read. + Non-blocking objects only supported if they derive from io objects. + + Required as e.g. ZipExtFile in python 2.6 can return less data than + requested. + + This function was taken from numpy/lib/format.py in version 1.10.2. + + Parameters + ---------- + fp: file-like object + size: int + error_template: str + + Returns + ------- + a bytes object + The data read in bytes. + + """ + data = bytes() + while True: + # io files (default in python3) return None or raise on + # would-block, python2 file will truncate, probably nothing can be + # done about that. note that regular files can't be non-blocking + try: + r = fp.read(size - len(data)) + data += r + if len(r) == 0 or len(data) == size: + break + except io.BlockingIOError: + pass + if len(data) != size: + msg = "EOF: reading %s, expected %d bytes got %d" + raise ValueError(msg % (error_template, size, len(data))) + else: + return data + + +def _reconstruct(*args, **kwargs): + # Wrapper for numpy._core.multiarray._reconstruct with backward compat + # for numpy 1.X + # + # XXX: Remove this function when numpy 1.X is not supported anymore + + np_major_version = np.__version__[:2] + if np_major_version == "1.": + from numpy.core.multiarray import _reconstruct as np_reconstruct + elif np_major_version == "2.": + from numpy._core.multiarray import _reconstruct as np_reconstruct + + return np_reconstruct(*args, **kwargs) diff --git a/py311/lib/python3.11/site-packages/joblib/parallel.py b/py311/lib/python3.11/site-packages/joblib/parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..452bd3446fb2d590e7a0b7f812f3454f5ee909a0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/parallel.py @@ -0,0 +1,2075 @@ +""" +Helpers for embarrassingly parallel code. 
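+
+A typical usage sketch (illustrative only):
+
+>>> from joblib import Parallel, delayed
+>>> Parallel(n_jobs=2)(delayed(abs)(-i) for i in range(4))
+[0, 1, 2, 3]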
+""" +# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org > +# Copyright: 2010, Gael Varoquaux +# License: BSD 3 clause + +from __future__ import division + +import collections +import functools +import itertools +import os +import queue +import sys +import threading +import time +import warnings +import weakref +from contextlib import nullcontext +from math import floor, log10, sqrt +from multiprocessing import TimeoutError +from numbers import Integral +from uuid import uuid4 + +from ._multiprocessing_helpers import mp + +# Make sure that those two classes are part of the public joblib.parallel API +# so that 3rd party backend implementers can import them from here. +from ._parallel_backends import ( + AutoBatchingMixin, # noqa + FallbackToBackend, + LokyBackend, + MultiprocessingBackend, + ParallelBackendBase, # noqa + SequentialBackend, + ThreadingBackend, +) +from ._utils import _Sentinel, eval_expr +from .disk import memstr_to_bytes +from .logger import Logger, short_format_time + +BACKENDS = { + "threading": ThreadingBackend, + "sequential": SequentialBackend, +} +# name of the backend used by default by Parallel outside of any context +# managed by ``parallel_config`` or ``parallel_backend``. + +# threading is the only backend that is always everywhere +DEFAULT_BACKEND = "threading" +DEFAULT_THREAD_BACKEND = "threading" +DEFAULT_PROCESS_BACKEND = "threading" + +MAYBE_AVAILABLE_BACKENDS = {"multiprocessing", "loky"} + +# if multiprocessing is available, so is loky, we set it as the default +# backend +if mp is not None: + BACKENDS["multiprocessing"] = MultiprocessingBackend + from .externals import loky + + BACKENDS["loky"] = LokyBackend + DEFAULT_BACKEND = "loky" + DEFAULT_PROCESS_BACKEND = "loky" + +# Thread local value that can be overridden by the ``parallel_config`` context +# manager +_backend = threading.local() + + +def _register_dask(): + """Register Dask Backend if called with parallel_config(backend="dask")""" + try: + from ._dask import DaskDistributedBackend + + register_parallel_backend("dask", DaskDistributedBackend) + except ImportError as e: + msg = ( + "To use the dask.distributed backend you must install both " + "the `dask` and distributed modules.\n\n" + "See https://dask.pydata.org/en/latest/install.html for more " + "information." + ) + raise ImportError(msg) from e + + +EXTERNAL_BACKENDS = { + "dask": _register_dask, +} + + +# Sentinels for the default values of the Parallel constructor and +# the parallel_config and parallel_backend context managers +default_parallel_config = { + "backend": _Sentinel(default_value=None), + "n_jobs": _Sentinel(default_value=None), + "verbose": _Sentinel(default_value=0), + "temp_folder": _Sentinel(default_value=None), + "max_nbytes": _Sentinel(default_value="1M"), + "mmap_mode": _Sentinel(default_value="r"), + "prefer": _Sentinel(default_value=None), + "require": _Sentinel(default_value=None), +} + + +VALID_BACKEND_HINTS = ("processes", "threads", None) +VALID_BACKEND_CONSTRAINTS = ("sharedmem", None) + + +def _get_config_param(param, context_config, key): + """Return the value of a parallel config parameter + + Explicitly setting it in Parallel has priority over setting in a + parallel_(config/backend) context manager. 
+ """ + if param is not default_parallel_config[key]: + # param is explicitly set, return it + return param + + if context_config[key] is not default_parallel_config[key]: + # there's a context manager and the key is set, return it + return context_config[key] + + # Otherwise, we are in the default_parallel_config, + # return the default value + return param.default_value + + +def get_active_backend( + prefer=default_parallel_config["prefer"], + require=default_parallel_config["require"], + verbose=default_parallel_config["verbose"], +): + """Return the active default backend""" + backend, config = _get_active_backend(prefer, require, verbose) + n_jobs = _get_config_param(default_parallel_config["n_jobs"], config, "n_jobs") + return backend, n_jobs + + +def _get_active_backend( + prefer=default_parallel_config["prefer"], + require=default_parallel_config["require"], + verbose=default_parallel_config["verbose"], +): + """Return the active default backend""" + + backend_config = getattr(_backend, "config", default_parallel_config) + + backend = _get_config_param( + default_parallel_config["backend"], backend_config, "backend" + ) + + prefer = _get_config_param(prefer, backend_config, "prefer") + require = _get_config_param(require, backend_config, "require") + verbose = _get_config_param(verbose, backend_config, "verbose") + + if prefer not in VALID_BACKEND_HINTS: + raise ValueError( + f"prefer={prefer} is not a valid backend hint, " + f"expected one of {VALID_BACKEND_HINTS}" + ) + if require not in VALID_BACKEND_CONSTRAINTS: + raise ValueError( + f"require={require} is not a valid backend constraint, " + f"expected one of {VALID_BACKEND_CONSTRAINTS}" + ) + if prefer == "processes" and require == "sharedmem": + raise ValueError( + "prefer == 'processes' and require == 'sharedmem' are inconsistent settings" + ) + + explicit_backend = True + if backend is None: + # We are either outside of the scope of any parallel_(config/backend) + # context manager or the context manager did not set a backend. + # create the default backend instance now. + backend = BACKENDS[DEFAULT_BACKEND](nesting_level=0) + explicit_backend = False + + # Try to use the backend set by the user with the context manager. + + nesting_level = backend.nesting_level + uses_threads = getattr(backend, "uses_threads", False) + supports_sharedmem = getattr(backend, "supports_sharedmem", False) + # Force to use thread-based backend if the provided backend does not + # match the shared memory constraint or if the backend is not explicitly + # given and threads are preferred. + force_threads = (require == "sharedmem" and not supports_sharedmem) or ( + not explicit_backend and prefer == "threads" and not uses_threads + ) + force_processes = not explicit_backend and prefer == "processes" and uses_threads + + if force_threads: + # This backend does not match the shared memory constraint: + # fallback to the default thead-based backend. + sharedmem_backend = BACKENDS[DEFAULT_THREAD_BACKEND]( + nesting_level=nesting_level + ) + # Warn the user if we forced the backend to thread-based, while the + # user explicitly specified a non-thread-based backend. + if verbose >= 10 and explicit_backend: + print( + f"Using {sharedmem_backend.__class__.__name__} as " + f"joblib backend instead of {backend.__class__.__name__} " + "as the latter does not provide shared memory semantics." 
+ ) + # Force to n_jobs=1 by default + thread_config = backend_config.copy() + thread_config["n_jobs"] = 1 + return sharedmem_backend, thread_config + + if force_processes: + # This backend does not match the prefer="processes" constraint: + # fallback to the default process-based backend. + process_backend = BACKENDS[DEFAULT_PROCESS_BACKEND](nesting_level=nesting_level) + + return process_backend, backend_config.copy() + + return backend, backend_config + + +class parallel_config: + """Set the default backend or configuration for :class:`~joblib.Parallel`. + + This is an alternative to directly passing keyword arguments to the + :class:`~joblib.Parallel` class constructor. It is particularly useful when + calling into library code that uses joblib internally but does not expose + the various parallel configuration arguments in its own API. + + Parameters + ---------- + backend: str or ParallelBackendBase instance, default=None + If ``backend`` is a string it must match a previously registered + implementation using the :func:`~register_parallel_backend` function. + + By default the following backends are available: + + - 'loky': single-host, process-based parallelism (used by default), + - 'threading': single-host, thread-based parallelism, + - 'multiprocessing': legacy single-host, process-based parallelism. + + 'loky' is recommended to run functions that manipulate Python objects. + 'threading' is a low-overhead alternative that is most efficient for + functions that release the Global Interpreter Lock: e.g. I/O-bound + code or CPU-bound code in a few calls to native code that explicitly + releases the GIL. Note that on some rare systems (such as pyodide), + multiprocessing and loky may not be available, in which case joblib + defaults to threading. + + In addition, if the ``dask`` and ``distributed`` Python packages are + installed, it is possible to use the 'dask' backend for better + scheduling of nested parallel calls without over-subscription and + potentially distribute parallel calls over a networked cluster of + several hosts. + + It is also possible to use the distributed 'ray' backend for + distributing the workload to a cluster of nodes. See more details + in the Examples section below. + + Alternatively the backend can be passed directly as an instance. + + n_jobs: int, default=None + The maximum number of concurrently running jobs, such as the number + of Python worker processes when ``backend="loky"`` or the size of the + thread-pool when ``backend="threading"``. + This argument is converted to an integer, rounded below for float. + If -1 is given, `joblib` tries to use all CPUs. The number of CPUs + ``n_cpus`` is obtained with :func:`~cpu_count`. + For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. For instance, + using ``n_jobs=-2`` will result in all CPUs but one being used. + This argument can also go above ``n_cpus``, which will cause + oversubscription. In some cases, slight oversubscription can be + beneficial, e.g., for tasks with large I/O operations. + If 1 is given, no parallel computing code is used at all, and the + behavior amounts to a simple python `for` loop. This mode is not + compatible with `timeout`. + None is a marker for 'unset' that will be interpreted as n_jobs=1 + unless the call is performed under a :func:`~parallel_config` + context manager that sets another value for ``n_jobs``. + If n_jobs = 0 then a ValueError is raised. + + verbose: int, default=0 + The verbosity level: if non zero, progress messages are + printed. 
Above 50, the output is sent to stdout.
+        The frequency of the messages increases with the verbosity level.
+        If it is more than 10, all iterations are reported.
+
+    temp_folder: str or None, default=None
+        Folder to be used by the pool for memmapping large arrays
+        for sharing memory with worker processes. If None, this will try in
+        order:
+
+        - a folder pointed to by the ``JOBLIB_TEMP_FOLDER`` environment
+          variable,
+        - ``/dev/shm`` if the folder exists and is writable: this is a
+          RAM disk filesystem available by default on modern Linux
+          distributions,
+        - the default system temporary folder that can be
+          overridden with ``TMP``, ``TMPDIR`` or ``TEMP`` environment
+          variables, typically ``/tmp`` under Unix operating systems.
+
+    max_nbytes: int, str, or None, optional, default='1M'
+        Threshold on the size of arrays passed to the workers that
+        triggers automated memory mapping in temp_folder. Can be an int
+        in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
+        Use None to disable memmapping of large arrays.
+
+    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, default='r'
+        Memmapping mode for numpy arrays passed to workers. None will
+        disable memmapping, other modes defined in the numpy.memmap doc:
+        https://numpy.org/doc/stable/reference/generated/numpy.memmap.html
+        Also, see 'max_nbytes' parameter documentation for more details.
+
+    prefer: str in {'processes', 'threads'} or None, default=None
+        Soft hint to choose the default backend.
+        The default process-based backend is 'loky' and the default
+        thread-based backend is 'threading'. Ignored if the ``backend``
+        parameter is specified.
+
+    require: 'sharedmem' or None, default=None
+        Hard constraint to select the backend. If set to 'sharedmem',
+        the selected backend will be single-host and thread-based.
+
+    inner_max_num_threads: int, default=None
+        If not None, overwrites the limit set on the number of threads
+        usable in some third-party library threadpools like OpenBLAS,
+        MKL or OpenMP. This is only used with the ``loky`` backend.
+
+    backend_params: dict
+        Additional parameters to pass to the backend constructor when
+        backend is a string.
+
+    Notes
+    -----
+    Joblib tries to limit oversubscription by limiting the number of
+    threads usable in some third-party library threadpools like OpenBLAS, MKL
+    or OpenMP. The default limit in each worker is set to
+    ``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be
+    overwritten with the ``inner_max_num_threads`` argument which will be used
+    to set this limit in the child processes.
+
+    .. versionadded:: 1.3
+
+    Examples
+    --------
+    >>> from operator import neg
+    >>> with parallel_config(backend='threading'):
+    ...     print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
+    ...
+    [-1, -2, -3, -4, -5]
+
+    To use the 'ray' joblib backend add the following lines:
+
+    >>> from ray.util.joblib import register_ray  # doctest: +SKIP
+    >>> register_ray()  # doctest: +SKIP
+    >>> with parallel_config(backend="ray"):  # doctest: +SKIP
+    ...
print(Parallel()(delayed(neg)(i + 1) for i in range(5))) + [-1, -2, -3, -4, -5] + + """ + + def __init__( + self, + backend=default_parallel_config["backend"], + *, + n_jobs=default_parallel_config["n_jobs"], + verbose=default_parallel_config["verbose"], + temp_folder=default_parallel_config["temp_folder"], + max_nbytes=default_parallel_config["max_nbytes"], + mmap_mode=default_parallel_config["mmap_mode"], + prefer=default_parallel_config["prefer"], + require=default_parallel_config["require"], + inner_max_num_threads=None, + **backend_params, + ): + # Save the parallel info and set the active parallel config + self.old_parallel_config = getattr(_backend, "config", default_parallel_config) + + backend = self._check_backend(backend, inner_max_num_threads, **backend_params) + + new_config = { + "n_jobs": n_jobs, + "verbose": verbose, + "temp_folder": temp_folder, + "max_nbytes": max_nbytes, + "mmap_mode": mmap_mode, + "prefer": prefer, + "require": require, + "backend": backend, + } + self.parallel_config = self.old_parallel_config.copy() + self.parallel_config.update( + {k: v for k, v in new_config.items() if not isinstance(v, _Sentinel)} + ) + + setattr(_backend, "config", self.parallel_config) + + def _check_backend(self, backend, inner_max_num_threads, **backend_params): + if backend is default_parallel_config["backend"]: + if inner_max_num_threads is not None or len(backend_params) > 0: + raise ValueError( + "inner_max_num_threads and other constructor " + "parameters backend_params are only supported " + "when backend is not None." + ) + return backend + + if isinstance(backend, str): + # Handle non-registered or missing backends + if backend not in BACKENDS: + if backend in EXTERNAL_BACKENDS: + register = EXTERNAL_BACKENDS[backend] + register() + elif backend in MAYBE_AVAILABLE_BACKENDS: + warnings.warn( + f"joblib backend '{backend}' is not available on " + f"your system, falling back to {DEFAULT_BACKEND}.", + UserWarning, + stacklevel=2, + ) + BACKENDS[backend] = BACKENDS[DEFAULT_BACKEND] + else: + raise ValueError( + f"Invalid backend: {backend}, expected one of " + f"{sorted(BACKENDS.keys())}" + ) + + backend = BACKENDS[backend](**backend_params) + else: + if len(backend_params) > 0: + raise ValueError( + "Constructor parameters backend_params are only " + "supported when backend is a string." + ) + + if inner_max_num_threads is not None: + msg = ( + f"{backend.__class__.__name__} does not accept setting the " + "inner_max_num_threads argument." + ) + assert backend.supports_inner_max_num_threads, msg + backend.inner_max_num_threads = inner_max_num_threads + + # If the nesting_level of the backend is not set previously, use the + # nesting level from the previous active_backend to set it + if backend.nesting_level is None: + parent_backend = self.old_parallel_config["backend"] + if parent_backend is default_parallel_config["backend"]: + nesting_level = 0 + else: + nesting_level = parent_backend.nesting_level + backend.nesting_level = nesting_level + + return backend + + def __enter__(self): + return self.parallel_config + + def __exit__(self, type, value, traceback): + self.unregister() + + def unregister(self): + setattr(_backend, "config", self.old_parallel_config) + + +class parallel_backend(parallel_config): + """Change the default backend used by Parallel inside a with block. + + .. warning:: + It is advised to use the :class:`~joblib.parallel_config` context + manager instead, which allows more fine-grained control over the + backend configuration. 
+ + If ``backend`` is a string it must match a previously registered + implementation using the :func:`~register_parallel_backend` function. + + By default the following backends are available: + + - 'loky': single-host, process-based parallelism (used by default), + - 'threading': single-host, thread-based parallelism, + - 'multiprocessing': legacy single-host, process-based parallelism. + + 'loky' is recommended to run functions that manipulate Python objects. + 'threading' is a low-overhead alternative that is most efficient for + functions that release the Global Interpreter Lock: e.g. I/O-bound code or + CPU-bound code in a few calls to native code that explicitly releases the + GIL. Note that on some rare systems (such as Pyodide), + multiprocessing and loky may not be available, in which case joblib + defaults to threading. + + You can also use the `Dask `_ joblib + backend to distribute work across machines. This works well with + scikit-learn estimators with the ``n_jobs`` parameter, for example:: + + >>> import joblib # doctest: +SKIP + >>> from sklearn.model_selection import GridSearchCV # doctest: +SKIP + >>> from dask.distributed import Client, LocalCluster # doctest: +SKIP + + >>> # create a local Dask cluster + >>> cluster = LocalCluster() # doctest: +SKIP + >>> client = Client(cluster) # doctest: +SKIP + >>> grid_search = GridSearchCV(estimator, param_grid, n_jobs=-1) + ... # doctest: +SKIP + >>> with joblib.parallel_backend("dask", scatter=[X, y]): # doctest: +SKIP + ... grid_search.fit(X, y) + + It is also possible to use the distributed 'ray' backend for distributing + the workload to a cluster of nodes. To use the 'ray' joblib backend add + the following lines:: + + >>> from ray.util.joblib import register_ray # doctest: +SKIP + >>> register_ray() # doctest: +SKIP + >>> with parallel_backend("ray"): # doctest: +SKIP + ... print(Parallel()(delayed(neg)(i + 1) for i in range(5))) + [-1, -2, -3, -4, -5] + + Alternatively the backend can be passed directly as an instance. + + By default all available workers will be used (``n_jobs=-1``) unless the + caller passes an explicit value for the ``n_jobs`` parameter. + + This is an alternative to passing a ``backend='backend_name'`` argument to + the :class:`~Parallel` class constructor. It is particularly useful when + calling into library code that uses joblib internally but does not expose + the backend argument in its own API. + + >>> from operator import neg + >>> with parallel_backend('threading'): + ... print(Parallel()(delayed(neg)(i + 1) for i in range(5))) + ... + [-1, -2, -3, -4, -5] + + Joblib also tries to limit the oversubscription by limiting the number of + threads usable in some third-party library threadpools like OpenBLAS, MKL + or OpenMP. The default limit in each worker is set to + ``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be + overwritten with the ``inner_max_num_threads`` argument which will be used + to set this limit in the child processes. + + .. versionadded:: 0.10 + + See Also + -------- + joblib.parallel_config: context manager to change the backend configuration. 
+ """ + + def __init__( + self, backend, n_jobs=-1, inner_max_num_threads=None, **backend_params + ): + super().__init__( + backend=backend, + n_jobs=n_jobs, + inner_max_num_threads=inner_max_num_threads, + **backend_params, + ) + + if self.old_parallel_config is None: + self.old_backend_and_jobs = None + else: + self.old_backend_and_jobs = ( + self.old_parallel_config["backend"], + self.old_parallel_config["n_jobs"], + ) + self.new_backend_and_jobs = ( + self.parallel_config["backend"], + self.parallel_config["n_jobs"], + ) + + def __enter__(self): + return self.new_backend_and_jobs + + +# Under Linux or OS X the default start method of multiprocessing +# can cause third party libraries to crash. Under Python 3.4+ it is possible +# to set an environment variable to switch the default start method from +# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost +# of causing semantic changes and some additional pool instantiation overhead. +DEFAULT_MP_CONTEXT = None +if hasattr(mp, "get_context"): + method = os.environ.get("JOBLIB_START_METHOD", "").strip() or None + if method is not None: + DEFAULT_MP_CONTEXT = mp.get_context(method=method) + + +class BatchedCalls(object): + """Wrap a sequence of (func, args, kwargs) tuples as a single callable""" + + def __init__( + self, iterator_slice, backend_and_jobs, reducer_callback=None, pickle_cache=None + ): + self.items = list(iterator_slice) + self._size = len(self.items) + self._reducer_callback = reducer_callback + if isinstance(backend_and_jobs, tuple): + self._backend, self._n_jobs = backend_and_jobs + else: + # this is for backward compatibility purposes. Before 0.12.6, + # nested backends were returned without n_jobs indications. + self._backend, self._n_jobs = backend_and_jobs, None + self._pickle_cache = pickle_cache if pickle_cache is not None else {} + + def __call__(self): + # Set the default nested backend to self._backend but do not set the + # change the default number of processes to -1 + with parallel_config(backend=self._backend, n_jobs=self._n_jobs): + return [func(*args, **kwargs) for func, args, kwargs in self.items] + + def __reduce__(self): + if self._reducer_callback is not None: + self._reducer_callback() + # no need to pickle the callback. + return ( + BatchedCalls, + (self.items, (self._backend, self._n_jobs), None, self._pickle_cache), + ) + + def __len__(self): + return self._size + + +# Possible exit status for a task +TASK_DONE = "Done" +TASK_ERROR = "Error" +TASK_PENDING = "Pending" + + +############################################################################### +# CPU count that works also when multiprocessing has been disabled via +# the JOBLIB_MULTIPROCESSING environment variable +def cpu_count(only_physical_cores=False): + """Return the number of CPUs. + + This delegates to loky.cpu_count that takes into account additional + constraints such as Linux CFS scheduler quotas (typically set by container + runtimes such as docker) and CPU affinity (for instance using the taskset + command on Linux). + + Parameters + ---------- + only_physical_cores : boolean, default=False + If True, does not take hyperthreading / SMT logical cores into account. + + """ + if mp is None: + return 1 + + return loky.cpu_count(only_physical_cores=only_physical_cores) + + +############################################################################### +# For verbosity + + +def _verbosity_filter(index, verbose): + """Returns False for indices increasingly apart, the distance + depending on the value of verbose. 
+ + We use a lag increasing as the square of index + """ + if not verbose: + return True + elif verbose > 10: + return False + if index == 0: + return False + verbose = 0.5 * (11 - verbose) ** 2 + scale = sqrt(index / verbose) + next_scale = sqrt((index + 1) / verbose) + return int(next_scale) == int(scale) + + +############################################################################### +def delayed(function): + """Decorator used to capture the arguments of a function.""" + + def delayed_function(*args, **kwargs): + return function, args, kwargs + + try: + delayed_function = functools.wraps(function)(delayed_function) + except AttributeError: + " functools.wraps fails on some callable objects " + return delayed_function + + +############################################################################### +class BatchCompletionCallBack(object): + """Callback to keep track of completed results and schedule the next tasks. + + This callable is executed by the parent process whenever a worker process + has completed a batch of tasks. + + It is used for progress reporting, to update estimate of the batch + processing duration and to schedule the next batch of tasks to be + processed. + + It is assumed that this callback will always be triggered by the backend + right after the end of a task, in case of success as well as in case of + failure. + """ + + ########################################################################## + # METHODS CALLED BY THE MAIN THREAD # + ########################################################################## + def __init__(self, dispatch_timestamp, batch_size, parallel): + self.dispatch_timestamp = dispatch_timestamp + self.batch_size = batch_size + self.parallel = parallel + self.parallel_call_id = parallel._call_id + self._completion_timeout_counter = None + + # Internals to keep track of the status and outcome of the task. + + # Used to hold a reference to the future-like object returned by the + # backend after launching this task + # This will be set later when calling `register_job`, as it is only + # created once the task has been submitted. + self.job = None + + if not parallel._backend.supports_retrieve_callback: + # The status is only used for asynchronous result retrieval in the + # callback. + self.status = None + else: + # The initial status for the job is TASK_PENDING. + # Once it is done, it will be either TASK_DONE, or TASK_ERROR. + self.status = TASK_PENDING + + def register_job(self, job): + """Register the object returned by `submit`.""" + self.job = job + + def get_result(self, timeout): + """Returns the raw result of the task that was submitted. + + If the task raised an exception rather than returning, this same + exception will be raised instead. + + If the backend supports the retrieval callback, it is assumed that this + method is only called after the result has been registered. It is + ensured by checking that `self.status(timeout)` does not return + TASK_PENDING. In this case, `get_result` directly returns the + registered result (or raise the registered exception). + + For other backends, there are no such assumptions, but `get_result` + still needs to synchronously retrieve the result before it can + return it or raise. It will block at most `self.timeout` seconds + waiting for retrieval to complete, after that it raises a TimeoutError. + """ + + backend = self.parallel._backend + + if backend.supports_retrieve_callback: + # We assume that the result has already been retrieved by the + # callback thread, and is stored internally. 
It's just waiting to
+            # be returned.
+            return self._return_or_raise()
+
+        # For other backends, the main thread needs to run the retrieval step.
+        try:
+            result = backend.retrieve_result(self.job, timeout=timeout)
+            outcome = dict(result=result, status=TASK_DONE)
+        except BaseException as e:
+            outcome = dict(result=e, status=TASK_ERROR)
+        self._register_outcome(outcome)
+
+        return self._return_or_raise()
+
+    def _return_or_raise(self):
+        try:
+            if self.status == TASK_ERROR:
+                raise self._result
+            return self._result
+        finally:
+            del self._result
+
+    def get_status(self, timeout):
+        """Get the status of the task.
+
+        This function also checks if the timeout has been reached and registers
+        the TimeoutError outcome when that is the case.
+        """
+        if timeout is None or self.status != TASK_PENDING:
+            return self.status
+
+        # The computations are running and the status is pending.
+        # Check that we did not wait for this job more than `timeout`.
+        now = time.time()
+        if self._completion_timeout_counter is None:
+            self._completion_timeout_counter = now
+
+        if (now - self._completion_timeout_counter) > timeout:
+            outcome = dict(result=TimeoutError(), status=TASK_ERROR)
+            self._register_outcome(outcome)
+
+        return self.status
+
+    ##########################################################################
+    #                   METHODS CALLED BY CALLBACK THREADS                   #
+    ##########################################################################
+    def __call__(self, *args, **kwargs):
+        """Function called by the callback thread after a job is completed."""
+
+        # If the backend doesn't support callback retrievals, the next batch of
+        # tasks is dispatched regardless. The result will be retrieved by the
+        # main thread when calling `get_result`.
+        if not self.parallel._backend.supports_retrieve_callback:
+            self._dispatch_new()
+            return
+
+        # If the backend supports retrieving the result in the callback, it
+        # registers the task outcome (TASK_ERROR or TASK_DONE), and schedules
+        # the next batch if needed.
+        with self.parallel._lock:
+            # Edge case where, while the task was processing, the `parallel`
+            # instance has been reset and a new call has been issued, but the
+            # worker managed to complete the task and trigger this callback
+            # call just before being aborted by the reset.
+            if self.parallel._call_id != self.parallel_call_id:
+                return
+
+            # When aborting, stop as fast as possible and do not retrieve the
+            # result as it won't be returned by the Parallel call.
+            if self.parallel._aborting:
+                return
+
+            # Retrieves the result of the task in the main process and
+            # dispatches a new batch if needed.
+            job_succeeded = self._retrieve_result(*args, **kwargs)
+
+            if job_succeeded:
+                self._dispatch_new()
+
+    def _dispatch_new(self):
+        """Schedule the next batch of tasks to be processed."""
+
+        # This step ensures that auto-batching works as expected.
+        this_batch_duration = time.time() - self.dispatch_timestamp
+        self.parallel._backend.batch_completed(self.batch_size, this_batch_duration)
+
+        # Schedule the next batch of tasks.
+        with self.parallel._lock:
+            self.parallel.n_completed_tasks += self.batch_size
+            self.parallel.print_progress()
+            if self.parallel._original_iterator is not None:
+                self.parallel.dispatch_next()
+
+    def _retrieve_result(self, out):
+        """Fetch and register the outcome of a task.
+
+        Return True if the task succeeded, False otherwise.
+        This function is only called by backends that support retrieving
+        the task result in the callback thread.
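+
+        For illustration, a successful retrieval registers an outcome of
+        the form below (a sketch of the logic that follows, not a public
+        API; ``computed_value`` is a placeholder)::
+
+            outcome = dict(status=TASK_DONE, result=computed_value)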
+
+        """
+        try:
+            result = self.parallel._backend.retrieve_result_callback(out)
+            outcome = dict(status=TASK_DONE, result=result)
+        except BaseException as e:
+            # Avoid keeping references to parallel in the error.
+            e.__traceback__ = None
+            outcome = dict(result=e, status=TASK_ERROR)
+
+        self._register_outcome(outcome)
+        return outcome["status"] != TASK_ERROR
+
+    ##########################################################################
+    #            This method can be called either in the main thread         #
+    #                        or in the callback thread.                      #
+    ##########################################################################
+    def _register_outcome(self, outcome):
+        """Register the outcome of a task.
+
+        This method can be called only once; future calls will be ignored.
+        """
+        # Covers the edge case where the main thread tries to register a
+        # `TimeoutError` while the callback thread tries to register a result
+        # at the same time.
+        with self.parallel._lock:
+            if self.status not in (TASK_PENDING, None):
+                return
+            self.status = outcome["status"]
+
+        self._result = outcome["result"]
+
+        # Once the result and the status are extracted, the last reference to
+        # the job can be deleted.
+        self.job = None
+
+        # As soon as an error has been spotted, early stopping flags are sent
+        # to the `parallel` instance.
+        if self.status == TASK_ERROR:
+            self.parallel._exception = True
+            self.parallel._aborting = True
+
+        if self.parallel.return_ordered:
+            return
+
+        with self.parallel._lock:
+            # For `return_as=generator_unordered`, append the job to the queue
+            # in the order of completion instead of submission.
+            self.parallel._jobs.append(self)
+
+
+###############################################################################
+def register_parallel_backend(name, factory, make_default=False):
+    """Register a new Parallel backend factory.
+
+    The new backend can then be selected by passing its name as the backend
+    argument to the :class:`~Parallel` class. Moreover, the default backend can
+    be overwritten globally by setting make_default=True.
+
+    The factory can be any callable that takes no argument and returns an
+    instance of ``ParallelBackendBase``.
+
+    Warning: this function is experimental and subject to change in a future
+    version of joblib.
+
+    .. versionadded:: 0.10
+    """
+    BACKENDS[name] = factory
+    if make_default:
+        global DEFAULT_BACKEND
+        DEFAULT_BACKEND = name
+
+
+def effective_n_jobs(n_jobs=-1):
+    """Determine the number of jobs that can actually run in parallel.
+
+    n_jobs is the number of workers requested by the callers. Passing n_jobs=-1
+    means requesting all available workers, for instance matching the number of
+    CPU cores on the worker host(s).
+
+    This method should return a guesstimate of the number of workers that can
+    actually perform work concurrently with the currently enabled default
+    backend. The primary use case is to make it possible for the caller to know
+    in how many chunks to slice the work.
+
+    In general working on larger data chunks is more efficient (less scheduling
+    overhead and better use of CPU cache prefetching heuristics) as long as all
+    the workers have enough work to do.
+
+    Warning: this function is experimental and subject to change in a future
+    version of joblib.
+
+    ..
versionadded:: 0.10
+    """
+    if n_jobs == 1:
+        return 1
+
+    backend, backend_n_jobs = get_active_backend()
+    if n_jobs is None:
+        n_jobs = backend_n_jobs
+    return backend.effective_n_jobs(n_jobs=n_jobs)
+
+
+###############################################################################
+class Parallel(Logger):
+    """Helper class for readable parallel mapping.
+
+    Read more in the :ref:`User Guide <parallel>`.
+
+    Parameters
+    ----------
+    n_jobs: int, default=None
+        The maximum number of concurrently running jobs, such as the number
+        of Python worker processes when ``backend="loky"`` or the size of
+        the thread-pool when ``backend="threading"``.
+        This argument is converted to an integer, rounded down for floats.
+        If -1 is given, `joblib` tries to use all CPUs. The number of CPUs
+        ``n_cpus`` is obtained with :func:`~cpu_count`.
+        For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. For instance,
+        using ``n_jobs=-2`` will result in all CPUs but one being used.
+        This argument can also go above ``n_cpus``, which will cause
+        oversubscription. In some cases, slight oversubscription can be
+        beneficial, e.g., for tasks with large I/O operations.
+        If 1 is given, no parallel computing code is used at all, and the
+        behavior amounts to a simple python `for` loop. This mode is not
+        compatible with ``timeout``.
+        None is a marker for 'unset' that will be interpreted as n_jobs=1
+        unless the call is performed under a :func:`~parallel_config`
+        context manager that sets another value for ``n_jobs``.
+        If n_jobs = 0 then a ValueError is raised.
+    backend: str, ParallelBackendBase instance or None, default='loky'
+        Specify the parallelization backend implementation.
+        Supported backends are:
+
+        - "loky" used by default, can induce some
+          communication and memory overhead when exchanging input and
+          output data with the worker Python processes. On some rare
+          systems (such as Pyodide), the loky backend may not be
+          available.
+        - "multiprocessing" previous process-based backend based on
+          `multiprocessing.Pool`. Less robust than `loky`.
+        - "threading" is a very low-overhead backend but it suffers
+          from the Python Global Interpreter Lock if the called function
+          relies a lot on Python objects. "threading" is mostly useful
+          when the execution bottleneck is a compiled extension that
+          explicitly releases the GIL (for instance a Cython loop wrapped
+          in a "with nogil" block or an expensive call to a library such
+          as NumPy).
+        - finally, you can register backends by calling
+          :func:`~register_parallel_backend`. This will allow you to
+          implement a backend of your liking.
+
+        It is not recommended to hard-code the backend name in a call to
+        :class:`~Parallel` in a library. Instead it is recommended to set
+        soft hints (prefer) or hard constraints (require) so as to make it
+        possible for library users to change the backend from the outside
+        using the :func:`~parallel_config` context manager.
+    return_as: str in {'list', 'generator', 'generator_unordered'}, default='list'
+        If 'list', calls to this instance will return a list, only when
+        all results have been processed and retrieved.
+        If 'generator', it will return a generator that yields the results
+        as soon as they are available, in the order in which the tasks were
+        submitted.
+        If 'generator_unordered', the generator will immediately yield
+        available results independently of the submission order. The output
+        order is not deterministic in this case because it depends on the
+        concurrency of the workers.
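+        For instance, lazy consumption of results might look like this
+        (an illustrative sketch, not run as a doctest)::
+
+            >>> gen = Parallel(n_jobs=2, return_as="generator")(
+            ...     delayed(abs)(i) for i in range(3))  # doctest: +SKIP
+            >>> next(gen)  # doctest: +SKIP
+            0
+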
+    prefer: str in {'processes', 'threads'} or None, default=None
+        Soft hint to choose the default backend if no specific backend
+        was selected with the :func:`~parallel_config` context manager.
+        The default process-based backend is 'loky' and the default
+        thread-based backend is 'threading'. Ignored if the ``backend``
+        parameter is specified.
+    require: 'sharedmem' or None, default=None
+        Hard constraint to select the backend. If set to 'sharedmem',
+        the selected backend will be single-host and thread-based even
+        if the user asked for a non-thread based backend with
+        :func:`~joblib.parallel_config`.
+    verbose: int, default=0
+        The verbosity level: if non zero, progress messages are
+        printed. Above 50, the output is sent to stdout.
+        The frequency of the messages increases with the verbosity level.
+        If it is more than 10, all iterations are reported.
+    timeout: float or None, default=None
+        Timeout limit for each task to complete. If any task takes longer,
+        a TimeoutError will be raised. Only applied when n_jobs != 1.
+    pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}, default='2*n_jobs'
+        The number of batches (of tasks) to be pre-dispatched.
+        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
+        default and the workers should never starve. Note that only basic
+        arithmetic is allowed here and no modules can be used in this
+        expression.
+    batch_size: int or 'auto', default='auto'
+        The number of atomic tasks to dispatch at once to each
+        worker. When individual evaluations are very fast, dispatching
+        calls to workers can be slower than sequential computation because
+        of the overhead. Batching fast computations together can mitigate
+        this.
+        The ``'auto'`` strategy keeps track of the time it takes for a
+        batch to complete, and dynamically adjusts the batch size to keep
+        the time on the order of half a second, using a heuristic. The
+        initial batch size is 1.
+        ``batch_size="auto"`` with ``backend="threading"`` will dispatch
+        batches of a single task at a time as the threading backend has
+        very little overhead and using larger batch size has not proved to
+        bring any gain in that case.
+    temp_folder: str or None, default=None
+        Folder to be used by the pool for memmapping large arrays
+        for sharing memory with worker processes. If None, this will try in
+        order:
+
+        - a folder pointed by the JOBLIB_TEMP_FOLDER environment
+          variable,
+        - /dev/shm if the folder exists and is writable: this is a
+          RAM disk filesystem available by default on modern Linux
+          distributions,
+        - the default system temporary folder that can be
+          overridden with TMP, TMPDIR or TEMP environment
+          variables, typically /tmp under Unix operating systems.
+
+        Only active when ``backend="loky"`` or ``"multiprocessing"``.
+    max_nbytes: int, str, or None, optional, default='1M'
+        Threshold on the size of arrays passed to the workers that
+        triggers automated memory mapping in temp_folder. Can be an int
+        in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
+        Use None to disable memmapping of large arrays.
+        Only active when ``backend="loky"`` or ``"multiprocessing"``.
+    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, default='r'
+        Memmapping mode for numpy arrays passed to workers. None will
+        disable memmapping, other modes defined in the numpy.memmap doc:
+        https://numpy.org/doc/stable/reference/generated/numpy.memmap.html
+        Also, see 'max_nbytes' parameter documentation for more details.
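+        A hypothetical call combining the memmapping options above
+        (illustrative only; the folder name is made up)::
+
+            >>> Parallel(n_jobs=4, max_nbytes='50M', mmap_mode='r',
+            ...          temp_folder='/tmp/joblib')  # doctest: +SKIP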
+ backend_kwargs: dict, optional + Additional parameters to pass to the backend `configure` method. + + Notes + ----- + + This object uses workers to compute in parallel the application of a + function to many different arguments. The main functionality it brings + in addition to using the raw multiprocessing or concurrent.futures API + are (see examples for details): + + * More readable code, in particular since it avoids + constructing list of arguments. + + * Easier debugging: + - informative tracebacks even when the error happens on + the client side + - using 'n_jobs=1' enables to turn off parallel computing + for debugging without changing the codepath + - early capture of pickling errors + + * An optional progress meter. + + * Interruption of multiprocesses jobs with 'Ctrl-C' + + * Flexible pickling control for the communication to and from + the worker processes. + + * Ability to use shared memory efficiently with worker + processes for large numpy-based datastructures. + + Note that the intended usage is to run one call at a time. Multiple + calls to the same Parallel object will result in a ``RuntimeError`` + + Examples + -------- + + A simple example: + + >>> from math import sqrt + >>> from joblib import Parallel, delayed + >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10)) + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] + + Reshaping the output when the function has several return + values: + + >>> from math import modf + >>> from joblib import Parallel, delayed + >>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10)) + >>> res, i = zip(*r) + >>> res + (0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5) + >>> i + (0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0) + + The progress meter: the higher the value of `verbose`, the more + messages: + + >>> from time import sleep + >>> from joblib import Parallel, delayed + >>> r = Parallel(n_jobs=2, verbose=10)( + ... delayed(sleep)(.2) for _ in range(10)) #doctest: +SKIP + [Parallel(n_jobs=2)]: Done 1 tasks | elapsed: 0.6s + [Parallel(n_jobs=2)]: Done 4 tasks | elapsed: 0.8s + [Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 1.4s finished + + Traceback example, note how the line of the error is indicated + as well as the values of the parameter passed to the function that + triggered the exception, even though the traceback happens in the + child process: + + >>> from heapq import nlargest + >>> from joblib import Parallel, delayed + >>> Parallel(n_jobs=2)( + ... delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) + ... # doctest: +SKIP + ----------------------------------------------------------------------- + Sub-process traceback: + ----------------------------------------------------------------------- + TypeError Mon Nov 12 11:37:46 2012 + PID: 12934 Python 2.7.3: /usr/bin/python + ........................................................................ + /usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None) + 419 if n >= size: + 420 return sorted(iterable, key=key, reverse=True)[:n] + 421 + 422 # When key is none, use simpler decoration + 423 if key is None: + --> 424 it = izip(iterable, count(0,-1)) # decorate + 425 result = _nlargest(n, it) + 426 return map(itemgetter(0), result) # undecorate + 427 + 428 # General case, slowest method + TypeError: izip argument #1 must support iteration + _______________________________________________________________________ + + + Using pre_dispatch in a producer/consumer situation, where the + data is generated on the fly. 
Note how the producer is first + called 3 times before the parallel loop is initiated, and then + called to generate new data on the fly: + + >>> from math import sqrt + >>> from joblib import Parallel, delayed + >>> def producer(): + ... for i in range(6): + ... print('Produced %s' % i) + ... yield i + >>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')( + ... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP + Produced 0 + Produced 1 + Produced 2 + [Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s + Produced 3 + [Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s + Produced 4 + [Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s + Produced 5 + [Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s + [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s remaining: 0.0s + [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished + + """ # noqa: E501 + + def __init__( + self, + n_jobs=default_parallel_config["n_jobs"], + backend=default_parallel_config["backend"], + return_as="list", + verbose=default_parallel_config["verbose"], + timeout=None, + pre_dispatch="2 * n_jobs", + batch_size="auto", + temp_folder=default_parallel_config["temp_folder"], + max_nbytes=default_parallel_config["max_nbytes"], + mmap_mode=default_parallel_config["mmap_mode"], + prefer=default_parallel_config["prefer"], + require=default_parallel_config["require"], + **backend_kwargs, + ): + # Initiate parent Logger class state + super().__init__() + + # Interpret n_jobs=None as 'unset' + if n_jobs is None: + n_jobs = default_parallel_config["n_jobs"] + + active_backend, context_config = _get_active_backend( + prefer=prefer, require=require, verbose=verbose + ) + + nesting_level = active_backend.nesting_level + + self.verbose = _get_config_param(verbose, context_config, "verbose") + self.timeout = timeout + self.pre_dispatch = pre_dispatch + + if return_as not in {"list", "generator", "generator_unordered"}: + raise ValueError( + 'Expected `return_as` parameter to be a string equal to "list"' + f',"generator" or "generator_unordered", but got {return_as} ' + "instead." + ) + self.return_as = return_as + self.return_generator = return_as != "list" + self.return_ordered = return_as != "generator_unordered" + + # Check if we are under a parallel_config or parallel_backend + # context manager and use the config from the context manager + # for arguments that are not explicitly set. + self._backend_kwargs = { + **backend_kwargs, + **{ + k: _get_config_param(param, context_config, k) + for param, k in [ + (max_nbytes, "max_nbytes"), + (temp_folder, "temp_folder"), + (mmap_mode, "mmap_mode"), + (prefer, "prefer"), + (require, "require"), + (verbose, "verbose"), + ] + }, + } + + if isinstance(self._backend_kwargs["max_nbytes"], str): + self._backend_kwargs["max_nbytes"] = memstr_to_bytes( + self._backend_kwargs["max_nbytes"] + ) + self._backend_kwargs["verbose"] = max(0, self._backend_kwargs["verbose"] - 50) + + if DEFAULT_MP_CONTEXT is not None: + self._backend_kwargs["context"] = DEFAULT_MP_CONTEXT + elif hasattr(mp, "get_context"): + self._backend_kwargs["context"] = mp.get_context() + + if backend is default_parallel_config["backend"] or backend is None: + backend = active_backend + + elif isinstance(backend, ParallelBackendBase): + # Use provided backend as is, with the current nesting_level if it + # is not set yet. 
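+            # (Illustrative: ``Parallel(n_jobs=2, backend=MyBackend())``,
+            # where ``MyBackend`` is a hypothetical ParallelBackendBase
+            # subclass, takes this branch and reuses the instance as-is.)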
+ if backend.nesting_level is None: + backend.nesting_level = nesting_level + + elif hasattr(backend, "Pool") and hasattr(backend, "Lock"): + # Make it possible to pass a custom multiprocessing context as + # backend to change the start method to forkserver or spawn or + # preload modules on the forkserver helper process. + self._backend_kwargs["context"] = backend + backend = MultiprocessingBackend(nesting_level=nesting_level) + + elif backend not in BACKENDS and backend in MAYBE_AVAILABLE_BACKENDS: + warnings.warn( + f"joblib backend '{backend}' is not available on " + f"your system, falling back to {DEFAULT_BACKEND}.", + UserWarning, + stacklevel=2, + ) + BACKENDS[backend] = BACKENDS[DEFAULT_BACKEND] + backend = BACKENDS[DEFAULT_BACKEND](nesting_level=nesting_level) + + else: + try: + backend_factory = BACKENDS[backend] + except KeyError as e: + raise ValueError( + "Invalid backend: %s, expected one of %r" + % (backend, sorted(BACKENDS.keys())) + ) from e + backend = backend_factory(nesting_level=nesting_level) + + n_jobs = _get_config_param(n_jobs, context_config, "n_jobs") + if n_jobs is None: + # No specific context override and no specific value request: + # default to the default of the backend. + n_jobs = backend.default_n_jobs + try: + n_jobs = int(n_jobs) + except ValueError: + raise ValueError("n_jobs could not be converted to int") + self.n_jobs = n_jobs + + if require == "sharedmem" and not getattr(backend, "supports_sharedmem", False): + raise ValueError("Backend %s does not support shared memory" % backend) + + if batch_size == "auto" or isinstance(batch_size, Integral) and batch_size > 0: + self.batch_size = batch_size + else: + raise ValueError( + "batch_size must be 'auto' or a positive integer, got: %r" % batch_size + ) + + if not isinstance(backend, SequentialBackend): + if self.return_generator and not backend.supports_return_generator: + raise ValueError( + "Backend {} does not support return_as={}".format( + backend, return_as + ) + ) + # This lock is used to coordinate the main thread of this process + # with the async callback thread of our the pool. + self._lock = threading.RLock() + self._jobs = collections.deque() + self._jobs_set = set() + self._pending_outputs = list() + self._ready_batches = queue.Queue() + self._reducer_callback = None + + # Internal variables + self._backend = backend + self._running = False + self._managed_backend = False + self._id = uuid4().hex + self._call_ref = None + + def __enter__(self): + self._managed_backend = True + self._calling = False + self._initialize_backend() + return self + + def __exit__(self, exc_type, exc_value, traceback): + self._managed_backend = False + if self.return_generator and self._calling: + self._abort() + self._terminate_and_reset() + + def _initialize_backend(self): + """Build a process or thread pool and return the number of workers""" + try: + n_jobs = self._backend.configure( + n_jobs=self.n_jobs, parallel=self, **self._backend_kwargs + ) + if self.timeout is not None and not self._backend.supports_timeout: + warnings.warn( + "The backend class {!r} does not support timeout. " + "You have set 'timeout={}' in Parallel but " + "the 'timeout' parameter will not be used.".format( + self._backend.__class__.__name__, self.timeout + ) + ) + + except FallbackToBackend as e: + # Recursively initialize the backend in case of requested fallback. 
+ self._backend = e.backend + n_jobs = self._initialize_backend() + + return n_jobs + + def _effective_n_jobs(self): + if self._backend: + return self._backend.effective_n_jobs(self.n_jobs) + return 1 + + def _terminate_and_reset(self): + if hasattr(self._backend, "stop_call") and self._calling: + self._backend.stop_call() + self._calling = False + if not self._managed_backend: + self._backend.terminate() + + def _dispatch(self, batch): + """Queue the batch for computing, with or without multiprocessing + + WARNING: this method is not thread-safe: it should be only called + indirectly via dispatch_one_batch. + + """ + # If job.get() catches an exception, it closes the queue: + if self._aborting: + return + + batch_size = len(batch) + + self.n_dispatched_tasks += batch_size + self.n_dispatched_batches += 1 + + dispatch_timestamp = time.time() + + batch_tracker = BatchCompletionCallBack(dispatch_timestamp, batch_size, self) + + self._register_new_job(batch_tracker) + + # If return_ordered is False, the batch_tracker is not stored in the + # jobs queue at the time of submission. Instead, it will be appended to + # the queue by itself as soon as the callback is triggered to be able + # to return the results in the order of completion. + + job = self._backend.submit(batch, callback=batch_tracker) + batch_tracker.register_job(job) + + def _register_new_job(self, batch_tracker): + if self.return_ordered: + self._jobs.append(batch_tracker) + else: + self._jobs_set.add(batch_tracker) + + def dispatch_next(self): + """Dispatch more data for parallel processing + + This method is meant to be called concurrently by the multiprocessing + callback. We rely on the thread-safety of dispatch_one_batch to protect + against concurrent consumption of the unprotected iterator. + """ + if not self.dispatch_one_batch(self._original_iterator): + self._iterating = False + self._original_iterator = None + + def dispatch_one_batch(self, iterator): + """Prefetch the tasks for the next batch and dispatch them. + + The effective size of the batch is computed here. + If there are no more jobs to dispatch, return False, else return True. + + The iterator consumption and dispatching is protected by the same + lock so calling this function should be thread safe. + + """ + + if self._aborting: + return False + + batch_size = self._get_batch_size() + + with self._lock: + # to ensure an even distribution of the workload between workers, + # we look ahead in the original iterators more than batch_size + # tasks - However, we keep consuming only one batch at each + # dispatch_one_batch call. The extra tasks are stored in a local + # queue, _ready_batches, that is looked-up prior to re-consuming + # tasks from the origal iterator. + try: + tasks = self._ready_batches.get(block=False) + except queue.Empty: + # slice the iterator n_jobs * batchsize items at a time. If the + # slice returns less than that, then the current batchsize puts + # too much weight on a subset of workers, while other may end + # up starving. So in this case, re-scale the batch size + # accordingly to distribute evenly the last items between all + # workers. + n_jobs = self._cached_effective_n_jobs + big_batch_size = batch_size * n_jobs + + try: + islice = list(itertools.islice(iterator, big_batch_size)) + except Exception as e: + # Handle the fact that the generator of task raised an + # exception. As this part of the code can be executed in + # a thread internal to the backend, register a task with + # an error that will be raised in the user's thread. 
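+                    # (Sketch: a task generator such as
+                    # ``(delayed(abs)(1 / i) for i in [1, 0])`` raising
+                    # ZeroDivisionError while being sliced ends up here.)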
+ if isinstance(e.__context__, queue.Empty): + # Suppress the cause of the exception if it is + # queue.Empty to avoid cluttered traceback. Only do it + # if the __context__ is really empty to avoid messing + # with causes of the original error. + e.__cause__ = None + batch_tracker = BatchCompletionCallBack(0, batch_size, self) + self._register_new_job(batch_tracker) + batch_tracker._register_outcome(dict(result=e, status=TASK_ERROR)) + return True + + if len(islice) == 0: + return False + elif ( + iterator is self._original_iterator and len(islice) < big_batch_size + ): + # We reached the end of the original iterator (unless + # iterator is the ``pre_dispatch``-long initial slice of + # the original iterator) -- decrease the batch size to + # account for potential variance in the batches running + # time. + final_batch_size = max(1, len(islice) // (10 * n_jobs)) + else: + final_batch_size = max(1, len(islice) // n_jobs) + + # enqueue n_jobs batches in a local queue + for i in range(0, len(islice), final_batch_size): + tasks = BatchedCalls( + islice[i : i + final_batch_size], + self._backend.get_nested_backend(), + self._reducer_callback, + self._pickle_cache, + ) + self._ready_batches.put(tasks) + + # finally, get one task. + tasks = self._ready_batches.get(block=False) + if len(tasks) == 0: + # No more tasks available in the iterator: tell caller to stop. + return False + else: + self._dispatch(tasks) + return True + + def _get_batch_size(self): + """Returns the effective batch size for dispatch""" + if self.batch_size == "auto": + return self._backend.compute_batch_size() + else: + # Fixed batch size strategy + return self.batch_size + + def _print(self, msg): + """Display the message on stout or stderr depending on verbosity""" + # XXX: Not using the logger framework: need to + # learn to use logger better. + if not self.verbose: + return + if self.verbose < 50: + writer = sys.stderr.write + else: + writer = sys.stdout.write + writer(f"[{self}]: {msg}\n") + + def _is_completed(self): + """Check if all tasks have been completed""" + return self.n_completed_tasks == self.n_dispatched_tasks and not ( + self._iterating or self._aborting + ) + + def print_progress(self): + """Display the process of the parallel execution only a fraction + of time, controlled by self.verbose. + """ + + if not self.verbose: + return + + if self.n_tasks is not None and self.n_tasks > 0: + width = floor(log10(self.n_tasks)) + 1 + else: + width = 3 + elapsed_time = time.time() - self._start_time + + if self._is_completed(): + # Make sure that we get a last message telling us we are done + self._print( + f"Done {self.n_completed_tasks:{width}d} out of " + f"{self.n_completed_tasks:{width}d} | elapsed: " + f"{short_format_time(elapsed_time)} finished" + ) + return + + # Original job iterator becomes None once it has been fully + # consumed: at this point we know the total number of jobs and we are + # able to display an estimation of the remaining time based on already + # completed jobs. Otherwise, we simply display the number of completed + # tasks. 
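+        # (Illustrative: in this phase, verbose=10 prints lines such as
+        #     [Parallel(n_jobs=2)]: Done   3 tasks      | elapsed:    0.1s
+        # at intervals chosen by _verbosity_filter.)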
+ elif self._original_iterator is not None: + if _verbosity_filter(self.n_dispatched_batches, self.verbose): + return + fmt_time = f"| elapsed: {short_format_time(elapsed_time)}" + index = self.n_completed_tasks + if self.n_tasks is not None: + self._print( + f"Done {index:{width}d} out of {self.n_tasks:{width}d} {fmt_time}" + ) + else: + pad = " " * (len("out of ") + width - len("tasks")) + self._print(f"Done {index:{width}d} tasks {pad}{fmt_time}") + else: + index = self.n_completed_tasks + # We are finished dispatching + total_tasks = self.n_dispatched_tasks + # We always display the first loop + if index != 0: + # Display depending on the number of remaining items + # A message as soon as we finish dispatching, cursor is 0 + cursor = total_tasks - index + 1 - self._pre_dispatch_amount + frequency = (total_tasks // self.verbose) + 1 + is_last_item = index + 1 == total_tasks + if is_last_item or cursor % frequency: + return + remaining_time = (elapsed_time / max(index, 1)) * ( + self.n_dispatched_tasks - index + ) + # only display status if remaining time is greater or equal to 0 + self._print( + f"Done {index:{width}d} out of {total_tasks:{width}d} " + f"| elapsed: {short_format_time(elapsed_time)} remaining: " + f"{short_format_time(remaining_time)}" + ) + + def _abort(self): + # Stop dispatching new jobs in the async callback thread + self._aborting = True + + # If the backend allows it, cancel or kill remaining running + # tasks without waiting for the results as we will raise + # the exception we got back to the caller instead of returning + # any result. + backend = self._backend + if not self._aborted and hasattr(backend, "abort_everything"): + # If the backend is managed externally we need to make sure + # to leave it in a working state to allow for future jobs + # scheduling. + ensure_ready = self._managed_backend + backend.abort_everything(ensure_ready=ensure_ready) + self._aborted = True + + def _start(self, iterator, pre_dispatch): + # Only set self._iterating to True if at least a batch + # was dispatched. In particular this covers the edge + # case of Parallel used with an exhausted iterator. If + # self._original_iterator is None, then this means either + # that pre_dispatch == "all", n_jobs == 1 or that the first batch + # was very quick and its callback already dispatched all the + # remaining jobs. + self._iterating = False + if self.dispatch_one_batch(iterator): + self._iterating = self._original_iterator is not None + + while self.dispatch_one_batch(iterator): + pass + + if pre_dispatch == "all": + # The iterable was consumed all at once by the above for loop. + # No need to wait for async callbacks to trigger to + # consumption. + self._iterating = False + + def _get_outputs(self, iterator, pre_dispatch): + """Iterator returning the tasks' output as soon as they are ready.""" + dispatch_thread_id = threading.get_ident() + detach_generator_exit = False + try: + self._start(iterator, pre_dispatch) + # first yield returns None, for internal use only. This ensures + # that we enter the try/except block and start dispatching the + # tasks. + yield + + with self._backend.retrieval_context(): + yield from self._retrieve() + + except GeneratorExit: + # The generator has been garbage collected before being fully + # consumed. This aborts the remaining tasks if possible and warn + # the user if necessary. 
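+            # (Sketch of how this branch is reached:
+            #     gen = Parallel(n_jobs=2, return_as="generator")(tasks)
+            #     next(gen); del gen   # gc'ed before exhaustion
+            # where ``tasks`` stands for any iterable of delayed calls.)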
+ self._exception = True + + # In some interpreters such as PyPy, GeneratorExit can be raised in + # a different thread than the one used to start the dispatch of the + # parallel tasks. This can lead to hang when a thread attempts to + # join itself. As workaround, we detach the execution of the + # aborting code to a dedicated thread. We then need to make sure + # the rest of the function does not call `_terminate_and_reset` + # in finally. + if dispatch_thread_id != threading.get_ident(): + warnings.warn( + "A generator produced by joblib.Parallel has been " + "gc'ed in an unexpected thread. This behavior should " + "not cause major -issues but to make sure, please " + "report this warning and your use case at " + "https://github.com/joblib/joblib/issues so it can " + "be investigated." + ) + + detach_generator_exit = True + _parallel = self + + class _GeneratorExitThread(threading.Thread): + def run(self): + _parallel._abort() + if _parallel.return_generator: + _parallel._warn_exit_early() + _parallel._terminate_and_reset() + + _GeneratorExitThread(name="GeneratorExitThread").start() + return + + # Otherwise, we are in the thread that started the dispatch: we can + # safely abort the execution and warn the user. + self._abort() + if self.return_generator: + self._warn_exit_early() + + raise + + # Note: we catch any BaseException instead of just Exception instances + # to also include KeyboardInterrupt + except BaseException: + self._exception = True + self._abort() + raise + finally: + # Store the unconsumed tasks and terminate the workers if necessary + _remaining_outputs = [] if self._exception else self._jobs + self._jobs = collections.deque() + self._jobs_set = set() + self._running = False + if not detach_generator_exit: + self._terminate_and_reset() + + while len(_remaining_outputs) > 0: + batched_results = _remaining_outputs.popleft() + batched_results = batched_results.get_result(self.timeout) + for result in batched_results: + yield result + + def _wait_retrieval(self): + """Return True if we need to continue retrieving some tasks.""" + + # If the input load is still being iterated over, it means that tasks + # are still on the dispatch waitlist and their results will need to + # be retrieved later on. + if self._iterating: + return True + + # If some of the dispatched tasks are still being processed by the + # workers, wait for the compute to finish before starting retrieval + if self.n_completed_tasks < self.n_dispatched_tasks: + return True + + # For backends that does not support retrieving asynchronously the + # result to the main process, all results must be carefully retrieved + # in the _retrieve loop in the main thread while the backend is alive. + # For other backends, the actual retrieval is done asynchronously in + # the callback thread, and we can terminate the backend before the + # `self._jobs` result list has been emptied. The remaining results + # will be collected in the `finally` step of the generator. + if not self._backend.supports_retrieve_callback: + if len(self._jobs) > 0: + return True + + return False + + def _retrieve(self): + timeout_control_job = None + while self._wait_retrieval(): + # If the callback thread of a worker has signaled that its task + # triggered an exception, or if the retrieval loop has raised an + # exception (e.g. `GeneratorExit`), exit the loop and surface the + # worker traceback. + if self._aborting: + self._raise_error_fast() + break + + nb_jobs = len(self._jobs) + # Now wait for a job to be ready for retrieval. 
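+            # (Paraphrasing the branches below: ordered mode polls the head
+            # of ``self._jobs``; unordered mode polls an arbitrary dispatched
+            # job, solely for timeout accounting, until a job completes.)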
+ if self.return_ordered: + # Case ordered: wait for completion (or error) of the next job + # that have been dispatched and not retrieved yet. If no job + # have been dispatched yet, wait for dispatch. + # We assume that the time to wait for the next job to be + # dispatched is always low, so that the timeout + # control only have to be done on the amount of time the next + # dispatched job is pending. + if (nb_jobs == 0) or ( + self._jobs[0].get_status(timeout=self.timeout) == TASK_PENDING + ): + time.sleep(0.01) + continue + + elif nb_jobs == 0: + # Case unordered: jobs are added to the list of jobs to + # retrieve `self._jobs` only once completed or in error, which + # is too late to enable timeout control in the same way than in + # the previous case. + # Instead, if no job is ready to be retrieved yet, we + # arbitrarily pick a dispatched job, and the timeout control is + # done such that an error is raised if this control job + # timeouts before any other dispatched job has completed and + # been added to `self._jobs` to be retrieved. + if timeout_control_job is None: + timeout_control_job = next(iter(self._jobs_set), None) + + # NB: it can be None if no job has been dispatched yet. + if timeout_control_job is not None: + timeout_control_job.get_status(timeout=self.timeout) + + time.sleep(0.01) + continue + + elif timeout_control_job is not None: + # Case unordered, when `nb_jobs > 0`: + # It means that a job is ready to be retrieved, so no timeout + # will occur during this iteration. + # Before proceeding to retrieval of the next ready job, reset + # the timeout control state to prepare the next iteration. + timeout_control_job._completion_timeout_counter = None + timeout_control_job = None + + # We need to be careful: the job list can be filling up as + # we empty it and Python list are not thread-safe by + # default hence the use of the lock + with self._lock: + batched_results = self._jobs.popleft() + if not self.return_ordered: + self._jobs_set.remove(batched_results) + + # Flatten the batched results to output one output at a time + batched_results = batched_results.get_result(self.timeout) + for result in batched_results: + self._nb_consumed += 1 + yield result + + def _raise_error_fast(self): + """If we are aborting, raise if a job caused an error.""" + + # Find the first job whose status is TASK_ERROR if it exists. + with self._lock: + error_job = next( + (job for job in self._jobs if job.status == TASK_ERROR), None + ) + + # If this error job exists, immediately raise the error by + # calling get_result. This job might not exists if abort has been + # called directly or if the generator is gc'ed. + if error_job is not None: + error_job.get_result(self.timeout) + + def _warn_exit_early(self): + """Warn the user if the generator is gc'ed before being consumned.""" + ready_outputs = self.n_completed_tasks - self._nb_consumed + is_completed = self._is_completed() + msg = "" + if ready_outputs: + msg += ( + f"{ready_outputs} tasks have been successfully executed but not used." + ) + if not is_completed: + msg += " Additionally, " + + if not is_completed: + msg += ( + f"{self.n_dispatched_tasks - self.n_completed_tasks} tasks " + "which were still being processed by the workers have been " + "cancelled." + ) + + if msg: + msg += ( + " You could benefit from adjusting the input task " + "iterator to limit unnecessary computation time." + ) + + warnings.warn(msg) + + def _get_sequential_output(self, iterable): + """Separate loop for sequential output. 
+ + This simplifies the traceback in case of errors and reduces the + overhead of calling sequential tasks with `joblib`. + """ + try: + self._iterating = True + self._original_iterator = iterable + batch_size = self._get_batch_size() + + if batch_size != 1: + it = iter(iterable) + iterable_batched = iter( + lambda: tuple(itertools.islice(it, batch_size)), () + ) + iterable = (task for batch in iterable_batched for task in batch) + + # first yield returns None, for internal use only. This ensures + # that we enter the try/except block and setup the generator. + yield None + + # Sequentially call the tasks and yield the results. + for func, args, kwargs in iterable: + self.n_dispatched_batches += 1 + self.n_dispatched_tasks += 1 + res = func(*args, **kwargs) + self.n_completed_tasks += 1 + self.print_progress() + yield res + self._nb_consumed += 1 + except BaseException: + self._exception = True + self._aborting = True + self._aborted = True + raise + finally: + self._running = False + self._iterating = False + self._original_iterator = None + self.print_progress() + + def _reset_run_tracking(self): + """Reset the counters and flags used to track the execution.""" + + # Makes sur the parallel instance was not previously running in a + # thread-safe way. + with getattr(self, "_lock", nullcontext()): + if self._running: + msg = "This Parallel instance is already running !" + if self.return_generator is True: + msg += ( + " Before submitting new tasks, you must wait for the " + "completion of all the previous tasks, or clean all " + "references to the output generator." + ) + raise RuntimeError(msg) + self._running = True + + # Counter to keep track of the task dispatched and completed. + self.n_dispatched_batches = 0 + self.n_dispatched_tasks = 0 + self.n_completed_tasks = 0 + + # Following count is incremented by one each time the user iterates + # on the output generator, it is used to prepare an informative + # warning message in case the generator is deleted before all the + # dispatched tasks have been consumed. + self._nb_consumed = 0 + + # Following flags are used to synchronize the threads in case one of + # the tasks error-out to ensure that all workers abort fast and that + # the backend terminates properly. + + # Set to True as soon as a worker signals that a task errors-out + self._exception = False + # Set to True in case of early termination following an incident + self._aborting = False + # Set to True after abortion is complete + self._aborted = False + + def __call__(self, iterable): + """Main function to dispatch parallel tasks.""" + + self._reset_run_tracking() + self.n_tasks = len(iterable) if hasattr(iterable, "__len__") else None + self._start_time = time.time() + + if not self._managed_backend: + n_jobs = self._initialize_backend() + else: + n_jobs = self._effective_n_jobs() + + if n_jobs == 1: + # If n_jobs==1, run the computation sequentially and return + # immediately to avoid overheads. + output = self._get_sequential_output(iterable) + next(output) + return output if self.return_generator else list(output) + + # Let's create an ID that uniquely identifies the current call. If the + # call is interrupted early and that the same instance is immediately + # reused, this id will be used to prevent workers that were + # concurrently finalizing a task from the previous call to run the + # callback. 
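+        # (e.g. call N is interrupted with Ctrl-C and the same instance is
+        # immediately reused for call N+1: without this id, a worker still
+        # finishing a task from call N could fire its callback into N+1.)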
+ with self._lock: + self._call_id = uuid4().hex + + # self._effective_n_jobs should be called in the Parallel.__call__ + # thread only -- store its value in an attribute for further queries. + self._cached_effective_n_jobs = n_jobs + + if isinstance(self._backend, LokyBackend): + # For the loky backend, we add a callback executed when reducing + # BatchCalls, that makes the loky executor use a temporary folder + # specific to this Parallel object when pickling temporary memmaps. + # This callback is necessary to ensure that several Parallel + # objects using the same reusable executor don't use the same + # temporary resources. + + def _batched_calls_reducer_callback(): + # Relevant implementation detail: the following lines, called + # when reducing BatchedCalls, are called in a thread-safe + # situation, meaning that the context of the temporary folder + # manager will not be changed in between the callback execution + # and the end of the BatchedCalls pickling. The reason is that + # pickling (the only place where set_current_context is used) + # is done from a single thread (the queue_feeder_thread). + self._backend._workers._temp_folder_manager.set_current_context( # noqa + self._id + ) + + self._reducer_callback = _batched_calls_reducer_callback + + # self._effective_n_jobs should be called in the Parallel.__call__ + # thread only -- store its value in an attribute for further queries. + self._cached_effective_n_jobs = n_jobs + + backend_name = self._backend.__class__.__name__ + if n_jobs == 0: + raise RuntimeError("%s has no active worker." % backend_name) + + self._print(f"Using backend {backend_name} with {n_jobs} concurrent workers.") + if hasattr(self._backend, "start_call"): + self._backend.start_call() + + # Following flag prevents double calls to `backend.stop_call`. + self._calling = True + + iterator = iter(iterable) + pre_dispatch = self.pre_dispatch + + if pre_dispatch == "all": + # prevent further dispatch via multiprocessing callback thread + self._original_iterator = None + self._pre_dispatch_amount = 0 + else: + self._original_iterator = iterator + if hasattr(pre_dispatch, "endswith"): + pre_dispatch = eval_expr(pre_dispatch.replace("n_jobs", str(n_jobs))) + self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch) + + # The main thread will consume the first pre_dispatch items and + # the remaining items will later be lazily dispatched by async + # callbacks upon task completions. + + # TODO: this iterator should be batch_size * n_jobs + iterator = itertools.islice(iterator, self._pre_dispatch_amount) + + # Use a caching dict for callables that are pickled with cloudpickle to + # improve performances. This cache is used only in the case of + # functions that are defined in the __main__ module, functions that + # are defined locally (inside another function) and lambda expressions. + self._pickle_cache = dict() + + output = self._get_outputs(iterator, pre_dispatch) + self._call_ref = weakref.ref(output) + + # The first item from the output is blank, but it makes the interpreter + # progress until it enters the Try/Except block of the generator and + # reaches the first `yield` statement. This starts the asynchronous + # dispatch of the tasks to the workers. 
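+        # (The same priming pattern in miniature, for illustration:
+        #     def gen():
+        #         do_setup()     # hypothetical setup step
+        #         yield          # next(g) runs the setup, then pauses here
+        #         yield "first real value"
+        # )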
+        next(output)
+
+        return output if self.return_generator else list(output)
+
+    def __repr__(self):
+        return "%s(n_jobs=%s)" % (self.__class__.__name__, self.n_jobs)
diff --git a/py311/lib/python3.11/site-packages/joblib/pool.py b/py311/lib/python3.11/site-packages/joblib/pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e961080703cd29dfbe441417aaeff37944a7118
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/joblib/pool.py
@@ -0,0 +1,362 @@
+"""Custom implementation of multiprocessing.Pool with custom pickler.
+
+This module provides efficient ways of working with data stored in
+shared memory with numpy.memmap arrays without inducing any memory
+copy between the parent and child processes.
+
+This module should not be imported if multiprocessing is not
+available as it implements subclasses of multiprocessing Pool
+that use a custom alternative to SimpleQueue.
+
+"""
+# Author: Olivier Grisel
+# Copyright: 2012, Olivier Grisel
+# License: BSD 3 clause
+
+import copyreg
+import sys
+import warnings
+from time import sleep
+
+try:
+    WindowsError
+except NameError:
+    WindowsError = type(None)
+
+from io import BytesIO
+
+# We need the class definition to derive from it, not the multiprocessing.Pool
+# factory function
+from multiprocessing.pool import Pool
+from pickle import HIGHEST_PROTOCOL, Pickler
+
+from ._memmapping_reducer import TemporaryResourcesManager, get_memmapping_reducers
+from ._multiprocessing_helpers import assert_spawning, mp
+
+try:
+    import numpy as np
+except ImportError:
+    np = None
+
+
+###############################################################################
+# Enable custom pickling in Pool queues
+
+
+class CustomizablePickler(Pickler):
+    """Pickler that accepts custom reducers.
+
+    TODO python2_drop: can this be simplified?
+
+    HIGHEST_PROTOCOL is selected by default as this pickler is used
+    to pickle ephemeral datastructures for interprocess communication
+    hence no backward compatibility is required.
+
+    `reducers` is expected to be a dictionary with key/values
+    being `(type, callable)` pairs where `callable` is a function that,
+    given an instance of `type`, will return a tuple `(constructor,
+    tuple_of_objects)` to rebuild an instance out of the pickled
+    `tuple_of_objects` as would return a `__reduce__` method. See the
+    standard library documentation on pickling for more details.
+
+    """
+
+    # We override the pure Python pickler as it's the only way to be able to
+    # customize the dispatch table without side effects in Python 2.7
+    # to 3.2. For Python 3.3+ leverage the new dispatch_table
+    # feature from https://bugs.python.org/issue14166 that makes it possible
+    # to use the C implementation of the Pickler which is faster.
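+    #
+    # Illustrative usage (a sketch; ``MyType`` and ``reduce_mytype`` are
+    # hypothetical names, not part of this module):
+    #
+    #     buf = BytesIO()
+    #     CustomizablePickler(buf, {MyType: reduce_mytype}).dump(obj)
+    #     payload = buf.getvalue()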
+ + def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL): + Pickler.__init__(self, writer, protocol=protocol) + if reducers is None: + reducers = {} + if hasattr(Pickler, "dispatch"): + # Make the dispatch registry an instance level attribute instead of + # a reference to the class dictionary under Python 2 + self.dispatch = Pickler.dispatch.copy() + else: + # Under Python 3 initialize the dispatch table with a copy of the + # default registry + self.dispatch_table = copyreg.dispatch_table.copy() + for type, reduce_func in reducers.items(): + self.register(type, reduce_func) + + def register(self, type, reduce_func): + """Attach a reducer function to a given type in the dispatch table.""" + if hasattr(Pickler, "dispatch"): + # Python 2 pickler dispatching is not explicitly customizable. + # Let us use a closure to workaround this limitation. + def dispatcher(self, obj): + reduced = reduce_func(obj) + self.save_reduce(obj=obj, *reduced) + + self.dispatch[type] = dispatcher + else: + self.dispatch_table[type] = reduce_func + + +class CustomizablePicklingQueue(object): + """Locked Pipe implementation that uses a customizable pickler. + + This class is an alternative to the multiprocessing implementation + of SimpleQueue in order to make it possible to pass custom + pickling reducers, for instance to avoid memory copy when passing + memory mapped datastructures. + + `reducers` is expected to be a dict with key / values being + `(type, callable)` pairs where `callable` is a function that, given an + instance of `type`, will return a tuple `(constructor, tuple_of_objects)` + to rebuild an instance out of the pickled `tuple_of_objects` as would + return a `__reduce__` method. + + See the standard library documentation on pickling for more details. + """ + + def __init__(self, context, reducers=None): + self._reducers = reducers + self._reader, self._writer = context.Pipe(duplex=False) + self._rlock = context.Lock() + if sys.platform == "win32": + self._wlock = None + else: + self._wlock = context.Lock() + self._make_methods() + + def __getstate__(self): + assert_spawning(self) + return (self._reader, self._writer, self._rlock, self._wlock, self._reducers) + + def __setstate__(self, state): + (self._reader, self._writer, self._rlock, self._wlock, self._reducers) = state + self._make_methods() + + def empty(self): + return not self._reader.poll() + + def _make_methods(self): + self._recv = recv = self._reader.recv + racquire, rrelease = self._rlock.acquire, self._rlock.release + + def get(): + racquire() + try: + return recv() + finally: + rrelease() + + self.get = get + + if self._reducers: + + def send(obj): + buffer = BytesIO() + CustomizablePickler(buffer, self._reducers).dump(obj) + self._writer.send_bytes(buffer.getvalue()) + + self._send = send + else: + self._send = send = self._writer.send + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self.put = send + else: + wlock_acquire, wlock_release = (self._wlock.acquire, self._wlock.release) + + def put(obj): + wlock_acquire() + try: + return send(obj) + finally: + wlock_release() + + self.put = put + + +class PicklingPool(Pool): + """Pool implementation with customizable pickling reducers. + + This is useful to control how data is shipped between processes + and makes it possible to use shared memory without useless + copies induces by the default pickling methods of the original + objects passed as arguments to dispatch. 
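+
+    A hypothetical construction (sketch; ``MyType`` and ``reduce_mytype``
+    are placeholder names)::
+
+        pool = PicklingPool(processes=2,
+                            forward_reducers={MyType: reduce_mytype})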
+
+    `forward_reducers` and `backward_reducers` are expected to be
+    dictionaries with key/values being `(type, callable)` pairs where
+    `callable` is a function that, given an instance of `type`, will return a
+    tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the
+    pickled `tuple_of_objects` as would return a `__reduce__` method.
+    See the standard library documentation about pickling for more details.
+
+    """
+
+    def __init__(
+        self, processes=None, forward_reducers=None, backward_reducers=None, **kwargs
+    ):
+        if forward_reducers is None:
+            forward_reducers = dict()
+        if backward_reducers is None:
+            backward_reducers = dict()
+        self._forward_reducers = forward_reducers
+        self._backward_reducers = backward_reducers
+        poolargs = dict(processes=processes)
+        poolargs.update(kwargs)
+        super(PicklingPool, self).__init__(**poolargs)
+
+    def _setup_queues(self):
+        context = getattr(self, "_ctx", mp)
+        self._inqueue = CustomizablePicklingQueue(context, self._forward_reducers)
+        self._outqueue = CustomizablePicklingQueue(context, self._backward_reducers)
+        self._quick_put = self._inqueue._send
+        self._quick_get = self._outqueue._recv
+
+
+class MemmappingPool(PicklingPool):
+    """Process pool that shares large arrays to avoid memory copy.
+
+    This drop-in replacement for `multiprocessing.pool.Pool` makes
+    it possible to work efficiently with shared memory in a numpy
+    context.
+
+    Existing instances of numpy.memmap are preserved: the child
+    subprocesses will have access to the same shared memory in the
+    original mode except for the 'w+' mode that is automatically
+    transformed as 'r+' to avoid zeroing the original data upon
+    instantiation.
+
+    Furthermore large arrays from the parent process are automatically
+    dumped to a temporary folder on the filesystem so that child
+    processes can access their content via memmapping (file system
+    backed shared memory).
+
+    Note: it is important to call the terminate method to collect
+    the temporary folder used by the pool.
+
+    Parameters
+    ----------
+    processes: int, optional
+        Number of worker processes running concurrently in the pool.
+    initializer: callable, optional
+        Callable executed on worker process creation.
+    initargs: tuple, optional
+        Arguments passed to the initializer callable.
+    temp_folder: str or callable, optional
+        If str:
+          Folder to be used by the pool for memmapping large arrays
+          for sharing memory with worker processes. If None, this will try in
+          order:
+          - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
+          - /dev/shm if the folder exists and is writable: this is a RAMdisk
+            filesystem available by default on modern Linux distributions,
+          - the default system temporary folder that can be overridden
+            with TMP, TMPDIR or TEMP environment variables, typically /tmp
+            under Unix operating systems.
+        If callable:
+          A callable in charge of dynamically resolving a temporary folder
+          for memmapping large arrays.
+    max_nbytes: int or None, optional, 1e6 by default
+        Threshold on the size of arrays passed to the workers that
+        triggers automated memory mapping in temp_folder.
+        Use None to disable memmapping of large arrays.
+    mmap_mode: {'r+', 'r', 'w+', 'c'}
+        Memmapping mode for numpy arrays passed to workers.
+        See 'max_nbytes' parameter documentation for more details.
+    forward_reducers: dictionary, optional
+        Reducers used to pickle objects passed from main process to worker
+        processes: see below.
+ backward_reducers: dictionary, optional + Reducers used to pickle return values from workers back to the + main process. + verbose: int, optional + Make it possible to monitor how the communication of numpy arrays + with the subprocess is handled (pickling or memmapping) + prewarm: bool or str, optional, "auto" by default. + If True, force a read on newly memmapped array to make sure that OS + pre-cache it in memory. This can be useful to avoid concurrent disk + access when the same data array is passed to different worker + processes. If "auto" (by default), prewarm is set to True, unless the + Linux shared memory partition /dev/shm is available and used as temp + folder. + + `forward_reducers` and `backward_reducers` are expected to be + dictionaries with key/values being `(type, callable)` pairs where + `callable` is a function that give an instance of `type` will return + a tuple `(constructor, tuple_of_objects)` to rebuild an instance out + of the pickled `tuple_of_objects` as would return a `__reduce__` + method. See the standard library documentation on pickling for more + details. + + """ + + def __init__( + self, + processes=None, + temp_folder=None, + max_nbytes=1e6, + mmap_mode="r", + forward_reducers=None, + backward_reducers=None, + verbose=0, + prewarm=False, + **kwargs, + ): + manager = TemporaryResourcesManager(temp_folder) + self._temp_folder_manager = manager + + # The usage of a temp_folder_resolver over a simple temp_folder is + # superfluous for multiprocessing pools, as they don't get reused, see + # get_memmapping_executor for more details. We still use it for code + # simplicity. + forward_reducers, backward_reducers = get_memmapping_reducers( + temp_folder_resolver=manager.resolve_temp_folder_name, + max_nbytes=max_nbytes, + mmap_mode=mmap_mode, + forward_reducers=forward_reducers, + backward_reducers=backward_reducers, + verbose=verbose, + unlink_on_gc_collect=False, + prewarm=prewarm, + ) + + poolargs = dict( + processes=processes, + forward_reducers=forward_reducers, + backward_reducers=backward_reducers, + ) + poolargs.update(kwargs) + super(MemmappingPool, self).__init__(**poolargs) + + def terminate(self): + n_retries = 10 + for i in range(n_retries): + try: + super(MemmappingPool, self).terminate() + break + except OSError as e: + if isinstance(e, WindowsError): + # Workaround occasional "[Error 5] Access is denied" issue + # when trying to terminate a process under windows. + sleep(0.1) + if i + 1 == n_retries: + warnings.warn( + "Failed to terminate worker processes in" + " multiprocessing pool: %r" % e + ) + + # Clean up the temporary resources as the workers should now be off. + self._temp_folder_manager._clean_temporary_resources() + + @property + def _temp_folder(self): + # Legacy property in tests. could be removed if we refactored the + # memmapping tests. SHOULD ONLY BE USED IN TESTS! + # We cache this property because it is called late in the tests - at + # this point, all context have been unregistered, and + # resolve_temp_folder_name raises an error. 
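
For orientation, a minimal usage sketch of the `MemmappingPool` defined above, under the documented defaults (the array size and worker function are illustrative): any argument larger than `max_nbytes` is dumped to the temporary folder and handed to workers as a memmap instead of being copied through the pipe.

```python
import numpy as np

from joblib.pool import MemmappingPool


def total(x):
    return x.sum()


if __name__ == "__main__":
    data = np.ones(int(1e6))  # ~8 MB, well above the 1e6-byte threshold
    pool = MemmappingPool(processes=2, max_nbytes=1e6, mmap_mode="r")
    try:
        print(pool.map(total, [data, data]))  # [1000000.0, 1000000.0]
    finally:
        # terminate() also collects the temporary memmap folder.
        pool.terminate()
```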
+ if getattr(self, "_cached_temp_folder", None) is not None: + return self._cached_temp_folder + else: + self._cached_temp_folder = ( + self._temp_folder_manager.resolve_temp_folder_name() + ) # noqa + return self._cached_temp_folder diff --git a/py311/lib/python3.11/site-packages/joblib/testing.py b/py311/lib/python3.11/site-packages/joblib/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..3ac3e7027c7931ddc3f72482cd86ef5fa87cfdf8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/joblib/testing.py @@ -0,0 +1,96 @@ +""" +Helper for testing. +""" + +import os.path +import re +import subprocess +import sys +import threading +import warnings + +import _pytest +import pytest + +raises = pytest.raises +warns = pytest.warns +SkipTest = _pytest.runner.Skipped +skipif = pytest.mark.skipif +fixture = pytest.fixture +parametrize = pytest.mark.parametrize +timeout = pytest.mark.timeout +xfail = pytest.mark.xfail +param = pytest.param + + +def warnings_to_stdout(): + """Redirect all warnings to stdout.""" + showwarning_orig = warnings.showwarning + + def showwarning(msg, cat, fname, lno, file=None, line=0): + showwarning_orig(msg, cat, os.path.basename(fname), line, sys.stdout) + + warnings.showwarning = showwarning + # warnings.simplefilter('always') + + +def check_subprocess_call(cmd, timeout=5, stdout_regex=None, stderr_regex=None): + """Runs a command in a subprocess with timeout in seconds. + + A SIGTERM is sent after `timeout` and if it does not terminate, a + SIGKILL is sent after `2 * timeout`. + + Also checks returncode is zero, stdout if stdout_regex is set, and + stderr if stderr_regex is set. + """ + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + def terminate_process(): # pragma: no cover + """ + Attempt to terminate a leftover process spawned during test execution: + ideally this should not be needed but can help avoid clogging the CI + workers in case of deadlocks. + """ + warnings.warn(f"Timeout running {cmd}") + proc.terminate() + + def kill_process(): # pragma: no cover + """ + Kill a leftover process spawned during test execution: ideally this + should not be needed but can help avoid clogging the CI workers in + case of deadlocks. 
+ """ + warnings.warn(f"Timeout running {cmd}") + proc.kill() + + try: + if timeout is not None: + terminate_timer = threading.Timer(timeout, terminate_process) + terminate_timer.start() + kill_timer = threading.Timer(2 * timeout, kill_process) + kill_timer.start() + stdout, stderr = proc.communicate() + stdout, stderr = stdout.decode(), stderr.decode() + if proc.returncode != 0: + message = ("Non-zero return code: {}.\nStdout:\n{}\nStderr:\n{}").format( + proc.returncode, stdout, stderr + ) + raise ValueError(message) + + if stdout_regex is not None and not re.search(stdout_regex, stdout): + raise ValueError( + "Unexpected stdout: {!r} does not match:\n{!r}".format( + stdout_regex, stdout + ) + ) + if stderr_regex is not None and not re.search(stderr_regex, stderr): + raise ValueError( + "Unexpected stderr: {!r} does not match:\n{!r}".format( + stderr_regex, stderr + ) + ) + + finally: + if timeout is not None: + terminate_timer.cancel() + kill_timer.cancel() diff --git a/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/METADATA b/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..1f0723f8e6a0cf7cf4ce17a6988662cba4d90064 --- /dev/null +++ b/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/METADATA @@ -0,0 +1,120 @@ +Metadata-Version: 2.4 +Name: kiwisolver +Version: 1.4.9 +Summary: A fast implementation of the Cassowary constraint solver +Author-email: The Nucleic Development Team +Maintainer-email: "Matthieu C. Dartiailh" +License: ========================= + The Kiwi licensing terms + ========================= + Kiwi is licensed under the terms of the Modified BSD License (also known as + New or Revised BSD), as follows: + + Copyright (c) 2013-2025, Nucleic Development Team + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + + Neither the name of the Nucleic Development Team nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + About Kiwi + ---------- + Chris Colbert began the Kiwi project in December 2013 in an effort to + create a blisteringly fast UI constraint solver. Chris is still the + project lead. + + The Nucleic Development Team is the set of all contributors to the Nucleic + project and its subprojects. + + The core team that coordinates development on GitHub can be found here: + http://github.com/nucleic. The current team consists of: + + * Chris Colbert + + Our Copyright Policy + -------------------- + Nucleic uses a shared copyright model. Each contributor maintains copyright + over their contributions to Nucleic. But, it is important to note that these + contributions are typically only changes to the repositories. Thus, the Nucleic + source code, in its entirety is not the copyright of any single person or + institution. Instead, it is the collective copyright of the entire Nucleic + Development Team. If individual contributors want to maintain a record of what + changes/contributions they have specific copyright on, they should indicate + their copyright in the commit message of the change, when they commit the + change to one of the Nucleic repositories. + + With this in mind, the following banner should be used in any source code file + to indicate the copyright and license terms: + + #------------------------------------------------------------------------------ + # Copyright (c) 2013-2025, Nucleic Development Team. + # + # Distributed under the terms of the Modified BSD License. + # + # The full license is in the file LICENSE, distributed with this software. + #------------------------------------------------------------------------------ + +Project-URL: homepage, https://github.com/nucleic/kiwi +Project-URL: documentation, https://kiwisolver.readthedocs.io/en/latest/ +Project-URL: repository, https://github.com/nucleic/kiwi +Project-URL: changelog, https://github.com/nucleic/kiwi/blob/main/releasenotes.rst +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >=3.10 +Description-Content-Type: text/x-rst +License-File: LICENSE +Dynamic: license-file + +Welcome to Kiwi +=============== + +.. image:: https://github.com/nucleic/kiwi/workflows/Continuous%20Integration/badge.svg + :target: https://github.com/nucleic/kiwi/actions +.. image:: https://github.com/nucleic/kiwi/workflows/Documentation%20building/badge.svg + :target: https://github.com/nucleic/kiwi/actions +.. 
image:: https://codecov.io/gh/nucleic/kiwi/branch/main/graph/badge.svg + :target: https://codecov.io/gh/nucleic/kiwi +.. image:: https://readthedocs.org/projects/kiwisolver/badge/?version=latest + :target: https://kiwisolver.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +Kiwi is an efficient C++ implementation of the Cassowary constraint solving +algorithm. Kiwi is an implementation of the algorithm based on the +`seminal Cassowary paper `_. +It is *not* a refactoring of the original C++ solver. Kiwi has been designed +from the ground up to be lightweight and fast. Kiwi ranges from 10x to 500x +faster than the original Cassowary solver with typical use cases gaining a 40x +improvement. Memory savings are consistently > 5x. + +In addition to the C++ solver, Kiwi ships with hand-rolled Python bindings for +Python 3.7+. diff --git a/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/RECORD b/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..baa2a9483db4dd51c4a3e44b19fbfdb034a4d7d8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/RECORD @@ -0,0 +1,12 @@ +kiwisolver-1.4.9.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +kiwisolver-1.4.9.dist-info/METADATA,sha256=ytNKN7XLZs-huWdvM3IQN64thQubDHE3dHih1r8MfUA,6260 +kiwisolver-1.4.9.dist-info/RECORD,, +kiwisolver-1.4.9.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +kiwisolver-1.4.9.dist-info/WHEEL,sha256=_CFvICYDmZlAYHt8L7Zn3n-BGLj8dkZLQPp22Piy5JE,151 +kiwisolver-1.4.9.dist-info/licenses/LICENSE,sha256=zyB5nTLeDu-i6pBOOsUSL0eu1dNSkwdR62FyR2LEnZA,3289 +kiwisolver-1.4.9.dist-info/top_level.txt,sha256=xqwWj7oSHlpIjcw2QMJb8puTFPdjDBO78AZp9gjTh9c,11 +kiwisolver/__init__.py,sha256=4Sa-MNI1lRh3K1n9LEgKBeZXVVI-uJrD-xsk16jYR2c,1013 +kiwisolver/_cext.cpython-311-x86_64-linux-gnu.so,sha256=tPEEK3go9i9xHsTExLiXihctrdqN3Q52qDI4W5RXP80,5557080 +kiwisolver/_cext.pyi,sha256=-w7Otijw7d-9Rh_1-EiYtRn2L-STdvjoArh02szr8tA,8657 +kiwisolver/exceptions.py,sha256=haGECAifFjVqwT5esQ1sEiqsajW6Jydip16ieHPeL04,1242 +kiwisolver/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/WHEEL b/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7cc1bea2cb3d38c1dba6db468730cd8fd970d117 --- /dev/null +++ b/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: false +Tag: cp311-cp311-manylinux_2_17_x86_64 +Tag: cp311-cp311-manylinux2014_x86_64 + diff --git a/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..9b85884d1a91ed346d4f939d15d27ad1e33a5894 --- /dev/null +++ b/py311/lib/python3.11/site-packages/kiwisolver-1.4.9.dist-info/top_level.txt @@ -0,0 +1 @@ +kiwisolver diff --git a/py311/lib/python3.11/site-packages/markdown/__init__.py 
b/py311/lib/python3.11/site-packages/markdown/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9674d6e060fd0633f920699f030ef447b025d58e --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown/__init__.py @@ -0,0 +1,48 @@ +# Python Markdown + +# A Python implementation of John Gruber's Markdown. + +# - Documentation: https://python-markdown.github.io/ +# - GitHub: https://github.com/Python-Markdown/markdown/ +# - PyPI: https://pypi.org/project/Markdown/ + +# Started by Manfred Stienstra (http://www.dwerg.net/). +# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +# Currently maintained by Waylan Limberg (https://github.com/waylan), +# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +# - Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later) +# - Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +# - Copyright 2004 Manfred Stienstra (the original version) + +# License: BSD (see LICENSE.md for details). + +""" +Python-Markdown provides two public functions ([`markdown.markdown`][] and [`markdown.markdownFromFile`][]) +both of which wrap the public class [`markdown.Markdown`][]. All submodules support these public functions +and class and/or provide extension support. + +Modules: + core: Core functionality. + preprocessors: Pre-processors. + blockparser: Core Markdown block parser. + blockprocessors: Block processors. + treeprocessors: Tree processors. + inlinepatterns: Inline patterns. + postprocessors: Post-processors. + serializers: Serializers. + util: Utility functions. + htmlparser: HTML parser. + test_tools: Testing utilities. + extensions: Markdown extensions. +""" + +from __future__ import annotations + +from .core import Markdown, markdown, markdownFromFile +from .__meta__ import __version__, __version_info__ # noqa + +# For backward compatibility as some extensions expect it... +from .extensions import Extension # noqa + +__all__ = ['Markdown', 'markdown', 'markdownFromFile'] diff --git a/py311/lib/python3.11/site-packages/markdown/__main__.py b/py311/lib/python3.11/site-packages/markdown/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..259df6336f8829b27649a6f19058646d662a9213 --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown/__main__.py @@ -0,0 +1,150 @@ +# Python Markdown + +# A Python implementation of John Gruber's Markdown. + +# Documentation: https://python-markdown.github.io/ +# GitHub: https://github.com/Python-Markdown/markdown/ +# PyPI: https://pypi.org/project/Markdown/ + +# Started by Manfred Stienstra (http://www.dwerg.net/). +# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +# Currently maintained by Waylan Limberg (https://github.com/waylan), +# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later) +# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +# Copyright 2004 Manfred Stienstra (the original version) + +# License: BSD (see LICENSE.md for details). + +from __future__ import annotations + +import sys +import optparse +import warnings +import markdown +try: + # We use `unsafe_load` because users may need to pass in actual Python + # objects. As this is only available from the CLI, the user has much + # worse problems if an attacker can use this as an attach vector. 
+ from yaml import unsafe_load as yaml_load +except ImportError: # pragma: no cover + try: + # Fall back to PyYAML <5.1 + from yaml import load as yaml_load + except ImportError: + # Fall back to JSON + from json import load as yaml_load + +import logging +from logging import DEBUG, WARNING, CRITICAL + +logger = logging.getLogger('MARKDOWN') + + +def parse_options(args=None, values=None): + """ + Define and parse `optparse` options for command-line usage. + """ + usage = """%prog [options] [INPUTFILE] + (STDIN is assumed if no INPUTFILE is given)""" + desc = "A Python implementation of John Gruber's Markdown. " \ + "https://Python-Markdown.github.io/" + ver = "%%prog %s" % markdown.__version__ + + parser = optparse.OptionParser(usage=usage, description=desc, version=ver) + parser.add_option("-f", "--file", dest="filename", default=None, + help="Write output to OUTPUT_FILE. Defaults to STDOUT.", + metavar="OUTPUT_FILE") + parser.add_option("-e", "--encoding", dest="encoding", + help="Encoding for input and output files.",) + parser.add_option("-o", "--output_format", dest="output_format", + default='xhtml', metavar="OUTPUT_FORMAT", + help="Use output format 'xhtml' (default) or 'html'.") + parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol", + action='store_false', default=True, + help="Observe number of first item of ordered lists.") + parser.add_option("-x", "--extension", action="append", dest="extensions", + help="Load extension EXTENSION.", metavar="EXTENSION") + parser.add_option("-c", "--extension_configs", + dest="configfile", default=None, + help="Read extension configurations from CONFIG_FILE. " + "CONFIG_FILE must be of JSON or YAML format. YAML " + "format requires that a python YAML library be " + "installed. The parsed JSON or YAML must result in a " + "python dictionary which would be accepted by the " + "'extension_configs' keyword on the markdown.Markdown " + "class. 
The extensions must also be loaded with the " + "`--extension` option.", + metavar="CONFIG_FILE") + parser.add_option("-q", "--quiet", default=CRITICAL, + action="store_const", const=CRITICAL+10, dest="verbose", + help="Suppress all warnings.") + parser.add_option("-v", "--verbose", + action="store_const", const=WARNING, dest="verbose", + help="Print all warnings.") + parser.add_option("--noisy", + action="store_const", const=DEBUG, dest="verbose", + help="Print debug messages.") + + (options, args) = parser.parse_args(args, values) + + if len(args) == 0: + input_file = None + else: + input_file = args[0] + + if not options.extensions: + options.extensions = [] + + extension_configs = {} + if options.configfile: + with open( + options.configfile, mode="r", encoding=options.encoding + ) as fp: + try: + extension_configs = yaml_load(fp) + except Exception as e: + message = "Failed parsing extension config file: %s" % \ + options.configfile + e.args = (message,) + e.args[1:] + raise + + opts = { + 'input': input_file, + 'output': options.filename, + 'extensions': options.extensions, + 'extension_configs': extension_configs, + 'encoding': options.encoding, + 'output_format': options.output_format, + 'lazy_ol': options.lazy_ol + } + + return opts, options.verbose + + +def run(): # pragma: no cover + """Run Markdown from the command line.""" + + # Parse options and adjust logging level if necessary + options, logging_level = parse_options() + if not options: + sys.exit(2) + logger.setLevel(logging_level) + console_handler = logging.StreamHandler() + logger.addHandler(console_handler) + if logging_level <= WARNING: + # Ensure deprecation warnings get displayed + warnings.filterwarnings('default') + logging.captureWarnings(True) + warn_logger = logging.getLogger('py.warnings') + warn_logger.addHandler(console_handler) + + # Run + markdown.markdownFromFile(**options) + + +if __name__ == '__main__': # pragma: no cover + # Support running module as a command line command. + # python -m markdown [options] [args] + run() diff --git a/py311/lib/python3.11/site-packages/markdown/__meta__.py b/py311/lib/python3.11/site-packages/markdown/__meta__.py new file mode 100644 index 0000000000000000000000000000000000000000..69823ae7469d64ece247b75a7864c8d2a8419f86 --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown/__meta__.py @@ -0,0 +1,51 @@ +# Python Markdown + +# A Python implementation of John Gruber's Markdown. + +# Documentation: https://python-markdown.github.io/ +# GitHub: https://github.com/Python-Markdown/markdown/ +# PyPI: https://pypi.org/project/Markdown/ + +# Started by Manfred Stienstra (http://www.dwerg.net/). +# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +# Currently maintained by Waylan Limberg (https://github.com/waylan), +# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later) +# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +# Copyright 2004 Manfred Stienstra (the original version) + +# License: BSD (see LICENSE.md for details). 
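
As a usage note on the command-line wrapper above: `run()` forwards the parsed options to `markdown.markdownFromFile`, so the CLI and the library entry point are interchangeable. A sketch of the equivalent library calls (the file names are illustrative):

```python
import markdown

# Roughly: python -m markdown -f out.html -x extra in.md
markdown.markdownFromFile(
    input="in.md",
    output="out.html",
    extensions=["extra"],
    output_format="xhtml",  # the CLI default
)

# Or convert a string in memory:
html = markdown.markdown("# Title\n\nSome *text*.")
```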
+ +# __version_info__ format: +# (major, minor, patch, dev/alpha/beta/rc/final, #) +# (1, 1, 2, 'dev', 0) => "1.1.2.dev0" +# (1, 1, 2, 'alpha', 1) => "1.1.2a1" +# (1, 2, 0, 'beta', 2) => "1.2b2" +# (1, 2, 0, 'rc', 4) => "1.2rc4" +# (1, 2, 0, 'final', 0) => "1.2" + +from __future__ import annotations + + +__version_info__ = (3, 10, 0, 'final', 0) + + +def _get_version(version_info): + " Returns a PEP 440-compliant version number from `version_info`. " + assert len(version_info) == 5 + assert version_info[3] in ('dev', 'alpha', 'beta', 'rc', 'final') + + parts = 2 if version_info[2] == 0 else 3 + v = '.'.join(map(str, version_info[:parts])) + + if version_info[3] == 'dev': + v += '.dev' + str(version_info[4]) + elif version_info[3] != 'final': + mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'} + v += mapping[version_info[3]] + str(version_info[4]) + + return v + + +__version__ = _get_version(__version_info__) diff --git a/py311/lib/python3.11/site-packages/markdown/blockparser.py b/py311/lib/python3.11/site-packages/markdown/blockparser.py new file mode 100644 index 0000000000000000000000000000000000000000..549c9ecd5a2ff01f84c2eccb53d537e18502a8fe --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown/blockparser.py @@ -0,0 +1,160 @@ +# Python Markdown + +# A Python implementation of John Gruber's Markdown. + +# Documentation: https://python-markdown.github.io/ +# GitHub: https://github.com/Python-Markdown/markdown/ +# PyPI: https://pypi.org/project/Markdown/ + +# Started by Manfred Stienstra (http://www.dwerg.net/). +# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +# Currently maintained by Waylan Limberg (https://github.com/waylan), +# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later) +# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +# Copyright 2004 Manfred Stienstra (the original version) + +# License: BSD (see LICENSE.md for details). + +""" +The block parser handles basic parsing of Markdown blocks. It doesn't concern +itself with inline elements such as `**bold**` or `*italics*`, but rather just +catches blocks, lists, quotes, etc. + +The `BlockParser` is made up of a bunch of `BlockProcessors`, each handling a +different type of block. Extensions may add/replace/remove `BlockProcessors` +as they need to alter how Markdown blocks are parsed. +""" + +from __future__ import annotations + +import xml.etree.ElementTree as etree +from typing import TYPE_CHECKING, Iterable, Any +from . import util + +if TYPE_CHECKING: # pragma: no cover + from markdown import Markdown + from .blockprocessors import BlockProcessor + + +class State(list): + """ Track the current and nested state of the parser. + + This utility class is used to track the state of the `BlockParser` and + support multiple levels if nesting. It's just a simple API wrapped around + a list. Each time a state is set, that state is appended to the end of the + list. Each time a state is reset, that state is removed from the end of + the list. + + Therefore, each time a state is set for a nested block, that state must be + reset when we back out of that level of nesting or the state could be + corrupted. + + While all the methods of a list object are available, only the three + defined below need be used. + + """ + + def set(self, state: Any): + """ Set a new state. """ + self.append(state) + + def reset(self) -> None: + """ Step back one step in nested state. 
""" + self.pop() + + def isstate(self, state: Any) -> bool: + """ Test that top (current) level is of given state. """ + if len(self): + return self[-1] == state + else: + return False + + +class BlockParser: + """ Parse Markdown blocks into an `ElementTree` object. + + A wrapper class that stitches the various `BlockProcessors` together, + looping through them and creating an `ElementTree` object. + + """ + + def __init__(self, md: Markdown): + """ Initialize the block parser. + + Arguments: + md: A Markdown instance. + + Attributes: + BlockParser.md (Markdown): A Markdown instance. + BlockParser.state (State): Tracks the nesting level of current location in document being parsed. + BlockParser.blockprocessors (util.Registry): A collection of + [`blockprocessors`][markdown.blockprocessors]. + + """ + self.blockprocessors: util.Registry[BlockProcessor] = util.Registry() + self.state = State() + self.md = md + + def parseDocument(self, lines: Iterable[str]) -> etree.ElementTree: + """ Parse a Markdown document into an `ElementTree`. + + Given a list of lines, an `ElementTree` object (not just a parent + `Element`) is created and the root element is passed to the parser + as the parent. The `ElementTree` object is returned. + + This should only be called on an entire document, not pieces. + + Arguments: + lines: A list of lines (strings). + + Returns: + An element tree. + """ + # Create an `ElementTree` from the lines + self.root = etree.Element(self.md.doc_tag) + self.parseChunk(self.root, '\n'.join(lines)) + return etree.ElementTree(self.root) + + def parseChunk(self, parent: etree.Element, text: str) -> None: + """ Parse a chunk of Markdown text and attach to given `etree` node. + + While the `text` argument is generally assumed to contain multiple + blocks which will be split on blank lines, it could contain only one + block. Generally, this method would be called by extensions when + block parsing is required. + + The `parent` `etree` Element passed in is altered in place. + Nothing is returned. + + Arguments: + parent: The parent element. + text: The text to parse. + + """ + self.parseBlocks(parent, text.split('\n\n')) + + def parseBlocks(self, parent: etree.Element, blocks: list[str]) -> None: + """ Process blocks of Markdown text and attach to given `etree` node. + + Given a list of `blocks`, each `blockprocessor` is stepped through + until there are no blocks left. While an extension could potentially + call this method directly, it's generally expected to be used + internally. + + This is a public method as an extension may need to add/alter + additional `BlockProcessors` which call this method to recursively + parse a nested block. + + Arguments: + parent: The parent element. + blocks: The blocks of text to parse. + + """ + while blocks: + for processor in self.blockprocessors: + if processor.test(parent, blocks[0]): + if processor.run(parent, blocks) is not False: + # run returns True or None + break diff --git a/py311/lib/python3.11/site-packages/markdown/blockprocessors.py b/py311/lib/python3.11/site-packages/markdown/blockprocessors.py new file mode 100644 index 0000000000000000000000000000000000000000..3ed4cf07fcf63852556939382e36bc2210e55e0d --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown/blockprocessors.py @@ -0,0 +1,641 @@ +# Python Markdown + +# A Python implementation of John Gruber's Markdown. 
+ +# Documentation: https://python-markdown.github.io/ +# GitHub: https://github.com/Python-Markdown/markdown/ +# PyPI: https://pypi.org/project/Markdown/ + +# Started by Manfred Stienstra (http://www.dwerg.net/). +# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +# Currently maintained by Waylan Limberg (https://github.com/waylan), +# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later) +# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +# Copyright 2004 Manfred Stienstra (the original version) + +# License: BSD (see LICENSE.md for details). + +""" +A block processor parses blocks of text and adds new elements to the ElementTree. Blocks of text, +separated from other text by blank lines, may have a different syntax and produce a differently +structured tree than other Markdown. Block processors excel at handling code formatting, equation +layouts, tables, etc. +""" + +from __future__ import annotations + +import logging +import re +import xml.etree.ElementTree as etree +from typing import TYPE_CHECKING, Any +from . import util +from .blockparser import BlockParser + +if TYPE_CHECKING: # pragma: no cover + from markdown import Markdown + +logger = logging.getLogger('MARKDOWN') + + +def build_block_parser(md: Markdown, **kwargs: Any) -> BlockParser: + """ Build the default block parser used by Markdown. """ + parser = BlockParser(md) + parser.blockprocessors.register(EmptyBlockProcessor(parser), 'empty', 100) + parser.blockprocessors.register(ListIndentProcessor(parser), 'indent', 90) + parser.blockprocessors.register(CodeBlockProcessor(parser), 'code', 80) + parser.blockprocessors.register(HashHeaderProcessor(parser), 'hashheader', 70) + parser.blockprocessors.register(SetextHeaderProcessor(parser), 'setextheader', 60) + parser.blockprocessors.register(HRProcessor(parser), 'hr', 50) + parser.blockprocessors.register(OListProcessor(parser), 'olist', 40) + parser.blockprocessors.register(UListProcessor(parser), 'ulist', 30) + parser.blockprocessors.register(BlockQuoteProcessor(parser), 'quote', 20) + parser.blockprocessors.register(ReferenceProcessor(parser), 'reference', 15) + parser.blockprocessors.register(ParagraphProcessor(parser), 'paragraph', 10) + return parser + + +class BlockProcessor: + """ Base class for block processors. + + Each subclass will provide the methods below to work with the source and + tree. Each processor will need to define it's own `test` and `run` + methods. The `test` method should return True or False, to indicate + whether the current block should be processed by this processor. If the + test passes, the parser will call the processors `run` method. + + Attributes: + BlockProcessor.parser (BlockParser): The `BlockParser` instance this is attached to. + BlockProcessor.tab_length (int): The tab length set on the `Markdown` instance. + + """ + + def __init__(self, parser: BlockParser): + self.parser = parser + self.tab_length = parser.md.tab_length + + def lastChild(self, parent: etree.Element) -> etree.Element | None: + """ Return the last child of an `etree` element. """ + if len(parent): + return parent[-1] + else: + return None + + def detab(self, text: str, length: int | None = None) -> tuple[str, str]: + """ Remove a tab from the front of each line of the given text. 
""" + if length is None: + length = self.tab_length + newtext = [] + lines = text.split('\n') + for line in lines: + if line.startswith(' ' * length): + newtext.append(line[length:]) + elif not line.strip(): + newtext.append('') + else: + break + return '\n'.join(newtext), '\n'.join(lines[len(newtext):]) + + def looseDetab(self, text: str, level: int = 1) -> str: + """ Remove a tab from front of lines but allowing dedented lines. """ + lines = text.split('\n') + for i in range(len(lines)): + if lines[i].startswith(' '*self.tab_length*level): + lines[i] = lines[i][self.tab_length*level:] + return '\n'.join(lines) + + def test(self, parent: etree.Element, block: str) -> bool: + """ Test for block type. Must be overridden by subclasses. + + As the parser loops through processors, it will call the `test` + method on each to determine if the given block of text is of that + type. This method must return a boolean `True` or `False`. The + actual method of testing is left to the needs of that particular + block type. It could be as simple as `block.startswith(some_string)` + or a complex regular expression. As the block type may be different + depending on the parent of the block (i.e. inside a list), the parent + `etree` element is also provided and may be used as part of the test. + + Keyword arguments: + parent: An `etree` element which will be the parent of the block. + block: A block of text from the source which has been split at blank lines. + """ + pass # pragma: no cover + + def run(self, parent: etree.Element, blocks: list[str]) -> bool | None: + """ Run processor. Must be overridden by subclasses. + + When the parser determines the appropriate type of a block, the parser + will call the corresponding processor's `run` method. This method + should parse the individual lines of the block and append them to + the `etree`. + + Note that both the `parent` and `etree` keywords are pointers + to instances of the objects which should be edited in place. Each + processor must make changes to the existing objects as there is no + mechanism to return new/different objects to replace them. + + This means that this method should be adding `SubElements` or adding text + to the parent, and should remove (`pop`) or add (`insert`) items to + the list of blocks. + + If `False` is returned, this will have the same effect as returning `False` + from the `test` method. + + Keyword arguments: + parent: An `etree` element which is the parent of the current block. + blocks: A list of all remaining blocks of the document. + """ + pass # pragma: no cover + + +class ListIndentProcessor(BlockProcessor): + """ Process children of list items. + + Example + + * a list item + process this part + + or this part + + """ + + ITEM_TYPES = ['li'] + """ List of tags used for list items. """ + LIST_TYPES = ['ul', 'ol'] + """ Types of lists this processor can operate on. 
""" + + def __init__(self, *args): + super().__init__(*args) + self.INDENT_RE = re.compile(r'^(([ ]{%s})+)' % self.tab_length) + + def test(self, parent: etree.Element, block: str) -> bool: + return block.startswith(' '*self.tab_length) and \ + not self.parser.state.isstate('detabbed') and \ + (parent.tag in self.ITEM_TYPES or + (len(parent) and parent[-1] is not None and + (parent[-1].tag in self.LIST_TYPES))) + + def run(self, parent: etree.Element, blocks: list[str]) -> None: + block = blocks.pop(0) + level, sibling = self.get_level(parent, block) + block = self.looseDetab(block, level) + + self.parser.state.set('detabbed') + if parent.tag in self.ITEM_TYPES: + # It's possible that this parent has a `ul` or `ol` child list + # with a member. If that is the case, then that should be the + # parent. This is intended to catch the edge case of an indented + # list whose first member was parsed previous to this point + # see `OListProcessor` + if len(parent) and parent[-1].tag in self.LIST_TYPES: + self.parser.parseBlocks(parent[-1], [block]) + else: + # The parent is already a `li`. Just parse the child block. + self.parser.parseBlocks(parent, [block]) + elif sibling.tag in self.ITEM_TYPES: + # The sibling is a `li`. Use it as parent. + self.parser.parseBlocks(sibling, [block]) + elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES: + # The parent is a list (`ol` or `ul`) which has children. + # Assume the last child `li` is the parent of this block. + if sibling[-1].text: + # If the parent `li` has text, that text needs to be moved to a `p` + # The `p` must be 'inserted' at beginning of list in the event + # that other children already exist i.e.; a nested sub-list. + p = etree.Element('p') + p.text = sibling[-1].text + sibling[-1].text = '' + sibling[-1].insert(0, p) + self.parser.parseChunk(sibling[-1], block) + else: + self.create_item(sibling, block) + self.parser.state.reset() + + def create_item(self, parent: etree.Element, block: str) -> None: + """ Create a new `li` and parse the block with it as the parent. """ + li = etree.SubElement(parent, 'li') + self.parser.parseBlocks(li, [block]) + + def get_level(self, parent: etree.Element, block: str) -> tuple[int, etree.Element]: + """ Get level of indentation based on list level. """ + # Get indent level + m = self.INDENT_RE.match(block) + if m: + indent_level = len(m.group(1))/self.tab_length + else: + indent_level = 0 + if self.parser.state.isstate('list'): + # We're in a tight-list - so we already are at correct parent. + level = 1 + else: + # We're in a loose-list - so we need to find parent. + level = 0 + # Step through children of tree to find matching indent level. + while indent_level > level: + child = self.lastChild(parent) + if (child is not None and + (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES)): + if child.tag in self.LIST_TYPES: + level += 1 + parent = child + else: + # No more child levels. If we're short of `indent_level`, + # we have a code block. So we stop here. + break + return level, parent + + +class CodeBlockProcessor(BlockProcessor): + """ Process code blocks. """ + + def test(self, parent: etree.Element, block: str) -> bool: + return block.startswith(' '*self.tab_length) + + def run(self, parent: etree.Element, blocks: list[str]) -> None: + sibling = self.lastChild(parent) + block = blocks.pop(0) + theRest = '' + if (sibling is not None and sibling.tag == "pre" and + len(sibling) and sibling[0].tag == "code"): + # The previous block was a code block. 
As blank lines do not start + # new code blocks, append this block to the previous, adding back + # line breaks removed from the split into a list. + code = sibling[0] + block, theRest = self.detab(block) + code.text = util.AtomicString( + '{}\n{}\n'.format(code.text, util.code_escape(block.rstrip())) + ) + else: + # This is a new code block. Create the elements and insert text. + pre = etree.SubElement(parent, 'pre') + code = etree.SubElement(pre, 'code') + block, theRest = self.detab(block) + code.text = util.AtomicString('%s\n' % util.code_escape(block.rstrip())) + if theRest: + # This block contained unindented line(s) after the first indented + # line. Insert these lines as the first block of the master blocks + # list for future processing. + blocks.insert(0, theRest) + + +class BlockQuoteProcessor(BlockProcessor): + """ Process blockquotes. """ + + RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)') + + def test(self, parent: etree.Element, block: str) -> bool: + return bool(self.RE.search(block)) and not util.nearing_recursion_limit() + + def run(self, parent: etree.Element, blocks: list[str]) -> None: + block = blocks.pop(0) + m = self.RE.search(block) + if m: + before = block[:m.start()] # Lines before blockquote + # Pass lines before blockquote in recursively for parsing first. + self.parser.parseBlocks(parent, [before]) + # Remove `> ` from beginning of each line. + block = '\n'.join( + [self.clean(line) for line in block[m.start():].split('\n')] + ) + sibling = self.lastChild(parent) + if sibling is not None and sibling.tag == "blockquote": + # Previous block was a blockquote so set that as this blocks parent + quote = sibling + else: + # This is a new blockquote. Create a new parent element. + quote = etree.SubElement(parent, 'blockquote') + # Recursively parse block with blockquote as parent. + # change parser state so blockquotes embedded in lists use `p` tags + self.parser.state.set('blockquote') + self.parser.parseChunk(quote, block) + self.parser.state.reset() + + def clean(self, line: str) -> str: + """ Remove `>` from beginning of a line. """ + m = self.RE.match(line) + if line.strip() == ">": + return "" + elif m: + return m.group(2) + else: + return line + + +class OListProcessor(BlockProcessor): + """ Process ordered list blocks. """ + + TAG: str = 'ol' + """ The tag used for the the wrapping element. """ + STARTSWITH: str = '1' + """ + The integer (as a string ) with which the list starts. For example, if a list is initialized as + `3. Item`, then the `ol` tag will be assigned an HTML attribute of `starts="3"`. Default: `"1"`. + """ + LAZY_OL: bool = True + """ Ignore `STARTSWITH` if `True`. """ + SIBLING_TAGS: list[str] = ['ol', 'ul'] + """ + Markdown does not require the type of a new list item match the previous list item type. + This is the list of types which can be mixed. + """ + + def __init__(self, parser: BlockParser): + super().__init__(parser) + # Detect an item (`1. item`). `group(1)` contains contents of item. + self.RE = re.compile(r'^[ ]{0,%d}\d+\.[ ]+(.*)' % (self.tab_length - 1)) + # Detect items on secondary lines. they can be of either list type. 
+ self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.)|[*+-])[ ]+(.*)' % + (self.tab_length - 1)) + # Detect indented (nested) items of either type + self.INDENT_RE = re.compile(r'^[ ]{%d,%d}((\d+\.)|[*+-])[ ]+.*' % + (self.tab_length, self.tab_length * 2 - 1)) + + def test(self, parent: etree.Element, block: str) -> bool: + return bool(self.RE.match(block)) + + def run(self, parent: etree.Element, blocks: list[str]) -> None: + # Check for multiple items in one block. + items = self.get_items(blocks.pop(0)) + sibling = self.lastChild(parent) + + if sibling is not None and sibling.tag in self.SIBLING_TAGS: + # Previous block was a list item, so set that as parent + lst = sibling + # make sure previous item is in a `p` - if the item has text, + # then it isn't in a `p` + if lst[-1].text: + # since it's possible there are other children for this + # sibling, we can't just `SubElement` the `p`, we need to + # insert it as the first item. + p = etree.Element('p') + p.text = lst[-1].text + lst[-1].text = '' + lst[-1].insert(0, p) + # if the last item has a tail, then the tail needs to be put in a `p` + # likely only when a header is not followed by a blank line + lch = self.lastChild(lst[-1]) + if lch is not None and lch.tail: + p = etree.SubElement(lst[-1], 'p') + p.text = lch.tail.lstrip() + lch.tail = '' + + # parse first block differently as it gets wrapped in a `p`. + li = etree.SubElement(lst, 'li') + self.parser.state.set('looselist') + firstitem = items.pop(0) + self.parser.parseBlocks(li, [firstitem]) + self.parser.state.reset() + elif parent.tag in ['ol', 'ul']: + # this catches the edge case of a multi-item indented list whose + # first item is in a blank parent-list item: + # * * subitem1 + # * subitem2 + # see also `ListIndentProcessor` + lst = parent + else: + # This is a new list so create parent with appropriate tag. + lst = etree.SubElement(parent, self.TAG) + # Check if a custom start integer is set + if not self.LAZY_OL and self.STARTSWITH != '1': + lst.attrib['start'] = self.STARTSWITH + + self.parser.state.set('list') + # Loop through items in block, recursively parsing each with the + # appropriate parent. + for item in items: + if item.startswith(' '*self.tab_length): + # Item is indented. Parse with last item as parent + self.parser.parseBlocks(lst[-1], [item]) + else: + # New item. Create `li` and parse with it as parent + li = etree.SubElement(lst, 'li') + self.parser.parseBlocks(li, [item]) + self.parser.state.reset() + + def get_items(self, block: str) -> list[str]: + """ Break a block into list items. """ + items = [] + for line in block.split('\n'): + m = self.CHILD_RE.match(line) + if m: + # This is a new list item + # Check first item for the start index + if not items and self.TAG == 'ol': + # Detect the integer value of first list item + INTEGER_RE = re.compile(r'(\d+)') + self.STARTSWITH = INTEGER_RE.match(m.group(1)).group() + # Append to the list + items.append(m.group(3)) + elif self.INDENT_RE.match(line): + # This is an indented (possibly nested) item. + if items[-1].startswith(' '*self.tab_length): + # Previous item was indented. Append to that item. + items[-1] = '{}\n{}'.format(items[-1], line) + else: + items.append(line) + else: + # This is another line of previous item. Append to that item. + items[-1] = '{}\n{}'.format(items[-1], line) + return items + + +class UListProcessor(OListProcessor): + """ Process unordered list blocks. """ + + TAG: str = 'ul' + """ The tag used for the the wrapping element. 
""" + + def __init__(self, parser: BlockParser): + super().__init__(parser) + # Detect an item (`1. item`). `group(1)` contains contents of item. + self.RE = re.compile(r'^[ ]{0,%d}[*+-][ ]+(.*)' % (self.tab_length - 1)) + + +class HashHeaderProcessor(BlockProcessor): + """ Process Hash Headers. """ + + # Detect a header at start of any line in block + RE = re.compile(r'(?:^|\n)(?P#{1,6})(?P
    (?:\\.|[^\\])*?)#*(?:\n|$)') + + def test(self, parent: etree.Element, block: str) -> bool: + return bool(self.RE.search(block)) + + def run(self, parent: etree.Element, blocks: list[str]) -> None: + block = blocks.pop(0) + m = self.RE.search(block) + if m: + before = block[:m.start()] # All lines before header + after = block[m.end():] # All lines after header + if before: + # As the header was not the first line of the block and the + # lines before the header must be parsed first, + # recursively parse this lines as a block. + self.parser.parseBlocks(parent, [before]) + # Create header using named groups from RE + h = etree.SubElement(parent, 'h%d' % len(m.group('level'))) + h.text = m.group('header').strip() + if after: + # Insert remaining lines as first block for future parsing. + if self.parser.state.isstate('looselist'): + # This is a weird edge case where a header is a child of a loose list + # and there is no blank line after the header. To ensure proper + # parsing, the line(s) after need to be detabbed. See #1443. + after = self.looseDetab(after) + blocks.insert(0, after) + else: # pragma: no cover + # This should never happen, but just in case... + logger.warn("We've got a problem header: %r" % block) + + +class SetextHeaderProcessor(BlockProcessor): + """ Process Setext-style Headers. """ + + # Detect Setext-style header. Must be first 2 lines of block. + RE = re.compile(r'^.*?\n[=-]+[ ]*(\n|$)', re.MULTILINE) + + def test(self, parent: etree.Element, block: str) -> bool: + return bool(self.RE.match(block)) + + def run(self, parent: etree.Element, blocks: list[str]) -> None: + lines = blocks.pop(0).split('\n') + # Determine level. `=` is 1 and `-` is 2. + if lines[1].startswith('='): + level = 1 + else: + level = 2 + h = etree.SubElement(parent, 'h%d' % level) + h.text = lines[0].strip() + if len(lines) > 2: + # Block contains additional lines. Add to master blocks for later. + blocks.insert(0, '\n'.join(lines[2:])) + + +class HRProcessor(BlockProcessor): + """ Process Horizontal Rules. """ + + # Python's `re` module doesn't officially support atomic grouping. However you can fake it. + # See https://stackoverflow.com/a/13577411/866026 + RE = r'^[ ]{0,3}(?=(?P(-+[ ]{0,2}){3,}|(_+[ ]{0,2}){3,}|(\*+[ ]{0,2}){3,}))(?P=atomicgroup)[ ]*$' + # Detect hr on any line of a block. + SEARCH_RE = re.compile(RE, re.MULTILINE) + + def test(self, parent: etree.Element, block: str) -> bool: + m = self.SEARCH_RE.search(block) + if m: + # Save match object on class instance so we can use it later. + self.match = m + return True + return False + + def run(self, parent: etree.Element, blocks: list[str]) -> None: + block = blocks.pop(0) + match = self.match + # Check for lines in block before `hr`. + prelines = block[:match.start()].rstrip('\n') + if prelines: + # Recursively parse lines before `hr` so they get parsed first. + self.parser.parseBlocks(parent, [prelines]) + # create hr + etree.SubElement(parent, 'hr') + # check for lines in block after `hr`. + postlines = block[match.end():].lstrip('\n') + if postlines: + # Add lines after `hr` to master blocks for later parsing. + blocks.insert(0, postlines) + + +class EmptyBlockProcessor(BlockProcessor): + """ Process blocks that are empty or start with an empty line. 
""" + + def test(self, parent: etree.Element, block: str) -> bool: + return not block or block.startswith('\n') + + def run(self, parent: etree.Element, blocks: list[str]) -> None: + block = blocks.pop(0) + filler = '\n\n' + if block: + # Starts with empty line + # Only replace a single line. + filler = '\n' + # Save the rest for later. + theRest = block[1:] + if theRest: + # Add remaining lines to master blocks for later. + blocks.insert(0, theRest) + sibling = self.lastChild(parent) + if (sibling is not None and sibling.tag == 'pre' and + len(sibling) and sibling[0].tag == 'code'): + # Last block is a code block. Append to preserve whitespace. + sibling[0].text = util.AtomicString( + '{}{}'.format(sibling[0].text, filler) + ) + + +class ReferenceProcessor(BlockProcessor): + """ Process link references. """ + RE = re.compile( + r'^[ ]{0,3}\[([^\[\]]*)\]:[ ]*\n?[ ]*([^\s]+)[ ]*(?:\n[ ]*)?((["\'])(.*)\4[ ]*|\((.*)\)[ ]*)?$', re.MULTILINE + ) + + def test(self, parent: etree.Element, block: str) -> bool: + return True + + def run(self, parent: etree.Element, blocks: list[str]) -> bool: + block = blocks.pop(0) + m = self.RE.search(block) + if m: + id = m.group(1).strip().lower() + link = m.group(2).lstrip('<').rstrip('>') + title = m.group(5) or m.group(6) + self.parser.md.references[id] = (link, title) + if block[m.end():].strip(): + # Add any content after match back to blocks as separate block + blocks.insert(0, block[m.end():].lstrip('\n')) + if block[:m.start()].strip(): + # Add any content before match back to blocks as separate block + blocks.insert(0, block[:m.start()].rstrip('\n')) + return True + # No match. Restore block. + blocks.insert(0, block) + return False + + +class ParagraphProcessor(BlockProcessor): + """ Process Paragraph blocks. """ + + def test(self, parent: etree.Element, block: str) -> bool: + return True + + def run(self, parent: etree.Element, blocks: list[str]) -> None: + block = blocks.pop(0) + if block.strip(): + # Not a blank block. Add to parent, otherwise throw it away. + if self.parser.state.isstate('list'): + # The parent is a tight-list. + # + # Check for any children. This will likely only happen in a + # tight-list when a header isn't followed by a blank line. + # For example: + # + # * # Header + # Line 2 of list item - not part of header. + sibling = self.lastChild(parent) + if sibling is not None: + # Insert after sibling. + if sibling.tail: + sibling.tail = '{}\n{}'.format(sibling.tail, block) + else: + sibling.tail = '\n%s' % block + else: + # Append to parent.text + if parent.text: + parent.text = '{}\n{}'.format(parent.text, block) + else: + parent.text = block.lstrip() + else: + # Create a regular paragraph + p = etree.SubElement(parent, 'p') + p.text = block.lstrip() diff --git a/py311/lib/python3.11/site-packages/markdown/core.py b/py311/lib/python3.11/site-packages/markdown/core.py new file mode 100644 index 0000000000000000000000000000000000000000..11cb5adc90dca19a2f2640670278561ed76eae3a --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown/core.py @@ -0,0 +1,503 @@ +# Python Markdown + +# A Python implementation of John Gruber's Markdown. + +# Documentation: https://python-markdown.github.io/ +# GitHub: https://github.com/Python-Markdown/markdown/ +# PyPI: https://pypi.org/project/Markdown/ + +# Started by Manfred Stienstra (http://www.dwerg.net/). +# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). 
+# Currently maintained by Waylan Limberg (https://github.com/waylan), +# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later) +# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +# Copyright 2004 Manfred Stienstra (the original version) + +# License: BSD (see LICENSE.md for details). + +from __future__ import annotations + +import codecs +import sys +import logging +import importlib +from typing import TYPE_CHECKING, Any, BinaryIO, Callable, ClassVar, Mapping, Sequence +from . import util +from .preprocessors import build_preprocessors +from .blockprocessors import build_block_parser +from .treeprocessors import build_treeprocessors +from .inlinepatterns import build_inlinepatterns +from .postprocessors import build_postprocessors +from .extensions import Extension +from .serializers import to_html_string, to_xhtml_string +from .util import BLOCK_LEVEL_ELEMENTS + +if TYPE_CHECKING: # pragma: no cover + from xml.etree.ElementTree import Element + +__all__ = ['Markdown', 'markdown', 'markdownFromFile'] + + +logger = logging.getLogger('MARKDOWN') + + +class Markdown: + """ + A parser which converts Markdown to HTML. + + Attributes: + Markdown.tab_length (int): The number of spaces which correspond to a single tab. Default: `4`. + Markdown.ESCAPED_CHARS (list[str]): List of characters which get the backslash escape treatment. + Markdown.block_level_elements (list[str]): List of HTML tags which get treated as block-level elements. + See [`markdown.util.BLOCK_LEVEL_ELEMENTS`][] for the full list of elements. + Markdown.registeredExtensions (list[Extension]): List of extensions which have called + [`registerExtension`][markdown.Markdown.registerExtension] during setup. + Markdown.doc_tag (str): Element used to wrap document. Default: `div`. + Markdown.stripTopLevelTags (bool): Indicates whether the `doc_tag` should be removed. Default: 'True'. + Markdown.references (dict[str, tuple[str, str]]): A mapping of link references found in a parsed document + where the key is the reference name and the value is a tuple of the URL and title. + Markdown.htmlStash (util.HtmlStash): The instance of the `HtmlStash` used by an instance of this class. + Markdown.output_formats (dict[str, Callable[xml.etree.ElementTree.Element]]): A mapping of known output + formats by name and their respective serializers. Each serializer must be a callable which accepts an + [`Element`][xml.etree.ElementTree.Element] and returns a `str`. + Markdown.output_format (str): The output format set by + [`set_output_format`][markdown.Markdown.set_output_format]. + Markdown.serializer (Callable[xml.etree.ElementTree.Element]): The serializer set by + [`set_output_format`][markdown.Markdown.set_output_format]. + Markdown.preprocessors (util.Registry): A collection of [`preprocessors`][markdown.preprocessors]. + Markdown.parser (blockparser.BlockParser): A collection of [`blockprocessors`][markdown.blockprocessors]. + Markdown.inlinePatterns (util.Registry): A collection of [`inlinepatterns`][markdown.inlinepatterns]. + Markdown.treeprocessors (util.Registry): A collection of [`treeprocessors`][markdown.treeprocessors]. + Markdown.postprocessors (util.Registry): A collection of [`postprocessors`][markdown.postprocessors]. 
+ + """ + + doc_tag = "div" # Element used to wrap document - later removed + + output_formats: ClassVar[dict[str, Callable[[Element], str]]] = { + 'html': to_html_string, + 'xhtml': to_xhtml_string, + } + """ + A mapping of known output formats by name and their respective serializers. Each serializer must be a + callable which accepts an [`Element`][xml.etree.ElementTree.Element] and returns a `str`. + """ + + def __init__(self, **kwargs): + """ + Creates a new Markdown instance. + + Keyword Arguments: + extensions (list[Extension | str]): A list of extensions. + + If an item is an instance of a subclass of [`markdown.extensions.Extension`][], + the instance will be used as-is. If an item is of type `str`, it is passed + to [`build_extension`][markdown.Markdown.build_extension] with its corresponding + `extension_configs` and the returned instance of [`markdown.extensions.Extension`][] + is used. + extension_configs (dict[str, dict[str, Any]]): Configuration settings for extensions. + output_format (str): Format of output. Supported formats are: + + * `xhtml`: Outputs XHTML style tags. Default. + * `html`: Outputs HTML style tags. + tab_length (int): Length of tabs in the source. Default: `4` + + """ + + self.tab_length: int = kwargs.get('tab_length', 4) + + self.ESCAPED_CHARS: list[str] = [ + '\\', '`', '*', '_', '{', '}', '[', ']', '(', ')', '>', '#', '+', '-', '.', '!' + ] + """ List of characters which get the backslash escape treatment. """ + + self.block_level_elements: list[str] = BLOCK_LEVEL_ELEMENTS.copy() + + self.registeredExtensions: list[Extension] = [] + self.docType = "" # TODO: Maybe delete this. It does not appear to be used anymore. + self.stripTopLevelTags: bool = True + + self.build_parser() + + self.references: dict[str, tuple[str, str]] = {} + self.htmlStash: util.HtmlStash = util.HtmlStash() + self.registerExtensions(extensions=kwargs.get('extensions', []), + configs=kwargs.get('extension_configs', {})) + self.set_output_format(kwargs.get('output_format', 'xhtml')) + self.reset() + + def build_parser(self) -> Markdown: + """ + Build the parser from the various parts. + + Assigns a value to each of the following attributes on the class instance: + + * **`Markdown.preprocessors`** ([`Registry`][markdown.util.Registry]) -- A collection of + [`preprocessors`][markdown.preprocessors]. + * **`Markdown.parser`** ([`BlockParser`][markdown.blockparser.BlockParser]) -- A collection of + [`blockprocessors`][markdown.blockprocessors]. + * **`Markdown.inlinePatterns`** ([`Registry`][markdown.util.Registry]) -- A collection of + [`inlinepatterns`][markdown.inlinepatterns]. + * **`Markdown.treeprocessors`** ([`Registry`][markdown.util.Registry]) -- A collection of + [`treeprocessors`][markdown.treeprocessors]. + * **`Markdown.postprocessors`** ([`Registry`][markdown.util.Registry]) -- A collection of + [`postprocessors`][markdown.postprocessors]. + + This method could be redefined in a subclass to build a custom parser which is made up of a different + combination of processors and patterns. + + """ + self.preprocessors = build_preprocessors(self) + self.parser = build_block_parser(self) + self.inlinePatterns = build_inlinepatterns(self) + self.treeprocessors = build_treeprocessors(self) + self.postprocessors = build_postprocessors(self) + return self + + def registerExtensions( + self, + extensions: Sequence[Extension | str], + configs: Mapping[str, dict[str, Any]] + ) -> Markdown: + """ + Load a list of extensions into an instance of the `Markdown` class. 
+
+        Arguments:
+            extensions (list[Extension | str]): A list of extensions.
+
+                If an item is an instance of a subclass of [`markdown.extensions.Extension`][],
+                the instance will be used as-is. If an item is of type `str`, it is passed
+                to [`build_extension`][markdown.Markdown.build_extension] with its corresponding `configs` and the
+                returned instance of [`markdown.extensions.Extension`][] is used.
+            configs (dict[str, dict[str, Any]]): Configuration settings for extensions.
+
+        """
+        for ext in extensions:
+            if isinstance(ext, str):
+                ext = self.build_extension(ext, configs.get(ext, {}))
+            if isinstance(ext, Extension):
+                ext.extendMarkdown(self)
+                logger.debug(
+                    'Successfully loaded extension "%s.%s".'
+                    % (ext.__class__.__module__, ext.__class__.__name__)
+                )
+            elif ext is not None:
+                raise TypeError(
+                    'Extension "{}.{}" must be of type: "{}.{}"'.format(
+                        ext.__class__.__module__, ext.__class__.__name__,
+                        Extension.__module__, Extension.__name__
+                    )
+                )
+        return self
+
+    def build_extension(self, ext_name: str, configs: Mapping[str, Any]) -> Extension:
+        """
+        Build extension from a string name, then return an instance using the given `configs`.
+
+        Arguments:
+            ext_name: Name of extension as a string.
+            configs: Configuration settings for extension.
+
+        Returns:
+            An instance of the extension with the given configuration settings.
+
+        First attempt to load an entry point. The string name must be registered as an entry point in the
+        `markdown.extensions` group which points to a subclass of the [`markdown.extensions.Extension`][] class.
+        If multiple distributions have registered the same name, the first one found is returned.
+
+        If no entry point is found, assume dot notation (`path.to.module:ClassName`). Load the specified class and
+        return an instance. If no class is specified, import the module and call a `makeExtension` function and return
+        the [`markdown.extensions.Extension`][] instance returned by that function.
+        """
+        configs = dict(configs)
+
+        entry_points = [ep for ep in util.get_installed_extensions() if ep.name == ext_name]
+        if entry_points:
+            ext = entry_points[0].load()
+            return ext(**configs)
+
+        # Get class name (if provided): `path.to.module:ClassName`
+        ext_name, class_name = ext_name.split(':', 1) if ':' in ext_name else (ext_name, '')
+
+        try:
+            module = importlib.import_module(ext_name)
+            logger.debug(
+                'Successfully imported extension module "%s".' % ext_name
+            )
+        except ImportError as e:
+            message = 'Failed loading extension "%s".' % ext_name
+            e.args = (message,) + e.args[1:]
+            raise
+
+        if class_name:
+            # Load given class name from module.
+            return getattr(module, class_name)(**configs)
+        else:
+            # Expect `makeExtension()` function to return a class.
+            try:
+                return module.makeExtension(**configs)
+            except AttributeError as e:
+                message = e.args[0]
+                message = "Failed to initiate extension " \
+                          "'%s': %s" % (ext_name, message)
+                e.args = (message,) + e.args[1:]
+                raise
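
The dot-notation and entry-point paths described above can be exercised directly; a short
sketch (the `toc` name and `permalink` option belong to the bundled TOC extension, shown
here purely as an illustration):

    import markdown
    from markdown.extensions.toc import TocExtension

    # Entry point name, with configs keyed by that same name.
    md = markdown.Markdown(extensions=['toc'],
                           extension_configs={'toc': {'permalink': True}})

    # Dot notation, optionally with an explicit class name after a colon.
    md = markdown.Markdown(extensions=['markdown.extensions.toc:TocExtension'])

    # Or skip string loading entirely and pass an instance, used as-is.
    md = markdown.Markdown(extensions=[TocExtension(permalink=True)])
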
+ + """ + self.registeredExtensions.append(extension) + return self + + def reset(self) -> Markdown: + """ + Resets all state variables to prepare the parser instance for new input. + + Called once upon creation of a class instance. Should be called manually between calls + to [`Markdown.convert`][markdown.Markdown.convert]. + """ + self.htmlStash.reset() + self.references.clear() + + for extension in self.registeredExtensions: + if hasattr(extension, 'reset'): + extension.reset() + + return self + + def set_output_format(self, format: str) -> Markdown: + """ + Set the output format for the class instance. + + Arguments: + format: Must be a known value in `Markdown.output_formats`. + + """ + self.output_format = format.lower().rstrip('145') # ignore number + try: + self.serializer = self.output_formats[self.output_format] + except KeyError as e: + valid_formats = list(self.output_formats.keys()) + valid_formats.sort() + message = 'Invalid Output Format: "%s". Use one of %s.' \ + % (self.output_format, + '"' + '", "'.join(valid_formats) + '"') + e.args = (message,) + e.args[1:] + raise + return self + + # Note: the `tag` argument is type annotated `Any` as ElementTree uses many various objects as tags. + # As there is no standardization in ElementTree, the type of a given tag is unpredictable. + def is_block_level(self, tag: Any) -> bool: + """ + Check if the given `tag` is a block level HTML tag. + + Returns `True` for any string listed in `Markdown.block_level_elements`. A `tag` which is + not a string always returns `False`. + + """ + if isinstance(tag, str): + return tag.lower().rstrip('/') in self.block_level_elements + # Some ElementTree tags are not strings, so return False. + return False + + def convert(self, source: str) -> str: + """ + Convert a Markdown string to a string in the specified output format. + + Arguments: + source: Markdown formatted text as Unicode or ASCII string. + + Returns: + A string in the specified output format. + + Markdown parsing takes place in five steps: + + 1. A bunch of [`preprocessors`][markdown.preprocessors] munge the input text. + 2. A [`BlockParser`][markdown.blockparser.BlockParser] parses the high-level structural elements of the + pre-processed text into an [`ElementTree`][xml.etree.ElementTree.ElementTree] object. + 3. A bunch of [`treeprocessors`][markdown.treeprocessors] are run against the + [`ElementTree`][xml.etree.ElementTree.ElementTree] object. One such `treeprocessor` + ([`markdown.treeprocessors.InlineProcessor`][]) runs [`inlinepatterns`][markdown.inlinepatterns] + against the [`ElementTree`][xml.etree.ElementTree.ElementTree] object, parsing inline markup. + 4. Some [`postprocessors`][markdown.postprocessors] are run against the text after the + [`ElementTree`][xml.etree.ElementTree.ElementTree] object has been serialized into text. + 5. The output is returned as a string. + + """ + + # Fix up the source text + if not source.strip(): + return '' # a blank Unicode string + + try: + source = str(source) + except UnicodeDecodeError as e: # pragma: no cover + # Customize error message while maintaining original traceback + e.reason += '. -- Note: Markdown only accepts Unicode input!' + raise + + # Split into lines and run the line preprocessors. + self.lines = source.split("\n") + for prep in self.preprocessors: + self.lines = prep.run(self.lines) + + # Parse the high-level elements. 
+    def convert(self, source: str) -> str:
+        """
+        Convert a Markdown string to a string in the specified output format.
+
+        Arguments:
+            source: Markdown formatted text as Unicode or ASCII string.
+
+        Returns:
+            A string in the specified output format.
+
+        Markdown parsing takes place in five steps:
+
+        1. A bunch of [`preprocessors`][markdown.preprocessors] munge the input text.
+        2. A [`BlockParser`][markdown.blockparser.BlockParser] parses the high-level structural elements of the
+           pre-processed text into an [`ElementTree`][xml.etree.ElementTree.ElementTree] object.
+        3. A bunch of [`treeprocessors`][markdown.treeprocessors] are run against the
+           [`ElementTree`][xml.etree.ElementTree.ElementTree] object. One such `treeprocessor`
+           ([`markdown.treeprocessors.InlineProcessor`][]) runs [`inlinepatterns`][markdown.inlinepatterns]
+           against the [`ElementTree`][xml.etree.ElementTree.ElementTree] object, parsing inline markup.
+        4. Some [`postprocessors`][markdown.postprocessors] are run against the text after the
+           [`ElementTree`][xml.etree.ElementTree.ElementTree] object has been serialized into text.
+        5. The output is returned as a string.
+
+        """
+
+        # Fix up the source text
+        if not source.strip():
+            return ''  # a blank Unicode string
+
+        try:
+            source = str(source)
+        except UnicodeDecodeError as e:  # pragma: no cover
+            # Customize error message while maintaining original traceback
+            e.reason += '. -- Note: Markdown only accepts Unicode input!'
+            raise
+
+        # Split into lines and run the line preprocessors.
+        self.lines = source.split("\n")
+        for prep in self.preprocessors:
+            self.lines = prep.run(self.lines)
+
+        # Parse the high-level elements.
+        root = self.parser.parseDocument(self.lines).getroot()
+
+        # Run the tree-processors
+        for treeprocessor in self.treeprocessors:
+            newRoot = treeprocessor.run(root)
+            if newRoot is not None:
+                root = newRoot
+
+        # Serialize _properly_. Strip top-level tags.
+        output = self.serializer(root)
+        if self.stripTopLevelTags:
+            try:
+                start = output.index(
+                    '<%s>' % self.doc_tag) + len(self.doc_tag) + 2
+                end = output.rindex('</%s>' % self.doc_tag)
+                output = output[start:end].strip()
+            except ValueError as e:  # pragma: no cover
+                if output.strip().endswith('<%s />' % self.doc_tag):
+                    # We have an empty document
+                    output = ''
+                else:
+                    # We have a serious problem
+                    raise ValueError('Markdown failed to strip top-level '
+                                     'tags. Document=%r' % output.strip()) from e
+
+        # Run the text post-processors
+        for pp in self.postprocessors:
+            output = pp.run(output)
+
+        return output.strip()
+
+    def convertFile(
+        self,
+        input: str | BinaryIO | None = None,
+        output: str | BinaryIO | None = None,
+        encoding: str | None = None,
+    ) -> Markdown:
+        """
+        Converts a Markdown file and returns the HTML as a Unicode string.
+
+        Decodes the file using the provided encoding (defaults to `utf-8`),
+        passes the file content to markdown, and outputs the HTML to either
+        the provided stream or the file with provided name, using the same
+        encoding as the source file. The
+        [`xmlcharrefreplace`](https://docs.python.org/3/library/codecs.html#error-handlers)
+        error handler is used when encoding the output.
+
+        **Note:** This is the only place that decoding and encoding of Unicode
+        takes place in Python-Markdown. (All other code is Unicode-in /
+        Unicode-out.)
+
+        Arguments:
+            input: File object or path. Reads from `stdin` if `None`.
+            output: File object or path. Writes to `stdout` if `None`.
+            encoding: Encoding of input and output files. Defaults to `utf-8`.
+
+        """
+
+        encoding = encoding or "utf-8"
+
+        # Read the source
+        if input:
+            if isinstance(input, str):
+                input_file = open(input, mode="r", encoding=encoding)
+            else:
+                input_file = codecs.getreader(encoding)(input)
+            text = input_file.read()
+            input_file.close()
+        else:
+            text = sys.stdin.read()
+
+        text = text.lstrip('\ufeff')  # remove the byte-order mark
+
+        # Convert
+        html = self.convert(text)
+
+        # Write to file or stdout
+        if output:
+            if isinstance(output, str):
+                output_file = codecs.open(output, "w",
+                                          encoding=encoding,
+                                          errors="xmlcharrefreplace")
+                output_file.write(html)
+                output_file.close()
+            else:
+                writer = codecs.getwriter(encoding)
+                output_file = writer(output, errors="xmlcharrefreplace")
+                output_file.write(html)
+                # Don't close here. User may want to write more.
+        else:
+            # Encode manually and write bytes to stdout.
+            html = html.encode(encoding, "xmlcharrefreplace")
+            sys.stdout.buffer.write(html)
+
+        return self
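
The intended multi-document pattern is to reuse one instance and `reset()` between
conversions, since `references` and the `htmlStash` carry per-document state; a minimal
sketch:

    import markdown

    md = markdown.Markdown(output_format='html')
    for source in ('# One', '# Two'):
        html = md.convert(source)
        md.reset()  # clear link references and stashed HTML between documents
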
+
+
+"""
+EXPORTED FUNCTIONS
+=============================================================================
+
+Those are the two functions we really mean to export: `markdown()` and
+`markdownFromFile()`.
+"""
+
+
+def markdown(text: str, **kwargs: Any) -> str:
+    """
+    Convert a markdown string to HTML and return HTML as a Unicode string.
+
+    This is a shortcut function for [`Markdown`][markdown.Markdown] class to cover the most
+    basic use case. It initializes an instance of [`Markdown`][markdown.Markdown], loads the
+    necessary extensions and runs the parser on the given text.
+
+    Arguments:
+        text: Markdown formatted text as Unicode or ASCII string.
+
+    Keyword arguments:
+        **kwargs: Any arguments accepted by the Markdown class.
+
+    Returns:
+        A string in the specified output format.
+
+    """
+    md = Markdown(**kwargs)
+    return md.convert(text)
+
+
+def markdownFromFile(**kwargs: Any):
+    """
+    Read Markdown text from a file and write output to a file or a stream.
+
+    This is a shortcut function which initializes an instance of [`Markdown`][markdown.Markdown],
+    and calls the [`convertFile`][markdown.Markdown.convertFile] method rather than
+    [`convert`][markdown.Markdown.convert].
+
+    Keyword arguments:
+        input (str | BinaryIO): A file name or readable object.
+        output (str | BinaryIO): A file name or writable object.
+        encoding (str): Encoding of input and output.
+        **kwargs: Any arguments accepted by the `Markdown` class.
+
+    """
+    md = Markdown(**kwargs)
+    md.convertFile(kwargs.get('input', None),
+                   kwargs.get('output', None),
+                   kwargs.get('encoding', None))
diff --git a/py311/lib/python3.11/site-packages/markdown/htmlparser.py b/py311/lib/python3.11/site-packages/markdown/htmlparser.py
new file mode 100644
index 0000000000000000000000000000000000000000..658cd37e01d77a90355b7447695bf19f2079dab3
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/markdown/htmlparser.py
@@ -0,0 +1,413 @@
+# Python Markdown
+
+# A Python implementation of John Gruber's Markdown.
+
+# Documentation: https://python-markdown.github.io/
+# GitHub: https://github.com/Python-Markdown/markdown/
+# PyPI: https://pypi.org/project/Markdown/
+
+# Started by Manfred Stienstra (http://www.dwerg.net/).
+# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
+# Currently maintained by Waylan Limberg (https://github.com/waylan),
+# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
+
+# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
+# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
+# Copyright 2004 Manfred Stienstra (the original version)
+
+# License: BSD (see LICENSE.md for details).
+
+"""
+This module imports a copy of [`html.parser.HTMLParser`][] and modifies it heavily through monkey-patches.
+A copy is imported rather than the module being directly imported as this ensures that the user can import
+and use the unmodified library for their own needs.
+"""
+
+from __future__ import annotations
+
+import re
+import importlib.util
+import sys
+from typing import TYPE_CHECKING, Sequence
+
+if TYPE_CHECKING:  # pragma: no cover
+    from markdown import Markdown
+
+# Included for versions which do not have current comment fix
+commentclose = re.compile(r'--!?>')
+commentabruptclose = re.compile(r'-?>')
+
+# Import a copy of the html.parser lib as `htmlparser` so we can monkeypatch it.
+# Users can still do `from html import parser` and get the default behavior.
+spec = importlib.util.find_spec('html.parser')
+htmlparser = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(htmlparser)
+sys.modules['htmlparser'] = htmlparser
+
+# This is a hack. We are sneaking in `</>` so we can capture it without the HTML parser
+# throwing it away. When we see it, we will process it as data.
+htmlparser.starttagopen = re.compile('<[a-zA-Z]|</>')
+
+# Monkeypatch `HTMLParser` to only accept `?>` to close Processing Instructions.
+htmlparser.piclose = re.compile(r'\?>')
+# Monkeypatch `HTMLParser` to only recognize entity references with a closing semicolon.
+htmlparser.entityref = re.compile(r'&([a-zA-Z][-.a-zA-Z0-9]*);') +# Monkeypatch `HTMLParser` to no longer support partial entities. We are always feeding a complete block, +# so the 'incomplete' functionality is unnecessary. As the `entityref` regex is run right before incomplete, +# and the two regex are the same, then incomplete will simply never match and we avoid the logic within. +htmlparser.incomplete = htmlparser.entityref +# Monkeypatch `HTMLParser` to not accept a backtick in a tag name, attribute name, or bare value. +htmlparser.locatestarttagend_tolerant = re.compile(r""" + <[a-zA-Z][^`\t\n\r\f />\x00]* # tag name <= added backtick here + (?:[\s/]* # optional whitespace before attribute name + (?:(?<=['"\s/])[^`\s/>][^\s/=>]* # attribute name <= added backtick here + (?:\s*=+\s* # value indicator + (?:'[^']*' # LITA-enclosed value + |"[^"]*" # LIT-enclosed value + |(?!['"])[^`>\s]* # bare value <= added backtick here + ) + (?:\s*,)* # possibly followed by a comma + )?(?:\s|/(?!>))* + )* + )? + \s* # trailing whitespace +""", re.VERBOSE) +htmlparser.locatetagend = re.compile(r""" + [a-zA-Z][^`\t\n\r\f />]* # tag name + [\t\n\r\f /]* # optional whitespace before attribute name + (?:(?<=['"\t\n\r\f /])[^`\t\n\r\f />][^\t\n\r\f /=>]* # attribute name + (?:= # value indicator + (?:'[^']*' # LITA-enclosed value + |"[^"]*" # LIT-enclosed value + |(?!['"])[^>\t\n\r\f ]* # bare value + ) + )? + [\t\n\r\f /]* # possibly followed by a space + )* + >? +""", re.VERBOSE) + +# Match a blank line at the start of a block of text (two newlines). +# The newlines may be preceded by additional whitespace. +blank_line_re = re.compile(r'^([ ]*\n){2}') + + +class HTMLExtractor(htmlparser.HTMLParser): + """ + Extract raw HTML from text. + + The raw HTML is stored in the [`htmlStash`][markdown.util.HtmlStash] of the + [`Markdown`][markdown.Markdown] instance passed to `md` and the remaining text + is stored in `cleandoc` as a list of strings. + """ + + def __init__(self, md: Markdown, *args, **kwargs): + if 'convert_charrefs' not in kwargs: + kwargs['convert_charrefs'] = False + + # Block tags that should contain no content (self closing) + self.empty_tags = set(['hr']) + + self.lineno_start_cache = [0] + + self.override_comment_update = False + + # This calls self.reset + super().__init__(*args, **kwargs) + self.md = md + + def reset(self): + """Reset this instance. Loses all unprocessed data.""" + self.inraw = False + self.intail = False + self.stack: list[str] = [] # When `inraw==True`, stack contains a list of tags + self._cache: list[str] = [] + self.cleandoc: list[str] = [] + self.lineno_start_cache = [0] + + super().reset() + + def close(self): + """Handle any buffered data.""" + super().close() + if len(self.rawdata): + # Temp fix for https://bugs.python.org/issue41989 + # TODO: remove this when the bug is fixed in all supported Python versions. + if self.convert_charrefs and not self.cdata_elem: # pragma: no cover + self.handle_data(htmlparser.unescape(self.rawdata)) + else: + self.handle_data(self.rawdata) + # Handle any unclosed tags. + if len(self._cache): + self.cleandoc.append(self.md.htmlStash.store(''.join(self._cache))) + self._cache = [] + + @property + def line_offset(self) -> int: + """Returns char index in `self.rawdata` for the start of the current line. """ + for ii in range(len(self.lineno_start_cache)-1, self.lineno-1): + last_line_start_pos = self.lineno_start_cache[ii] + lf_pos = self.rawdata.find('\n', last_line_start_pos) + if lf_pos == -1: + # No more newlines found. 
Use end of raw data as start of line beyond end.
+                lf_pos = len(self.rawdata)
+            self.lineno_start_cache.append(lf_pos+1)
+
+        return self.lineno_start_cache[self.lineno-1]
+
+    def at_line_start(self) -> bool:
+        """
+        Returns True if current position is at start of line.
+
+        Allows for up to three blank spaces at start of line.
+        """
+        if self.offset == 0:
+            return True
+        if self.offset > 3:
+            return False
+        # Confirm up to first 3 chars are whitespace
+        return self.rawdata[self.line_offset:self.line_offset + self.offset].strip() == ''
+
+    def get_endtag_text(self, tag: str) -> str:
+        """
+        Returns the text of the end tag.
+
+        If it fails to extract the actual text from the raw data, it builds a closing tag with `tag`.
+        """
+        # Attempt to extract actual tag from raw source text
+        start = self.line_offset + self.offset
+        m = htmlparser.endendtag.search(self.rawdata, start)
+        if m:
+            return self.rawdata[start:m.end()]
+        else:  # pragma: no cover
+            # Failed to extract from raw data. Assume well formed and lowercase.
+            return '</{}>'.format(tag)
+
+    def handle_starttag(self, tag: str, attrs: Sequence[tuple[str, str]]):
+        # Handle tags that should always be empty and do not specify a closing tag
+        if tag in self.empty_tags:
+            self.handle_startendtag(tag, attrs)
+            return
+
+        if self.md.is_block_level(tag) and (self.intail or (self.at_line_start() and not self.inraw)):
+            # Started a new raw block. Prepare stack.
+            self.inraw = True
+            self.cleandoc.append('\n')
+
+        text = self.get_starttag_text()
+        if self.inraw:
+            self.stack.append(tag)
+            self._cache.append(text)
+        else:
+            self.cleandoc.append(text)
+            if tag in self.CDATA_CONTENT_ELEMENTS:
+                # This is presumably a standalone tag in a code span (see #1036).
+                self.clear_cdata_mode()
+
+    def handle_endtag(self, tag: str):
+        text = self.get_endtag_text(tag)
+
+        if self.inraw:
+            self._cache.append(text)
+            if tag in self.stack:
+                # Remove tag from stack
+                while self.stack:
+                    if self.stack.pop() == tag:
+                        break
+            if len(self.stack) == 0:
+                # End of raw block.
+                if blank_line_re.match(self.rawdata[self.line_offset + self.offset + len(text):]):
+                    # Preserve blank line and end of raw block.
+                    self._cache.append('\n')
+                else:
+                    # More content exists after `endtag`.
+                    self.intail = True
+                # Reset stack.
+                self.inraw = False
+                self.cleandoc.append(self.md.htmlStash.store(''.join(self._cache)))
+                # Insert blank line between this and next line.
+                self.cleandoc.append('\n\n')
+                self._cache = []
+        else:
+            self.cleandoc.append(text)
+
+    def handle_data(self, data: str):
+        if self.intail and '\n' in data:
+            self.intail = False
+        if self.inraw:
+            self._cache.append(data)
+        else:
+            self.cleandoc.append(data)
+
+    def handle_empty_tag(self, data: str, is_block: bool):
+        """ Handle empty tags (`<data>`). """
+        if self.inraw or self.intail:
+            # Append this to the existing raw block
+            self._cache.append(data)
+        elif self.at_line_start() and is_block:
+            # Handle this as a standalone raw block
+            if blank_line_re.match(self.rawdata[self.line_offset + self.offset + len(data):]):
+                # Preserve blank line after tag in raw block.
+                data += '\n'
+            else:
+                # More content exists after tag.
+                self.intail = True
+            item = self.cleandoc[-1] if self.cleandoc else ''
+            # If we only have one newline before block element, add another
+            if not item.endswith('\n\n') and item.endswith('\n'):
+                self.cleandoc.append('\n')
+            self.cleandoc.append(self.md.htmlStash.store(data))
+            # Insert blank line between this and next line.
+            self.cleandoc.append('\n\n')
+        else:
+            self.cleandoc.append(data)
+
+    def handle_startendtag(self, tag: str, attrs):
+        self.handle_empty_tag(self.get_starttag_text(), is_block=self.md.is_block_level(tag))
+
+    def handle_charref(self, name: str):
+        self.handle_empty_tag('&#{};'.format(name), is_block=False)
+
+    def handle_entityref(self, name: str):
+        self.handle_empty_tag('&{};'.format(name), is_block=False)
+
+    def handle_comment(self, data: str):
+        # Check if the comment is unclosed, if so, we need to override position
+        i = self.line_offset + self.offset + len(data) + 4
+        if self.rawdata[i:i + 3] != '-->':
+            self.handle_data('<')
+            self.override_comment_update = True
+            return
+        self.handle_empty_tag('<!--{}-->'.format(data), is_block=True)
+
+    def updatepos(self, i: int, j: int) -> int:
+        if self.override_comment_update:
+            self.override_comment_update = False
+            i = 0
+            j = 1
+        return super().updatepos(i, j)
+
+    def handle_decl(self, data: str):
+        self.handle_empty_tag('<!{}>'.format(data), is_block=True)
+
+    def handle_pi(self, data: str):
+        self.handle_empty_tag('<?{}?>'.format(data), is_block=True)
+
+    def unknown_decl(self, data: str):
+        end = ']]>' if data.startswith('CDATA[') else ']>'
+        self.handle_empty_tag('<![{}{}'.format(data, end), is_block=True)
+
+    def parse_pi(self, i: int) -> int:
+        if self.at_line_start() or self.intail:
+            return super().parse_pi(i)
+
+        # This is not the beginning of a raw block so treat as plain data
+        # and avoid consuming any tags which may follow (see #1066).
+        self.handle_data('<?')
+        return i + 2
+
+    def parse_html_declaration(self, i: int) -> int:
+        if self.at_line_start() or self.intail:
+            if self.rawdata[i:i+3] == '<!-':
+                # Use the comment parsing below so unclosed and abruptly closed
+                # comments are handled consistently across Python versions.
+                return self.parse_comment(i)
+            return super().parse_html_declaration(i)
+
+        # This is not the beginning of a raw block so treat as plain data
+        # and avoid consuming any tags which may follow (see #1066).
+        self.handle_data('<!')
+        return i + 2
+
+    def parse_comment(self, i: int, report: int = 1) -> int:
+        # Copy of the standard lib comment parsing with the current comment fix,
+        # using the `commentclose` and `commentabruptclose` patterns defined above.
+        rawdata = self.rawdata
+        if rawdata[i:i+4] != '<!--':
+            raise AssertionError('unexpected call to parse_comment()')
+        match = commentclose.search(rawdata, i+4)
+        if not match:
+            match = commentabruptclose.match(rawdata, i+4)
+            if match:
+                if report:
+                    self.handle_comment(rawdata[i+4: match.start()])
+                return match.end()
+            return -1
+        if report:
+            j = match.start()
+            self.handle_comment(rawdata[i+4: j])
+        return match.end()
+
+    def parse_bogus_comment(self, i: int, report: int = 0) -> int:
+        # Override the default behavior so that bogus comments get passed
+        # through unaltered by setting `report` to `0` (see #1425).
+        pos = super().parse_bogus_comment(i, report)
+        if pos == -1:  # pragma: no cover
+            return -1
+        self.handle_empty_tag(self.rawdata[i:pos], is_block=False)
+        return pos
+
+    # The rest has been copied from base class in standard lib to address #1036.
+    # As `__startag_text` is private, all references to it must be in this subclass.
+    # The last few lines of `parse_starttag` are reversed so that `handle_starttag`
+    # can override `cdata_mode` in certain situations (in a code span).
+    __starttag_text: str | None = None
+
+    def get_starttag_text(self) -> str:
+        """Return full source of start tag: `<...>`."""
+        return self.__starttag_text
+
+    def parse_starttag(self, i: int) -> int:  # pragma: no cover
+        # Treat `</>` as normal data as it is not a real tag.
+        if self.rawdata[i:i + 3] == '</>':
+            self.handle_data(self.rawdata[i:i + 3])
+            return i + 3
+
+        self.__starttag_text = None
+        endpos = self.check_for_whole_start_tag(i)
+        if endpos < 0:
+            self.handle_data(self.rawdata[i:i + 1])
+            return i + 1
+        rawdata = self.rawdata
+        self.__starttag_text = rawdata[i:endpos]
+
+        # Now parse the data between `i+1` and `j` into a tag and `attrs`
+        attrs = []
+        match = htmlparser.tagfind_tolerant.match(rawdata, i+1)
+        assert match, 'unexpected call to parse_starttag()'
+        k = match.end()
+        self.lasttag = tag = match.group(1).lower()
+        while k < endpos:
+            m = htmlparser.attrfind_tolerant.match(rawdata, k)
+            if not m:
+                break
+            attrname, rest, attrvalue = m.group(1, 2, 3)
+            if not rest:
+                attrvalue = None
+            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
+                    attrvalue[:1] == '"' == attrvalue[-1:]:  # noqa: E127
+                attrvalue = attrvalue[1:-1]
+            if attrvalue:
+                attrvalue = htmlparser.unescape(attrvalue)
+            attrs.append((attrname.lower(), attrvalue))
+            k = m.end()
+
+        end = rawdata[k:endpos].strip()
+        if end not in (">", "/>"):
+            lineno, offset = self.getpos()
+            if "\n" in self.__starttag_text:
+                lineno = lineno + self.__starttag_text.count("\n")
+                offset = len(self.__starttag_text) \
+                    - self.__starttag_text.rfind("\n")  # noqa: E127
+            else:
+                offset = offset + len(self.__starttag_text)
+            self.handle_data(rawdata[i:endpos])
+            return endpos
+        if end.endswith('/>'):
+            # XHTML-style empty tag: `<span attr="value" />`
+            self.handle_startendtag(tag, attrs)
+        else:
+            # *** set `cdata_mode` first so we can override it in `handle_starttag` (see #1036) ***
+            if tag in self.CDATA_CONTENT_ELEMENTS:
+                self.set_cdata_mode(tag)
+            self.handle_starttag(tag, attrs)
+        return endpos
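
The net effect of the extractor is easiest to see from the outside: block-level HTML that
starts at the left margin is stashed verbatim and skipped by the Markdown parser, while the
same markup used inline is kept but the surrounding text is still processed. A rough
illustration (output shapes, not exact whitespace):

    import markdown

    markdown.markdown('<div>*not emphasized*</div>')
    # -> '<div>*not emphasized*</div>'   (raw block, contents left alone)

    markdown.markdown('A *span* with <em>inline</em> HTML.')
    # -> '<p>A <em>span</em> with <em>inline</em> HTML.</p>'
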
diff --git a/py311/lib/python3.11/site-packages/markdown/inlinepatterns.py b/py311/lib/python3.11/site-packages/markdown/inlinepatterns.py
new file mode 100644
index 0000000000000000000000000000000000000000..13b3c35f8dd5a6e24ce89174c9a1e395e92b1017
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/markdown/inlinepatterns.py
@@ -0,0 +1,995 @@
+# Python Markdown
+
+# A Python implementation of John Gruber's Markdown.
+
+# Documentation: https://python-markdown.github.io/
+# GitHub: https://github.com/Python-Markdown/markdown/
+# PyPI: https://pypi.org/project/Markdown/
+
+# Started by Manfred Stienstra (http://www.dwerg.net/).
+# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
+# Currently maintained by Waylan Limberg (https://github.com/waylan),
+# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
+
+# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
+# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
+# Copyright 2004 Manfred Stienstra (the original version)
+
+# License: BSD (see LICENSE.md for details).
+
+"""
+In version 3.0, a new, more flexible inline processor was added, [`markdown.inlinepatterns.InlineProcessor`][]. The
+original inline patterns, which inherit from [`markdown.inlinepatterns.Pattern`][] or one of its children are still
+supported, though users are encouraged to migrate.
+
+The new `InlineProcessor` provides two major enhancements to `Patterns`:
+
+1. Inline Processors no longer need to match the entire block, so regular expressions no longer need to start with
+   `r'^(.*?)'` and end with `r'(.*?)$'`. This runs faster. The returned [`Match`][re.Match] object will only contain
+   what is explicitly matched in the pattern, and extension pattern groups now start with `m.group(1)`.
+
+2. The `handleMatch` method now takes an additional input called `data`, which is the entire block under analysis,
+   not just what is matched with the specified pattern. The method now returns the element *and* the indexes relative
+   to `data` that the return element is replacing (usually `m.start(0)` and `m.end(0)`). If the boundaries are
+   returned as `None`, it is assumed that the match did not take place, and nothing will be altered in `data`.
+
+   This allows handling of more complex constructs than regular expressions can handle, e.g., matching nested
+   brackets, and explicit control of the span "consumed" by the processor.
+
+"""
+
+from __future__ import annotations
+
+from . import util
+from typing import TYPE_CHECKING, Any, Collection, NamedTuple
+import re
+import xml.etree.ElementTree as etree
+from html import entities
+
+if TYPE_CHECKING:  # pragma: no cover
+    from markdown import Markdown
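
As a concrete illustration of the `handleMatch(m, data)` contract described in the module
docstring, here is a hypothetical processor that turns `--text--` into `<del>` elements
(the class names, pattern, and priority are all invented for the example):

    import re
    import xml.etree.ElementTree as etree

    from markdown import Markdown
    from markdown.extensions import Extension
    from markdown.inlinepatterns import InlineProcessor

    class DelInlineProcessor(InlineProcessor):
        def handleMatch(self, m, data):
            el = etree.Element('del')
            el.text = m.group(1)
            # Replace exactly the matched span of `data`.
            return el, m.start(0), m.end(0)

    class DelExtension(Extension):
        def extendMarkdown(self, md):
            md.inlinePatterns.register(DelInlineProcessor(r'--(.+?)--', md), 'del', 175)

    Markdown(extensions=[DelExtension()]).convert('a --b-- c')
    # -> '<p>a <del>b</del> c</p>'
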
+
+
+def build_inlinepatterns(md: Markdown, **kwargs: Any) -> util.Registry[InlineProcessor]:
+    """
+    Build the default set of inline patterns for Markdown.
+
+    The order in which processors and/or patterns are applied is very important - e.g. if we first replace
+    `http://.../` links with `<a>` tags and _then_ try to replace inline HTML, we would end up with a mess. So, we
+    apply the expressions in the following order:
+
+    * backticks and escaped characters have to be handled before everything else so that we can preempt any markdown
+      patterns by escaping them;
+
+    * then we handle the various types of links (auto-links must be handled before inline HTML);
+
+    * then we handle inline HTML. At this point we will simply replace all inline HTML strings with a placeholder
+      and add the actual HTML to a stash;
+
+    * finally we apply strong, emphasis, etc.
+
+    """
+    inlinePatterns = util.Registry()
+    inlinePatterns.register(BacktickInlineProcessor(BACKTICK_RE), 'backtick', 190)
+    inlinePatterns.register(EscapeInlineProcessor(ESCAPE_RE, md), 'escape', 180)
+    inlinePatterns.register(ReferenceInlineProcessor(REFERENCE_RE, md), 'reference', 170)
+    inlinePatterns.register(LinkInlineProcessor(LINK_RE, md), 'link', 160)
+    inlinePatterns.register(ImageInlineProcessor(IMAGE_LINK_RE, md), 'image_link', 150)
+    inlinePatterns.register(
+        ImageReferenceInlineProcessor(IMAGE_REFERENCE_RE, md), 'image_reference', 140
+    )
+    inlinePatterns.register(
+        ShortReferenceInlineProcessor(REFERENCE_RE, md), 'short_reference', 130
+    )
+    inlinePatterns.register(
+        ShortImageReferenceInlineProcessor(IMAGE_REFERENCE_RE, md), 'short_image_ref', 125
+    )
+    inlinePatterns.register(AutolinkInlineProcessor(AUTOLINK_RE, md), 'autolink', 120)
+    inlinePatterns.register(AutomailInlineProcessor(AUTOMAIL_RE, md), 'automail', 110)
+    inlinePatterns.register(SubstituteTagInlineProcessor(LINE_BREAK_RE, 'br'), 'linebreak', 100)
+    inlinePatterns.register(HtmlInlineProcessor(HTML_RE, md), 'html', 90)
+    inlinePatterns.register(HtmlInlineProcessor(ENTITY_RE, md), 'entity', 80)
+    inlinePatterns.register(SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 70)
+    inlinePatterns.register(AsteriskProcessor(r'\*'), 'em_strong', 60)
+    inlinePatterns.register(UnderscoreProcessor(r'_'), 'em_strong2', 50)
+    return inlinePatterns
+
+
+# The actual regular expressions for patterns
+# -----------------------------------------------------------------------------
+
+NOIMG = r'(?<!\!)'
+""" Match not an image (`!`). """
+
+BACKTICK_RE = r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\2(?!`))'
+""" Match backtick quoted string (`` `e=f()` `` or ``` ``e=f("`")`` ```). """
+
+ESCAPE_RE = r'\\(.)'
+""" Match a backslash escaped character (`\\<` or `\\*`). """
+
+EMPHASIS_RE = r'(\*)([^\*]+)\1'
+""" Match emphasis with an asterisk (`*emphasis*`). """
+
+STRONG_RE = r'(\*{2})(.+?)\1'
+""" Match strong with an asterisk (`**strong**`). """
+
+SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\1(?!\w)'
+""" Match strong with underscore while ignoring middle word underscores (`__smart__strong__`). """
+
+SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\1(?!\w)'
+""" Match emphasis with underscore while ignoring middle word underscores (`_smart_emphasis_`). """
+
+SMART_STRONG_EM_RE = r'(?<!\w)(\_)\1(?!\1)(.+?)(?<!\w)\1(?!\1)(.+?)\1{3}(?!\w)'
+""" Match strong emphasis with underscores (`__strong _em_ strong__`). """
+
+EM_STRONG_RE = r'(\*)\1{2}(.+?)\1(.*?)\1{2}'
+""" Match emphasis strong with asterisk (`***strongem***` or `***em*strong**`). """
+
+EM_STRONG2_RE = r'(_)\1{2}(.+?)\1(.*?)\1{2}'
+""" Match emphasis strong with underscores (`___emstrong___` or `___em_strong__`). """
+
+STRONG_EM_RE = r'(\*)\1{2}(.+?)\1{2}(.*?)\1'
+""" Match strong emphasis with asterisk (`***strong**em*`). """
+
+STRONG_EM2_RE = r'(_)\1{2}(.+?)\1{2}(.*?)\1'
+""" Match strong emphasis with underscores (`___strong__em_`). """
+
+STRONG_EM3_RE = r'(\*)\1(?!\1)([^*]+?)\1(?!\1)(.+?)\1{3}'
+""" Match strong emphasis with asterisk (`**strong*em***`). """
+
+LINK_RE = NOIMG + r'\['
+""" Match start of in-line link (`[text](url)` or `[text](<url>)` or `[text](url "title")`). """
+
+IMAGE_LINK_RE = r'\!\['
+""" Match start of in-line image link (`![alttxt](url)` or `![alttxt](<url>)`). """
+
+REFERENCE_RE = LINK_RE
+""" Match start of reference link (`[Label][3]`). """
+
+IMAGE_REFERENCE_RE = IMAGE_LINK_RE
+""" Match start of image reference (`![alt text][2]`). """
+
+NOT_STRONG_RE = r'((^|(?<=\s))(\*{1,3}|_{1,3})(?=\s|$))'
+""" Match a stand-alone `*` or `_`. """
+
+AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^<>]*)>'
+""" Match an automatic link (`<http://www.example.com>`). """
+
+AUTOMAIL_RE = r'<([^<> !]+@[^@<> ]+)>'
+""" Match an automatic email link (`<foo@example.com>`). """
+
+HTML_RE = (
+    r'(<(\/?[a-zA-Z][^<>@ ]*( [^<>]*)?|'  # Tag
+    r'!--(?:(?!<!--|-->).)*--|'  # Comment
+    r'[?](?:(?!<[?]|[?]>).)*[?]|'  # Processing instruction
+    r'!\[CDATA\[(?:(?!<!\[CDATA\[|\]\]>).)*\]\]'  # `CDATA`
+    ')>)'
+)
+""" Match an HTML tag (`<...>`). """
+
+ENTITY_RE = r'(&(?:\#[0-9]+|\#x[0-9a-fA-F]+|[a-zA-Z0-9]+);)'
+""" Match an HTML entity (`&#38;` (decimal) or `&#x26;` (hex) or `&amp;` (named)). """
+
+LINE_BREAK_RE = r'  \n'
+""" Match two spaces at end of line. """
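
A quick way to sanity-check these expressions in isolation (values are illustrative and use
the constants defined above):

    import re

    re.search(AUTOLINK_RE, 'See <https://example.com> for details').group(1)
    # -> 'https://example.com'

    m = re.search(BACKTICK_RE, 'a `code span` here')
    (m.group(2), m.group(3))
    # -> ('`', 'code span')
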
""" + + +def dequote(string: str) -> str: + """Remove quotes from around a string.""" + if ((string.startswith('"') and string.endswith('"')) or + (string.startswith("'") and string.endswith("'"))): + return string[1:-1] + else: + return string + + +class EmStrongItem(NamedTuple): + """Emphasis/strong pattern item.""" + pattern: re.Pattern[str] + builder: str + tags: str + + +# The pattern classes +# ----------------------------------------------------------------------------- + + +class Pattern: # pragma: no cover + """ + Base class that inline patterns subclass. + + Inline patterns are handled by means of `Pattern` subclasses, one per regular expression. + Each pattern object uses a single regular expression and must support the following methods: + [`getCompiledRegExp`][markdown.inlinepatterns.Pattern.getCompiledRegExp] and + [`handleMatch`][markdown.inlinepatterns.Pattern.handleMatch]. + + All the regular expressions used by `Pattern` subclasses must capture the whole block. For this + reason, they all start with `^(.*)` and end with `(.*)!`. When passing a regular expression on + class initialization, the `^(.*)` and `(.*)!` are added automatically and the regular expression + is pre-compiled. + + It is strongly suggested that the newer style [`markdown.inlinepatterns.InlineProcessor`][] that + use a more efficient and flexible search approach be used instead. However, the older style + `Pattern` remains for backward compatibility with many existing third-party extensions. + + """ + + ANCESTOR_EXCLUDES: Collection[str] = tuple() + """ + A collection of elements which are undesirable ancestors. The processor will be skipped if it + would cause the content to be a descendant of one of the listed tag names. + """ + + compiled_re: re.Pattern[str] + md: Markdown | None + + def __init__(self, pattern: str, md: Markdown | None = None): + """ + Create an instant of an inline pattern. + + Arguments: + pattern: A regular expression that matches a pattern. + md: An optional pointer to the instance of `markdown.Markdown` and is available as + `self.md` on the class instance. + + + """ + self.pattern = pattern + self.compiled_re = re.compile(r"^(.*?)%s(.*)$" % pattern, + re.DOTALL | re.UNICODE) + + self.md = md + + def getCompiledRegExp(self) -> re.Pattern: + """ Return a compiled regular expression. """ + return self.compiled_re + + def handleMatch(self, m: re.Match[str]) -> etree.Element | str: + """Return a ElementTree element from the given match. + + Subclasses should override this method. + + Arguments: + m: A match object containing a match of the pattern. + + Returns: An ElementTree Element object. + + """ + pass # pragma: no cover + + def type(self) -> str: + """ Return class name, to define pattern type """ + return self.__class__.__name__ + + def unescape(self, text: str) -> str: + """ Return unescaped text given text with an inline placeholder. """ + try: + stash = self.md.treeprocessors['inline'].stashed_nodes + except KeyError: # pragma: no cover + return text + + def get_stash(m): + id = m.group(1) + if id in stash: + value = stash.get(id) + if isinstance(value, str): + return value + else: + # An `etree` Element - return text content only + return ''.join(value.itertext()) + return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text) + + +class InlineProcessor(Pattern): + """ + Base class that inline processors subclass. + + This is the newer style inline processor that uses a more + efficient and flexible search approach. 
+ + """ + + def __init__(self, pattern: str, md: Markdown | None = None): + """ + Create an instant of an inline processor. + + Arguments: + pattern: A regular expression that matches a pattern. + md: An optional pointer to the instance of `markdown.Markdown` and is available as + `self.md` on the class instance. + + """ + self.pattern = pattern + self.compiled_re = re.compile(pattern, re.DOTALL | re.UNICODE) + + # API for Markdown to pass `safe_mode` into instance + self.safe_mode = False + self.md = md + + def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | str | None, int | None, int | None]: + """Return a ElementTree element from the given match and the + start and end index of the matched text. + + If `start` and/or `end` are returned as `None`, it will be + assumed that the processor did not find a valid region of text. + + Subclasses should override this method. + + Arguments: + m: A re match object containing a match of the pattern. + data: The buffer currently under analysis. + + Returns: + el: The ElementTree element, text or None. + start: The start of the region that has been matched or None. + end: The end of the region that has been matched or None. + + """ + pass # pragma: no cover + + +class SimpleTextPattern(Pattern): # pragma: no cover + """ Return a simple text of `group(2)` of a Pattern. """ + def handleMatch(self, m: re.Match[str]) -> str: + """ Return string content of `group(2)` of a matching pattern. """ + return m.group(2) + + +class SimpleTextInlineProcessor(InlineProcessor): + """ Return a simple text of `group(1)` of a Pattern. """ + def handleMatch(self, m: re.Match[str], data: str) -> tuple[str, int, int]: + """ Return string content of `group(1)` of a matching pattern. """ + return m.group(1), m.start(0), m.end(0) + + +class EscapeInlineProcessor(InlineProcessor): + """ Return an escaped character. """ + + def handleMatch(self, m: re.Match[str], data: str) -> tuple[str | None, int, int]: + """ + If the character matched by `group(1)` of a pattern is in [`ESCAPED_CHARS`][markdown.Markdown.ESCAPED_CHARS] + then return the integer representing the character's Unicode code point (as returned by [`ord`][]) wrapped + in [`util.STX`][markdown.util.STX] and [`util.ETX`][markdown.util.ETX]. + + If the matched character is not in [`ESCAPED_CHARS`][markdown.Markdown.ESCAPED_CHARS], then return `None`. + """ + + char = m.group(1) + if char in self.md.ESCAPED_CHARS: + return '{}{}{}'.format(util.STX, ord(char), util.ETX), m.start(0), m.end(0) + else: + return None, m.start(0), m.end(0) + + +class SimpleTagPattern(Pattern): # pragma: no cover + """ + Return element of type `tag` with a text attribute of `group(3)` + of a Pattern. + + """ + def __init__(self, pattern: str, tag: str): + """ + Create an instant of an simple tag pattern. + + Arguments: + pattern: A regular expression that matches a pattern. + tag: Tag of element. + + """ + Pattern.__init__(self, pattern) + self.tag = tag + """ The tag of the rendered element. """ + + def handleMatch(self, m: re.Match[str]) -> etree.Element: + """ + Return [`Element`][xml.etree.ElementTree.Element] of type `tag` with the string in `group(3)` of a + matching pattern as the Element's text. + """ + el = etree.Element(self.tag) + el.text = m.group(3) + return el + + +class SimpleTagInlineProcessor(InlineProcessor): + """ + Return element of type `tag` with a text attribute of `group(2)` + of a Pattern. + + """ + def __init__(self, pattern: str, tag: str): + """ + Create an instant of an simple tag processor. 
+
+        Arguments:
+            pattern: A regular expression that matches a pattern.
+            tag: Tag of element.
+
+        """
+        InlineProcessor.__init__(self, pattern)
+        self.tag = tag
+        """ The tag of the rendered element. """
+
+    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]:  # pragma: no cover
+        """
+        Return [`Element`][xml.etree.ElementTree.Element] of type `tag` with the string in `group(2)` of a
+        matching pattern as the Element's text.
+        """
+        el = etree.Element(self.tag)
+        el.text = m.group(2)
+        return el, m.start(0), m.end(0)
+
+
+class SubstituteTagPattern(SimpleTagPattern):  # pragma: no cover
+    """ Return an element of type `tag` with no children. """
+    def handleMatch(self, m: re.Match[str]) -> etree.Element:
+        """ Return empty [`Element`][xml.etree.ElementTree.Element] of type `tag`. """
+        return etree.Element(self.tag)
+
+
+class SubstituteTagInlineProcessor(SimpleTagInlineProcessor):
+    """ Return an element of type `tag` with no children. """
+    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]:
+        """ Return empty [`Element`][xml.etree.ElementTree.Element] of type `tag`. """
+        return etree.Element(self.tag), m.start(0), m.end(0)
+
+
+class BacktickInlineProcessor(InlineProcessor):
+    """ Return a `<code>` element containing the escaped matching text. """
+    def __init__(self, pattern: str):
+        InlineProcessor.__init__(self, pattern)
+        self.ESCAPED_BSLASH = '{}{}{}'.format(util.STX, ord('\\'), util.ETX)
+        self.tag = 'code'
+        """ The tag of the rendered element. """
+
+    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | str, int, int]:
+        """
+        If the match contains `group(3)` of a pattern, then return a `code`
+        [`Element`][xml.etree.ElementTree.Element] which contains HTML escaped text (with
+        [`code_escape`][markdown.util.code_escape]) as an [`AtomicString`][markdown.util.AtomicString].
+
+        If the match does not contain `group(3)` then return the text of `group(1)` backslash escaped.
+
+        """
+        if m.group(3):
+            el = etree.Element(self.tag)
+            el.text = util.AtomicString(util.code_escape(m.group(3).strip()))
+            return el, m.start(0), m.end(0)
+        else:
+            return m.group(1).replace('\\\\', self.ESCAPED_BSLASH), m.start(0), m.end(0)
+
+
+class DoubleTagPattern(SimpleTagPattern):  # pragma: no cover
+    """Return an ElementTree element nested in tag2 nested in tag1.
+
+    Useful for strong emphasis etc.
+
+    """
+    def handleMatch(self, m: re.Match[str]) -> etree.Element:
+        """
+        Return [`Element`][xml.etree.ElementTree.Element] in following format:
+        `<tag1><tag2>group(3)</tag2>group(4)</tag1>` where `group(4)` is optional.
+
+        """
+        tag1, tag2 = self.tag.split(",")
+        el1 = etree.Element(tag1)
+        el2 = etree.SubElement(el1, tag2)
+        el2.text = m.group(3)
+        if len(m.groups()) == 5:
+            el2.tail = m.group(4)
+        return el1
+
+
+class DoubleTagInlineProcessor(SimpleTagInlineProcessor):
+    """Return an ElementTree element nested in tag2 nested in tag1.
+
+    Useful for strong emphasis etc.
+
+    """
+    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]:  # pragma: no cover
+        """
+        Return [`Element`][xml.etree.ElementTree.Element] in following format:
+        `<tag1><tag2>group(2)</tag2>group(3)</tag1>` where `group(3)` is optional.
+
+        """
+        tag1, tag2 = self.tag.split(",")
+        el1 = etree.Element(tag1)
+        el2 = etree.SubElement(el1, tag2)
+        el2.text = m.group(2)
+        if len(m.groups()) == 3:
+            el2.tail = m.group(3)
+        return el1, m.start(0), m.end(0)
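
These helpers cover most "wrap the match in a tag" cases without a custom `handleMatch`; a
sketch of wiring them up inside an extension (the pattern strings, names, and priorities are
invented for the example):

    from markdown import Markdown
    from markdown.extensions import Extension
    from markdown.inlinepatterns import DoubleTagInlineProcessor, SimpleTagInlineProcessor

    class MarksExtension(Extension):
        def extendMarkdown(self, md):
            # The empty `()` supplies group(1); group(2) becomes the element text.
            md.inlinePatterns.register(
                SimpleTagInlineProcessor(r'()\+\+(.+?)\+\+', 'ins'), 'ins', 176)
            # `tag` is a comma-separated pair: <mark><small>text</small></mark>.
            md.inlinePatterns.register(
                DoubleTagInlineProcessor(r'()==(.+?)==', 'mark,small'), 'mark', 177)

    Markdown(extensions=[MarksExtension()]).convert('++added++ and ==noted==')
    # -> '<p><ins>added</ins> and <mark><small>noted</small></mark></p>'
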
""" + def handleMatch(self, m: re.Match[str], data: str) -> tuple[str, int, int]: + """ Store the text of `group(1)` of a pattern and return a placeholder string. """ + rawhtml = self.backslash_unescape(self.unescape(m.group(1))) + place_holder = self.md.htmlStash.store(rawhtml) + return place_holder, m.start(0), m.end(0) + + def unescape(self, text: str) -> str: + """ Return unescaped text given text with an inline placeholder. """ + try: + stash = self.md.treeprocessors['inline'].stashed_nodes + except KeyError: # pragma: no cover + return text + + def get_stash(m: re.Match[str]) -> str: + id = m.group(1) + value = stash.get(id) + if value is not None: + try: + return self.md.serializer(value) + except Exception: + return r'\%s' % value + + return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text) + + def backslash_unescape(self, text: str) -> str: + """ Return text with backslash escapes undone (backslashes are restored). """ + try: + RE = self.md.treeprocessors['unescape'].RE + except KeyError: # pragma: no cover + return text + + def _unescape(m: re.Match[str]) -> str: + return chr(int(m.group(1))) + + return RE.sub(_unescape, text) + + +class AsteriskProcessor(InlineProcessor): + """Emphasis processor for handling strong and em matches inside asterisks.""" + + PATTERNS = [ + EmStrongItem(re.compile(EM_STRONG_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'), + EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'), + EmStrongItem(re.compile(STRONG_EM3_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'), + EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'), + EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em') + ] + """ The various strong and emphasis patterns handled by this processor. """ + + def build_single(self, m: re.Match[str], tag: str, idx: int) -> etree.Element: + """Return single tag.""" + el1 = etree.Element(tag) + text = m.group(2) + self.parse_sub_patterns(text, el1, None, idx) + return el1 + + def build_double(self, m: re.Match[str], tags: str, idx: int) -> etree.Element: + """Return double tag.""" + + tag1, tag2 = tags.split(",") + el1 = etree.Element(tag1) + el2 = etree.Element(tag2) + text = m.group(2) + self.parse_sub_patterns(text, el2, None, idx) + el1.append(el2) + if len(m.groups()) == 3: + text = m.group(3) + self.parse_sub_patterns(text, el1, el2, idx) + return el1 + + def build_double2(self, m: re.Match[str], tags: str, idx: int) -> etree.Element: + """Return double tags (variant 2): `text text`.""" + + tag1, tag2 = tags.split(",") + el1 = etree.Element(tag1) + el2 = etree.Element(tag2) + text = m.group(2) + self.parse_sub_patterns(text, el1, None, idx) + text = m.group(3) + el1.append(el2) + self.parse_sub_patterns(text, el2, None, idx) + return el1 + + def parse_sub_patterns( + self, data: str, parent: etree.Element, last: etree.Element | None, idx: int + ) -> None: + """ + Parses sub patterns. + + `data`: text to evaluate. + + `parent`: Parent to attach text and sub elements to. + + `last`: Last appended child to parent. Can also be None if parent has no children. + + `idx`: Current pattern index that was used to evaluate the parent. 
+ """ + + offset = 0 + pos = 0 + + length = len(data) + while pos < length: + # Find the start of potential emphasis or strong tokens + if self.compiled_re.match(data, pos): + matched = False + # See if the we can match an emphasis/strong pattern + for index, item in enumerate(self.PATTERNS): + # Only evaluate patterns that are after what was used on the parent + if index <= idx: + continue + m = item.pattern.match(data, pos) + if m: + # Append child nodes to parent + # Text nodes should be appended to the last + # child if present, and if not, it should + # be added as the parent's text node. + text = data[offset:m.start(0)] + if text: + if last is not None: + last.tail = text + else: + parent.text = text + el = self.build_element(m, item.builder, item.tags, index) + parent.append(el) + last = el + # Move our position past the matched hunk + offset = pos = m.end(0) + matched = True + if not matched: + # We matched nothing, move on to the next character + pos += 1 + else: + # Increment position as no potential emphasis start was found. + pos += 1 + + # Append any leftover text as a text node. + text = data[offset:] + if text: + if last is not None: + last.tail = text + else: + parent.text = text + + def build_element(self, m: re.Match[str], builder: str, tags: str, index: int) -> etree.Element: + """Element builder.""" + + if builder == 'double2': + return self.build_double2(m, tags, index) + elif builder == 'double': + return self.build_double(m, tags, index) + else: + return self.build_single(m, tags, index) + + def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | None, int | None, int | None]: + """Parse patterns.""" + + el = None + start = None + end = None + + for index, item in enumerate(self.PATTERNS): + m1 = item.pattern.match(data, m.start(0)) + if m1: + start = m1.start(0) + end = m1.end(0) + el = self.build_element(m1, item.builder, item.tags, index) + break + return el, start, end + + +class UnderscoreProcessor(AsteriskProcessor): + """Emphasis processor for handling strong and em matches inside underscores.""" + + PATTERNS = [ + EmStrongItem(re.compile(EM_STRONG2_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'), + EmStrongItem(re.compile(STRONG_EM2_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'), + EmStrongItem(re.compile(SMART_STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'), + EmStrongItem(re.compile(SMART_STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'), + EmStrongItem(re.compile(SMART_EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em') + ] + """ The various strong and emphasis patterns handled by this processor. """ + + +class LinkInlineProcessor(InlineProcessor): + """ Return a link element from the given match. """ + RE_LINK = re.compile(r'''\(\s*(?:(<[^<>]*>)\s*(?:('[^']*'|"[^"]*")\s*)?\))?''', re.DOTALL | re.UNICODE) + RE_TITLE_CLEAN = re.compile(r'\s') + + def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | None, int | None, int | None]: + """ Return an `a` [`Element`][xml.etree.ElementTree.Element] or `(None, None, None)`. 
""" + text, index, handled = self.getText(data, m.end(0)) + + if not handled: + return None, None, None + + href, title, index, handled = self.getLink(data, index) + if not handled: + return None, None, None + + el = etree.Element("a") + el.text = text + + el.set("href", href) + + if title is not None: + el.set("title", title) + + return el, m.start(0), index + + def getLink(self, data: str, index: int) -> tuple[str, str | None, int, bool]: + """Parse data between `()` of `[Text]()` allowing recursive `()`. """ + + href = '' + title: str | None = None + handled = False + + m = self.RE_LINK.match(data, pos=index) + if m and m.group(1): + # Matches [Text]( "title") + href = m.group(1)[1:-1].strip() + if m.group(2): + title = m.group(2)[1:-1] + index = m.end(0) + handled = True + elif m: + # Track bracket nesting and index in string + bracket_count = 1 + backtrack_count = 1 + start_index = m.end() + index = start_index + last_bracket = -1 + + # Primary (first found) quote tracking. + quote: str | None = None + start_quote = -1 + exit_quote = -1 + ignore_matches = False + + # Secondary (second found) quote tracking. + alt_quote = None + start_alt_quote = -1 + exit_alt_quote = -1 + + # Track last character + last = '' + + for pos in range(index, len(data)): + c = data[pos] + if c == '(': + # Count nested ( + # Don't increment the bracket count if we are sure we're in a title. + if not ignore_matches: + bracket_count += 1 + elif backtrack_count > 0: + backtrack_count -= 1 + elif c == ')': + # Match nested ) to ( + # Don't decrement if we are sure we are in a title that is unclosed. + if ((exit_quote != -1 and quote == last) or (exit_alt_quote != -1 and alt_quote == last)): + bracket_count = 0 + elif not ignore_matches: + bracket_count -= 1 + elif backtrack_count > 0: + backtrack_count -= 1 + # We've found our backup end location if the title doesn't resolve. + if backtrack_count == 0: + last_bracket = index + 1 + + elif c in ("'", '"'): + # Quote has started + if not quote: + # We'll assume we are now in a title. + # Brackets are quoted, so no need to match them (except for the final one). + ignore_matches = True + backtrack_count = bracket_count + bracket_count = 1 + start_quote = index + 1 + quote = c + # Secondary quote (in case the first doesn't resolve): [text](link'"title") + elif c != quote and not alt_quote: + start_alt_quote = index + 1 + alt_quote = c + # Update primary quote match + elif c == quote: + exit_quote = index + 1 + # Update secondary quote match + elif alt_quote and c == alt_quote: + exit_alt_quote = index + 1 + + index += 1 + + # Link is closed, so let's break out of the loop + if bracket_count == 0: + # Get the title if we closed a title string right before link closed + if exit_quote >= 0 and quote == last: + href = data[start_index:start_quote - 1] + title = ''.join(data[start_quote:exit_quote - 1]) + elif exit_alt_quote >= 0 and alt_quote == last: + href = data[start_index:start_alt_quote - 1] + title = ''.join(data[start_alt_quote:exit_alt_quote - 1]) + else: + href = data[start_index:index - 1] + break + + if c != ' ': + last = c + + # We have a scenario: `[test](link"notitle)` + # When we enter a string, we stop tracking bracket resolution in the main counter, + # but we do keep a backup counter up until we discover where we might resolve all brackets + # if the title string fails to resolve. 
+ if bracket_count != 0 and backtrack_count == 0: + href = data[start_index:last_bracket - 1] + index = last_bracket + bracket_count = 0 + + handled = bracket_count == 0 + + if title is not None: + title = self.RE_TITLE_CLEAN.sub(' ', dequote(self.unescape(title.strip()))) + + href = self.unescape(href).strip() + + return href, title, index, handled + + def getText(self, data: str, index: int) -> tuple[str, int, bool]: + """Parse the content between `[]` of the start of an image or link + resolving nested square brackets. + + """ + bracket_count = 1 + text = [] + for pos in range(index, len(data)): + c = data[pos] + if c == ']': + bracket_count -= 1 + elif c == '[': + bracket_count += 1 + index += 1 + if bracket_count == 0: + break + text.append(c) + return ''.join(text), index, bracket_count == 0 + + +class ImageInlineProcessor(LinkInlineProcessor): + """ Return a `img` element from the given match. """ + + def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | None, int | None, int | None]: + """ Return an `img` [`Element`][xml.etree.ElementTree.Element] or `(None, None, None)`. """ + text, index, handled = self.getText(data, m.end(0)) + if not handled: + return None, None, None + + src, title, index, handled = self.getLink(data, index) + if not handled: + return None, None, None + + el = etree.Element("img") + + el.set("src", src) + + if title is not None: + el.set("title", title) + + el.set('alt', self.unescape(text)) + return el, m.start(0), index + + +class ReferenceInlineProcessor(LinkInlineProcessor): + """ Match to a stored reference and return link element. """ + NEWLINE_CLEANUP_RE = re.compile(r'\s+', re.MULTILINE) + + RE_LINK = re.compile(r'\s?\[([^\]]*)\]', re.DOTALL | re.UNICODE) + + def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | None, int | None, int | None]: + """ + Return [`Element`][xml.etree.ElementTree.Element] returned by `makeTag` method or `(None, None, None)`. + + """ + text, index, handled = self.getText(data, m.end(0)) + if not handled: + return None, None, None + + id, end, handled = self.evalId(data, index, text) + if not handled: + return None, None, None + + # Clean up line breaks in id + id = self.NEWLINE_CLEANUP_RE.sub(' ', id) + if id not in self.md.references: # ignore undefined refs + return None, m.start(0), end + + href, title = self.md.references[id] + + return self.makeTag(href, title, text), m.start(0), end + + def evalId(self, data: str, index: int, text: str) -> tuple[str | None, int, bool]: + """ + Evaluate the id portion of `[ref][id]`. + + If `[ref][]` use `[ref]`. + """ + m = self.RE_LINK.match(data, pos=index) + if not m: + return None, index, False + else: + id = m.group(1).lower() + end = m.end(0) + if not id: + id = text.lower() + return id, end, True + + def makeTag(self, href: str, title: str, text: str) -> etree.Element: + """ Return an `a` [`Element`][xml.etree.ElementTree.Element]. """ + el = etree.Element('a') + + el.set('href', href) + if title: + el.set('title', title) + + el.text = text + return el + + +class ShortReferenceInlineProcessor(ReferenceInlineProcessor): + """Short form of reference: `[google]`. """ + def evalId(self, data: str, index: int, text: str) -> tuple[str, int, bool]: + """Evaluate the id of `[ref]`. """ + + return text.lower(), index, True + + +class ImageReferenceInlineProcessor(ReferenceInlineProcessor): + """ Match to a stored reference and return `img` element. 
""" + def makeTag(self, href: str, title: str, text: str) -> etree.Element: + """ Return an `img` [`Element`][xml.etree.ElementTree.Element]. """ + el = etree.Element("img") + el.set("src", href) + if title: + el.set("title", title) + el.set("alt", self.unescape(text)) + return el + + +class ShortImageReferenceInlineProcessor(ImageReferenceInlineProcessor): + """ Short form of image reference: `![ref]`. """ + def evalId(self, data: str, index: int, text: str) -> tuple[str, int, bool]: + """Evaluate the id of `[ref]`. """ + + return text.lower(), index, True + + +class AutolinkInlineProcessor(InlineProcessor): + """ Return a link Element given an auto-link (``). """ + def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]: + """ Return an `a` [`Element`][xml.etree.ElementTree.Element] of `group(1)`. """ + el = etree.Element("a") + el.set('href', self.unescape(m.group(1))) + el.text = util.AtomicString(m.group(1)) + return el, m.start(0), m.end(0) + + +class AutomailInlineProcessor(InlineProcessor): + """ + Return a `mailto` link Element given an auto-mail link (``). + """ + def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]: + """ Return an [`Element`][xml.etree.ElementTree.Element] containing a `mailto` link of `group(1)`. """ + el = etree.Element('a') + email = self.unescape(m.group(1)) + if email.startswith("mailto:"): + email = email[len("mailto:"):] + + def codepoint2name(code: int) -> str: + """Return entity definition by code, or the code if not defined.""" + entity = entities.codepoint2name.get(code) + if entity: + return "{}{};".format(util.AMP_SUBSTITUTE, entity) + else: + return "%s#%d;" % (util.AMP_SUBSTITUTE, code) + + letters = [codepoint2name(ord(letter)) for letter in email] + el.text = util.AtomicString(''.join(letters)) + + mailto = "mailto:" + email + mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' % + ord(letter) for letter in mailto]) + el.set('href', mailto) + return el, m.start(0), m.end(0) diff --git a/py311/lib/python3.11/site-packages/markdown/postprocessors.py b/py311/lib/python3.11/site-packages/markdown/postprocessors.py new file mode 100644 index 0000000000000000000000000000000000000000..d4b0e1fdc7603649b2eeb2d8c0c188a2843f23a9 --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown/postprocessors.py @@ -0,0 +1,131 @@ +# Python Markdown + +# A Python implementation of John Gruber's Markdown. + +# Documentation: https://python-markdown.github.io/ +# GitHub: https://github.com/Python-Markdown/markdown/ +# PyPI: https://pypi.org/project/Markdown/ + +# Started by Manfred Stienstra (http://www.dwerg.net/). +# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +# Currently maintained by Waylan Limberg (https://github.com/waylan), +# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later) +# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +# Copyright 2004 Manfred Stienstra (the original version) + +# License: BSD (see LICENSE.md for details). + +""" + +Post-processors run on the text of the entire document after is has been serialized into a string. +Postprocessors should be used to work with the text just before output. Usually, they are used add +back sections that were extracted in a preprocessor, fix up outgoing encodings, or wrap the whole +document. + +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any +from . 
import util +import re + +if TYPE_CHECKING: # pragma: no cover + from markdown import Markdown + + +def build_postprocessors(md: Markdown, **kwargs: Any) -> util.Registry[Postprocessor]: + """ Build the default postprocessors for Markdown. """ + postprocessors = util.Registry() + postprocessors.register(RawHtmlPostprocessor(md), 'raw_html', 30) + postprocessors.register(AndSubstitutePostprocessor(), 'amp_substitute', 20) + return postprocessors + + +class Postprocessor(util.Processor): + """ + Postprocessors are run after the ElementTree it converted back into text. + + Each Postprocessor implements a `run` method that takes a pointer to a + text string, modifies it as necessary and returns a text string. + + Postprocessors must extend `Postprocessor`. + + """ + + def run(self, text: str) -> str: + """ + Subclasses of `Postprocessor` should implement a `run` method, which + takes the html document as a single text string and returns a + (possibly modified) string. + + """ + pass # pragma: no cover + + +class RawHtmlPostprocessor(Postprocessor): + """ Restore raw html to the document. """ + + BLOCK_LEVEL_REGEX = re.compile(r'^\<\/?([^ >]+)') + + def run(self, text: str) -> str: + """ Iterate over html stash and restore html. """ + def substitute_match(m: re.Match[str]) -> str: + if key := m.group(1): + wrapped = True + else: + key = m.group(2) + wrapped = False + if (key := int(key)) >= self.md.htmlStash.html_counter: + return m.group(0) + html = self.stash_to_string(self.md.htmlStash.rawHtmlBlocks[key]) + if not wrapped or self.isblocklevel(html): + return pattern.sub(substitute_match, html) + return pattern.sub(substitute_match, f"

    {html}

    ") + + if self.md.htmlStash.html_counter: + base_placeholder = util.HTML_PLACEHOLDER % r'([0-9]+)' + pattern = re.compile(f'

    { base_placeholder }

    |{ base_placeholder }') + return pattern.sub(substitute_match, text) + else: + return text + + def isblocklevel(self, html: str) -> bool: + """ Check is block of HTML is block-level. """ + m = self.BLOCK_LEVEL_REGEX.match(html) + if m: + if m.group(1)[0] in ('!', '?', '@', '%'): + # Comment, PHP etc... + return True + return self.md.is_block_level(m.group(1)) + return False + + def stash_to_string(self, text: str) -> str: + """ Convert a stashed object to a string. """ + return str(text) + + +class AndSubstitutePostprocessor(Postprocessor): + """ Restore valid entities """ + + def run(self, text: str) -> str: + text = text.replace(util.AMP_SUBSTITUTE, "&") + return text + + +@util.deprecated( + "This class is deprecated and will be removed in the future; " + "use [`UnescapeTreeprocessor`][markdown.treeprocessors.UnescapeTreeprocessor] instead." +) +class UnescapePostprocessor(Postprocessor): + """ Restore escaped chars. """ + + RE = re.compile(r'{}(\d+){}'.format(util.STX, util.ETX)) + + def unescape(self, m: re.Match[str]) -> str: + return chr(int(m.group(1))) + + def run(self, text: str) -> str: + return self.RE.sub(self.unescape, text) diff --git a/py311/lib/python3.11/site-packages/markdown/preprocessors.py b/py311/lib/python3.11/site-packages/markdown/preprocessors.py new file mode 100644 index 0000000000000000000000000000000000000000..0f63cdd36ab2f06222a18380bfe7ab5e47c3bfcf --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown/preprocessors.py @@ -0,0 +1,91 @@ +# Python Markdown + +# A Python implementation of John Gruber's Markdown. + +# Documentation: https://python-markdown.github.io/ +# GitHub: https://github.com/Python-Markdown/markdown/ +# PyPI: https://pypi.org/project/Markdown/ + +# Started by Manfred Stienstra (http://www.dwerg.net/). +# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +# Currently maintained by Waylan Limberg (https://github.com/waylan), +# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later) +# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +# Copyright 2004 Manfred Stienstra (the original version) + +# License: BSD (see LICENSE.md for details). + +""" +Preprocessors work on source text before it is broken down into its individual parts. +This is an excellent place to clean up bad characters or to extract portions for later +processing that the parser may otherwise choke on. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any +from . import util +from .htmlparser import HTMLExtractor +import re + +if TYPE_CHECKING: # pragma: no cover + from markdown import Markdown + + +def build_preprocessors(md: Markdown, **kwargs: Any) -> util.Registry[Preprocessor]: + """ Build and return the default set of preprocessors used by Markdown. """ + preprocessors = util.Registry() + preprocessors.register(NormalizeWhitespace(md), 'normalize_whitespace', 30) + preprocessors.register(HtmlBlockPreprocessor(md), 'html_block', 20) + return preprocessors + + +class Preprocessor(util.Processor): + """ + Preprocessors are run after the text is broken into lines. + + Each preprocessor implements a `run` method that takes a pointer to a + list of lines of the document, modifies it as necessary and returns + either the same pointer or a pointer to a new list. + + Preprocessors must extend `Preprocessor`. 
+ + """ + def run(self, lines: list[str]) -> list[str]: + """ + Each subclass of `Preprocessor` should override the `run` method, which + takes the document as a list of strings split by newlines and returns + the (possibly modified) list of lines. + + """ + pass # pragma: no cover + + +class NormalizeWhitespace(Preprocessor): + """ Normalize whitespace for consistent parsing. """ + + def run(self, lines: list[str]) -> list[str]: + source = '\n'.join(lines) + source = source.replace(util.STX, "").replace(util.ETX, "") + source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n" + source = source.expandtabs(self.md.tab_length) + source = re.sub(r'(?<=\n) +\n', '\n', source) + return source.split('\n') + + +class HtmlBlockPreprocessor(Preprocessor): + """ + Remove html blocks from the text and store them for later retrieval. + + The raw HTML is stored in the [`htmlStash`][markdown.util.HtmlStash] of the + [`Markdown`][markdown.Markdown] instance. + """ + + def run(self, lines: list[str]) -> list[str]: + source = '\n'.join(lines) + parser = HTMLExtractor(self.md) + parser.feed(source) + parser.close() + return ''.join(parser.cleandoc).split('\n') diff --git a/py311/lib/python3.11/site-packages/markdown/serializers.py b/py311/lib/python3.11/site-packages/markdown/serializers.py new file mode 100644 index 0000000000000000000000000000000000000000..573b2648349de11618af078d2f6c4207200a0732 --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown/serializers.py @@ -0,0 +1,194 @@ +# Add x/html serialization to `Elementree` +# Taken from ElementTree 1.3 preview with slight modifications +# +# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved. +# +# fredrik@pythonware.com +# https://www.pythonware.com/ +# +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2007 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# -------------------------------------------------------------------- + +""" +Python-Markdown provides two serializers which render [`ElementTree.Element`][xml.etree.ElementTree.Element] +objects to a string of HTML. Both functions wrap the same underlying code with only a few minor +differences as outlined below: + +1. Empty (self-closing) tags are rendered as `` for HTML and as `` for XHTML. +2. 
Boolean attributes are rendered as `attrname` for HTML and as `attrname="attrname"` for XHTML. +""" + +from __future__ import annotations + +from xml.etree.ElementTree import ProcessingInstruction +from xml.etree.ElementTree import Comment, ElementTree, Element, QName, HTML_EMPTY +import re +from typing import Callable, Literal, NoReturn + +__all__ = ['to_html_string', 'to_xhtml_string'] + +RE_AMP = re.compile(r'&(?!(?:\#[0-9]+|\#x[0-9a-f]+|[0-9a-z]+);)', re.I) + + +def _raise_serialization_error(text: str) -> NoReturn: # pragma: no cover + raise TypeError( + "cannot serialize {!r} (type {})".format(text, type(text).__name__) + ) + + +def _escape_cdata(text) -> str: + # escape character data + try: + # it's worth avoiding do-nothing calls for strings that are + # shorter than 500 character, or so. assume that's, by far, + # the most common case in most applications. + if "&" in text: + # Only replace & when not part of an entity + text = RE_AMP.sub('&', text) + if "<" in text: + text = text.replace("<", "<") + if ">" in text: + text = text.replace(">", ">") + return text + except (TypeError, AttributeError): # pragma: no cover + _raise_serialization_error(text) + + +def _escape_attrib(text: str) -> str: + # escape attribute value + try: + if "&" in text: + # Only replace & when not part of an entity + text = RE_AMP.sub('&', text) + if "<" in text: + text = text.replace("<", "<") + if ">" in text: + text = text.replace(">", ">") + if "\"" in text: + text = text.replace("\"", """) + if "\n" in text: + text = text.replace("\n", " ") + return text + except (TypeError, AttributeError): # pragma: no cover + _raise_serialization_error(text) + + +def _escape_attrib_html(text: str) -> str: + # escape attribute value + try: + if "&" in text: + # Only replace & when not part of an entity + text = RE_AMP.sub('&', text) + if "<" in text: + text = text.replace("<", "<") + if ">" in text: + text = text.replace(">", ">") + if "\"" in text: + text = text.replace("\"", """) + return text + except (TypeError, AttributeError): # pragma: no cover + _raise_serialization_error(text) + + +def _serialize_html(write: Callable[[str], None], elem: Element, format: Literal["html", "xhtml"]) -> None: + tag = elem.tag + text = elem.text + if tag is Comment: + write("" % _escape_cdata(text)) + elif tag is ProcessingInstruction: + write("" % _escape_cdata(text)) + elif tag is None: + if text: + write(_escape_cdata(text)) + for e in elem: + _serialize_html(write, e, format) + else: + namespace_uri = None + if isinstance(tag, QName): + # `QNAME` objects store their data as a string: `{uri}tag` + if tag.text[:1] == "{": + namespace_uri, tag = tag.text[1:].split("}", 1) + else: + raise ValueError('QName objects must define a tag.') + write("<" + tag) + items = elem.items() + if items: + items = sorted(items) # lexical order + for k, v in items: + if isinstance(k, QName): + # Assume a text only `QName` + k = k.text + if isinstance(v, QName): + # Assume a text only `QName` + v = v.text + else: + v = _escape_attrib_html(v) + if k == v and format == 'html': + # handle boolean attributes + write(" %s" % v) + else: + write(' {}="{}"'.format(k, v)) + if namespace_uri: + write(' xmlns="%s"' % (_escape_attrib(namespace_uri))) + if format == "xhtml" and tag.lower() in HTML_EMPTY: + write(" />") + else: + write(">") + if text: + if tag.lower() in ["script", "style"]: + write(text) + else: + write(_escape_cdata(text)) + for e in elem: + _serialize_html(write, e, format) + if tag.lower() not in HTML_EMPTY: + write("") + if elem.tail: + 
write(_escape_cdata(elem.tail)) + + +def _write_html(root: Element, format: Literal["html", "xhtml"] = "html") -> str: + assert root is not None + data: list[str] = [] + write = data.append + _serialize_html(write, root, format) + return "".join(data) + + +# -------------------------------------------------------------------- +# public functions + + +def to_html_string(element: Element) -> str: + """ Serialize element and its children to a string of HTML5. """ + return _write_html(ElementTree(element).getroot(), format="html") + + +def to_xhtml_string(element: Element) -> str: + """ Serialize element and its children to a string of XHTML. """ + return _write_html(ElementTree(element).getroot(), format="xhtml") diff --git a/py311/lib/python3.11/site-packages/markdown/test_tools.py b/py311/lib/python3.11/site-packages/markdown/test_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..895e44ec5290435791faf3fcf72741cfb21e440e --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown/test_tools.py @@ -0,0 +1,224 @@ +# Python Markdown + +# A Python implementation of John Gruber's Markdown. + +# Documentation: https://python-markdown.github.io/ +# GitHub: https://github.com/Python-Markdown/markdown/ +# PyPI: https://pypi.org/project/Markdown/ + +# Started by Manfred Stienstra (http://www.dwerg.net/). +# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +# Currently maintained by Waylan Limberg (https://github.com/waylan), +# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later) +# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +# Copyright 2004 Manfred Stienstra (the original version) + +# License: BSD (see LICENSE.md for details). + +""" A collection of tools for testing the Markdown code base and extensions. """ + +from __future__ import annotations + +import os +import sys +import unittest +import textwrap +from typing import Any +from . import markdown, Markdown, util + +try: + import tidylib +except ImportError: + tidylib = None + +__all__ = ['TestCase', 'LegacyTestCase', 'Kwargs'] + + +class TestCase(unittest.TestCase): + """ + A [`unittest.TestCase`][] subclass with helpers for testing Markdown output. + + Define `default_kwargs` as a `dict` of keywords to pass to Markdown for each + test. The defaults can be overridden on individual tests. + + The `assertMarkdownRenders` method accepts the source text, the expected + output, and any keywords to pass to Markdown. The `default_kwargs` are used + except where overridden by `kwargs`. The output and expected output are passed + to `TestCase.assertMultiLineEqual`. An `AssertionError` is raised with a diff + if the actual output does not equal the expected output. + + The `dedent` method is available to dedent triple-quoted strings if + necessary. + + In all other respects, behaves as `unittest.TestCase`. + """ + + default_kwargs: dict[str, Any] = {} + """ Default options to pass to Markdown for each test. """ + + def assertMarkdownRenders(self, source, expected, expected_attrs=None, **kwargs): + """ + Test that source Markdown text renders to expected output with given keywords. + + `expected_attrs` accepts a `dict`. Each key should be the name of an attribute + on the `Markdown` instance and the value should be the expected value after + the source text is parsed by Markdown. 
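+        For example, a minimal sketch (`references` is a real `Markdown`
+        attribute; the values here are made up for illustration):
+
+            self.assertMarkdownRenders(
+                '[refname]: /url "title"',
+                '',
+                expected_attrs={'references': {'refname': ('/url', 'title')}}
+            )
+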
After the expected output is tested, + the expected value for each attribute is compared against the actual + attribute of the `Markdown` instance using `TestCase.assertEqual`. + """ + + expected_attrs = expected_attrs or {} + kws = self.default_kwargs.copy() + kws.update(kwargs) + md = Markdown(**kws) + output = md.convert(source) + self.assertMultiLineEqual(output, expected) + for key, value in expected_attrs.items(): + self.assertEqual(getattr(md, key), value) + + def dedent(self, text): + """ + Dedent text. + """ + + # TODO: If/when actual output ends with a newline, then use: + # return textwrap.dedent(text.strip('/n')) + return textwrap.dedent(text).strip() + + +class recursionlimit: + """ + A context manager which temporarily modifies the Python recursion limit. + + The testing framework, coverage, etc. may add an arbitrary number of levels to the depth. To maintain consistency + in the tests, the current stack depth is determined when called, then added to the provided limit. + + Example usage: + + ``` python + with recursionlimit(20): + # test code here + ``` + + See . + """ + + def __init__(self, limit): + self.limit = util._get_stack_depth() + limit + self.old_limit = sys.getrecursionlimit() + + def __enter__(self): + sys.setrecursionlimit(self.limit) + + def __exit__(self, type, value, tb): + sys.setrecursionlimit(self.old_limit) + + +######################### +# Legacy Test Framework # +######################### + + +class Kwargs(dict): + """ A `dict` like class for holding keyword arguments. """ + pass + + +def _normalize_whitespace(text): + """ Normalize whitespace for a string of HTML using `tidylib`. """ + output, errors = tidylib.tidy_fragment(text, options={ + 'drop_empty_paras': 0, + 'fix_backslash': 0, + 'fix_bad_comments': 0, + 'fix_uri': 0, + 'join_styles': 0, + 'lower_literals': 0, + 'merge_divs': 0, + 'output_xhtml': 1, + 'quote_ampersand': 0, + 'newline': 'LF' + }) + return output + + +class LegacyTestMeta(type): + def __new__(cls, name, bases, dct): + + def generate_test(infile, outfile, normalize, kwargs): + def test(self): + with open(infile, encoding="utf-8") as f: + input = f.read() + with open(outfile, encoding="utf-8") as f: + # Normalize line endings + # (on Windows, git may have altered line endings). 
+                    expected = f.read().replace("\r\n", "\n")
+                output = markdown(input, **kwargs)
+                if tidylib and normalize:
+                    try:
+                        expected = _normalize_whitespace(expected)
+                        output = _normalize_whitespace(output)
+                    except OSError:
+                        self.skipTest("Tidylib's c library not available.")
+                elif normalize:
+                    self.skipTest('Tidylib not available.')
+                self.assertMultiLineEqual(output, expected)
+            return test
+
+        location = dct.get('location', '')
+        exclude = dct.get('exclude', [])
+        normalize = dct.get('normalize', False)
+        input_ext = dct.get('input_ext', '.txt')
+        output_ext = dct.get('output_ext', '.html')
+        kwargs = dct.get('default_kwargs', Kwargs())
+
+        if os.path.isdir(location):
+            for file in os.listdir(location):
+                infile = os.path.join(location, file)
+                if os.path.isfile(infile):
+                    tname, ext = os.path.splitext(file)
+                    if ext == input_ext:
+                        outfile = os.path.join(location, tname + output_ext)
+                        tname = tname.replace(' ', '_').replace('-', '_')
+                        kws = kwargs.copy()
+                        if tname in dct:
+                            kws.update(dct[tname])
+                        test_name = 'test_%s' % tname
+                        if tname not in exclude:
+                            dct[test_name] = generate_test(infile, outfile, normalize, kws)
+                        else:
+                            dct[test_name] = unittest.skip('Excluded')(lambda: None)
+
+        return type.__new__(cls, name, bases, dct)
+
+
+class LegacyTestCase(unittest.TestCase, metaclass=LegacyTestMeta):
+    """
+    A [`unittest.TestCase`][] subclass for running Markdown's legacy file-based tests.
+
+    A subclass should define various properties which point to a directory of
+    text-based test files and define various behaviors/defaults for those tests.
+    The following properties are supported:
+
+    Attributes:
+        location (str): A path to the directory of test files. An absolute path is preferred.
+        exclude (list[str]): A list of tests to exclude. Each test name should comprise the filename
+            without an extension.
+        normalize (bool): A boolean value indicating if the HTML should be normalized. Default: `False`.
+        input_ext (str): A string containing the file extension of input files. Default: `.txt`.
+        output_ext (str): A string containing the file extension of expected output files. Default: `.html`.
+        default_kwargs (Kwargs[str, Any]): The default set of keyword arguments for all test files in the directory.
+
+    In addition, properties can be defined for each individual set of test files within
+    the directory. The property should be given the name of the file without the file
+    extension. Any spaces and dashes in the filename should be replaced with
+    underscores. The value of the property should be a `Kwargs` instance which
+    contains the keyword arguments that should be passed to `Markdown` for that
+    test file. The keyword arguments will "update" the `default_kwargs`.
+
+    When the class instance is created, it will walk the given directory and create
+    a separate `Unittest` for each set of test files using the naming scheme:
+    `test_filename`. One `Unittest` will be run for each set of input and output files.
+    """
+    pass
diff --git a/py311/lib/python3.11/site-packages/markdown/treeprocessors.py b/py311/lib/python3.11/site-packages/markdown/treeprocessors.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a27446d47d9bc3b386596bc16b93ac9de8c9911
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/markdown/treeprocessors.py
@@ -0,0 +1,476 @@
+# Python Markdown
+
+# A Python implementation of John Gruber's Markdown.
+ +# Documentation: https://python-markdown.github.io/ +# GitHub: https://github.com/Python-Markdown/markdown/ +# PyPI: https://pypi.org/project/Markdown/ + +# Started by Manfred Stienstra (http://www.dwerg.net/). +# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). +# Currently maintained by Waylan Limberg (https://github.com/waylan), +# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). + +# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later) +# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) +# Copyright 2004 Manfred Stienstra (the original version) + +# License: BSD (see LICENSE.md for details). + +""" +Tree processors manipulate the tree created by block processors. They can even create an entirely +new `ElementTree` object. This is an excellent place for creating summaries, adding collected +references, or last minute adjustments. + +""" + +from __future__ import annotations + +import re +import xml.etree.ElementTree as etree +from typing import TYPE_CHECKING, Any +from . import util +from . import inlinepatterns + +if TYPE_CHECKING: # pragma: no cover + from markdown import Markdown + + +def build_treeprocessors(md: Markdown, **kwargs: Any) -> util.Registry[Treeprocessor]: + """ Build the default `treeprocessors` for Markdown. """ + treeprocessors = util.Registry() + treeprocessors.register(InlineProcessor(md), 'inline', 20) + treeprocessors.register(PrettifyTreeprocessor(md), 'prettify', 10) + treeprocessors.register(UnescapeTreeprocessor(md), 'unescape', 0) + return treeprocessors + + +def isString(s: object) -> bool: + """ Return `True` if object is a string but not an [`AtomicString`][markdown.util.AtomicString]. """ + if not isinstance(s, util.AtomicString): + return isinstance(s, str) + return False + + +class Treeprocessor(util.Processor): + """ + `Treeprocessor`s are run on the `ElementTree` object before serialization. + + Each `Treeprocessor` implements a `run` method that takes a pointer to an + `Element` and modifies it as necessary. + + `Treeprocessors` must extend `markdown.Treeprocessor`. + + """ + def run(self, root: etree.Element) -> etree.Element | None: + """ + Subclasses of `Treeprocessor` should implement a `run` method, which + takes a root `Element`. This method can return another `Element` + object, and the existing root `Element` will be replaced, or it can + modify the current tree and return `None`. + """ + pass # pragma: no cover + + +class InlineProcessor(Treeprocessor): + """ + A `Treeprocessor` that traverses a tree, applying inline patterns. + """ + + def __init__(self, md: Markdown): + self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX + self.__placeholder_suffix = util.ETX + self.__placeholder_length = 4 + len(self.__placeholder_prefix) \ + + len(self.__placeholder_suffix) + self.__placeholder_re = util.INLINE_PLACEHOLDER_RE + self.md = md + self.inlinePatterns = md.inlinePatterns + self.ancestors: list[str] = [] + + def __makePlaceholder(self, type: str) -> tuple[str, str]: + """ Generate a placeholder """ + id = "%04d" % len(self.stashed_nodes) + hash = util.INLINE_PLACEHOLDER % id + return hash, id + + def __findPlaceholder(self, data: str, index: int) -> tuple[str | None, int]: + """ + Extract id from data string, start from index. + + Arguments: + data: String. + index: Index, from which we start search. + + Returns: + Placeholder id and string index, after the found placeholder. 
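+
+        For example (an illustrative sketch, not from the library): with
+        `data = 'abc' + (INLINE_PLACEHOLDER % '0042') + 'xyz'` and `index = 0`,
+        this returns `'0042'` and the index of `'xyz'`, just past the `ETX` marker.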
+ + """ + m = self.__placeholder_re.search(data, index) + if m: + return m.group(1), m.end() + else: + return None, index + 1 + + def __stashNode(self, node: etree.Element | str, type: str) -> str: + """ Add node to stash. """ + placeholder, id = self.__makePlaceholder(type) + self.stashed_nodes[id] = node + return placeholder + + def __handleInline(self, data: str, patternIndex: int = 0) -> str: + """ + Process string with inline patterns and replace it with placeholders. + + Arguments: + data: A line of Markdown text. + patternIndex: The index of the `inlinePattern` to start with. + + Returns: + String with placeholders. + + """ + if not isinstance(data, util.AtomicString): + startIndex = 0 + count = len(self.inlinePatterns) + while patternIndex < count: + data, matched, startIndex = self.__applyPattern( + self.inlinePatterns[patternIndex], data, patternIndex, startIndex + ) + if not matched: + patternIndex += 1 + return data + + def __processElementText(self, node: etree.Element, subnode: etree.Element, isText: bool = True) -> None: + """ + Process placeholders in `Element.text` or `Element.tail` + of Elements popped from `self.stashed_nodes`. + + Arguments: + node: Parent node. + subnode: Processing node. + isText: Boolean variable, True - it's text, False - it's a tail. + + """ + if isText: + text = subnode.text + subnode.text = None + else: + text = subnode.tail + subnode.tail = None + + childResult = self.__processPlaceholders(text, subnode, isText) + + if not isText and node is not subnode: + pos = list(node).index(subnode) + 1 + else: + pos = 0 + + childResult.reverse() + for newChild in childResult: + node.insert(pos, newChild[0]) + + def __processPlaceholders( + self, + data: str | None, + parent: etree.Element, + isText: bool = True + ) -> list[tuple[etree.Element, list[str]]]: + """ + Process string with placeholders and generate `ElementTree` tree. + + Arguments: + data: String with placeholders instead of `ElementTree` elements. + parent: Element, which contains processing inline data. + isText: Boolean variable, True - it's text, False - it's a tail. + + Returns: + List with `ElementTree` elements with applied inline patterns. 
+ + """ + def linkText(text: str | None) -> None: + if text: + if result: + if result[-1][0].tail: + result[-1][0].tail += text + else: + result[-1][0].tail = text + elif not isText: + if parent.tail: + parent.tail += text + else: + parent.tail = text + else: + if parent.text: + parent.text += text + else: + parent.text = text + result = [] + strartIndex = 0 + while data: + index = data.find(self.__placeholder_prefix, strartIndex) + if index != -1: + id, phEndIndex = self.__findPlaceholder(data, index) + + if id in self.stashed_nodes: + node = self.stashed_nodes.get(id) + + if index > 0: + text = data[strartIndex:index] + linkText(text) + + if not isinstance(node, str): # it's Element + for child in [node] + list(node): + if child.tail: + if child.tail.strip(): + self.__processElementText( + node, child, False + ) + if child.text: + if child.text.strip(): + self.__processElementText(child, child) + else: # it's just a string + linkText(node) + strartIndex = phEndIndex + continue + + strartIndex = phEndIndex + result.append((node, self.ancestors[:])) + + else: # wrong placeholder + end = index + len(self.__placeholder_prefix) + linkText(data[strartIndex:end]) + strartIndex = end + else: + text = data[strartIndex:] + if isinstance(data, util.AtomicString): + # We don't want to loose the `AtomicString` + text = util.AtomicString(text) + linkText(text) + data = "" + + return result + + def __applyPattern( + self, + pattern: inlinepatterns.Pattern, + data: str, + patternIndex: int, + startIndex: int = 0 + ) -> tuple[str, bool, int]: + """ + Check if the line fits the pattern, create the necessary + elements, add it to `stashed_nodes`. + + Arguments: + data: The text to be processed. + pattern: The pattern to be checked. + patternIndex: Index of current pattern. + startIndex: String index, from which we start searching. + + Returns: + String with placeholders instead of `ElementTree` elements. + + """ + new_style = isinstance(pattern, inlinepatterns.InlineProcessor) + + for exclude in pattern.ANCESTOR_EXCLUDES: + if exclude.lower() in self.ancestors: + return data, False, 0 + + if new_style: + match = None + # Since `handleMatch` may reject our first match, + # we iterate over the buffer looking for matches + # until we can't find any more. 
+ for match in pattern.getCompiledRegExp().finditer(data, startIndex): + node, start, end = pattern.handleMatch(match, data) + if start is None or end is None: + startIndex += match.end(0) + match = None + continue + break + else: # pragma: no cover + match = pattern.getCompiledRegExp().match(data[startIndex:]) + leftData = data[:startIndex] + + if not match: + return data, False, 0 + + if not new_style: # pragma: no cover + node = pattern.handleMatch(match) + start = match.start(0) + end = match.end(0) + + if node is None: + return data, True, end + + if not isinstance(node, str): + if not isinstance(node.text, util.AtomicString): + # We need to process current node too + for child in [node] + list(node): + if not isString(node): + if child.text: + self.ancestors.append(child.tag.lower()) + child.text = self.__handleInline( + child.text, patternIndex + 1 + ) + self.ancestors.pop() + if child.tail: + child.tail = self.__handleInline( + child.tail, patternIndex + ) + + placeholder = self.__stashNode(node, pattern.type()) + + if new_style: + return "{}{}{}".format(data[:start], + placeholder, data[end:]), True, 0 + else: # pragma: no cover + return "{}{}{}{}".format(leftData, + match.group(1), + placeholder, match.groups()[-1]), True, 0 + + def __build_ancestors(self, parent: etree.Element | None, parents: list[str]) -> None: + """Build the ancestor list.""" + ancestors = [] + while parent is not None: + if parent is not None: + ancestors.append(parent.tag.lower()) + parent = self.parent_map.get(parent) + ancestors.reverse() + parents.extend(ancestors) + + def run(self, tree: etree.Element, ancestors: list[str] | None = None) -> etree.Element: + """Apply inline patterns to a parsed Markdown tree. + + Iterate over `Element`, find elements with inline tag, apply inline + patterns and append newly created Elements to tree. To avoid further + processing of string with inline patterns, instead of normal string, + use subclass [`AtomicString`][markdown.util.AtomicString]: + + node.text = markdown.util.AtomicString("This will not be processed.") + + Arguments: + tree: `Element` object, representing Markdown tree. + ancestors: List of parent tag names that precede the tree node (if needed). + + Returns: + An element tree object with applied inline patterns. + + """ + self.stashed_nodes: dict[str, etree.Element | str] = {} + + # Ensure a valid parent list, but copy passed in lists + # to ensure we don't have the user accidentally change it on us. 
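+        # Illustrative call (not from the library): `run(tree, ancestors=['div'])`
+        # processes the tree as if it were nested in a `div`, so any pattern
+        # listing `div` in its `ANCESTOR_EXCLUDES` is skipped; the caller's
+        # list is never mutated thanks to the copy below.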
+ tree_parents = [] if ancestors is None else ancestors[:] + + self.parent_map = {c: p for p in tree.iter() for c in p} + stack = [(tree, tree_parents)] + + while stack: + currElement, parents = stack.pop(0) + + self.ancestors = parents + self.__build_ancestors(currElement, self.ancestors) + + insertQueue = [] + for child in currElement: + if child.text and not isinstance( + child.text, util.AtomicString + ): + self.ancestors.append(child.tag.lower()) + text = child.text + child.text = None + lst = self.__processPlaceholders( + self.__handleInline(text), child + ) + for item in lst: + self.parent_map[item[0]] = child + stack += lst + insertQueue.append((child, lst)) + self.ancestors.pop() + if child.tail: + tail = self.__handleInline(child.tail) + dumby = etree.Element('d') + child.tail = None + tailResult = self.__processPlaceholders(tail, dumby, False) + if dumby.tail: + child.tail = dumby.tail + pos = list(currElement).index(child) + 1 + tailResult.reverse() + for newChild in tailResult: + self.parent_map[newChild[0]] = currElement + currElement.insert(pos, newChild[0]) + if len(child): + self.parent_map[child] = currElement + stack.append((child, self.ancestors[:])) + + for element, lst in insertQueue: + for i, obj in enumerate(lst): + newChild = obj[0] + element.insert(i, newChild) + return tree + + +class PrettifyTreeprocessor(Treeprocessor): + """ Add line breaks to the html document. """ + + def _prettifyETree(self, elem: etree.Element) -> None: + """ Recursively add line breaks to `ElementTree` children. """ + + i = "\n" + if self.md.is_block_level(elem.tag) and elem.tag not in ['code', 'pre']: + if (not elem.text or not elem.text.strip()) \ + and len(elem) and self.md.is_block_level(elem[0].tag): + elem.text = i + for e in elem: + if self.md.is_block_level(e.tag): + self._prettifyETree(e) + if not elem.tail or not elem.tail.strip(): + elem.tail = i + + def run(self, root: etree.Element) -> None: + """ Add line breaks to `Element` object and its children. """ + + self._prettifyETree(root) + # Do `
<br />`'s separately as they are often in the middle of
+        # inline content and missed by `_prettifyETree`.
+        brs = root.iter('br')
+        for br in brs:
+            if not br.tail or not br.tail.strip():
+                br.tail = '\n'
+            else:
+                br.tail = '\n%s' % br.tail
+        # Clean up extra empty lines at end of code blocks.
+        pres = root.iter('pre')
+        for pre in pres:
+            if len(pre) and pre[0].tag == 'code':
+                code = pre[0]
+                # Only prettify code containing text only
+                if not len(code) and code.text is not None:
+                    code.text = util.AtomicString(code.text.rstrip() + '\n')
+
+
+class UnescapeTreeprocessor(Treeprocessor):
+    """ Restore escaped chars """
+
+    RE = re.compile(r'{}(\d+){}'.format(util.STX, util.ETX))
+
+    def _unescape(self, m: re.Match[str]) -> str:
+        return chr(int(m.group(1)))
+
+    def unescape(self, text: str) -> str:
+        return self.RE.sub(self._unescape, text)
+
+    def run(self, root: etree.Element) -> None:
+        """ Loop over all elements and unescape all text. """
+        for elem in root.iter():
+            # Unescape text content
+            if elem.text and not elem.tag == 'code':
+                elem.text = self.unescape(elem.text)
+            # Unescape tail content
+            if elem.tail:
+                elem.tail = self.unescape(elem.tail)
+            # Unescape attribute values
+            for key, value in elem.items():
+                elem.set(key, self.unescape(value))
diff --git a/py311/lib/python3.11/site-packages/markdown/util.py b/py311/lib/python3.11/site-packages/markdown/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..f547721eb74c8e0b93dcd5aef7fd20f1c083768e
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/markdown/util.py
@@ -0,0 +1,409 @@
+# Python Markdown
+
+# A Python implementation of John Gruber's Markdown.
+
+# Documentation: https://python-markdown.github.io/
+# GitHub: https://github.com/Python-Markdown/markdown/
+# PyPI: https://pypi.org/project/Markdown/
+
+# Started by Manfred Stienstra (http://www.dwerg.net/).
+# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
+# Currently maintained by Waylan Limberg (https://github.com/waylan),
+# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
+
+# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
+# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
+# Copyright 2004 Manfred Stienstra (the original version)
+
+# License: BSD (see LICENSE.md for details).
+
+"""
+This module contains various constants, classes and functions which get referenced and used
+throughout the code base.
+"""
+
+from __future__ import annotations
+
+import re
+import sys
+import warnings
+from functools import wraps, lru_cache
+from itertools import count
+from typing import TYPE_CHECKING, Generic, Iterator, NamedTuple, TypeVar, TypedDict, overload
+
+if TYPE_CHECKING:  # pragma: no cover
+    from markdown import Markdown
+    import xml.etree.ElementTree as etree
+
+_T = TypeVar('_T')
+
+
+"""
+Constants you might want to modify
+-----------------------------------------------------------------------------
+"""
+
+
+BLOCK_LEVEL_ELEMENTS: list[str] = [
+    # Elements which are invalid to wrap in a `<p>` tag.
+    # See https://w3c.github.io/html/grouping-content.html#the-p-element
+    'address', 'article', 'aside', 'blockquote', 'details', 'div', 'dl',
+    'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3',
+    'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'main', 'menu', 'nav', 'ol',
+    'p', 'pre', 'section', 'table', 'ul',
+    # Other elements which Markdown should not be mucking up the contents of.
+    'canvas', 'colgroup', 'dd', 'body', 'dt', 'group', 'html', 'iframe', 'li', 'legend',
+    'math', 'map', 'noscript', 'output', 'object', 'option', 'progress', 'script',
+    'style', 'summary', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'video',
+    'center'
+]
+"""
+List of HTML tags which get treated as block-level elements. Same as the `block_level_elements`
+attribute of the [`Markdown`][markdown.Markdown] class. Generally one should use the
+attribute on the class. This remains for compatibility with older extensions.
+"""
+
+# Placeholders
+STX = '\u0002'
+""" "Start of Text" marker for placeholder templates. """
+ETX = '\u0003'
+""" "End of Text" marker for placeholder templates. """
+INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
+""" Prefix for inline placeholder template. """
+INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
+""" Placeholder template for stashed inline text. """
+INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)')
+""" Regular Expression which matches inline placeholders. """
+AMP_SUBSTITUTE = STX+"amp"+ETX
+""" Placeholder template for HTML entities. """
+HTML_PLACEHOLDER = STX + "wzxhzdk:%s" + ETX
+""" Placeholder template for raw HTML. """
+HTML_PLACEHOLDER_RE = re.compile(HTML_PLACEHOLDER % r'([0-9]+)')
+""" Regular expression which matches HTML placeholders. """
+TAG_PLACEHOLDER = STX + "hzzhzkh:%s" + ETX
+""" Placeholder template for tags. """
+
+
+# Constants you probably do not need to change
+# -----------------------------------------------------------------------------
+
+RTL_BIDI_RANGES = (
+    ('\u0590', '\u07FF'),
+    # Hebrew (0590-05FF), Arabic (0600-06FF),
+    # Syriac (0700-074F), Arabic supplement (0750-077F),
+    # Thaana (0780-07BF), Nko (07C0-07FF).
+    ('\u2D30', '\u2D7F')  # Tifinagh
+)
+
+
+# AUXILIARY GLOBAL FUNCTIONS
+# =============================================================================
+
+
+@lru_cache(maxsize=None)
+def get_installed_extensions():
+    """ Return all entry_points in the `markdown.extensions` group. """
+    if sys.version_info >= (3, 10):
+        from importlib import metadata
+    else:  # `<python3.10` use the backport
+        import importlib_metadata as metadata
+    # Only load extension entry_points once.
+    return metadata.entry_points(group='markdown.extensions')
+
+
+def deprecated(message: str, stacklevel: int = 2):
+    """
+    Raise a `DeprecationWarning` when the wrapped function/method is called.
+    """
+    def wrapper(func):
+        @wraps(func)
+        def deprecated_func(*args, **kwargs):
+            warnings.warn(
+                f"'{func.__name__}' is deprecated. {message}",
+                category=DeprecationWarning,
+                stacklevel=stacklevel
+            )
+            return func(*args, **kwargs)
+        return deprecated_func
+    return wrapper
+
+
+def parse_bool_value(value: str, fail_on_errors: bool = True, preserve_none: bool = False) -> bool | None:
+    """Parses a string representing a boolean value. If parsing was successful,
+    returns `True` or `False`. If `preserve_none=True`, returns `True`, `False`,
+    or `None`.
+    If parsing was not successful, raises `ValueError`, or, if
+    `fail_on_errors=False`, returns `None`."""
+    if not isinstance(value, str):
+        if preserve_none and value is None:
+            return value
+        return bool(value)
+    elif preserve_none and value.lower() == 'none':
+        return None
+    elif value.lower() in ('true', 'yes', 'y', 'on', '1'):
+        return True
+    elif value.lower() in ('false', 'no', 'n', 'off', '0', 'none'):
+        return False
+    elif fail_on_errors:
+        raise ValueError('Cannot parse bool value: %r' % value)
+
+
+def code_escape(text: str) -> str:
+    """HTML escape a string of code."""
+    if "&" in text:
+        text = text.replace("&", "&amp;")
+    if "<" in text:
+        text = text.replace("<", "&lt;")
+    if ">" in text:
+        text = text.replace(">", "&gt;")
+    return text
+
+
+def _get_stack_depth(size: int = 2) -> int:
+    """Get current stack depth, performantly.
+    """
+    frame = sys._getframe(size)
+
+    for size in count(size):
+        frame = frame.f_back
+        if not frame:
+            return size
+
+
+def nearing_recursion_limit() -> bool:
+    """Return true if current stack depth is within 100 of maximum limit."""
+    return sys.getrecursionlimit() - _get_stack_depth() < 100
+
+
+# MISC AUXILIARY CLASSES
+# =============================================================================
+
+
+class AtomicString(str):
+    """A string which should not be further processed."""
+    pass
+
+
+class Processor:
+    """ The base class for all processors.
+
+    Attributes:
+        Processor.md: The `Markdown` instance passed in at initialization.
+
+    Arguments:
+        md: The `Markdown` instance this processor is a part of.
+
+    """
+    def __init__(self, md: Markdown | None = None):
+        self.md = md
+
+
+if TYPE_CHECKING:  # pragma: no cover
+    class TagData(TypedDict):
+        tag: str
+        attrs: dict[str, str]
+        left_index: int
+        right_index: int
+
+
+class HtmlStash:
+    """
+    This class is used for stashing HTML objects that we extract
+    in the beginning and replace with place-holders.
+    """
+
+    def __init__(self):
+        """ Create an `HtmlStash`. """
+        self.html_counter = 0  # for counting inline html segments
+        self.rawHtmlBlocks: list[str | etree.Element] = []
+        self.tag_counter = 0
+        self.tag_data: list[TagData] = []  # list of dictionaries in the order tags appear
+
+    def store(self, html: str | etree.Element) -> str:
+        """
+        Saves an HTML segment for later reinsertion. Returns a
+        placeholder string that needs to be inserted into the
+        document.
+
+        Keyword arguments:
+            html: An html segment.
+
+        Returns:
+            A placeholder string.
+
+        """
+        self.rawHtmlBlocks.append(html)
+        placeholder = self.get_placeholder(self.html_counter)
+        self.html_counter += 1
+        return placeholder
+
+    def reset(self) -> None:
+        """ Clear the stash. """
+        self.html_counter = 0
+        self.rawHtmlBlocks = []
+
+    def get_placeholder(self, key: int) -> str:
+        return HTML_PLACEHOLDER % key
+
+    def store_tag(self, tag: str, attrs: dict[str, str], left_index: int, right_index: int) -> str:
+        """Store tag data and return a placeholder."""
+        self.tag_data.append({'tag': tag, 'attrs': attrs,
+                              'left_index': left_index,
+                              'right_index': right_index})
+        placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
+        self.tag_counter += 1  # equal to the tag's index in `self.tag_data`
+        return placeholder
+
+
+# Used internally by `Registry` for each item in its sorted list.
+# Provides an easier to read API when editing the code later.
+# For example, `item.name` is more clear than `item[0]`.
+class _PriorityItem(NamedTuple):
+    name: str
+    priority: float
+
+
+class Registry(Generic[_T]):
+    """
+    A priority sorted registry.
+
+    A `Registry` instance provides two public methods to alter the data of the
+    registry: `register` and `deregister`. Use `register` to add items and
+    `deregister` to remove items. See each method for specifics.
+
+    When registering an item, a "name" and a "priority" must be provided. All
+    items are automatically sorted by "priority" from highest to lowest. The
+    "name" is used to remove ("deregister") and get items.
+
+    A `Registry` instance is like a list (which maintains order) when reading
+    data. You may iterate over the items, get an item and get a count (length)
+    of all items. You may also check that the registry contains an item.
+
+    When getting an item you may use either the index of the item or the
+    string-based "name". For example:
+
+        registry = Registry()
+        registry.register(SomeItem(), 'itemname', 20)
+        # Get the item by index
+        item = registry[0]
+        # Get the item by name
+        item = registry['itemname']
+
+    When checking that the registry contains an item, you may use either the
+    string-based "name", or a reference to the actual item. For example:
+
+        someitem = SomeItem()
+        registry.register(someitem, 'itemname', 20)
+        # Contains the name
+        assert 'itemname' in registry
+        # Contains the item instance
+        assert someitem in registry
+
+    The method `get_index_for_name` is also available to obtain the index of
+    an item using that item's assigned "name".
+    """
+
+    def __init__(self):
+        self._data: dict[str, _T] = {}
+        self._priority: list[_PriorityItem] = []
+        self._is_sorted = False
+
+    def __contains__(self, item: str | _T) -> bool:
+        if isinstance(item, str):
+            # Check if an item exists by this name.
+            return item in self._data.keys()
+        # Check if this instance exists.
+        return item in self._data.values()
+
+    def __iter__(self) -> Iterator[_T]:
+        self._sort()
+        return iter([self._data[k] for k, p in self._priority])
+
+    @overload
+    def __getitem__(self, key: str | int) -> _T:  # pragma: no cover
+        ...
+
+    @overload
+    def __getitem__(self, key: slice) -> Registry[_T]:  # pragma: no cover
+        ...
+
+    def __getitem__(self, key: str | int | slice) -> _T | Registry[_T]:
+        self._sort()
+        if isinstance(key, slice):
+            data: Registry[_T] = Registry()
+            for k, p in self._priority[key]:
+                data.register(self._data[k], k, p)
+            return data
+        if isinstance(key, int):
+            return self._data[self._priority[key].name]
+        return self._data[key]
+
+    def __len__(self) -> int:
+        return len(self._priority)
+
+    def __repr__(self):
+        return '<{}({})>'.format(self.__class__.__name__, list(self))
+
+    def get_index_for_name(self, name: str) -> int:
+        """
+        Return the index of the given name.
+        """
+        if name in self:
+            self._sort()
+            return self._priority.index(
+                [x for x in self._priority if x.name == name][0]
+            )
+        raise ValueError('No item named "{}" exists.'.format(name))
+
+    def register(self, item: _T, name: str, priority: float) -> None:
+        """
+        Add an item to the registry with the given name and priority.
+
+        Arguments:
+            item: The item being registered.
+            name: A string used to reference the item.
+            priority: An integer or float used to sort against all items.
+
+        If an item is registered with a "name" which already exists, the
+        existing item is replaced with the new item. Treat carefully as the
+        old item is lost with no way to recover it. The new item will be
+        sorted according to its priority and will **not** retain the position
+        of the old item.
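+
+        A small sketch of that replacement behavior (names and priorities
+        are made up):
+
+            reg = Registry()
+            reg.register(item_a, 'name', 20)
+            reg.register(item_b, 'name', 5)
+            # `reg['name']` is now `item_b`, sorted by priority 5, not 20.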
+ """ + if name in self: + # Remove existing item of same name first + self.deregister(name) + self._is_sorted = False + self._data[name] = item + self._priority.append(_PriorityItem(name, priority)) + + def deregister(self, name: str, strict: bool = True) -> None: + """ + Remove an item from the registry. + + Set `strict=False` to fail silently. Otherwise a [`ValueError`][] is raised for an unknown `name`. + """ + try: + index = self.get_index_for_name(name) + del self._priority[index] + del self._data[name] + except ValueError: + if strict: + raise + + def _sort(self) -> None: + """ + Sort the registry by priority from highest to lowest. + + This method is called internally and should never be explicitly called. + """ + if not self._is_sorted: + self._priority.sort(key=lambda item: item.priority, reverse=True) + self._is_sorted = True diff --git a/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/METADATA b/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..0f2b466a638a34e304c69fb7976ab05a736d9ab8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/METADATA @@ -0,0 +1,219 @@ +Metadata-Version: 2.4 +Name: markdown-it-py +Version: 4.0.0 +Summary: Python port of markdown-it. Markdown parsing, done right! 
+Keywords: markdown,lexer,parser,commonmark,markdown-it +Author-email: Chris Sewell +Requires-Python: >=3.10 +Description-Content-Type: text/markdown +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup +License-File: LICENSE +License-File: LICENSE.markdown-it +Requires-Dist: mdurl~=0.1 +Requires-Dist: psutil ; extra == "benchmarking" +Requires-Dist: pytest ; extra == "benchmarking" +Requires-Dist: pytest-benchmark ; extra == "benchmarking" +Requires-Dist: commonmark~=0.9 ; extra == "compare" +Requires-Dist: markdown~=3.4 ; extra == "compare" +Requires-Dist: mistletoe~=1.0 ; extra == "compare" +Requires-Dist: mistune~=3.0 ; extra == "compare" +Requires-Dist: panflute~=2.3 ; extra == "compare" +Requires-Dist: markdown-it-pyrs ; extra == "compare" +Requires-Dist: linkify-it-py>=1,<3 ; extra == "linkify" +Requires-Dist: mdit-py-plugins>=0.5.0 ; extra == "plugins" +Requires-Dist: gprof2dot ; extra == "profiling" +Requires-Dist: mdit-py-plugins>=0.5.0 ; extra == "rtd" +Requires-Dist: myst-parser ; extra == "rtd" +Requires-Dist: pyyaml ; extra == "rtd" +Requires-Dist: sphinx ; extra == "rtd" +Requires-Dist: sphinx-copybutton ; extra == "rtd" +Requires-Dist: sphinx-design ; extra == "rtd" +Requires-Dist: sphinx-book-theme~=1.0 ; extra == "rtd" +Requires-Dist: jupyter_sphinx ; extra == "rtd" +Requires-Dist: ipykernel ; extra == "rtd" +Requires-Dist: coverage ; extra == "testing" +Requires-Dist: pytest ; extra == "testing" +Requires-Dist: pytest-cov ; extra == "testing" +Requires-Dist: pytest-regressions ; extra == "testing" +Requires-Dist: requests ; extra == "testing" +Project-URL: Documentation, https://markdown-it-py.readthedocs.io +Project-URL: Homepage, https://github.com/executablebooks/markdown-it-py +Provides-Extra: benchmarking +Provides-Extra: compare +Provides-Extra: linkify +Provides-Extra: plugins +Provides-Extra: profiling +Provides-Extra: rtd +Provides-Extra: testing + +# markdown-it-py + +[![Github-CI][github-ci]][github-link] +[![Coverage Status][codecov-badge]][codecov-link] +[![PyPI][pypi-badge]][pypi-link] +[![Conda][conda-badge]][conda-link] +[![PyPI - Downloads][install-badge]][install-link] + +

+<!-- markdown-it-py icon -->
    + +> Markdown parser done right. + +- Follows the __[CommonMark spec](http://spec.commonmark.org/)__ for baseline parsing +- Configurable syntax: you can add new rules and even replace existing ones. +- Pluggable: Adds syntax extensions to extend the parser (see the [plugin list][md-plugins]). +- High speed (see our [benchmarking tests][md-performance]) +- Easy to configure for [security][md-security] +- Member of [Google's Assured Open Source Software](https://cloud.google.com/assured-open-source-software/docs/supported-packages) + +This is a Python port of [markdown-it], and some of its associated plugins. +For more details see: . + +For details on [markdown-it] itself, see: + +- The __[Live demo](https://markdown-it.github.io)__ +- [The markdown-it README][markdown-it-readme] + +**See also:** [markdown-it-pyrs](https://github.com/chrisjsewell/markdown-it-pyrs) for an experimental Rust binding, +for even more speed! + +## Installation + +### PIP + +```bash +pip install markdown-it-py[plugins] +``` + +or with extras + +```bash +pip install markdown-it-py[linkify,plugins] +``` + +### Conda + +```bash +conda install -c conda-forge markdown-it-py +``` + +or with extras + +```bash +conda install -c conda-forge markdown-it-py linkify-it-py mdit-py-plugins +``` + +## Usage + +### Python API Usage + +Render markdown to HTML with markdown-it-py and a custom configuration +with and without plugins and features: + +```python +from markdown_it import MarkdownIt +from mdit_py_plugins.front_matter import front_matter_plugin +from mdit_py_plugins.footnote import footnote_plugin + +md = ( + MarkdownIt('commonmark', {'breaks':True,'html':True}) + .use(front_matter_plugin) + .use(footnote_plugin) + .enable('table') +) +text = (""" +--- +a: 1 +--- + +a | b +- | - +1 | 2 + +A footnote [^1] + +[^1]: some details +""") +tokens = md.parse(text) +html_text = md.render(text) + +## To export the html to a file, uncomment the lines below: +# from pathlib import Path +# Path("output.html").write_text(html_text) +``` + +### Command-line Usage + +Render markdown to HTML with markdown-it-py from the +command-line: + +```console +usage: markdown-it [-h] [-v] [filenames [filenames ...]] + +Parse one or more markdown files, convert each to HTML, and print to stdout + +positional arguments: + filenames specify an optional list of files to convert + +optional arguments: + -h, --help show this help message and exit + -v, --version show program's version number and exit + +Interactive: + + $ markdown-it + markdown-it-py [version 0.0.0] (interactive) + Type Ctrl-D to complete input, or Ctrl-C to exit. + >>> # Example + ... > markdown *input* + ... +

    +    <h1>Example</h1>
    +    <blockquote>
    +    <p>markdown <em>input</em></p>
    +    </blockquote>
    + +Batch: + + $ markdown-it README.md README.footer.md > index.html + +``` + +## References / Thanks + +Big thanks to the authors of [markdown-it]: + +- Alex Kocharin [github/rlidwka](https://github.com/rlidwka) +- Vitaly Puzrin [github/puzrin](https://github.com/puzrin) + +Also [John MacFarlane](https://github.com/jgm) for his work on the CommonMark spec and reference implementations. + +[github-ci]: https://github.com/executablebooks/markdown-it-py/actions/workflows/tests.yml/badge.svg?branch=master +[github-link]: https://github.com/executablebooks/markdown-it-py +[pypi-badge]: https://img.shields.io/pypi/v/markdown-it-py.svg +[pypi-link]: https://pypi.org/project/markdown-it-py +[conda-badge]: https://anaconda.org/conda-forge/markdown-it-py/badges/version.svg +[conda-link]: https://anaconda.org/conda-forge/markdown-it-py +[codecov-badge]: https://codecov.io/gh/executablebooks/markdown-it-py/branch/master/graph/badge.svg +[codecov-link]: https://codecov.io/gh/executablebooks/markdown-it-py +[install-badge]: https://img.shields.io/pypi/dw/markdown-it-py?label=pypi%20installs +[install-link]: https://pypistats.org/packages/markdown-it-py + +[CommonMark spec]: http://spec.commonmark.org/ +[markdown-it]: https://github.com/markdown-it/markdown-it +[markdown-it-readme]: https://github.com/markdown-it/markdown-it/blob/master/README.md +[md-security]: https://markdown-it-py.readthedocs.io/en/latest/security.html +[md-performance]: https://markdown-it-py.readthedocs.io/en/latest/performance.html +[md-plugins]: https://markdown-it-py.readthedocs.io/en/latest/plugins.html + diff --git a/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/RECORD b/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..227a227ce9cf431c133adb14873366fa684a1fde --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/RECORD @@ -0,0 +1,77 @@ +../../../bin/markdown-it,sha256=oWBAS-GjfhHHFV03g5rvCCRhlmJYPw5gn0j0XFK8m8s,335 +markdown_it/__init__.py,sha256=R7fMvDxageYJ4Q6doBcimogy1ctcV1eBuCFu5Pr8bbA,114 +markdown_it/_compat.py,sha256=U4S_2y3zgLZVfMenHRaJFBW8yqh2mUBuI291LGQVOJ8,35 +markdown_it/_punycode.py,sha256=JvSOZJ4VKr58z7unFGM0KhfTxqHMk2w8gglxae2QszM,2373 +markdown_it/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +markdown_it/cli/parse.py,sha256=Un3N7fyGHhZAQouGVnRx-WZcpKwEK2OF08rzVAEBie8,2881 +markdown_it/common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +markdown_it/common/entities.py,sha256=EYRCmUL7ZU1FRGLSXQlPx356lY8EUBdFyx96eSGc6d0,157 +markdown_it/common/html_blocks.py,sha256=QXbUDMoN9lXLgYFk2DBYllnLiFukL6dHn2X98Y6Wews,986 +markdown_it/common/html_re.py,sha256=FggAEv9IL8gHQqsGTkHcf333rTojwG0DQJMH9oVu0fU,926 +markdown_it/common/normalize_url.py,sha256=avOXnLd9xw5jU1q5PLftjAM9pvGx8l9QDEkmZSyrMgg,2568 +markdown_it/common/utils.py,sha256=pMgvMOE3ZW-BdJ7HfuzlXNKyD1Ivk7jHErc2J_B8J5M,8734 +markdown_it/helpers/__init__.py,sha256=YH2z7dS0WUc_9l51MWPvrLtFoBPh4JLGw58OuhGRCK0,253 +markdown_it/helpers/parse_link_destination.py,sha256=u-xxWVP3g1s7C1bQuQItiYyDrYoYHJzXaZXPgr-o6mY,1906 +markdown_it/helpers/parse_link_label.py,sha256=PIHG6ZMm3BUw0a2m17lCGqNrl3vaz911tuoGviWD3I4,1037 +markdown_it/helpers/parse_link_title.py,sha256=jkLoYQMKNeX9bvWQHkaSroiEo27HylkEUNmj8xBRlp4,2273 +markdown_it/main.py,sha256=vzuT23LJyKrPKNyHKKAbOHkNWpwIldOGUM-IGsv2DHM,12732 +markdown_it/parser_block.py,sha256=-MyugXB63Te71s4NcSQZiK5bE6BHkdFyZv_bviuatdI,3939 
+markdown_it/parser_core.py,sha256=SRmJjqe8dC6GWzEARpWba59cBmxjCr3Gsg8h29O8sQk,1016 +markdown_it/parser_inline.py,sha256=y0jCig8CJxQO7hBz0ZY3sGvPlAKTohOwIgaqnlSaS5A,5024 +markdown_it/port.yaml,sha256=jt_rdwOnfocOV5nc35revTybAAQMIp_-1fla_527sVE,2447 +markdown_it/presets/__init__.py,sha256=22vFtwJEY7iqFRtgVZ-pJthcetfpr1Oig8XOF9x1328,970 +markdown_it/presets/commonmark.py,sha256=ygfb0R7WQ_ZoyQP3df-B0EnYMqNXCVOSw9SAdMjsGow,2869 +markdown_it/presets/default.py,sha256=FfKVUI0HH3M-_qy6RwotLStdC4PAaAxE7Dq0_KQtRtc,1811 +markdown_it/presets/zero.py,sha256=okXWTBEI-2nmwx5XKeCjxInRf65oC11gahtRl-QNtHM,2113 +markdown_it/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 +markdown_it/renderer.py,sha256=Lzr0glqd5oxFL10DOfjjW8kg4Gp41idQ4viEQaE47oA,9947 +markdown_it/ruler.py,sha256=eMAtWGRAfSM33aiJed0k5923BEkuMVsMq1ct8vU-ql4,9142 +markdown_it/rules_block/__init__.py,sha256=SQpg0ocmsHeILPAWRHhzgLgJMKIcNkQyELH13o_6Ktc,553 +markdown_it/rules_block/blockquote.py,sha256=7uymS36dcrned3DsIaRcqcbFU1NlymhvsZpEXTD3_n8,8887 +markdown_it/rules_block/code.py,sha256=iTAxv0U1-MDhz88M1m1pi2vzOhEMSEROsXMo2Qq--kU,860 +markdown_it/rules_block/fence.py,sha256=BJgU-PqZ4vAlCqGcrc8UtdLpJJyMeRWN-G-Op-zxrMc,2537 +markdown_it/rules_block/heading.py,sha256=4Lh15rwoVsQjE1hVhpbhidQ0k9xKHihgjAeYSbwgO5k,1745 +markdown_it/rules_block/hr.py,sha256=QCoY5kImaQRvF7PyP8OoWft6A8JVH1v6MN-0HR9Ikpg,1227 +markdown_it/rules_block/html_block.py,sha256=wA8pb34LtZr1BkIATgGKQBIGX5jQNOkwZl9UGEqvb5M,2721 +markdown_it/rules_block/lheading.py,sha256=fWoEuUo7S2svr5UMKmyQMkh0hheYAHg2gMM266Mogs4,2625 +markdown_it/rules_block/list.py,sha256=gIodkAJFyOIyKCZCj5lAlL7jIj5kAzrDb-K-2MFNplY,9668 +markdown_it/rules_block/paragraph.py,sha256=9pmCwA7eMu4LBdV4fWKzC4EdwaOoaGw2kfeYSQiLye8,1819 +markdown_it/rules_block/reference.py,sha256=ue1qZbUaUP0GIvwTjh6nD1UtCij8uwsIMuYW1xBkckc,6983 +markdown_it/rules_block/state_block.py,sha256=HowsQyy5hGUibH4HRZWKfLIlXeDUnuWL7kpF0-rSwoM,8422 +markdown_it/rules_block/table.py,sha256=8nMd9ONGOffER7BXmc9kbbhxkLjtpX79dVLR0iatGnM,7682 +markdown_it/rules_core/__init__.py,sha256=QFGBe9TUjnRQJDU7xY4SQYpxyTHNwg8beTSwXpNGRjE,394 +markdown_it/rules_core/block.py,sha256=0_JY1CUy-H2OooFtIEZAACtuoGUMohgxo4Z6A_UinSg,372 +markdown_it/rules_core/inline.py,sha256=9oWmeBhJHE7x47oJcN9yp6UsAZtrEY_A-VmfoMvKld4,325 +markdown_it/rules_core/linkify.py,sha256=mjQqpk_lHLh2Nxw4UFaLxa47Fgi-OHnmDamlgXnhmv0,5141 +markdown_it/rules_core/normalize.py,sha256=AJm4femtFJ_QBnM0dzh0UNqTTJk9K6KMtwRPaioZFqM,403 +markdown_it/rules_core/replacements.py,sha256=CH75mie-tdzdLKQtMBuCTcXAl1ijegdZGfbV_Vk7st0,3471 +markdown_it/rules_core/smartquotes.py,sha256=izK9fSyuTzA-zAUGkRkz9KwwCQWo40iRqcCKqOhFbEE,7443 +markdown_it/rules_core/state_core.py,sha256=HqWZCUr5fW7xG6jeQZDdO0hE9hxxyl3_-bawgOy57HY,570 +markdown_it/rules_core/text_join.py,sha256=rLXxNuLh_es5RvH31GsXi7en8bMNO9UJ5nbJMDBPltY,1173 +markdown_it/rules_inline/__init__.py,sha256=qqHZk6-YE8Rc12q6PxvVKBaxv2wmZeeo45H1XMR_Vxs,696 +markdown_it/rules_inline/autolink.py,sha256=pPoqJY8i99VtFn7KgUzMackMeq1hytzioVvWs-VQPRo,2065 +markdown_it/rules_inline/backticks.py,sha256=J7bezjjNxiXlKqvHc0fJkHZwH7-2nBsXVjcKydk8E4M,2037 +markdown_it/rules_inline/balance_pairs.py,sha256=5zgBiGidqdiWmt7Io_cuZOYh5EFEfXrYRce8RXg5m7o,4852 +markdown_it/rules_inline/emphasis.py,sha256=7aDLZx0Jlekuvbu3uEUTDhJp00Z0Pj6g4C3-VLhI8Co,3123 +markdown_it/rules_inline/entity.py,sha256=CE8AIGMi5isEa24RNseo0wRmTTaj5YLbgTFdDmBesAU,1651 +markdown_it/rules_inline/escape.py,sha256=KGulwrP5FnqZM7GXY8lf7pyVv0YkR59taZDeHb5cmKg,1659 
+markdown_it/rules_inline/fragments_join.py,sha256=_3JbwWYJz74gRHeZk6T8edVJT2IVSsi7FfmJJlieQlA,1493 +markdown_it/rules_inline/html_inline.py,sha256=SBg6HR0HRqCdrkkec0dfOYuQdAqyfeLRFLeQggtgjvg,1130 +markdown_it/rules_inline/image.py,sha256=Wbsg7jgnOtKXIwXGNJOlG7ORThkMkBVolxItC0ph6C0,4141 +markdown_it/rules_inline/link.py,sha256=2oD-fAdB0xyxDRtZLTjzLeWbzJ1k9bbPVQmohb58RuI,4258 +markdown_it/rules_inline/linkify.py,sha256=ifH6sb5wE8PGMWEw9Sr4x0DhMVfNOEBCfFSwKll2O-s,1706 +markdown_it/rules_inline/newline.py,sha256=329r0V3aDjzNtJcvzA3lsFYjzgBrShLAV5uf9hwQL_M,1297 +markdown_it/rules_inline/state_inline.py,sha256=d-menFzbz5FDy1JNgGBF-BASasnVI-9RuOxWz9PnKn4,5003 +markdown_it/rules_inline/strikethrough.py,sha256=pwcPlyhkh5pqFVxRCSrdW5dNCIOtU4eDit7TVDTPIVA,3214 +markdown_it/rules_inline/text.py,sha256=FQqaQRUqbnMLO9ZSWPWQUMEKH6JqWSSSmlZ5Ii9P48o,1119 +markdown_it/token.py,sha256=cWrt9kodfPdizHq_tYrzyIZNtJYNMN1813DPNlunwTg,6381 +markdown_it/tree.py,sha256=56Cdbwu2Aiks7kNYqO_fQZWpPb_n48CUllzjQQfgu1Y,11111 +markdown_it/utils.py,sha256=lVLeX7Af3GaNFfxmMgUbsn5p7cXbwhLq7RSf56UWuRE,5687 +markdown_it_py-4.0.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +markdown_it_py-4.0.0.dist-info/METADATA,sha256=6fyqHi2vP5bYQKCfuqo5T-qt83o22Ip7a2tnJIfGW_s,7288 +markdown_it_py-4.0.0.dist-info/RECORD,, +markdown_it_py-4.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +markdown_it_py-4.0.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82 +markdown_it_py-4.0.0.dist-info/entry_points.txt,sha256=T81l7fHQ3pllpQ4wUtQK6a8g_p6wxQbnjKVHCk2WMG4,58 +markdown_it_py-4.0.0.dist-info/licenses/LICENSE,sha256=SiJg1uLND1oVGh6G2_59PtVSseK-q_mUHBulxJy85IQ,1078 +markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it,sha256=eSxIxahJoV_fnjfovPnm0d0TsytGxkKnSKCkapkZ1HM,1073 diff --git a/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/WHEEL b/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..d8b9936dad9ab2513fa6979f411560d3b6b57e37 --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.12.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/entry_points.txt b/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..7d829cd792a5d754844f433c6a8dd499564fdcbf --- /dev/null +++ b/py311/lib/python3.11/site-packages/markdown_it_py-4.0.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +markdown-it=markdown_it.cli.parse:main + diff --git a/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/METADATA b/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..8e5b697937d2a60d35db44aaad418900592b363b --- /dev/null +++ b/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/METADATA @@ -0,0 +1,137 @@ +Metadata-Version: 2.4 +Name: moviepy +Version: 2.2.1 +Summary: Video editing with Python +Author: Zulko 2024 +License: MIT License +Keywords: 
video,editing,audio,compositing,ffmpeg +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Natural Language :: English +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Multimedia +Classifier: Topic :: Multimedia :: Sound/Audio +Classifier: Topic :: Multimedia :: Sound/Audio :: Analysis +Classifier: Topic :: Multimedia :: Video +Classifier: Topic :: Multimedia :: Video :: Capture +Classifier: Topic :: Multimedia :: Video :: Conversion +Description-Content-Type: text/markdown +License-File: LICENCE.txt +Requires-Dist: decorator<6.0,>=4.0.2 +Requires-Dist: imageio<3.0,>=2.5 +Requires-Dist: imageio_ffmpeg>=0.2.0 +Requires-Dist: numpy>=1.25.0 +Requires-Dist: proglog<=1.0.0 +Requires-Dist: python-dotenv>=0.10 +Requires-Dist: pillow<12.0,>=9.2.0 +Provides-Extra: doc +Requires-Dist: numpydoc<2.0; extra == "doc" +Requires-Dist: Sphinx==6.*; extra == "doc" +Requires-Dist: pydata-sphinx-theme==0.13; extra == "doc" +Requires-Dist: sphinx_design; extra == "doc" +Provides-Extra: test +Requires-Dist: coveralls<4.0,>=3.0; extra == "test" +Requires-Dist: pytest-cov<3.0,>=2.5.1; extra == "test" +Requires-Dist: pytest<7.0.0,>=3.0.0; extra == "test" +Provides-Extra: lint +Requires-Dist: black>=23.7.0; extra == "lint" +Requires-Dist: flake8>=6.0.0; extra == "lint" +Requires-Dist: flake8-absolute-import>=1.0; extra == "lint" +Requires-Dist: flake8-docstrings>=1.7.0; extra == "lint" +Requires-Dist: flake8-rst-docstrings>=0.3; extra == "lint" +Requires-Dist: flake8-implicit-str-concat==0.4.0; extra == "lint" +Requires-Dist: isort>=5.12; extra == "lint" +Requires-Dist: pre-commit>=3.3; extra == "lint" +Dynamic: license-file + +# MoviePy + + +[![MoviePy page on the Python Package Index](https://badge.fury.io/py/moviepy.svg)](https://pypi.org/project/moviepy/) [![Discuss MoviePy on Gitter](https://img.shields.io/gitter/room/movie-py/gitter?color=46BC99&logo=gitter)](Gitter_) [![Build status on gh-actions](https://img.shields.io/github/actions/workflow/status/Zulko/moviepy/test_suite.yml?logo=github)](https://github.com/Zulko/moviepy/actions/workflows/test_suite.yml) [![Code coverage from coveralls.io](https://img.shields.io/coveralls/github/Zulko/moviepy/master?logo=coveralls)](https://coveralls.io/github/Zulko/moviepy?branch=master) + +> [!NOTE] +> MoviePy recently upgraded to v2.0, introducing major breaking changes. You can consult the last v1 docs [here](https://zulko.github.io/moviepy/v1.0.3/) but beware that v1 is no longer maintained. For more info on how to update your code from v1 to v2, see [this guide](https://zulko.github.io/moviepy/getting_started/updating_to_v2.html). + +MoviePy (online documentation [here](https://zulko.github.io/moviepy/)) is a Python library for video editing: cuts, concatenations, title insertions, video compositing (a.k.a. non-linear editing), video processing, and creation of custom effects. + +MoviePy can read and write all the most common audio and video formats, including GIF, and runs on Windows/Mac/Linux, with Python 3.9+. 
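+
+Before the full example below, here is a minimal sketch of the frame-level
+access described under "How MoviePy works": frames are plain numpy arrays,
+so a custom effect is just a function over arrays (the file name
+`example.mp4` is an illustrative placeholder):
+
+``` python
+import numpy as np
+
+from moviepy import VideoFileClip
+
+clip = VideoFileClip("example.mp4").subclipped(0, 5)
+
+# Every frame is a (height, width, 3) uint8 numpy array
+frame = clip.get_frame(t=1.0)
+print(frame.shape, frame.dtype)
+
+# A custom effect maps one frame array to another; image_transform
+# applies it to every frame of the clip
+def boost_red(frame):
+    out = frame.astype(np.float32)
+    out[:, :, 0] *= 1.2  # scale the red channel
+    return np.clip(out, 0, 255).astype(np.uint8)
+
+clip.image_transform(boost_red).write_videofile("boosted.mp4")
+```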
+ +# Example + +In this example we open a video file, select the subclip between 10 and +20 seconds, add a title at the center of the screen, and write the +result to a new file: + +``` python +from moviepy import VideoFileClip, TextClip, CompositeVideoClip + +# Load file long_examples/example2.mp4 and keep only the subclip from 00:00:10 to 00:00:20 +# Reduce the audio volume to 80% of its original volume + +clip = ( +    VideoFileClip("long_examples/example2.mp4") +    .subclipped(10, 20) +    .with_volume_scaled(0.8) +) + +# Generate a text clip. You can customize the font, color, etc. +txt_clip = TextClip( +    font="Arial.ttf", +    text="Hello there!", +    font_size=70, +    color='white' +).with_duration(10).with_position('center') + +# Overlay the text clip on the first video clip +final_video = CompositeVideoClip([clip, txt_clip]) +final_video.write_videofile("result.mp4") +``` + +# How MoviePy works + +Under the hood, MoviePy imports media (video frames, images, sounds) and converts them into Python objects (numpy arrays) so that every pixel becomes accessible, and video or audio effects can be defined in just a few lines of code (see the [built-in effects]() for examples). + +The library also provides ways to mix clips together (concatenations, playing clips side by side or on top of each other with transparency, etc.). The final clip is then encoded back into mp4/webm/gif/etc. + +This makes MoviePy very flexible and approachable, albeit slower than using ffmpeg directly due to heavier data import/export operations. + + +# Installation + +Install moviepy with `pip install moviepy`. For additional installation options, such as a custom FFMPEG or for previewing, see [this section](https://zulko.github.io/moviepy/getting_started/install.html). For development, clone this repo locally and install with `pip install -e .` + +# Documentation + +The online documentation ([here](https://zulko.github.io/moviepy/)) is automatically built at every push to the master branch. To build the documentation locally, install the extra dependencies via `pip install moviepy[doc]`, then go to the `docs` folder and run `make html`. + +# Contribute + +MoviePy is open-source software originally written by +[Zulko](https://github.com/Zulko) and released under the MIT licence. +The project is hosted on [GitHub](https://github.com/Zulko/moviepy), +where everyone is welcome to contribute and open issues or give feedback. Please read our [Contributing +Guidelines](https://github.com/Zulko/moviepy/blob/master/CONTRIBUTING.md). +To ask for help or simply discuss usage and examples, use [our Reddit channel](https://www.reddit.com/r/moviepy/). + +# Maintainers + +## Active maintainers +- [Zulko](https://github.com/Zulko) (owner) +- [@osaajani](https://github.com/OsaAjani) led the development of v2 ([MR](https://github.com/Zulko/moviepy/pull/2024)) +- [@tburrows13](https://github.com/tburrows13) +- [@keikoro](https://github.com/keikoro) + +## Past maintainers and thanks +- [@mgaitan](https://github.com/mgaitan) +- [@earney](https://github.com/earney) +- [@mbeacom](https://github.com/mbeacom) +- [@overdrivr](https://github.com/overdrivr) +- [@ryanfox](https://github.com/ryanfox) +- [@mondeja](https://github.com/mondeja) + +**Maintainers wanted!** This library has only been kept afloat by the involvement of its maintainers, and there are times when none of us have enough bandwidth. We'd love to hear about developers interested in giving a hand and solving some of the issues (especially the ones that affect you) or reviewing pull requests. 
Open +an issue or contact us directly if you are interested. Thanks! diff --git a/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/RECORD b/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..7c2f92dce98701e6cf266a5b737c4dd4de8af40e --- /dev/null +++ b/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/RECORD @@ -0,0 +1,85 @@ +moviepy-2.2.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +moviepy-2.2.1.dist-info/METADATA,sha256=ugiBQDCjPBllicsjLYdRiyvodLNC6OX23--0ZOUuVWk,6938 +moviepy-2.2.1.dist-info/RECORD,, +moviepy-2.2.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +moviepy-2.2.1.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91 +moviepy-2.2.1.dist-info/licenses/LICENCE.txt,sha256=BezGFEr4PRuHtk8-NvQ2c0msRIavvynlc0G3xHRd04o,1071 +moviepy-2.2.1.dist-info/top_level.txt,sha256=2AuAzBHXS1RtsoIY43fmVUdf9ZmzoOHlkkBOmciPVBU,8 +moviepy/Clip.py,sha256=3Qfset2AGEiMxsHxz67__oMixMUqd-nuVyfjFQ0wOvo,25257 +moviepy/Effect.py,sha256=F722c3zWI7B8tyCM_hC8nB-BIp_W4p0TBwymne7s2Wg,1339 +moviepy/__init__.py,sha256=QGJZXNFKJO0cMbehEhfXxEfkI5OaUZlcr-MFtIVXIm4,1788 +moviepy/audio/AudioClip.py,sha256=W2Cu3Qkv-et8-auFhf9_Xxx9OAYZEKQt09yU_V1MMPc,14047 +moviepy/audio/__init__.py,sha256=08RzvYdmgqAoRiE68WI1MV1RbfNg1aYZqxHLjOE2vdc,43 +moviepy/audio/fx/AudioDelay.py,sha256=FkA7LpSJHfj61G1V6jS1YsXt19k6cHruWOgbNi8OFPU,2245 +moviepy/audio/fx/AudioFadeIn.py,sha256=bZjKlZjeXxaofe9OXVo2K7F9V7O8q43kcM1tKpvUR8c,1595 +moviepy/audio/fx/AudioFadeOut.py,sha256=4TQFeHggoQCVa3SztcbVUL-87zWPzDfslwwA9zaRRDw,1750 +moviepy/audio/fx/AudioLoop.py,sha256=MPGaVP3C2Y35El1hGbaIleNN1C5mSAY342Cc5rFZiQM,1140 +moviepy/audio/fx/AudioNormalize.py,sha256=PxncuqLqRvpQvrR5IShhrd4SKYZfMN4tKwSqG72Phc4,889 +moviepy/audio/fx/MultiplyStereoVolume.py,sha256=FKx7cOtzXZZTIHEa54C-DtLX1xhUtc2iLzgqWTWWudg,1424 +moviepy/audio/fx/MultiplyVolume.py,sha256=z3b50RCjhCAZ2mD9uL87L7KJ2bvDIuvpERGaDLS6d8w,2931 +moviepy/audio/fx/__init__.py,sha256=sT1ZiG1Nz5mK4PCBWM5RLZJer33CtbaMsgXd36Xipsg,670 +moviepy/audio/io/AudioFileClip.py,sha256=ZmUAnK6f491pCUvOeSIHiaiQzO8LZDGen8lMgdVbn3c,2380 +moviepy/audio/io/__init__.py,sha256=Kj1pTpGv_4VkM1_W-Xq3D5OtK0s9guUNyezrMpIiUrY,60 +moviepy/audio/io/ffmpeg_audiowriter.py,sha256=acQwHERjmcQJa0OxBnqXIoLL0D-kT0d_0gDtlvwhy8M,7323 +moviepy/audio/io/ffplay_audiopreviewer.py,sha256=qZ0eVhoJdS-EBSijlLL1ZMlqsDwr5uvt_wj4XleVcc8,4908 +moviepy/audio/io/readers.py,sha256=JFHrkBPPgmYAAxAP-dVEwxbtQNgVkT-zTDPhdbMiwZU,11287 +moviepy/audio/tools/__init__.py,sha256=vHGrk5KjWW5INJnChi85Lcq6g68ttWt1B78oJYdIdpE,55 +moviepy/audio/tools/cuts.py,sha256=mIxtVEhFU024EG2s1vLnlhi85g7LDgIYhbZmx3Fj-nc,880 +moviepy/config.py,sha256=n4OOVvge6OMI4I0k2eNVihMYGJS2-ypKW8OA6Li4OOY,2552 +moviepy/decorators.py,sha256=YaOUptOQidcySH7vZsEH3JQcLOzVbXCm5mNKdRuVyfM,4797 +moviepy/tools.py,sha256=AMzABYFNb8d7Jb0Fbs3FGNP5lf6eizkR9r5ZPbVLQvQ,9607 +moviepy/version.py,sha256=UiuBcRXPtXxPUBDdp0ZDvWl0U9Db1kMNfT3oAfhxqLg,22 +moviepy/video/VideoClip.py,sha256=2kRoig10mexsZaot_ZVgV0NzeIVGtb-pXQ4IYngMZkA,70619 +moviepy/video/__init__.py,sha256=0px8pIBdHb2pgpfZXlZldKFlONoPPVpj2m9NqLAAMfY,43 +moviepy/video/compositing/CompositeVideoClip.py,sha256=XT4YTJYrdPB6RT5ENwPPQUAAil6ggtcg3yaQYwu9JoM,13873 +moviepy/video/compositing/__init__.py,sha256=xvUnx206gogQ74h-5hUjlroSURli708GHQE24DAGciM,39 +moviepy/video/fx/AccelDecel.py,sha256=Vh_AGYCKbDL43jZ4DKMgEqazZW2AdZiFryE1iUdZXN8,2333 
+moviepy/video/fx/BlackAndWhite.py,sha256=JJeFY1lGDJZht3SC6xLRbCHEgp7YHlA-ZEizzygj5iY,1011 +moviepy/video/fx/Blink.py,sha256=oI0qRxfjpbDxBpmIJZ8Fyo_ODUwVd2YlefvfdJUNxAs,686 +moviepy/video/fx/Crop.py,sha256=vkenzFq41xpCGFWsVde30FqV4AsH_I144CnKTV2xaM8,2299 +moviepy/video/fx/CrossFadeIn.py,sha256=VFNxPAImfEfbCtSs-g3d6wd3FUiuqlxSxiWoAWh47j0,729 +moviepy/video/fx/CrossFadeOut.py,sha256=9WSmWpKEeqrAJ7IZyJe3zL1ezWP8qbUqGCmTQgWNKmY,736 +moviepy/video/fx/EvenSize.py,sha256=vmYoDxtDsjojxTW1ccbSo055JQGEQPwcuF4W4EFwjrM,765 +moviepy/video/fx/FadeIn.py,sha256=U3Tq8ujq6txX1z5dNURVfetus4HMnCj01FZ54ny6RGQ,1101 +moviepy/video/fx/FadeOut.py,sha256=T0RmTA4p_QoBgBEcbp0_J9ydLLC0NBQwM6PKYvFPwDI,1212 +moviepy/video/fx/Freeze.py,sha256=hJb699kbURTUEzOs5wNoutib6GL9mVNQZkjllssRKME,2082 +moviepy/video/fx/FreezeRegion.py,sha256=6LjwvWwj-QguSGRsfGxNfUY2ot3kuGBivZ-75AGSmcg,2231 +moviepy/video/fx/GammaCorrection.py,sha256=Lgf963HdBBQ8x0YD-ZT_6ObxQtrDhEGcGBBhUtmF710,467 +moviepy/video/fx/HeadBlur.py,sha256=_kBD2MhqLt0EVjcKTF7EqGXBxyoHC9zIYpuEqp42-lQ,1418 +moviepy/video/fx/InvertColors.py,sha256=TBw2FLmi49ssWChlUQ0edwrtxjV9v3kFZaEdi8eVSDA,501 +moviepy/video/fx/Loop.py,sha256=fyyPpJpW9jx_RHC2kjsBDPyimVth_aQsdP8bLTS_Wy4,1065 +moviepy/video/fx/LumContrast.py,sha256=w3-OJvKwtrc7OBG5epu4UX-AcZ1Of_ri4Ne7FrCZEH0,742 +moviepy/video/fx/MakeLoopable.py,sha256=B8CRXOpMGwuBJ1mXkwa0z72GJ72qugP6kNlMJayAvJ4,865 +moviepy/video/fx/Margin.py,sha256=Cg6Vdgul5FuMxJbsT2RWnjk8vT2sJOXo5mpwiu_XcsQ,2772 +moviepy/video/fx/MaskColor.py,sha256=rX8hN2tEBmtkqGWRmvj15LPBtolPWnjPxVEPi4eZ9y0,1305 +moviepy/video/fx/MasksAnd.py,sha256=L3KoneHRmdPDyVOATu_lVxZnolGewAPJaooiJDF5PIg,1492 +moviepy/video/fx/MasksOr.py,sha256=CNAUiRn15idzSi8LJzLvikYo4TVFIdwZy4RkARn5Cwg,1493 +moviepy/video/fx/MirrorX.py,sha256=PKcSaQh7qi4yMCvBzxKLjS8xwbRj1bgx2tSuac_VL_k,449 +moviepy/video/fx/MirrorY.py,sha256=oMcjUWmaZDonDtmUg6Q_RrHbERJKL3SSs60g-S6ndbg,444 +moviepy/video/fx/MultiplyColor.py,sha256=oS1RdqRrGlpeYoINuYcjk9S8vPY_vwO3jVLnOQ0fjVs,557 +moviepy/video/fx/MultiplySpeed.py,sha256=NZs8cqXfdaJbl7OOMNMT5qfJNKLEvFLAc0wPCIKwVf8,947 +moviepy/video/fx/Painting.py,sha256=UODygs-_DwF9fFMI2QrzolmMCn1YNiE-7mstXFJ7dr0,1932 +moviepy/video/fx/Resize.py,sha256=LfPDVUdJ-IdWxYqN-IseOwMHEYlPwUBY9HLzP_G3nVE,4959 +moviepy/video/fx/Rotate.py,sha256=q7AQgOXFuoGhYMu-apsgtBp9F0XR20HpJCuu_bVTmy0,4135 +moviepy/video/fx/Scroll.py,sha256=gtfPfDpqQUui-MOKUPPiICjEDdS0atKu_AhmjZMr-Fo,1432 +moviepy/video/fx/SlideIn.py,sha256=_IUz_KE9Zd2_Mlq_A47afTQ0CrnhzI69IH3U5I7XWlg,1687 +moviepy/video/fx/SlideOut.py,sha256=KiMj-k76EfqIKmXUnw8NFuU-llgmip6Ncq4E97z_krU,1867 +moviepy/video/fx/SuperSample.py,sha256=zuDqcX_qfOn7R4Nbz8iuoA-yfY7Nghps5KFUIkOjZ_c,791 +moviepy/video/fx/TimeMirror.py,sha256=jsxAaCWPqzdD3aNejabPuuhNbx3ju9K_LpJxoeU_WwA,543 +moviepy/video/fx/TimeSymmetrize.py,sha256=lM-x75kYTaBppHeBeCIwERNrFXOK-7N3JtVgaH0d4Pg,639 +moviepy/video/fx/__init__.py,sha256=5WwhMTUh1qVOQm2j7Y-OeLMR4XI3Wgqt3qJfqhr0ld0,2346 +moviepy/video/io/ImageSequenceClip.py,sha256=a7nNL12MPKq56Kv_yKXcC_qJHNVunr6D_UK01l4s3WE,5365 +moviepy/video/io/VideoFileClip.py,sha256=UUOHyBD7fQe5PrweCKiaaSbsb47Gr7UCTMKnlaE-YPY,5568 +moviepy/video/io/__init__.py,sha256=ROtXQYnSURD0_pdF2EC17EVNQ7iemz6bKBD1S4IU8Oc,75 +moviepy/video/io/display_in_notebook.py,sha256=LeXS1pxHjuHGr6-lfFybmdr4fvv5vlCUfuxFc65x76w,9080 +moviepy/video/io/ffmpeg_reader.py,sha256=reGJxrwd_7O85Mki0CBn1wx5fy0lyPtWMzLE5EXCIho,34428 +moviepy/video/io/ffmpeg_tools.py,sha256=X82V8_qDEfucXqsANHMc5vcjYyRnda-Hi9eg_X_82qQ,7697 
+moviepy/video/io/ffmpeg_writer.py,sha256=NH6e5UA6DL_932IF196aizhwi9yYU_Ijg8_kmHr6YtM,11407 +moviepy/video/io/ffplay_previewer.py,sha256=xHBJHnnMgsPUocu1IyMQTK4WVdIEknDIlqa5aOoi6X0,4010 +moviepy/video/io/gif_writers.py,sha256=fk4SJwexBkicjEWWkVzCxRdYwvIQozXkds0PMvWREwI,737 +moviepy/video/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +moviepy/video/tools/credits.py,sha256=YQ-yzOokBopd0x9M70xiwo03osGA4cNbRMvO1eVGEUw,3999 +moviepy/video/tools/cuts.py,sha256=GbvZWzxenax6_Aq7YFe4b_ZuakGvvcSVW_RZUBmi9Qo,17192 +moviepy/video/tools/drawing.py,sha256=XueaSYP_0FMHPxc1Q0u0S-7aoV-8j9kIURnMQauor8I,10231 +moviepy/video/tools/interpolators.py,sha256=99jlmnsgYo_H0FT2ssd_CP6ExhkjFHkwvwRyYRb56KQ,5927 +moviepy/video/tools/subtitles.py,sha256=5dY0at_3SFlVV8mro50s2IElMSghqOEcCl86LTv9wAA,6868 diff --git a/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/WHEEL b/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..870aa26cc780862a689dbd369c2810943a81c9ed --- /dev/null +++ b/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.8.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..a384113ea47832a37586644bf23ab577aeabdb23 --- /dev/null +++ b/py311/lib/python3.11/site-packages/moviepy-2.2.1.dist-info/top_level.txt @@ -0,0 +1 @@ +moviepy diff --git a/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/METADATA b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..98acd814861037e65d72b4ababd2e0e8b86555ab --- /dev/null +++ b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/METADATA @@ -0,0 +1,177 @@ +Metadata-Version: 2.4 +Name: networkx +Version: 3.6.1 +Summary: Python package for creating and manipulating graphs and networks +Author-email: Aric Hagberg +Maintainer-email: NetworkX Developers +License-Expression: BSD-3-Clause +Project-URL: Homepage, https://networkx.org/ +Project-URL: Bug Tracker, https://github.com/networkx/networkx/issues +Project-URL: Documentation, https://networkx.org/documentation/stable/ +Project-URL: Source Code, https://github.com/networkx/networkx +Keywords: Networks,Graph Theory,Mathematics,network,graph,discrete mathematics,math +Platform: Linux +Platform: Mac OSX +Platform: Windows +Platform: Unix +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: Operating System :: OS Independent +Classifier: Programming Language 
:: Python :: 3 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Scientific/Engineering :: Bio-Informatics +Classifier: Topic :: Scientific/Engineering :: Information Analysis +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Physics +Requires-Python: !=3.14.1,>=3.11 +Description-Content-Type: text/x-rst +License-File: LICENSE.txt +Provides-Extra: benchmarking +Requires-Dist: asv; extra == "benchmarking" +Requires-Dist: virtualenv; extra == "benchmarking" +Provides-Extra: default +Requires-Dist: numpy>=1.25; extra == "default" +Requires-Dist: scipy>=1.11.2; extra == "default" +Requires-Dist: matplotlib>=3.8; extra == "default" +Requires-Dist: pandas>=2.0; extra == "default" +Provides-Extra: developer +Requires-Dist: pre-commit>=4.1; extra == "developer" +Requires-Dist: mypy>=1.15; extra == "developer" +Provides-Extra: doc +Requires-Dist: sphinx>=8.0; extra == "doc" +Requires-Dist: pydata-sphinx-theme>=0.16; extra == "doc" +Requires-Dist: sphinx-gallery>=0.18; extra == "doc" +Requires-Dist: numpydoc>=1.8.0; extra == "doc" +Requires-Dist: pillow>=10; extra == "doc" +Requires-Dist: texext>=0.6.7; extra == "doc" +Requires-Dist: myst-nb>=1.1; extra == "doc" +Requires-Dist: intersphinx-registry; extra == "doc" +Provides-Extra: example +Requires-Dist: osmnx>=2.0.0; extra == "example" +Requires-Dist: momepy>=0.7.2; extra == "example" +Requires-Dist: contextily>=1.6; extra == "example" +Requires-Dist: seaborn>=0.13; extra == "example" +Requires-Dist: cairocffi>=1.7; extra == "example" +Requires-Dist: igraph>=0.11; extra == "example" +Requires-Dist: scikit-learn>=1.5; extra == "example" +Requires-Dist: iplotx>=0.9.0; extra == "example" +Provides-Extra: extra +Requires-Dist: lxml>=4.6; extra == "extra" +Requires-Dist: pygraphviz>=1.14; extra == "extra" +Requires-Dist: pydot>=3.0.1; extra == "extra" +Requires-Dist: sympy>=1.10; extra == "extra" +Provides-Extra: release +Requires-Dist: build>=0.10; extra == "release" +Requires-Dist: twine>=4.0; extra == "release" +Requires-Dist: wheel>=0.40; extra == "release" +Requires-Dist: changelist==0.5; extra == "release" +Provides-Extra: test +Requires-Dist: pytest>=7.2; extra == "test" +Requires-Dist: pytest-cov>=4.0; extra == "test" +Requires-Dist: pytest-xdist>=3.0; extra == "test" +Provides-Extra: test-extras +Requires-Dist: pytest-mpl; extra == "test-extras" +Requires-Dist: pytest-randomly; extra == "test-extras" +Dynamic: license-file + +NetworkX +======== + + +.. image:: + https://github.com/networkx/networkx/actions/workflows/test.yml/badge.svg?branch=main + :target: https://github.com/networkx/networkx/actions/workflows/test.yml + +.. image:: + https://img.shields.io/pypi/v/networkx.svg? + :target: https://pypi.python.org/pypi/networkx + +.. image:: + https://img.shields.io/pypi/l/networkx.svg? + :target: https://github.com/networkx/networkx/blob/main/LICENSE.txt + +.. image:: + https://img.shields.io/pypi/pyversions/networkx.svg? + :target: https://pypi.python.org/pypi/networkx + +.. image:: + https://img.shields.io/github/labels/networkx/networkx/good%20first%20issue?color=green&label=contribute + :target: https://github.com/networkx/networkx/contribute + +.. 
image:: + https://insights.linuxfoundation.org/api/badge/health-score?project=networkx + :target: https://insights.linuxfoundation.org/project/networkx + + +NetworkX is a Python package for the creation, manipulation, +and study of the structure, dynamics, and functions +of complex networks. + +- **Website (including documentation):** https://networkx.org +- **Mailing list:** https://groups.google.com/forum/#!forum/networkx-discuss +- **Source:** https://github.com/networkx/networkx +- **Bug reports:** https://github.com/networkx/networkx/issues +- **Report a security vulnerability:** https://tidelift.com/security +- **Tutorial:** https://networkx.org/documentation/latest/tutorial.html +- **GitHub Discussions:** https://github.com/networkx/networkx/discussions +- **Discord (Scientific Python) invite link:** https://discord.com/invite/vur45CbwMz +- **NetworkX meetings calendar (open to all):** https://scientific-python.org/calendars/networkx.ics + +Simple example +-------------- + +Find the shortest path between two nodes in an undirected graph: + +.. code:: pycon + + >>> import networkx as nx + >>> G = nx.Graph() + >>> G.add_edge("A", "B", weight=4) + >>> G.add_edge("B", "D", weight=2) + >>> G.add_edge("A", "C", weight=3) + >>> G.add_edge("C", "D", weight=4) + >>> nx.shortest_path(G, "A", "D", weight="weight") + ['A', 'B', 'D'] + +Install +------- + +Install the latest released version of NetworkX: + +.. code:: shell + + $ pip install networkx + +Install with all optional dependencies: + +.. code:: shell + + $ pip install networkx[default] + +For additional details, +please see the `installation guide <https://networkx.org/documentation/stable/install.html>`_. + +Bugs +---- + +Please report any bugs that you find `here <https://github.com/networkx/networkx/issues>`_. +Or, even better, fork the repository on `GitHub <https://github.com/networkx/networkx>`_ +and create a pull request (PR). We welcome all changes, big or small, and we +will help you make the PR if you are new to `git` (just ask on the issue and/or +see the `contributor guide `_). 
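+
+Drawing
+-------
+
+The optional ``default`` dependencies above (numpy, scipy, matplotlib,
+pandas) also enable drawing. A minimal sketch, reusing the weighted graph
+from the simple example (``nx.draw`` and matplotlib are the only additions):
+
+.. code:: python
+
+   import matplotlib.pyplot as plt  # installed via networkx[default]
+   import networkx as nx
+
+   # The same weighted graph as in the simple example above
+   G = nx.Graph()
+   G.add_weighted_edges_from(
+       [("A", "B", 4), ("B", "D", 2), ("A", "C", 3), ("C", "D", 4)]
+   )
+
+   nx.draw(G, with_labels=True)  # render nodes, edges and labels
+   plt.savefig("graph.png")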
+ +License +------- + +Released under the `3-clause BSD license `_:: + + Copyright (c) 2004-2025, NetworkX Developers + Aric Hagberg + Dan Schult + Pieter Swart diff --git a/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/RECORD b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..16a23663c7dac2527bbeb905e06fa0fc6114e0f0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/RECORD @@ -0,0 +1,603 @@ +networkx-3.6.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +networkx-3.6.1.dist-info/METADATA,sha256=rKXZSpfR9w8wHQM63bY19uZr6Jc6AKukEnY37tLvMWo,6783 +networkx-3.6.1.dist-info/RECORD,, +networkx-3.6.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx-3.6.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +networkx-3.6.1.dist-info/entry_points.txt,sha256=H2jZaDsDJ_i9H2SwWpwuFel8BrZ9xHKuvh-DQAWW9lQ,94 +networkx-3.6.1.dist-info/licenses/LICENSE.txt,sha256=PPfDoXnYF7I7i4VoIRdp_v35N5fSB9mEuLL6JNAiCzM,1763 +networkx-3.6.1.dist-info/top_level.txt,sha256=s3Mk-7KOlu-kD39w8Xg_KXoP5Z_MVvgB-upkyuOE4Hk,9 +networkx/__init__.py,sha256=OVN8C-x44LgeJWKcb77v-WwfMV7EYqieafkoQdlAl5E,1625 +networkx/algorithms/__init__.py,sha256=sXdgBEDG0qlsOAymWl9zyT-c_luwOQOhWuus6quro0A,6607 +networkx/algorithms/approximation/__init__.py,sha256=1W0c3YlSfVQtNI4-WiQdzMfPqmcrCar5hJQmrwcsVoI,1234 +networkx/algorithms/approximation/clique.py,sha256=b4cnWMJXmmgCyjMI8A_doHZeKS_RQbGqm2L01OpT_Jg,7691 +networkx/algorithms/approximation/clustering_coefficient.py,sha256=SWpSLEhW3DJc1n2fHlSbJSGg3wdoJkN5Y4_tnntn0Ws,2164 +networkx/algorithms/approximation/connectivity.py,sha256=aVXSfUiWEG4gUL0R1u6WZ-h-wheuLP1_suO_pRFB8M4,13118 +networkx/algorithms/approximation/density.py,sha256=_JU9nIH4wneHG6PtEAF_q1oLwy5UUKk--FkTVkkMAMg,15258 +networkx/algorithms/approximation/distance_measures.py,sha256=UEkmKagNw9sj8kiUDdbAeYuzvZ31pgLMXqzliqMkG84,5805 +networkx/algorithms/approximation/dominating_set.py,sha256=5fC90w1CgYR4Xkpqact8iukKY0i57bMmyJW-A9CToUQ,4710 +networkx/algorithms/approximation/kcomponents.py,sha256=MDkoyQbk0gSAm3ZZK35VOsiLJDv7wiDsxfzH5O-ObFs,13285 +networkx/algorithms/approximation/matching.py,sha256=PFof5m9AIq9Xr5Kaa_-mYxI1IBBP7HEkjf-R9wVE3bo,1175 +networkx/algorithms/approximation/maxcut.py,sha256=eTQZqsDQAAUaufni-aDJAY2UzIcajDhRMdj-AcqVkPs,4333 +networkx/algorithms/approximation/ramsey.py,sha256=W5tX7BOQJIM_qNsBeUhCXVWMD8DFdeTycYyk08k4Sqk,1358 +networkx/algorithms/approximation/steinertree.py,sha256=2t58cQQt9OAMRWP1q0RV9QrdNzgasZtPNp6PbA0LFDU,9363 +networkx/algorithms/approximation/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/approximation/tests/test_approx_clust_coeff.py,sha256=PGOVEKf2BcJu1vvjZrgTlBBpwM8V6t7yCANjyS9nWF0,1171 +networkx/algorithms/approximation/tests/test_clique.py,sha256=s6HQB-lK3RAu_ftpe2NvIiMu0Ol8tpAdbGvWzucNL6k,3021 +networkx/algorithms/approximation/tests/test_connectivity.py,sha256=gDG6tsgP3ux7Dgu0x7r0nso7_yknIxicV42Gq0It5pc,5952 +networkx/algorithms/approximation/tests/test_density.py,sha256=EgJsX9z2lxWJCpF5NPbYEbE0GIATazCmgj7wFYeObDM,5298 +networkx/algorithms/approximation/tests/test_distance_measures.py,sha256=axgOojplJIgXdopgkjxjAgvzGTQ1FV1oJ5NG-7ICalo,2023 +networkx/algorithms/approximation/tests/test_dominating_set.py,sha256=l4pBDY7pK7Fxw-S4tOlNcxf-j2j5GpHPJ9f4TrMs1sI,2686 
+networkx/algorithms/approximation/tests/test_kcomponents.py,sha256=tTljP1FHzXrUwi-oBz5AQcibRw1NgR4N5UE0a2OrOUA,9346 +networkx/algorithms/approximation/tests/test_matching.py,sha256=nitZncaM0605kaIu1NO6_5TFV2--nohUCO46XTD_lnM,186 +networkx/algorithms/approximation/tests/test_maxcut.py,sha256=U6CDZFSLfYDII-1nX9XB7avSz10kTx88vNazJFoLQ1k,2804 +networkx/algorithms/approximation/tests/test_ramsey.py,sha256=h36Ol39csHbIoTDBxbxMgn4371iVUGZ3a2N6l7d56lI,1143 +networkx/algorithms/approximation/tests/test_steinertree.py,sha256=ifBKzrKjRzfMg2g9BKSXVYLvuEbygQPp9rXR4cK99Q8,10727 +networkx/algorithms/approximation/tests/test_traveling_salesman.py,sha256=odnyk7CgLbMGI2QuHxj3BPRX4cCfWHK5N65rj0jMLW8,32048 +networkx/algorithms/approximation/tests/test_treewidth.py,sha256=b_79ZKiW0XX24-GYaeQJ9Zaq7ZFYQT0DcDTkACII3EY,8868 +networkx/algorithms/approximation/tests/test_vertex_cover.py,sha256=FobHNhG9CAMeB_AOEprUs-7XQdPoc1YvfmXhozDZ8pM,1942 +networkx/algorithms/approximation/traveling_salesman.py,sha256=HZU6dbPo3Hiz2-Z3QHLFm5kdGBQwkXPefkVUuW-IC2A,56210 +networkx/algorithms/approximation/treewidth.py,sha256=hfLiPlheQMN7MG6CGR5w7AscKbLu3pTLNYRS13SO0Xo,8389 +networkx/algorithms/approximation/vertex_cover.py,sha256=oIi_yg5O-IisnfmrSof1P4HD-fsZpW69RpvkR_SM5Og,2803 +networkx/algorithms/assortativity/__init__.py,sha256=ov3HRRbeYB_6Qezvxp1OTl77GBpw-EWkWGUzgfT8G9c,294 +networkx/algorithms/assortativity/connectivity.py,sha256=-V0C5MTqtErl86N-gyrZ487MUyiG5x1QFEZKurOpIJA,4220 +networkx/algorithms/assortativity/correlation.py,sha256=0rc4FDi-e8eQRia7gpFrTqjIy-J7V2GtSwOb4QN6WZk,8689 +networkx/algorithms/assortativity/mixing.py,sha256=RRqqkuVwo71LosJLDbeVCVBikqC7I_XZORdsonQsf9Y,7586 +networkx/algorithms/assortativity/neighbor_degree.py,sha256=UMaQWKBkOZ0ZgC8xGt5fXEz8OL1rgwYjt2zKbKEqofI,5282 +networkx/algorithms/assortativity/pairs.py,sha256=w7xnaWxDDteluHoCsqunLlcM6nlcBenO_5Nz87oOEnE,3841 +networkx/algorithms/assortativity/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/assortativity/tests/base_test.py,sha256=MNeQMLA3oBUCM8TSyNbBQ_uW0nDc1GEZYdNdUwePAm4,2651 +networkx/algorithms/assortativity/tests/test_connectivity.py,sha256=Js841GQLYTLWvc6xZhnyqj-JtyrnS0ska1TFYntxyXA,4978 +networkx/algorithms/assortativity/tests/test_correlation.py,sha256=ddx-yqnVcOfx1dKVNUF695hS3Q-zCmFmCGzK64B7YSE,5068 +networkx/algorithms/assortativity/tests/test_mixing.py,sha256=1kkiMoQXslY-VnT1j00mFbRdj75A4d1b6OPTUOJVgaY,6802 +networkx/algorithms/assortativity/tests/test_neighbor_degree.py,sha256=wphbir1e-h-BAq5rjvWBi4WlgWdseyQbh_KLGQvy5Pc,3934 +networkx/algorithms/assortativity/tests/test_pairs.py,sha256=t05qP_-gfkbiR6aTLtE1owYl9otBSsuJcRkuZsa63UQ,3008 +networkx/algorithms/asteroidal.py,sha256=hIGg9zI4vylRXkrY5wHverTSOeK6dt1Gn2T_JYvGNnU,5500 +networkx/algorithms/bipartite/__init__.py,sha256=FZug-pg0FUDgAdysnKXxDpi1ly8ezxf4UxBPRklqjys,3883 +networkx/algorithms/bipartite/basic.py,sha256=JPC2gGuPvFA6q2CuI5mqLX_9QUGxrsQ8cIwcS0e9P4U,8375 +networkx/algorithms/bipartite/centrality.py,sha256=G280bAqeyXyCmes5NpRqUv2Tc-EHWrMshJ3_f4uqV9U,9156 +networkx/algorithms/bipartite/cluster.py,sha256=8aZRmlQ3g0XtzHyF1kUBBwnzMSjtduquHDH8MxKNSEI,7346 +networkx/algorithms/bipartite/covering.py,sha256=B3ITc016Kk70NBv-1lb30emXnfjlMIQJ7M-FIPCZip0,2163 +networkx/algorithms/bipartite/edgelist.py,sha256=mmiM2Bvh9CxRKCsspbs-90GZedpt7Gj5AGzvJ-aYDSM,11409 +networkx/algorithms/bipartite/extendability.py,sha256=OrYHlS4ruQST-dlQOuleiqHFKpVVNOvrG5aDNFgfckg,3989 
+networkx/algorithms/bipartite/generators.py,sha256=p0xgyuJ5hp52NYPu2ryPEKZ0MEktP5VSW2HMK2VdBCo,20408 +networkx/algorithms/bipartite/link_analysis.py,sha256=eVRRQgwzMcUPPu6ccskPk72yc_lmnF5EGqNIdXe-MxA,12772 +networkx/algorithms/bipartite/matching.py,sha256=xsT048Ok_uM0Zhpdc34qswV1zaCGOlJQnsbGTDsm5oo,21637 +networkx/algorithms/bipartite/matrix.py,sha256=rxmjBNHts4hQ-EIlHLE3vIAeGHaMz8Kg_FJ8je5zFyQ,8320 +networkx/algorithms/bipartite/projection.py,sha256=YIUlreqQQ6IPE37OXF32zNIdzEGeyR8aY-7iUENZYVA,17252 +networkx/algorithms/bipartite/redundancy.py,sha256=eGNVo0qW-3unhGhY5VTs2vbc8QmOGve70UDY3ykjhNo,3340 +networkx/algorithms/bipartite/spectral.py,sha256=fu2grV1the_e_G-e_lUdhk8Y9XFe6_p2tPmx3RKntFw,1902 +networkx/algorithms/bipartite/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/bipartite/tests/test_basic.py,sha256=gzbtsQqPi85BznX5REdGBBJVyr9aH4nO06c3eEI4634,4291 +networkx/algorithms/bipartite/tests/test_centrality.py,sha256=PABPbrIyoAziEEQKXsZLl2jT36N8DZpNRzEO-jeu89Y,6362 +networkx/algorithms/bipartite/tests/test_cluster.py,sha256=O0VsPVt8vcY_E1FjjLJX2xaUbhVViI5MP6_gLTbEpos,2801 +networkx/algorithms/bipartite/tests/test_covering.py,sha256=EGVxYQsyLXE5yY5N5u6D4wZq2NcZe9OwlYpEuY6DF3o,1221 +networkx/algorithms/bipartite/tests/test_edgelist.py,sha256=fK35tSekG_-9Ewr5Bhl1bRdwAy247Z9zZ4dQFFDQ9xw,8471 +networkx/algorithms/bipartite/tests/test_extendability.py,sha256=XgPmg6bWiHAF1iQ75_r2NqUxExOQNZRUeYUPzlCa5-E,7043 +networkx/algorithms/bipartite/tests/test_generators.py,sha256=BehRU6SQnWzKsAFoshrN2vpxcPByLAViofGeq38v23E,13203 +networkx/algorithms/bipartite/tests/test_link_analysis.py,sha256=vJMOtYG0vxYQCif_ztnYTUm_13gApfzFiNxChRefONg,6914 +networkx/algorithms/bipartite/tests/test_matching.py,sha256=3-2DMl3tF-g4_xNHvEuY4fZW7S5cqMTO_GUpcz1gkeQ,11973 +networkx/algorithms/bipartite/tests/test_matrix.py,sha256=TlZRVHCTKO2sqhycKFsefvdqP-Se47b_8iZwjtPpQYs,5063 +networkx/algorithms/bipartite/tests/test_project.py,sha256=WhX_yteTrXOdTZLMXpkW2A5ZFIBkw0WsnJRsNITyYtQ,15294 +networkx/algorithms/bipartite/tests/test_redundancy.py,sha256=utxcrQaTrkcEN3kqtObgKNpLZai8B5sMAqLyXatOuUo,917 +networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py,sha256=1jGDgrIx3-TWOCNMSC4zxmZa7LHyMU69DXh3h12Bjag,2358 +networkx/algorithms/boundary.py,sha256=q3JtWssmn9yCB2mBdkjKZjkaxmBhkG9_dJOzmuJiQos,5339 +networkx/algorithms/bridges.py,sha256=CsxueHDOB9aFM5D8GP83u1ZKGzxF193XBpvmMReAcQk,6066 +networkx/algorithms/broadcasting.py,sha256=Amw1oRpr8pdS67aC_9wjJOQomP1IqPvqtE7c0Mf9L70,4974 +networkx/algorithms/centrality/__init__.py,sha256=Er3YoYoj76UfY4P6I0L-0fCQkO7mMU0b3NLsTT2RGWI,558 +networkx/algorithms/centrality/betweenness.py,sha256=8829b5ep0pilM2Muqk5U0rPVQ02kwwedXaVGuQ_hY_U,22814 +networkx/algorithms/centrality/betweenness_subset.py,sha256=CUX0c96gYIORsdjOxVtif2796gOAcMitYoqEnbXDi5c,8114 +networkx/algorithms/centrality/closeness.py,sha256=ehkntG-gApT9uhWJjGaEZQ-tEQ-hdxDT7luf-uVPNAE,10281 +networkx/algorithms/centrality/current_flow_betweenness.py,sha256=kbq9XsZQvrCOdCAYvF67hRni1aqA1sT93WU2i0WfKKI,12717 +networkx/algorithms/centrality/current_flow_betweenness_subset.py,sha256=2qtLgf_3ft5qdDvHFrfYUt6zeQi42Nw7XBpSZRboJIA,8107 +networkx/algorithms/centrality/current_flow_closeness.py,sha256=IvecI8BZE4SgKayEXhKowIJw7S2fD_dN__N-f9TW-ME,3327 +networkx/algorithms/centrality/degree_alg.py,sha256=EFTA1b_GWUbmBy5R9beRQp7yh1X_NwZtk5L6is-mFGk,3894 +networkx/algorithms/centrality/dispersion.py,sha256=M12L2KiVPrC2-SyCXMF0kvxLelgcmvXJkLT_cBHoCTw,3631 
+networkx/algorithms/centrality/eigenvector.py,sha256=LAxVqaT3LmuQw20__t1KrgLKPF1Cz-PkTaiSrgPC1FU,13623 +networkx/algorithms/centrality/flow_matrix.py,sha256=Y65m6VbWyYjNK0CInE_lufyEkKy9-TyPmBeXb-Gkz70,3834 +networkx/algorithms/centrality/group.py,sha256=TLlK2eWlcMX7Lvbe2wAcZrmZ9LLTvbRCz-3RbXF0Zug,27960 +networkx/algorithms/centrality/harmonic.py,sha256=OfrDikASbb0Xejjbd1kJ-lQuGT3Gv-rg3pmTtncAbho,2832 +networkx/algorithms/centrality/katz.py,sha256=uVGHAyjqndSd4y4idHjkv0mUhmKmHU5vaEfNWfiKlzc,11042 +networkx/algorithms/centrality/laplacian.py,sha256=qjyW2WnxMFo_mZqiQ4d8PVHLpXi3xNEX148Yikvk_ls,5554 +networkx/algorithms/centrality/load.py,sha256=M2EdPX4gJEYGjMBIJMFKRWGI9uYHbFOWYxsILeaJuOE,6859 +networkx/algorithms/centrality/percolation.py,sha256=YJB8iYgbpjJ3EYK8pl26iSnjgfFsK31ufytRHnUTYYE,4419 +networkx/algorithms/centrality/reaching.py,sha256=Q9rda_dqXBfVaHOi8FgZSYNHKpJ0SHfCbX_myMsIe2I,7255 +networkx/algorithms/centrality/second_order.py,sha256=4CTboP95B6gUtAtSKLfeeE4s9oq0_3hXsXczxL6c_g8,5012 +networkx/algorithms/centrality/subgraph_alg.py,sha256=YXkuyhhhZHmobNFpKDUpfeqSPyBvk97MlcJy5ESWh1s,10520 +networkx/algorithms/centrality/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/centrality/tests/test_betweenness_centrality.py,sha256=LmHNSaqExAT8kE-eOb_67Z6ckSGD_XqO56OHXhNkbFc,33669 +networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py,sha256=Y7qigLxQwFJqClJiCrKWGkcOHD467W0b6C5gBb0rFUg,13085 +networkx/algorithms/centrality/tests/test_closeness_centrality.py,sha256=-LtG4ex192Xjgy4JCTfdjdJ3Cd9Op-3XnVvG2GA3FWQ,8728 +networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py,sha256=1sipOadh8bsuzHZ0_7haapKU6Vrn0rbZL5HXvbsto5w,10072 +networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py,sha256=JfRGgPuiF-vJu5fc2_pcJYREEboxcK_dmy-np39c4Aw,5839 +networkx/algorithms/centrality/tests/test_current_flow_closeness.py,sha256=vflQeoNKngrGUiRb3XNlm2X9wR4vKgMSW_sCyMUCQi8,1379 +networkx/algorithms/centrality/tests/test_degree_centrality.py,sha256=Jn_p5lThA3__ZBTDAORwo_EchjXKKkK1NwU_73HHI6M,4101 +networkx/algorithms/centrality/tests/test_dispersion.py,sha256=ROgl_5bGhcNXonNW3ylsvUcA0NCwynsQu_scic371Gw,1959 +networkx/algorithms/centrality/tests/test_eigenvector_centrality.py,sha256=AfUa7GTve0UX6QOdBF6YMipmo-gKhyCu6aT8OCSf8wc,5254 +networkx/algorithms/centrality/tests/test_group.py,sha256=833ME4tGlOGQZz8YANw4MSyeVPpjbyCdYh5X88GOprw,8685 +networkx/algorithms/centrality/tests/test_harmonic_centrality.py,sha256=wI7nStX_kIFJoZQY_i8DXXlZBOJzVnQfOP8yidX0PAU,3867 +networkx/algorithms/centrality/tests/test_katz_centrality.py,sha256=JL0bZZsJe2MQFL6urXgY82wCAwucUvhjaShYZPxpL6U,11240 +networkx/algorithms/centrality/tests/test_laplacian_centrality.py,sha256=9Nd9CfiCn2908BgRZ-cQiMQjpOFDu4Bftod1didWyCE,5898 +networkx/algorithms/centrality/tests/test_load_centrality.py,sha256=Vv3zSW89iELN-8KNbUclmkhOe1LzKdF7U_w34nYovIo,11343 +networkx/algorithms/centrality/tests/test_percolation_centrality.py,sha256=ycQ1fvEZZcWAfqL11urT7yHiEP77usJDSG25OQiDM2s,2591 +networkx/algorithms/centrality/tests/test_reaching.py,sha256=_JVeO1Ri-KybdnGCJ_yNPtJQmT_g77z0DAkU0JYFVGQ,5090 +networkx/algorithms/centrality/tests/test_second_order_centrality.py,sha256=ce0wQ4T33lu23wskzGUnBS7X4BSODlvAX1S5KxlLzOA,1999 +networkx/algorithms/centrality/tests/test_subgraph.py,sha256=vhE9Uh-_Hlk49k-ny6ORHCgqk7LWH8OHIYOEYM96uz0,3729 +networkx/algorithms/centrality/tests/test_trophic.py,sha256=mt--0AUc_8qez2SjauEHVnetC3DMwMAlLME6kgb8Lc0,8796 
+networkx/algorithms/centrality/tests/test_voterank.py,sha256=tN5u7pKAnJ_4AiwhPW6EuJZz7FLIG2jYqLKcXFi2urk,1687 +networkx/algorithms/centrality/trophic.py,sha256=7mpFrpgQhwP3Ad1plpJu1WzTGR1YWrIp_SUhM0D8Zww,5328 +networkx/algorithms/centrality/voterank_alg.py,sha256=z_1eq8rSDadEO5W5BbAg1zuOJj2di4FUCkmOwiuK12I,3231 +networkx/algorithms/chains.py,sha256=PPiSq5-GsT1Lsf8fwtGwGDVf1hhv5ZLariWtfzkBbAw,6968 +networkx/algorithms/chordal.py,sha256=L-ILWdVLWE44OkWmEO_4bSo4z6Ro-_zLglfLfTrwdqQ,13411 +networkx/algorithms/clique.py,sha256=SAvORAbZrZ-IpTVuEcarETQihNes-glpNiqoZ7hEgnM,27522 +networkx/algorithms/cluster.py,sha256=sKexvbvRnjuHSAkoJ9_dzCuOWDUDQkT_KjzmkKSoAhE,24851 +networkx/algorithms/coloring/__init__.py,sha256=P1cmqrAjcaCdObkNZ1e6Hp__ZpxBAhQx0iIipOVW8jg,182 +networkx/algorithms/coloring/equitable_coloring.py,sha256=uDcza6PD9qbvwVPUX1MBZbopQdrAEKNk6DpCFkc02tU,16315 +networkx/algorithms/coloring/greedy_coloring.py,sha256=GLXbwSvitsQgmxtOsbgUt4DTkURnb2k0X-7-SNsDW9I,20043 +networkx/algorithms/coloring/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/coloring/tests/test_coloring.py,sha256=7v_d1xanjYMZCa3dq2hE2hCcyexwWBTEFV5SoLgQDv4,23697 +networkx/algorithms/communicability_alg.py,sha256=0tZvZKY-_GUUB7GsRILxabS2jEpI51Udg5ADI9ADGZw,4545 +networkx/algorithms/community/__init__.py,sha256=vfw5aY7eoL7UDk42e0JPoVzRViCzk5cQf83zbls5p90,1279 +networkx/algorithms/community/asyn_fluid.py,sha256=bsY14UgR5FKLFqhFhHKmPP4-RJsVzH6K8ogsImLnCE8,6010 +networkx/algorithms/community/bipartitions.py,sha256=jqgJyMKOCs8TwI8YM9DO8qzuVZFe4XCnYRGeVAx5P4c,12238 +networkx/algorithms/community/centrality.py,sha256=Yyv5kyf1hf_L7iQ_ZbG8_FAkP638Sc_3N4tCSoB6J1w,6635 +networkx/algorithms/community/community_utils.py,sha256=sUi-AcPYyGrYhnjI9ztt-vrSHLl28lKXxTJPfi5N0c8,908 +networkx/algorithms/community/divisive.py,sha256=yFcKfKkiI6FqEVlBVxLa1fbqI1Yeiqe_A5fpPnYvlAE,6655 +networkx/algorithms/community/kclique.py,sha256=DTr9iUT_XWv0S3Y79KQl6OXefjztNMc9SAHWhdFOxcU,2460 +networkx/algorithms/community/label_propagation.py,sha256=LhzAXSHFCPQ2kG_rPgXb06YKdppO7buApksCC4GI4w8,11878 +networkx/algorithms/community/leiden.py,sha256=dx1N_KEsy30mfJ4729rJ28qaLsnkxoqDnv3UOXV2hWQ,6964 +networkx/algorithms/community/local.py,sha256=w-LK7qlMsQ2YHbZRQP95JvqM2gHGSe4yYZi7CXNkj_M,7316 +networkx/algorithms/community/louvain.py,sha256=jscGGTF6uUnC7yGEZTcE9UFmKsLDFwlZa4ecQyVQrOU,15424 +networkx/algorithms/community/lukes.py,sha256=gzqnup95RR2UzUiPpIt8qkepzZ9dCWqHGQSVPIJDMx8,8115 +networkx/algorithms/community/modularity_max.py,sha256=Qzidmvk2QOIb-S2o4erJvLSeQWcPbLq5TVMrxw15Wc4,18093 +networkx/algorithms/community/quality.py,sha256=oEw-RZBe62janFTTs-ak62APBiF2FpQoBiHv11_4YQY,11943 +networkx/algorithms/community/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/community/tests/test_asyn_fluid.py,sha256=XziMTOiEhzX6cvJdLeODUC1zjOLWchhHf9Z_9Eb3TME,3738 +networkx/algorithms/community/tests/test_bipartitions.py,sha256=w7kxfw5WjKzIosCdq3eHsvuYhUoElaO9Q3lK0kVyNg4,4639 +networkx/algorithms/community/tests/test_centrality.py,sha256=s8q4k5aThR0OgO9CDQk_PXMxfllmf5uC1GlvyUc_8EY,2932 +networkx/algorithms/community/tests/test_divisive.py,sha256=-Ee40OR-mPDReTngTEhbpx4_uLtNI7cqFkt8cZT9t5Y,3441 +networkx/algorithms/community/tests/test_kclique.py,sha256=iA0SBqwbDfaD2u7KM6ccs6LfgAQY_xxrnW05UIT_tFA,2413 +networkx/algorithms/community/tests/test_label_propagation.py,sha256=IHidFEv7MI781zsdk7XT848rLvLwDk2wBK1FjL-CRv4,7985 
+networkx/algorithms/community/tests/test_leiden.py,sha256=bl4jr-Z0m59AISIAl-OLsnATz1lDacNssc-ICKn72Nw,4803 +networkx/algorithms/community/tests/test_local.py,sha256=c-dy1rs1L0ahhDTQwNZP1zNbffjzRgw5IBFTCBV-fas,1809 +networkx/algorithms/community/tests/test_louvain.py,sha256=TwW1nlSKWGJeIKr9QOJ8xGehSY6R0Nz01xsnFqzt0Oo,8071 +networkx/algorithms/community/tests/test_lukes.py,sha256=f_JU-EzY6PwXEkPN8kk5_3NVg6phlX0nrj1f57M49lk,3961 +networkx/algorithms/community/tests/test_modularity_max.py,sha256=XYyPuDkxL4CYFwnpTdU_qD4GydpqgiRAIJO3CHQN_m4,10617 +networkx/algorithms/community/tests/test_quality.py,sha256=sZEy10hh3zlelUmww5r2pk5LxpZAht06PC5zCHxV1bs,5275 +networkx/algorithms/community/tests/test_utils.py,sha256=gomD6rFgAaywxT1Yjdi4ozY-1rC0ina4jgfvWeCvwGE,704 +networkx/algorithms/components/__init__.py,sha256=Dt74KZWp_cJ_j0lL5hd_S50_hia5DKcC2SjuRnubr6M,173 +networkx/algorithms/components/attracting.py,sha256=6az3lgqWhHTXaWUUuOPZfW9t7okliAhooFRotQY5JoM,2712 +networkx/algorithms/components/biconnected.py,sha256=_9GJdPZgqusGKZLzqT9tUSj1XZr2DgohiT6hcHVyil4,12782 +networkx/algorithms/components/connected.py,sha256=s-uvEdHkY8dNxNPS5G8jnk-qTCum-bet_yKAx8_qTXc,7904 +networkx/algorithms/components/semiconnected.py,sha256=BaBMFlQ208vuHOo5y1xeV0PDEI3yDUfH6zFb_jkcVhQ,2030 +networkx/algorithms/components/strongly_connected.py,sha256=oZGBBDzhsrCBQjIfqYlkzvXkE3sF-WTRtYEa5UYy6-k,9911 +networkx/algorithms/components/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/components/tests/test_attracting.py,sha256=b3N3ZR9E5gLSQWGgaqhcRfRs4KBW6GnnkVYeAjdxC_o,2243 +networkx/algorithms/components/tests/test_biconnected.py,sha256=N-J-dgBgI77ytYUUrXjduLxtDydH7jS-af98fyPBkYc,6036 +networkx/algorithms/components/tests/test_connected.py,sha256=KMYm55BpbFdGXk_B2WozS9rIagQROd7_k0LT3HFQmr4,4815 +networkx/algorithms/components/tests/test_semiconnected.py,sha256=q860lIxZF5M2JmDwwdzy-SGSXnrillOefMx23GcJpw0,1792 +networkx/algorithms/components/tests/test_strongly_connected.py,sha256=Zm7MgUIZbuPPJu66xZH1zfMZQ_3X1YBl2fLCOjph7NQ,6021 +networkx/algorithms/components/tests/test_weakly_connected.py,sha256=_eUx7226dxme_K2WNmvSIwZXQlKNoCuglWOOC3kFUW4,3083 +networkx/algorithms/components/weakly_connected.py,sha256=q5siwSHQYtbKDcVc4dUDzfKh-DClETmuy3jLQN2_K4o,4413 +networkx/algorithms/connectivity/__init__.py,sha256=EvYKw8LJn7wyZECHAsuEkIaSl-cV-LhymR6tqcn90p8,281 +networkx/algorithms/connectivity/connectivity.py,sha256=xck9yth1asWFAM9Hp7UP6vdUh-Kr0IOmq046plOAXPQ,29367 +networkx/algorithms/connectivity/cuts.py,sha256=o-5GRQotOVXsNq7Kx_w7yBBVuGLa57whS2YF5DVb1KQ,23199 +networkx/algorithms/connectivity/disjoint_paths.py,sha256=57ZerbGqn30B8cwomcsS0GScectvP-mgCMb0GH-RYb8,14649 +networkx/algorithms/connectivity/edge_augmentation.py,sha256=cK9S6pRnsKLyb_57guKfrAbLiXL6sMALHCf7pR4tzFM,44063 +networkx/algorithms/connectivity/edge_kcomponents.py,sha256=hqABcfCqZ-rb45I0qYE-X4NtstsKJbxl37FZzzmoXA4,20894 +networkx/algorithms/connectivity/kcomponents.py,sha256=qDDeMqPb5rWbi6EKZwetBft5Qmv6PSUngQTVunJxyy0,8086 +networkx/algorithms/connectivity/kcutsets.py,sha256=zYohzgkR2FODi_Ew2M9uMLb_a9ZP5fNqcXJwMYy6P7o,9371 +networkx/algorithms/connectivity/stoerwagner.py,sha256=WodsJEqKgsmTTcyUBk2u3wV_CXeon-cAzveWgIGgFmA,5431 +networkx/algorithms/connectivity/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/connectivity/tests/test_connectivity.py,sha256=eSmsi8uQk6MI591JgtSu2elIusb08bmSZS0h9gxb76I,15027 
+networkx/algorithms/connectivity/tests/test_cuts.py,sha256=q8dxYBAnsGtIBBPZCjIvMszZvXOYST2cVVsCJvXbIhw,10356 +networkx/algorithms/connectivity/tests/test_disjoint_paths.py,sha256=NLHReLoXSKoA6KPBNRbjF84ktg5PEaaktIj2AII3SDY,8392 +networkx/algorithms/connectivity/tests/test_edge_augmentation.py,sha256=gIu58oxk_uSexawlr3H1ds2HGqkTHHGhhVYUCNmfrb4,15737 +networkx/algorithms/connectivity/tests/test_edge_kcomponents.py,sha256=Bzo77zG79Lv_-OzdI1ZZKO_c13wrnFmfv60FrkDyXI0,16453 +networkx/algorithms/connectivity/tests/test_kcomponents.py,sha256=vLwQOLRevXeeid3e7SSKVpd_OyAJE8PxiAU8GYX0Tf0,9276 +networkx/algorithms/connectivity/tests/test_kcutsets.py,sha256=w5DyrjndNqyT3qxQH6WTBCuBR0ljYXatZCtcT1WeiYM,8974 +networkx/algorithms/connectivity/tests/test_stoer_wagner.py,sha256=A291C30_t2CI1erPCqN1W0DoAj3zqNA8fThPIj4Rku0,3011 +networkx/algorithms/connectivity/utils.py,sha256=gL8LmZnK4GKAZQcIPEhVNYmVi18Mqsqwg4O4j_et56s,3217 +networkx/algorithms/core.py,sha256=6SO5Wz-LSkx6t2DSq0GPmOvFALGe0gVsXdsvno9Fljs,17479 +networkx/algorithms/covering.py,sha256=I_on4DUgmwbtbo-qlBq4YWhOsNjuSHCfVGZNAL-Sefs,5278 +networkx/algorithms/cuts.py,sha256=eqwi5bhCMchItKlRAUw4wGVRtHUK0JzTkaXJFUyxXEw,10419 +networkx/algorithms/cycles.py,sha256=AGpANFoLZ2td8YkGn0cHypASJuRllAwj_-g2Vve7QW8,43302 +networkx/algorithms/d_separation.py,sha256=DpOl1YaI-72_fgUJ2NI3Piuj7EQRQ8xmyAQCRdLgjTw,26089 +networkx/algorithms/dag.py,sha256=OPgrOtnKJVMwxu2AxBGWPZyO7ti6qXa6pn57SsO8FAQ,44673 +networkx/algorithms/distance_measures.py,sha256=cQIRYjP4FtpbA3SP_dOPM6hvwpM1z77oMfmuZ4w8suI,37618 +networkx/algorithms/distance_regular.py,sha256=M9m6qitAtCXYbv2k9hDjF0CpxdeEXPu2_KOI8CCGc_I,8391 +networkx/algorithms/dominance.py,sha256=00Ng1SMzAvxodLTfAeLnXexpPb13IlB_S_mDEuT8JhY,3897 +networkx/algorithms/dominating.py,sha256=yoRB4WCe0wMvJpkbKtNo2fpd0jTVq6b09JsCYcaeR_Y,8145 +networkx/algorithms/efficiency_measures.py,sha256=VKbLKJgdIbno-YnJaLaCZt7TNXXnQPdz8N99uJCo748,4741 +networkx/algorithms/euler.py,sha256=yCqKaGchFSRPTRDXq7u1fH2IXZF94wWf9S10K9-Cd6U,14205 +networkx/algorithms/flow/__init__.py,sha256=rVtMUy6dViPLewjDRntmn15QF0bQwiDdQbZZx9j7Drc,341 +networkx/algorithms/flow/boykovkolmogorov.py,sha256=qFcppmiXz4VKKFd4RbDsiWOqJODtDTHbNr9_UFTjQaU,13334 +networkx/algorithms/flow/capacityscaling.py,sha256=8rng2qO5kawNSxq2S8BNlUMmdvNSoC6R8ekiBGU8LxU,14469 +networkx/algorithms/flow/dinitz_alg.py,sha256=I5nnZVsj0aU8-9Cje0umey407epFzpd7BDJpkI6ESK4,8341 +networkx/algorithms/flow/edmondskarp.py,sha256=PEIwLftevS2VYHaTzzZMSOLPy7QSBPsWPedjx1lR6Cs,8056 +networkx/algorithms/flow/gomory_hu.py,sha256=EuibaxPl65shGM9Jxvaa9WMwMmoczDvXXc2b0E81cqM,6345 +networkx/algorithms/flow/maxflow.py,sha256=GuVA4MlPwdOeCdPRXW2QVW1OdJqgzSpPhEyASr302u4,22975 +networkx/algorithms/flow/mincost.py,sha256=GzMYInS4QcNe0yImGrVXJ0bRd7t5TSSMa9jSeenIoOk,12853 +networkx/algorithms/flow/networksimplex.py,sha256=6F1JNT1pMEMt-C27H3PsdZYF-53SrJMrHaakQ8pD7Ng,25098 +networkx/algorithms/flow/preflowpush.py,sha256=CUKZ0-7X9l7P7qH_2n2Immbf8mFm8vocH2SY0tIwjGo,15721 +networkx/algorithms/flow/shortestaugmentingpath.py,sha256=gXXdkY3nH4d0hXVn0P2-kzfC3DHcuCdrudFdxetflKI,10372 +networkx/algorithms/flow/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/flow/tests/gl1.gpickle.bz2,sha256=z4-BzrXqruFiGqYLiS2D5ZamFz9vZRc1m2ef89qhsPg,44623 +networkx/algorithms/flow/tests/gw1.gpickle.bz2,sha256=b3nw6Q-kxR7HkWXxWWPh7YlHdXbga8qmeuYiwmBBGTE,42248 +networkx/algorithms/flow/tests/netgen-2.gpickle.bz2,sha256=OxfmbN7ajtuNHexyYmx38fZd1GdeP3bcL8T9hKoDjjA,18972 
+networkx/algorithms/flow/tests/test_gomory_hu.py,sha256=aWtbI3AHofIK6LDJnmj9UH1QOfulXsi5NyB7bNyV2Vw,4471 +networkx/algorithms/flow/tests/test_maxflow.py,sha256=4CtGOqeyloAxFSajaxPfGuyVhE0R3IdJf2SuIg4kHKQ,18940 +networkx/algorithms/flow/tests/test_maxflow_large_graph.py,sha256=P2e7G8TKN17zrMeOEbSIkLwK08MjfOtNhPva3nLM-B0,4612 +networkx/algorithms/flow/tests/test_mincost.py,sha256=vI61ZCLoWAzwYU4hU0AJS8Ori8vAWEPzFDCUoiloVRk,17806 +networkx/algorithms/flow/tests/test_networksimplex.py,sha256=tCw5C1hLEwbUbt_ySWgkvRyLKj-1T2wfXV9HT4Fx77Q,14162 +networkx/algorithms/flow/tests/wlm3.gpickle.bz2,sha256=zKy6Hg-_swvsNh8OSOyIyZnTR0_Npd35O9RErOF8-g4,88132 +networkx/algorithms/flow/utils.py,sha256=jexiKM-_BQfSAy15f4iZ_Km7DvRrcdOKqi6hxu9X0MM,6246 +networkx/algorithms/graph_hashing.py,sha256=kS881g6vUvMZms856A_tblq3ljB6BanUbt5az37fSlo,16918 +networkx/algorithms/graphical.py,sha256=1NdlhXuGEgUkHPo47EoNTWUMfdeTpiv7BBVM9ty2ivw,15831 +networkx/algorithms/hierarchy.py,sha256=_KFhCF1Afr2TrkPhqx-1PXUXEtfYLhbRShC58ZKbDGE,1786 +networkx/algorithms/hybrid.py,sha256=z3sIFMOpja1wlj-lI8YI6OIbSLZWHr66uSqyVESZWXY,6209 +networkx/algorithms/isolate.py,sha256=8F4GCBHZIW21CZresBlHuZhvq85ruPH_pnlxYetbHuQ,2251 +networkx/algorithms/isomorphism/__init__.py,sha256=gPRQ-_X6xN2lJZPQNw86IVj4NemGmbQYTejf5yJ32N4,406 +networkx/algorithms/isomorphism/ismags.py,sha256=AwJKq94d5e_RU0PZs5Gm7D73v-eOBBP8QDkpPxUocd0,60177 +networkx/algorithms/isomorphism/isomorph.py,sha256=O2TZtUPe89CsZxoNchv6FyYsGU79kWrwywGFBeonksE,10561 +networkx/algorithms/isomorphism/isomorphvf2.py,sha256=NTc9uCm2RnR9RxuKsAS_70RWD1zJYCJNPhefcJZUi5U,47637 +networkx/algorithms/isomorphism/matchhelpers.py,sha256=PaZ7PjmNNsJO9KoeRrf9JgcDHIcFr1tZckQc_ol4e9I,10884 +networkx/algorithms/isomorphism/temporalisomorphvf2.py,sha256=-1NW81l8kM9orQ2ni9tcNizQzEhOUE9BaBJXjUWqhiI,10948 +networkx/algorithms/isomorphism/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/isomorphism/tests/iso_r01_s80.A99,sha256=hKzMtYLUR8Oqp9pmJR6RwG7qo31aNPZcnXy4KHDGhqU,1442 +networkx/algorithms/isomorphism/tests/iso_r01_s80.B99,sha256=AHx_W2xG4JEcz1xKoN5TwCHVE6-UO2PiMByynkd4TPE,1442 +networkx/algorithms/isomorphism/tests/si2_b06_m200.A99,sha256=NVnPFA52amNl3qM55G1V9eL9ZlP9NwugBlPf-zekTFU,310 +networkx/algorithms/isomorphism/tests/si2_b06_m200.B99,sha256=-clIDp05LFNRHA2BghhGTeyuXDqBBqA9XpEzpB7Ku7M,1602 +networkx/algorithms/isomorphism/tests/test_ismags.py,sha256=YIrF8xSbnY8zsxiqjSZkzMFXF9TKwZ91m9araTfxJwE,24323 +networkx/algorithms/isomorphism/tests/test_isomorphism.py,sha256=e92l9AcYLKISmXB3hOdKanrvgKlUKuQ0TuXz7xuCD1Q,4576 +networkx/algorithms/isomorphism/tests/test_isomorphvf2.py,sha256=fVZZQgTU_L_kgp6KsiVwy5iLBewVFzXGDl72VQ0CgS0,15303 +networkx/algorithms/isomorphism/tests/test_match_helpers.py,sha256=uuTcvjgf2LPqSQzzECPIh0dezw8-a1IN0u42u8TxwAw,2483 +networkx/algorithms/isomorphism/tests/test_temporalisomorphvf2.py,sha256=k8032J4ItZ4aFHeOraOpiF8y4aPm2O1g44UvUfrQJgg,7343 +networkx/algorithms/isomorphism/tests/test_tree_isomorphism.py,sha256=mZPWRv2sSVJ9PbpGi5MEj5GaFbrp_ha2bL1wre1-Jqo,5681 +networkx/algorithms/isomorphism/tests/test_vf2pp.py,sha256=u_baOdKDdp34CQWwPob8henY90k8_hdK82SQDGesaO0,51386 +networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py,sha256=A8Y-FcGsLPWvmci5oHHDowSeh-Fhl_y88TrvboPZwdw,90251 +networkx/algorithms/isomorphism/tests/test_vf2userfunc.py,sha256=TrXs7E3Ynva18dBmaJDO4QGsySteCwK32cUKw-WkZ0I,6309 +networkx/algorithms/isomorphism/tree_isomorphism.py,sha256=J4RfMpO5LfL5z_wI3xP2wcbK2vMFEsxAQqBddPdw3YA,9033 
+networkx/algorithms/isomorphism/vf2pp.py,sha256=jM3mzJNC0EutWHDmDkKIh1JfsB9XbpTkzEC6qXD0Lpo,36681 +networkx/algorithms/isomorphism/vf2userfunc.py,sha256=HiPwyr7nJF1QS9w69MzKf6wGvO8cgjvdS5vW59iwCew,7371 +networkx/algorithms/link_analysis/__init__.py,sha256=UkcgTDdzsIu-jsJ4jBwP8sF2CsRPC1YcZZT-q5Wlj3I,118 +networkx/algorithms/link_analysis/hits_alg.py,sha256=ot5sEhIvQ-JruYgcqCoi_EvNk89UWx_skG4KayIuO9I,10439 +networkx/algorithms/link_analysis/pagerank_alg.py,sha256=e7HuXXXVCG79YG0iuIOr1wqA_zC60hKoXGlMfvh9k1o,17202 +networkx/algorithms/link_analysis/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/link_analysis/tests/test_hits.py,sha256=BQ7QHXvJDkXtK-_VDFagtdp_-VXxVerwPymjGjzFWA0,2546 +networkx/algorithms/link_analysis/tests/test_pagerank.py,sha256=rUJNa_2nKDSk75Fg1yygMPAqg_Bdk0gGWMDRKY8g1lk,7282 +networkx/algorithms/link_prediction.py,sha256=UYo_LJgoVXcM1iLMXswM2g4jvUJmvxln3e5bVfXxQ10,22253 +networkx/algorithms/lowest_common_ancestors.py,sha256=D1LgoX_ibv2hR-viKEx6l_qp3mWVCkW6YAlGGdoWgXQ,9286 +networkx/algorithms/matching.py,sha256=Xgi-zy9C2pNvGwTixTFV7ut7W-l8s7zUgJha9v8D0s8,44325 +networkx/algorithms/minors/__init__.py,sha256=ceeKdsZ6U1H40ED-KmtVGkbADxeWMTVG07Ja8P7N_Pg,587 +networkx/algorithms/minors/contraction.py,sha256=o5i1UfWpI1KhD4-pVx4yuFJvwi00yJx58Te6GYpys7Y,27606 +networkx/algorithms/minors/tests/test_contraction.py,sha256=31FgiENkElrScHzJrQ3DcKG8W2ieow6zI7PZ9wgVJKE,17674 +networkx/algorithms/mis.py,sha256=BEMv_dW8R6CjMMXJQGIhS4HpS8A8AkLJJWnz3GstuS4,2344 +networkx/algorithms/moral.py,sha256=z5lp42k4kqYk7t_FfszVj5KAC7BxXe6Adik3T2qvA6o,1535 +networkx/algorithms/node_classification.py,sha256=s2yjsHk4mj_6llCuawgGvJ59HyswbcW6ufGQC4UBmG0,6465 +networkx/algorithms/non_randomness.py,sha256=rjIoT7-aqKwGSBNoYxgKniDFgqSDwV8Hh21lkWRYGVE,5787 +networkx/algorithms/operators/__init__.py,sha256=dJ3xOXvHxSzzM3-YcfvjGTJ_ndxULF1TybkIRzUS87Y,201 +networkx/algorithms/operators/all.py,sha256=v_W9ZT3u4STNvT4YI9zYn1Z2PDMhqfh4vfhtO4glaIA,9718 +networkx/algorithms/operators/binary.py,sha256=hziSCLDIVIoTWPV56fED5gI1pErOdyLOQ3Z_E_Nz9As,13150 +networkx/algorithms/operators/product.py,sha256=FQkSIduOv-z1ktVzid2T40759S-BmAfTlya88VytuZc,19632 +networkx/algorithms/operators/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/operators/tests/test_all.py,sha256=Jn8IfNZ1P4Aa51C0gU58S2A-DPksQUFersWco0kG0Yw,8280 +networkx/algorithms/operators/tests/test_binary.py,sha256=BdZQS-qsWbEbRXuWANvH2eXEv7myAKyuldtQzHj6n4Y,12190 +networkx/algorithms/operators/tests/test_product.py,sha256=i4pBb5A4NmaCsllR1XizyhUToaQFMuLZ-JrywkQFdbU,15155 +networkx/algorithms/operators/tests/test_unary.py,sha256=UZdzbt5GI9hnflEizUWXihGqBWmSFJDkzjwVv6wziQE,1415 +networkx/algorithms/operators/unary.py,sha256=Eo2yeTg-F5uODGWSWR_im5VaKZQ97LyATIuKZcAFQR8,1795 +networkx/algorithms/perfect_graph.py,sha256=aMdQkfcXEySVutIOwngHkvDEy1qinO_92uZFJx6Ydnk,2597 +networkx/algorithms/planar_drawing.py,sha256=AXuoT3aFgEtCeMnAaUsRqjxCABdNYZ8Oo9sGOKBQto0,16254 +networkx/algorithms/planarity.py,sha256=nN6QCMz9PJYRqnAa06qEDANdd9bxsbO64PQdbVEGwk4,49887 +networkx/algorithms/polynomials.py,sha256=iP30_mcOlj81Vrzt4iB_ZZxYiRokubs-O1i9RW4pgTw,11278 +networkx/algorithms/reciprocity.py,sha256=1WMhLbSMkVPxRPlfUvbgO5FgVvJHn1doXQF4WuqSLQk,2855 +networkx/algorithms/regular.py,sha256=fEHD4VBREbJwSlR48okVZBuwgvD76GXdT28NitJI8qQ,4877 +networkx/algorithms/richclub.py,sha256=kARzso3M6wnUcAJo2g8ga_ZtigL2czDNzeUDzBtRfqo,4892 
+networkx/algorithms/shortest_paths/__init__.py,sha256=Rmxtsje-mPdQyeYhE8TP2NId-iZEOu4eAsWhVRm2Xqk,285 +networkx/algorithms/shortest_paths/astar.py,sha256=0pEnYNzG7Z86zWqyFe0OL8HTlQO9PwjDABy_ypC30_A,8937 +networkx/algorithms/shortest_paths/dense.py,sha256=5Y8ziU-RsWZRTzxEEzg5gB7f4j9yxbNhplTbLaMliG8,8261 +networkx/algorithms/shortest_paths/generic.py,sha256=-7wIqqDWeScy8brYFkdt68nuxkSgXVs1iXcwGWoBZAU,25337 +networkx/algorithms/shortest_paths/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/shortest_paths/tests/test_astar.py,sha256=F0zXZ7R0eXP8Et4F3mrCtdtfE1-tQTGB2ATcFveDUHI,8989 +networkx/algorithms/shortest_paths/tests/test_dense.py,sha256=ievl4gu3Exl_31hp4OKcsAGPb3g3_xFUM4t3NnvrG_A,6747 +networkx/algorithms/shortest_paths/tests/test_dense_numpy.py,sha256=IEwhjPNTlc2H1on-B3WhFoyLkDkJVdAFBjn675XALHA,2299 +networkx/algorithms/shortest_paths/tests/test_generic.py,sha256=CQJxa5by1xE1a2E8iZm8GAtiPy_DILcrB4dIyWVM_qM,20609 +networkx/algorithms/shortest_paths/tests/test_unweighted.py,sha256=r1F5qVEDZnzPn5yJ71Rp-Z7pq33CzJFfZmDhVBekR9Y,5879 +networkx/algorithms/shortest_paths/tests/test_weighted.py,sha256=dzMts7Y1mMUuz4zEz4adEqYUNoXi9BMf-oyzQ0BMG3s,35502 +networkx/algorithms/shortest_paths/unweighted.py,sha256=iuJAO3WzB1p-fwpDyGM4snuFB5k57KIsVGcAJUhCpi0,17825 +networkx/algorithms/shortest_paths/weighted.py,sha256=PphAp7PmN9qpWzkLudePIVgvySh_SGDi5GhdQN5iufk,84155 +networkx/algorithms/similarity.py,sha256=E8P8vWqCR-r2SFVSqW9Y-Bb4I4R7BzUc_jzhiCYx7tU,72679 +networkx/algorithms/simple_paths.py,sha256=8oG_bRDVRrBKVCDEjZLHW3Qyzrv5KVynp-Hl0m5SNmA,31091 +networkx/algorithms/smallworld.py,sha256=3xT-z2_CVdp5-Ap8vF6fsd3DiavDYtspFNZrcwcpXG0,13565 +networkx/algorithms/smetric.py,sha256=_Aj4BIMnafiXbJtLkvAfAnIEMdI9OcVvMy6kk9KKTns,770 +networkx/algorithms/sparsifiers.py,sha256=4T8pMlh-usEHA2-rZFh-CmZbBY9dcXIHjoqR-oJ2hSw,10048 +networkx/algorithms/structuralholes.py,sha256=rYaVkndSJ3kFxbjkhfQd6uMBpYO7NNlcsOPR0Xbiy7Y,12626 +networkx/algorithms/summarization.py,sha256=CygTsSthyCKHs0ZTZsCgWnyaT8annQbLpUtahmfY9Sw,23251 +networkx/algorithms/swap.py,sha256=NVZMmlnkdxgwwNw5GDrc8waNERcdCu52ydHcBdOA_hw,14744 +networkx/algorithms/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/tests/test_asteroidal.py,sha256=DnWI5_jnaaZMxtG44XD0K690HZs8ez7HU_9dSR-p6eA,502 +networkx/algorithms/tests/test_boundary.py,sha256=1OSJh32FYFhAVYB5zqxhZGEXZLS0HPp9kvfHZvWmD3o,6227 +networkx/algorithms/tests/test_bridges.py,sha256=jSCguECho0GNHnu0vpRh1twyfGP6tWFcaYL1rgvc8mU,4026 +networkx/algorithms/tests/test_broadcasting.py,sha256=GqjkpHC5Z7ogZZdf1qsz5B1lmX-Pd5pFMD5e3QRPAkI,3034 +networkx/algorithms/tests/test_chains.py,sha256=4fR5Uh2NMrTZGT1yR6sAsQEP3uy1U3UnwjcXLFk9d1Y,4196 +networkx/algorithms/tests/test_chordal.py,sha256=DPdNPY7KtqCsCwYVb4xQfnIm-z35dUJIWxNHtAiQLAQ,4438 +networkx/algorithms/tests/test_clique.py,sha256=RX1Q38DRDmclLLJ04pfE5RIFAy86EuTZ2QAERrtkjS4,9772 +networkx/algorithms/tests/test_cluster.py,sha256=b0OXtKvUYGzDxBrnIbPh8A3vo3jmRYOFzW02MVSWYgo,19320 +networkx/algorithms/tests/test_communicability.py,sha256=4KK9wU9gAUqHAAAyHwAKpq2dV9g415s_X0qd7Tt83gU,2938 +networkx/algorithms/tests/test_core.py,sha256=Q3bi_c0LpxZ-0VopHU6Lyzkb_JKseVT16H45qDuax9A,9619 +networkx/algorithms/tests/test_covering.py,sha256=EeBjQ5mxcVctgavqXZ255T8ryFocuxjxdVpIxVUNFvw,2718 +networkx/algorithms/tests/test_cuts.py,sha256=gKm9VDtnmwFli6kgwV1ktEFI_rw84p2Sg02Em6SoW5Q,5376 +networkx/algorithms/tests/test_cycles.py,sha256=CBEQ_kNlHg5PHb9vKMS9ot-lqUuvs_cAgaPtnEQn76g,34851 
+networkx/algorithms/tests/test_d_separation.py,sha256=cxoAOA_EahXQHFsFaRopz6cp3h5sMnD7D_isq4_EuWI,10714 +networkx/algorithms/tests/test_dag.py,sha256=sd6OlsrK57rY7x6vMfWSf11tEiKH5BakNp7747KvCnA,29407 +networkx/algorithms/tests/test_distance_measures.py,sha256=kF-pR5WbVBs1d_IO-U6Dr-sf8MaRHfxc_D286sBN62E,28793 +networkx/algorithms/tests/test_distance_regular.py,sha256=w27OTUtAI0VQv7cikkOdJg4bo4q7xTNIVE8nbU_x7b8,2915 +networkx/algorithms/tests/test_dominance.py,sha256=rD-m7LVQpqrOTCUJrli43lYpezYxajfkcNd3xvGhTj8,9811 +networkx/algorithms/tests/test_dominating.py,sha256=5WwPlrQ6_pFaVx_-K4D5cHNbCZL23qp-PytgsXDuS4Q,3112 +networkx/algorithms/tests/test_efficiency.py,sha256=QKWMvyjCG1Byt-oNp7Rz_qxnVeT77Zk27lrzI1qH0mA,1894 +networkx/algorithms/tests/test_euler.py,sha256=L4L1ljHVxQxjQQludO2r6k3UZU7WAY_N6WYUjFx1fEk,11209 +networkx/algorithms/tests/test_graph_hashing.py,sha256=ribxC8ZkNxkkBDyjSZ8P2f8J6y5y4g-Ik-k5hBgZcx4,31567 +networkx/algorithms/tests/test_graphical.py,sha256=uhFjvs04odxABToY4IRig_CaUTpAC3SfZRu1p1T7FwY,5366 +networkx/algorithms/tests/test_hierarchy.py,sha256=uW8DqCdXiAeypkNPKcAYX7aW86CawYH84Q0bW4cDTXo,1184 +networkx/algorithms/tests/test_hybrid.py,sha256=kQLzaMoqZcKFaJ3D7PKbY2O-FX59XDZ1pN5un8My-tk,720 +networkx/algorithms/tests/test_isolate.py,sha256=LyR0YYHJDH5vppQzGzGiJK-aaIV17_Jmla8dMf93olg,555 +networkx/algorithms/tests/test_link_prediction.py,sha256=lP6slSHwXdzdJzNSeMTUsP88ypSqe6EeVy-bUP2d-48,20451 +networkx/algorithms/tests/test_lowest_common_ancestors.py,sha256=5ZT_17q-5ipw7NNgNisaVcxIxrY6XeQQaUlBXHx9k9g,14160 +networkx/algorithms/tests/test_matching.py,sha256=1IKeUi49HR8aAOm2V4cmBL2NGXvF9cb8mqW9jnkNGuY,18319 +networkx/algorithms/tests/test_max_weight_clique.py,sha256=M1eoy8OtuQVZkEvNMauV9vqR6hHtOCrtq6INv2qzMyA,6739 +networkx/algorithms/tests/test_mis.py,sha256=Z2tKoqbs-AFPzEBDYO7S8U-F7usLfZJ2l6j2DpZUts4,1865 +networkx/algorithms/tests/test_moral.py,sha256=15PZgkx7O9aXQB1npQ2JNqBBkEqPPP2RfeZzKqY-GNU,452 +networkx/algorithms/tests/test_node_classification.py,sha256=NgJJKUHH1GoD1GE3F4QRYBLM3fUo_En3RNtZvhqCjlg,4663 +networkx/algorithms/tests/test_non_randomness.py,sha256=iDHCFzFvxGXCi_vWnv-DkXHcmO9uU6KVpYGMCpHKQG8,1678 +networkx/algorithms/tests/test_perfect_graph.py,sha256=Y2mpreh5TNCkqn4PDO57MFkgYozt-tlZFScKH3TZJRg,609 +networkx/algorithms/tests/test_planar_drawing.py,sha256=fBTd9JzDkTqNQz7GBQCS9lShU1P3lWRQ7L-E5WhSdlA,8765 +networkx/algorithms/tests/test_planarity.py,sha256=2iy22H4APB2K0DiPcoWmiQV9BTQlJ9JSOexubN14sU0,17312 +networkx/algorithms/tests/test_polynomials.py,sha256=baI0Kua1pRngRC6Scm5gRRwi1bl0iET5_Xxo3AZTP3A,1983 +networkx/algorithms/tests/test_reciprocity.py,sha256=X_PXWFOTzuEcyMWpRdwEJfm8lJOfNE_1rb9AAybf4is,1296 +networkx/algorithms/tests/test_regular.py,sha256=ppXES6zvpsppoZmzXPFim-S4ZMWPG2Zhhmlgc67y_GA,2951 +networkx/algorithms/tests/test_richclub.py,sha256=Wl8aE0CSswaXJiHCQ42IkyRvjWaJQZ2OQcbC7LpIgfo,3962 +networkx/algorithms/tests/test_similarity.py,sha256=l02q6PMnweng8EfUm6Z0yor-Uy-zd6WN0Tm28lh3pBU,41248 +networkx/algorithms/tests/test_simple_paths.py,sha256=7U9wCXz4SHK0XeYrs1k2KjYgrYVQDnts2ggQLzU18p0,25181 +networkx/algorithms/tests/test_smallworld.py,sha256=bmoNDxIuhV0bBKWh2hgFw0XYTPCc2rgg1y77uMg6rE8,2390 +networkx/algorithms/tests/test_smetric.py,sha256=VM14L4X1AABvINDL9qKXzlech_Q2g4Aee-ozWM2Qrr4,144 +networkx/algorithms/tests/test_sparsifiers.py,sha256=1GRbQy1vfmwv6eUhP4Io0aykH2VyTJfFWmncrXmTqi4,4044 +networkx/algorithms/tests/test_structuralholes.py,sha256=F0xZ1nlKJLGmQDebDiExju4fMffu79qThnyF7ZGA53A,7592 
+networkx/algorithms/tests/test_summarization.py,sha256=uNyaUstobIEu6M_Hexik-3YiYTRSy_XO6LUqoE4wazw,21312 +networkx/algorithms/tests/test_swap.py,sha256=WJtGMkSbAd1Cv06VaUeDVHosNOtdigsqEspyux0ExCs,6144 +networkx/algorithms/tests/test_threshold.py,sha256=x0hqqbH65nbDNEUV0vYz0J2TSmmBbJ0daEgLIR7CRm0,9733 +networkx/algorithms/tests/test_time_dependent.py,sha256=NmuV2kDo4nh2MeN0hwcJf0QSDtqMD0dfSeeKSsYBtQ8,13342 +networkx/algorithms/tests/test_tournament.py,sha256=tfOPwN7YHnw0oxl9d7-ErPBFKz6KBfSc5S3tbGrGHs8,4107 +networkx/algorithms/tests/test_triads.py,sha256=CbnFABCMDU0X1mzu3tzW4-WL2qn5ptaDTOShNaTjIwk,8219 +networkx/algorithms/tests/test_vitality.py,sha256=p5lPWCtVMtbvxDw6TJUaf8vpb0zKPoz5pND722xiypQ,1380 +networkx/algorithms/tests/test_voronoi.py,sha256=M4B6JtkJUw56ULEWRs1kyVEUsroNrnb5FBq9OioAyHM,3477 +networkx/algorithms/tests/test_walks.py,sha256=X8cb-YvGHiiqbMEXuKMSdTAb9WtVtbHjIESNSqpJTmU,1499 +networkx/algorithms/tests/test_wiener.py,sha256=uT6Es_OFbJqxnVY7lImzlJSRTQS4eJnMaKotKbObMd8,4186 +networkx/algorithms/threshold.py,sha256=maOMixTxDibvBL2BkVcaYz1Co_09jg8d7DpAW2blNnY,30776 +networkx/algorithms/time_dependent.py,sha256=PAeJ7Yt8kUqbDgvBaz_ZfUFZg-w-vf1gPC0HO6go_TI,5762 +networkx/algorithms/tournament.py,sha256=TyWD0P-8JYXsckulbyr-bkKOoc0gLUHK59R1RQm2We4,11613 +networkx/algorithms/traversal/__init__.py,sha256=YtFrfNjciqTOI6jGePQaJ01tRSEQXTHqTGGNhDEDb_8,142 +networkx/algorithms/traversal/beamsearch.py,sha256=Vn0U4Wck8ICShIAGggv3tVtQWVW0ABEz_hcBsGrql6o,3473 +networkx/algorithms/traversal/breadth_first_search.py,sha256=KGpFnXuCQrDwcQg27Irtrqu7hcIWlFSlVWGpA26XbHA,18259 +networkx/algorithms/traversal/depth_first_search.py,sha256=2V4T3tGujcAtV3W6WcTQUjGAAe3b1rqinONowUhLsa8,16795 +networkx/algorithms/traversal/edgebfs.py,sha256=JJ1mQCmv8WydqQtIVQC1HqkqEFDMeKVzJ_hexwU8sXA,6333 +networkx/algorithms/traversal/edgedfs.py,sha256=T1xHGr8p_eC1pEUyCP-kpaYlIY_la_ms-J0-50c4EDk,6041 +networkx/algorithms/traversal/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/traversal/tests/test_beamsearch.py,sha256=bzUcswZ1qo0ecDZYSER_4enbsW6SjTpb_3Nb3fqmkAo,900 +networkx/algorithms/traversal/tests/test_bfs.py,sha256=tGQSiYhc6BxaZEuw-G3ZxVfYLN7q0zrP_BeKl6EfxkU,6447 +networkx/algorithms/traversal/tests/test_dfs.py,sha256=rQMq7GToM6CVU3WwIZfmqa-HrqlmvIT1DryRU7NbU_U,10580 +networkx/algorithms/traversal/tests/test_edgebfs.py,sha256=8oplCu0fct3QipT0JB0-292EA2aOm8zWlMkPedfe6iY,4702 +networkx/algorithms/traversal/tests/test_edgedfs.py,sha256=HGmC3GUYSn9XLMHQpdefdE6g-Uh3KqbmgEEXBcckdYc,4775 +networkx/algorithms/tree/__init__.py,sha256=wOh_v70XEdFHL2qUMje7qkJI5RcjFNrDremJYl4zzkg,182 +networkx/algorithms/tree/branchings.py,sha256=B0c_uKpcnV2SwJMZJRK0BMEz8LkvIcOhv1y0AI0gTnY,34339 +networkx/algorithms/tree/coding.py,sha256=l9P4jhmhg_9uYBVMNSmLYW8btHJH07-Sl2SIKJtO1LA,13466 +networkx/algorithms/tree/decomposition.py,sha256=lY_rqx9JxnLEkp1wiAv0mX62PGPwGQ6SW4Jp48o8aiw,3071 +networkx/algorithms/tree/distance_measures.py,sha256=Dwtk2BRTXmEGjFFtpiAbk02HrX2Hb5kjXusfwn6nm9M,6981 +networkx/algorithms/tree/mst.py,sha256=UMeSnnpKrPeq7a_w_tR415w9TOLltmzuXyIy_HfqbAU,46145 +networkx/algorithms/tree/operations.py,sha256=TjfLQpxwlaLgdOnhLkdzHfZ68yFCYz6BMzzh_d_2mJc,4048 +networkx/algorithms/tree/recognition.py,sha256=bYnaDN0ZaIWTgq0tbPEHAcdxQBWZpDvWypZarBbA334,7569 +networkx/algorithms/tree/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/tree/tests/test_branchings.py,sha256=euky-3tFOV5CVV5v_yFawuVGCaqTuTrKtBIkCygwhiA,17731 
+networkx/algorithms/tree/tests/test_coding.py,sha256=XC6SbfA2zVGH4FyJJyv6o8eOnBu7FNzNot3SKs7QmEo,3955 +networkx/algorithms/tree/tests/test_decomposition.py,sha256=vnl_xoQzi1LnlZL25vXOZWwvaWmon3-x222OKt4eDqE,1871 +networkx/algorithms/tree/tests/test_distance_measures.py,sha256=WNzqjgDuFLUt_NgjD4R8gGhbYhQ3F5a4HZfBBipunv0,3389 +networkx/algorithms/tree/tests/test_mst.py,sha256=D3o217cjuUtf0TxZ9ks1wqQ4cpwVySoOBYfg0Jmr1AU,32376 +networkx/algorithms/tree/tests/test_operations.py,sha256=ybU96kROTVJRTyjLG7JSJjYlPxaWmYjUVJqbXV5VGGI,1961 +networkx/algorithms/tree/tests/test_recognition.py,sha256=qeMEIvg-j2MqaU-TNIQhCcXxao8vTBy0wjpU7jr2iw8,4521 +networkx/algorithms/triads.py,sha256=FqOtB5fi40k4EoA_-bV1TBwimsRihAqrVdFyNnq0hns,14233 +networkx/algorithms/vitality.py,sha256=8M1cubIydO49El2kwVCURHZ2UwCtfGVFeGS8-JYt1ko,2289 +networkx/algorithms/voronoi.py,sha256=07SnSpxLDz4k6K59Jo-VTNA-Qy5knaHfBC-y_5vAOLQ,3183 +networkx/algorithms/walks.py,sha256=5A611YwX9D8IZYbFcQ4aT3cuKZWHq1QOzCpy4mAM6XE,2312 +networkx/algorithms/wiener.py,sha256=88sTSCXt6gakAmxP4BD_ir1R-_0NOUxXlyZKeElzzvs,9221 +networkx/classes/__init__.py,sha256=Q9oONJrnTFs874SGpwcbV_kyJTDcrLI69GFt99MiE6I,364 +networkx/classes/coreviews.py,sha256=5Dgi8PjMZJz_Uun2SXL5a6mxtAfdbm3kPIdbgzSTXgM,13251 +networkx/classes/digraph.py,sha256=ys8hmrqUjto00TDwBNCwiaNAahFbizNvKkkx84BxS30,48781 +networkx/classes/filters.py,sha256=PCy7BsoIby8VcamqDjZQiNAe_5egI0WKUq-y5nc9unQ,2817 +networkx/classes/function.py,sha256=pPpy-TgC33yNX3nPMZoSjfVmaiiSTbfwYJJMFGa35hQ,43794 +networkx/classes/graph.py,sha256=bY5x7WQHoQxSZow95OUAmU8UZaYV0ZWQvhrEFjSAt0Y,72337 +networkx/classes/graphviews.py,sha256=ulUTLozEK_hj_4TGHdgvxveR2-rb92Q14jjxH4oH4Go,8520 +networkx/classes/multidigraph.py,sha256=qBhb2eoiMejz5fkv-ihLsqwQKKYnUjUANHnrWZKYQfI,37036 +networkx/classes/multigraph.py,sha256=7SD8_ZT7yE1EfIotDzIgITSj4HkKLxZfj6ZnWjQ7hIA,47931 +networkx/classes/reportviews.py,sha256=u0hNZqaWXCfLMP_lq835XCIVStkZQJ9HaQPeDPPoo88,46132 +networkx/classes/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/classes/tests/dispatch_interface.py,sha256=gffiIwS4OZWKIgRkfvghixQds5hY0tPBi86p0lpV7eY,6823 +networkx/classes/tests/historical_tests.py,sha256=oUJSS-QhvInhad9AJCXCCk0x7OzAclIF9RjBSZ6gB58,16291 +networkx/classes/tests/test_coreviews.py,sha256=qzdozzWK8vLag-CAUqrXAM2CZZwMFN5vMu6Tdrwdf-E,12128 +networkx/classes/tests/test_digraph.py,sha256=uw0FuEu3y_YI-PSGuQCRytFpXLF7Eye2fqLJaKbXkBc,12283 +networkx/classes/tests/test_digraph_historical.py,sha256=8TbDcvmLgfllfGomTRStSpT5WIQiTHLt-rS3GyJEItU,3668 +networkx/classes/tests/test_filters.py,sha256=fBLig8z548gsBBlQw6VJdGZb4IcqJj7_0mi2Fd2ncEM,5851 +networkx/classes/tests/test_function.py,sha256=PDtKrB31Ix1-abPc8NzuP-h7HOOIRtkL5bvcOE8mshU,35481 +networkx/classes/tests/test_graph.py,sha256=ST12bvoj72uzUDpnyVz-Zx0mlXV2CegcEg3txT5qS6k,31798 +networkx/classes/tests/test_graph_historical.py,sha256=EVAndjelyyZ9laQCTrj6KR6lhdmG8Y1raP2IrxkZxdA,258 +networkx/classes/tests/test_graphviews.py,sha256=JoTS_ZY7u4dBHEttD55WT1pD47FcvELBEzw-uJQiqv4,11496 +networkx/classes/tests/test_multidigraph.py,sha256=ryTKegCoYixXbAqOn3mIt9vSMb5666Dv-pfMkXEjoUE,16342 +networkx/classes/tests/test_multigraph.py,sha256=0vFQO3RCJaBpzXvnQzdWa_qYLHNo_I9DICYhPZJNUMk,18777 +networkx/classes/tests/test_reportviews.py,sha256=MgP9_cKlw4aTU5m1XbsNTM3ZjmO7roOOf-7x-Hn9xKs,41332 +networkx/classes/tests/test_special.py,sha256=wl-Idzdc55eOQ4Tn5SNqOG21AjwLb2TTp9x102Qx3Ms,4053 +networkx/classes/tests/test_subgraphviews.py,sha256=iIMkbHdtJsgDlcdphhE_uUTJ0VGTGFzKIVvMd7oamoU,13669 
+networkx/conftest.py,sha256=5-BwqVMgfaZyRZICFpG1plLMJ2G1IchSQVEuRjoBtyc,8041 +networkx/convert.py,sha256=Xmkhacuyw9CDviBTZPlH6nmD-JabSk0aPcVmGgp4LeY,16143 +networkx/convert_matrix.py,sha256=9BXSEXzCEIte1nx8f35cWhXo-EVYjOkuW4H3Jutq-oE,45342 +networkx/drawing/__init__.py,sha256=rnTFNzLc4fis1hTAEpnWTC80neAR88-llVQ-LObN-i4,160 +networkx/drawing/layout.py,sha256=5MUr2NTp4L3IbOShTb_m7fD4SH1HPZKJPJ4G1mJC3Js,66346 +networkx/drawing/nx_agraph.py,sha256=4Vd8JQH4BQ7BaRO8TmYlqBKmFPm8e2XcDDJiqFkNMxM,14248 +networkx/drawing/nx_latex.py,sha256=PySHixo2YhGV0xbKLroaX-_XcI1L5u8IF8nP6xdV2NU,24846 +networkx/drawing/nx_pydot.py,sha256=9KbNVrVcU5gFmo1GPpB_UMBXcD1tnBYDBzxHvgg6mOs,9992 +networkx/drawing/nx_pylab.py,sha256=MHoDvYE4FO9kJRogqeB1FfqrStyLcjQTq3fr67vAjOE,103488 +networkx/drawing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/drawing/tests/baseline/test_display_complex.png,sha256=3YPN6JEuyq1KEpRgSnH-2yECI81frU_5fw0VxHlwo9A,142315 +networkx/drawing/tests/baseline/test_display_empty_graph.png,sha256=XsOkBCZlt-55c40ElwEapBENVfmdc4TZKIF1ufs5Dzk,3257 +networkx/drawing/tests/baseline/test_display_house_with_colors.png,sha256=uiEx6_2RddHG-516OsRVM0AG7M5CGiAo4tpDgbgPKIU,26002 +networkx/drawing/tests/baseline/test_display_labels_and_colors.png,sha256=RfA1OtWYKttP5_LDScIcx8MFbC-_5wljXeDsZAqAqDU,62571 +networkx/drawing/tests/baseline/test_display_shortest_path.png,sha256=6W7EMitF8jojJ8epmaYEWpjhYRlzBlL7QV98MStxvvQ,35264 +networkx/drawing/tests/baseline/test_house_with_colors.png,sha256=FQi9pIRFwjq4gvgB8cDdBHL5euQUJFw6sQlABf2kRVo,21918 +networkx/drawing/tests/test_agraph.py,sha256=5cbqLGEgJmAocWOMSikv_9ykRWfBymjGfalvKbR1_KE,8725 +networkx/drawing/tests/test_image_comparison_pylab_mpl.py,sha256=2UtGMJFpcWiG0sQtXIYT0j03Q2LhGqWM2G-pfKY3Sw0,6340 +networkx/drawing/tests/test_latex.py,sha256=YQjLxsZnx7_CX2sJ3nN0cUiJzC7p6UhyFN6ufbox5G0,8621 +networkx/drawing/tests/test_layout.py,sha256=2_wJfBcCcHwBm1JRrQiLMcnNWoaURX6F4LFcn5OPul4,23709 +networkx/drawing/tests/test_pydot.py,sha256=X9b66gWqMgdTEyRJ7Zmy5kL9cr22waI688K9BJUf4Bk,4973 +networkx/drawing/tests/test_pylab.py,sha256=TMGpM6IKm5vTSu9Dbfm8mvyhMqz2aAu1gafGbLiOU1k,54825 +networkx/exception.py,sha256=hC8efPfIzOFo0jiWiQbTPaNKuNTuUwhp9RPw--pdv4U,3787 +networkx/generators/__init__.py,sha256=EoYB5c5ZE4rsNKZvl1TRQy2Vo2D3T2H-YunyD2i6sa0,1366 +networkx/generators/atlas.dat.gz,sha256=c_xBbfAWSSNgd1HLdZ9K6B3rX2VQvyW-Wcht47dH5B0,8887 +networkx/generators/atlas.py,sha256=cbki_oYDi_7vniSesqJZ-PCEM3BvnzEdjSc3TA3s1xc,6955 +networkx/generators/classic.py,sha256=Wzey-Pl6Rm-ELYyg2lIb0lFZAxywRr5iQxv7zo3d9Bk,32658 +networkx/generators/cographs.py,sha256=-WR4_yrNk_X5nj7egb7A22eKPVymOdIYM-IftSRH4WA,1891 +networkx/generators/community.py,sha256=_p_4OfItbg8nS0b3EvojCXZ8cESdC-0Gj67V5w2veuM,34911 +networkx/generators/degree_seq.py,sha256=Hxv1rjY2NS8qmnvSWh4rcvublDZzor3D3IiB2OAFXoE,30493 +networkx/generators/directed.py,sha256=rtgf2Imf34SogrWA0BNgDltlA5kUv_l2NkxKPcJgGtQ,18149 +networkx/generators/duplication.py,sha256=hmYAHJBez7WlfdVGGa288JFUBHoIUdVqEGCodApKOr4,5831 +networkx/generators/ego.py,sha256=ylW-UMvg9C8cqygw9vrBNLGAVZJT4U5EWbU4gaT_H1g,1894 +networkx/generators/expanders.py,sha256=gJioNdyhBOnNcjh0qC-kfbFs1SEMU5_o9GHy85pdJXk,15184 +networkx/generators/geometric.py,sha256=6slgS0vraxql67AOP9lWzBrpNbx_OBZCoBwYuSkQQXg,39461 +networkx/generators/harary_graph.py,sha256=N2_b49xrET0bjZ4C7sgcG2xLv64ia9xjvJ2NB75CAz8,4950 +networkx/generators/internet_as_graphs.py,sha256=RuhEW9rQ8f5VzeIyZe6GqSWgxKtMVuwqnzv0llW3FeY,14226 
+networkx/generators/intersection.py,sha256=EFm0AOjnqyp8KcT7kGWqANq-_vq9kQ0d_0DzVyQyP-o,4101 +networkx/generators/interval_graph.py,sha256=ZTmdgQbBx3M6sysGWXbGyngYYOC1TAXD3Ozkw4deQFw,2204 +networkx/generators/joint_degree_seq.py,sha256=nyp86NC_4XvzvwpwwzKrrCSz1i_4bESSDtVjWvpkWFg,24773 +networkx/generators/lattice.py,sha256=5iujIY5pF_VhbWEE2osM5i7uuYqs5Hv1wS33WGPMn_o,14740 +networkx/generators/line.py,sha256=DDQZvD2Y2c-4kc_xTuUWpmmViLftTSGKFukgqSiyI_c,17540 +networkx/generators/mycielski.py,sha256=xBX2m77sCzumoH5cAGitksvEEW-ocbCnbdaN7fKUtVk,3314 +networkx/generators/nonisomorphic_trees.py,sha256=62FIt4Qx-giwDflNKP5BrDoCv1HbQmlp6oVGjqGeRlE,6993 +networkx/generators/random_clustered.py,sha256=i_NdvvchHvsvbwgQtoWSY_pLwvhO9Lh02MSZXzgGb7c,4183 +networkx/generators/random_graphs.py,sha256=WIJ4v8PSNpHePclSCQ3IcLWwcipZJ2Cn9pNx6XenBEo,51669 +networkx/generators/small.py,sha256=duBVJgz4f2TByU4SdyRGHw7YyLHrlrr0UFw6spVTCbo,30967 +networkx/generators/social.py,sha256=brzAScpAxWGRExE9J7FuiJ29QZxBm8bXpRgCc0IoHP0,23416 +networkx/generators/spectral_graph_forge.py,sha256=9r6d9f5Y03KPX138sQTKWG3_RjeiYQVUJJWwCHqG4MA,4282 +networkx/generators/stochastic.py,sha256=Qg9vWm9EOug2OQVIHL_dZ5HrXc16lxnWyzX52KWNEPI,1981 +networkx/generators/sudoku.py,sha256=kLM2AP0H4966uYiNO1oAFEmv5qBftU_bOfYucRxexM0,4288 +networkx/generators/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/generators/tests/test_atlas.py,sha256=nwXJL4O5jUqhTwqhkPxHY8s3KXHQTDEdsfbg4MsSzVQ,2530 +networkx/generators/tests/test_classic.py,sha256=eRtftt3fcr5kYJI8PffuY3tWOnn4u0yJZwKav1bZACU,24400 +networkx/generators/tests/test_cographs.py,sha256=v2fYoolmfwfCpJZRKRtH9YKyWcuJkaPOQUpDK9DMsn0,553 +networkx/generators/tests/test_community.py,sha256=FGcDo3Ajb-yYc5kUkFbVfOJVMG-YppbAtjgBPcVzjLc,11311 +networkx/generators/tests/test_degree_seq.py,sha256=V_X-Mc0H2sqEy7OTJb4yY9khsi-9HD-DIA_fvngW-VE,7284 +networkx/generators/tests/test_directed.py,sha256=e8C7AF2whjaqdxga7WSvCrtIx7ByBG_JyJ9gYqJ6HBA,6020 +networkx/generators/tests/test_duplication.py,sha256=UdIGDF_fishanWid1xO_aH4NDfie8xpIqd26qndhOqI,3155 +networkx/generators/tests/test_ego.py,sha256=8v1Qjmkli9wIhhUuqzgqCzysr0C1Z2C3oJMCUoNvgY4,1327 +networkx/generators/tests/test_expanders.py,sha256=Cf0cCJOplngPlN23OVqDcrny2QZlrYMrrWHeIi0APaA,6363 +networkx/generators/tests/test_geometric.py,sha256=gnVm4dam_Er88YwaNpNZC6mjJjfgwMYhyLOtU9oPn1o,18087 +networkx/generators/tests/test_harary_graph.py,sha256=GiX5LXXJaNxzjvd-Nyw_QuARzbFGkA6zE1R1eX8mclw,4936 +networkx/generators/tests/test_internet_as_graphs.py,sha256=3et6aawAPZ9bPNWqADJ3bQN_2KhyqPf8VFgrBAkgABQ,8514 +networkx/generators/tests/test_intersection.py,sha256=hcIit5fKfOn3VjMhz9KqovZK9tzxZfmC6ezvA7gZAvM,819 +networkx/generators/tests/test_interval_graph.py,sha256=JYMi-QMkJQdBU9uOdfm0Xr6MEYqIbhU5oSDa6D3tSb0,4277 +networkx/generators/tests/test_joint_degree_seq.py,sha256=8TXTZI3Um2gBXtP-4yhGKf9vCi78-NVmWZw9r9WG3F8,4270 +networkx/generators/tests/test_lattice.py,sha256=iYB2F7j1Qt3QAHkhGnGKcgiRNB1Gq9MxtE3152XdS_A,10102 +networkx/generators/tests/test_line.py,sha256=Dt1R1Hnm47aq_8FX9gRFdby0sV3Oxyap1R5Z4eK0A5o,10516 +networkx/generators/tests/test_mycielski.py,sha256=fwZLO1ybcltRy6TzCel8tPBil1oZWv9QSXs779H6Xt0,946 +networkx/generators/tests/test_nonisomorphic_trees.py,sha256=KFRwMfx-x_a9oGvPlhtMlWGk__rKgm77Gk4LtbxFrZE,2945 +networkx/generators/tests/test_random_clustered.py,sha256=SalHqWvpnXA3QrDRMjLx15dk2c4Us8Ck52clUERoUI8,1297 +networkx/generators/tests/test_random_graphs.py,sha256=R5dHYuzAbcIBduoP7PUdoV-6CFauqk5r1Ak1Slwlsho,19565 
+networkx/generators/tests/test_small.py,sha256=y9rZHpwQG4Bjp-sHJp18rM3A0uEGOUkbklqlJec11ao,8074 +networkx/generators/tests/test_spectral_graph_forge.py,sha256=x4jyTiQiydaUPWYaGsNFsIB47PAzSSwQYCNXGa2B4SU,1594 +networkx/generators/tests/test_stochastic.py,sha256=f-5KD3RpoQf369gXHH7KGebE19g5lCkXR_alcwmFm_s,2179 +networkx/generators/tests/test_sudoku.py,sha256=dgOmk-B7MxCVkbHdZzsLZppQ61FAArVy4McSVL8Afzo,1968 +networkx/generators/tests/test_time_series.py,sha256=rgmFcitlKa_kF6TzJ2ze91lSmNJlqjhvgrYet0AUZx8,2230 +networkx/generators/tests/test_trees.py,sha256=Pvh0MvTKaRuZuwWL-wpJIC0zlBAcnTirpSLJi-9c7qc,7006 +networkx/generators/tests/test_triads.py,sha256=K8anVEP8R90Y172IrKIOrYRWRJBGeqxNqU9isX7Ybxs,333 +networkx/generators/time_series.py,sha256=_DMiY9X95O_9sK2BSeeTb2yMWfStBwKFWwn6FUOXN4Q,2439 +networkx/generators/trees.py,sha256=KvKYhZwwSl1KKMyKBe_6G_UTpLwLLeqdWwRkWJ2Xxz4,36517 +networkx/generators/triads.py,sha256=7kScTf3ITDi3qsSa-IvGMpa9diEaFwQnRuIf3Tv4UBI,2452 +networkx/lazy_imports.py,sha256=pZWn3f6NCY-lGtViKQh11NJ5Cn-cXBKhvacMycg3844,5764 +networkx/linalg/__init__.py,sha256=7iyNZ_YYBnlsW8zSfhUgvEkywOrUWfpIuyS86ZOKlG8,568 +networkx/linalg/algebraicconnectivity.py,sha256=qAjHk8Ew2av9HM-Sevf99alKHWkeo4R76U-1SFn2so8,20790 +networkx/linalg/attrmatrix.py,sha256=gG2cZZm4N7PQhXuq5nxZq1S_OmW61641rdRnrDiX_ws,15900 +networkx/linalg/bethehessianmatrix.py,sha256=Q5dJZOMopwWnvpSdebfo9QSQABEmLy-z1_hGCWsopDo,2513 +networkx/linalg/graphmatrix.py,sha256=NIs2uWGS_8lJJ5IQ8Og9aIWHawghtlCDWifqOIKV2-c,5623 +networkx/linalg/laplacianmatrix.py,sha256=YrzwLTLoGIisJVS2NLaD9g4fA3VDKzlYo1u3yOCVrF4,16722 +networkx/linalg/modularitymatrix.py,sha256=R_VITtgIkGenxlsCLN4u6CYxj3_HiPXfeU29yarntRo,4706 +networkx/linalg/spectrum.py,sha256=aRY7ApYv5HxrO_4O8brxpZRw3SJU3fYzlgMwhEIXcrc,4215 +networkx/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/linalg/tests/test_algebraic_connectivity.py,sha256=uXq7h1fCmvcUyJhh3DujyZvJH2SGlgo-JRdLbff3bKI,13735 +networkx/linalg/tests/test_attrmatrix.py,sha256=I8p_ME5Cq__SZJoxla1y5ynGaa_b4UruZRcuzPOh_wE,2833 +networkx/linalg/tests/test_bethehessian.py,sha256=WQM3H_y1GWrtM2B659HYQ2vvYGs-4H9V1qeo3Bx75B4,1268 +networkx/linalg/tests/test_graphmatrix.py,sha256=6y-DSezX5slSms8LtEJcoggI4wP_duBqodNYWKKvM4w,8655 +networkx/linalg/tests/test_laplacian.py,sha256=sIJ64KVVCUcK3b4ohF-443222vR64myIlV6addwwfWs,13953 +networkx/linalg/tests/test_modularity.py,sha256=Xs7iaUr_QqcxG0t0xHx54iYbM2VyQNmFeANovTMJomE,3056 +networkx/linalg/tests/test_spectrum.py,sha256=Hrl3FE0Z0u4urwY5eNMx0VcH4IRTYgudT04EmfhkosA,2769 +networkx/readwrite/__init__.py,sha256=TvSbnGEHQ5F9CY2tkpjWYOyrUj6BeW3sc6P4_IczbKA,561 +networkx/readwrite/adjlist.py,sha256=XkmtOO-aP-BGLoIjMoSXEOhCIEQz1TJcSrhMHGPCJTc,9050 +networkx/readwrite/edgelist.py,sha256=XwemCOs-xNet3mVIGpxol3RgFVJVa1iXsgBonAHDmug,14292 +networkx/readwrite/gexf.py,sha256=B5rj7Ra1XHSZR4qO5DRJMAEgWwQfiL0IKlE85fzQZ5Q,40438 +networkx/readwrite/gml.py,sha256=JcP0hsDKwpZOPzZeXdoEiKBBNd94Ox8y7_cjT5AVBT4,31193 +networkx/readwrite/graph6.py,sha256=dED6yWelHARFvyKniX8xSTtqj1-asDs1vUBWuZOG9cA,11684 +networkx/readwrite/graphml.py,sha256=sb-D5I5gmqyNj_TSNUvLn-96NBXtOAPFMEKvT3mkdTw,39326 +networkx/readwrite/json_graph/__init__.py,sha256=37XJPMmilcwwo8KqouLWUly7Yv5tZ7IKraMHbBRx3fI,677 +networkx/readwrite/json_graph/adjacency.py,sha256=WM6fdncV87WDLPOfF-IbOlOOBMX0utUjJ09UsxtwRAo,4716 +networkx/readwrite/json_graph/cytoscape.py,sha256=NaHGbwLuQCx_GB5R7Uag2ACKSmrpCFQKsRk3y9hE6Io,5841 
+networkx/readwrite/json_graph/node_link.py,sha256=9hr4sk2fIHYDPuwSyyGNLNKDhahf6aFAEsIRhd5V890,7850 +networkx/readwrite/json_graph/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/readwrite/json_graph/tests/test_adjacency.py,sha256=jueQE3Z_W5BZuCjr0hEsOWSfoQ2fP51p0o0m7IcXUuE,2456 +networkx/readwrite/json_graph/tests/test_cytoscape.py,sha256=vFoDzcSRI9THlmp4Fu2HHhIF9AUmECWs5mftVWjaWWs,2044 +networkx/readwrite/json_graph/tests/test_node_link.py,sha256=3RbMNRlLBLN86EG4YyCvNmA3ISzmECqr17EqTcov3kI,3364 +networkx/readwrite/json_graph/tests/test_tree.py,sha256=zBXv3_db2XGxFs3XQ35btNf_ku52aLXXiHZmmX4ixAs,1352 +networkx/readwrite/json_graph/tree.py,sha256=K4rF4Kds4g0JhgcPTrrR_I3Pswpze8yCVH4M-WF9nn0,3851 +networkx/readwrite/leda.py,sha256=mtuIRKecfykiInePt-oIw91JB1vIgNXuUEbuBInO7Hs,2852 +networkx/readwrite/multiline_adjlist.py,sha256=TyvVZih3DIYaCQ6MC7XF8GuGBfcYIX-q-ccYu2Qs05M,11384 +networkx/readwrite/p2g.py,sha256=wDPg_3u42rlsv8pFtVLbZIjo-BN6dd95jCxFAoEC_6E,3253 +networkx/readwrite/pajek.py,sha256=0Xli27YbjyW5Ny6-qqVOtBmhZkPj4i3cJPvwjU3mMjM,8746 +networkx/readwrite/sparse6.py,sha256=kGEbDmnK1qcR5I6LZnPKYXLe7Qp6esT6gftkwSt1xcs,10442 +networkx/readwrite/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/readwrite/tests/test_adjlist.py,sha256=aCDyLpv8cekWwEZgv5rB4gtwUvor7_YDArTYvL2ngPs,12756 +networkx/readwrite/tests/test_edgelist.py,sha256=5_rZ5rI_o8VDJr-R4besR-XcPwKY8-dHn4VmrrlBorc,10190 +networkx/readwrite/tests/test_gexf.py,sha256=TJLCj0ULSewUt5PlqEuoD8yhZoc6jFPqElIyQYhoKz0,21327 +networkx/readwrite/tests/test_gml.py,sha256=SIuCCSoH9psZdjKkrJfdGHbMROhUsfms70B71rnozUU,21399 +networkx/readwrite/tests/test_graph6.py,sha256=WMQmTgrI_VDcli1Gl5cjM17EFKc8_AkwoR14FLS1sXQ,6559 +networkx/readwrite/tests/test_graphml.py,sha256=RHZqs_-0Vw_NboMK1Sq65Nmk_XPAjfvnsgiPCptnpnI,67633 +networkx/readwrite/tests/test_leda.py,sha256=_5F4nLLQ1oAZQMZtTQoFncZL0Oc-IsztFBglEdQeH3k,1392 +networkx/readwrite/tests/test_p2g.py,sha256=0kvtohQuM2zjw88ogoDMhVbFcm85R3Sk2_yFdQ7TcWg,1319 +networkx/readwrite/tests/test_pajek.py,sha256=VxMk_TXQE1R-2t2B2OTSkwvPepKNYwExDbdlTDb0SOM,4696 +networkx/readwrite/tests/test_sparse6.py,sha256=zpd0hUPtOqwAJiuke4FXWFaQVa6Fa47dgIdHpCWXQ6U,5288 +networkx/readwrite/tests/test_text.py,sha256=x1N97hD31HPkj9Wn2PYti5-gcwaFNnStkaN_38HKnIg,55319 +networkx/readwrite/text.py,sha256=u62a-zG_7NpJD4Bh6kBZbUntORVM4XZId9zGX9wJBGQ,29140 +networkx/relabel.py,sha256=0HptAQOBToKhLZzxscd6FQpzVCNMlYmiHjHul69ct8o,10300 +networkx/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/tests/test_all_random_functions.py,sha256=T6bYvfqCmBfqOHAAJAqCv95VceTqdT32cN740HZTWH0,8549 +networkx/tests/test_convert.py,sha256=SoIVrqJFF9Gu9Jff_apfbpqg8QhkfC6QW4qzoSM-ukM,12731 +networkx/tests/test_convert_numpy.py,sha256=wivMBF5pmDd2FdrHX72J4Xt26IxTIfb12G-t-TWch40,19032 +networkx/tests/test_convert_pandas.py,sha256=VXumRYNIV2jdr4dfDsRidqpUDXHlgbhAFhaiqWlcx6s,13361 +networkx/tests/test_convert_scipy.py,sha256=IO9Xfke5Vk0SC4c7UR4YbQMjVZBuADQBt_wlMaOxTSQ,10381 +networkx/tests/test_exceptions.py,sha256=XYkpPzqMepSw3MPRUJN5LcFsUsy3YT_fiRDhm0OeAeQ,927 +networkx/tests/test_import.py,sha256=Gm4ujfH9JkQtDrSjOlwXXXUuubI057wskKLCkF6Z92k,220 +networkx/tests/test_lazy_imports.py,sha256=PylxEvRtD8ekwH-1M1sjnV_4uyhCYQl4WvlyF6ah0wo,2675 +networkx/tests/test_relabel.py,sha256=2qua3Z45m9mXkBQYmD82h17-pK6fYHJLpntn6ILj9cg,14554 +networkx/tests/test_removed_functions_exception_messages.py,sha256=V6AyU-RHQmilakEMJ1kpK4HSgzKbmimIxu0pBQ1DVaY,177 
+networkx/utils/__init__.py,sha256=7pxleRNpBWuL3FEQz3CzKLn17b6_eSwkM7dqnL1okDk,302 +networkx/utils/backends.py,sha256=IDXNJjBWOfcqPm3Any5-kfwU9hZTH33R16gr3vZ6wnE,96862 +networkx/utils/configs.py,sha256=1LFNHgkR6CdXxTpNT8j4QdQ5vn5r7BTzPlxFYrEm7_E,15440 +networkx/utils/decorators.py,sha256=rRgAZw6glxOBYN6n5kkbQ7RS43EH3eVbJ5F91-uk0IM,44713 +networkx/utils/heaps.py,sha256=98tdylWKqrLQKdNlpjT2NvIpuOF2mT2Lhh5OzVg3Hu8,10355 +networkx/utils/mapped_queue.py,sha256=WdIRk27D_ArmPs9tdpvQLQCV4Tmus212BQhxsFIMYgk,10184 +networkx/utils/misc.py,sha256=zifBxwKM3msQsmMVg5vAMDo84ZPByU_yEig0VPS0iek,22844 +networkx/utils/random_sequence.py,sha256=kk3F8_5cazolIeSL_eDdZzQ15aB3b5lHazef9kM1NNw,5303 +networkx/utils/rcm.py,sha256=t2LJq6BG39JnPl_353nM8eDfGWy3LVL8BdrBZCcAy2g,4618 +networkx/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/utils/tests/test__init.py,sha256=QE0i-lNE4pG2eYjB2mZ0uw7jPD-7TdL7Y9p73JoWQmo,363 +networkx/utils/tests/test_backends.py,sha256=VzzgmdT_ymvQ7EKB4BbdntkBYaO5w6OgXzvr7pbgQsY,7784 +networkx/utils/tests/test_config.py,sha256=eBAiwADWJgCAIyJjgF3-zn89VPIGftiWMWyccpFnyMU,8717 +networkx/utils/tests/test_decorators.py,sha256=dm3b5yiQPlnlT_4pSm0FwK-xBGV9dcnhv14Vh9Jiz1o,14050 +networkx/utils/tests/test_heaps.py,sha256=qCuWMzpcMH1Gwu014CAams78o151QD5YL0mB1fz16Yw,3711 +networkx/utils/tests/test_mapped_queue.py,sha256=l1Nguzz68Fv91FnAT7y7B0GXSoje9uoWiObHo7TliGM,7354 +networkx/utils/tests/test_misc.py,sha256=3rKc5wy3qAwk1JdVZYD8FaTF4_4Y1_Tq1nbza6eR_jc,13965 +networkx/utils/tests/test_random_sequence.py,sha256=yvfWhJQJJqM1W-_etHo7BsLXA18YiHcLaF-IdhSwcYA,1502 +networkx/utils/tests/test_rcm.py,sha256=UvUAkgmQMGk_Nn94TJyQsle4A5SLQFqMQWld1tiQ2lk,1421 +networkx/utils/tests/test_unionfind.py,sha256=j-DF5XyeJzq1hoeAgN5Nye2Au7EPD040t8oS4Aw2IwU,1579 +networkx/utils/union_find.py,sha256=NxKlBlyS71A1Wlnt28L-wyZoI9ExZvJth_0e2XSVris,3338 diff --git a/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/WHEEL b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..e7fa31b6f3f78deb1022c1f7927f07d4d16da822 --- /dev/null +++ b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/entry_points.txt b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..377282a2e4c3dc8b924035d0dfd32d7ae50920c8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[networkx.backends] +nx_loopback = networkx.classes.tests.dispatch_interface:backend_interface diff --git a/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d07dfe2f85d6849d7f416dcce756b2501ba847e --- /dev/null +++ b/py311/lib/python3.11/site-packages/networkx-3.6.1.dist-info/top_level.txt @@ -0,0 +1 @@ +networkx diff --git a/py311/lib/python3.11/site-packages/networkx/__init__.py 
b/py311/lib/python3.11/site-packages/networkx/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b975a1ff66b1b1e87de65a9966dafdb09cb00031
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/networkx/__init__.py
@@ -0,0 +1,62 @@
+"""
+NetworkX
+========
+
+NetworkX is a Python package for the creation, manipulation, and study of the
+structure, dynamics, and functions of complex networks.
+
+See https://networkx.org for complete documentation.
+"""
+
+__version__ = "3.6.1"
+
+
+# These are imported in order as listed
+from networkx.lazy_imports import _lazy_import
+
+from networkx.exception import *
+
+from networkx import utils
+from networkx.utils import _clear_cache, _dispatchable
+
+# load_and_call entry_points, set configs
+config = utils.backends._set_configs_from_environment()
+utils.config = utils.configs.config = config  # type: ignore[attr-defined]
+
+from networkx import classes
+from networkx.classes import filters
+from networkx.classes import *
+
+from networkx import convert
+from networkx.convert import *
+
+from networkx import convert_matrix
+from networkx.convert_matrix import *
+
+from networkx import relabel
+from networkx.relabel import *
+
+from networkx import generators
+from networkx.generators import *
+
+from networkx import readwrite
+from networkx.readwrite import *
+
+# Need to test with SciPy, when available
+from networkx import algorithms
+from networkx.algorithms import *
+
+from networkx import linalg
+from networkx.linalg import *
+
+from networkx import drawing
+from networkx.drawing import *
+
+
+def __getattr__(name):
+    if name == "random_tree":
+        raise AttributeError(
+            "nx.random_tree was removed in version 3.4. Use `nx.random_labeled_tree` instead.\n"
+            "See: https://networkx.org/documentation/latest/release/release_3.4.html"
+        )
+    raise AttributeError(f"module 'networkx' has no attribute '{name}'")
diff --git a/py311/lib/python3.11/site-packages/networkx/conftest.py b/py311/lib/python3.11/site-packages/networkx/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..186b33bb7039f202656b5072c2452b369f1cae8a
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/networkx/conftest.py
@@ -0,0 +1,261 @@
+"""
+Testing
+=======
+
+General guidelines for writing good tests:
+
+- doctests always assume ``import networkx as nx``, so don't add that.
+- prefer pytest fixtures over classes with setup methods.
+- use the ``@pytest.mark.parametrize`` decorator.
+- use ``pytest.importorskip`` for numpy, scipy, pandas, and matplotlib because
+  of PyPy, and add the module to the relevant entries below.
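+- mark long-running tests with ``@pytest.mark.slow``; they are skipped unless
+  pytest is invoked with ``--runslow`` (both are registered in the
+  ``pytest_addoption`` and ``pytest_configure`` hooks below), e.g. with a
+  hypothetical test::
+
+      @pytest.mark.slow
+      def test_something_expensive():
+          ...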
+ +""" + +import os +import warnings +from importlib.metadata import entry_points + +import pytest + +import networkx as nx + + +def pytest_addoption(parser): + parser.addoption( + "--runslow", action="store_true", default=False, help="run slow tests" + ) + parser.addoption( + "--backend", + action="store", + default=None, + help="Run tests with a backend by auto-converting nx graphs to backend graphs", + ) + parser.addoption( + "--fallback-to-nx", + action="store_true", + default=False, + help="Run nx function if a backend doesn't implement a dispatchable function" + " (use with --backend)", + ) + + +def pytest_configure(config): + config.addinivalue_line("markers", "slow: mark test as slow to run") + backend = config.getoption("--backend") + if backend is None: + backend = os.environ.get("NETWORKX_TEST_BACKEND") + # nx_loopback backend is only available when testing with a backend + loopback_ep = entry_points(name="nx_loopback", group="networkx.backends") + if not loopback_ep: + warnings.warn( + "\n\n WARNING: Mixed NetworkX configuration! \n\n" + " This environment has mixed configuration for networkx.\n" + " The test object nx_loopback is not configured correctly.\n" + " You should not be seeing this message.\n" + " Try `pip install -e .`, or change your PYTHONPATH\n" + " Make sure python finds the networkx repo you are testing\n\n" + ) + config.backend = backend + if backend: + # We will update `networkx.config.backend_priority` below in `*_modify_items` + # to allow tests to get set up with normal networkx graphs. + nx.utils.backends.backends["nx_loopback"] = loopback_ep["nx_loopback"] + nx.utils.backends.backend_info["nx_loopback"] = {} + nx.config.backends = nx.utils.Config( + nx_loopback=nx.utils.Config(), + **nx.config.backends, + ) + fallback_to_nx = config.getoption("--fallback-to-nx") + if not fallback_to_nx: + fallback_to_nx = os.environ.get("NETWORKX_FALLBACK_TO_NX") + nx.config.fallback_to_nx = bool(fallback_to_nx) + nx.utils.backends._dispatchable.__call__ = ( + nx.utils.backends._dispatchable._call_if_any_backends_installed + ) + + +def pytest_collection_modifyitems(config, items): + # Setting this to True here allows tests to be set up before dispatching + # any function call to a backend. + if config.backend: + # Allow pluggable backends to add markers to tests (such as skip or xfail) + # when running in auto-conversion test mode + backend_name = config.backend + if backend_name != "networkx": + nx.utils.backends._dispatchable._is_testing = True + nx.config.backend_priority.algos = [backend_name] + nx.config.backend_priority.generators = [backend_name] + backend = nx.utils.backends.backends[backend_name].load() + if hasattr(backend, "on_start_tests"): + getattr(backend, "on_start_tests")(items) + + if config.getoption("--runslow"): + # --runslow given in cli: do not skip slow tests + return + skip_slow = pytest.mark.skip(reason="need --runslow option to run") + for item in items: + if "slow" in item.keywords: + item.add_marker(skip_slow) + + +# TODO: The warnings below need to be dealt with, but for now we silence them. 
+@pytest.fixture(autouse=True) +def set_warnings(): + warnings.filterwarnings( + "ignore", + category=UserWarning, + message=r"Exited (at iteration \d+|postprocessing) with accuracies.*", + ) + warnings.filterwarnings( + "ignore", + category=UserWarning, + message=r"The hashes produced for ", + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\n\nThe `normalized`" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="maybe_regular_expander" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="metric_closure is deprecated" + ) + + +@pytest.fixture(autouse=True) +def add_nx(doctest_namespace): + doctest_namespace["nx"] = nx + + +# What dependencies are installed? + +try: + import numpy as np + + has_numpy = True +except ImportError: + has_numpy = False + +try: + import scipy as sp + + has_scipy = True +except ImportError: + has_scipy = False + +try: + import matplotlib as mpl + + has_matplotlib = True +except ImportError: + has_matplotlib = False + +try: + import pandas as pd + + has_pandas = True +except ImportError: + has_pandas = False + +try: + import pygraphviz + + has_pygraphviz = True +except ImportError: + has_pygraphviz = False + +try: + import pydot + + has_pydot = True +except ImportError: + has_pydot = False + +try: + import sympy + + has_sympy = True +except ImportError: + has_sympy = False + + +# List of files that pytest should ignore + +collect_ignore = [] + +needs_numpy = [ + "algorithms/approximation/traveling_salesman.py", + "algorithms/centrality/current_flow_closeness.py", + "algorithms/centrality/laplacian.py", + "algorithms/node_classification.py", + "algorithms/non_randomness.py", + "algorithms/polynomials.py", + "algorithms/shortest_paths/dense.py", + "algorithms/tree/mst.py", + "drawing/nx_latex.py", + "generators/expanders.py", + "linalg/bethehessianmatrix.py", + "linalg/laplacianmatrix.py", + "utils/misc.py", +] +needs_scipy = [ + "algorithms/approximation/traveling_salesman.py", + "algorithms/assortativity/correlation.py", + "algorithms/assortativity/mixing.py", + "algorithms/assortativity/pairs.py", + "algorithms/bipartite/matrix.py", + "algorithms/bipartite/spectral.py", + "algorithms/bipartite/link_analysis.py", + "algorithms/centrality/current_flow_betweenness.py", + "algorithms/centrality/current_flow_betweenness_subset.py", + "algorithms/centrality/eigenvector.py", + "algorithms/centrality/katz.py", + "algorithms/centrality/laplacian.py", + "algorithms/centrality/second_order.py", + "algorithms/centrality/subgraph_alg.py", + "algorithms/communicability_alg.py", + "algorithms/community/divisive.py", + "algorithms/community/bipartitions.py", + "algorithms/distance_measures.py", + "algorithms/link_analysis/hits_alg.py", + "algorithms/link_analysis/pagerank_alg.py", + "algorithms/node_classification.py", + "algorithms/similarity.py", + "algorithms/tree/mst.py", + "algorithms/walks.py", + "convert_matrix.py", + "drawing/layout.py", + "drawing/nx_pylab.py", + "generators/spectral_graph_forge.py", + "generators/geometric.py", + "generators/expanders.py", + "linalg/algebraicconnectivity.py", + "linalg/attrmatrix.py", + "linalg/bethehessianmatrix.py", + "linalg/graphmatrix.py", + "linalg/laplacianmatrix.py", + "linalg/modularitymatrix.py", + "linalg/spectrum.py", + "utils/rcm.py", +] +needs_matplotlib = ["drawing/nx_pylab.py", "generators/classic.py"] +needs_pandas = ["convert_matrix.py"] +needs_pygraphviz = ["drawing/nx_agraph.py"] +needs_pydot = ["drawing/nx_pydot.py"] 
+needs_sympy = ["algorithms/polynomials.py"] + +if not has_numpy: + collect_ignore += needs_numpy +if not has_scipy: + collect_ignore += needs_scipy +if not has_matplotlib: + collect_ignore += needs_matplotlib +if not has_pandas: + collect_ignore += needs_pandas +if not has_pygraphviz: + collect_ignore += needs_pygraphviz +if not has_pydot: + collect_ignore += needs_pydot +if not has_sympy: + collect_ignore += needs_sympy diff --git a/py311/lib/python3.11/site-packages/networkx/convert.py b/py311/lib/python3.11/site-packages/networkx/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..bf0a502a271f04594d7384f9eb7c4e6cf92f4edb --- /dev/null +++ b/py311/lib/python3.11/site-packages/networkx/convert.py @@ -0,0 +1,502 @@ +"""Functions to convert NetworkX graphs to and from other formats. + +The preferred way of converting data to a NetworkX graph is through the +graph constructor. The constructor calls the to_networkx_graph() function +which attempts to guess the input type and convert it automatically. + +Examples +-------- +Create a graph with a single edge from a dictionary of dictionaries + +>>> d = {0: {1: 1}} # dict-of-dicts single edge (0,1) +>>> G = nx.Graph(d) + +See Also +-------- +nx_agraph, nx_pydot +""" + +from collections.abc import Collection, Generator, Iterator + +import networkx as nx + +__all__ = [ + "to_networkx_graph", + "from_dict_of_dicts", + "to_dict_of_dicts", + "from_dict_of_lists", + "to_dict_of_lists", + "from_edgelist", + "to_edgelist", +] + + +def to_networkx_graph(data, create_using=None, multigraph_input=False): + """Make a NetworkX graph from a known data structure. + + The preferred way to call this is automatically + from the class constructor + + >>> d = {0: {1: {"weight": 1}}} # dict-of-dicts single edge (0,1) + >>> G = nx.Graph(d) + + instead of the equivalent + + >>> G = nx.from_dict_of_dicts(d) + + Parameters + ---------- + data : object to be converted + + Current known types are: + any NetworkX graph + dict-of-dicts + dict-of-lists + container (e.g. set, list, tuple) of edges + iterator (e.g. itertools.chain) that produces edges + generator of edges + Pandas DataFrame (row per edge) + 2D numpy array + scipy sparse array + pygraphviz agraph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + multigraph_input : bool (default False) + If True and data is a dict_of_dicts, + try to create a multigraph assuming dict_of_dict_of_lists. + If data and create_using are both multigraphs then create + a multigraph from a multigraph. 
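+
+    Examples
+    --------
+    A dict-of-dict-of-dicts keyed node -> neighbor -> edge key -> edge data
+    converts to a multigraph when ``multigraph_input=True`` (a small sketch
+    relying on the ``from_dict_of_dicts`` handling below):
+
+    >>> d = {0: {1: {"a": {}, "b": {}}}, 1: {0: {"a": {}, "b": {}}}}
+    >>> G = nx.to_networkx_graph(d, create_using=nx.MultiGraph, multigraph_input=True)
+    >>> G.number_of_edges()
+    2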
+ + """ + # NX graph + if hasattr(data, "adj"): + try: + result = from_dict_of_dicts( + data.adj, + create_using=create_using, + multigraph_input=data.is_multigraph(), + ) + # data.graph should be dict-like + result.graph.update(data.graph) + # data.nodes should be dict-like + # result.add_node_from(data.nodes.items()) possible but + # for custom node_attr_dict_factory which may be hashable + # will be unexpected behavior + for n, dd in data.nodes.items(): + result._node[n].update(dd) + return result + except Exception as err: + raise nx.NetworkXError("Input is not a correct NetworkX graph.") from err + + # dict of dicts/lists + if isinstance(data, dict): + try: + return from_dict_of_dicts( + data, create_using=create_using, multigraph_input=multigraph_input + ) + except Exception as err1: + if multigraph_input is True: + raise nx.NetworkXError( + f"converting multigraph_input raised:\n{type(err1)}: {err1}" + ) + try: + return from_dict_of_lists(data, create_using=create_using) + except Exception as err2: + raise TypeError("Input is not known type.") from err2 + + # edgelists + if isinstance(data, list | tuple | nx.reportviews.EdgeViewABC | Iterator): + try: + return from_edgelist(data, create_using=create_using) + except: + pass + + # pygraphviz agraph + if hasattr(data, "is_strict"): + try: + return nx.nx_agraph.from_agraph(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError("Input is not a correct pygraphviz graph.") from err + + # Pandas DataFrame + try: + import pandas as pd + + if isinstance(data, pd.DataFrame): + if data.shape[0] == data.shape[1]: + try: + return nx.from_pandas_adjacency(data, create_using=create_using) + except Exception as err: + msg = "Input is not a correct Pandas DataFrame adjacency matrix." + raise nx.NetworkXError(msg) from err + else: + try: + return nx.from_pandas_edgelist( + data, edge_attr=True, create_using=create_using + ) + except Exception as err: + msg = "Input is not a correct Pandas DataFrame edge-list." + raise nx.NetworkXError(msg) from err + except ImportError: + pass + + # numpy array + try: + import numpy as np + + if isinstance(data, np.ndarray): + try: + return nx.from_numpy_array(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError( + f"Failed to interpret array as an adjacency matrix." + ) from err + except ImportError: + pass + + # scipy sparse array - any format + try: + import scipy as sp + + if hasattr(data, "format"): + try: + return nx.from_scipy_sparse_array(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError( + "Input is not a correct scipy sparse array type." + ) from err + except ImportError: + pass + + # Note: most general check - should remain last in order of execution + # Includes containers (e.g. list, set, dict, etc.), generators, and + # iterators (e.g. itertools.chain) of edges + + if isinstance(data, Collection | Generator | Iterator): + try: + return from_edgelist(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError("Input is not a valid edge list") from err + + raise nx.NetworkXError("Input is not a known data type for conversion.") + + +@nx._dispatchable +def to_dict_of_lists(G, nodelist=None): + """Returns adjacency representation of graph as a dictionary of lists. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + Notes + ----- + Completely ignores edge data for MultiGraph and MultiDiGraph. 
+ + """ + if nodelist is None: + nodelist = G + + d = {} + for n in nodelist: + d[n] = [nbr for nbr in G.neighbors(n) if nbr in nodelist] + return d + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_dict_of_lists(d, create_using=None): + """Returns a graph from a dictionary of lists. + + Parameters + ---------- + d : dictionary of lists + A dictionary of lists adjacency representation. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Examples + -------- + >>> dol = {0: [1]} # single edge (0,1) + >>> G = nx.from_dict_of_lists(dol) + + or + + >>> G = nx.Graph(dol) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_nodes_from(d) + if G.is_multigraph() and not G.is_directed(): + # a dict_of_lists can't show multiedges. BUT for undirected graphs, + # each edge shows up twice in the dict_of_lists. + # So we need to treat this case separately. + seen = {} + for node, nbrlist in d.items(): + for nbr in nbrlist: + if nbr not in seen: + G.add_edge(node, nbr) + seen[node] = 1 # don't allow reverse edge to show up + else: + G.add_edges_from( + ((node, nbr) for node, nbrlist in d.items() for nbr in nbrlist) + ) + return G + + +def to_dict_of_dicts(G, nodelist=None, edge_data=None): + """Returns adjacency representation of graph as a dictionary of dictionaries. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist. If None, all nodes in G. + + edge_data : scalar, optional (default: the G edgedatadict for each edge) + If provided, the value of the dictionary will be set to `edge_data` for + all edges. Usual values could be `1` or `True`. If `edge_data` is + `None` (the default), the edgedata in `G` is used, resulting in a + dict-of-dict-of-dicts. If `G` is a MultiGraph, the result will be a + dict-of-dict-of-dict-of-dicts. See Notes for an approach to customize + handling edge data. `edge_data` should *not* be a container as it will + be the same container for all the edges. + + Returns + ------- + dod : dict + A nested dictionary representation of `G`. Note that the level of + nesting depends on the type of `G` and the value of `edge_data` + (see Examples). + + See Also + -------- + from_dict_of_dicts, to_dict_of_lists + + Notes + ----- + For a more custom approach to handling edge data, try:: + + dod = { + n: {nbr: custom(n, nbr, dd) for nbr, dd in nbrdict.items()} + for n, nbrdict in G.adj.items() + } + + where `custom` returns the desired edge data for each edge between `n` and + `nbr`, given existing edge data `dd`. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> nx.to_dict_of_dicts(G) + {0: {1: {}}, 1: {0: {}, 2: {}}, 2: {1: {}}} + + Edge data is preserved by default (``edge_data=None``), resulting + in dict-of-dict-of-dicts where the innermost dictionary contains the + edge data: + + >>> G = nx.Graph() + >>> G.add_edges_from( + ... [ + ... (0, 1, {"weight": 1.0}), + ... (1, 2, {"weight": 2.0}), + ... (2, 0, {"weight": 1.0}), + ... ] + ... 
) + >>> d = nx.to_dict_of_dicts(G) + >>> d # doctest: +SKIP + {0: {1: {'weight': 1.0}, 2: {'weight': 1.0}}, + 1: {0: {'weight': 1.0}, 2: {'weight': 2.0}}, + 2: {1: {'weight': 2.0}, 0: {'weight': 1.0}}} + >>> d[1][2]["weight"] + 2.0 + + If `edge_data` is not `None`, edge data in the original graph (if any) is + replaced: + + >>> d = nx.to_dict_of_dicts(G, edge_data=1) + >>> d + {0: {1: 1, 2: 1}, 1: {0: 1, 2: 1}, 2: {1: 1, 0: 1}} + >>> d[1][2] + 1 + + This also applies to MultiGraphs: edge data is preserved by default: + + >>> G = nx.MultiGraph() + >>> G.add_edge(0, 1, key="a", weight=1.0) + 'a' + >>> G.add_edge(0, 1, key="b", weight=5.0) + 'b' + >>> d = nx.to_dict_of_dicts(G) + >>> d # doctest: +SKIP + {0: {1: {'a': {'weight': 1.0}, 'b': {'weight': 5.0}}}, + 1: {0: {'a': {'weight': 1.0}, 'b': {'weight': 5.0}}}} + >>> d[0][1]["b"]["weight"] + 5.0 + + But multi edge data is lost if `edge_data` is not `None`: + + >>> d = nx.to_dict_of_dicts(G, edge_data=10) + >>> d + {0: {1: 10}, 1: {0: 10}} + """ + dod = {} + if nodelist is None: + if edge_data is None: + for u, nbrdict in G.adjacency(): + dod[u] = nbrdict.copy() + else: # edge_data is not None + for u, nbrdict in G.adjacency(): + dod[u] = dod.fromkeys(nbrdict, edge_data) + else: # nodelist is not None + if edge_data is None: + for u in nodelist: + dod[u] = {} + for v, data in ((v, data) for v, data in G[u].items() if v in nodelist): + dod[u][v] = data + else: # nodelist and edge_data are not None + for u in nodelist: + dod[u] = {} + for v in (v for v in G[u] if v in nodelist): + dod[u][v] = edge_data + return dod + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_dict_of_dicts(d, create_using=None, multigraph_input=False): + """Returns a graph from a dictionary of dictionaries. + + Parameters + ---------- + d : dictionary of dictionaries + A dictionary of dictionaries adjacency representation. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + multigraph_input : bool (default False) + When True, the dict `d` is assumed + to be a dict-of-dict-of-dict-of-dict structure keyed by + node to neighbor to edge keys to edge data for multi-edges. + Otherwise this routine assumes dict-of-dict-of-dict keyed by + node to neighbor to edge data. + + Examples + -------- + >>> dod = {0: {1: {"weight": 1}}} # single edge (0,1) + >>> G = nx.from_dict_of_dicts(dod) + + or + + >>> G = nx.Graph(dod) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_nodes_from(d) + # does dict d represent a MultiGraph or MultiDiGraph? 
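+    # For example, with multigraph_input=True the expected shape is
+    # dict-of-dict-of-dict-of-dicts, keyed node -> neighbor -> edge key -> data:
+    #     {0: {1: {"a": {"weight": 1}, "b": {"weight": 5}}}}
+    # while the default shape is dict-of-dict-of-dicts:
+    #     {0: {1: {"weight": 1}}}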
+ if multigraph_input: + if G.is_directed(): + if G.is_multigraph(): + G.add_edges_from( + (u, v, key, data) + for u, nbrs in d.items() + for v, datadict in nbrs.items() + for key, data in datadict.items() + ) + else: + G.add_edges_from( + (u, v, data) + for u, nbrs in d.items() + for v, datadict in nbrs.items() + for key, data in datadict.items() + ) + else: # Undirected + if G.is_multigraph(): + seen = set() # don't add both directions of undirected graph + for u, nbrs in d.items(): + for v, datadict in nbrs.items(): + if (u, v) not in seen: + G.add_edges_from( + (u, v, key, data) for key, data in datadict.items() + ) + seen.add((v, u)) + else: + seen = set() # don't add both directions of undirected graph + for u, nbrs in d.items(): + for v, datadict in nbrs.items(): + if (u, v) not in seen: + G.add_edges_from( + (u, v, data) for key, data in datadict.items() + ) + seen.add((v, u)) + + else: # not a multigraph to multigraph transfer + if G.is_multigraph() and not G.is_directed(): + # d can have both representations u-v, v-u in dict. Only add one. + # We don't need this check for digraphs since we add both directions, + # or for Graph() since it is done implicitly (parallel edges not allowed) + seen = set() + for u, nbrs in d.items(): + for v, data in nbrs.items(): + if (u, v) not in seen: + G.add_edge(u, v, key=0) + G[u][v][0].update(data) + seen.add((v, u)) + else: + G.add_edges_from( + ((u, v, data) for u, nbrs in d.items() for v, data in nbrs.items()) + ) + return G + + +@nx._dispatchable(preserve_edge_attrs=True) +def to_edgelist(G, nodelist=None): + """Returns a list of edges in the graph. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + """ + if nodelist is None: + return G.edges(data=True) + return G.edges(nodelist, data=True) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_edgelist(edgelist, create_using=None): + """Returns a graph from a list of edges. + + Parameters + ---------- + edgelist : list or iterator + Edge tuples + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Examples + -------- + >>> edgelist = [(0, 1)] # single edge (0,1) + >>> G = nx.from_edgelist(edgelist) + + or + + >>> G = nx.Graph(edgelist) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_edges_from(edgelist) + return G diff --git a/py311/lib/python3.11/site-packages/networkx/convert_matrix.py b/py311/lib/python3.11/site-packages/networkx/convert_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..72a551798d13a16f8666d3a70274beaafb63d6a7 --- /dev/null +++ b/py311/lib/python3.11/site-packages/networkx/convert_matrix.py @@ -0,0 +1,1314 @@ +"""Functions to convert NetworkX graphs to and from common data containers +like numpy arrays, scipy sparse arrays, and pandas DataFrames. + +The preferred way of converting data to a NetworkX graph is through the +graph constructor. The constructor calls the `~networkx.convert.to_networkx_graph` +function which attempts to guess the input type and convert it automatically. + +Examples +-------- +Create a 10 node random graph from a numpy array + +>>> import numpy as np +>>> rng = np.random.default_rng() +>>> a = rng.integers(low=0, high=2, size=(10, 10)) +>>> DG = nx.from_numpy_array(a, create_using=nx.DiGraph) + +or equivalently: + +>>> DG = nx.DiGraph(a) + +which calls `from_numpy_array` internally based on the type of ``a``. 
+ +See Also +-------- +nx_agraph, nx_pydot +""" + +import itertools +from collections import defaultdict + +import networkx as nx + +__all__ = [ + "from_pandas_adjacency", + "to_pandas_adjacency", + "from_pandas_edgelist", + "to_pandas_edgelist", + "from_scipy_sparse_array", + "to_scipy_sparse_array", + "from_numpy_array", + "to_numpy_array", +] + + +@nx._dispatchable(edge_attrs="weight") +def to_pandas_adjacency( + G, + nodelist=None, + dtype=None, + order=None, + multigraph_weight=sum, + weight="weight", + nonedge=0.0, +): + """Returns the graph adjacency matrix as a Pandas DataFrame. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the Pandas DataFrame. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + multigraph_weight : {sum, min, max}, optional + An operator that determines how weights in multigraphs are handled. + The default is to sum the weights of the multiple edges. + + weight : string or None, optional + The edge attribute that holds the numerical value used for + the edge weight. If an edge does not have that attribute, then the + value 1 is used instead. + + nonedge : float, optional + The matrix values corresponding to nonedges are typically set to zero. + However, this could be undesirable if there are matrix values + corresponding to actual edges that also have the value zero. If so, + one might prefer nonedges to have some other value, such as nan. + + Returns + ------- + df : Pandas DataFrame + Graph adjacency matrix + + Notes + ----- + For directed graphs, entry i,j corresponds to an edge from i to j. + + The DataFrame entries are assigned to the weight edge attribute. When + an edge does not have a weight attribute, the value of the entry is set to + the number 1. For multiple (parallel) edges, the values of the entries + are determined by the 'multigraph_weight' parameter. The default is to + sum the weight attributes for each of the parallel edges. + + When `nodelist` does not contain every node in `G`, the matrix is built + from the subgraph of `G` that is induced by the nodes in `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting Pandas DataFrame can be modified as follows:: + + >>> import pandas as pd + >>> G = nx.Graph([(1, 1), (2, 2)]) + >>> df = nx.to_pandas_adjacency(G) + >>> df + 1 2 + 1 1.0 0.0 + 2 0.0 1.0 + >>> diag_idx = list(range(len(df))) + >>> df.iloc[diag_idx, diag_idx] *= 2 + >>> df + 1 2 + 1 2.0 0.0 + 2 0.0 2.0 + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> nx.to_pandas_adjacency(G, nodelist=[0, 1, 2], dtype=int) + 0 1 2 + 0 0 2 0 + 1 1 0 0 + 2 0 0 4 + + """ + import pandas as pd + + M = to_numpy_array( + G, + nodelist=nodelist, + dtype=dtype, + order=order, + multigraph_weight=multigraph_weight, + weight=weight, + nonedge=nonedge, + ) + if nodelist is None: + nodelist = list(G) + return pd.DataFrame(data=M, index=nodelist, columns=nodelist) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_pandas_adjacency(df, create_using=None): + r"""Returns a graph from Pandas DataFrame. 
+ + The Pandas DataFrame is interpreted as an adjacency matrix for the graph. + + Parameters + ---------- + df : Pandas DataFrame + An adjacency matrix representation of a graph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of df corresponds to an edge from i to j. + + If `df` has a single data type for each entry it will be converted to an + appropriate Python data type. + + If you have node attributes stored in a separate dataframe `df_nodes`, + you can load those attributes to the graph `G` using the following code:: + + df_nodes = pd.DataFrame({"node_id": [1, 2, 3], "attribute1": ["A", "B", "C"]}) + G.add_nodes_from((n, dict(d)) for n, d in df_nodes.iterrows()) + + If `df` has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. + + See Also + -------- + to_pandas_adjacency + + Examples + -------- + Simple integer weights on edges: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> df = pd.DataFrame([[1, 1], [2, 1]]) + >>> df + 0 1 + 0 1 1 + 1 2 1 + >>> G = nx.from_pandas_adjacency(df) + >>> G.name = "Graph from pandas adjacency matrix" + >>> print(G) + Graph named 'Graph from pandas adjacency matrix' with 2 nodes and 3 edges + """ + + try: + df = df[df.index] + except Exception as err: + missing = list(set(df.index).difference(set(df.columns))) + msg = f"{missing} not in columns" + raise nx.NetworkXError("Columns must match Indices.", msg) from err + + A = df.values + G = from_numpy_array(A, create_using=create_using, nodelist=df.columns) + + return G + + +@nx._dispatchable(preserve_edge_attrs=True) +def to_pandas_edgelist( + G, + source="source", + target="target", + nodelist=None, + dtype=None, + edge_key=None, +): + """Returns the graph edge list as a Pandas DataFrame. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the Pandas DataFrame. + + source : str or int, optional + A valid column name (string or integer) for the source nodes (for the + directed case). + + target : str or int, optional + A valid column name (string or integer) for the target nodes (for the + directed case). + + nodelist : list, optional + Use only nodes specified in nodelist + + dtype : dtype, default None + Use to create the DataFrame. Data type to force. + Only a single dtype is allowed. If None, infer. + + edge_key : str or int or None, optional (default=None) + A valid column name (string or integer) for the edge keys (for the + multigraph case). If None, edge keys are not stored in the DataFrame. + + Returns + ------- + df : Pandas DataFrame + Graph edge list + + Examples + -------- + >>> G = nx.Graph( + ... [ + ... ("A", "B", {"cost": 1, "weight": 7}), + ... ("C", "E", {"cost": 9, "weight": 10}), + ... ] + ... 
) + >>> df = nx.to_pandas_edgelist(G, nodelist=["A", "C"]) + >>> df[["source", "target", "cost", "weight"]] + source target cost weight + 0 A B 1 7 + 1 C E 9 10 + + >>> G = nx.MultiGraph([("A", "B", {"cost": 1}), ("A", "B", {"cost": 9})]) + >>> df = nx.to_pandas_edgelist(G, nodelist=["A", "C"], edge_key="ekey") + >>> df[["source", "target", "cost", "ekey"]] + source target cost ekey + 0 A B 1 0 + 1 A B 9 1 + + """ + import pandas as pd + + if nodelist is None: + edgelist = G.edges(data=True) + else: + edgelist = G.edges(nodelist, data=True) + source_nodes = [s for s, _, _ in edgelist] + target_nodes = [t for _, t, _ in edgelist] + + all_attrs = set().union(*(d.keys() for _, _, d in edgelist)) + if source in all_attrs: + raise nx.NetworkXError(f"Source name {source!r} is an edge attr name") + if target in all_attrs: + raise nx.NetworkXError(f"Target name {target!r} is an edge attr name") + + nan = float("nan") + edge_attr = {k: [d.get(k, nan) for _, _, d in edgelist] for k in all_attrs} + + if G.is_multigraph() and edge_key is not None: + if edge_key in all_attrs: + raise nx.NetworkXError(f"Edge key name {edge_key!r} is an edge attr name") + edge_keys = [k for _, _, k in G.edges(keys=True)] + edgelistdict = {source: source_nodes, target: target_nodes, edge_key: edge_keys} + else: + edgelistdict = {source: source_nodes, target: target_nodes} + + edgelistdict.update(edge_attr) + return pd.DataFrame(edgelistdict, dtype=dtype) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_pandas_edgelist( + df, + source="source", + target="target", + edge_attr=None, + create_using=None, + edge_key=None, +): + """Returns a graph from Pandas DataFrame containing an edge list. + + The Pandas DataFrame should contain at least two columns of node names and + zero or more columns of edge attributes. Each row will be processed as one + edge instance. + + Note: This function iterates over DataFrame.values, which is not + guaranteed to retain the data type across columns in the row. This is only + a problem if your row is entirely numeric and a mix of ints and floats. In + that case, all values will be returned as floats. See the + DataFrame.iterrows documentation for an example. + + Parameters + ---------- + df : Pandas DataFrame + An edge list representation of a graph + + source : str or int + A valid column name (string or integer) for the source nodes (for the + directed case). + + target : str or int + A valid column name (string or integer) for the target nodes (for the + directed case). + + edge_attr : str or int, iterable, True, or None + A valid column name (str or int) or iterable of column names that are + used to retrieve items and add them to the graph as edge attributes. + If `True`, all columns will be added except `source`, `target` and `edge_key`. + If `None`, no edge attributes are added to the graph. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + edge_key : str or None, optional (default=None) + A valid column name for the edge keys (for a MultiGraph). The values in + this column are used for the edge keys when adding edges if create_using + is a multigraph. 
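+
+    Returns
+    -------
+    G : graph
+        A NetworkX graph of the type given by `create_using`, with one edge
+        added per row of `df`.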
+ + Notes + ----- + If you have node attributes stored in a separate dataframe `df_nodes`, + you can load those attributes to the graph `G` using the following code:: + + df_nodes = pd.DataFrame({"node_id": [1, 2, 3], "attribute1": ["A", "B", "C"]}) + G.add_nodes_from((n, dict(d)) for n, d in df_nodes.iterrows()) + + See Also + -------- + to_pandas_edgelist + + Examples + -------- + Simple integer weights on edges: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> import numpy as np + >>> rng = np.random.RandomState(seed=5) + >>> ints = rng.randint(1, 11, size=(3, 2)) + >>> a = ["A", "B", "C"] + >>> b = ["D", "A", "E"] + >>> df = pd.DataFrame(ints, columns=["weight", "cost"]) + >>> df[0] = a + >>> df["b"] = b + >>> df[["weight", "cost", 0, "b"]] + weight cost 0 b + 0 4 7 A D + 1 7 1 B A + 2 10 9 C E + >>> G = nx.from_pandas_edgelist(df, 0, "b", ["weight", "cost"]) + >>> G["E"]["C"]["weight"] + 10 + >>> G["E"]["C"]["cost"] + 9 + >>> edges = pd.DataFrame( + ... { + ... "source": [0, 1, 2], + ... "target": [2, 2, 3], + ... "weight": [3, 4, 5], + ... "color": ["red", "blue", "blue"], + ... } + ... ) + >>> G = nx.from_pandas_edgelist(edges, edge_attr=True) + >>> G[0][2]["color"] + 'red' + + Build multigraph with custom keys: + + >>> edges = pd.DataFrame( + ... { + ... "source": [0, 1, 2, 0], + ... "target": [2, 2, 3, 2], + ... "my_edge_key": ["A", "B", "C", "D"], + ... "weight": [3, 4, 5, 6], + ... "color": ["red", "blue", "blue", "blue"], + ... } + ... ) + >>> G = nx.from_pandas_edgelist( + ... edges, + ... edge_key="my_edge_key", + ... edge_attr=["weight", "color"], + ... create_using=nx.MultiGraph(), + ... ) + >>> G[0][2] + AtlasView({'A': {'weight': 3, 'color': 'red'}, 'D': {'weight': 6, 'color': 'blue'}}) + + + """ + g = nx.empty_graph(0, create_using) + + if edge_attr is None: + if g.is_multigraph() and edge_key is not None: + for u, v, k in zip(df[source], df[target], df[edge_key]): + g.add_edge(u, v, k) + else: + g.add_edges_from(zip(df[source], df[target])) + return g + + reserved_columns = [source, target] + if g.is_multigraph() and edge_key is not None: + reserved_columns.append(edge_key) + + # Additional columns requested + attr_col_headings = [] + attribute_data = [] + if edge_attr is True: + attr_col_headings = [c for c in df.columns if c not in reserved_columns] + elif isinstance(edge_attr, list | tuple): + attr_col_headings = edge_attr + else: + attr_col_headings = [edge_attr] + if len(attr_col_headings) == 0: + raise nx.NetworkXError( + f"Invalid edge_attr argument: No columns found with name: {attr_col_headings}" + ) + + try: + attribute_data = zip(*[df[col] for col in attr_col_headings]) + except (KeyError, TypeError) as err: + msg = f"Invalid edge_attr argument: {edge_attr}" + raise nx.NetworkXError(msg) from err + + if g.is_multigraph(): + # => append the edge keys from the df to the bundled data + if edge_key is not None: + try: + multigraph_edge_keys = df[edge_key] + attribute_data = zip(attribute_data, multigraph_edge_keys) + except (KeyError, TypeError) as err: + msg = f"Invalid edge_key argument: {edge_key}" + raise nx.NetworkXError(msg) from err + + for s, t, attrs in zip(df[source], df[target], attribute_data): + if edge_key is not None: + attrs, multigraph_edge_key = attrs + key = g.add_edge(s, t, key=multigraph_edge_key) + else: + key = g.add_edge(s, t) + + g[s][t][key].update(zip(attr_col_headings, attrs)) + else: + for s, t, attrs in zip(df[source], df[target], attribute_data): + g.add_edge(s, t) + g[s][t].update(zip(attr_col_headings, 
attrs))
+
+    return g
+
+
+@nx._dispatchable(edge_attrs="weight")
+def to_scipy_sparse_array(G, nodelist=None, dtype=None, weight="weight", format="csr"):
+    """Returns the graph adjacency matrix as a SciPy sparse array.
+
+    Parameters
+    ----------
+    G : graph
+        The NetworkX graph used to construct the sparse array.
+
+    nodelist : list, optional
+        The rows and columns are ordered according to the nodes in `nodelist`.
+        If `nodelist` is None, then the ordering is produced by ``G.nodes()``.
+
+    dtype : NumPy data-type, optional
+        A valid NumPy dtype used to initialize the array. If None, then the
+        NumPy default is used.
+
+    weight : string or None, optional (default='weight')
+        The edge attribute that holds the numerical value used for
+        the edge weight. If None then all edge weights are 1.
+
+    format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
+        The format of the sparse array to be returned (default 'csr'). For
+        some algorithms different implementations of sparse arrays
+        can perform better. See [1]_ for details.
+
+    Returns
+    -------
+    A : SciPy sparse array
+        Graph adjacency matrix.
+
+    Notes
+    -----
+    For directed graphs, matrix entry ``i, j`` corresponds to an edge from
+    ``i`` to ``j``.
+
+    The values of the adjacency matrix are populated using the edge attribute held in
+    parameter `weight`. When an edge does not have that attribute, the
+    value of the entry is 1.
+
+    For multiple edges the matrix values are the sums of the edge weights.
+
+    When `nodelist` does not contain every node in `G`, the adjacency matrix
+    is built from the subgraph of `G` that is induced by the nodes in
+    `nodelist`.
+
+    The convention used for self-loop edges in graphs is to assign the
+    diagonal matrix entry value to the weight attribute of the edge
+    (or the number 1 if the edge has no weight attribute). If the
+    alternate convention of doubling the edge weight is desired the
+    resulting array can be modified as follows::
+
+        >>> G = nx.Graph([(1, 1)])
+        >>> A = nx.to_scipy_sparse_array(G)
+        >>> A.toarray()
+        array([[1]])
+        >>> A.setdiag(A.diagonal() * 2)
+        >>> A.toarray()
+        array([[2]])
+
+    Examples
+    --------
+
+    Basic usage:
+
+    >>> G = nx.path_graph(4)
+    >>> A = nx.to_scipy_sparse_array(G)
+    >>> A  # doctest: +SKIP
+
+    >>> A.toarray()
+    array([[0, 1, 0, 0],
+           [1, 0, 1, 0],
+           [0, 1, 0, 1],
+           [0, 0, 1, 0]])
+
+    .. note:: The `toarray` method is used in these examples to better visualize
+       the adjacency matrix. For a dense representation of the adjacency matrix,
+       use `to_numpy_array` instead.
+ + Directed graphs: + + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3)]) + >>> nx.to_scipy_sparse_array(G).toarray() + array([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]]) + + >>> H = G.reverse() + >>> H.edges + OutEdgeView([(1, 0), (2, 1), (3, 2)]) + >>> nx.to_scipy_sparse_array(H).toarray() + array([[0, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]]) + + By default, the order of the rows/columns of the adjacency matrix is determined + by the ordering of the nodes in `G`: + + >>> G = nx.Graph() + >>> G.add_nodes_from([3, 5, 0, 1]) + >>> G.add_edges_from([(1, 3), (1, 5)]) + >>> nx.to_scipy_sparse_array(G).toarray() + array([[0, 0, 0, 1], + [0, 0, 0, 1], + [0, 0, 0, 0], + [1, 1, 0, 0]]) + + The ordering of the rows can be changed with `nodelist`: + + >>> ordered = [0, 1, 3, 5] + >>> nx.to_scipy_sparse_array(G, nodelist=ordered).toarray() + array([[0, 0, 0, 0], + [0, 0, 1, 1], + [0, 1, 0, 0], + [0, 1, 0, 0]]) + + If `nodelist` contains a subset of the nodes in `G`, the adjacency matrix + for the node-induced subgraph is produced: + + >>> nx.to_scipy_sparse_array(G, nodelist=[1, 3, 5]).toarray() + array([[0, 1, 1], + [1, 0, 0], + [1, 0, 0]]) + + The values of the adjacency matrix are drawn from the edge attribute + specified by the `weight` parameter: + + >>> G = nx.path_graph(4) + >>> nx.set_edge_attributes( + ... G, values={(0, 1): 1, (1, 2): 10, (2, 3): 2}, name="weight" + ... ) + >>> nx.set_edge_attributes( + ... G, values={(0, 1): 50, (1, 2): 35, (2, 3): 10}, name="capacity" + ... ) + >>> nx.to_scipy_sparse_array(G).toarray() # Default weight="weight" + array([[ 0, 1, 0, 0], + [ 1, 0, 10, 0], + [ 0, 10, 0, 2], + [ 0, 0, 2, 0]]) + >>> nx.to_scipy_sparse_array(G, weight="capacity").toarray() + array([[ 0, 50, 0, 0], + [50, 0, 35, 0], + [ 0, 35, 0, 10], + [ 0, 0, 10, 0]]) + + Any edges that don't have a `weight` attribute default to 1: + + >>> G[1][2].pop("capacity") + 35 + >>> nx.to_scipy_sparse_array(G, weight="capacity").toarray() + array([[ 0, 50, 0, 0], + [50, 0, 1, 0], + [ 0, 1, 0, 10], + [ 0, 0, 10, 0]]) + + When `G` is a multigraph, the values in the adjacency matrix are given by + the sum of the `weight` edge attribute over each edge key: + + >>> G = nx.MultiDiGraph([(0, 1), (0, 1), (0, 1), (2, 0)]) + >>> nx.to_scipy_sparse_array(G).toarray() + array([[0, 3, 0], + [0, 0, 0], + [1, 0, 0]]) + + References + ---------- + .. [1] Scipy Dev. 
References, "Sparse Arrays", + https://docs.scipy.org/doc/scipy/reference/sparse.html + """ + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXError("Graph has no nodes or edges") + + if nodelist is None: + nodelist = list(G) + nlen = len(G) + else: + nlen = len(nodelist) + if nlen == 0: + raise nx.NetworkXError("nodelist has no nodes") + nodeset = set(G.nbunch_iter(nodelist)) + if nlen != len(nodeset): + for n in nodelist: + if n not in G: + raise nx.NetworkXError(f"Node {n} in nodelist is not in G") + raise nx.NetworkXError("nodelist contains duplicates.") + if nlen < len(G): + G = G.subgraph(nodelist) + + index = dict(zip(nodelist, range(nlen))) + coefficients = zip( + *((index[u], index[v], wt) for u, v, wt in G.edges(data=weight, default=1)) + ) + try: + row, col, data = coefficients + except ValueError: + # there is no edge in the subgraph + row, col, data = [], [], [] + + if G.is_directed(): + A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, nlen), dtype=dtype) + else: + # symmetrize matrix + d = data + data + r = row + col + c = col + row + # selfloop entries get double counted when symmetrizing + # so we subtract the data on the diagonal + selfloops = list(nx.selfloop_edges(G, data=weight, default=1)) + if selfloops: + diag_index, diag_data = zip(*((index[u], -wt) for u, v, wt in selfloops)) + d += diag_data + r += diag_index + c += diag_index + A = sp.sparse.coo_array((d, (r, c)), shape=(nlen, nlen), dtype=dtype) + try: + return A.asformat(format) + except ValueError as err: + raise nx.NetworkXError(f"Unknown sparse matrix format: {format}") from err + + +def _csr_gen_triples(A): + """Converts a SciPy sparse array in **Compressed Sparse Row** format to + an iterable of weighted edge triples. + + """ + nrows = A.shape[0] + indptr, dst_indices, data = A.indptr, A.indices, A.data + import numpy as np + + src_indices = np.repeat(np.arange(nrows), np.diff(indptr)) + return zip(src_indices.tolist(), dst_indices.tolist(), A.data.tolist()) + + +def _csc_gen_triples(A): + """Converts a SciPy sparse array in **Compressed Sparse Column** format to + an iterable of weighted edge triples. + + """ + ncols = A.shape[1] + indptr, src_indices, data = A.indptr, A.indices, A.data + import numpy as np + + dst_indices = np.repeat(np.arange(ncols), np.diff(indptr)) + return zip(src_indices.tolist(), dst_indices.tolist(), A.data.tolist()) + + +def _coo_gen_triples(A): + """Converts a SciPy sparse array in **Coordinate** format to an iterable + of weighted edge triples. + + """ + return zip(A.row.tolist(), A.col.tolist(), A.data.tolist()) + + +def _dok_gen_triples(A): + """Converts a SciPy sparse array in **Dictionary of Keys** format to an + iterable of weighted edge triples. + + """ + for (r, c), v in A.items(): + # Use `v.item()` to convert a NumPy scalar to the appropriate Python scalar + yield int(r), int(c), v.item() + + +def _generate_weighted_edges(A): + """Returns an iterable over (u, v, w) triples, where u and v are adjacent + vertices and w is the weight of the edge joining u and v. + + `A` is a SciPy sparse array (in any format). + + """ + if A.format == "csr": + return _csr_gen_triples(A) + if A.format == "csc": + return _csc_gen_triples(A) + if A.format == "dok": + return _dok_gen_triples(A) + # If A is in any other format (including COO), convert it to COO format. 
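+    # (e.g. 'lil', 'dia', and 'bsr' inputs all take this path; every helper
+    # above yields plain Python (row, col, value) triples)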
+ return _coo_gen_triples(A.tocoo()) + + +@nx._dispatchable(graphs=None, returns_graph=True) +def from_scipy_sparse_array( + A, parallel_edges=False, create_using=None, edge_attribute="weight" +): + """Creates a new graph from an adjacency matrix given as a SciPy sparse + array. + + Parameters + ---------- + A: scipy.sparse array + An adjacency matrix representation of a graph + + parallel_edges : Boolean + If this is True, `create_using` is a multigraph, and `A` is an + integer matrix, then entry *(i, j)* in the matrix is interpreted as the + number of parallel edges joining vertices *i* and *j* in the graph. + If it is False, then the entries in the matrix are interpreted as + the weight of a single edge joining the vertices. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + edge_attribute: string + Name of edge attribute to store matrix numeric value. The data will + have the same type as the matrix entry (int, float, (real,imag)). + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of A corresponds to an edge from i to j. + + If `create_using` is :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the + entries of `A` are of type :class:`int`, then this function returns a + multigraph (constructed from `create_using`) with parallel edges. + In this case, `edge_attribute` will be ignored. + + If `create_using` indicates an undirected multigraph, then only the edges + indicated by the upper triangle of the matrix `A` will be added to the + graph. + + Examples + -------- + >>> import scipy as sp + >>> A = sp.sparse.eye(2, 2, 1) + >>> G = nx.from_scipy_sparse_array(A) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is False, then the entries will be treated + as weights for edges joining the nodes (without creating parallel edges): + + >>> A = sp.sparse.csr_array([[1, 1], [1, 2]]) + >>> G = nx.from_scipy_sparse_array(A, create_using=nx.MultiGraph) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is True, then the entries will be treated + as the number of parallel edges joining those two vertices: + + >>> A = sp.sparse.csr_array([[1, 1], [1, 2]]) + >>> G = nx.from_scipy_sparse_array( + ... A, parallel_edges=True, create_using=nx.MultiGraph + ... ) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + """ + G = nx.empty_graph(0, create_using) + n, m = A.shape + if n != m: + raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}") + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(range(n)) + # Create an iterable over (u, v, w) triples and for each triple, add an + # edge from u to v with weight w. + triples = _generate_weighted_edges(A) + # If the entries in the adjacency matrix are integers, the graph is a + # multigraph, and parallel_edges is True, then create parallel edges, each + # with weight 1, for each entry in the adjacency matrix. Otherwise, create + # one edge for each positive entry in the adjacency matrix and set the + # weight of that edge to be the entry in the matrix. 
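+    # For example, an integer entry A[i, j] == 3 becomes three parallel
+    # (i, j) edges of weight 1 in that case, and a single (i, j) edge of
+    # weight 3 otherwise.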
+    if A.dtype.kind in ("i", "u") and G.is_multigraph() and parallel_edges:
+        chain = itertools.chain.from_iterable
+        # The following line is equivalent to:
+        #
+        #     for (u, v) in edges:
+        #         for d in range(A[u, v]):
+        #             G.add_edge(u, v, weight=1)
+        #
+        triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
+    # If we are creating an undirected multigraph, only add the edges from the
+    # upper triangle of the matrix. Otherwise, add all the edges. This relies
+    # on the fact that the vertices created in the
+    # `_generate_weighted_edges()` function are actually the row/column
+    # indices for the matrix `A`.
+    #
+    # Without this check, we run into a problem where each edge is added twice
+    # when `G.add_weighted_edges_from()` is invoked below.
+    if G.is_multigraph() and not G.is_directed():
+        triples = ((u, v, d) for u, v, d in triples if u <= v)
+    G.add_weighted_edges_from(triples, weight=edge_attribute)
+    return G
+
+
+@nx._dispatchable(edge_attrs="weight")  # edge attrs may also be obtained from `dtype`
+def to_numpy_array(
+    G,
+    nodelist=None,
+    dtype=None,
+    order=None,
+    multigraph_weight=sum,
+    weight="weight",
+    nonedge=0.0,
+):
+    """Returns the graph adjacency matrix as a NumPy array.
+
+    Parameters
+    ----------
+    G : graph
+        The NetworkX graph used to construct the NumPy array.
+
+    nodelist : list, optional
+        The rows and columns are ordered according to the nodes in `nodelist`.
+        If `nodelist` is ``None``, then the ordering is produced by ``G.nodes()``.
+
+    dtype : NumPy data type, optional
+        A NumPy data type used to initialize the array. If None, then the NumPy
+        default is used. The dtype can be structured if `weight=None`, in which
+        case the dtype field names are used to look up edge attributes. The
+        result is a structured array where each named field in the dtype
+        corresponds to the adjacency for that edge attribute. See examples for
+        details.
+
+    order : {'C', 'F'}, optional
+        Whether to store multidimensional data in C- or Fortran-contiguous
+        (row- or column-wise) order in memory. If None, then the NumPy default
+        is used.
+
+    multigraph_weight : callable, optional
+        A function that determines how weights in multigraphs are handled.
+        The function should accept a sequence of weights and return a single
+        value. The default is to sum the weights of the multiple edges.
+
+    weight : string or None, optional (default = 'weight')
+        The edge attribute that holds the numerical value used for
+        the edge weight. If an edge does not have that attribute, then the
+        value 1 is used instead. `weight` must be ``None`` if a structured
+        dtype is used.
+
+    nonedge : array_like (default = 0.0)
+        The value used to represent non-edges in the adjacency matrix.
+        The array values corresponding to nonedges are typically set to zero.
+        However, this could be undesirable if there are array values
+        corresponding to actual edges that also have the value zero. If so,
+        one might prefer nonedges to have some other value, such as ``nan``.
+
+    Returns
+    -------
+    A : NumPy ndarray
+        Graph adjacency matrix
+
+    Raises
+    ------
+    NetworkXError
+        If `dtype` is a structured dtype and `G` is a multigraph
+    ValueError
+        If `dtype` is a structured dtype and `weight` is not `None`
+
+    See Also
+    --------
+    from_numpy_array
+
+    Notes
+    -----
+    For directed graphs, entry ``i, j`` corresponds to an edge from ``i`` to ``j``.
+
+    Entries in the adjacency matrix are given by the `weight` edge attribute.
+    When an edge does not have a weight attribute, the value of the entry is
+    set to the number 1.
+    For multiple (parallel) edges, the values of the entries are determined by
+    the `multigraph_weight` parameter. The default is to sum the weight
+    attributes for each of the parallel edges.
+
+    When `nodelist` does not contain every node in `G`, the adjacency matrix is
+    built from the subgraph of `G` that is induced by the nodes in `nodelist`.
+
+    The convention used for self-loop edges in graphs is to assign the
+    diagonal array entry value to the weight attribute of the edge
+    (or the number 1 if the edge has no weight attribute). If the
+    alternate convention of doubling the edge weight is desired the
+    resulting NumPy array can be modified as follows:
+
+    >>> import numpy as np
+    >>> G = nx.Graph([(1, 1)])
+    >>> A = nx.to_numpy_array(G)
+    >>> A
+    array([[1.]])
+    >>> A[np.diag_indices_from(A)] *= 2
+    >>> A
+    array([[2.]])
+
+    Examples
+    --------
+    >>> G = nx.MultiDiGraph()
+    >>> G.add_edge(0, 1, weight=2)
+    0
+    >>> G.add_edge(1, 0)
+    0
+    >>> G.add_edge(2, 2, weight=3)
+    0
+    >>> G.add_edge(2, 2)
+    1
+    >>> nx.to_numpy_array(G, nodelist=[0, 1, 2])
+    array([[0., 2., 0.],
+           [1., 0., 0.],
+           [0., 0., 4.]])
+
+    When the `nodelist` argument is used, nodes of `G` that do not appear in
+    `nodelist` (and their incident edges) are not included in the adjacency
+    matrix. Here is an example:
+
+    >>> G = nx.Graph()
+    >>> G.add_edge(3, 1)
+    >>> G.add_edge(2, 0)
+    >>> G.add_edge(2, 1)
+    >>> G.add_edge(3, 0)
+    >>> nx.to_numpy_array(G, nodelist=[1, 2, 3])
+    array([[0., 1., 1.],
+           [1., 0., 0.],
+           [1., 0., 0.]])
+
+    This function can also be used to create adjacency matrices for multiple
+    edge attributes with structured dtypes:
+
+    >>> G = nx.Graph()
+    >>> G.add_edge(0, 1, weight=10)
+    >>> G.add_edge(1, 2, cost=5)
+    >>> G.add_edge(2, 3, weight=3, cost=-4.0)
+    >>> dtype = np.dtype([("weight", int), ("cost", float)])
+    >>> A = nx.to_numpy_array(G, dtype=dtype, weight=None)
+    >>> A["weight"]
+    array([[ 0, 10,  0,  0],
+           [10,  0,  1,  0],
+           [ 0,  1,  0,  3],
+           [ 0,  0,  3,  0]])
+    >>> A["cost"]
+    array([[ 0.,  1.,  0.,  0.],
+           [ 1.,  0.,  5.,  0.],
+           [ 0.,  5.,  0., -4.],
+           [ 0.,  0., -4.,  0.]])
+
+    As noted above, the `nonedge` argument is especially useful when the graph
+    contains edges whose weight is 0. Choosing a nonedge value other than 0
+    makes it easy to distinguish such 0-weighted edges from actual non-edges.
+
+    >>> G = nx.Graph()
+    >>> G.add_edge(3, 1, weight=2)
+    >>> G.add_edge(2, 0, weight=0)
+    >>> G.add_edge(2, 1, weight=0)
+    >>> G.add_edge(3, 0, weight=1)
+    >>> nx.to_numpy_array(G, nonedge=-1.0)
+    array([[-1.,  2., -1.,  1.],
+           [ 2., -1.,  0., -1.],
+           [-1.,  0., -1.,  0.],
+           [ 1., -1.,  0., -1.]])
+    """
+    import numpy as np
+
+    if nodelist is None:
+        nodelist = list(G)
+    nlen = len(nodelist)
+
+    # Input validation
+    nodeset = set(nodelist)
+    if nodeset - set(G):
+        raise nx.NetworkXError(f"Nodes {nodeset - set(G)} in nodelist are not in G")
+    if len(nodeset) < nlen:
+        raise nx.NetworkXError("nodelist contains duplicates.")
+
+    A = np.full((nlen, nlen), fill_value=nonedge, dtype=dtype, order=order)
+
+    # Corner cases: empty nodelist or graph without any edges
+    if nlen == 0 or G.number_of_edges() == 0:
+        return A
+
+    # If dtype is structured and weight is None, use dtype field names as
+    # edge attributes
+    edge_attrs = None  # Only single edge attribute by default
+    if A.dtype.names:
+        if weight is None:
+            edge_attrs = dtype.names
+        else:
+            raise ValueError(
+                "Specifying `weight` not supported for structured dtypes.\n"
+                "To create adjacency matrices from structured dtypes, use `weight=None`."
+            )
+
+    # Map nodes to row/col in matrix
+    idx = dict(zip(nodelist, range(nlen)))
+    if len(nodelist) < len(G):
+        G = G.subgraph(nodelist).copy()
+
+    # Collect all edge weights and reduce with `multigraph_weight`
+    if G.is_multigraph():
+        if edge_attrs:
+            raise nx.NetworkXError(
+                "Structured arrays are not supported for MultiGraphs"
+            )
+        d = defaultdict(list)
+        for u, v, wt in G.edges(data=weight, default=1.0):
+            d[(idx[u], idx[v])].append(wt)
+        i, j = np.array(list(d.keys())).T  # indices
+        wts = [multigraph_weight(ws) for ws in d.values()]  # reduced weights
+    else:
+        i, j, wts = [], [], []
+
+        # Special branch: multi-attr adjacency from structured dtypes
+        if edge_attrs:
+            # Extract edges with all data
+            for u, v, data in G.edges(data=True):
+                i.append(idx[u])
+                j.append(idx[v])
+                wts.append(data)
+            # Map each attribute to the appropriate named field in the
+            # structured dtype
+            for attr in edge_attrs:
+                attr_data = [wt.get(attr, 1.0) for wt in wts]
+                A[attr][i, j] = attr_data
+                if not G.is_directed():
+                    A[attr][j, i] = attr_data
+            return A
+
+        for u, v, wt in G.edges(data=weight, default=1.0):
+            i.append(idx[u])
+            j.append(idx[v])
+            wts.append(wt)
+
+    # Set array values with advanced indexing
+    A[i, j] = wts
+    if not G.is_directed():
+        A[j, i] = wts
+
+    return A
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def from_numpy_array(
+    A, parallel_edges=False, create_using=None, edge_attr="weight", *, nodelist=None
+):
+    """Returns a graph from a 2D NumPy array.
+
+    The 2D NumPy array is interpreted as an adjacency matrix for the graph.
+
+    Parameters
+    ----------
+    A : a 2D numpy.ndarray
+        An adjacency matrix representation of a graph
+
+    parallel_edges : Boolean
+        If this is True, `create_using` is a multigraph, and `A` is an
+        integer array, then entry *(i, j)* in the array is interpreted as the
+        number of parallel edges joining vertices *i* and *j* in the graph.
+        If it is False, then the entries in the array are interpreted as
+        the weight of a single edge joining the vertices.
+
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+
+    edge_attr : String, optional (default="weight")
+        The attribute to which the array values are assigned on each edge. If
+        it is None, edge attributes will not be assigned.
+
+    nodelist : sequence of nodes, optional
+        A sequence of objects to use as the nodes in the graph. If provided, the
+        list of nodes must be the same length as the dimensions of `A`. The
+        default is `None`, in which case the nodes are drawn from ``range(n)``.
+
+    Notes
+    -----
+    For directed graphs, explicitly mention create_using=nx.DiGraph,
+    and entry i,j of A corresponds to an edge from i to j.
+
+    If `create_using` is :class:`networkx.MultiGraph` or
+    :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the
+    entries of `A` are of type :class:`int`, then this function returns a
+    multigraph (of the same type as `create_using`) with parallel edges.
+
+    If `create_using` indicates an undirected multigraph, then only the edges
+    indicated by the upper triangle of the array `A` will be added to the
+    graph.
+
+    If `edge_attr` is Falsy (False or None), edge attributes will not be
+    assigned, and the array data will be treated like a binary mask of
+    edge presence or absence. Otherwise, the attributes will be assigned
+    as follows:
+
+    If the NumPy array has a single data type for each array entry it
+    will be converted to an appropriate Python data type.
+ + If the NumPy array has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. + + See Also + -------- + to_numpy_array + + Examples + -------- + Simple integer weights on edges: + + >>> import numpy as np + >>> A = np.array([[1, 1], [2, 1]]) + >>> G = nx.from_numpy_array(A) + >>> G.edges(data=True) + EdgeDataView([(0, 0, {'weight': 1}), (0, 1, {'weight': 2}), (1, 1, {'weight': 1})]) + + If `create_using` indicates a multigraph and the array has only integer + entries and `parallel_edges` is False, then the entries will be treated + as weights for edges joining the nodes (without creating parallel edges): + + >>> A = np.array([[1, 1], [1, 2]]) + >>> G = nx.from_numpy_array(A, create_using=nx.MultiGraph) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` indicates a multigraph and the array has only integer + entries and `parallel_edges` is True, then the entries will be treated + as the number of parallel edges joining those two vertices: + + >>> A = np.array([[1, 1], [1, 2]]) + >>> temp = nx.MultiGraph() + >>> G = nx.from_numpy_array(A, parallel_edges=True, create_using=temp) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + User defined compound data type on edges: + + >>> dt = [("weight", float), ("cost", int)] + >>> A = np.array([[(1.0, 2)]], dtype=dt) + >>> G = nx.from_numpy_array(A) + >>> G.edges() + EdgeView([(0, 0)]) + >>> G[0][0]["cost"] + 2 + >>> G[0][0]["weight"] + 1.0 + + """ + kind_to_python_type = { + "f": float, + "i": int, + "u": int, + "b": bool, + "c": complex, + "S": str, + "U": str, + "V": "void", + } + G = nx.empty_graph(0, create_using) + if A.ndim != 2: + raise nx.NetworkXError(f"Input array must be 2D, not {A.ndim}") + n, m = A.shape + if n != m: + raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}") + dt = A.dtype + try: + python_type = kind_to_python_type[dt.kind] + except Exception as err: + raise TypeError(f"Unknown numpy data type: {dt}") from err + if _default_nodes := (nodelist is None): + nodelist = range(n) + else: + if len(nodelist) != n: + raise ValueError("nodelist must have the same length as A.shape[0]") + + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(nodelist) + # Get a list of all the entries in the array with nonzero entries. These + # coordinates become edges in the graph. (convert to int from np.int64) + edges = ((int(e[0]), int(e[1])) for e in zip(*A.nonzero())) + # handle numpy constructed data type + if python_type == "void": + # Sort the fields by their offset, then by dtype, then by name. + fields = sorted( + (offset, dtype, name) for name, (dtype, offset) in A.dtype.fields.items() + ) + triples = ( + ( + u, + v, + {} + if edge_attr in [False, None] + else { + name: kind_to_python_type[dtype.kind](val) + for (_, dtype, name), val in zip(fields, A[u, v]) + }, + ) + for u, v in edges + ) + # If the entries in the adjacency matrix are integers, the graph is a + # multigraph, and parallel_edges is True, then create parallel edges, each + # with weight 1, for each entry in the adjacency matrix. Otherwise, create + # one edge for each positive entry in the adjacency matrix and set the + # weight of that edge to be the entry in the matrix. 
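+    # For example, an integer entry A[u, v] == 3 yields three parallel
+    # (u, v) edges with {edge_attr: 1} in the branch below, versus a single
+    # (u, v) edge with {edge_attr: 3} in the scalar branch.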
+    elif python_type is int and G.is_multigraph() and parallel_edges:
+        chain = itertools.chain.from_iterable
+        # The following line is equivalent to:
+        #
+        #     for (u, v) in edges:
+        #         for d in range(A[u, v]):
+        #             G.add_edge(u, v, weight=1)
+        #
+        if edge_attr in [False, None]:
+            triples = chain(((u, v, {}) for d in range(A[u, v])) for (u, v) in edges)
+        else:
+            triples = chain(
+                ((u, v, {edge_attr: 1}) for d in range(A[u, v])) for (u, v) in edges
+            )
+    else:  # basic data type
+        if edge_attr in [False, None]:
+            triples = ((u, v, {}) for u, v in edges)
+        else:
+            triples = ((u, v, {edge_attr: python_type(A[u, v])}) for u, v in edges)
+    # If we are creating an undirected multigraph, only add the edges from the
+    # upper triangle of the matrix. Otherwise, add all the edges. This relies
+    # on the fact that the vertices in `edges` (taken from `A.nonzero()` above)
+    # are actually the row/column indices for the matrix `A`.
+    #
+    # Without this check, we run into a problem where each edge is added twice
+    # when `G.add_edges_from()` is invoked below.
+    if G.is_multigraph() and not G.is_directed():
+        triples = ((u, v, d) for u, v, d in triples if u <= v)
+    # Remap nodes if user provided custom `nodelist`
+    if not _default_nodes:
+        idx_to_node = dict(enumerate(nodelist))
+        triples = ((idx_to_node[u], idx_to_node[v], d) for u, v, d in triples)
+    G.add_edges_from(triples)
+    return G
diff --git a/py311/lib/python3.11/site-packages/networkx/exception.py b/py311/lib/python3.11/site-packages/networkx/exception.py
new file mode 100644
index 0000000000000000000000000000000000000000..c960cf13fd5a8e4da0ca68c66350b8baa1728c34
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/networkx/exception.py
@@ -0,0 +1,131 @@
+"""
+**********
+Exceptions
+**********
+
+Base exceptions and errors for NetworkX.
+"""
+
+__all__ = [
+    "HasACycle",
+    "NodeNotFound",
+    "PowerIterationFailedConvergence",
+    "ExceededMaxIterations",
+    "AmbiguousSolution",
+    "NetworkXAlgorithmError",
+    "NetworkXException",
+    "NetworkXError",
+    "NetworkXNoCycle",
+    "NetworkXNoPath",
+    "NetworkXNotImplemented",
+    "NetworkXPointlessConcept",
+    "NetworkXUnbounded",
+    "NetworkXUnfeasible",
+]
+
+
+class NetworkXException(Exception):
+    """Base class for exceptions in NetworkX."""
+
+
+class NetworkXError(NetworkXException):
+    """Exception for a serious error in NetworkX."""
+
+
+class NetworkXPointlessConcept(NetworkXException):
+    """Raised when a null graph is provided as input to an algorithm
+    that cannot use it.
+
+    The null graph is sometimes considered a pointless concept [1]_,
+    thus the name of the exception.
+
+    Notes
+    -----
+    Null graphs and empty graphs are often used interchangeably but they
+    are well defined in NetworkX. An ``empty_graph`` is a graph with ``n`` nodes
+    and 0 edges, and a ``null_graph`` is a graph with 0 nodes and 0 edges.
+
+    References
+    ----------
+    .. [1] Harary, F. and Read, R. "Is the Null Graph a Pointless
+       Concept?" In Graphs and Combinatorics Conference, George
+       Washington University. New York: Springer-Verlag, 1973.
+
+    """
+
+
+class NetworkXAlgorithmError(NetworkXException):
+    """Exception for unexpected termination of algorithms."""
+
+
+class NetworkXUnfeasible(NetworkXAlgorithmError):
+    """Exception raised by algorithms trying to solve a problem
+    instance that has no feasible solution."""
+
+
+class NetworkXNoPath(NetworkXUnfeasible):
+    """Exception for algorithms that should return a path when running
+    on graphs where such a path does not exist."""
+
+
+class NetworkXNoCycle(NetworkXUnfeasible):
+    """Exception for algorithms that should return a cycle when running
+    on graphs where such a cycle does not exist."""
+
+
+class HasACycle(NetworkXException):
+    """Raised if a graph has a cycle when an algorithm expects that it
+    will have no cycles.
+
+    """
+
+
+class NetworkXUnbounded(NetworkXAlgorithmError):
+    """Exception raised by algorithms trying to solve a maximization
+    or a minimization problem instance that is unbounded."""
+
+
+class NetworkXNotImplemented(NetworkXException):
+    """Exception raised by algorithms not implemented for a type of graph."""
+
+
+class NodeNotFound(NetworkXException):
+    """Exception raised if a requested node is not present in the graph."""
+
+
+class AmbiguousSolution(NetworkXException):
+    """Raised if more than one valid solution exists for an intermediary step
+    of an algorithm.
+
+    In the face of ambiguity, refuse the temptation to guess.
+    This may occur, for example, when trying to determine the
+    bipartite node sets in a disconnected bipartite graph when
+    computing bipartite matchings.
+
+    """
+
+
+class ExceededMaxIterations(NetworkXException):
+    """Raised if a loop iterates too many times without breaking.
+
+    This may occur, for example, in an algorithm that computes
+    progressively better approximations to a value but exceeds an
+    iteration bound specified by the user.
+
+    """
+
+
+class PowerIterationFailedConvergence(ExceededMaxIterations):
+    """Raised when the power iteration method fails to converge within a
+    specified iteration limit.
+
+    `num_iterations` is the number of iterations that have been
+    completed when this exception was raised.
+
+    """
+
+    def __init__(self, num_iterations, *args, **kw):
+        msg = f"power iteration failed to converge within {num_iterations} iterations"
+        # super().__init__ is already bound; passing `self` again would store
+        # the exception object itself in `args`.
+        super().__init__(msg, *args, **kw)
diff --git a/py311/lib/python3.11/site-packages/networkx/lazy_imports.py b/py311/lib/python3.11/site-packages/networkx/lazy_imports.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5c05ca5462868fbccc270ea1e93b301037dfc8c
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/networkx/lazy_imports.py
@@ -0,0 +1,188 @@
+import importlib
+import importlib.util
+import inspect
+import os
+import sys
+import types
+
+__all__ = ["attach", "_lazy_import"]
+
+
+def attach(module_name, submodules=None, submod_attrs=None):
+    """Attach lazily loaded submodules, and functions or other attributes.
+
+    Typically, modules import submodules and attributes as follows::
+
+        import mysubmodule
+        import anothersubmodule
+
+        from .foo import someattr
+
+    The idea of this function is to replace the `__init__.py`
+    module's `__getattr__`, `__dir__`, and `__all__` attributes such that
+    all imports work exactly the way they normally would, except that the
+    actual import is delayed until the resulting module object is first used.
+ + The typical way to call this function, replacing the above imports, is:: + + __getattr__, __lazy_dir__, __all__ = lazy.attach( + __name__, ["mysubmodule", "anothersubmodule"], {"foo": "someattr"} + ) + + This functionality requires Python 3.7 or higher. + + Parameters + ---------- + module_name : str + Typically use __name__. + submodules : set + List of submodules to lazily import. + submod_attrs : dict + Dictionary of submodule -> list of attributes / functions. + These attributes are imported as they are used. + + Returns + ------- + __getattr__, __dir__, __all__ + + """ + if submod_attrs is None: + submod_attrs = {} + + if submodules is None: + submodules = set() + else: + submodules = set(submodules) + + attr_to_modules = { + attr: mod for mod, attrs in submod_attrs.items() for attr in attrs + } + + __all__ = list(submodules | attr_to_modules.keys()) + + def __getattr__(name): + if name in submodules: + return importlib.import_module(f"{module_name}.{name}") + elif name in attr_to_modules: + submod = importlib.import_module(f"{module_name}.{attr_to_modules[name]}") + return getattr(submod, name) + else: + raise AttributeError(f"No {module_name} attribute {name}") + + def __dir__(): + return __all__ + + if os.environ.get("EAGER_IMPORT", ""): + for attr in set(attr_to_modules.keys()) | submodules: + __getattr__(attr) + + return __getattr__, __dir__, list(__all__) + + +class DelayedImportErrorModule(types.ModuleType): + def __init__(self, frame_data, *args, **kwargs): + self.__frame_data = frame_data + super().__init__(*args, **kwargs) + + def __getattr__(self, x): + if x in ("__class__", "__file__", "__frame_data"): + super().__getattr__(x) + else: + fd = self.__frame_data + raise ModuleNotFoundError( + f"No module named '{fd['spec']}'\n\n" + "This error is lazily reported, having originally occurred in\n" + f" File {fd['filename']}, line {fd['lineno']}, in {fd['function']}\n\n" + f"----> {''.join(fd['code_context'] or '').strip()}" + ) + + +def _lazy_import(fullname): + """Return a lazily imported proxy for a module or library. + + Warning + ------- + Importing using this function can currently cause trouble + when the user tries to import from a subpackage of a module before + the package is fully imported. In particular, this idiom may not work: + + np = lazy_import("numpy") + from numpy.lib import recfunctions + + This is due to a difference in the way Python's LazyLoader handles + subpackage imports compared to the normal import process. Hopefully + we will get Python's LazyLoader to fix this, or find a workaround. + In the meantime, this is a potential problem. + + The workaround is to import numpy before importing from the subpackage. + + Notes + ----- + We often see the following pattern:: + + def myfunc(): + import scipy as sp + sp.argmin(...) + .... + + This is to prevent a library, in this case `scipy`, from being + imported at function definition time, since that can be slow. + + This function provides a proxy module that, upon access, imports + the actual module. So the idiom equivalent to the above example is:: + + sp = lazy.load("scipy") + + def myfunc(): + sp.argmin(...) + .... + + The initial import time is fast because the actual import is delayed + until the first attribute is requested. The overall import time may + decrease as well for users that don't make use of large portions + of the library. + + Parameters + ---------- + fullname : str + The full name of the package or subpackage to import. 
For example::
+
+        sp = lazy.load("scipy")  # import scipy as sp
+        spla = lazy.load("scipy.linalg")  # import scipy.linalg as spla
+
+    Returns
+    -------
+    pm : importlib.util._LazyModule
+        Proxy module. Can be used like any regularly imported module.
+        Actual loading of the module occurs upon first attribute request.
+
+    """
+    try:
+        return sys.modules[fullname]
+    except KeyError:
+        pass
+
+    # Not previously loaded -- look it up
+    spec = importlib.util.find_spec(fullname)
+
+    if spec is None:
+        try:
+            parent = inspect.stack()[1]
+            frame_data = {
+                "spec": fullname,
+                "filename": parent.filename,
+                "lineno": parent.lineno,
+                "function": parent.function,
+                "code_context": parent.code_context,
+            }
+            return DelayedImportErrorModule(frame_data, "DelayedImportErrorModule")
+        finally:
+            del parent
+
+    module = importlib.util.module_from_spec(spec)
+    sys.modules[fullname] = module
+
+    loader = importlib.util.LazyLoader(spec.loader)
+    loader.exec_module(module)
+
+    return module
diff --git a/py311/lib/python3.11/site-packages/networkx/relabel.py b/py311/lib/python3.11/site-packages/networkx/relabel.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b870f726ef42e0bcaa7bf724e2ae6ab4145f288
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/networkx/relabel.py
@@ -0,0 +1,285 @@
+import networkx as nx
+
+__all__ = ["convert_node_labels_to_integers", "relabel_nodes"]
+
+
+@nx._dispatchable(
+    preserve_all_attrs=True, mutates_input={"not copy": 2}, returns_graph=True
+)
+def relabel_nodes(G, mapping, copy=True):
+    """Relabel the nodes of the graph G according to a given mapping.
+
+    The original node ordering may not be preserved if `copy` is `False` and the
+    mapping includes overlap between old and new labels.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    mapping : dictionary
+        A dictionary with the old labels as keys and new labels as values.
+        A partial mapping is allowed. Mapping 2 nodes to a single node is allowed.
+        Any non-node keys in the mapping are ignored.
+
+    copy : bool (optional, default=True)
+        If True return a copy, or if False relabel the nodes in place.
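+
+    Returns
+    -------
+    G : Graph
+        The graph with relabeled nodes: a new graph if ``copy=True``,
+        otherwise the input graph relabeled in place.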
+ + Examples + -------- + To create a new graph with nodes relabeled according to a given + dictionary: + + >>> G = nx.path_graph(3) + >>> sorted(G) + [0, 1, 2] + >>> mapping = {0: "a", 1: "b", 2: "c"} + >>> H = nx.relabel_nodes(G, mapping) + >>> sorted(H) + ['a', 'b', 'c'] + + Nodes can be relabeled with any hashable object, including numbers + and strings: + + >>> import string + >>> G = nx.path_graph(26) # nodes are integers 0 through 25 + >>> sorted(G)[:3] + [0, 1, 2] + >>> mapping = dict(zip(G, string.ascii_lowercase)) + >>> G = nx.relabel_nodes(G, mapping) # nodes are characters a through z + >>> sorted(G)[:3] + ['a', 'b', 'c'] + >>> mapping = dict(zip(G, range(1, 27))) + >>> G = nx.relabel_nodes(G, mapping) # nodes are integers 1 through 26 + >>> sorted(G)[:3] + [1, 2, 3] + + To perform a partial in-place relabeling, provide a dictionary + mapping only a subset of the nodes, and set the `copy` keyword + argument to False: + + >>> G = nx.path_graph(3) # nodes 0-1-2 + >>> mapping = {0: "a", 1: "b"} # 0->'a' and 1->'b' + >>> G = nx.relabel_nodes(G, mapping, copy=False) + >>> sorted(G, key=str) + [2, 'a', 'b'] + + A mapping can also be given as a function: + + >>> G = nx.path_graph(3) + >>> H = nx.relabel_nodes(G, lambda x: x**2) + >>> list(H) + [0, 1, 4] + + In a multigraph, relabeling two or more nodes to the same new node + will retain all edges, but may change the edge keys in the process: + + >>> G = nx.MultiGraph() + >>> G.add_edge(0, 1, value="a") # returns the key for this edge + 0 + >>> G.add_edge(0, 2, value="b") + 0 + >>> G.add_edge(0, 3, value="c") + 0 + >>> mapping = {1: 4, 2: 4, 3: 4} + >>> H = nx.relabel_nodes(G, mapping, copy=True) + >>> print(H[0]) + {4: {0: {'value': 'a'}, 1: {'value': 'b'}, 2: {'value': 'c'}}} + + This works for in-place relabeling too: + + >>> G = nx.relabel_nodes(G, mapping, copy=False) + >>> print(G[0]) + {4: {0: {'value': 'a'}, 1: {'value': 'b'}, 2: {'value': 'c'}}} + + Notes + ----- + Only the nodes specified in the mapping will be relabeled. + Any non-node keys in the mapping are ignored. + + The keyword setting copy=False modifies the graph in place. + Relabel_nodes avoids naming collisions by building a + directed graph from ``mapping`` which specifies the order of + relabelings. Naming collisions, such as a->b, b->c, are ordered + such that "b" gets renamed to "c" before "a" gets renamed "b". + In cases of circular mappings (e.g. a->b, b->a), modifying the + graph is not possible in-place and an exception is raised. + In that case, use copy=True. + + If a relabel operation on a multigraph would cause two or more + edges to have the same source, target and key, the second edge must + be assigned a new key to retain all edges. The new key is set + to the lowest non-negative integer not already used as a key + for edges between these two nodes. Note that this means non-numeric + keys may be replaced by numeric keys. + + See Also + -------- + convert_node_labels_to_integers + """ + # you can pass any callable e.g. f(old_label) -> new_label or + # e.g. str(old_label) -> new_label, but we'll just make a dictionary here regardless + m = {n: mapping(n) for n in G} if callable(mapping) else mapping + + if copy: + return _relabel_copy(G, m) + else: + return _relabel_inplace(G, m) + + +def _relabel_inplace(G, mapping): + if len(mapping.keys() & mapping.values()) > 0: + # labels sets overlap + # can we topological sort and still do the relabeling? 
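+        # A sketch of the resolution order: with mapping {"a": "b", "b": "c"},
+        # D below gets edges a->b and b->c, topological order [a, b, c], and
+        # the reversed order [c, b, a] renames "b" to "c" before "a" claims
+        # the label "b", so no node is overwritten.  A circular mapping such
+        # as {"a": "b", "b": "a"} admits no such order and falls into the
+        # NetworkXUnfeasible branch below.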
+ D = nx.DiGraph(list(mapping.items())) + D.remove_edges_from(nx.selfloop_edges(D)) + try: + nodes = reversed(list(nx.topological_sort(D))) + except nx.NetworkXUnfeasible as err: + raise nx.NetworkXUnfeasible( + "The node label sets are overlapping and no ordering can " + "resolve the mapping. Use copy=True." + ) from err + else: + # non-overlapping label sets, sort them in the order of G nodes + nodes = [n for n in G if n in mapping] + + multigraph = G.is_multigraph() + directed = G.is_directed() + + for old in nodes: + # Test that old is in both mapping and G, otherwise ignore. + try: + new = mapping[old] + G.add_node(new, **G.nodes[old]) + except KeyError: + continue + if new == old: + continue + if multigraph: + new_edges = [ + (new, new if old == target else target, key, data) + for (_, target, key, data) in G.edges(old, data=True, keys=True) + ] + if directed: + new_edges += [ + (new if old == source else source, new, key, data) + for (source, _, key, data) in G.in_edges(old, data=True, keys=True) + ] + # Ensure new edges won't overwrite existing ones + seen = set() + for i, (source, target, key, data) in enumerate(new_edges): + if target in G[source] and key in G[source][target]: + new_key = 0 if not isinstance(key, int | float) else key + while new_key in G[source][target] or (target, new_key) in seen: + new_key += 1 + new_edges[i] = (source, target, new_key, data) + seen.add((target, new_key)) + else: + new_edges = [ + (new, new if old == target else target, data) + for (_, target, data) in G.edges(old, data=True) + ] + if directed: + new_edges += [ + (new if old == source else source, new, data) + for (source, _, data) in G.in_edges(old, data=True) + ] + G.remove_node(old) + G.add_edges_from(new_edges) + return G + + +def _relabel_copy(G, mapping): + H = G.__class__() + H.add_nodes_from(mapping.get(n, n) for n in G) + H._node.update((mapping.get(n, n), d.copy()) for n, d in G.nodes.items()) + if G.is_multigraph(): + new_edges = [ + (mapping.get(n1, n1), mapping.get(n2, n2), k, d.copy()) + for (n1, n2, k, d) in G.edges(keys=True, data=True) + ] + + # check for conflicting edge-keys + undirected = not G.is_directed() + seen_edges = set() + for i, (source, target, key, data) in enumerate(new_edges): + while (source, target, key) in seen_edges: + if not isinstance(key, int | float): + key = 0 + key += 1 + seen_edges.add((source, target, key)) + if undirected: + seen_edges.add((target, source, key)) + new_edges[i] = (source, target, key, data) + + H.add_edges_from(new_edges) + else: + H.add_edges_from( + (mapping.get(n1, n1), mapping.get(n2, n2), d.copy()) + for (n1, n2, d) in G.edges(data=True) + ) + H.graph.update(G.graph) + return H + + +@nx._dispatchable(preserve_all_attrs=True, returns_graph=True) +def convert_node_labels_to_integers( + G, first_label=0, ordering="default", label_attribute=None +): + """Returns a copy of the graph G with the nodes relabeled using + consecutive integers. + + Parameters + ---------- + G : graph + A NetworkX graph + + first_label : int, optional (default=0) + An integer specifying the starting offset in numbering nodes. + The new integer labels are numbered first_label, ..., n-1+first_label. + + ordering : string + "default" : inherit node ordering from G.nodes() + "sorted" : inherit node ordering from sorted(G.nodes()) + "increasing degree" : nodes are sorted by increasing degree + "decreasing degree" : nodes are sorted by decreasing degree + + label_attribute : string, optional (default=None) + Name of node attribute to store old label. 
If None no attribute + is created. + + Notes + ----- + Node and edge attribute data are copied to the new (relabeled) graph. + + There is no guarantee that the relabeling of nodes to integers will + assign the same integers to corresponding nodes in two different graphs + (even if the graphs are identical). + Use the `ordering` argument to try to preserve the order. + + See Also + -------- + relabel_nodes + """ + N = G.number_of_nodes() + first_label + if ordering == "default": + mapping = dict(zip(G.nodes(), range(first_label, N))) + elif ordering == "sorted": + nlist = sorted(G.nodes()) + mapping = dict(zip(nlist, range(first_label, N))) + elif ordering == "increasing degree": + dv_pairs = [(d, n) for (n, d) in G.degree()] + dv_pairs.sort() # in-place sort from lowest to highest degree + mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N))) + elif ordering == "decreasing degree": + dv_pairs = [(d, n) for (n, d) in G.degree()] + dv_pairs.sort() # in-place sort from lowest to highest degree + dv_pairs.reverse() + mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N))) + else: + raise nx.NetworkXError(f"Unknown node ordering: {ordering}") + H = relabel_nodes(G, mapping) + # create node attribute with the old label + if label_attribute is not None: + nx.set_node_attributes(H, {v: k for k, v in mapping.items()}, label_attribute) + return H diff --git a/py311/lib/python3.11/site-packages/numpy/__config__.py b/py311/lib/python3.11/site-packages/numpy/__config__.py new file mode 100644 index 0000000000000000000000000000000000000000..ad3b0f3978ed203d4823155b18e3cbd594e6247e --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/__config__.py @@ -0,0 +1,170 @@ +# This file is generated by numpy's build process +# It contains system_info results at the time of building this package.
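+#
+# Note: the boolean fields below are rendered from Meson template strings,
+# e.g. bool("False".lower().replace("false", "")).  Replacing "false" with
+# "" leaves an empty (falsy) string, while a templated "True" lowercases to
+# the non-empty (truthy) "true", so the expression turns the templated text
+# into a real bool.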
+from enum import Enum +from numpy._core._multiarray_umath import ( + __cpu_features__, + __cpu_baseline__, + __cpu_dispatch__, +) + +__all__ = ["show_config"] +_built_with_meson = True + + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + + +def _cleanup(d): + """ + Removes empty values in a `dict` recursively + This ensures we remove values that Meson could not provide to CONFIG + """ + if isinstance(d, dict): + return {k: _cleanup(v) for k, v in d.items() if v and _cleanup(v)} + else: + return d + + +CONFIG = _cleanup( + { + "Compilers": { + "c": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "14.2.1", + "commands": r"cc", + "args": r"", + "linker args": r"", + }, + "cython": { + "name": "cython", + "linker": r"cython", + "version": "3.2.4", + "commands": r"cython", + "args": r"", + "linker args": r"", + }, + "c++": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "14.2.1", + "commands": r"c++", + "args": r"", + "linker args": r"", + }, + }, + "Machine Information": { + "host": { + "cpu": "x86_64", + "family": "x86_64", + "endian": "little", + "system": "linux", + }, + "build": { + "cpu": "x86_64", + "family": "x86_64", + "endian": "little", + "system": "linux", + }, + "cross-compiled": bool("False".lower().replace("false", "")), + }, + "Build Dependencies": { + "blas": { + "name": "scipy-openblas", + "found": bool("True".lower().replace("false", "")), + "version": "0.3.30", + "detection method": "pkgconfig", + "include directory": r"/opt/_internal/cpython-3.11.14/lib/python3.11/site-packages/scipy_openblas64/include", + "lib directory": r"/opt/_internal/cpython-3.11.14/lib/python3.11/site-packages/scipy_openblas64/lib", + "openblas configuration": r"OpenBLAS 0.3.30 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=64", + "pc file directory": r"/project/.openblas", + }, + "lapack": { + "name": "scipy-openblas", + "found": bool("True".lower().replace("false", "")), + "version": "0.3.30", + "detection method": "pkgconfig", + "include directory": r"/opt/_internal/cpython-3.11.14/lib/python3.11/site-packages/scipy_openblas64/include", + "lib directory": r"/opt/_internal/cpython-3.11.14/lib/python3.11/site-packages/scipy_openblas64/lib", + "openblas configuration": r"OpenBLAS 0.3.30 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=64", + "pc file directory": r"/project/.openblas", + }, + }, + "Python Information": { + "path": r"/tmp/build-env-o7xqgpgv/bin/python", + "version": "3.11", + }, + "SIMD Extensions": { + "baseline": __cpu_baseline__, + "found": [ + feature for feature in __cpu_dispatch__ if __cpu_features__[feature] + ], + "not found": [ + feature for feature in __cpu_dispatch__ if not __cpu_features__[feature] + ], + }, + } +) + + +def _check_pyyaml(): + import yaml + + return yaml + + +def show(mode=DisplayModes.stdout.value): + """ + Show libraries and system information on which NumPy was built + and is being used + + Parameters + ---------- + mode : {`'stdout'`, `'dicts'`}, optional. + Indicates how to display the config information. + `'stdout'` prints to console, `'dicts'` returns a dictionary + of the configuration. + + Returns + ------- + out : {`dict`, `None`} + If mode is `'dicts'`, a dict is returned, else None + + See Also + -------- + get_include : Returns the directory containing NumPy C + header files. + + Notes + ----- + 1. 
The `'stdout'` mode will give more readable + output if ``pyyaml`` is installed + + """ + if mode == DisplayModes.stdout.value: + try: # Non-standard library, check import + yaml = _check_pyyaml() + + print(yaml.dump(CONFIG)) + except ModuleNotFoundError: + import warnings + import json + + warnings.warn("Install `pyyaml` for better output", stacklevel=1) + print(json.dumps(CONFIG, indent=2)) + elif mode == DisplayModes.dicts.value: + return CONFIG + else: + raise AttributeError( + f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" + ) + + +def show_config(mode=DisplayModes.stdout.value): + return show(mode) + + +show_config.__doc__ = show.__doc__ +show_config.__module__ = "numpy" diff --git a/py311/lib/python3.11/site-packages/numpy/__config__.pyi b/py311/lib/python3.11/site-packages/numpy/__config__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..21e8b01fdd96dcb5044758d1fa0a14839c5d1e3a --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/__config__.pyi @@ -0,0 +1,108 @@ +from enum import Enum +from types import ModuleType +from typing import ( + Final, + Literal as L, + NotRequired, + TypedDict, + overload, + type_check_only, +) + +_CompilerConfigDictValue = TypedDict( + "_CompilerConfigDictValue", + { + "name": str, + "linker": str, + "version": str, + "commands": str, + "args": str, + "linker args": str, + }, +) +_CompilerConfigDict = TypedDict( + "_CompilerConfigDict", + { + "c": _CompilerConfigDictValue, + "cython": _CompilerConfigDictValue, + "c++": _CompilerConfigDictValue, + }, +) +_MachineInformationDict = TypedDict( + "_MachineInformationDict", + { + "host": _MachineInformationDictValue, + "build": _MachineInformationDictValue, + "cross-compiled": NotRequired[L[True]], + }, +) + +@type_check_only +class _MachineInformationDictValue(TypedDict): + cpu: str + family: str + endian: L["little", "big"] + system: str + +_BuildDependenciesDictValue = TypedDict( + "_BuildDependenciesDictValue", + { + "name": str, + "found": NotRequired[L[True]], + "version": str, + "include directory": str, + "lib directory": str, + "openblas configuration": str, + "pc file directory": str, + }, +) + +class _BuildDependenciesDict(TypedDict): + blas: _BuildDependenciesDictValue + lapack: _BuildDependenciesDictValue + +class _PythonInformationDict(TypedDict): + path: str + version: str + +_SIMDExtensionsDict = TypedDict( + "_SIMDExtensionsDict", + { + "baseline": list[str], + "found": list[str], + "not found": list[str], + }, +) + +_ConfigDict = TypedDict( + "_ConfigDict", + { + "Compilers": _CompilerConfigDict, + "Machine Information": _MachineInformationDict, + "Build Dependencies": _BuildDependenciesDict, + "Python Information": _PythonInformationDict, + "SIMD Extensions": _SIMDExtensionsDict, + }, +) + +### + +__all__ = ["show_config"] + +CONFIG: Final[_ConfigDict] = ... + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + +def _check_pyyaml() -> ModuleType: ... + +@overload +def show(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show(mode: L["dicts"]) -> _ConfigDict: ... + +@overload +def show_config(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show_config(mode: L["dicts"]) -> _ConfigDict: ... 
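The overloads above mirror the runtime behaviour of ``show``/``show_config``: with the default ``mode="stdout"`` they print and return ``None``, while ``mode="dicts"`` returns the ``CONFIG`` mapping. A minimal usage sketch (the concrete values depend on the local build)::

    import numpy as np

    np.show_config()                    # prints the build config, returns None
    cfg = np.show_config(mode="dicts")  # returns the nested CONFIG dict
    print(cfg["Build Dependencies"]["blas"]["name"])  # e.g. "scipy-openblas"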
diff --git a/py311/lib/python3.11/site-packages/numpy/__init__.cython-30.pxd b/py311/lib/python3.11/site-packages/numpy/__init__.cython-30.pxd new file mode 100644 index 0000000000000000000000000000000000000000..c71898626070b23f1ce6bdc029fbf8082b3d913c --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/__init__.cython-30.pxd @@ -0,0 +1,1242 @@ +# NumPy static imports for Cython >= 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. This is done automatically by Cython 3.0+ if a call +# is not detected inside of the module. +# +# Author: Dag Sverre Seljebotn +# + +from cpython.ref cimport Py_INCREF +from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */ + """ + + +cdef extern from "numpy/arrayobject.h": + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) + ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + pass + + ctypedef struct npy_cdouble: + pass + + ctypedef struct npy_clongdouble: + pass + + ctypedef struct npy_complex64: + pass + + ctypedef struct npy_complex128: + pass + + ctypedef struct npy_complex160: + pass + + ctypedef struct npy_complex192: + pass + + ctypedef struct npy_complex256: + pass + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VSTRING + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES_LEGACY + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + + NPY_INTP + NPY_UINTP + NPY_DEFAULT_INT # Not a compile time constant (normally)! 
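+    # For example (sketch), a runtime dtype check from Cython can compare
+    # these type numbers against PyArray_TYPE, declared further below:
+    #     if PyArray_TYPE(arr) == NPY_FLOAT64: ...
+    # for some ndarray ``arr``.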
+ + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x + NPY_RAVEL_AXIS # Used for functions like PyArray_Mean + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef int type_num + + @property + cdef inline npy_intp itemsize(self) noexcept nogil: + return PyDataType_ELSIZE(self) + + @property + cdef inline npy_intp alignment(self) noexcept nogil: + return PyDataType_ALIGNMENT(self) + + # Use fields/names with care as they may be NULL. You must check + # for this using PyDataType_HASFIELDS. + @property + cdef inline object fields(self): + return PyDataType_FIELDS(self) + + @property + cdef inline tuple names(self): + return PyDataType_NAMES(self) + + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. 
+ @property + cdef inline PyArray_ArrayDescr* subarray(self) noexcept nogil: + return PyDataType_SUBARRAY(self) + + @property + cdef inline npy_uint64 flags(self) noexcept nogil: + """The data types flags.""" + return PyDataType_FLAGS(self) + + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + + @property + cdef inline int numiter(self) noexcept nogil: + """The number of arrays that need to be broadcast to the same shape.""" + return PyArray_MultiIter_NUMITER(self) + + @property + cdef inline npy_intp size(self) noexcept nogil: + """The total broadcasted size.""" + return PyArray_MultiIter_SIZE(self) + + @property + cdef inline npy_intp index(self) noexcept nogil: + """The current (1-d) index into the broadcasted result.""" + return PyArray_MultiIter_INDEX(self) + + @property + cdef inline int nd(self) noexcept nogil: + """The number of dimensions in the broadcasted result.""" + return PyArray_MultiIter_NDIM(self) + + @property + cdef inline npy_intp* dimensions(self) noexcept nogil: + """The shape of the broadcasted result.""" + return PyArray_MultiIter_DIMS(self) + + @property + cdef inline void** iters(self) noexcept nogil: + """An array of iterator objects that holds the iterators for the arrays to be broadcast together. + On return, the iterators are adjusted for broadcasting.""" + return PyArray_MultiIter_ITERS(self) + + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + # NOTE: no field declarations since direct access is deprecated since NumPy 1.7 + # Instead, we use properties that map to the corresponding C-API functions. + + @property + cdef inline PyObject* base(self) noexcept nogil: + """Returns a borrowed reference to the object owning the data/memory. + """ + return PyArray_BASE(self) + + @property + cdef inline dtype descr(self): + """Returns an owned reference to the dtype of the array. + """ + return PyArray_DESCR(self) + + @property + cdef inline int ndim(self) noexcept nogil: + """Returns the number of dimensions in the array. + """ + return PyArray_NDIM(self) + + @property + cdef inline npy_intp *shape(self) noexcept nogil: + """Returns a pointer to the dimensions/shape of the array. + The number of elements matches the number of dimensions of the array (ndim). + Can return NULL for 0-dimensional arrays. + """ + return PyArray_DIMS(self) + + @property + cdef inline npy_intp *strides(self) noexcept nogil: + """Returns a pointer to the strides of the array. + The number of elements matches the number of dimensions of the array (ndim). + """ + return PyArray_STRIDES(self) + + @property + cdef inline npy_intp size(self) noexcept nogil: + """Returns the total size (in number of elements) of the array. + """ + return PyArray_SIZE(self) + + @property + cdef inline char* data(self) noexcept nogil: + """The pointer to the data buffer as a char*. + This is provided for legacy reasons to avoid direct struct field access. + For new code that needs this access, you probably want to cast the result + of `PyArray_DATA()` instead, which returns a 'void*'. + """ + return PyArray_BYTES(self) + + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. 
+ # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. + int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + npy_intp PyDataType_ELSIZE(dtype) nogil + npy_intp PyDataType_ALIGNMENT(dtype) nogil + PyObject* PyDataType_METADATA(dtype) nogil + PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil + PyObject* PyDataType_NAMES(dtype) nogil + PyObject* PyDataType_FIELDS(dtype) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + npy_uint64 PyDataType_FLAGS(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # 
works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(ndarray, int val) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* 
PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) + + int PyArray_PyIntAsInt (object) except? 
-1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + npy_bool PyArray_CanCastScalar (type, type) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + object PyArray_Correlate (object, object, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) except 0 + #int PyArray_BufferConverter 
(object, chunk) except 0 + int PyArray_AxisConverter (object, int *) except 0 + int PyArray_BoolConverter (object, npy_bool *) except 0 + int PyArray_ByteorderConverter (object, char *) except 0 + int PyArray_OrderConverter (object, NPY_ORDER *) except 0 + unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) except -1 + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 + #int PyArray_OutputConverter (object, ndarray*) except 0 + object PyArray_BroadcastToShape (object, npy_intp *, int) + #int PyArray_DescrAlignConverter (object, dtype*) except 0 + #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 + int PyArray_SearchsideConverter (object, void *) except 0 + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + + # The memory handler functions require the NumPy 1.22 API + # and may require defining NPY_TARGET_VERSION + ctypedef struct PyDataMemAllocator: + void *ctx + void* (*malloc) (void *ctx, size_t size) + void* (*calloc) (void *ctx, size_t nelem, size_t elsize) + void* (*realloc) (void *ctx, void *ptr, size_t new_size) + void (*free) (void *ctx, void *ptr, size_t size) + + ctypedef struct PyDataMem_Handler: + char* name + npy_uint8 version + PyDataMemAllocator allocator + + object PyDataMem_SetHandler(object handler) + object PyDataMem_GetHandler() + + # additional datetime related functions are defined below + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. 
+ +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +ctypedef npy_longlong longlong_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, a, b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, a, b, c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, a, b, c, d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, a, b, c, d, e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return d.subarray.shape + else: + return () + + +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct PyArray_DatetimeMetaData: + NPY_DATETIMEUNIT base + int64_t num + + ctypedef struct npy_datetimestruct: + int64_t year + int32_t month, day, hour, min, sec, us, ps, as + + # Iterator API added in v1.6 + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. 
+ # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil + + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +cdef extern from "numpy/arrayobject.h": + # These are part of the C-API defined in `__multiarray_api.h` + + # NumPy internal definitions in datetime_strings.c: + int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( + int local, NPY_DATETIMEUNIT base) + int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( + npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING casting) except -1 + + # NumPy internal definition in datetime.c: + # May return 1 to indicate that object does not appear to be a datetime + # (returns 0 on success). 
+ int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( + PyObject *obj, npy_datetimestruct *out, + NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 + int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( + PyArray_DatetimeMetaData *meta, npy_datetime dt, + npy_datetimestruct *out) except -1 + int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( + PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, + npy_datetime *out) except -1 + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + # deprecated + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + # use these instead + NPY_FPE_DIVIDEBYZERO + NPY_FPE_OVERFLOW + NPY_FPE_UNDERFLOW + NPY_FPE_INVALID + + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) except -1 + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base) except *: + Py_INCREF(base) # important to do this before stealing the reference below! 
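+    # PyArray_SetBaseObject steals a reference to `base`, so the Py_INCREF
+    # above donates one on the caller's behalf; `base` then stays alive for
+    # as long as `arr` holds it.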
+    PyArray_SetBaseObject(arr, base) + +cdef inline object get_array_base(ndarray arr): + base = PyArray_BASE(arr) + if base is NULL: + return None + return <object>base + +# Versions of the import_* functions which are more suitable for +# Cython code. +cdef inline int import_array() except -1: + try: + __pyx_import_array() + except Exception: + raise ImportError("numpy._core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + + +cdef inline bint is_timedelta64_object(object obj) noexcept: + """ + Cython equivalent of `isinstance(obj, np.timedelta64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) + + +cdef inline bint is_datetime64_object(object obj) noexcept: + """ + Cython equivalent of `isinstance(obj, np.datetime64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) + + +cdef inline npy_datetime get_datetime64_value(object obj) noexcept nogil: + """ + returns the int64 value underlying scalar numpy datetime64 object + + Note that to interpret this as a datetime, the corresponding unit is + also needed. That can be found using `get_datetime64_unit`. + """ + return (<PyDatetimeScalarObject*>obj).obval + + +cdef inline npy_timedelta get_timedelta64_value(object obj) noexcept nogil: + """ + returns the int64 value underlying scalar numpy timedelta64 object + """ + return (<PyTimedeltaScalarObject*>obj).obval + + +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil: + """ + returns the unit part of the dtype for a numpy datetime64 object.
+ """ + return (obj).obmeta.base + + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int 
NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + NpyIter_IterNextFunc NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) + +# NpyString API +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct npy_string_allocator: + pass + + ctypedef struct npy_packed_static_string: + pass + + ctypedef struct npy_static_string: + size_t size + const char *buf + + ctypedef struct PyArray_StringDTypeObject: + PyArray_Descr base + PyObject *na_object + char coerce + char has_nan_na + char has_string_na + char array_owned + npy_static_string default_string + npy_static_string na_name + npy_string_allocator *allocator + +cdef extern from "numpy/arrayobject.h": + npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) + void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) + void NpyString_release_allocator(npy_string_allocator *allocator) + void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) + int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string) + int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string) + int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char 
*buf, size_t size) diff --git a/py311/lib/python3.11/site-packages/numpy/__init__.pxd b/py311/lib/python3.11/site-packages/numpy/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..40a24b6c7cc1e60c213e5365c2f3c793b10ee0ae --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/__init__.pxd @@ -0,0 +1,1155 @@ +# NumPy static imports for Cython < 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. +# +# Author: Dag Sverre Seljebotn +# + +DEF _buffer_format_string_len = 255 + +cimport cpython.buffer as pybuf +from cpython.ref cimport Py_INCREF +from cpython.mem cimport PyObject_Malloc, PyObject_Free +from cpython.object cimport PyObject, PyTypeObject +from cpython.buffer cimport PyObject_GetBuffer +from cpython.type cimport type +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.pxd" */ + """ + + +cdef extern from "Python.h": + ctypedef int Py_intptr_t + bint PyObject_TypeCheck(object obj, PyTypeObject* type) + +cdef extern from "numpy/arrayobject.h": + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) + ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + pass + + ctypedef struct npy_cdouble: + pass + + ctypedef struct npy_clongdouble: + pass + + ctypedef struct npy_complex64: + pass + + ctypedef struct npy_complex128: + pass + + ctypedef struct npy_complex160: + pass + + ctypedef struct npy_complex192: + pass + + ctypedef struct npy_complex256: + pass + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VSTRING + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES_LEGACY + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_COMPLEX64 + 
NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + + NPY_INTP + NPY_UINTP + NPY_DEFAULT_INT # Not a compile time constant (normally)! + + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x + NPY_RAVEL_AXIS # Used for functions like PyArray_Mean + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + # Flags are not directly accessible on Cython <3. Use PyDataType_FLAGS. + # cdef char flags + cdef int type_num + # itemsize/elsize, alignment, fields, names, and subarray must + # use the `PyDataType_*` accessor macros. With Cython 3 you can + # still use getter attributes `dtype.itemsize` + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + cdef int numiter + cdef npy_intp size, index + cdef int nd + cdef npy_intp *dimensions + cdef void **iters + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + cdef: + # Only taking a few of the most commonly used and stable fields. + # One should use PyArray_* macros instead to access the C fields. + char *data + int ndim "nd" + npy_intp *shape "dimensions" + npy_intp *strides + dtype descr # deprecated since NumPy 1.7 ! 
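+            # (Accessing `descr` from Cython still works, but new code should
+            # prefer the accessor macros declared further down, e.g.
+            #     cdef PyArray_Descr* d = PyArray_DESCR(arr)  # borrowed reference
+            # which keeps working however the underlying struct evolves.)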
+ PyObject* base # NOT PUBLIC, DO NOT USE ! + + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. + # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. + int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + npy_intp PyDataType_ELSIZE(dtype) nogil + npy_intp PyDataType_ALIGNMENT(dtype) nogil + PyObject* PyDataType_METADATA(dtype) nogil + PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil + PyObject* PyDataType_NAMES(dtype) nogil + PyObject* PyDataType_FIELDS(dtype) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + npy_uint64 PyDataType_FLAGS(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) 
nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(ndarray, int val) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + 
void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) 
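+    # Typical multi-iterator (broadcasting) usage, as a sketch only; assumes
+    # two float64 ndarrays `a` and `b` that broadcast against each other:
+    #
+    #     multi = PyArray_MultiIterNew(2, <void*>a, <void*>b)
+    #     while PyArray_MultiIter_NOTDONE(multi):
+    #         x = (<double*>PyArray_MultiIter_DATA(multi, 0))[0]
+    #         y = (<double*>PyArray_MultiIter_DATA(multi, 1))[0]
+    #         # ... combine x and y here ...
+    #         PyArray_MultiIter_NEXT(multi)
+    #
+    # (The PyArray_MultiIterNew1..PyArray_MultiIterNew5 inline helpers defined
+    # later in this file wrap this varargs call.)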
+ + int PyArray_PyIntAsInt (object) except? -1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + npy_bool PyArray_CanCastScalar (type, type) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + object PyArray_Correlate (object, object, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) 
except 0 + #int PyArray_BufferConverter (object, chunk) except 0 + int PyArray_AxisConverter (object, int *) except 0 + int PyArray_BoolConverter (object, npy_bool *) except 0 + int PyArray_ByteorderConverter (object, char *) except 0 + int PyArray_OrderConverter (object, NPY_ORDER *) except 0 + unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) except -1 + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 + #int PyArray_OutputConverter (object, ndarray*) except 0 + object PyArray_BroadcastToShape (object, npy_intp *, int) + #int PyArray_DescrAlignConverter (object, dtype*) except 0 + #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 + int PyArray_SearchsideConverter (object, void *) except 0 + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + + # The memory handler functions require the NumPy 1.22 API + # and may require defining NPY_TARGET_VERSION + ctypedef struct PyDataMemAllocator: + void *ctx + void* (*malloc) (void *ctx, size_t size) + void* (*calloc) (void *ctx, size_t nelem, size_t elsize) + void* (*realloc) (void *ctx, void *ptr, size_t new_size) + void (*free) (void *ctx, void *ptr, size_t size) + + ctypedef struct PyDataMem_Handler: + char* name + npy_uint8 version + PyDataMemAllocator allocator + + object PyDataMem_SetHandler(object handler) + object PyDataMem_GetHandler() + + # additional datetime related functions are defined below + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. 
+
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+
+ctypedef float complex complex64_t
+ctypedef double complex complex128_t
+
+ctypedef npy_longlong longlong_t
+ctypedef npy_ulonglong ulonglong_t
+
+ctypedef npy_intp intp_t
+ctypedef npy_uintp uintp_t
+
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+
+ctypedef float complex cfloat_t
+ctypedef double complex cdouble_t
+ctypedef double complex complex_t
+ctypedef long double complex clongdouble_t
+
+cdef inline object PyArray_MultiIterNew1(a):
+    return PyArray_MultiIterNew(1, <void*>a)
+
+cdef inline object PyArray_MultiIterNew2(a, b):
+    return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+
+cdef inline object PyArray_MultiIterNew3(a, b, c):
+    return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*>c)
+
+cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+    return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*>d)
+
+cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+    return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*>d, <void*>e)
+
+cdef inline tuple PyDataType_SHAPE(dtype d):
+    if PyDataType_HASSUBARRAY(d):
+        return <tuple>d.subarray.shape
+    else:
+        return ()
+
+
+cdef extern from "numpy/ndarrayobject.h":
+    PyTypeObject PyTimedeltaArrType_Type
+    PyTypeObject PyDatetimeArrType_Type
+    ctypedef int64_t npy_timedelta
+    ctypedef int64_t npy_datetime
+
+cdef extern from "numpy/ndarraytypes.h":
+    ctypedef struct PyArray_DatetimeMetaData:
+        NPY_DATETIMEUNIT base
+        int64_t num
+
+    ctypedef struct npy_datetimestruct:
+        int64_t year
+        int32_t month, day, hour, min, sec, us, ps, as
+
+    # Iterator API added in v1.6
+    #
+    # These don't match the definition in the C API because Cython can't wrap
+    # function pointers that return functions.
+ # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +cdef extern from "numpy/arrayobject.h": + # These are part of the C-API defined in `__multiarray_api.h` + + # NumPy internal definitions in datetime_strings.c: + int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( + int local, NPY_DATETIMEUNIT base) + int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( + npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING casting) except -1 + + # NumPy internal definition in datetime.c: + # May return 1 to indicate that object does not appear to be a datetime + # (returns 0 on success). 
+ int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( + PyObject *obj, npy_datetimestruct *out, + NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 + int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( + PyArray_DatetimeMetaData *meta, npy_datetime dt, + npy_datetimestruct *out) except -1 + int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( + PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, + npy_datetime *out) except -1 + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + # deprecated + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + # use these instead + NPY_FPE_DIVIDEBYZERO + NPY_FPE_OVERFLOW + NPY_FPE_UNDERFLOW + NPY_FPE_INVALID + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) except -1 + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base): + Py_INCREF(base) # important to do this before stealing the reference below! 
+    PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+    base = PyArray_BASE(arr)
+    if base is NULL:
+        return None
+    return <object>base
+
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+    try:
+        __pyx_import_array()
+    except Exception:
+        raise ImportError("numpy._core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy._core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy._core.umath failed to import")
+
+
+cdef inline bint is_timedelta64_object(object obj):
+    """
+    Cython equivalent of `isinstance(obj, np.timedelta64)`
+
+    Parameters
+    ----------
+    obj : object
+
+    Returns
+    -------
+    bool
+    """
+    return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
+
+
+cdef inline bint is_datetime64_object(object obj):
+    """
+    Cython equivalent of `isinstance(obj, np.datetime64)`
+
+    Parameters
+    ----------
+    obj : object
+
+    Returns
+    -------
+    bool
+    """
+    return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
+
+
+cdef inline npy_datetime get_datetime64_value(object obj) nogil:
+    """
+    returns the int64 value underlying scalar numpy datetime64 object
+
+    Note that to interpret this as a datetime, the corresponding unit is
+    also needed. That can be found using `get_datetime64_unit`.
+    """
+    return (<PyDatetimeScalarObject*>obj).obval
+
+
+cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:
+    """
+    returns the int64 value underlying scalar numpy timedelta64 object
+    """
+    return (<PyTimedeltaScalarObject*>obj).obval
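+
+
+# A minimal usage sketch, illustrative only and not part of the NumPy API;
+# the helper name `_datetime64_payload` is hypothetical. It pairs the type
+# check above with the raw-value accessor.
+cdef inline npy_datetime _datetime64_payload(object obj) except? -1:
+    # The raw epoch count alone is ambiguous; interpreting it also needs
+    # the unit, see `get_datetime64_unit` below.
+    if not is_datetime64_object(obj):
+        raise TypeError("expected a numpy.datetime64 scalar")
+    return get_datetime64_value(obj)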
+ """ + return (obj).obmeta.base + + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int 
NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) + +# NpyString API +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct npy_string_allocator: + pass + + ctypedef struct npy_packed_static_string: + pass + + ctypedef struct npy_static_string: + size_t size + const char *buf + + ctypedef struct PyArray_StringDTypeObject: + PyArray_Descr base + PyObject *na_object + char coerce + char has_nan_na + char has_string_na + char array_owned + npy_static_string default_string + npy_static_string na_name + npy_string_allocator *allocator + +cdef extern from "numpy/arrayobject.h": + npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) + void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) + void NpyString_release_allocator(npy_string_allocator *allocator) + void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) + int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string) + int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string) + int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const 
char *buf, size_t size) diff --git a/py311/lib/python3.11/site-packages/numpy/__init__.py b/py311/lib/python3.11/site-packages/numpy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef7c1ed7678aeb40101965a153fa5ba24579a544 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/__init__.py @@ -0,0 +1,942 @@ +""" +NumPy +===== + +Provides + 1. An array object of arbitrary homogeneous items + 2. Fast mathematical operations over arrays + 3. Linear Algebra, Fourier Transforms, Random Number Generation + +How to use the documentation +---------------------------- +Documentation is available in two forms: docstrings provided +with the code, and a loose standing reference guide, available from +`the NumPy homepage `_. + +We recommend exploring the docstrings using +`IPython `_, an advanced Python shell with +TAB-completion and introspection capabilities. See below for further +instructions. + +The docstring examples assume that `numpy` has been imported as ``np``:: + + >>> import numpy as np + +Code snippets are indicated by three greater-than signs:: + + >>> x = 42 + >>> x = x + 1 + +Use the built-in ``help`` function to view a function's docstring:: + + >>> help(np.sort) + ... # doctest: +SKIP + +For some objects, ``np.info(obj)`` may provide additional help. This is +particularly true if you see the line "Help on ufunc object:" at the top +of the help() page. Ufuncs are implemented in C, not Python, for speed. +The native Python help() does not know how to view their help, but our +np.info() function does. + +Available subpackages +--------------------- +lib + Basic functions used by several sub-packages. +random + Core Random Tools +linalg + Core Linear Algebra Tools +fft + Core FFT routines +polynomial + Polynomial tools +testing + NumPy testing tools +distutils + Enhancements to distutils with support for + Fortran compilers support and more (for Python <= 3.11) + +Utilities +--------- +test + Run numpy unittests +show_config + Show numpy build configuration +__version__ + NumPy version string + +Viewing documentation using IPython +----------------------------------- + +Start IPython and import `numpy` usually under the alias ``np``: `import +numpy as np`. Then, directly past or use the ``%cpaste`` magic to paste +examples into the shell. To see which functions are available in `numpy`, +type ``np.`` (where ```` refers to the TAB key), or use +``np.*cos*?`` (where ```` refers to the ENTER key) to narrow +down the list. To view the docstring for a function, use +``np.cos?`` (to view the docstring) and ``np.cos??`` (to view +the source code). + +Copies vs. in-place operation +----------------------------- +Most of the functions in `numpy` return a copy of the array argument +(e.g., `np.sort`). In-place versions of these functions are often +available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``. +Exceptions to this rule are documented. + +""" +import os +import sys +import warnings + +# If a version with git hash was stored, use that instead +from . import version +from ._expired_attrs_2_0 import __expired_attributes__ +from ._globals import _CopyMode, _NoValue +from .version import __version__ + +# We first need to detect if we're being called as part of the numpy setup +# procedure itself in a reliable manner. 
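+# (numpy's own build scripts historically set ``__NUMPY_SETUP__ = True`` in
+# builtins before importing the package, so hitting the NameError below
+# simply means we are running from a regular installation.)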
+try: + __NUMPY_SETUP__ # noqa: B018 +except NameError: + __NUMPY_SETUP__ = False + +if __NUMPY_SETUP__: + sys.stderr.write('Running from numpy source directory.\n') +else: + # Allow distributors to run custom init code before importing numpy._core + from . import _distributor_init + + try: + from numpy.__config__ import show_config + except ImportError as e: + if isinstance(e, ModuleNotFoundError) and e.name == "numpy.__config__": + # The __config__ module itself was not found, so add this info: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) from e + raise + + from . import _core + from ._core import ( + False_, + ScalarType, + True_, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + any, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_str, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + asin, + asinh, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + base_repr, + binary_repr, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + block, + bool, + bool_, + broadcast, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + can_cast, + cbrt, + cdouble, + ceil, + character, + choose, + clip, + clongdouble, + complex64, + complex128, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copysign, + copyto, + correlate, + cos, + cosh, + count_nonzero, + cross, + csingle, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + diagonal, + divide, + divmod, + dot, + double, + dtype, + e, + einsum, + einsum_path, + empty, + empty_like, + equal, + errstate, + euler_gamma, + exp, + exp2, + expm1, + fabs, + finfo, + flatiter, + flatnonzero, + flexible, + float16, + float32, + float64, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromstring, + full, + full_like, + gcd, + generic, + geomspace, + get_printoptions, + getbufsize, + geterr, + geterrcall, + greater, + greater_equal, + half, + heaviside, + hstack, + hypot, + identity, + iinfo, + indices, + inexact, + inf, + inner, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + invert, + is_busday, + isclose, + isdtype, + isfinite, + isfortran, + isinf, + isnan, + isnat, + isscalar, + issubdtype, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + linspace, + little_endian, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + matmul, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + memmap, + min, + min_scalar_type, + minimum, + mod, + modf, + moveaxis, + multiply, + nan, + ndarray, + ndim, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ones, + ones_like, + outer, + partition, + permute_dims, + pi, + positive, + 
pow, + power, + printoptions, + prod, + promote_types, + ptp, + put, + putmask, + rad2deg, + radians, + ravel, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + round, + sctypeDict, + searchsorted, + set_printoptions, + setbufsize, + seterr, + seterrcall, + shape, + shares_memory, + short, + sign, + signbit, + signedinteger, + sin, + single, + sinh, + size, + sort, + spacing, + sqrt, + square, + squeeze, + stack, + std, + str_, + subtract, + sum, + swapaxes, + take, + tan, + tanh, + tensordot, + timedelta64, + trace, + transpose, + true_divide, + trunc, + typecodes, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + unstack, + ushort, + var, + vdot, + vecdot, + vecmat, + void, + vstack, + where, + zeros, + zeros_like, + ) + + # NOTE: It's still under discussion whether these aliases + # should be removed. + for ta in ["float96", "float128", "complex192", "complex256"]: + try: + globals()[ta] = getattr(_core, ta) + except AttributeError: + pass + del ta + + from . import lib, matrixlib as _mat + from .lib import scimath as emath + from .lib._arraypad_impl import pad + from .lib._arraysetops_impl import ( + ediff1d, + intersect1d, + isin, + setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, + ) + from .lib._function_base_impl import ( + angle, + append, + asarray_chkfinite, + average, + bartlett, + bincount, + blackman, + copy, + corrcoef, + cov, + delete, + diff, + digitize, + extract, + flip, + gradient, + hamming, + hanning, + i0, + insert, + interp, + iterable, + kaiser, + median, + meshgrid, + percentile, + piecewise, + place, + quantile, + rot90, + select, + sinc, + sort_complex, + trapezoid, + trim_zeros, + unwrap, + vectorize, + ) + from .lib._histograms_impl import histogram, histogram_bin_edges, histogramdd + from .lib._index_tricks_impl import ( + c_, + diag_indices, + diag_indices_from, + fill_diagonal, + index_exp, + ix_, + mgrid, + ndenumerate, + ndindex, + ogrid, + r_, + ravel_multi_index, + s_, + unravel_index, + ) + from .lib._nanfunctions_impl import ( + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, + ) + from .lib._npyio_impl import ( + fromregex, + genfromtxt, + load, + loadtxt, + packbits, + save, + savetxt, + savez, + savez_compressed, + unpackbits, + ) + from .lib._polynomial_impl import ( + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polysub, + polyval, + roots, + ) + from .lib._shape_base_impl import ( + apply_along_axis, + apply_over_axes, + array_split, + column_stack, + dsplit, + dstack, + expand_dims, + hsplit, + kron, + put_along_axis, + row_stack, + split, + take_along_axis, + tile, + vsplit, + ) + from .lib._stride_tricks_impl import ( + broadcast_arrays, + broadcast_shapes, + broadcast_to, + ) + from .lib._twodim_base_impl import ( + diag, + diagflat, + eye, + fliplr, + flipud, + histogram2d, + mask_indices, + tri, + tril, + tril_indices, + tril_indices_from, + triu, + triu_indices, + triu_indices_from, + vander, + ) + from .lib._type_check_impl import ( + common_type, + imag, + iscomplex, + iscomplexobj, + isreal, + isrealobj, + mintypecode, + nan_to_num, + real, + real_if_close, + typename, + ) + from .lib._ufunclike_impl import fix, isneginf, isposinf + from 
.lib._utils_impl import get_include, info, show_runtime + from .matrixlib import asmatrix, bmat, matrix + + # public submodules are imported lazily, therefore are accessible from + # __getattr__. Note that `distutils` (deprecated) and `array_api` + # (experimental label) are not added here, because `from numpy import *` + # must not raise any warnings - that's too disruptive. + __numpy_submodules__ = { + "linalg", "fft", "dtypes", "random", "polynomial", "ma", + "exceptions", "lib", "ctypeslib", "testing", "typing", + "f2py", "test", "rec", "char", "core", "strings", + } + + # We build warning messages for former attributes + _msg = ( + "module 'numpy' has no attribute '{n}'.\n" + "`np.{n}` was a deprecated alias for the builtin `{n}`. " + "To avoid this error in existing code, use `{n}` by itself. " + "Doing this will not modify any behavior and is safe. {extended_msg}\n" + "The aliases was originally deprecated in NumPy 1.20; for more " + "details and guidance see the original release note at:\n" + " https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations") + + _specific_msg = ( + "If you specifically wanted the numpy scalar type, use `np.{}` here.") + + _int_extended_msg = ( + "When replacing `np.{}`, you may wish to use e.g. `np.int64` " + "or `np.int32` to specify the precision. If you wish to review " + "your current use, check the release note link for " + "additional information.") + + _type_info = [ + ("object", ""), # The NumPy scalar only exists by name. + ("float", _specific_msg.format("float64")), + ("complex", _specific_msg.format("complex128")), + ("str", _specific_msg.format("str_")), + ("int", _int_extended_msg.format("int"))] + + __former_attrs__ = { + n: _msg.format(n=n, extended_msg=extended_msg) + for n, extended_msg in _type_info + } + + # Some of these could be defined right away, but most were aliases to + # the Python objects and only removed in NumPy 1.24. Defining them should + # probably wait for NumPy 1.26 or 2.0. + # When defined, these should possibly not be added to `__all__` to avoid + # import with `from numpy import *`. 
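+    # (For example, once ``np.str`` is defined it would resolve to the
+    # ``np.str_`` scalar type; today ``__getattr__`` below emits a
+    # FutureWarning for it and then raises the ``__former_attrs__`` error.)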
+ __future_scalars__ = {"str", "bytes", "object"} + + __array_api_version__ = "2024.12" + + from ._array_api_info import __array_namespace_info__ + + __all__ = list( + __numpy_submodules__ | + set(_core.__all__) | + set(_mat.__all__) | + set(lib._histograms_impl.__all__) | + set(lib._nanfunctions_impl.__all__) | + set(lib._function_base_impl.__all__) | + set(lib._twodim_base_impl.__all__) | + set(lib._shape_base_impl.__all__) | + set(lib._type_check_impl.__all__) | + set(lib._arraysetops_impl.__all__) | + set(lib._ufunclike_impl.__all__) | + set(lib._arraypad_impl.__all__) | + set(lib._utils_impl.__all__) | + set(lib._stride_tricks_impl.__all__) | + set(lib._polynomial_impl.__all__) | + set(lib._npyio_impl.__all__) | + set(lib._index_tricks_impl.__all__) | + {"emath", "show_config", "__version__", "__array_namespace_info__"} + ) + + # Filter out Cython harmless warnings + warnings.filterwarnings("ignore", message="numpy.dtype size changed") + warnings.filterwarnings("ignore", message="numpy.ufunc size changed") + warnings.filterwarnings("ignore", message="numpy.ndarray size changed") + + def __getattr__(attr): + # Warn for expired attributes + import warnings + + if attr == "linalg": + import numpy.linalg as linalg + return linalg + elif attr == "fft": + import numpy.fft as fft + return fft + elif attr == "dtypes": + import numpy.dtypes as dtypes + return dtypes + elif attr == "random": + import numpy.random as random + return random + elif attr == "polynomial": + import numpy.polynomial as polynomial + return polynomial + elif attr == "ma": + import numpy.ma as ma + return ma + elif attr == "ctypeslib": + import numpy.ctypeslib as ctypeslib + return ctypeslib + elif attr == "exceptions": + import numpy.exceptions as exceptions + return exceptions + elif attr == "testing": + import numpy.testing as testing + return testing + elif attr == "matlib": + import numpy.matlib as matlib + return matlib + elif attr == "f2py": + import numpy.f2py as f2py + return f2py + elif attr == "typing": + import numpy.typing as typing + return typing + elif attr == "rec": + import numpy.rec as rec + return rec + elif attr == "char": + import numpy.char as char + return char + elif attr == "array_api": + raise AttributeError("`numpy.array_api` is not available from " + "numpy 2.0 onwards", name=None) + elif attr == "core": + import numpy.core as core + return core + elif attr == "strings": + import numpy.strings as strings + return strings + elif attr == "distutils": + if 'distutils' in __numpy_submodules__: + import numpy.distutils as distutils + return distutils + else: + raise AttributeError("`numpy.distutils` is not available from " + "Python 3.12 onwards", name=None) + + if attr in __future_scalars__: + # And future warnings for those that will change, but also give + # the AttributeError + warnings.warn( + f"In the future `np.{attr}` will be defined as the " + "corresponding NumPy scalar.", FutureWarning, stacklevel=2) + + if attr in __former_attrs__: + raise AttributeError(__former_attrs__[attr], name=None) + + if attr in __expired_attributes__: + raise AttributeError( + f"`np.{attr}` was removed in the NumPy 2.0 release. " + f"{__expired_attributes__[attr]}", + name=None + ) + + if attr == "chararray": + warnings.warn( + "`np.chararray` is deprecated and will be removed from " + "the main namespace in the future. 
Use an array with a string " + "or bytes dtype instead.", DeprecationWarning, stacklevel=2) + import numpy.char as char + return char.chararray + + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") + + def __dir__(): + public_symbols = ( + globals().keys() | __numpy_submodules__ + ) + public_symbols -= { + "matrixlib", "matlib", "tests", "conftest", "version", + "distutils", "array_api" + } + return list(public_symbols) + + # Pytest testing + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + def _sanity_check(): + """ + Quick sanity checks for common bugs caused by environment. + There are some cases e.g. with wrong BLAS ABI that cause wrong + results under specific runtime conditions that are not necessarily + achieved during test suite runs, and it is useful to catch those early. + + See https://github.com/numpy/numpy/issues/8577 and other + similar bug reports. + + """ + try: + x = ones(2, dtype=float32) + if not abs(x.dot(x) - float32(2.0)) < 1e-5: + raise AssertionError + except AssertionError: + msg = ("The current Numpy installation ({!r}) fails to " + "pass simple sanity checks. This can be caused for example " + "by incorrect BLAS library being linked in, or by mixing " + "package managers (pip, conda, apt, ...). Search closed " + "numpy issues for similar problems.") + raise RuntimeError(msg.format(__file__)) from None + + _sanity_check() + del _sanity_check + + def _mac_os_check(): + """ + Quick Sanity check for Mac OS look for accelerate build bugs. + Testing numpy polyfit calls init_dgelsd(LAPACK) + """ + try: + c = array([3., 2., 1.]) + x = linspace(0, 2, 5) + y = polyval(c, x) + _ = polyfit(x, y, 2, cov=True) + except ValueError: + pass + + if sys.platform == "darwin": + from . import exceptions + with warnings.catch_warnings(record=True) as w: + _mac_os_check() + # Throw runtime error, if the test failed + # Check for warning and report the error_message + if len(w) > 0: + for _wn in w: + if _wn.category is exceptions.RankWarning: + # Ignore other warnings, they may not be relevant (see gh-25433) + error_message = ( + f"{_wn.category.__name__}: {_wn.message}" + ) + msg = ( + "Polyfit sanity test emitted a warning, most likely due " + "to using a buggy Accelerate backend." + "\nIf you compiled yourself, more information is available at:" # noqa: E501 + "\nhttps://numpy.org/devdocs/building/index.html" + "\nOtherwise report this to the vendor " + f"that provided NumPy.\n\n{error_message}\n") + raise RuntimeError(msg) + del _wn + del w + del _mac_os_check + + def blas_fpe_check(): + # Check if BLAS adds spurious FPEs, mostly seen on M4 arms with Accelerate. + with errstate(all='raise'): + x = ones((20, 20)) + try: + x @ x + except FloatingPointError: + res = _core._multiarray_umath._blas_supports_fpe(False) + if res: # res was not modified (hardcoded to True for now) + warnings.warn( + "Spurious warnings given by blas but suppression not " + "set up on this platform. Please open a NumPy issue.", + UserWarning, stacklevel=2) + + blas_fpe_check() + del blas_fpe_check + + def hugepage_setup(): + """ + We usually use madvise hugepages support, but on some old kernels it + is slow and thus better avoided. 
+        Specifically, kernel version 4.6 shipped a fix that probably
+        resolved this:
+        https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
+        """
+        use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
+        if sys.platform == "linux" and use_hugepage is None:
+            # If there is an issue with parsing the kernel version,
+            # set use_hugepage to 0. LooseVersion would parse the kernel
+            # version more robustly, but it is avoided since it would
+            # increase the import time.
+            # See: #16679 for related discussion.
+            try:
+                use_hugepage = 1
+                kernel_version = os.uname().release.split(".")[:2]
+                kernel_version = tuple(int(v) for v in kernel_version)
+                if kernel_version < (4, 6):
+                    use_hugepage = 0
+            except ValueError:
+                use_hugepage = 0
+        elif use_hugepage is None:
+            # This is not Linux, so it should not matter; just enable anyway
+            use_hugepage = 1
+        else:
+            use_hugepage = int(use_hugepage)
+        return use_hugepage
+
+    # Note that this will currently only make a difference on Linux
+    _core.multiarray._set_madvise_hugepage(hugepage_setup())
+    del hugepage_setup
+
+    # Give a warning if NumPy is reloaded or imported on a sub-interpreter.
+    # We do this from Python, since the C module may not be reloaded, and
+    # it keeps things tidier.
+    _core.multiarray._multiarray_umath._reload_guard()
+
+    # TODO: Remove the environment variable entirely now that it is "weak"
+    if (os.environ.get("NPY_PROMOTION_STATE", "weak") != "weak"):
+        warnings.warn(
+            "NPY_PROMOTION_STATE was a temporary feature for the NumPy 2.0 "
+            "transition and is ignored after NumPy 2.2.",
+            UserWarning, stacklevel=2)
+
+    # Tell PyInstaller where to find hook-numpy.py
+    def _pyinstaller_hooks_dir():
+        from pathlib import Path
+        return [str(Path(__file__).with_name("_pyinstaller").resolve())]
+
+
+# Remove symbols imported for internal use
+del os, sys, warnings
diff --git a/py311/lib/python3.11/site-packages/numpy/__init__.pyi b/py311/lib/python3.11/site-packages/numpy/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..28bbe31ca98a35eb6e576af2ca959d1cb4a148b4
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/numpy/__init__.pyi
@@ -0,0 +1,6202 @@
+# ruff: noqa: I001
+import builtins
+import sys
+import mmap
+import ctypes as ct
+import array as _array
+import datetime as dt
+import inspect
+from abc import abstractmethod
+from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias
+from decimal import Decimal
+from fractions import Fraction
+from uuid import UUID
+
+import numpy as np
+from numpy.__config__ import show as show_config
+from numpy._pytesttester import PytestTester
+from numpy._core._internal import _ctypes
+
+from numpy._typing import (  # type: ignore[deprecated]
+    # Arrays
+    ArrayLike,
+    NDArray,
+    _SupportsArray,
+    _NestedSequence,
+    _ArrayLike,
+    _ArrayLikeBool_co,
+    _ArrayLikeUInt_co,
+    _ArrayLikeInt,
+    _ArrayLikeInt_co,
+    _ArrayLikeFloat64_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex128_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeNumber_co,
+    _ArrayLikeObject_co,
+    _ArrayLikeBytes_co,
+    _ArrayLikeStr_co,
+    _ArrayLikeString_co,
+    _ArrayLikeTD64_co,
+    _ArrayLikeDT64_co,
+    # DTypes
+    DTypeLike,
+    _DTypeLike,
+    _DTypeLikeVoid,
+    _VoidDTypeLike,
+    # Shapes
+    _AnyShape,
+    _Shape,
+    _ShapeLike,
+    # Scalars
+    _CharLike_co,
+    _IntLike_co,
+    _FloatLike_co,
+    _TD64Like_co,
+    _NumberLike_co,
+    _ScalarLike_co,
+    # `number` precision
+    NBitBase,
+    # NOTE: Do not remove the extended precision bit-types even if seemingly unused;
+    # they're used by the
mypy plugin + _128Bit, + _96Bit, + _64Bit, + _32Bit, + _16Bit, + _8Bit, + _NBitByte, + _NBitShort, + _NBitIntC, + _NBitIntP, + _NBitLong, + _NBitLongLong, + _NBitHalf, + _NBitSingle, + _NBitDouble, + _NBitLongDouble, + # Character codes + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _LongCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _ULongCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, + _StringCodes, + _UnsignedIntegerCodes, + _SignedIntegerCodes, + _IntegerCodes, + _FloatingCodes, + _ComplexFloatingCodes, + _InexactCodes, + _CharacterCodes, + # Ufuncs + _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1, +) + +# NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform +from numpy._typing._extended_precision import ( + float96, + float128, + complex192, + complex256, +) + +from numpy._array_api_info import __array_namespace_info__ + +from collections.abc import ( + Callable, + Iterable, + Iterator, + Mapping, + Sequence, +) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer as _SupportsBuffer +else: + _SupportsBuffer: TypeAlias = ( + bytes + | bytearray + | memoryview + | _array.array[Any] + | mmap.mmap + | NDArray[Any] + | generic + ) + +from typing import ( + Any, + ClassVar, + Final, + Generic, + Literal as L, + LiteralString, + Never, + NoReturn, + Protocol, + Self, + SupportsComplex, + SupportsFloat, + SupportsInt, + SupportsIndex, + TypeAlias, + TypedDict, + final, + overload, + type_check_only, +) + +# NOTE: `typing_extensions` and `_typeshed` are always available in `.pyi` stubs, even +# if not available at runtime. 
This is because the `typeshed` stubs for the standard +# library include `typing_extensions` stubs: +# https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi +from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite +from typing_extensions import CapsuleType, TypeVar, deprecated, override + +from numpy import ( + char, + core, + ctypeslib, + dtypes, + exceptions, + f2py, + fft, + lib, + linalg, + ma, + polynomial, + random, + rec, + strings, + testing, + typing, +) + +# available through `__getattr__`, but not in `__all__` or `__dir__` +from numpy import ( + __config__ as __config__, + matlib as matlib, + matrixlib as matrixlib, + version as version, +) +if sys.version_info < (3, 12): + from numpy import distutils as distutils + +from numpy._core.records import ( + record, + recarray, +) + +from numpy._core.function_base import ( + linspace, + logspace, + geomspace, +) + +from numpy._core.fromnumeric import ( + take, + reshape, + choose, + repeat, + put, + swapaxes, + transpose, + matrix_transpose, + partition, + argpartition, + sort, + argsort, + argmax, + argmin, + searchsorted, + resize, + squeeze, + diagonal, + trace, + ravel, + nonzero, + shape, + compress, + clip, + sum, + all, + any, + cumsum, + cumulative_sum, + ptp, + max, + min, + amax, + amin, + prod, + cumprod, + cumulative_prod, + ndim, + size, + around, + round, + mean, + std, + var, +) + +from numpy._core._asarray import ( + require, +) + +from numpy._core._type_aliases import ( + sctypeDict, +) + +from numpy._core._ufunc_config import ( + seterr, + geterr, + setbufsize, + getbufsize, + seterrcall, + geterrcall, + errstate, +) + +from numpy._core.arrayprint import ( + set_printoptions, + get_printoptions, + array2string, + format_float_scientific, + format_float_positional, + array_repr, + array_str, + printoptions, +) + +from numpy._core.einsumfunc import ( + einsum, + einsum_path, +) +from numpy._core.getlimits import ( + finfo, + iinfo, +) + +from numpy._core.multiarray import ( + array, + empty_like, + empty, + zeros, + concatenate, + inner, + where, + lexsort, + can_cast, + min_scalar_type, + result_type, + dot, + vdot, + bincount, + copyto, + putmask, + packbits, + unpackbits, + shares_memory, + may_share_memory, + asarray, + asanyarray, + ascontiguousarray, + asfortranarray, + arange, + busday_count, + busday_offset, + datetime_as_string, + datetime_data, + frombuffer, + fromfile, + fromiter, + is_busday, + promote_types, + fromstring, + frompyfunc, + nested_iters, + flagsobj, +) + +from numpy._core.numeric import ( + zeros_like, + ones, + ones_like, + full, + full_like, + count_nonzero, + isfortran, + argwhere, + flatnonzero, + correlate, + convolve, + outer, + tensordot, + roll, + rollaxis, + moveaxis, + cross, + indices, + fromfunction, + isscalar, + binary_repr, + base_repr, + identity, + allclose, + isclose, + array_equal, + array_equiv, + astype, +) + +from numpy._core.numerictypes import ( + isdtype, + issubdtype, + ScalarType, + typecodes, +) + +from numpy._core.shape_base import ( + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + vstack, + unstack, +) + +from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ +from ._globals import _CopyMode as _CopyMode +from ._globals import _NoValue as _NoValue, _NoValueType + +from numpy.lib import ( + scimath as emath, +) + +from numpy.lib._arraypad_impl import ( + pad, +) + +from numpy.lib._arraysetops_impl import ( + ediff1d, + intersect1d, + isin, + 
setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, +) + +from numpy.lib._function_base_impl import ( # type: ignore[deprecated] + select, + piecewise, + trim_zeros, + copy, + iterable, + percentile, + diff, + gradient, + angle, + unwrap, + sort_complex, + flip, + rot90, + extract, + place, + asarray_chkfinite, + average, + digitize, + cov, + corrcoef, + median, + sinc, + hamming, + hanning, + bartlett, + blackman, + kaiser, + trapezoid, + i0, + meshgrid, + delete, + insert, + append, + interp, + quantile, + vectorize, +) + +from numpy.lib._histograms_impl import ( + histogram_bin_edges, + histogram, + histogramdd, +) + +from numpy.lib._index_tricks_impl import ( + ndenumerate, + ndindex, + ravel_multi_index, + unravel_index, + mgrid, + ogrid, + r_, + c_, + s_, + index_exp, + ix_, + fill_diagonal, + diag_indices, + diag_indices_from, +) + +from numpy.lib._nanfunctions_impl import ( + nansum, + nanmax, + nanmin, + nanargmax, + nanargmin, + nanmean, + nanmedian, + nanpercentile, + nanvar, + nanstd, + nanprod, + nancumsum, + nancumprod, + nanquantile, +) + +from numpy.lib._npyio_impl import ( + savetxt, + loadtxt, + genfromtxt, + load, + save, + savez, + savez_compressed, + fromregex, +) + +from numpy.lib._polynomial_impl import ( + poly, + roots, + polyint, + polyder, + polyadd, + polysub, + polymul, + polydiv, + polyval, + polyfit, +) + +from numpy.lib._shape_base_impl import ( # type: ignore[deprecated] + column_stack, + row_stack, + dstack, + array_split, + split, + hsplit, + vsplit, + dsplit, + apply_over_axes, + expand_dims, + apply_along_axis, + kron, + tile, + take_along_axis, + put_along_axis, +) + +from numpy.lib._stride_tricks_impl import ( + broadcast_to, + broadcast_arrays, + broadcast_shapes, +) + +from numpy.lib._twodim_base_impl import ( + diag, + diagflat, + eye, + fliplr, + flipud, + tri, + triu, + tril, + vander, + histogram2d, + mask_indices, + tril_indices, + tril_indices_from, + triu_indices, + triu_indices_from, +) + +from numpy.lib._type_check_impl import ( + mintypecode, + real, + imag, + iscomplex, + isreal, + iscomplexobj, + isrealobj, + nan_to_num, + real_if_close, + typename, + common_type, +) + +from numpy.lib._ufunclike_impl import ( + fix, + isposinf, + isneginf, +) + +from numpy.lib._utils_impl import ( + get_include, + info, + show_runtime, +) + +from numpy.matrixlib import ( + asmatrix, + bmat, + matrix, +) + +__all__ = [ # noqa: RUF022 + # __numpy_submodules__ + "char", "core", "ctypeslib", "dtypes", "exceptions", "f2py", "fft", "lib", "linalg", + "ma", "polynomial", "random", "rec", "strings", "test", "testing", "typing", + + # _core.__all__ + "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", "bitwise_invert", + "bitwise_left_shift", "bitwise_right_shift", "concat", "pow", "permute_dims", + "memmap", "sctypeDict", "record", "recarray", + + # _core.numeric.__all__ + "newaxis", "ndarray", "flatiter", "nditer", "nested_iters", "ufunc", "arange", + "array", "asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "zeros", + "count_nonzero", "empty", "broadcast", "dtype", "fromstring", "fromfile", + "frombuffer", "from_dlpack", "where", "argwhere", "copyto", "concatenate", + "lexsort", "astype", "can_cast", "promote_types", "min_scalar_type", "result_type", + "isfortran", "empty_like", "zeros_like", "ones_like", "correlate", "convolve", + "inner", "dot", "outer", "vdot", "roll", "rollaxis", "moveaxis", "cross", + "tensordot", "little_endian", "fromiter", "array_equal", 
"array_equiv", "indices", + "fromfunction", "isclose", "isscalar", "binary_repr", "base_repr", "ones", + "identity", "allclose", "putmask", "flatnonzero", "inf", "nan", "False_", "True_", + "bitwise_not", "full", "full_like", "matmul", "vecdot", "vecmat", + "shares_memory", "may_share_memory", + "all", "amax", "amin", "any", "argmax", "argmin", "argpartition", "argsort", + "around", "choose", "clip", "compress", "cumprod", "cumsum", "cumulative_prod", + "cumulative_sum", "diagonal", "mean", "max", "min", "matrix_transpose", "ndim", + "nonzero", "partition", "prod", "ptp", "put", "ravel", "repeat", "reshape", + "resize", "round", "searchsorted", "shape", "size", "sort", "squeeze", "std", "sum", + "swapaxes", "take", "trace", "transpose", "var", + "absolute", "add", "arccos", "arccosh", "arcsin", "arcsinh", "arctan", "arctan2", + "arctanh", "bitwise_and", "bitwise_or", "bitwise_xor", "cbrt", "ceil", "conj", + "conjugate", "copysign", "cos", "cosh", "bitwise_count", "deg2rad", "degrees", + "divide", "divmod", "e", "equal", "euler_gamma", "exp", "exp2", "expm1", "fabs", + "floor", "floor_divide", "float_power", "fmax", "fmin", "fmod", "frexp", + "frompyfunc", "gcd", "greater", "greater_equal", "heaviside", "hypot", "invert", + "isfinite", "isinf", "isnan", "isnat", "lcm", "ldexp", "left_shift", "less", + "less_equal", "log", "log10", "log1p", "log2", "logaddexp", "logaddexp2", + "logical_and", "logical_not", "logical_or", "logical_xor", "matvec", "maximum", "minimum", + "mod", "modf", "multiply", "negative", "nextafter", "not_equal", "pi", "positive", + "power", "rad2deg", "radians", "reciprocal", "remainder", "right_shift", "rint", + "sign", "signbit", "sin", "sinh", "spacing", "sqrt", "square", "subtract", "tan", + "tanh", "true_divide", "trunc", "ScalarType", "typecodes", "issubdtype", + "datetime_data", "datetime_as_string", "busday_offset", "busday_count", "is_busday", + "busdaycalendar", "isdtype", + "complexfloating", "character", "unsignedinteger", "inexact", "generic", "floating", + "integer", "signedinteger", "number", "flexible", "bool", "float16", "float32", + "float64", "longdouble", "complex64", "complex128", "clongdouble", + "bytes_", "str_", "void", "object_", "datetime64", "timedelta64", "int8", "byte", + "uint8", "ubyte", "int16", "short", "uint16", "ushort", "int32", "intc", "uint32", + "uintc", "int64", "long", "uint64", "ulong", "longlong", "ulonglong", "intp", + "uintp", "double", "cdouble", "single", "csingle", "half", "bool_", "int_", "uint", + "float96", "float128", "complex192", "complex256", + "array2string", "array_str", "array_repr", "set_printoptions", "get_printoptions", + "printoptions", "format_float_positional", "format_float_scientific", "require", + "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", + "errstate", + # _core.function_base.__all__ + "logspace", "linspace", "geomspace", + # _core.getlimits.__all__ + "finfo", "iinfo", + # _core.shape_base.__all__ + "atleast_1d", "atleast_2d", "atleast_3d", "block", "hstack", "stack", "unstack", + "vstack", + # _core.einsumfunc.__all__ + "einsum", "einsum_path", + # matrixlib.__all__ + "matrix", "bmat", "asmatrix", + # lib._histograms_impl.__all__ + "histogram", "histogramdd", "histogram_bin_edges", + # lib._nanfunctions_impl.__all__ + "nansum", "nanmax", "nanmin", "nanargmax", "nanargmin", "nanmean", "nanmedian", + "nanpercentile", "nanvar", "nanstd", "nanprod", "nancumsum", "nancumprod", + "nanquantile", + # lib._function_base_impl.__all__ + "select", "piecewise", "trim_zeros", "copy", 
"iterable", "percentile", "diff", + "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place", + "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov", + "corrcoef", "median", "sinc", "hamming", "hanning", "bartlett", "blackman", + "kaiser", "trapezoid", "i0", "meshgrid", "delete", "insert", "append", + "interp", "quantile", + # lib._twodim_base_impl.__all__ + "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander", + "histogram2d", "mask_indices", "tril_indices", "tril_indices_from", "triu_indices", + "triu_indices_from", + # lib._shape_base_impl.__all__ + "column_stack", "dstack", "array_split", "split", "hsplit", "vsplit", "dsplit", + "apply_over_axes", "expand_dims", "apply_along_axis", "kron", "tile", + "take_along_axis", "put_along_axis", "row_stack", + # lib._type_check_impl.__all__ + "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", + "real_if_close", "typename", "mintypecode", "common_type", + # lib._arraysetops_impl.__all__ + "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", + "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values", + # lib._ufunclike_impl.__all__ + "fix", "isneginf", "isposinf", + # lib._arraypad_impl.__all__ + "pad", + # lib._utils_impl.__all__ + "get_include", "info", "show_runtime", + # lib._stride_tricks_impl.__all__ + "broadcast_to", "broadcast_arrays", "broadcast_shapes", + # lib._polynomial_impl.__all__ + "poly", "roots", "polyint", "polyder", "polyadd", "polysub", "polymul", "polydiv", + "polyval", "poly1d", "polyfit", + # lib._npyio_impl.__all__ + "savetxt", "loadtxt", "genfromtxt", "load", "save", "savez", "savez_compressed", + "packbits", "unpackbits", "fromregex", + # lib._index_tricks_impl.__all__ + "ravel_multi_index", "unravel_index", "mgrid", "ogrid", "r_", "c_", "s_", + "index_exp", "ix_", "ndenumerate", "ndindex", "fill_diagonal", "diag_indices", + "diag_indices_from", + + # __init__.__all__ + "emath", "show_config", "__version__", "__array_namespace_info__", +] # fmt: skip + +### Constrained types (for internal use only) +# Only use these for functions; never as generic type parameter. 
+
+_AnyStr = TypeVar("_AnyStr", LiteralString, str, bytes)
+_AnyShapeT = TypeVar(
+    "_AnyShapeT",
+    tuple[()],  # 0-d
+    tuple[int],  # 1-d
+    tuple[int, int],  # 2-d
+    tuple[int, int, int],  # 3-d
+    tuple[int, int, int, int],  # 4-d
+    tuple[int, int, int, int, int],  # 5-d
+    tuple[int, int, int, int, int, int],  # 6-d
+    tuple[int, int, int, int, int, int, int],  # 7-d
+    tuple[int, int, int, int, int, int, int, int],  # 8-d
+    tuple[int, ...],  # N-d
+)
+_AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None)
+_AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None)
+_AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date | int | None)
+_AnyDate = TypeVar("_AnyDate", dt.date, dt.datetime)
+_AnyDateOrTime = TypeVar("_AnyDateOrTime", dt.date, dt.datetime, dt.timedelta)
+
+### Type parameters (for internal use only)
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_T_contra = TypeVar("_T_contra", contravariant=True)
+_RealT_co = TypeVar("_RealT_co", covariant=True)
+_ImagT_co = TypeVar("_ImagT_co", covariant=True)
+
+_DTypeT = TypeVar("_DTypeT", bound=dtype)
+_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True)
+_FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible])
+
+_ArrayT = TypeVar("_ArrayT", bound=ndarray)
+_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True)
+_BoolArrayT = TypeVar("_BoolArrayT", bound=NDArray[np.bool])
+_IntegerArrayT = TypeVar("_IntegerArrayT", bound=NDArray[integer])
+_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[np.bool | integer | object_])
+_FloatingArrayT = TypeVar("_FloatingArrayT", bound=NDArray[floating])
+_FloatingTimedeltaArrayT = TypeVar("_FloatingTimedeltaArrayT", bound=NDArray[floating | timedelta64])
+_ComplexFloatingArrayT = TypeVar("_ComplexFloatingArrayT", bound=NDArray[complexfloating])
+_InexactArrayT = TypeVar("_InexactArrayT", bound=NDArray[inexact])
+_InexactTimedeltaArrayT = TypeVar("_InexactTimedeltaArrayT", bound=NDArray[inexact | timedelta64])
+_NumberArrayT = TypeVar("_NumberArrayT", bound=NDArray[number])
+_NumberCharacterArrayT = TypeVar("_NumberCharacterArrayT", bound=ndarray[Any, dtype[number | character] | dtypes.StringDType])
+_TimedeltaArrayT = TypeVar("_TimedeltaArrayT", bound=NDArray[timedelta64])
+_TimeArrayT = TypeVar("_TimeArrayT", bound=NDArray[datetime64 | timedelta64])
+_ObjectArrayT = TypeVar("_ObjectArrayT", bound=NDArray[object_])
+_BytesArrayT = TypeVar("_BytesArrayT", bound=NDArray[bytes_])
+_StringArrayT = TypeVar("_StringArrayT", bound=ndarray[Any, dtype[str_] | dtypes.StringDType])
+_RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_])
+_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_])
+
+_ShapeT = TypeVar("_ShapeT", bound=_Shape)
+_Shape1T = TypeVar("_Shape1T", bound=tuple[int, *tuple[int, ...]])
+_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True)
+_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True)
+_1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]])  # (1,) | (1, 1) | (1, 1, 1) | ...
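+
+# Illustrative sketch (not part of the stubs): the shape TypeVars above let
+# shape-preserving operations keep a precise shape type, e.g.
+#
+#     x: np.ndarray[tuple[int, int], np.dtype[np.float64]]
+#     reveal_type(x.copy())  # same ndarray[tuple[int, int], dtype[float64]]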
+ +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) +_NumberT = TypeVar("_NumberT", bound=number) +_InexactT = TypeVar("_InexactT", bound=inexact) +_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) +_IntegerT = TypeVar("_IntegerT", bound=integer) +_NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) + +_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) # pyright: ignore[reportDeprecated] + +_ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) +_BoolItemT = TypeVar("_BoolItemT", bound=builtins.bool) +_BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True) +_NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True) +_InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True) +_FlexibleItemT_co = TypeVar( + "_FlexibleItemT_co", + bound=_CharLike_co | tuple[Any, ...], + default=_CharLike_co | tuple[Any, ...], + covariant=True, +) +_CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True) +_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=dt.timedelta | int | None, covariant=True) +_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=dt.date | int | None, covariant=True) +_TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool]) + +### Type Aliases (for internal use only) + +_Falsy: TypeAlias = L[False, 0] | np.bool[L[False]] +_Truthy: TypeAlias = L[True, 1] | np.bool[L[True]] + +_1D: TypeAlias = tuple[int] +_2D: TypeAlias = tuple[int, int] +_2Tuple: TypeAlias = tuple[_T, _T] + +_ArrayUInt_co: TypeAlias = NDArray[unsignedinteger | np.bool] +_ArrayInt_co: TypeAlias = NDArray[integer | np.bool] +_ArrayFloat64_co: TypeAlias = NDArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_ArrayFloat_co: TypeAlias = NDArray[floating | integer | np.bool] +_ArrayComplex128_co: TypeAlias = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_ArrayComplex_co: TypeAlias = NDArray[inexact | integer | np.bool] +_ArrayNumber_co: TypeAlias = NDArray[number | np.bool] +_ArrayTD64_co: TypeAlias = NDArray[timedelta64 | integer | np.bool] + +_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer | np.bool +_Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool +_Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co + +_ToIndex: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None +_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] 
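+
+# For example (illustrative, not from the stubs), every index expression below
+# is covered by `_ToIndices`:
+#
+#     a[3]; a[1:2]; a[...]; a[None]; a[[0, 2]]; a[1:, None, ..., [0, 1]]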
+ +_UnsignedIntegerCType: TypeAlias = type[ + ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64 + | ct.c_ushort | ct.c_uint | ct.c_ulong | ct.c_ulonglong + | ct.c_size_t | ct.c_void_p +] # fmt: skip +_SignedIntegerCType: TypeAlias = type[ + ct.c_int8 | ct.c_int16 | ct.c_int32 | ct.c_int64 + | ct.c_short | ct.c_int | ct.c_long | ct.c_longlong + | ct.c_ssize_t +] # fmt: skip +_FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble] +_IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType + +# some commonly used builtin types that are known to result in a +# `dtype[object_]`, when their *type* is passed to the `dtype` constructor +# NOTE: `builtins.object` should not be included here +_BuiltinObjectLike: TypeAlias = ( + slice | Decimal | Fraction | UUID + | dt.date | dt.time | dt.timedelta | dt.tzinfo + | tuple[Any, ...] | list[Any] | set[Any] | frozenset[Any] | dict[Any, Any] +) # fmt: skip + +# Introduce an alias for `dtype` to avoid naming conflicts. +_dtype: TypeAlias = dtype[_ScalarT] + +_ByteOrderChar: TypeAlias = L["<", ">", "=", "|"] +# can be anything, is case-insensitive, and only the first character matters +_ByteOrder: TypeAlias = L[ + "S", # swap the current order (default) + "<", "L", "little", # little-endian + ">", "B", "big", # big endian + "=", "N", "native", # native order + "|", "I", # ignore +] # fmt: skip +_DTypeKind: TypeAlias = L[ + "b", # boolean + "i", # signed integer + "u", # unsigned integer + "f", # floating-point + "c", # complex floating-point + "m", # timedelta64 + "M", # datetime64 + "O", # python object + "S", # byte-string (fixed-width) + "U", # unicode-string (fixed-width) + "V", # void + "T", # unicode-string (variable-width) +] +_DTypeChar: TypeAlias = L[ + "?", # bool + "b", # byte + "B", # ubyte + "h", # short + "H", # ushort + "i", # intc + "I", # uintc + "l", # long + "L", # ulong + "q", # longlong + "Q", # ulonglong + "e", # half + "f", # single + "d", # double + "g", # longdouble + "F", # csingle + "D", # cdouble + "G", # clongdouble + "O", # object + "S", # bytes_ (S0) + "a", # bytes_ (deprecated) + "U", # str_ + "V", # void + "M", # datetime64 + "m", # timedelta64 + "c", # bytes_ (S1) + "T", # StringDType +] +_DTypeNum: TypeAlias = L[ + 0, # bool + 1, # byte + 2, # ubyte + 3, # short + 4, # ushort + 5, # intc + 6, # uintc + 7, # long + 8, # ulong + 9, # longlong + 10, # ulonglong + 23, # half + 11, # single + 12, # double + 13, # longdouble + 14, # csingle + 15, # cdouble + 16, # clongdouble + 17, # object + 18, # bytes_ + 19, # str_ + 20, # void + 21, # datetime64 + 22, # timedelta64 + 25, # no type + 256, # user-defined + 2056, # StringDType +] +_DTypeBuiltinKind: TypeAlias = L[0, 1, 2] + +_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12", "2024.12"] + +_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] + +_OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None +_OrderACF: TypeAlias = L["A", "C", "F"] | None +_OrderCF: TypeAlias = L["C", "F"] | None # noqa: PYI047 + +_ModeKind: TypeAlias = L["raise", "wrap", "clip"] +_PartitionKind: TypeAlias = L["introselect"] +# in practice, only the first case-insensitive character is considered (so e.g. +# "QuantumSort3000" will be interpreted as quicksort). 
+_SortKind: TypeAlias = L[ + "Q", "quick", "quicksort", + "M", "merge", "mergesort", + "H", "heap", "heapsort", + "S", "stable", "stablesort", +] +_SortSide: TypeAlias = L["left", "right"] + +_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co +_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None +_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None + +_NDIterFlagsKind: TypeAlias = L[ + "buffered", + "c_index", + "copy_if_overlap", + "common_dtype", + "delay_bufalloc", + "external_loop", + "f_index", + "grow_inner", "growinner", + "multi_index", + "ranged", + "refs_ok", + "reduce_ok", + "zerosize_ok", +] +_NDIterFlagsOp: TypeAlias = L[ + "aligned", + "allocate", + "arraymask", + "copy", + "config", + "nbo", + "no_subtype", + "no_broadcast", + "overlap_assume_elementwise", + "readonly", + "readwrite", + "updateifcopy", + "virtual", + "writeonly", + "writemasked", +] + +_MemMapModeKind: TypeAlias = L[ + "readonly", "r", + "copyonwrite", "c", + "readwrite", "r+", + "write", "w+", +] + +_DT64Date: TypeAlias = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"] +_DT64Now: TypeAlias = L["NOW", "now", b"NOW", b"now"] +_NaTValue: TypeAlias = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"] + +_MonthUnit: TypeAlias = L["Y", "M", b"Y", b"M"] +_DayUnit: TypeAlias = L["W", "D", b"W", b"D"] +_DateUnit: TypeAlias = L[_MonthUnit, _DayUnit] +_NativeTimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "μs", b"h", b"m", b"s", b"ms", b"us"] +_IntTimeUnit: TypeAlias = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"] +_TimeUnit: TypeAlias = L[_NativeTimeUnit, _IntTimeUnit] +_NativeTD64Unit: TypeAlias = L[_DayUnit, _NativeTimeUnit] +_IntTD64Unit: TypeAlias = L[_MonthUnit, _IntTimeUnit] +_TD64Unit: TypeAlias = L[_DateUnit, _TimeUnit] +_TimeUnitSpec: TypeAlias = _TD64UnitT | tuple[_TD64UnitT, SupportsIndex] + +### TypedDict's (for internal use only) + +@type_check_only +class _FormerAttrsDict(TypedDict): + object: LiteralString + float: LiteralString + complex: LiteralString + str: LiteralString + int: LiteralString + +### Protocols (for internal use only) + +@final +@type_check_only +class _SupportsLT(Protocol): + def __lt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsLE(Protocol): + def __le__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGT(Protocol): + def __gt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGE(Protocol): + def __ge__(self, other: Any, /) -> Any: ... + +@type_check_only +class _SupportsFileMethods(SupportsFlush, Protocol): + # Protocol for representing file-like-objects accepted by `ndarray.tofile` and `fromfile` + def fileno(self) -> SupportsIndex: ... + def tell(self) -> SupportsIndex: ... + def seek(self, offset: int, whence: int, /) -> object: ... + +@type_check_only +class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... + +@type_check_only +class _SupportsDLPack(Protocol[_T_contra]): + def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ... + +@type_check_only +class _HasDType(Protocol[_T_co]): + @property + def dtype(self, /) -> _T_co: ... 
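+
+# Example (illustrative): any object exposing a `dtype` attribute structurally
+# satisfies `_HasDType`; this is what lets `np.dtype(...)` accept an ndarray,
+# since the runtime constructor inspects the `dtype` attribute of its argument:
+#
+#     np.dtype(np.zeros(3))  # -> dtype[float64]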
+
+@type_check_only
+class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+    @property
+    def real(self, /) -> _RealT_co: ...
+    @property
+    def imag(self, /) -> _ImagT_co: ...
+
+@type_check_only
+class _HasTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+    @property
+    def type(self, /) -> type[_HasRealAndImag[_RealT_co, _ImagT_co]]: ...
+
+@type_check_only
+class _HasDTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+    @property
+    def dtype(self, /) -> _HasTypeWithRealAndImag[_RealT_co, _ImagT_co]: ...
+
+@type_check_only
+class _HasDateAttributes(Protocol):
+    # The `datetime64` constructor requires an object with the three attributes
+    # below, and thus supports datetime duck typing
+    @property
+    def day(self) -> int: ...
+    @property
+    def month(self) -> int: ...
+    @property
+    def year(self) -> int: ...
+
+### Mixins (for internal use only)
+
+@type_check_only
+class _RealMixin:
+    @property
+    def real(self) -> Self: ...
+    @property
+    def imag(self) -> Self: ...
+
+@type_check_only
+class _RoundMixin:
+    @overload
+    def __round__(self, /, ndigits: None = None) -> int: ...
+    @overload
+    def __round__(self, /, ndigits: SupportsIndex) -> Self: ...
+
+@type_check_only
+class _IntegralMixin(_RealMixin):
+    @property
+    def numerator(self) -> Self: ...
+    @property
+    def denominator(self) -> L[1]: ...
+
+    def is_integer(self, /) -> L[True]: ...
+
+### Public API
+
+__version__: Final[LiteralString] = ...
+
+e: Final[float] = ...
+euler_gamma: Final[float] = ...
+pi: Final[float] = ...
+inf: Final[float] = ...
+nan: Final[float] = ...
+little_endian: Final[builtins.bool] = ...
+False_: Final[np.bool[L[False]]] = ...
+True_: Final[np.bool[L[True]]] = ...
+newaxis: Final[None] = None
+
+# not in __all__
+__NUMPY_SETUP__: Final[L[False]] = False
+__numpy_submodules__: Final[set[LiteralString]] = ...
+__former_attrs__: Final[_FormerAttrsDict] = ...
+__future_scalars__: Final[set[L["bytes", "str", "object"]]] = ...
+__array_api_version__: Final[L["2024.12"]] = "2024.12"
+test: Final[PytestTester] = ...
+
+@type_check_only
+class _DTypeMeta(type):
+    @property
+    def type(cls, /) -> type[generic] | None: ...
+    @property
+    def _abstract(cls, /) -> bool: ...
+    @property
+    def _is_numeric(cls, /) -> bool: ...
+    @property
+    def _parametric(cls, /) -> bool: ...
+    @property
+    def _legacy(cls, /) -> bool: ...
+
+@final
+class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):
+    names: tuple[builtins.str, ...] | None
+    def __hash__(self) -> int: ...
+
+    # `None` results in the default dtype
+    @overload
+    def __new__(
+        cls,
+        dtype: type[float64 | ct.c_double] | _Float64Codes | _DoubleCodes | None,
+        align: builtins.bool = False,
+        copy: builtins.bool = False,
+        *,
+        metadata: dict[builtins.str, Any] = ...
+    ) -> dtype[float64]: ...
+
+    # Overload for `dtype` instances, scalar types, and instances that have a
+    # `dtype: dtype[_ScalarT]` attribute
+    @overload
+    def __new__(
+        cls,
+        dtype: _DTypeLike[_ScalarT],
+        align: builtins.bool = False,
+        copy: builtins.bool = False,
+        *,
+        metadata: dict[builtins.str, Any] = ...,
+    ) -> dtype[_ScalarT]: ...
+
+    # Builtin types
+    #
+    # NOTE: Typecheckers act as if `bool <: int <: float <: complex <: object`,
+    # even though at runtime `int`, `float`, and `complex` aren't subtypes.
+    # This makes it impossible to express e.g. "a float that isn't an int",
+    # since type checkers treat `_: float` like `_: float | int`.
+ # + # For more details, see: + # - https://github.com/numpy/numpy/issues/27032#issuecomment-2278958251 + # - https://typing.readthedocs.io/en/latest/spec/special-types.html#special-cases-for-float-and-complex + @overload + def __new__( + cls, + dtype: type[builtins.bool | np.bool | ct.c_bool] | _BoolCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[np.bool]: ... + @overload + def __new__( + cls, + dtype: type[int], # also accepts `type[builtins.bool]` + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[int_ | np.bool]: ... + @overload + def __new__( + cls, + dtype: type[float], # also accepts `type[int | bool]` + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[float64 | int_ | np.bool]: ... + @overload + def __new__( + cls, + dtype: type[complex], # also accepts `type[float | int | bool]` + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[complex128 | float64 | int_ | np.bool]: ... + @overload + def __new__( + cls, + dtype: type[bytes | ct.c_char] | _BytesCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[bytes_]: ... + @overload + def __new__( + cls, + dtype: type[str] | _StrCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[str_]: ... + # NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to + # be run with the (undocumented) `--disable-memoryview-promotion` flag, + # This will be the default in a future mypy release, see: + # https://github.com/python/mypy/issues/15313 + # Pyright / Pylance requires setting `disableBytesTypePromotions=true`, + # which is the default in strict mode + @overload + def __new__( + cls, + dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[void]: ... + # NOTE: `_: type[object]` would also accept e.g. `type[object | complex]`, + # and is therefore not included here + @overload + def __new__( + cls, + dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[object_]: ... + + # `unsignedinteger` string-based representations and ctypes + @overload + def __new__( + cls, + dtype: _UInt8Codes | _UByteCodes | type[ct.c_uint8 | ct.c_ubyte], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint8]: ... + @overload + def __new__( + cls, + dtype: _UInt16Codes | _UShortCodes | type[ct.c_uint16 | ct.c_ushort], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint16]: ... + @overload + def __new__( + cls, + dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint32]: ... + @overload + def __new__( + cls, + dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint64]: ... 
+ @overload + def __new__( + cls, + dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uintp]: ... + @overload + def __new__( + cls, + dtype: _ULongCodes | type[ct.c_ulong], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[ulong]: ... + + # `signedinteger` string-based representations and ctypes + @overload + def __new__( + cls, + dtype: _Int8Codes | _ByteCodes | type[ct.c_int8 | ct.c_byte], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int8]: ... + @overload + def __new__( + cls, + dtype: _Int16Codes | _ShortCodes | type[ct.c_int16 | ct.c_short], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int16]: ... + @overload + def __new__( + cls, + dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int32]: ... + @overload + def __new__( + cls, + dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int64]: ... + @overload + def __new__( + cls, + dtype: _IntPCodes | type[intp | ct.c_ssize_t], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[intp]: ... + @overload + def __new__( + cls, + dtype: _LongCodes | type[ct.c_long], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[long]: ... + + # `floating` string-based representations and ctypes + @overload + def __new__( + cls, + dtype: _Float16Codes | _HalfCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float16]: ... + @overload + def __new__( + cls, + dtype: _Float32Codes | _SingleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float32]: ... + # float64 codes are covered by overload 1 + @overload + def __new__( + cls, + dtype: _LongDoubleCodes | type[ct.c_longdouble], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[longdouble]: ... + + # `complexfloating` string-based representations + @overload + def __new__( + cls, + dtype: _Complex64Codes | _CSingleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex64]: ... + @overload + def __new__( + cls, + dtype: _Complex128Codes | _CDoubleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex128]: ... + @overload + def __new__( + cls, + dtype: _CLongDoubleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[clongdouble]: ... 
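+
+    # For example (illustrative, not part of the stubs), the overloads above
+    # resolve character codes and ctypes to precise dtypes:
+    #
+    #     np.dtype("float32")   # -> dtype[float32]
+    #     np.dtype("c16")       # -> dtype[complex128]
+    #     np.dtype(ct.c_long)   # -> dtype[long]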
+ + # Miscellaneous string-based representations and ctypes + @overload + def __new__( + cls, + dtype: _TD64Codes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[timedelta64]: ... + @overload + def __new__( + cls, + dtype: _DT64Codes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[datetime64]: ... + + # `StringDType` requires special treatment because it has no scalar type + @overload + def __new__( + cls, + dtype: dtypes.StringDType | _StringCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtypes.StringDType: ... + + # Combined char-codes and ctypes, analogous to the scalar-type hierarchy + @overload + def __new__( + cls, + dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[unsignedinteger]: ... + @overload + def __new__( + cls, + dtype: _SignedIntegerCodes | _SignedIntegerCType, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[signedinteger]: ... + @overload + def __new__( + cls, + dtype: _IntegerCodes | _IntegerCType, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[integer]: ... + @overload + def __new__( + cls, + dtype: _FloatingCodes | _FloatingCType, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[floating]: ... + @overload + def __new__( + cls, + dtype: _ComplexFloatingCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complexfloating]: ... + @overload + def __new__( + cls, + dtype: _InexactCodes | _FloatingCType, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[inexact]: ... + @overload + def __new__( + cls, + dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[character]: ... + + # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... + @overload + def __new__( + cls, + dtype: builtins.str, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype: ... + + # Catch-all overload for object-likes + # NOTE: `object_ | Any` is NOT equivalent to `Any`. It is specified to behave + # like a "sum type" (a.k.a. variant type, discriminated union, or tagged union). + # So the union of a type and `Any` is not the same "union type" that all other + # unions are (by definition). + # https://typing.python.org/en/latest/spec/concepts.html#union-types + @overload + def __new__( + cls, + dtype: type[object], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[object_ | Any]: ... + + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... + + @overload + def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ... + @overload + def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype: ... 
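+
+    # For example (illustrative): per the `__getitem__` overloads above,
+    # structured dtypes support indexing by field name(s) or field position:
+    #
+    #     dt = np.dtype([("x", np.float64), ("y", np.int32)])
+    #     dt["x"]         # -> dtype (float64 at runtime)
+    #     dt[["y", "x"]]  # -> dtype[void], a sub-struct view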
+ + # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes + @overload + def __mul__(self: _DTypeT, value: L[1], /) -> _DTypeT: ... + @overload + def __mul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + @overload + def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ... + + # NOTE: `__rmul__` seems to be broken when used in combination with + # literals as of mypy 0.902. Set the return-type to `dtype` for + # now for non-flexible dtypes. + @overload + def __rmul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + @overload + def __rmul__(self, value: SupportsIndex, /) -> dtype: ... + + def __gt__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __ge__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __lt__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __le__(self, other: DTypeLike | None, /) -> builtins.bool: ... + + # Explicitly defined `__eq__` and `__ne__` to get around mypy's + # `strict_equality` option; even though their signatures are + # identical to their `object`-based counterpart + def __eq__(self, other: Any, /) -> builtins.bool: ... + def __ne__(self, other: Any, /) -> builtins.bool: ... + + @property + def alignment(self) -> int: ... + @property + def base(self) -> dtype: ... + @property + def byteorder(self) -> _ByteOrderChar: ... + @property + def char(self) -> _DTypeChar: ... + @property + def descr(self) -> list[tuple[LiteralString, LiteralString] | tuple[LiteralString, LiteralString, _Shape]]: ... + @property + def fields(self,) -> MappingProxyType[LiteralString, tuple[dtype, int] | tuple[dtype, int, Any]] | None: ... + @property + def flags(self) -> int: ... + @property + def hasobject(self) -> builtins.bool: ... + @property + def isbuiltin(self) -> _DTypeBuiltinKind: ... + @property + def isnative(self) -> builtins.bool: ... + @property + def isalignedstruct(self) -> builtins.bool: ... + @property + def itemsize(self) -> int: ... + @property + def kind(self) -> _DTypeKind: ... + @property + def metadata(self) -> MappingProxyType[builtins.str, Any] | None: ... + @property + def name(self) -> LiteralString: ... + @property + def num(self) -> _DTypeNum: ... + @property + def shape(self) -> _AnyShape: ... + @property + def ndim(self) -> int: ... + @property + def subdtype(self) -> tuple[dtype, _AnyShape] | None: ... + def newbyteorder(self, new_order: _ByteOrder = ..., /) -> Self: ... + @property + def str(self) -> LiteralString: ... + @property + def type(self) -> type[_ScalarT_co]: ... + +@final +class flatiter(Generic[_ArrayT_co]): + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + + @property + def base(self, /) -> _ArrayT_co: ... + @property + def coords(self: flatiter[ndarray[_ShapeT]], /) -> _ShapeT: ... + @property + def index(self, /) -> int: ... + + # iteration + def __len__(self, /) -> int: ... + def __iter__(self, /) -> Self: ... + def __next__(self: flatiter[NDArray[_ScalarT]], /) -> _ScalarT: ... + + # indexing + @overload # nd: _[()] + def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ... + @overload # 0d; _[] + def __getitem__(self: flatiter[NDArray[_ScalarT]], key: int | integer, /) -> _ScalarT: ... + @overload # 1d; _[[*]], _[:], _[...] + def __getitem__( + self: flatiter[ndarray[Any, _DTypeT]], + key: list[int] | slice | EllipsisType | flatiter[NDArray[integer]], + /, + ) -> ndarray[tuple[int], _DTypeT]: ... 
+ @overload # 2d; _[[*[*]]] + def __getitem__( + self: flatiter[ndarray[Any, _DTypeT]], + key: list[list[int]], + /, + ) -> ndarray[tuple[int, int], _DTypeT]: ... + @overload # ?d + def __getitem__( + self: flatiter[ndarray[Any, _DTypeT]], + key: NDArray[integer] | _NestedSequence[int], + /, + ) -> ndarray[_AnyShape, _DTypeT]: ... + + # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any + # type accepted by the relevant underlying `np.generic` constructor, which isn't + # known statically. So we cannot meaningfully annotate the value parameter. + def __setitem__(self, key: slice | EllipsisType | _ArrayLikeInt, val: object, /) -> None: ... + + # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to + # avoid confusion + def __array__( + self: flatiter[ndarray[Any, _DTypeT]], + dtype: None = None, + /, + *, + copy: None = None, + ) -> ndarray[tuple[int], _DTypeT]: ... + + # This returns a flat copy of the underlying array, not of the iterator itself + def copy(self: flatiter[ndarray[Any, _DTypeT]], /) -> ndarray[tuple[int], _DTypeT]: ... + +@type_check_only +class _ArrayOrScalarCommon: + @property + def real(self, /) -> Any: ... + @property + def imag(self, /) -> Any: ... + @property + def T(self) -> Self: ... + @property + def mT(self) -> Self: ... + @property + def data(self) -> memoryview: ... + @property + def flags(self) -> flagsobj: ... + @property + def itemsize(self) -> int: ... + @property + def nbytes(self) -> int: ... + @property + def device(self) -> L["cpu"]: ... + + def __bool__(self, /) -> builtins.bool: ... + def __int__(self, /) -> int: ... + def __float__(self, /) -> float: ... + def __copy__(self) -> Self: ... + def __deepcopy__(self, memo: dict[int, Any] | None, /) -> Self: ... + + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Any, /) -> Any: ... + def __ne__(self, other: Any, /) -> Any: ... + + def copy(self, order: _OrderKACF = ...) -> Self: ... + def dump(self, file: StrOrBytesPath | SupportsWrite[bytes]) -> None: ... + def dumps(self) -> bytes: ... + def tobytes(self, order: _OrderKACF = ...) -> bytes: ... + def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, /, sep: str = "", format: str = "%s") -> None: ... + # generics and 0d arrays return builtin scalars + def tolist(self) -> Any: ... + def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ... + + # NOTE: for `generic`, these two methods don't do anything + def fill(self, /, value: Incomplete) -> None: ... + def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, /, mode: _ModeKind = "raise") -> None: ... + + # NOTE: even on `generic` this seems to work + def setflags( + self, + /, + *, + write: builtins.bool | None = None, + align: builtins.bool | None = None, + uic: builtins.bool | None = None, + ) -> None: ... + + @property + def __array_interface__(self) -> dict[str, Any]: ... + @property + def __array_priority__(self) -> float: ... + @property + def __array_struct__(self) -> CapsuleType: ... # builtins.PyCapsule + def __array_namespace__(self, /, *, api_version: _ArrayAPIVersion | None = None) -> ModuleType: ... + def __setstate__(self, state: tuple[ + SupportsIndex, # version + _ShapeLike, # Shape + _DTypeT_co, # DType + np.bool, # F-continuous + bytes | list[Any], # Data + ], /) -> None: ... + + def conj(self) -> Self: ... + def conjugate(self) -> Self: ... 
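+
+    # Illustrative note (not from upstream): the reduction methods below share
+    # a three-overload pattern for `out`: with out=None a scalar or new array
+    # is produced, while passing an array (positionally or by keyword) makes
+    # the method return that same array type, e.g.
+    #
+    #     buf = np.empty((), dtype=np.intp)
+    #     a.argmax(None, buf)  # -> type(buf)
+    #     a.argmax(out=buf)    # -> type(buf)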
+ + def argsort( + self, + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., + *, + stable: builtins.bool | None = ..., + ) -> NDArray[intp]: ... + + @overload # axis=None (default), out=None (default), keepdims=False (default) + def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... + @overload # axis=index, out=None (default) + def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... + @overload # axis=index, out=ndarray + def argmax(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + @overload + def argmax(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + + @overload # axis=None (default), out=None (default), keepdims=False (default) + def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... + @overload # axis=index, out=None (default) + def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... + @overload # axis=index, out=ndarray + def argmin(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + @overload + def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + + # Keep in sync with `MaskedArray.round` + @overload # out=None (default) + def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... + @overload # out=ndarray + def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + @overload + def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + + @overload # out=None (default) + def choose(self, /, choices: ArrayLike, out: None = None, mode: _ModeKind = "raise") -> NDArray[Any]: ... + @overload # out=ndarray + def choose(self, /, choices: ArrayLike, out: _ArrayT, mode: _ModeKind = "raise") -> _ArrayT: ... + + # TODO: Annotate kwargs with an unpacked `TypedDict` + @overload # out: None (default) + def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload + def clip(self, /, min: None, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload + def clip(self, /, min: None = None, *, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload # out: ndarray + def clip(self, /, min: ArrayLike, max: ArrayLike | None, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + @overload + def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + @overload + def clip(self, /, min: None, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + @overload + def clip(self, /, min: None = None, *, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + + @overload + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None) -> NDArray[Any]: ... + @overload + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT) -> _ArrayT: ... + @overload + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ... 
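+
+    # For example (illustrative): `clip` above requires at least one bound, so
+    # `min=None` is only accepted when `max` is given:
+    #
+    #     a.clip(0)            # lower bound only
+    #     a.clip(None, 1)      # upper bound only
+    #     a.clip(0, 1, out=a)  # -> type(a)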
+ + # Keep in sync with `MaskedArray.cumprod` + @overload # out: None (default) + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... + @overload # out: ndarray + def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + + # Keep in sync with `MaskedArray.cumsum` + @overload # out: None (default) + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... + @overload # out: ndarray + def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + + @overload + def max( + self, + /, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload + def max( + self, + /, + axis: _ShapeLike | None, + out: _ArrayT, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def max( + self, + /, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def min( + self, + /, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload + def min( + self, + /, + axis: _ShapeLike | None, + out: _ArrayT, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def min( + self, + /, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... 
+ + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def mean( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + *, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> Any: ... + @overload + def std( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + *, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> Any: ... + @overload + def var( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + *, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> _ArrayT: ... 
+ @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> _ArrayT: ... + +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + @property + def base(self) -> NDArray[Any] | None: ... + @property + def ndim(self) -> int: ... + @property + def size(self) -> int: ... + @property + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @real.setter + def real(self, value: ArrayLike, /) -> None: ... + @property + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @imag.setter + def imag(self, value: ArrayLike, /) -> None: ... + + def __new__( + cls, + shape: _ShapeLike, + dtype: DTypeLike | None = ..., + buffer: _SupportsBuffer | None = ..., + offset: SupportsIndex = ..., + strides: _ShapeLike | None = ..., + order: _OrderKACF = ..., + ) -> Self: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... + + @overload + def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ... + + def __array_ufunc__( + self, + ufunc: ufunc, + method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + + def __array_function__( + self, + func: Callable[..., Any], + types: Iterable[type], + args: Iterable[Any], + kwargs: Mapping[str, Any], + ) -> Any: ... + + # NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__` + # is a pseudo-abstract method the type has been narrowed down in order to + # grant subclasses a bit more flexibility + def __array_finalize__(self, obj: NDArray[Any] | None, /) -> None: ... + + def __array_wrap__( + self, + array: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[Any, ...], int] | None = ..., + return_scalar: builtins.bool = ..., + /, + ) -> ndarray[_ShapeT, _DTypeT]: ... + + # Keep in sync with `MaskedArray.__getitem__` + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload # can be of any shape + def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co | _AnyShape]: ... + @overload + def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co | _AnyShape, dtype[void]]: ... + + @overload # flexible | object_ | bool + def __setitem__( + self: ndarray[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + key: _ToIndices, + value: object, + /, + ) -> None: ... + @overload # integer + def __setitem__( + self: NDArray[integer], + key: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... 
+ @overload # floating + def __setitem__( + self: NDArray[floating], + key: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... + @overload # complexfloating + def __setitem__( + self: NDArray[complexfloating], + key: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: NDArray[timedelta64], + key: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: NDArray[datetime64], + key: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # void + def __setitem__(self: NDArray[void], key: str | list[str], value: object, /) -> None: ... + @overload # catch-all + def __setitem__(self, key: _ToIndices, value: ArrayLike, /) -> None: ... + + @property + def ctypes(self) -> _ctypes[int]: ... + + # + @property + def shape(self) -> _ShapeT_co: ... + @shape.setter + @deprecated("In-place shape modification will be deprecated in NumPy 2.5.", category=PendingDeprecationWarning) + def shape(self, value: _ShapeLike) -> None: ... + + # + @property + def strides(self) -> _Shape: ... + @strides.setter + @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") + def strides(self, value: _ShapeLike) -> None: ... + + # + def byteswap(self, inplace: builtins.bool = ...) -> Self: ... + @property + def flat(self) -> flatiter[Self]: ... + + @overload # use the same output type as that of the underlying `generic` + def item(self: NDArray[generic[_T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> _T: ... + @overload # special casing for `StringDType`, which has no scalar type + def item( + self: ndarray[Any, dtypes.StringDType], + arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., + /, + *args: SupportsIndex, + ) -> str: ... + + # keep in sync with `ma.MaskedArray.tolist` + @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` + def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ... + @overload + def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ... + @overload + def tolist(self: ndarray[tuple[int], dtype[generic[_T]]], /) -> list[_T]: ... + @overload + def tolist(self: ndarray[tuple[int, int], dtype[generic[_T]]], /) -> list[list[_T]]: ... + @overload + def tolist(self: ndarray[tuple[int, int, int], dtype[generic[_T]]], /) -> list[list[list[_T]]]: ... + @overload + def tolist(self, /) -> Any: ... + + @overload + def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = True) -> None: ... + @overload + def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = True) -> None: ... + + # keep in sync with `ma.MaskedArray.squeeze` + def squeeze( + self, + /, + axis: SupportsIndex | tuple[SupportsIndex, ...] | None = ..., + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + def swapaxes(self, axis1: SupportsIndex, axis2: SupportsIndex, /) -> Self: ... + + @overload + def transpose(self, axes: _ShapeLike | None, /) -> Self: ... + @overload + def transpose(self, /, *axes: SupportsIndex) -> Self: ... + + @overload + def all( + self, + axis: None = None, + out: None = None, + keepdims: L[False, 0] = False, + *, + where: _ArrayLikeBool_co = True + ) -> np.bool: ... 
+ @overload + def all( + self, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> np.bool | NDArray[np.bool]: ... + @overload + def all( + self, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def all( + self, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: SupportsIndex = False, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def any( + self, + axis: None = None, + out: None = None, + keepdims: L[False, 0] = False, + *, + where: _ArrayLikeBool_co = True + ) -> np.bool: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> np.bool | NDArray[np.bool]: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: SupportsIndex = False, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + # + @overload + def partition( + self, + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> None: ... + @overload + def partition( + self: NDArray[void], + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> None: ... + + # + @overload + def argpartition( + self, + kth: _ArrayLikeInt, + /, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> NDArray[intp]: ... + @overload + def argpartition( + self: NDArray[void], + kth: _ArrayLikeInt, + /, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> NDArray[intp]: ... + + # keep in sync with `ma.MaskedArray.diagonal` + def diagonal( + self, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + # 1D + 1D returns a scalar; + # all other with at least 1 non-0D array return an ndarray. + @overload + def dot(self, b: _ScalarLike_co, /, out: None = None) -> NDArray[Any]: ... + @overload + def dot(self, b: ArrayLike, /, out: None = None) -> Any: ... + @overload + def dot(self, b: ArrayLike, /, out: _ArrayT) -> _ArrayT: ... + + # `nonzero()` raises for 0d arrays/generics + def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... + + @overload + def searchsorted( + self, # >= 1D array + v: _ScalarLike_co, # 0D array-like + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, + ) -> intp: ... + @overload + def searchsorted( + self, # >= 1D array + v: ArrayLike, + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, + ) -> NDArray[intp]: ... + + def sort( + self, + /, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: builtins.bool | None = None, + ) -> None: ... 
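The inline comments above `dot`, `nonzero`, and `searchsorted` compress a fair amount of runtime behavior into a line each; a short runnable sketch of the cases those overloads distinguish (illustrative only, not part of the patch):

    import numpy as np

    v = np.array([1.0, 2.0, 3.0])
    m = np.eye(3)

    # 1-D . 1-D collapses to a scalar; with a >= 2-D operand involved the
    # result stays an ndarray
    assert v.dot(v) == 14.0
    assert v.dot(m).shape == (3,)

    # searchsorted: a 0-D `v` yields a single intp, an array-like `v`
    # yields an array of intp
    srt = np.array([1, 3, 5, 7])
    assert srt.searchsorted(4) == 2
    assert srt.searchsorted([0, 6]).tolist() == [0, 3]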
+ + # Keep in sync with `MaskedArray.trace` + @overload + def trace( + self, # >= 2D array + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, + ) -> Any: ... + @overload + def trace( + self, # >= 2D array + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ) -> _ArrayT: ... + @overload + def trace( + self, # >= 2D array + /, + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: _ArrayT, + ) -> _ArrayT: ... + + @overload + def take( + self: NDArray[_ScalarT], + indices: _IntLike_co, + /, + axis: SupportsIndex | None = ..., + out: None = None, + mode: _ModeKind = ..., + ) -> _ScalarT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None = ..., + out: None = None, + mode: _ModeKind = ..., + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None = ..., + *, + out: _ArrayT, + mode: _ModeKind = ..., + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = ..., + ) -> _ArrayT: ... + + # keep in sync with `ma.MaskedArray.repeat` + @overload + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: None = None) -> ndarray[tuple[int], _DTypeT_co]: ... + @overload + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex) -> ndarray[_AnyShape, _DTypeT_co]: ... + + # keep in sync with `ma.MaskedArray.flatten` and `ma.MaskedArray.ravel` + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + + # Keep in sync with `MaskedArray.reshape` + # NOTE: reshape also accepts negative integers, so we can't use integer literals + @overload # (None) + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ... + @overload # (empty_sequence) + def reshape( # type: ignore[overload-overlap] # mypy false positive + self, + shape: Sequence[Never], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[()], _DTypeT_co]: ... + @overload # (() | (int) | (int, int) | ....) # up to 8-d + def reshape( + self, + shape: _AnyShapeT, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_AnyShapeT, _DTypeT_co]: ... + @overload # (index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[int], _DTypeT_co]: ... + @overload # (index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[int, int], _DTypeT_co]: ... + @overload # (index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[int, int, int], _DTypeT_co]: ... 
+ @overload # (index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[int, int, int, int], _DTypeT_co]: ... + @overload # (int, *(index, ...)) + def reshape( + self, + size0: SupportsIndex, + /, + *shape: SupportsIndex, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload # (sequence[index]) + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + @overload + def astype( + self, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @overload + def astype( + self, + dtype: DTypeLike | None, + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> ndarray[_ShapeT_co, dtype]: ... + + # + @overload # () + def view(self, /) -> Self: ... + @overload # (dtype: T) + def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ... + @overload # (dtype: dtype[T]) + def view(self, /, dtype: _DTypeLike[_ScalarT]) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @overload # (type: T) + def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... + @overload # (_: T) + def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... + @overload # (dtype: ?) + def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ... + @overload # (dtype: ?, type: T) + def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... + + def setfield(self, val: ArrayLike, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... + @overload + def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... + @overload + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... + + def __index__(self: NDArray[integer], /) -> int: ... + def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... + + def __len__(self) -> int: ... + def __contains__(self, value: object, /) -> builtins.bool: ... + + # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns + # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. + # This way the bug only occurs for 9-D arrays, which are probably not very common. + @overload + def __iter__( + self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never], Any], / + ) -> Iterator[Any]: ... + @overload # == 1-d & dtype[T \ object_] + def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... + @overload # == 1-d & StringDType + def __iter__(self: ndarray[tuple[int], dtypes.StringDType], /) -> Iterator[str]: ... + @overload # >= 2-d + def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], _DTypeT], /) -> Iterator[ndarray[_AnyShape, _DTypeT]]: ... + @overload # ?-d + def __iter__(self, /) -> Iterator[Any]: ... + + # + @overload + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... 
+ @overload + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # + @overload + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __le__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # + @overload + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # + @overload + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # Unary ops + + # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed + # @overload + # def __abs__(self: ndarray[_ShapeT, dtypes.Complex64DType], /) -> ndarray[_ShapeT, dtypes.Float32DType]: ... + # @overload + # def __abs__(self: ndarray[_ShapeT, dtypes.Complex128DType], /) -> ndarray[_ShapeT, dtypes.Float64DType]: ... + # @overload + # def __abs__(self: ndarray[_ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[_ShapeT, dtypes.LongDoubleDType]: ... + # @overload + # def __abs__(self: ndarray[_ShapeT, dtype[complex128]], /) -> ndarray[_ShapeT, dtype[float64]]: ... + @overload + def __abs__(self: ndarray[_ShapeT, dtype[complexfloating[_NBit]]], /) -> ndarray[_ShapeT, dtype[floating[_NBit]]]: ... 
+    @overload
+    def __abs__(self: _RealArrayT, /) -> _RealArrayT: ...
+
+    def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ...  # noqa: PYI019
+    def __neg__(self: _NumericArrayT, /) -> _NumericArrayT: ...  # noqa: PYI019
+    def __pos__(self: _NumericArrayT, /) -> _NumericArrayT: ...  # noqa: PYI019
+
+    # Binary ops
+
+    # TODO: Support the "1d @ 1d -> scalar" case
+    @overload
+    def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
+    @overload
+    def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+    @overload
+    def __matmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+    @overload
+    def __matmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+    @overload
+    def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+    @overload
+    def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
+    @overload
+    def __matmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
+    @overload
+    def __matmul__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    @overload  # signature equivalent to __matmul__
+    def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
+    @overload
+    def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+    @overload
+    def __rmatmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+    @overload
+    def __rmatmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+    @overload
+    def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+    @overload
+    def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
+    @overload
+    def __rmatmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
+    @overload
+    def __rmatmul__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    @overload
+    def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
+    @overload
+    def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+    @overload
+    def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+    @overload
+    def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+    @overload
+    def __mod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
+    @overload
+    def __mod__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    @overload  # signature equivalent to __mod__
+    def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
+    @overload
+    def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+    @overload
+    def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+    @overload
+    def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+    @overload
+    def __rmod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
+    @overload
+    def __rmod__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    @overload
+    def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ...
+ @overload + def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... + @overload + def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + + @overload # signature equivalent to __divmod__ + def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + @overload + def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... + @overload + def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + + # Keep in sync with `MaskedArray.__add__` + @overload + def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __add__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
+
+    @overload
+    def __add__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+    @overload
+    def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+    @overload
+    def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
+    @overload
+    def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ...
+    @overload
+    def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ...
+    @overload
+    def __add__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ...
+    @overload
+    def __add__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ...
+    @overload
+    def __add__(
+        self: ndarray[Any, dtypes.StringDType],
+        other: _ArrayLikeStr_co | _ArrayLikeString_co,
+        /,
+    ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ...
+    @overload
+    def __add__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    # Keep in sync with `MaskedArray.__radd__`
+    @overload  # signature equivalent to __add__
+    def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+    @overload
+    def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+    @overload
+    def __radd__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+    @overload
+    def __radd__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+    @overload
+    def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+    @overload
+    def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
+    @overload
+    def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ...
+    @overload
+    def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ...
+    @overload
+    def __radd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ...
+    @overload
+    def __radd__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ...
+    @overload
+    def __radd__(
+        self: ndarray[Any, dtypes.StringDType],
+        other: _ArrayLikeStr_co | _ArrayLikeString_co,
+        /,
+    ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ...
+    @overload
+    def __radd__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    # Keep in sync with `MaskedArray.__sub__`
+    @overload
+    def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+    @overload
+    def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ...
+    @overload
+    def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+    @overload
+    def __sub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+    @overload
+    def __sub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+    @overload
+    def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+    @overload
+    def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
+    @overload
+    def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ...
+    @overload
+    def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ...
+    @overload
+    def __sub__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    # Keep in sync with `MaskedArray.__rsub__`
+    @overload
+    def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+    @overload
+    def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ...
+    @overload
+    def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+    @overload
+    def __rsub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+    @overload
+    def __rsub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+    @overload
+    def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+    @overload
+    def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
+    @overload
+    def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ...
+    @overload
+    def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ...
+    @overload
+    def __rsub__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    # Keep in sync with `MaskedArray.__mul__`
+    @overload
+    def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+    @overload
+    def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+    @overload
+    def __mul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+    @overload
+    def __mul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+    @overload
+    def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+    @overload
+    def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
+ @overload + def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __mul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __mul__( + self: ndarray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __mul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__rmul__` + @overload # signature equivalent to __mul__ + def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rmul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rmul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rmul__( + self: ndarray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__truediv__` + @overload + def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __truediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __truediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __truediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... 
+
+    @overload
+    def __truediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+    @overload
+    def __truediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ...
+    @overload
+    def __truediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ...
+    @overload
+    def __truediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ...
+    @overload
+    def __truediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ...
+    @overload
+    def __truediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
+    @overload
+    def __truediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ...
+    @overload
+    def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ...
+    @overload
+    def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ...
+    @overload
+    def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    # Keep in sync with `MaskedArray.__rtruediv__`
+    @overload
+    def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+    @overload
+    def __rtruediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+    @overload
+    def __rtruediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+    @overload
+    def __rtruediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+    @overload
+    def __rtruediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+    @overload
+    def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ...
+    @overload
+    def __rtruediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ...
+    @overload
+    def __rtruediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ...
+    @overload
+    def __rtruediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ...
+    @overload
+    def __rtruediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
+    @overload
+    def __rtruediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ...
+    @overload
+    def __rtruediv__(self: NDArray[integer | floating], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
+    @overload
+    def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    # Keep in sync with `MaskedArray.__floordiv__`
+    @overload
+    def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
+    @overload
+    def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+    @overload
+    def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+    @overload
+    def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+    @overload
+    def __floordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ...
+    @overload
+    def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ...
+    @overload
+    def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ...
+    @overload
+    def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    # Keep in sync with `MaskedArray.__rfloordiv__`
+    @overload
+    def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
+    @overload
+    def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+    @overload
+    def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+    @overload
+    def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+    @overload
+    def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ...
+    @overload
+    def __rfloordiv__(self: NDArray[floating | integer], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
+    @overload
+    def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    # Keep in sync with `MaskedArray.__pow__`
+    @overload
+    def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+    @overload
+    def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ...
+    @overload
+    def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ...
+    @overload
+    def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ...
+    @overload
+    def __pow__(
+        self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, /
+    ) -> NDArray[complex128]: ...
+    @overload
+    def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ...
+    @overload
+    def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ...
+    @overload
+    def __pow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ...
+    @overload
+    def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ...
+
+    # Keep in sync with `MaskedArray.__rpow__`
+    @overload
+    def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+    @overload
+    def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ...
+    @overload
+    def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ...
+    @overload
+    def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ...
+    @overload
+    def __rpow__(
+        self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, /
+    ) -> NDArray[complex128]: ...
+    @overload
+    def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ...
+    @overload
+    def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ...
+    @overload
+    def __rpow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ...
+ @overload + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + + @overload + def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + @overload + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __lshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + @overload + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rlshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + @overload + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + @overload + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rrshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + @overload + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __and__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + @overload + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rand__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + @overload + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + @overload + def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __xor__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + @overload + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rxor__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + @overload + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __or__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + @overload + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __ror__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # `np.generic` does not support inplace operations + + # NOTE: Inplace ops generally use "same_kind" casting w.r.t. the left + # operand. Unsigned integers are an exception to this rule: they also + # accept a signed integer for the right operand, as long as it is a 0D + # object and its value is >= 0 + # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't + # work, as this will lead to `false negatives` when using these inplace ops. + + # += + @overload # type: ignore[misc] + def __iadd__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __iadd__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + @overload + def __iadd__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + @overload + def __iadd__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + @overload + def __iadd__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... + @overload + def __iadd__(self: _BytesArrayT, other: _ArrayLikeBytes_co, /) -> _BytesArrayT: ... + @overload + def __iadd__(self: _StringArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> _StringArrayT: ... + @overload + def __iadd__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # -= + @overload # type: ignore[misc] + def __isub__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + @overload + def __isub__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + @overload + def __isub__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ...
+ @overload + def __isub__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... + @overload + def __isub__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # *= + @overload # type: ignore[misc] + def __imul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __imul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + @overload + def __imul__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... + @overload + def __imul__(self: _NumberCharacterArrayT, other: _ArrayLikeInt_co, /) -> _NumberCharacterArrayT: ... + @overload + def __imul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # @= + @overload # type: ignore[misc] + def __imatmul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __imatmul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + @overload + def __imatmul__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + @overload + def __imatmul__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + @overload + def __imatmul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # **= + @overload # type: ignore[misc] + def __ipow__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + @overload + def __ipow__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + @overload + def __ipow__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + @overload + def __ipow__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # /= + @overload # type: ignore[misc] + def __itruediv__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + @overload + def __itruediv__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... + @overload + def __itruediv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # //= + # keep in sync with `__imod__` + @overload # type: ignore[misc] + def __ifloordiv__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __ifloordiv__(self: _FloatingTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingTimedeltaArrayT: ... + @overload + def __ifloordiv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # %= + # keep in sync with `__ifloordiv__` + @overload # type: ignore[misc] + def __imod__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __imod__(self: _FloatingArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingArrayT: ... + @overload + def __imod__(self: _TimedeltaArrayT, other: _ArrayLike[timedelta64], /) -> _TimedeltaArrayT: ... + @overload + def __imod__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # <<= + # keep in sync with `__irshift__` + @overload # type: ignore[misc] + def __ilshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __ilshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # >>= + # keep in sync with `__ilshift__` + @overload # type: ignore[misc] + def __irshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __irshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... 
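+
+    # A minimal illustration (comment only, not part of the stub) of the
+    # "same_kind" rule that the inplace overloads above encode; `f` and `i`
+    # are hypothetical names for a float64 and an int32 array:
+    #
+    #     f = np.zeros(3, dtype=np.float64)
+    #     f += np.arange(3)   # accepted: _InexactArrayT with _ArrayLikeFloat_co
+    #     i = np.zeros(3, dtype=np.int32)
+    #     i += 1              # accepted: _NumberArrayT with _ArrayLikeInt_co
+    #     i += 0.5            # rejected: no overload allows a float operand
+    #                         # for an integer array (would need unsafe casting)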
+ + # &= + # keep in sync with `__ixor__` and `__ior__` + @overload # type: ignore[misc] + def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __iand__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __iand__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # ^= + # keep in sync with `__iand__` and `__ior__` + @overload # type: ignore[misc] + def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ixor__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __ixor__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # |= + # keep in sync with `__iand__` and `__ixor__` + @overload # type: ignore[misc] + def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ior__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __ior__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # + def __dlpack__( + self: NDArray[number], + /, + *, + stream: int | Any | None = None, + max_version: tuple[int, int] | None = None, + dl_device: tuple[int, int] | None = None, + copy: builtins.bool | None = None, + ) -> CapsuleType: ... + def __dlpack_device__(self, /) -> tuple[L[1], L[0]]: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DTypeT_co: ... + +# NOTE: while `np.generic` is not technically an instance of `ABCMeta`, +# the `@abstractmethod` decorator is herein used to (forcefully) deny +# the creation of `np.generic` instances. +# The `# type: ignore` comments are necessary to silence mypy errors regarding +# the missing `ABCMeta` metaclass. +# See https://github.com/numpy/numpy-stubs/pull/80 for more details. +class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): + @abstractmethod + def __new__(cls, /, *args: Any, **kwargs: Any) -> Self: ... + + # NOTE: Technically this doesn't exist at runtime, but it is unlikely to lead to + # type-unsafe situations (the abstract scalar types cannot be instantiated + # themselves) and is convenient to have, so we include it regardless. See + # https://github.com/numpy/numpy/issues/30445 for use-cases and discussion. + def __hash__(self, /) -> int: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + + @overload + def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... + @overload + def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... + + @overload + def __array_wrap__( + self, + array: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[object, ...], int] | None, + return_scalar: L[False], + /, + ) -> ndarray[_ShapeT, _DTypeT]: ... + @overload + def __array_wrap__( + self, + array: ndarray[tuple[()], dtype[_ScalarT]], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> _ScalarT: ... + @overload + def __array_wrap__( + self, + array: ndarray[_Shape1T, _DTypeT], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> ndarray[_Shape1T, _DTypeT]: ... + @overload + def __array_wrap__( + self, + array: ndarray[_ShapeT, dtype[_ScalarT]], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> _ScalarT | ndarray[_ShapeT, dtype[_ScalarT]]: ... 
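+
+    # Illustrative comment (not part of the stub): the `__array_wrap__`
+    # overloads above encode that a 0-d array wrapped with the default
+    # `return_scalar=True` comes back as a scalar, while anything with a
+    # nonzero number of dimensions stays an ndarray, e.g.
+    #
+    #     zero_d = np.array(1.0)
+    #     np.float64(0).__array_wrap__(zero_d)              # typed as np.float64
+    #     one_d = np.array([1.0])
+    #     np.float64(0).__array_wrap__(one_d, None, False)  # stays an ndarray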
+ + @property + def base(self) -> None: ... + @property + def ndim(self) -> L[0]: ... + @property + def size(self) -> L[1]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def strides(self) -> tuple[()]: ... + @property + def flat(self) -> flatiter[ndarray[tuple[int], dtype[Self]]]: ... + + @overload + def item(self, /) -> _ItemT_co: ... + @overload + def item(self, arg0: L[0, -1] | tuple[L[0, -1]] | tuple[()] = ..., /) -> _ItemT_co: ... + @override + def tolist(self, /) -> _ItemT_co: ... + + # NOTE: these technically exist, but will always raise when called + def trace( # type: ignore[misc] + self: Never, + /, + offset: L[0] = 0, + axis1: L[0] = 0, + axis2: L[1] = 1, + dtype: None = None, + out: None = None, + ) -> Never: ... + def diagonal(self: Never, /, offset: L[0] = 0, axis1: L[0] = 0, axis2: L[1] = 1) -> Never: ... # type: ignore[misc] + def swapaxes(self: Never, axis1: Never, axis2: Never, /) -> Never: ... # type: ignore[misc] + def sort(self: Never, /, axis: L[-1] = -1, kind: None = None, order: None = None, *, stable: None = None) -> Never: ... # type: ignore[misc] + def nonzero(self: Never, /) -> Never: ... # type: ignore[misc] + def setfield(self: Never, val: Never, /, dtype: Never, offset: L[0] = 0) -> None: ... # type: ignore[misc] + def searchsorted(self: Never, v: Never, /, side: L["left"] = "left", sorter: None = None) -> Never: ... # type: ignore[misc] + + # NOTE: this won't raise, but won't do anything either + @overload + def resize(self, /, *, refcheck: builtins.bool = True) -> None: ... + @overload + def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: builtins.bool = True) -> None: ... + + # + def byteswap(self, /, inplace: L[False] = False) -> Self: ... + + # + @overload + def astype( + self, + /, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: builtins.bool = True, + copy: builtins.bool | _CopyMode = True, + ) -> _ScalarT: ... + @overload + def astype( + self, + /, + dtype: DTypeLike | None, + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: builtins.bool = True, + copy: builtins.bool | _CopyMode = True, + ) -> Incomplete: ... + + # NOTE: `view` will perform a 0D->scalar cast, + # thus the array `type` is irrelevant to the output type + @overload + def view(self, type: type[ndarray] = ...) -> Self: ... + @overload + def view(self, /, dtype: _DTypeLike[_ScalarT], type: type[ndarray] = ...) -> _ScalarT: ... + @overload + def view(self, /, dtype: DTypeLike, type: type[ndarray] = ...) -> Incomplete: ... + + @overload + def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> _ScalarT: ... + @overload + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> Incomplete: ... + + @overload + def take( + self, + indices: _IntLike_co, + /, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", + ) -> Self: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", + ) -> NDArray[Self]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, + mode: _ModeKind = "raise", + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = "raise", + ) -> _ArrayT: ...
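+
+    # Illustrative comment (not part of the stub): because `view` performs a
+    # 0D -> scalar cast, the conversion methods above (`astype`, `view`,
+    # `take`) all stay in scalar land when given a scalar, e.g.
+    #
+    #     x = np.float64(3.5)
+    #     x.astype(np.int32)   # typed (and returned) as np.int32
+    #     x.view(np.uint64)    # np.uint64: same itemsize, bits reinterpreted
+    #     x.take(0)            # np.float64 (Self)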
+ + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... + + @overload # (() | []) + def reshape( + self, + shape: tuple[()] | list[Never], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> Self: ... + @overload # ((1, *(1, ...))@_ShapeT) + def reshape( + self, + shape: _1NShapeT, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_1NShapeT, dtype[Self]]: ... + @overload # (Sequence[index, ...]) # not recommended + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> Self | ndarray[tuple[L[1], ...], dtype[Self]]: ... + @overload # _(index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1]], dtype[Self]]: ... + @overload # _(index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index, index, index, *index) # ndim >= 5 + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + size5: SupportsIndex, + /, + *sizes6_: SupportsIndex, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1], L[1], L[1], *tuple[L[1], ...]], dtype[Self]]: ... + + def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... + def transpose(self, axes: tuple[()] | None = ..., /) -> Self: ... + + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True + ) -> np.bool: ... + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + *, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True + ) -> np.bool: ... 
+ @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + *, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _dtype[Self]: ... + +class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): + @abstractmethod # `SupportsIndex | str | bytes` equivs `_ConvertibleToInt & _ConvertibleToFloat` + def __new__(cls, value: SupportsIndex | str | bytes = 0, /) -> Self: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... + + def __neg__(self) -> Self: ... + def __pos__(self) -> Self: ... + def __abs__(self) -> Self: ... + + def __add__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __radd__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __sub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rsub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __mul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rmul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __pow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... + def __rpow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... + def __truediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rtruediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... + +class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): + @property + def itemsize(self) -> L[1]: ... + @property + def nbytes(self) -> L[1]: ... + @property + def real(self) -> Self: ... + @property + def imag(self) -> np.bool[L[False]]: ... + + @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 + def __new__(cls, value: Never, /) -> np.bool[builtins.bool]: ... + @overload + def __new__(cls, value: _Falsy = ..., /) -> np.bool[L[False]]: ... + @overload + def __new__(cls, value: _Truthy, /) -> np.bool[L[True]]: ... + @overload + def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... + + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + + def __bool__(self, /) -> _BoolItemT_co: ... 
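+
+    # Illustrative comment (not part of the stub): the `__new__` overloads
+    # above track truthiness at the type level (`_Falsy` and `_Truthy` are
+    # private aliases defined elsewhere in this stub), e.g.
+    #
+    #     np.bool(False)  # np.bool[Literal[False]]
+    #     np.bool(True)   # np.bool[Literal[True]]
+    #     np.bool(obj)    # np.bool[builtins.bool] for an arbitrary object `obj`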
+ + @overload + def __int__(self: np.bool[L[False]], /) -> L[0]: ... + @overload + def __int__(self: np.bool[L[True]], /) -> L[1]: ... + @overload + def __int__(self, /) -> L[0, 1]: ... + + def __abs__(self) -> Self: ... + + @overload + def __invert__(self: np.bool[L[False]], /) -> np.bool[L[True]]: ... + @overload + def __invert__(self: np.bool[L[True]], /) -> np.bool[L[False]]: ... + @overload + def __invert__(self, /) -> np.bool: ... + + @overload + def __add__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __add__(self, other: builtins.bool | bool_, /) -> bool_: ... + @overload + def __add__(self, other: int, /) -> int_: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + + @overload + def __radd__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __radd__(self, other: builtins.bool, /) -> bool_: ... + @overload + def __radd__(self, other: int, /) -> int_: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + + @overload + def __sub__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __sub__(self, other: int, /) -> int_: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + + @overload + def __rsub__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __rsub__(self, other: int, /) -> int_: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + + @overload + def __mul__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __mul__(self, other: builtins.bool | bool_, /) -> bool_: ... + @overload + def __mul__(self, other: int, /) -> int_: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + + @overload + def __rmul__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __rmul__(self, other: builtins.bool, /) -> bool_: ... + @overload + def __rmul__(self, other: int, /) -> int_: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + + @overload + def __pow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + @overload + def __pow__(self, other: builtins.bool | bool_, mod: None = None, /) -> int8: ... + @overload + def __pow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + + @overload + def __rpow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + @overload + def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... + @overload + def __rpow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + + @overload + def __truediv__(self, other: _InexactT, /) -> _InexactT: ... + @overload + def __truediv__(self, other: float | integer | bool_, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @overload + def __rtruediv__(self, other: _InexactT, /) -> _InexactT: ... 
+ @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + @overload + def __floordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __floordiv__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __floordiv__(self, other: int, /) -> int_: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + + @overload + def __rfloordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rfloordiv__(self, other: int, /) -> int_: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + + # keep in sync with __floordiv__ + @overload + def __mod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __mod__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __mod__(self, other: int, /) -> int_: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + + # keep in sync with __rfloordiv__ + @overload + def __rmod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __rmod__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rmod__(self, other: int, /) -> int_: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + + # keep in sync with __mod__ + @overload + def __divmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + @overload + def __divmod__(self, other: builtins.bool | bool_, /) -> _2Tuple[int8]: ... + @overload + def __divmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + @overload + def __rdivmod__(self, other: builtins.bool, /) -> _2Tuple[int8]: ... + @overload + def __rdivmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + + @overload + def __lshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __lshift__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __lshift__(self, other: int, /) -> int_: ... + + @overload + def __rlshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rlshift__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rlshift__(self, other: int, /) -> int_: ... + + # keep in sync with __lshift__ + @overload + def __rshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rshift__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __rshift__(self, other: int, /) -> int_: ... + + # keep in sync with __rlshift__ + @overload + def __rrshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rrshift__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rrshift__(self, other: int, /) -> int_: ... + + @overload + def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... + @overload + def __and__(self, other: L[False] | np.bool[L[False]], /) -> np.bool[L[False]]: ... + @overload + def __and__(self, other: L[True] | np.bool[L[True]], /) -> Self: ... + @overload + def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __and__(self, other: _IntegerT, /) -> _IntegerT: ... 
+ @overload + def __and__(self, other: int, /) -> np.bool | intp: ... + __rand__ = __and__ + + @overload + def __xor__(self: np.bool[L[False]], other: _BoolItemT | np.bool[_BoolItemT], /) -> np.bool[_BoolItemT]: ... + @overload + def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... + @overload + def __xor__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + @overload + def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __xor__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __xor__(self, other: int, /) -> np.bool | intp: ... + __rxor__ = __xor__ + + @overload + def __or__(self: np.bool[L[True]], other: builtins.bool | np.bool, /) -> np.bool[L[True]]: ... + @overload + def __or__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + @overload + def __or__(self, other: L[True] | np.bool[L[True]], /) -> np.bool[L[True]]: ... + @overload + def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __or__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __or__(self, other: int, /) -> np.bool | intp: ... + __ror__ = __or__ + + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... + +# NOTE: This should _not_ be `Final` or a `TypeAlias` +bool_ = bool + +# NOTE: The `object_` constructor returns the passed object, so instances with type +# `object_` cannot exist (at runtime). +# NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't +# be made generic. +@final +class object_(_RealMixin, generic): + @overload + def __new__(cls, value: None = None, /) -> None: ... # type: ignore[misc] + @overload + def __new__(cls, value: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] + @overload + def __new__(cls, value: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] + @overload + def __new__(cls, value: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] + @overload + def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc] + @overload # catch-all + def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] + + def __hash__(self, /) -> int: ... + def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ + def __call__(self, /, *args: object, **kwargs: object) -> Any: ... + + if sys.version_info >= (3, 12): + def __release_buffer__(self, buffer: memoryview, /) -> None: ...
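+
+# Illustrative comment (not part of the stub): per the NOTE above, `object_(x)`
+# hands back `x` itself (or builds an object array), never an `object_`
+# instance, e.g.
+#
+#     np.object_(None)      # None
+#     np.object_("spam")    # the str "spam" itself
+#     np.object_([1, "a"])  # an object-dtype ndarray built from the sequence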
+ +class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): + @abstractmethod + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes + def bit_count(self, /) -> int: ... + def __index__(self, /) -> int: ... + def __invert__(self, /) -> Self: ... + + @override # type: ignore[override] + @overload + def __truediv__(self, other: float | integer, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + def __floordiv__(self, value: _IntLike_co, /) -> integer: ... + def __rfloordiv__(self, value: _IntLike_co, /) -> integer: ... + def __mod__(self, value: _IntLike_co, /) -> integer: ... + def __rmod__(self, value: _IntLike_co, /) -> integer: ... + def __divmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + def __rdivmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + + # Ensure that objects annotated as `integer` support bit-wise operations + def __lshift__(self, other: _IntLike_co, /) -> integer: ... + def __rlshift__(self, other: _IntLike_co, /) -> integer: ... + def __rshift__(self, other: _IntLike_co, /) -> integer: ... + def __rrshift__(self, other: _IntLike_co, /) -> integer: ... + def __and__(self, other: _IntLike_co, /) -> integer: ... + def __rand__(self, other: _IntLike_co, /) -> integer: ... + def __or__(self, other: _IntLike_co, /) -> integer: ... + def __ror__(self, other: _IntLike_co, /) -> integer: ... + def __xor__(self, other: _IntLike_co, /) -> integer: ... + def __rxor__(self, other: _IntLike_co, /) -> integer: ... + +class signedinteger(integer[_NBit]): + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... 
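+
+    # Illustrative comment (not part of the stub): the overloads above keep a
+    # bare Python `int` operand width-preserving, while mixed scalar widths
+    # fall back to the abstract type, e.g.
+    #
+    #     np.int32(5) + 2            # np.int32 (Self)
+    #     np.int32(5) + np.int64(2)  # signedinteger (runtime: np.int64)
+    #     np.int16(7) / np.int16(2)  # np.float64: integer true division
+    #                                # always produces float64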
+ + @override # type: ignore[override] + @overload + def __mul__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... 
+ @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: integer, /) -> signedinteger: ... + +int8 = signedinteger[_8Bit] +int16 = signedinteger[_16Bit] +int32 = signedinteger[_32Bit] +int64 = signedinteger[_64Bit] + +byte = signedinteger[_NBitByte] +short = signedinteger[_NBitShort] +intc = signedinteger[_NBitIntC] +intp = signedinteger[_NBitIntP] +int_ = intp +long = signedinteger[_NBitLong] +longlong = signedinteger[_NBitLongLong] + +class unsignedinteger(integer[_NBit1]): + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... 
+ @overload + def __radd__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | uint8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | uint8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... 
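+
+    # Illustrative comment (not part of the stub): for unsigned scalars, a
+    # bare Python `int` stays width-preserving, another unsigned operand
+    # yields the abstract `unsignedinteger`, and mixing in a signed operand
+    # is left `Incomplete` (the result kind depends on the widths involved):
+    #
+    #     np.uint8(200) + 1             # np.uint8 (Self)
+    #     np.uint8(200) + np.uint16(1)  # unsignedinteger
+    #     np.uint8(200) + np.int16(1)   # Incomplete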
+ + @override # type: ignore[override] + @overload + def __mod__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | uint8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | uint8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __lshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rlshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rrshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __and__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rand__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __xor__(self, other: signedinteger, /) -> signedinteger: ... 
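+
+    # Illustrative comment (not part of the stub): the bitwise overloads
+    # above flip to a signed result as soon as a signed operand is involved:
+    #
+    #     np.uint32(5) << 2           # np.uint32 (Self)
+    #     np.uint32(5) & np.uint8(3)  # unsignedinteger
+    #     np.uint32(5) & np.int64(3)  # signedinteger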
+ + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rxor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __or__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __ror__(self, other: signedinteger, /) -> signedinteger: ... + +uint8: TypeAlias = unsignedinteger[_8Bit] +uint16: TypeAlias = unsignedinteger[_16Bit] +uint32: TypeAlias = unsignedinteger[_32Bit] +uint64: TypeAlias = unsignedinteger[_64Bit] + +ubyte: TypeAlias = unsignedinteger[_NBitByte] +ushort: TypeAlias = unsignedinteger[_NBitShort] +uintc: TypeAlias = unsignedinteger[_NBitIntC] +uintp: TypeAlias = unsignedinteger[_NBitIntP] +uint: TypeAlias = uintp +ulong: TypeAlias = unsignedinteger[_NBitLong] +ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] + +class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): + @abstractmethod + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... + +class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: integer | floating, /) -> floating: ... + @overload + def __add__(self, other: float, /) -> Self: ... + @overload + def __add__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: integer | floating, /) -> floating: ... + @overload + def __radd__(self, other: float, /) -> Self: ... + @overload + def __radd__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: integer | floating, /) -> floating: ... + @overload + def __sub__(self, other: float, /) -> Self: ... + @overload + def __sub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: integer | floating, /) -> floating: ... + @overload + def __rsub__(self, other: float, /) -> Self: ... + @overload + def __rsub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: integer | floating, /) -> floating: ... + @overload + def __mul__(self, other: float, /) -> Self: ... + @overload + def __mul__(self, other: complex, /) -> complexfloating: ... 
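+
+    # Illustrative comment (not part of the stub): per the arithmetic
+    # overloads above, a bare Python `float` operand does not widen the
+    # result, while a wider numpy scalar does, e.g.
+    #
+    #     np.float32(1.0) + 1.0            # np.float32 (Self)
+    #     np.float32(1.0) + np.float64(1)  # floating (runtime: np.float64)
+    #     np.float32(1.0) + 1j             # complexfloating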
+ + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmul__(self, other: float, /) -> Self: ... + @overload + def __rmul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | float16 | uint8 | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | float16 | uint8 | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __truediv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __truediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __truediv__(self, other: float, /) -> Self: ... + @overload + def __truediv__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rtruediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rtruediv__(self, other: float, /) -> Self: ... + @overload + def __rtruediv__(self, other: complex, /) -> complexfloating: ... + + # modular division ops + + @overload + def __floordiv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __floordiv__(self, other: float, /) -> Self: ... + + @overload + def __rfloordiv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rfloordiv__(self, other: float, /) -> Self: ... + + @overload + def __mod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: integer | floating, /) -> floating: ... + @overload + def __mod__(self, other: float, /) -> Self: ... + + @overload + def __rmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmod__(self, other: float, /) -> Self: ... + + @overload + def __divmod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[Self]: ... + + @overload + def __rdivmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[Self]: ... 
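+
+    # Illustrative comment (not part of the stub; `_2Tuple` is a private
+    # alias for a homogeneous pair): divmod follows the same promotion as
+    # the other modular division ops above, e.g.
+    #
+    #     divmod(np.float32(7.5), 2)              # _2Tuple[np.float32]
+    #     divmod(np.float32(7.5), np.float64(2))  # _2Tuple[floating]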
+ + # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes + def is_integer(self, /) -> builtins.bool: ... + def as_integer_ratio(self, /) -> tuple[int, int]: ... + +float16: TypeAlias = floating[_16Bit] +float32: TypeAlias = floating[_32Bit] + +# either a C `double`, `float`, or `longdouble` +class float64(floating[_64Bit], float): # type: ignore[misc] + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + # overrides for `floating` and `builtins.float` compatibility (`_RealMixin` doesn't work) + @property + def real(self) -> Self: ... + @property + def imag(self) -> Self: ... + def conjugate(self) -> Self: ... + def __getnewargs__(self, /) -> tuple[float]: ... + + @classmethod + def __getformat__(cls, typestr: L["double", "float"], /) -> str: ... # undocumented + + # float64-specific operator overrides + # NOTE: Mypy reports [misc] errors about "unsafely overlapping signatures" for the + # reflected methods. But since they are identical to the non-reflected versions, + # these errors appear to be false positives. + + @overload # type: ignore[override] + def __add__(self, other: _Float64_co, /) -> float64: ... + @overload + def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __add__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __radd__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] + @overload + def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __radd__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __sub__(self, other: _Float64_co, /) -> float64: ... + @overload + def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __sub__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rsub__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] + @overload + def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rsub__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __mul__(self, other: _Float64_co, /) -> float64: ... + @overload + def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __mul__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rmul__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... 
# type: ignore[misc] + @overload + def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rmul__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __truediv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __truediv__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rtruediv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] + @overload + def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __floordiv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __floordiv__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rfloordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __pow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... + @overload + def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... # type: ignore[misc] + @overload + def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] + @overload + def __rpow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... + + def __mod__(self, other: _Float64_co, /) -> float64: ... + def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + + def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... + def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[misc] + +half: TypeAlias = float16 +single: TypeAlias = float32 +double: TypeAlias = float64 +longdouble: TypeAlias = floating[_NBitLongDouble] + +# The main reason for `complexfloating` having two typevars is cosmetic. 
+# It is used to clarify why `complex128`'s precision is `_64Bit`, the latter
+# describing the two 64-bit floats representing its real and imaginary components.
+
+class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]):
+    @overload
+    def __new__(
+        cls,
+        real: complex | SupportsComplex | SupportsFloat | SupportsIndex = 0,
+        imag: complex | SupportsFloat | SupportsIndex = 0,
+        /,
+    ) -> Self: ...
+    @overload
+    def __new__(cls, real: _ConvertibleToComplex | None = 0, /) -> Self: ...
+
+    @property
+    def real(self) -> floating[_NBit1]: ...
+    @property
+    def imag(self) -> floating[_NBit2]: ...
+
+    # NOTE: `__complex__` is technically defined in the concrete subtypes
+    def __complex__(self, /) -> complex: ...
+    def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ... # type: ignore[override]
+
+    @overload # type: ignore[override]
+    def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload # type: ignore[override]
+    def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload # type: ignore[override]
+    def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload # type: ignore[override]
+    def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload # type: ignore[override]
+    def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload # type: ignore[override]
+    def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload # type: ignore[override]
+    def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload # type: ignore[override]
+    def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+ @overload + def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload # type: ignore[override] + def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __pow__( + self, other: complex | float64 | complex128, mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __pow__( + self, other: number[_NBit], mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload # type: ignore[override] + def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __rpow__( + self, other: number[_NBit], mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + +complex64: TypeAlias = complexfloating[_32Bit] + +class complex128(complexfloating[_64Bit, _64Bit], complex): + @property + def itemsize(self) -> L[16]: ... + @property + def nbytes(self) -> L[16]: ... + + # overrides for `floating` and `builtins.float` compatibility + @property + def real(self) -> float64: ... + @property + def imag(self) -> float64: ... + def conjugate(self) -> Self: ... + def __abs__(self) -> float64: ... # type: ignore[override] + def __getnewargs__(self, /) -> tuple[float, float]: ... + + # complex128-specific operator overrides + @overload # type: ignore[override] + def __add__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __radd__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] + + @overload # type: ignore[override] + def __sub__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rsub__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] + + @overload # type: ignore[override] + def __mul__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rmul__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] + + @overload # type: ignore[override] + def __truediv__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] + + @overload # type: ignore[override] + def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... + @overload + def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... 
# type: ignore[override] + +csingle: TypeAlias = complex64 +cdouble: TypeAlias = complex128 +clongdouble: TypeAlias = complexfloating[_NBitLongDouble] + +class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + @overload + def __new__(cls, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> Self: ... + @overload + def __new__(cls, /) -> timedelta64[L[0]]: ... + @overload + def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec, /) -> timedelta64[None]: ... + @overload + def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[L[0]]: ... + @overload + def __new__(cls, value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[int]: ... + @overload + def __new__(cls, value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> timedelta64[int]: ... + @overload + def __new__( + cls, + value: dt.timedelta | _IntLike_co, + format: _TimeUnitSpec[_NativeTD64Unit] = ..., + /, + ) -> timedelta64[dt.timedelta]: ... + @overload + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> Self: ... + + # inherited at runtime from `signedinteger` + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + + # NOTE: Only a limited number of units support conversion + # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` + def __int__(self: timedelta64[int], /) -> int: ... + def __float__(self: timedelta64[int], /) -> float: ... + + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + def __abs__(self, /) -> Self: ... + + @overload + def __add__(self: timedelta64[None], x: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __add__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload + def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... + @overload + def __add__(self: timedelta64[dt.timedelta], x: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + @overload + def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] + __radd__ = __add__ + + @overload + def __mul__(self: timedelta64[_AnyTD64Item], x: int | np.integer | np.bool, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __mul__(self: timedelta64[_AnyTD64Item], x: float | np.floating, /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __mul__(self, x: float | np.floating | np.integer | np.bool, /) -> timedelta64: ... + __rmul__ = __mul__ + + @overload + def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... + @overload + def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... + @overload + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self, x: timedelta64, /) -> timedelta64: ... 
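+
+    # A minimal runtime illustration (not part of the stubs): the `L[0]`
+    # overloads above encode that a modulo with a zero-valued divisor yields
+    # NaT, hence the `timedelta64[None]` result type. Assuming NumPy >= 2.0,
+    # roughly:
+    #
+    # >>> np.timedelta64(7, "s") % np.timedelta64(0, "s")  # emits RuntimeWarning
+    # np.timedelta64('NaT','s')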
+ + # NOTE: The L[0] makes __mod__ non-commutative, which the first two overloads + # reflect. However, mypy does not seem to like this, so we ignore the errors. + @overload + def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[misc] + @overload + def __rmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> timedelta64[None]: ... + @overload + def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... # type: ignore[misc] + @overload + def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... # type: ignore[misc] + @overload + def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... # type: ignore[misc] + @overload + def __rmod__(self, x: timedelta64, /) -> timedelta64: ... # type: ignore[misc] + + # keep in sync with __mod__ + @overload + def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__( + self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / + ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + @overload + def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] + @overload + def __rdivmod__( # type: ignore[misc] + self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / + ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + @overload + def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + @overload + def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... # type: ignore[misc] + + @overload + def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload + def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... + @overload + def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... 
# type: ignore[overload-cannot-match]
+
+    # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__.
+    # This confuses mypy, so we ignore the [misc] errors it reports.
+    @overload
+    def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ...
+    @overload
+    def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ...
+    @overload
+    def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc]
+    @overload
+    def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc]
+    @overload
+    def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match]
+    @overload
+    def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... # type: ignore[misc]
+
+    @overload
+    def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ...
+    @overload
+    def __truediv__(self, b: timedelta64, /) -> float64: ...
+    @overload
+    def __truediv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ...
+    @overload
+    def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ...
+    @overload
+    def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ...
+
+    @overload
+    def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ...
+    @overload
+    def __rtruediv__(self, a: timedelta64, /) -> float64: ...
+
+    @overload
+    def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ...
+    @overload
+    def __floordiv__(self, b: timedelta64, /) -> int64: ...
+    @overload
+    def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ...
+    @overload
+    def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ...
+
+    @overload
+    def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ...
+    @overload
+    def __rfloordiv__(self, a: timedelta64, /) -> int64: ...
+
+    # comparison ops
+
+    @overload
+    def __lt__(self, other: _TD64Like_co, /) -> bool_: ...
+    @overload
+    def __lt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ...
+    @overload
+    def __lt__(self, other: _SupportsGT, /) -> bool_: ...
+
+    @overload
+    def __le__(self, other: _TD64Like_co, /) -> bool_: ...
+    @overload
+    def __le__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ...
+    @overload
+    def __le__(self, other: _SupportsGE, /) -> bool_: ...
+
+    @overload
+    def __gt__(self, other: _TD64Like_co, /) -> bool_: ...
+    @overload
+    def __gt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ...
+    @overload
+    def __gt__(self, other: _SupportsLT, /) -> bool_: ...
+
+    @overload
+    def __ge__(self, other: _TD64Like_co, /) -> bool_: ...
+    @overload
+    def __ge__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ...
+    @overload
+    def __ge__(self, other: _SupportsLE, /) -> bool_: ...
+
+class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]):
+    @property
+    def itemsize(self) -> L[8]: ...
+    @property
+    def nbytes(self) -> L[8]: ...
+
+    @overload
+    def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ...
+ @overload + def __new__(cls, value: _AnyDT64Arg, /) -> datetime64[_AnyDT64Arg]: ... + @overload + def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> datetime64[None]: ... + @overload + def __new__(cls, value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> datetime64[dt.datetime]: ... + @overload + def __new__(cls, value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> datetime64[dt.date]: ... + @overload + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> datetime64[int]: ... + @overload + def __new__( + cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / + ) -> datetime64[dt.datetime]: ... + @overload + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... + @overload + def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ... + + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + + @overload + def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ... + @overload + def __add__(self: datetime64[int], x: timedelta64[int | dt.timedelta], /) -> datetime64[int]: ... + @overload + def __add__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ... + @overload + def __add__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ... + @overload + def __add__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[int]: ... + @overload + def __add__(self, x: datetime64[None], /) -> datetime64[None]: ... + @overload + def __add__(self, x: _TD64Like_co, /) -> datetime64: ... + __radd__ = __add__ + + @overload + def __sub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __sub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + @overload + def __sub__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ... + @overload + def __sub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ... + @overload + def __sub__(self: datetime64[int], x: timedelta64, /) -> datetime64[int]: ... + @overload + def __sub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: timedelta64[int], /) -> datetime64[int]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... + @overload + def __sub__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[dt.date | int]: ... + @overload + def __sub__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ... + @overload + def __sub__(self: datetime64[dt.date], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... + @overload + def __sub__(self, x: timedelta64[None], /) -> datetime64[None]: ... + @overload + def __sub__(self, x: datetime64[None], /) -> timedelta64[None]: ... + @overload + def __sub__(self, x: _TD64Like_co, /) -> datetime64: ... + @overload + def __sub__(self, x: datetime64, /) -> timedelta64: ... 
+
+    # NOTE: mypy gets confused by the non-commutativity of subtraction here
+    @overload
+    def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ...
+    @overload
+    def __rsub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ...
+    @overload
+    def __rsub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ...
+    @overload
+    def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ...
+    @overload
+    def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... # type: ignore[misc]
+    @overload
+    def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... # type: ignore[misc]
+    @overload
+    def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... # type: ignore[misc]
+    @overload
+    def __rsub__(self, x: datetime64, /) -> timedelta64: ... # type: ignore[misc]
+
+    @overload
+    def __lt__(self, other: datetime64, /) -> bool_: ...
+    @overload
+    def __lt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ...
+    @overload
+    def __lt__(self, other: _SupportsGT, /) -> bool_: ...
+
+    @overload
+    def __le__(self, other: datetime64, /) -> bool_: ...
+    @overload
+    def __le__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ...
+    @overload
+    def __le__(self, other: _SupportsGE, /) -> bool_: ...
+
+    @overload
+    def __gt__(self, other: datetime64, /) -> bool_: ...
+    @overload
+    def __gt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ...
+    @overload
+    def __gt__(self, other: _SupportsLT, /) -> bool_: ...
+
+    @overload
+    def __ge__(self, other: datetime64, /) -> bool_: ...
+    @overload
+    def __ge__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ...
+    @overload
+    def __ge__(self, other: _SupportsLE, /) -> bool_: ...
+
+@final # cannot be subclassed at runtime
+class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ... # type: ignore[misc]
+
+class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc]
+    @overload
+    def __new__(cls, length_or_data: _IntLike_co | bytes, /, dtype: None = None) -> Self: ...
+    @overload
+    def __new__(cls, length_or_data: object, /, dtype: _DTypeLikeVoid) -> Self: ...
+
+    @overload
+    def __getitem__(self, key: str | SupportsIndex, /) -> Any: ...
+    @overload
+    def __getitem__(self, key: list[str], /) -> void: ...
+    def __setitem__(self, key: str | list[str] | SupportsIndex, value: ArrayLike, /) -> None: ...
+
+    def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ...
+
+class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): # type: ignore[misc]
+    @abstractmethod
+    def __new__(cls, value: object = ..., /) -> Self: ...
+
+# NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart
+
+class bytes_(character[bytes], bytes): # type: ignore[misc]
+    @overload
+    def __new__(cls, value: object = b"", /) -> Self: ...
+    @overload
+    def __new__(cls, value: str, /, encoding: str, errors: str = "strict") -> Self: ...
+
+    #
+    @override
+    def __hash__(self, /) -> int: ...
+
+    #
+    def __bytes__(self, /) -> bytes: ...
+
+class str_(character[str], str): # type: ignore[misc]
+    @overload
+    def __new__(cls, value: object = "", /) -> Self: ...
+    @overload
+    def __new__(cls, value: bytes, /, encoding: str, errors: str = "strict") -> Self: ...
+
+    #
+    @override
+    def __hash__(self, /) -> int: ...
+
+# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs
+@final
+class ufunc:
+    __signature__: Final[inspect.Signature]
+
+    @property
+    def __name__(self) -> LiteralString: ...
+    @property
+    def __qualname__(self) -> LiteralString: ... # pyright: ignore[reportIncompatibleVariableOverride]
+    @property
+    def __doc__(self) -> str: ... # type: ignore[override]
+    @property
+    def nin(self) -> int: ...
+    @property
+    def nout(self) -> int: ...
+    @property
+    def nargs(self) -> int: ...
+    @property
+    def ntypes(self) -> int: ...
+    @property
+    def types(self) -> list[LiteralString]: ...
+    # Broad return type because it has to encompass things like
+    #
+    # >>> np.logical_and.identity is True
+    # True
+    # >>> np.add.identity is 0
+    # True
+    # >>> np.sin.identity is None
+    # True
+    #
+    # and any user-defined ufuncs.
+    @property
+    def identity(self) -> Any: ...
+    # This is None for ufuncs and a string for gufuncs.
+    @property
+    def signature(self) -> LiteralString | None: ...
+
+    def __call__(self, /, *args: Any, **kwargs: Any) -> Any: ...
+
+    # The next four methods will always exist, but they will just
+    # raise a ValueError for ufuncs that don't accept two input
+    # arguments and return one output argument. Because of that we
+    # can't type them very precisely.
+    def accumulate(
+        self,
+        array: ArrayLike,
+        /,
+        axis: SupportsIndex = 0,
+        dtype: DTypeLike | None = None,
+        out: ndarray | EllipsisType | None = None,
+    ) -> NDArray[Incomplete]: ...
+    def reduce(
+        self,
+        array: ArrayLike,
+        /,
+        axis: _ShapeLike | None = 0,
+        dtype: DTypeLike | None = None,
+        out: ndarray | EllipsisType | None = None,
+        **kwargs: Incomplete,
+    ) -> Incomplete: ...
+    def reduceat(
+        self,
+        array: ArrayLike,
+        /,
+        indices: _ArrayLikeInt_co,
+        axis: SupportsIndex = 0,
+        dtype: DTypeLike | None = None,
+        out: ndarray | EllipsisType | None = None,
+    ) -> NDArray[Incomplete]: ...
+    def outer(self, A: ArrayLike, B: ArrayLike, /, **kwargs: Incomplete) -> NDArray[Incomplete]: ...
+
+    # Similarly `at` won't be defined for ufuncs that return multiple
+    # outputs, so we can't type it very precisely.
+    def at(self, a: ndarray, indices: _ArrayLikeInt_co, b: ArrayLike | None = None, /) -> None: ...
+
+    #
+    def resolve_dtypes(
+        self,
+        /,
+        dtypes: tuple[dtype | type | None, ...],
+        *,
+        signature: tuple[dtype | None, ...] | None = None,
+        casting: _CastingKind | None = None,
+        reduction: builtins.bool = False,
+    ) -> tuple[dtype, ...]: ...
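+
+# A minimal runtime illustration (not part of the stub API) of why the
+# reduction methods above are typed so loosely: they exist on every ufunc
+# object, but only binary ufuncs with a single output support them; others
+# raise ValueError at runtime. Assuming NumPy >= 2.0 (exact message may vary
+# by version):
+#
+# >>> np.add.reduce([1, 2, 3])  # nin=2, nout=1: supported
+# np.int64(6)
+# >>> np.negative.reduce([1, 2, 3])  # nin=1: not supported
+# Traceback (most recent call last):
+#     ...
+# ValueError: reduce only supported for binary functions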
+ +# Parameters: `__name__`, `ntypes` and `identity` +absolute: _UFunc_Nin1_Nout1[L["absolute"], L[20], None] +add: _UFunc_Nin2_Nout1[L["add"], L[22], L[0]] +arccos: _UFunc_Nin1_Nout1[L["arccos"], L[8], None] +arccosh: _UFunc_Nin1_Nout1[L["arccosh"], L[8], None] +arcsin: _UFunc_Nin1_Nout1[L["arcsin"], L[8], None] +arcsinh: _UFunc_Nin1_Nout1[L["arcsinh"], L[8], None] +arctan2: _UFunc_Nin2_Nout1[L["arctan2"], L[5], None] +arctan: _UFunc_Nin1_Nout1[L["arctan"], L[8], None] +arctanh: _UFunc_Nin1_Nout1[L["arctanh"], L[8], None] +bitwise_and: _UFunc_Nin2_Nout1[L["bitwise_and"], L[12], L[-1]] +bitwise_count: _UFunc_Nin1_Nout1[L["bitwise_count"], L[11], None] +bitwise_or: _UFunc_Nin2_Nout1[L["bitwise_or"], L[12], L[0]] +bitwise_xor: _UFunc_Nin2_Nout1[L["bitwise_xor"], L[12], L[0]] +cbrt: _UFunc_Nin1_Nout1[L["cbrt"], L[5], None] +ceil: _UFunc_Nin1_Nout1[L["ceil"], L[7], None] +conjugate: _UFunc_Nin1_Nout1[L["conjugate"], L[18], None] +copysign: _UFunc_Nin2_Nout1[L["copysign"], L[4], None] +cos: _UFunc_Nin1_Nout1[L["cos"], L[9], None] +cosh: _UFunc_Nin1_Nout1[L["cosh"], L[8], None] +deg2rad: _UFunc_Nin1_Nout1[L["deg2rad"], L[5], None] +degrees: _UFunc_Nin1_Nout1[L["degrees"], L[5], None] +divide: _UFunc_Nin2_Nout1[L["divide"], L[11], None] +divmod: _UFunc_Nin2_Nout2[L["divmod"], L[15], None] +equal: _UFunc_Nin2_Nout1[L["equal"], L[23], None] +exp2: _UFunc_Nin1_Nout1[L["exp2"], L[8], None] +exp: _UFunc_Nin1_Nout1[L["exp"], L[10], None] +expm1: _UFunc_Nin1_Nout1[L["expm1"], L[8], None] +fabs: _UFunc_Nin1_Nout1[L["fabs"], L[5], None] +float_power: _UFunc_Nin2_Nout1[L["float_power"], L[4], None] +floor: _UFunc_Nin1_Nout1[L["floor"], L[7], None] +floor_divide: _UFunc_Nin2_Nout1[L["floor_divide"], L[21], None] +fmax: _UFunc_Nin2_Nout1[L["fmax"], L[21], None] +fmin: _UFunc_Nin2_Nout1[L["fmin"], L[21], None] +fmod: _UFunc_Nin2_Nout1[L["fmod"], L[15], None] +frexp: _UFunc_Nin1_Nout2[L["frexp"], L[4], None] +gcd: _UFunc_Nin2_Nout1[L["gcd"], L[11], L[0]] +greater: _UFunc_Nin2_Nout1[L["greater"], L[23], None] +greater_equal: _UFunc_Nin2_Nout1[L["greater_equal"], L[23], None] +heaviside: _UFunc_Nin2_Nout1[L["heaviside"], L[4], None] +hypot: _UFunc_Nin2_Nout1[L["hypot"], L[5], L[0]] +invert: _UFunc_Nin1_Nout1[L["invert"], L[12], None] +isfinite: _UFunc_Nin1_Nout1[L["isfinite"], L[20], None] +isinf: _UFunc_Nin1_Nout1[L["isinf"], L[20], None] +isnan: _UFunc_Nin1_Nout1[L["isnan"], L[20], None] +isnat: _UFunc_Nin1_Nout1[L["isnat"], L[2], None] +lcm: _UFunc_Nin2_Nout1[L["lcm"], L[11], None] +ldexp: _UFunc_Nin2_Nout1[L["ldexp"], L[8], None] +left_shift: _UFunc_Nin2_Nout1[L["left_shift"], L[11], None] +less: _UFunc_Nin2_Nout1[L["less"], L[23], None] +less_equal: _UFunc_Nin2_Nout1[L["less_equal"], L[23], None] +log10: _UFunc_Nin1_Nout1[L["log10"], L[8], None] +log1p: _UFunc_Nin1_Nout1[L["log1p"], L[8], None] +log2: _UFunc_Nin1_Nout1[L["log2"], L[8], None] +log: _UFunc_Nin1_Nout1[L["log"], L[10], None] +logaddexp2: _UFunc_Nin2_Nout1[L["logaddexp2"], L[4], float] +logaddexp: _UFunc_Nin2_Nout1[L["logaddexp"], L[4], float] +logical_and: _UFunc_Nin2_Nout1[L["logical_and"], L[20], L[True]] +logical_not: _UFunc_Nin1_Nout1[L["logical_not"], L[20], None] +logical_or: _UFunc_Nin2_Nout1[L["logical_or"], L[20], L[False]] +logical_xor: _UFunc_Nin2_Nout1[L["logical_xor"], L[19], L[False]] +matmul: _GUFunc_Nin2_Nout1[L["matmul"], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] +matvec: _GUFunc_Nin2_Nout1[L["matvec"], L[19], None, L["(m,n),(n)->(m)"]] +maximum: _UFunc_Nin2_Nout1[L["maximum"], L[21], None] +minimum: 
_UFunc_Nin2_Nout1[L["minimum"], L[21], None] +modf: _UFunc_Nin1_Nout2[L["modf"], L[4], None] +multiply: _UFunc_Nin2_Nout1[L["multiply"], L[23], L[1]] +negative: _UFunc_Nin1_Nout1[L["negative"], L[19], None] +nextafter: _UFunc_Nin2_Nout1[L["nextafter"], L[4], None] +not_equal: _UFunc_Nin2_Nout1[L["not_equal"], L[23], None] +positive: _UFunc_Nin1_Nout1[L["positive"], L[19], None] +power: _UFunc_Nin2_Nout1[L["power"], L[18], None] +rad2deg: _UFunc_Nin1_Nout1[L["rad2deg"], L[5], None] +radians: _UFunc_Nin1_Nout1[L["radians"], L[5], None] +reciprocal: _UFunc_Nin1_Nout1[L["reciprocal"], L[18], None] +remainder: _UFunc_Nin2_Nout1[L["remainder"], L[16], None] +right_shift: _UFunc_Nin2_Nout1[L["right_shift"], L[11], None] +rint: _UFunc_Nin1_Nout1[L["rint"], L[10], None] +sign: _UFunc_Nin1_Nout1[L["sign"], L[19], None] +signbit: _UFunc_Nin1_Nout1[L["signbit"], L[4], None] +sin: _UFunc_Nin1_Nout1[L["sin"], L[9], None] +sinh: _UFunc_Nin1_Nout1[L["sinh"], L[8], None] +spacing: _UFunc_Nin1_Nout1[L["spacing"], L[4], None] +sqrt: _UFunc_Nin1_Nout1[L["sqrt"], L[10], None] +square: _UFunc_Nin1_Nout1[L["square"], L[18], None] +subtract: _UFunc_Nin2_Nout1[L["subtract"], L[21], None] +tan: _UFunc_Nin1_Nout1[L["tan"], L[8], None] +tanh: _UFunc_Nin1_Nout1[L["tanh"], L[8], None] +trunc: _UFunc_Nin1_Nout1[L["trunc"], L[7], None] +vecdot: _GUFunc_Nin2_Nout1[L["vecdot"], L[19], None, L["(n),(n)->()"]] +vecmat: _GUFunc_Nin2_Nout1[L["vecmat"], L[19], None, L["(n),(n,m)->(m)"]] + +abs = absolute +acos = arccos +acosh = arccosh +asin = arcsin +asinh = arcsinh +atan = arctan +atanh = arctanh +atan2 = arctan2 +concat = concatenate +bitwise_left_shift = left_shift +bitwise_not = invert +bitwise_invert = invert +bitwise_right_shift = right_shift +conj = conjugate +mod = remainder +permute_dims = transpose +pow = power +true_divide = divide + +# TODO: The type of each `__next__` and `iters` return-type depends +# on the length and dtype of `args`; we can't describe this behavior yet +# as we lack variadics (PEP 646). +@final +class broadcast: + def __new__(cls, *args: ArrayLike) -> broadcast: ... + @property + def index(self) -> int: ... + @property + def iters(self) -> tuple[flatiter[Any], ...]: ... + @property + def nd(self) -> int: ... + @property + def ndim(self) -> int: ... + @property + def numiter(self) -> int: ... + @property + def shape(self) -> _AnyShape: ... + @property + def size(self) -> int: ... + def __next__(self) -> tuple[Any, ...]: ... + def __iter__(self) -> Self: ... + def reset(self) -> None: ... + +@final +class busdaycalendar: + def __init__( + self, + /, + weekmask: str | Sequence[int | bool_ | integer] | _SupportsArray[dtype[bool_ | integer]] = "1111100", + holidays: Sequence[dt.date | datetime64] | _SupportsArray[dtype[datetime64]] | None = None, + ) -> None: ... + @property + def weekmask(self) -> ndarray[tuple[int], dtype[bool_]]: ... + @property + def holidays(self) -> ndarray[tuple[int], dtype[datetime64[dt.date]]]: ... + +@final +class nditer: + @overload + def __init__( + self, + /, + op: ArrayLike, + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[_NDIterFlagsOp] | None = None, + op_dtypes: DTypeLike | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[SupportsIndex] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... 
+ @overload + def __init__( + self, + /, + op: Sequence[ArrayLike | None], + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = None, + op_dtypes: Sequence[DTypeLike | None] | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[Sequence[SupportsIndex]] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... + + def __enter__(self) -> nditer: ... + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: ... + def __iter__(self) -> nditer: ... + def __next__(self) -> tuple[NDArray[Any], ...]: ... + def __len__(self) -> int: ... + def __copy__(self) -> nditer: ... + @overload + def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ... + @overload + def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ... + def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... + def close(self) -> None: ... + def copy(self) -> nditer: ... + def debug_print(self) -> None: ... + def enable_external_loop(self) -> None: ... + def iternext(self) -> builtins.bool: ... + def remove_axis(self, i: SupportsIndex, /) -> None: ... + def remove_multi_index(self) -> None: ... + def reset(self) -> None: ... + @property + def dtypes(self) -> tuple[dtype, ...]: ... + @property + def finished(self) -> builtins.bool: ... + @property + def has_delayed_bufalloc(self) -> builtins.bool: ... + @property + def has_index(self) -> builtins.bool: ... + @property + def has_multi_index(self) -> builtins.bool: ... + @property + def index(self) -> int: ... + @property + def iterationneedsapi(self) -> builtins.bool: ... + @property + def iterindex(self) -> int: ... + @property + def iterrange(self) -> tuple[int, ...]: ... + @property + def itersize(self) -> int: ... + @property + def itviews(self) -> tuple[NDArray[Any], ...]: ... + @property + def multi_index(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + @property + def nop(self) -> int: ... + @property + def operands(self) -> tuple[NDArray[Any], ...]: ... + @property + def shape(self) -> tuple[int, ...]: ... + @property + def value(self) -> tuple[NDArray[Any], ...]: ... + +class memmap(ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: ClassVar[float] + filename: str | None + offset: int + mode: str + @overload + def __new__( + subtype, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: type[uint8] = ..., + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[Any, dtype[uint8]]: ... + @overload + def __new__( + subtype, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: _DTypeLike[_ScalarT], + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[Any, dtype[_ScalarT]]: ... + @overload + def __new__( + subtype, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: DTypeLike, + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[Any, dtype]: ... + def __array_finalize__(self, obj: object) -> None: ... + def __array_wrap__( + self, + array: memmap[_ShapeT_co, _DTypeT_co], # type: ignore[override] + context: tuple[ufunc, tuple[Any, ...], int] | None = None, + return_scalar: builtins.bool = False, + ) -> Any: ... 
+ def flush(self) -> None: ... + +class poly1d: + @property + def variable(self) -> LiteralString: ... + @property + def order(self) -> int: ... + @property + def o(self) -> int: ... + @property + def roots(self) -> NDArray[Any]: ... + @property + def r(self) -> NDArray[Any]: ... + + @property + def coeffs(self) -> NDArray[Any]: ... + @coeffs.setter + def coeffs(self, value: NDArray[Any]) -> None: ... + + @property + def c(self) -> NDArray[Any]: ... + @c.setter + def c(self, value: NDArray[Any]) -> None: ... + + @property + def coef(self) -> NDArray[Any]: ... + @coef.setter + def coef(self, value: NDArray[Any]) -> None: ... + + @property + def coefficients(self) -> NDArray[Any]: ... + @coefficients.setter + def coefficients(self, value: NDArray[Any]) -> None: ... + + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + + @overload + def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int], dtype]: ... + @overload + def __array__(self, /, t: _DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], _DTypeT]: ... + + @overload + def __call__(self, val: _ScalarLike_co) -> Any: ... + @overload + def __call__(self, val: poly1d) -> poly1d: ... + @overload + def __call__(self, val: ArrayLike) -> NDArray[Any]: ... + + def __init__( + self, + c_or_r: ArrayLike, + r: builtins.bool = False, + variable: str | None = None, + ) -> None: ... + def __len__(self) -> int: ... + def __neg__(self) -> poly1d: ... + def __pos__(self) -> poly1d: ... + def __mul__(self, other: ArrayLike, /) -> poly1d: ... + def __rmul__(self, other: ArrayLike, /) -> poly1d: ... + def __add__(self, other: ArrayLike, /) -> poly1d: ... + def __radd__(self, other: ArrayLike, /) -> poly1d: ... + def __pow__(self, val: _FloatLike_co, /) -> poly1d: ... # Integral floats are accepted + def __sub__(self, other: ArrayLike, /) -> poly1d: ... + def __rsub__(self, other: ArrayLike, /) -> poly1d: ... + def __truediv__(self, other: ArrayLike, /) -> poly1d: ... + def __rtruediv__(self, other: ArrayLike, /) -> poly1d: ... + def __getitem__(self, val: int, /) -> Any: ... + def __setitem__(self, key: int, val: Any, /) -> None: ... + def __iter__(self) -> Iterator[Any]: ... + def deriv(self, m: SupportsInt | SupportsIndex = 1) -> poly1d: ... + def integ( + self, + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0, + ) -> poly1d: ... + +def from_dlpack( + x: _SupportsDLPack[None], + /, + *, + device: L["cpu"] | None = None, + copy: builtins.bool | None = None, +) -> NDArray[number | np.bool]: ... diff --git a/py311/lib/python3.11/site-packages/numpy/_array_api_info.py b/py311/lib/python3.11/site-packages/numpy/_array_api_info.py new file mode 100644 index 0000000000000000000000000000000000000000..067e38798718e37383c9550688c4080ef73c1539 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/_array_api_info.py @@ -0,0 +1,346 @@ +""" +Array API Inspection namespace + +This is the namespace for inspection functions as defined by the array API +standard. See +https://data-apis.org/array-api/latest/API_specification/inspection.html for +more details. + +""" +from numpy._core import ( + bool, + complex64, + complex128, + dtype, + float32, + float64, + int8, + int16, + int32, + int64, + intp, + uint8, + uint16, + uint32, + uint64, +) + + +class __array_namespace_info__: + """ + Get the array API inspection namespace for NumPy. 
+
+    The array API inspection namespace defines the following functions:
+
+    - capabilities()
+    - default_device()
+    - default_dtypes()
+    - dtypes()
+    - devices()
+
+    See
+    https://data-apis.org/array-api/latest/API_specification/inspection.html
+    for more details.
+
+    Returns
+    -------
+    info : ModuleType
+        The array API inspection namespace for NumPy.
+
+    Examples
+    --------
+    >>> info = np.__array_namespace_info__()
+    >>> info.default_dtypes()
+    {'real floating': numpy.float64,
+     'complex floating': numpy.complex128,
+     'integral': numpy.int64,
+     'indexing': numpy.int64}
+
+    """
+
+    __module__ = 'numpy'
+
+    def capabilities(self):
+        """
+        Return a dictionary of array API library capabilities.
+
+        The resulting dictionary has the following keys:
+
+        - **"boolean indexing"**: boolean indicating whether an array library
+          supports boolean indexing. Always ``True`` for NumPy.
+
+        - **"data-dependent shapes"**: boolean indicating whether an array
+          library supports data-dependent output shapes. Always ``True`` for
+          NumPy.
+
+        - **"max dimensions"**: integer indicating the maximum number of
+          dimensions supported. Always ``64`` for NumPy.
+
+        See
+        https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
+        for more details.
+
+        See Also
+        --------
+        __array_namespace_info__.default_device,
+        __array_namespace_info__.default_dtypes,
+        __array_namespace_info__.dtypes,
+        __array_namespace_info__.devices
+
+        Returns
+        -------
+        capabilities : dict
+            A dictionary of array API library capabilities.
+
+        Examples
+        --------
+        >>> info = np.__array_namespace_info__()
+        >>> info.capabilities()
+        {'boolean indexing': True,
+         'data-dependent shapes': True,
+         'max dimensions': 64}
+
+        """
+        return {
+            "boolean indexing": True,
+            "data-dependent shapes": True,
+            "max dimensions": 64,
+        }
+
+    def default_device(self):
+        """
+        The default device used for new NumPy arrays.
+
+        For NumPy, this always returns ``'cpu'``.
+
+        See Also
+        --------
+        __array_namespace_info__.capabilities,
+        __array_namespace_info__.default_dtypes,
+        __array_namespace_info__.dtypes,
+        __array_namespace_info__.devices
+
+        Returns
+        -------
+        device : str
+            The default device used for new NumPy arrays.
+
+        Examples
+        --------
+        >>> info = np.__array_namespace_info__()
+        >>> info.default_device()
+        'cpu'
+
+        """
+        return "cpu"
+
+    def default_dtypes(self, *, device=None):
+        """
+        The default data types used for new NumPy arrays.
+
+        For NumPy, this always returns the following dictionary:
+
+        - **"real floating"**: ``numpy.float64``
+        - **"complex floating"**: ``numpy.complex128``
+        - **"integral"**: ``numpy.intp``
+        - **"indexing"**: ``numpy.intp``
+
+        Parameters
+        ----------
+        device : str, optional
+            The device to get the default data types for. For NumPy, only
+            ``'cpu'`` is allowed.
+
+        Returns
+        -------
+        dtypes : dict
+            A dictionary describing the default data types used for new NumPy
+            arrays.
+
+        See Also
+        --------
+        __array_namespace_info__.capabilities,
+        __array_namespace_info__.default_device,
+        __array_namespace_info__.dtypes,
+        __array_namespace_info__.devices
+
+        Examples
+        --------
+        >>> info = np.__array_namespace_info__()
+        >>> info.default_dtypes()
+        {'real floating': numpy.float64,
+         'complex floating': numpy.complex128,
+         'integral': numpy.int64,
+         'indexing': numpy.int64}
+
+        """
+        if device not in ["cpu", None]:
+            raise ValueError(
+                'Device not understood. 
Only "cpu" is allowed, but received:' + f' {device}' + ) + return { + "real floating": dtype(float64), + "complex floating": dtype(complex128), + "integral": dtype(intp), + "indexing": dtype(intp), + } + + def dtypes(self, *, device=None, kind=None): + """ + The array API data types supported by NumPy. + + Note that this function only returns data types that are defined by + the array API. + + Parameters + ---------- + device : str, optional + The device to get the data types for. For NumPy, only ``'cpu'`` is + allowed. + kind : str or tuple of str, optional + The kind of data types to return. If ``None``, all data types are + returned. If a string, only data types of that kind are returned. + If a tuple, a dictionary containing the union of the given kinds + is returned. The following kinds are supported: + + - ``'bool'``: boolean data types (i.e., ``bool``). + - ``'signed integer'``: signed integer data types (i.e., ``int8``, + ``int16``, ``int32``, ``int64``). + - ``'unsigned integer'``: unsigned integer data types (i.e., + ``uint8``, ``uint16``, ``uint32``, ``uint64``). + - ``'integral'``: integer data types. Shorthand for ``('signed + integer', 'unsigned integer')``. + - ``'real floating'``: real-valued floating-point data types + (i.e., ``float32``, ``float64``). + - ``'complex floating'``: complex floating-point data types (i.e., + ``complex64``, ``complex128``). + - ``'numeric'``: numeric data types. Shorthand for ``('integral', + 'real floating', 'complex floating')``. + + Returns + ------- + dtypes : dict + A dictionary mapping the names of data types to the corresponding + NumPy data types. + + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.devices + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.dtypes(kind='signed integer') + {'int8': numpy.int8, + 'int16': numpy.int16, + 'int32': numpy.int32, + 'int64': numpy.int64} + + """ + if device not in ["cpu", None]: + raise ValueError( + 'Device not understood. 
Only "cpu" is allowed, but received:' + f' {device}' + ) + if kind is None: + return { + "bool": dtype(bool), + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + "float32": dtype(float32), + "float64": dtype(float64), + "complex64": dtype(complex64), + "complex128": dtype(complex128), + } + if kind == "bool": + return {"bool": bool} + if kind == "signed integer": + return { + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + } + if kind == "unsigned integer": + return { + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + } + if kind == "integral": + return { + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + } + if kind == "real floating": + return { + "float32": dtype(float32), + "float64": dtype(float64), + } + if kind == "complex floating": + return { + "complex64": dtype(complex64), + "complex128": dtype(complex128), + } + if kind == "numeric": + return { + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + "float32": dtype(float32), + "float64": dtype(float64), + "complex64": dtype(complex64), + "complex128": dtype(complex128), + } + if isinstance(kind, tuple): + res = {} + for k in kind: + res.update(self.dtypes(kind=k)) + return res + raise ValueError(f"unsupported kind: {kind!r}") + + def devices(self): + """ + The devices supported by NumPy. + + For NumPy, this always returns ``['cpu']``. + + Returns + ------- + devices : list of str + The devices supported by NumPy. 
+
+        See Also
+        --------
+        __array_namespace_info__.capabilities,
+        __array_namespace_info__.default_device,
+        __array_namespace_info__.default_dtypes,
+        __array_namespace_info__.dtypes
+
+        Examples
+        --------
+        >>> info = np.__array_namespace_info__()
+        >>> info.devices()
+        ['cpu']
+
+        """
+        return ["cpu"]
diff --git a/py311/lib/python3.11/site-packages/numpy/_array_api_info.pyi b/py311/lib/python3.11/site-packages/numpy/_array_api_info.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..396125143e922a0d4c7a6c84b5ab61b6cf7b5319
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/numpy/_array_api_info.pyi
@@ -0,0 +1,206 @@
+from typing import (
+    Literal,
+    Never,
+    TypeAlias,
+    TypedDict,
+    TypeVar,
+    final,
+    overload,
+    type_check_only,
+)
+
+import numpy as np
+
+_Device: TypeAlias = Literal["cpu"]
+_DeviceLike: TypeAlias = _Device | None
+
+_Capabilities = TypedDict(
+    "_Capabilities",
+    {
+        "boolean indexing": Literal[True],
+        "data-dependent shapes": Literal[True],
+        # matches the "max dimensions" entry returned by `capabilities()`
+        "max dimensions": Literal[64],
+    },
+)
+
+_DefaultDTypes = TypedDict(
+    "_DefaultDTypes",
+    {
+        "real floating": np.dtype[np.float64],
+        "complex floating": np.dtype[np.complex128],
+        "integral": np.dtype[np.intp],
+        "indexing": np.dtype[np.intp],
+    },
+)
+
+_KindBool: TypeAlias = Literal["bool"]
+_KindInt: TypeAlias = Literal["signed integer"]
+_KindUInt: TypeAlias = Literal["unsigned integer"]
+_KindInteger: TypeAlias = Literal["integral"]
+_KindFloat: TypeAlias = Literal["real floating"]
+_KindComplex: TypeAlias = Literal["complex floating"]
+_KindNumber: TypeAlias = Literal["numeric"]
+_Kind: TypeAlias = (
+    _KindBool
+    | _KindInt
+    | _KindUInt
+    | _KindInteger
+    | _KindFloat
+    | _KindComplex
+    | _KindNumber
+)
+
+_T1 = TypeVar("_T1")
+_T2 = TypeVar("_T2")
+_T3 = TypeVar("_T3")
+_Permute1: TypeAlias = _T1 | tuple[_T1]
+_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1]
+_Permute3: TypeAlias = (
+    tuple[_T1, _T2, _T3] | tuple[_T1, _T3, _T2]
+    | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1]
+    | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1]
+)
+
+@type_check_only
+class _DTypesBool(TypedDict):
+    bool: np.dtype[np.bool]
+
+@type_check_only
+class _DTypesInt(TypedDict):
+    int8: np.dtype[np.int8]
+    int16: np.dtype[np.int16]
+    int32: np.dtype[np.int32]
+    int64: np.dtype[np.int64]
+
+@type_check_only
+class _DTypesUInt(TypedDict):
+    uint8: np.dtype[np.uint8]
+    uint16: np.dtype[np.uint16]
+    uint32: np.dtype[np.uint32]
+    uint64: np.dtype[np.uint64]
+
+@type_check_only
+class _DTypesInteger(_DTypesInt, _DTypesUInt): ...
+
+@type_check_only
+class _DTypesFloat(TypedDict):
+    float32: np.dtype[np.float32]
+    float64: np.dtype[np.float64]
+
+@type_check_only
+class _DTypesComplex(TypedDict):
+    complex64: np.dtype[np.complex64]
+    complex128: np.dtype[np.complex128]
+
+@type_check_only
+class _DTypesNumber(_DTypesInteger, _DTypesFloat, _DTypesComplex): ...
+
+@type_check_only
+class _DTypes(_DTypesBool, _DTypesNumber): ...
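+
+# Illustration (not part of the stub API): the `_Permute*` aliases enumerate
+# every ordering of a kind-tuple, so the `dtypes()` overloads below resolve to
+# the same precise TypedDict regardless of argument order. A sketch:
+#
+#     info = np.__array_namespace_info__()
+#     a = info.dtypes(kind=("signed integer", "unsigned integer"))
+#     b = info.dtypes(kind=("unsigned integer", "signed integer"))
+#     # both `a` and `b` are inferred as `_DTypesInteger`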
+ +@type_check_only +class _DTypesUnion(TypedDict, total=False): + bool: np.dtype[np.bool] + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] + +_EmptyDict: TypeAlias = dict[Never, Never] + +@final +class __array_namespace_info__: + __module__: Literal["numpy"] = "numpy" + + def capabilities(self) -> _Capabilities: ... + def default_device(self) -> _Device: ... + def default_dtypes( + self, + *, + device: _DeviceLike = None, + ) -> _DefaultDTypes: ... + def devices(self) -> list[_Device]: ... + + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: None = None, + ) -> _DTypes: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindBool], + ) -> _DTypesBool: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindInt], + ) -> _DTypesInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindUInt], + ) -> _DTypesUInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindFloat], + ) -> _DTypesFloat: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindComplex], + ) -> _DTypesComplex: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: ( + _Permute1[_KindInteger] + | _Permute2[_KindInt, _KindUInt] + ), + ) -> _DTypesInteger: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: ( + _Permute1[_KindNumber] + | _Permute3[_KindInteger, _KindFloat, _KindComplex] + ), + ) -> _DTypesNumber: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: tuple[()], + ) -> _EmptyDict: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: tuple[_Kind, ...], + ) -> _DTypesUnion: ... diff --git a/py311/lib/python3.11/site-packages/numpy/_configtool.py b/py311/lib/python3.11/site-packages/numpy/_configtool.py new file mode 100644 index 0000000000000000000000000000000000000000..db7831c339518ba3a2a939eaa0006eded43ad129 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/_configtool.py @@ -0,0 +1,39 @@ +import argparse +import sys +from pathlib import Path + +from .lib._utils_impl import get_include +from .version import __version__ + + +def main() -> None: + parser = argparse.ArgumentParser() + parser.add_argument( + "--version", + action="version", + version=__version__, + help="Print the version and exit.", + ) + parser.add_argument( + "--cflags", + action="store_true", + help="Compile flag needed when using the NumPy headers.", + ) + parser.add_argument( + "--pkgconfigdir", + action="store_true", + help=("Print the pkgconfig directory in which `numpy.pc` is stored " + "(useful for setting $PKG_CONFIG_PATH)."), + ) + args = parser.parse_args() + if not sys.argv[1:]: + parser.print_help() + if args.cflags: + print("-I" + get_include()) + if args.pkgconfigdir: + _path = Path(get_include()) / '..' 
/ 'lib' / 'pkgconfig' + print(_path.resolve()) + + +if __name__ == "__main__": + main() diff --git a/py311/lib/python3.11/site-packages/numpy/_configtool.pyi b/py311/lib/python3.11/site-packages/numpy/_configtool.pyi new file mode 100644 index 0000000000000000000000000000000000000000..7e7363e797f3f5a33f66efd0349814c562e349e6 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/_configtool.pyi @@ -0,0 +1 @@ +def main() -> None: ... diff --git a/py311/lib/python3.11/site-packages/numpy/_distributor_init.py b/py311/lib/python3.11/site-packages/numpy/_distributor_init.py new file mode 100644 index 0000000000000000000000000000000000000000..f608036a2405528c0ddc4874b25b223dbe22c385 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/_distributor_init.py @@ -0,0 +1,15 @@ +""" Distributor init file + +Distributors: you can add custom code here to support particular distributions +of numpy. + +For example, this is a good place to put any BLAS/LAPACK initialization code. + +The numpy standard source distribution will not put code in this file, so you +can safely replace this file with your own version. +""" + +try: + from . import _distributor_init_local # noqa: F401 +except ImportError: + pass diff --git a/py311/lib/python3.11/site-packages/numpy/_distributor_init.pyi b/py311/lib/python3.11/site-packages/numpy/_distributor_init.pyi new file mode 100644 index 0000000000000000000000000000000000000000..94456aba2bcfaf1166eeb81199dff4515c8b9474 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/_distributor_init.pyi @@ -0,0 +1 @@ +# intentionally left blank diff --git a/py311/lib/python3.11/site-packages/numpy/_expired_attrs_2_0.py b/py311/lib/python3.11/site-packages/numpy/_expired_attrs_2_0.py new file mode 100644 index 0000000000000000000000000000000000000000..2eebf95bc558c609bd665ab120746420a5ccb432 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/_expired_attrs_2_0.py @@ -0,0 +1,78 @@ +""" +Dict of expired attributes that are discontinued since 2.0 release. +Each item is associated with a migration note. +""" + +__expired_attributes__ = { + "geterrobj": "Use the np.errstate context manager instead.", + "seterrobj": "Use the np.errstate context manager instead.", + "cast": "Use `np.asarray(arr, dtype=dtype)` instead.", + "source": "Use `inspect.getsource` instead.", + "lookfor": "Search NumPy's documentation directly.", + "who": "Use an IDE variable explorer or `locals()` instead.", + "fastCopyAndTranspose": "Use `arr.T.copy()` instead.", + "set_numeric_ops": + "For the general case, use `PyUFunc_ReplaceLoopBySignature`. 
" + "For ndarray subclasses, define the ``__array_ufunc__`` method " + "and override the relevant ufunc.", + "NINF": "Use `-np.inf` instead.", + "PINF": "Use `np.inf` instead.", + "NZERO": "Use `-0.0` instead.", + "PZERO": "Use `0.0` instead.", + "add_newdoc": + "It's still available as `np.lib.add_newdoc`.", + "add_docstring": + "It's still available as `np.lib.add_docstring`.", + "add_newdoc_ufunc": + "It's an internal function and doesn't have a replacement.", + "safe_eval": "Use `ast.literal_eval` instead.", + "float_": "Use `np.float64` instead.", + "complex_": "Use `np.complex128` instead.", + "longfloat": "Use `np.longdouble` instead.", + "singlecomplex": "Use `np.complex64` instead.", + "cfloat": "Use `np.complex128` instead.", + "longcomplex": "Use `np.clongdouble` instead.", + "clongfloat": "Use `np.clongdouble` instead.", + "string_": "Use `np.bytes_` instead.", + "unicode_": "Use `np.str_` instead.", + "Inf": "Use `np.inf` instead.", + "Infinity": "Use `np.inf` instead.", + "NaN": "Use `np.nan` instead.", + "infty": "Use `np.inf` instead.", + "issctype": "Use `issubclass(rep, np.generic)` instead.", + "maximum_sctype": + "Use a specific dtype instead. You should avoid relying " + "on any implicit mechanism and select the largest dtype of " + "a kind explicitly in the code.", + "obj2sctype": "Use `np.dtype(obj).type` instead.", + "sctype2char": "Use `np.dtype(obj).char` instead.", + "sctypes": "Access dtypes explicitly instead.", + "issubsctype": "Use `np.issubdtype` instead.", + "set_string_function": + "Use `np.set_printoptions` instead with a formatter for " + "custom printing of NumPy objects.", + "asfarray": "Use `np.asarray` with a proper dtype instead.", + "issubclass_": "Use `issubclass` builtin instead.", + "tracemalloc_domain": "It's now available from `np.lib`.", + "mat": "Use `np.asmatrix` instead.", + "recfromcsv": "Use `np.genfromtxt` with comma delimiter instead.", + "recfromtxt": "Use `np.genfromtxt` instead.", + "deprecate": "Emit `DeprecationWarning` with `warnings.warn` directly, " + "or use `typing.deprecated`.", + "deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` " + "directly, or use `typing.deprecated`.", + "find_common_type": + "Use `numpy.promote_types` or `numpy.result_type` instead. 
" + "To achieve semantics for the `scalar_types` argument, use " + "`numpy.result_type` and pass the Python values `0`, `0.0`, or `0j`.", + "round_": "Use `np.round` instead.", + "get_array_wrap": "", + "DataSource": "It's still available as `np.lib.npyio.DataSource`.", + "nbytes": "Use `np.dtype().itemsize` instead.", + "byte_bounds": "Now it's available under `np.lib.array_utils.byte_bounds`", + "compare_chararrays": + "It's still available as `np.char.compare_chararrays`.", + "format_parser": "It's still available as `np.rec.format_parser`.", + "alltrue": "Use `np.all` instead.", + "sometrue": "Use `np.any` instead.", +} diff --git a/py311/lib/python3.11/site-packages/numpy/_expired_attrs_2_0.pyi b/py311/lib/python3.11/site-packages/numpy/_expired_attrs_2_0.pyi new file mode 100644 index 0000000000000000000000000000000000000000..de6c2d10f9b0264238094e3ce9f8affafe5ca7a8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/_expired_attrs_2_0.pyi @@ -0,0 +1,61 @@ +from typing import Final, TypedDict, final, type_check_only + +@final +@type_check_only +class _ExpiredAttributesType(TypedDict): + geterrobj: str + seterrobj: str + cast: str + source: str + lookfor: str + who: str + fastCopyAndTranspose: str + set_numeric_ops: str + NINF: str + PINF: str + NZERO: str + PZERO: str + add_newdoc: str + add_docstring: str + add_newdoc_ufunc: str + safe_eval: str + float_: str + complex_: str + longfloat: str + singlecomplex: str + cfloat: str + longcomplex: str + clongfloat: str + string_: str + unicode_: str + Inf: str + Infinity: str + NaN: str + infty: str + issctype: str + maximum_sctype: str + obj2sctype: str + sctype2char: str + sctypes: str + issubsctype: str + set_string_function: str + asfarray: str + issubclass_: str + tracemalloc_domain: str + mat: str + recfromcsv: str + recfromtxt: str + deprecate: str + deprecate_with_doc: str + find_common_type: str + round_: str + get_array_wrap: str + DataSource: str + nbytes: str + byte_bounds: str + compare_chararrays: str + format_parser: str + alltrue: str + sometrue: str + +__expired_attributes__: Final[_ExpiredAttributesType] = ... diff --git a/py311/lib/python3.11/site-packages/numpy/_globals.py b/py311/lib/python3.11/site-packages/numpy/_globals.py new file mode 100644 index 0000000000000000000000000000000000000000..ada8d5c41af0c0ff939a28b6d690e2e392deedb9 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/_globals.py @@ -0,0 +1,121 @@ +""" +Module defining global singleton classes. + +This module raises a RuntimeError if an attempt to reload it is made. In that +way the identities of the classes defined here are fixed and will remain so +even if numpy itself is reloaded. In particular, a function like the following +will still work correctly after numpy is reloaded:: + + def foo(arg=np._NoValue): + if arg is np._NoValue: + ... + +That was not the case when the singleton classes were defined in the numpy +``__init__.py`` file. See gh-7844 for a discussion of the reload problem that +motivated this module. + +""" +import enum + +from ._utils import set_module as _set_module + +__all__ = ['_NoValue', '_CopyMode'] + + +# Disallow reloading this module so as to preserve the identities of the +# classes defined here. +if '_is_loaded' in globals(): + raise RuntimeError('Reloading numpy._globals is not allowed') +_is_loaded = True + + +class _NoValueType: + """Special keyword value. 
+
+    The instance of this class may be used as the default value assigned to a
+    keyword if no other obvious default (e.g., `None`) is suitable,
+
+    Common reasons for using this keyword are:
+
+    - A new keyword is added to a function, and that function forwards its
+      inputs to another function or method which can be defined outside of
+      NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims``
+      keyword was added that could only be forwarded if the user explicitly
+      specified ``keepdims``; downstream array libraries may not have added
+      the same keyword, so adding ``x.std(..., keepdims=keepdims)``
+      unconditionally could have broken previously working code.
+    - A keyword is being deprecated, and a deprecation warning must only be
+      emitted when the keyword is used.
+
+    """
+    __instance = None
+
+    def __new__(cls):
+        # ensure that only one instance exists
+        if not cls.__instance:
+            cls.__instance = super().__new__(cls)
+        return cls.__instance
+
+    def __repr__(self):
+        return "<no value>"
+
+
+_NoValue = _NoValueType()
+
+
+@_set_module("numpy")
+class _CopyMode(enum.Enum):
+    """
+    An enumeration for the copy modes supported
+    by numpy.copy() and numpy.array(). The following three modes are supported,
+
+    - ALWAYS: This means that a deep copy of the input
+      array will always be taken.
+    - IF_NEEDED: This means that a deep copy of the input
+      array will be taken only if necessary.
+    - NEVER: This means that the deep copy will never be taken.
+      If a copy cannot be avoided then a `ValueError` will be
+      raised.
+
+    Note that the buffer-protocol could in theory do copies. NumPy currently
+    assumes an object exporting the buffer protocol will never do this.
+    """
+
+    ALWAYS = True
+    NEVER = False
+    IF_NEEDED = 2
+
+    def __bool__(self):
+        # For backwards compatibility
+        if self == _CopyMode.ALWAYS:
+            return True
+
+        if self == _CopyMode.NEVER:
+            return False
+
+        raise ValueError(f"{self} is neither True nor False.")
+
+
+class _SignatureDescriptor:
+    # A descriptor to store on the ufunc __dict__ that avoids defining a
+    # signature for the ufunc class/type but allows the instance to have one.
+    # This is needed because inspect.signature() chokes on normal properties
+    # (as of 3.14 at least).
+    # We could also set __signature__ on the instance but this allows deferred
+    # computation of the signature.
+    def __get__(self, obj, objtype=None):
+        # Delay import, not a critical path but need to avoid circular import.
+        from numpy._core._internal import _ufunc_inspect_signature_builder
+
+        if obj is None:
+            # could also return None, which is accepted as "not set" by
+            # inspect.signature().
+            raise AttributeError(
+                "type object 'numpy.ufunc' has no attribute '__signature__'")
+
+        # Store on the instance, after this the descriptor won't be used.
+        obj.__signature__ = _ufunc_inspect_signature_builder(obj)
+        return obj.__signature__
+
+
+_signature_descriptor = _SignatureDescriptor()
diff --git a/py311/lib/python3.11/site-packages/numpy/_globals.pyi b/py311/lib/python3.11/site-packages/numpy/_globals.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..b2231a9636b0863be24555734d66df6da3464ac4
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/numpy/_globals.pyi
@@ -0,0 +1,17 @@
+__all__ = ["_CopyMode", "_NoValue"]
+
+import enum
+from typing import Final, final
+
+@final
+class _CopyMode(enum.Enum):
+    ALWAYS = True
+    NEVER = False
+    IF_NEEDED = 2
+
+    def __bool__(self, /) -> bool: ...
+
+@final
+class _NoValueType: ...
+
+_NoValue: Final[_NoValueType] = ...
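[Editor's note: a minimal, illustrative sketch of how the two singletons
defined above behave in practice; the wrapper `stddev` is a hypothetical
example, not NumPy API.]

    import numpy as np

    # `np._NoValue` lets a wrapper tell "keyword omitted" apart from any
    # real value, so it only forwards keywords the caller actually passed.
    def stddev(x, keepdims=np._NoValue):
        if keepdims is np._NoValue:        # omitted: don't forward it
            return x.std()
        return x.std(keepdims=keepdims)    # explicit: forward as given

    # `_CopyMode` members coerce to bool for backwards compatibility,
    # except IF_NEEDED, which is deliberately neither True nor False.
    assert bool(np._CopyMode.ALWAYS) is True
    assert bool(np._CopyMode.NEVER) is False
    try:
        bool(np._CopyMode.IF_NEEDED)
    except ValueError:
        pass  # raised, as documented in _CopyMode.__bool__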
diff --git a/py311/lib/python3.11/site-packages/numpy/_pytesttester.py b/py311/lib/python3.11/site-packages/numpy/_pytesttester.py new file mode 100644 index 0000000000000000000000000000000000000000..77342e44aea0251c513dd0ed7a8fb67b074e298f --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/_pytesttester.py @@ -0,0 +1,201 @@ +""" +Pytest test running. + +This module implements the ``test()`` function for NumPy modules. The usual +boiler plate for doing that is to put the following in the module +``__init__.py`` file:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + +Warnings filtering and other runtime settings should be dealt with in the +``pytest.ini`` file in the numpy repo root. The behavior of the test depends on +whether or not that file is found as follows: + +* ``pytest.ini`` is present (develop mode) + All warnings except those explicitly filtered out are raised as error. +* ``pytest.ini`` is absent (release mode) + DeprecationWarnings and PendingDeprecationWarnings are ignored, other + warnings are passed through. + +In practice, tests run from the numpy repo are run in development mode with +``spin``, through the standard ``spin test`` invocation or from an inplace +build with ``pytest numpy``. + +This module is imported by every numpy subpackage, so lies at the top level to +simplify circular import issues. For the same reason, it contains no numpy +imports at module scope, instead importing numpy within function calls. +""" +import os +import sys + +__all__ = ['PytestTester'] + + +def _show_numpy_info(): + import numpy as np + + print(f"NumPy version {np.__version__}") + info = np.lib._utils_impl._opt_info() + print("NumPy CPU features: ", (info or 'nothing enabled')) + + +class PytestTester: + """ + Pytest test runner. + + A test function is typically added to a package's __init__.py like so:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__).test + del PytestTester + + Calling this test function finds and runs all tests associated with the + module and all its sub-modules. + + Attributes + ---------- + module_name : str + Full path to the package to test. + + Parameters + ---------- + module_name : module name + The name of the module to test. + + Notes + ----- + Unlike the previous ``nose``-based implementation, this class is not + publicly exposed as it performs some ``numpy``-specific warning + suppression. + + """ + def __init__(self, module_name): + self.module_name = module_name + self.__module__ = module_name + + def __call__(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, durations=-1, tests=None): + """ + Run tests for module using pytest. + + Parameters + ---------- + label : {'fast', 'full'}, optional + Identifies the tests to run. When set to 'fast', tests decorated + with `pytest.mark.slow` are skipped, when 'full', the slow marker + is ignored. + verbose : int, optional + Verbosity value for test outputs, in the range 1-3. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to pytests. + doctests : bool, optional + .. note:: Not supported + coverage : bool, optional + If True, report coverage of NumPy code. Default is False. + Requires installation of (pip) pytest-cov. + durations : int, optional + If < 0, do nothing, If 0, report time of all tests, if > 0, + report the time of the slowest `timer` tests. Default is -1. 
+ tests : test or list of tests + Tests to be executed with pytest '--pyargs' + + Returns + ------- + result : bool + Return True on success, false otherwise. + + Notes + ----- + Each NumPy module exposes `test` in its namespace to run all tests for + it. For example, to run all tests for numpy.lib: + + >>> np.lib.test() #doctest: +SKIP + + Examples + -------- + >>> result = np.lib.test() #doctest: +SKIP + ... + 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds + >>> result + True + + """ + import warnings + + import pytest + + module = sys.modules[self.module_name] + module_path = os.path.abspath(module.__path__[0]) + + # setup the pytest arguments + pytest_args = ["-l"] + + # offset verbosity. The "-q" cancels a "-v". + pytest_args += ["-q"] + + if sys.version_info < (3, 12): + with warnings.catch_warnings(): + warnings.simplefilter("always") + # Filter out distutils cpu warnings (could be localized to + # distutils tests). ASV has problems with top level import, + # so fetch module for suppression here. + from numpy.distutils import cpuinfo # noqa: F401 + + # Filter out annoying import messages. Want these in both develop and + # release mode. + pytest_args += [ + "-W ignore:Not importing directory", + "-W ignore:numpy.dtype size changed", + "-W ignore:numpy.ufunc size changed", + "-W ignore::UserWarning:cpuinfo", + ] + + # When testing matrices, ignore their PendingDeprecationWarnings + pytest_args += [ + "-W ignore:the matrix subclass is not", + "-W ignore:Importing from numpy.matlib is", + ] + + if doctests: + pytest_args += ["--doctest-modules"] + + if extra_argv: + pytest_args += list(extra_argv) + + if verbose > 1: + pytest_args += ["-" + "v" * (verbose - 1)] + + if coverage: + pytest_args += ["--cov=" + module_path] + + if label == "fast": + # not importing at the top level to avoid circular import of module + from numpy.testing import IS_PYPY + if IS_PYPY: + pytest_args += ["-m", "not slow and not slow_pypy"] + else: + pytest_args += ["-m", "not slow"] + + elif label != "full": + pytest_args += ["-m", label] + + if durations >= 0: + pytest_args += [f"--durations={durations}"] + + if tests is None: + tests = [self.module_name] + + pytest_args += ["--pyargs"] + list(tests) + + # run tests. + _show_numpy_info() + + try: + code = pytest.main(pytest_args) + except SystemExit as exc: + code = exc.code + + return code == 0 diff --git a/py311/lib/python3.11/site-packages/numpy/_pytesttester.pyi b/py311/lib/python3.11/site-packages/numpy/_pytesttester.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bd71239314b409102656a2a2826867f737876227 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/_pytesttester.pyi @@ -0,0 +1,18 @@ +from collections.abc import Iterable +from typing import Literal as L + +__all__ = ["PytestTester"] + +class PytestTester: + module_name: str + def __init__(self, module_name: str) -> None: ... + def __call__( + self, + label: L["fast", "full"] = "fast", + verbose: int = 1, + extra_argv: Iterable[str] | None = None, + doctests: L[False] = False, + coverage: bool = False, + durations: int = -1, + tests: Iterable[str] | None = None, + ) -> bool: ... 
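[Editor's note: a short usage sketch of `PytestTester`, assuming pytest is
installed; it mirrors the boilerplate quoted in the module docstring above.]

    # In a subpackage's __init__.py (the documented boilerplate):
    from numpy._pytesttester import PytestTester
    test = PytestTester(__name__)
    del PytestTester

    # Callers can then run that subpackage's suite, e.g.:
    #   import numpy as np
    #   ok = np.lib.test(label="fast", verbose=2, durations=10)
    # label="fast" skips tests marked `slow`, durations=10 reports the ten
    # slowest tests, and the call returns True when pytest exits with code 0.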
diff --git a/py311/lib/python3.11/site-packages/numpy/conftest.py b/py311/lib/python3.11/site-packages/numpy/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3c96ef3bc39f27f069feb269eb65c9bea55f7a0
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/numpy/conftest.py
@@ -0,0 +1,248 @@
+"""
+Pytest configuration and fixtures for the Numpy test suite.
+"""
+import os
+import sys
+import tempfile
+import warnings
+from contextlib import contextmanager
+from pathlib import Path
+
+import hypothesis
+import pytest
+
+import numpy
+from numpy._core._multiarray_tests import get_fpu_mode
+from numpy.testing._private.utils import NOGIL_BUILD
+
+try:
+    from scipy_doctest.conftest import dt_config
+    HAVE_SCPDT = True
+except ModuleNotFoundError:
+    HAVE_SCPDT = False
+
+try:
+    import pytest_run_parallel  # noqa: F401
+    PARALLEL_RUN_AVAILABLE = True
+except ModuleNotFoundError:
+    PARALLEL_RUN_AVAILABLE = False
+
+_old_fpu_mode = None
+_collect_results = {}
+
+# Use a known and persistent tmpdir for hypothesis' caches, which
+# can be automatically cleared by the OS or user.
+hypothesis.configuration.set_hypothesis_home_dir(
+    os.path.join(tempfile.gettempdir(), ".hypothesis")
+)
+
+# We register two custom profiles for Numpy - for details see
+# https://hypothesis.readthedocs.io/en/latest/settings.html
+# The first is designed for our own CI runs; the latter also
+# forces determinism and is designed for use via np.test()
+hypothesis.settings.register_profile(
+    name="numpy-profile", deadline=None, print_blob=True,
+)
+hypothesis.settings.register_profile(
+    name="np.test() profile",
+    deadline=None, print_blob=True, database=None, derandomize=True,
+    suppress_health_check=list(hypothesis.HealthCheck),
+)
+# Note that the default profile is chosen based on the presence
+# of pytest.ini, but can be overridden by passing the
+# --hypothesis-profile=NAME argument to pytest.
+_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
+hypothesis.settings.load_profile(
+    "numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile"
+)
+
+# The experimentalAPI is used in _umath_tests
+os.environ["NUMPY_EXPERIMENTAL_DTYPE_API"] = "1"
+
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "valgrind_error: Tests that are known to error under valgrind.")
+    config.addinivalue_line("markers",
+        "leaks_references: Tests that are known to leak references.")
+    config.addinivalue_line("markers",
+        "slow: Tests that are very slow.")
+    config.addinivalue_line("markers",
+        "slow_pypy: Tests that are very slow on pypy.")
+    if not PARALLEL_RUN_AVAILABLE:
+        config.addinivalue_line("markers",
+            "parallel_threads(n): run the given test function in parallel "
+            "using `n` threads.",
+        )
+        config.addinivalue_line("markers",
+            "iterations(n): run the given test function `n` times in each thread",
+        )
+        config.addinivalue_line("markers",
+            "thread_unsafe: mark the test function as single-threaded",
+        )
+
+
+def pytest_addoption(parser):
+    parser.addoption("--available-memory", action="store", default=None,
+                     help=("Set amount of memory available for running the "
+                           "test suite. This can result in tests requiring "
+                           "especially large amounts of memory to be skipped. "
+                           "Equivalent to setting environment variable "
+                           "NPY_AVAILABLE_MEM. 
Default: determined" + "automatically.")) + + +gil_enabled_at_start = True +if NOGIL_BUILD: + gil_enabled_at_start = sys._is_gil_enabled() + + +def pytest_sessionstart(session): + available_mem = session.config.getoption('available_memory') + if available_mem is not None: + os.environ['NPY_AVAILABLE_MEM'] = available_mem + + +def pytest_terminal_summary(terminalreporter, exitstatus, config): + if NOGIL_BUILD and not gil_enabled_at_start and sys._is_gil_enabled(): + tr = terminalreporter + tr.ensure_newline() + tr.section("GIL re-enabled", sep="=", red=True, bold=True) + tr.line("The GIL was re-enabled at runtime during the tests.") + tr.line("This can happen with no test failures if the RuntimeWarning") + tr.line("raised by Python when this happens is filtered by a test.") + tr.line("") + tr.line("Please ensure all new C modules declare support for running") + tr.line("without the GIL. Any new tests that intentionally imports ") + tr.line("code that re-enables the GIL should do so in a subprocess.") + pytest.exit("GIL re-enabled during tests", returncode=1) + +# FIXME when yield tests are gone. +@pytest.hookimpl(tryfirst=True) +def pytest_itemcollected(item): + """ + Check FPU precision mode was not changed during test collection. + + The clumsy way we do it here is mainly necessary because numpy + still uses yield tests, which can execute code at test collection + time. + """ + global _old_fpu_mode + + mode = get_fpu_mode() + + if _old_fpu_mode is None: + _old_fpu_mode = mode + elif mode != _old_fpu_mode: + _collect_results[item] = (_old_fpu_mode, mode) + _old_fpu_mode = mode + + # mark f2py tests as thread unsafe + if Path(item.fspath).parent == Path(__file__).parent / 'f2py' / 'tests': + item.add_marker(pytest.mark.thread_unsafe( + reason="f2py tests are thread-unsafe")) + + +@pytest.fixture(scope="function", autouse=True) +def check_fpu_mode(request): + """ + Check FPU precision mode was not changed during the test. + """ + old_mode = get_fpu_mode() + yield + new_mode = get_fpu_mode() + + if old_mode != new_mode: + raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to " + f"{new_mode:#x} during the test") + + collect_result = _collect_results.get(request.node) + if collect_result is not None: + old_mode, new_mode = collect_result + raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to " + f"{new_mode:#x} when collecting the test") + + +@pytest.fixture(autouse=True) +def add_np(doctest_namespace): + doctest_namespace['np'] = numpy + + +if HAVE_SCPDT: + + @contextmanager + def warnings_errors_and_rng(test=None): + """Filter out the wall of DeprecationWarnings. + """ + msgs = ["The numpy.linalg.linalg", + "The numpy.fft.helper", + "dep_util", + "pkg_resources", + "numpy.core.umath", + "msvccompiler", + "Deprecated call", + "numpy.core", + "Importing from numpy.matlib", + "This function is deprecated.", # random_integers + "Data type alias 'a'", # numpy.rec.fromfile + "Arrays of 2-dimensional vectors", # matlib.cross + "NumPy warning suppression and assertion utilities are deprecated." 
+ ] + msg = "|".join(msgs) + + msgs_r = [ + "invalid value encountered", + "divide by zero encountered" + ] + msg_r = "|".join(msgs_r) + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', category=DeprecationWarning, message=msg + ) + warnings.filterwarnings( + 'ignore', category=RuntimeWarning, message=msg_r + ) + yield + + # find and check doctests under this context manager + dt_config.user_context_mgr = warnings_errors_and_rng + + # numpy specific tweaks from refguide-check + dt_config.rndm_markers.add('#uninitialized') + dt_config.rndm_markers.add('# uninitialized') + + # make the checker pick on mismatched dtypes + dt_config.strict_check = True + + import doctest + dt_config.optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS + + # recognize the StringDType repr + dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType + + # temporary skips + dt_config.skiplist = { + 'numpy.savez', # unclosed file + 'numpy.matlib.savez', + 'numpy.__array_namespace_info__', + 'numpy.matlib.__array_namespace_info__', + } + + # xfail problematic tutorials + dt_config.pytest_extra_xfail = { + 'how-to-verify-bug.rst': '', + 'c-info.ufunc-tutorial.rst': '', + 'basics.interoperability.rst': 'needs pandas', + 'basics.dispatch.rst': 'errors out in /testing/overrides.py', + 'basics.subclassing.rst': '.. testcode:: admonitions not understood', + 'misc.rst': 'manipulates warnings', + } + + # ignores are for things fail doctest collection (optionals etc) + dt_config.pytest_extra_ignore = [ + 'numpy/distutils', + 'numpy/_core/cversions.py', + 'numpy/_pyinstaller', + 'numpy/random/_examples', + 'numpy/f2py/_backends/_distutils.py', + ] diff --git a/py311/lib/python3.11/site-packages/numpy/dtypes.py b/py311/lib/python3.11/site-packages/numpy/dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..550a29e18f292e65600108804636b833c75d1be4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/dtypes.py @@ -0,0 +1,41 @@ +""" +This module is home to specific dtypes related functionality and their classes. +For more general information about dtypes, also see `numpy.dtype` and +:ref:`arrays.dtypes`. + +Similar to the builtin ``types`` module, this submodule defines types (classes) +that are not widely used directly. + +.. versionadded:: NumPy 1.25 + + The dtypes module is new in NumPy 1.25. Previously DType classes were + only accessible indirectly. + + +DType classes +------------- + +The following are the classes of the corresponding NumPy dtype instances and +NumPy scalar types. The classes can be used in ``isinstance`` checks and can +also be instantiated or used directly. Direct use of these classes is not +typical, since their scalar counterparts (e.g. ``np.float64``) or strings +like ``"float64"`` can be used. +""" + +# See doc/source/reference/routines.dtypes.rst for module-level docs + +__all__ = [] + + +def _add_dtype_helper(DType, alias): + # Function to add DTypes a bit more conveniently without channeling them + # through `numpy._core._multiarray_umath` namespace or similar. 
+ from numpy import dtypes + + setattr(dtypes, DType.__name__, DType) + __all__.append(DType.__name__) + + if alias: + alias = alias.removeprefix("numpy.dtypes.") + setattr(dtypes, alias, DType) + __all__.append(alias) diff --git a/py311/lib/python3.11/site-packages/numpy/dtypes.pyi b/py311/lib/python3.11/site-packages/numpy/dtypes.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3e34113edd4f4d87847e01ebe5a90a002a4aab00 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/dtypes.pyi @@ -0,0 +1,630 @@ +# ruff: noqa: ANN401 +from typing import ( + Any, + Generic, + Literal as L, + LiteralString, + Never, + NoReturn, + Self, + TypeAlias, + final, + overload, + type_check_only, +) +from typing_extensions import TypeVar + +import numpy as np + +__all__ = [ # noqa: RUF022 + "BoolDType", + "Int8DType", + "ByteDType", + "UInt8DType", + "UByteDType", + "Int16DType", + "ShortDType", + "UInt16DType", + "UShortDType", + "Int32DType", + "IntDType", + "UInt32DType", + "UIntDType", + "Int64DType", + "LongDType", + "UInt64DType", + "ULongDType", + "LongLongDType", + "ULongLongDType", + "Float16DType", + "Float32DType", + "Float64DType", + "LongDoubleDType", + "Complex64DType", + "Complex128DType", + "CLongDoubleDType", + "ObjectDType", + "BytesDType", + "StrDType", + "VoidDType", + "DateTime64DType", + "TimeDelta64DType", + "StringDType", +] + +# Helper base classes (typing-only) + +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) + +@type_check_only +class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + names: None # pyright: ignore[reportIncompatibleVariableOverride] + def __new__(cls, /) -> Self: ... + def __getitem__(self, key: Any, /) -> NoReturn: ... + @property + def base(self) -> np.dtype[_ScalarT_co]: ... + @property + def fields(self) -> None: ... + @property + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... + @property + def ndim(self) -> L[0]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def subdtype(self) -> None: ... + +@type_check_only +class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore[misc] + @property + def flags(self) -> L[0]: ... + @property + def hasobject(self) -> L[False]: ... + +# Helper mixins (typing-only): + +_KindT_co = TypeVar("_KindT_co", bound=LiteralString, covariant=True) +_CharT_co = TypeVar("_CharT_co", bound=LiteralString, covariant=True) +_NumT_co = TypeVar("_NumT_co", bound=int, covariant=True) + +@type_check_only +class _TypeCodes(Generic[_KindT_co, _CharT_co, _NumT_co]): + @final + @property + def kind(self) -> _KindT_co: ... + @final + @property + def char(self) -> _CharT_co: ... + @final + @property + def num(self) -> _NumT_co: ... + +@type_check_only +class _NoOrder: + @final + @property + def byteorder(self) -> L["|"]: ... + +@type_check_only +class _NativeOrder: + @final + @property + def byteorder(self) -> L["="]: ... + +_DataSize_co = TypeVar("_DataSize_co", bound=int, covariant=True) +_ItemSize_co = TypeVar("_ItemSize_co", bound=int, covariant=True, default=int) + +@type_check_only +class _NBit(Generic[_DataSize_co, _ItemSize_co]): + @final + @property + def alignment(self) -> _DataSize_co: ... + @final + @property + def itemsize(self) -> _ItemSize_co: ... + +@type_check_only +class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ... 
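+# Editor's note (illustrative, not part of the upstream stub): the concrete
+# classes below describe dtype *instances*, so they work in ``isinstance``
+# checks and can be instantiated directly, e.g.:
+#
+#     >>> import numpy as np
+#     >>> isinstance(np.dtype("float64"), np.dtypes.Float64DType)
+#     True
+#     >>> np.dtypes.Int16DType() == np.dtype(np.int16)
+#     True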
+ +# Boolean: + +@final +class BoolDType( # type: ignore[misc] + _TypeCodes[L["b"], L["?"], L[0]], + _8Bit, + _LiteralDType[np.bool], +): + @property + def name(self) -> L["bool"]: ... + @property + def str(self) -> L["|b1"]: ... + +# Sized integers: + +@final +class Int8DType( # type: ignore[misc] + _TypeCodes[L["i"], L["b"], L[1]], + _8Bit, + _LiteralDType[np.int8], +): + @property + def name(self) -> L["int8"]: ... + @property + def str(self) -> L["|i1"]: ... + +@final +class UInt8DType( # type: ignore[misc] + _TypeCodes[L["u"], L["B"], L[2]], + _8Bit, + _LiteralDType[np.uint8], +): + @property + def name(self) -> L["uint8"]: ... + @property + def str(self) -> L["|u1"]: ... + +@final +class Int16DType( # type: ignore[misc] + _TypeCodes[L["i"], L["h"], L[3]], + _NativeOrder, + _NBit[L[2], L[2]], + _LiteralDType[np.int16], +): + @property + def name(self) -> L["int16"]: ... + @property + def str(self) -> L["i2"]: ... + +@final +class UInt16DType( # type: ignore[misc] + _TypeCodes[L["u"], L["H"], L[4]], + _NativeOrder, + _NBit[L[2], L[2]], + _LiteralDType[np.uint16], +): + @property + def name(self) -> L["uint16"]: ... + @property + def str(self) -> L["u2"]: ... + +@final +class Int32DType( # type: ignore[misc] + _TypeCodes[L["i"], L["i", "l"], L[5, 7]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.int32], +): + @property + def name(self) -> L["int32"]: ... + @property + def str(self) -> L["i4"]: ... + +@final +class UInt32DType( # type: ignore[misc] + _TypeCodes[L["u"], L["I", "L"], L[6, 8]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.uint32], +): + @property + def name(self) -> L["uint32"]: ... + @property + def str(self) -> L["u4"]: ... + +@final +class Int64DType( # type: ignore[misc] + _TypeCodes[L["i"], L["l", "q"], L[7, 9]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.int64], +): + @property + def name(self) -> L["int64"]: ... + @property + def str(self) -> L["i8"]: ... + +@final +class UInt64DType( # type: ignore[misc] + _TypeCodes[L["u"], L["L", "Q"], L[8, 10]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.uint64], +): + @property + def name(self) -> L["uint64"]: ... + @property + def str(self) -> L["u8"]: ... + +# Standard C-named version/alias: +# NOTE: Don't make these `Final`: it will break stubtest +ByteDType = Int8DType +UByteDType = UInt8DType +ShortDType = Int16DType +UShortDType = UInt16DType + +@final +class IntDType( # type: ignore[misc] + _TypeCodes[L["i"], L["i"], L[5]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.intc], +): + @property + def name(self) -> L["int32"]: ... + @property + def str(self) -> L["i4"]: ... + +@final +class UIntDType( # type: ignore[misc] + _TypeCodes[L["u"], L["I"], L[6]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.uintc], +): + @property + def name(self) -> L["uint32"]: ... + @property + def str(self) -> L["u4"]: ... + +@final +class LongDType( # type: ignore[misc] + _TypeCodes[L["i"], L["l"], L[7]], + _NativeOrder, + _NBit[L[4, 8], L[4, 8]], + _LiteralDType[np.long], +): + @property + def name(self) -> L["int32", "int64"]: ... + @property + def str(self) -> L["i4", "i8"]: ... + +@final +class ULongDType( # type: ignore[misc] + _TypeCodes[L["u"], L["L"], L[8]], + _NativeOrder, + _NBit[L[4, 8], L[4, 8]], + _LiteralDType[np.ulong], +): + @property + def name(self) -> L["uint32", "uint64"]: ... + @property + def str(self) -> L["u4", "u8"]: ... 
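+# Editor's note (illustrative, not part of the upstream stub): ``_TypeCodes``
+# and ``_NBit`` pin the runtime ``kind``/``char``/``num`` and
+# ``alignment``/``itemsize`` attributes as literals; ``LongDType`` and
+# ``ULongDType`` above admit two widths because C ``long`` is 4 bytes on
+# some platforms (e.g. 64-bit Windows) and 8 bytes on others. For example:
+#
+#     >>> dt = np.dtypes.Int16DType()
+#     >>> (dt.kind, dt.char, dt.num, dt.itemsize)
+#     ('i', 'h', 3, 2)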
+ +@final +class LongLongDType( # type: ignore[misc] + _TypeCodes[L["i"], L["q"], L[9]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.longlong], +): + @property + def name(self) -> L["int64"]: ... + @property + def str(self) -> L["i8"]: ... + +@final +class ULongLongDType( # type: ignore[misc] + _TypeCodes[L["u"], L["Q"], L[10]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.ulonglong], +): + @property + def name(self) -> L["uint64"]: ... + @property + def str(self) -> L["u8"]: ... + +# Floats: + +@final +class Float16DType( # type: ignore[misc] + _TypeCodes[L["f"], L["e"], L[23]], + _NativeOrder, + _NBit[L[2], L[2]], + _LiteralDType[np.float16], +): + @property + def name(self) -> L["float16"]: ... + @property + def str(self) -> L["f2"]: ... + +@final +class Float32DType( # type: ignore[misc] + _TypeCodes[L["f"], L["f"], L[11]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.float32], +): + @property + def name(self) -> L["float32"]: ... + @property + def str(self) -> L["f4"]: ... + +@final +class Float64DType( # type: ignore[misc] + _TypeCodes[L["f"], L["d"], L[12]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.float64], +): + @property + def name(self) -> L["float64"]: ... + @property + def str(self) -> L["f8"]: ... + +@final +class LongDoubleDType( # type: ignore[misc] + _TypeCodes[L["f"], L["g"], L[13]], + _NativeOrder, + _NBit[L[8, 12, 16], L[8, 12, 16]], + _LiteralDType[np.longdouble], +): + @property + def name(self) -> L["float64", "float96", "float128"]: ... + @property + def str(self) -> L["f8", "f12", "f16"]: ... + +# Complex: + +@final +class Complex64DType( # type: ignore[misc] + _TypeCodes[L["c"], L["F"], L[14]], + _NativeOrder, + _NBit[L[4], L[8]], + _LiteralDType[np.complex64], +): + @property + def name(self) -> L["complex64"]: ... + @property + def str(self) -> L["c8"]: ... + +@final +class Complex128DType( # type: ignore[misc] + _TypeCodes[L["c"], L["D"], L[15]], + _NativeOrder, + _NBit[L[8], L[16]], + _LiteralDType[np.complex128], +): + @property + def name(self) -> L["complex128"]: ... + @property + def str(self) -> L["c16"]: ... + +@final +class CLongDoubleDType( # type: ignore[misc] + _TypeCodes[L["c"], L["G"], L[16]], + _NativeOrder, + _NBit[L[8, 12, 16], L[16, 24, 32]], + _LiteralDType[np.clongdouble], +): + @property + def name(self) -> L["complex128", "complex192", "complex256"]: ... + @property + def str(self) -> L["c16", "c24", "c32"]: ... + +# Python objects: + +@final +class ObjectDType( # type: ignore[misc] + _TypeCodes[L["O"], L["O"], L[17]], + _NoOrder, + _NBit[L[8], L[8]], + _SimpleDType[np.object_], +): + @property + def hasobject(self) -> L[True]: ... + @property + def name(self) -> L["object"]: ... + @property + def str(self) -> L["|O"]: ... + +# Flexible: + +@final +class BytesDType( # type: ignore[misc] + _TypeCodes[L["S"], L["S"], L[18]], + _NoOrder, + _NBit[L[1], _ItemSize_co], + _SimpleDType[np.bytes_], + Generic[_ItemSize_co], +): + def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ... + @property + def hasobject(self) -> L[False]: ... + @property + def name(self) -> LiteralString: ... + @property + def str(self) -> LiteralString: ... + +@final +class StrDType( # type: ignore[misc] + _TypeCodes[L["U"], L["U"], L[19]], + _NativeOrder, + _NBit[L[4], _ItemSize_co], + _SimpleDType[np.str_], + Generic[_ItemSize_co], +): + def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ... + @property + def hasobject(self) -> L[False]: ... + @property + def name(self) -> LiteralString: ... 
+ @property + def str(self) -> LiteralString: ... + +@final +class VoidDType( # type: ignore[misc] + _TypeCodes[L["V"], L["V"], L[20]], + _NoOrder, + _NBit[L[1], _ItemSize_co], + np.dtype[np.void], # pyright: ignore[reportGeneralTypeIssues] + Generic[_ItemSize_co], +): + # NOTE: `VoidDType(...)` raises a `TypeError` at the moment + def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ... + @property + def base(self) -> Self: ... + @property + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... + @property + def ndim(self) -> L[0]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def subdtype(self) -> None: ... + @property + def name(self) -> LiteralString: ... + @property + def str(self) -> LiteralString: ... + +# Other: + +_DateUnit: TypeAlias = L["Y", "M", "W", "D"] +_TimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] +_DateTimeUnit: TypeAlias = _DateUnit | _TimeUnit + +@final +class DateTime64DType( # type: ignore[misc] + _TypeCodes[L["M"], L["M"], L[21]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.datetime64], +): + # NOTE: `DateTime64DType(...)` raises a `TypeError` at the moment + # TODO: Once implemented, don't forget the`unit: L["μs"]` overload. + def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ... + @property + def name(self) -> L[ + "datetime64", + "datetime64[Y]", + "datetime64[M]", + "datetime64[W]", + "datetime64[D]", + "datetime64[h]", + "datetime64[m]", + "datetime64[s]", + "datetime64[ms]", + "datetime64[us]", + "datetime64[ns]", + "datetime64[ps]", + "datetime64[fs]", + "datetime64[as]", + ]: ... + @property + def str(self) -> L[ + "M8", + "M8[Y]", + "M8[M]", + "M8[W]", + "M8[D]", + "M8[h]", + "M8[m]", + "M8[s]", + "M8[ms]", + "M8[us]", + "M8[ns]", + "M8[ps]", + "M8[fs]", + "M8[as]", + ]: ... + +@final +class TimeDelta64DType( # type: ignore[misc] + _TypeCodes[L["m"], L["m"], L[22]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.timedelta64], +): + # NOTE: `TimeDelta64DType(...)` raises a `TypeError` at the moment + # TODO: Once implemented, don't forget to overload on `unit: L["μs"]`. + def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ... + @property + def name(self) -> L[ + "timedelta64", + "timedelta64[Y]", + "timedelta64[M]", + "timedelta64[W]", + "timedelta64[D]", + "timedelta64[h]", + "timedelta64[m]", + "timedelta64[s]", + "timedelta64[ms]", + "timedelta64[us]", + "timedelta64[ns]", + "timedelta64[ps]", + "timedelta64[fs]", + "timedelta64[as]", + ]: ... + @property + def str(self) -> L[ + "m8", + "m8[Y]", + "m8[M]", + "m8[W]", + "m8[D]", + "m8[h]", + "m8[m]", + "m8[s]", + "m8[ms]", + "m8[us]", + "m8[ns]", + "m8[ps]", + "m8[fs]", + "m8[as]", + ]: ... + +_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) + +@final +class StringDType( # type: ignore[misc] + _TypeCodes[L["T"], L["T"], L[2056]], + _NativeOrder, + _NBit[L[8], L[16]], + # TODO(jorenham): change once we have a string scalar type: + # https://github.com/numpy/numpy/issues/28165 + np.dtype[str], # type: ignore[type-var] # pyright: ignore[reportGeneralTypeIssues, reportInvalidTypeArguments] + Generic[_NaObjectT_co], +): + @property + def na_object(self) -> _NaObjectT_co: ... + @property + def coerce(self) -> L[True]: ... + + # + @overload + def __new__(cls, /, *, coerce: bool = True) -> Self: ... + @overload + def __new__(cls, /, *, na_object: _NaObjectT_co, coerce: bool = True) -> Self: ... + + # + def __getitem__(self, key: Never, /) -> NoReturn: ... 
# type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @property + def fields(self) -> None: ... + @property + def base(self) -> Self: ... + @property + def ndim(self) -> L[0]: ... + @property + def shape(self) -> tuple[()]: ... + + # + @property + def name(self) -> L["StringDType64", "StringDType128"]: ... + @property + def subdtype(self) -> None: ... + @property + def type(self) -> type[str]: ... + @property + def str(self) -> L["|T8", "|T16"]: ... + + # + @property + def hasobject(self) -> L[True]: ... + @property + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... diff --git a/py311/lib/python3.11/site-packages/numpy/exceptions.py b/py311/lib/python3.11/site-packages/numpy/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..cf70b4a4ce3bd752fc9dc10d32de8646217d9ef1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/exceptions.py @@ -0,0 +1,246 @@ +""" +Exceptions and Warnings +======================= + +General exceptions used by NumPy. Note that some exceptions may be module +specific, such as linear algebra errors. + +.. versionadded:: NumPy 1.25 + + The exceptions module is new in NumPy 1.25. + +.. currentmodule:: numpy.exceptions + +Warnings +-------- +.. autosummary:: + :toctree: generated/ + + ComplexWarning Given when converting complex to real. + VisibleDeprecationWarning Same as a DeprecationWarning, but more visible. + RankWarning Issued when the design matrix is rank deficient. + +Exceptions +---------- +.. autosummary:: + :toctree: generated/ + + AxisError Given when an axis was invalid. + DTypePromotionError Given when no common dtype could be found. + TooHardError Error specific to `numpy.shares_memory`. + +""" + + +__all__ = [ + "ComplexWarning", "VisibleDeprecationWarning", "ModuleDeprecationWarning", + "TooHardError", "AxisError", "DTypePromotionError"] + + +# Disallow reloading this module so as to preserve the identities of the +# classes defined here. +if '_is_loaded' in globals(): + raise RuntimeError('Reloading numpy._globals is not allowed') +_is_loaded = True + + +class ComplexWarning(RuntimeWarning): + """ + The warning raised when casting a complex dtype to a real dtype. + + As implemented, casting a complex number to a real discards its imaginary + part, but this behavior may not be what the user actually wants. + + """ + pass + + +class ModuleDeprecationWarning(DeprecationWarning): + """Module deprecation warning. + + .. warning:: + + This warning should not be used, since nose testing is not relevant + anymore. + + The nose tester turns ordinary Deprecation warnings into test failures. + That makes it hard to deprecate whole modules, because they get + imported by default. So this is a special Deprecation warning that the + nose tester will let pass without making tests fail. + + """ + pass + + +class VisibleDeprecationWarning(UserWarning): + """Visible deprecation warning. + + By default, python will not show deprecation warnings, so this class + can be used when a very visible warning is helpful, for example because + the usage is most likely a user bug. + + """ + pass + + +class RankWarning(RuntimeWarning): + """Matrix rank warning. + + Issued by polynomial functions when the design matrix is rank deficient. + + """ + pass + + +# Exception used in shares_memory() +class TooHardError(RuntimeError): + """``max_work`` was exceeded. 
+ + This is raised whenever the maximum number of candidate solutions + to consider specified by the ``max_work`` parameter is exceeded. + Assigning a finite number to ``max_work`` may have caused the operation + to fail. + + """ + pass + + +class AxisError(ValueError, IndexError): + """Axis supplied was invalid. + + This is raised whenever an ``axis`` parameter is specified that is larger + than the number of array dimensions. + For compatibility with code written against older numpy versions, which + raised a mixture of :exc:`ValueError` and :exc:`IndexError` for this + situation, this exception subclasses both to ensure that + ``except ValueError`` and ``except IndexError`` statements continue + to catch ``AxisError``. + + Parameters + ---------- + axis : int or str + The out of bounds axis or a custom exception message. + If an axis is provided, then `ndim` should be specified as well. + ndim : int, optional + The number of array dimensions. + msg_prefix : str, optional + A prefix for the exception message. + + Attributes + ---------- + axis : int, optional + The out of bounds axis or ``None`` if a custom exception + message was provided. This should be the axis as passed by + the user, before any normalization to resolve negative indices. + + .. versionadded:: 1.22 + ndim : int, optional + The number of array dimensions or ``None`` if a custom exception + message was provided. + + .. versionadded:: 1.22 + + + Examples + -------- + >>> import numpy as np + >>> array_1d = np.arange(10) + >>> np.cumsum(array_1d, axis=1) + Traceback (most recent call last): + ... + numpy.exceptions.AxisError: axis 1 is out of bounds for array of dimension 1 + + Negative axes are preserved: + + >>> np.cumsum(array_1d, axis=-2) + Traceback (most recent call last): + ... + numpy.exceptions.AxisError: axis -2 is out of bounds for array of dimension 1 + + The class constructor generally takes the axis and arrays' + dimensionality as arguments: + + >>> print(np.exceptions.AxisError(2, 1, msg_prefix='error')) + error: axis 2 is out of bounds for array of dimension 1 + + Alternatively, a custom exception message can be passed: + + >>> print(np.exceptions.AxisError('Custom error message')) + Custom error message + + """ + + __slots__ = ("_msg", "axis", "ndim") + + def __init__(self, axis, ndim=None, msg_prefix=None): + if ndim is msg_prefix is None: + # single-argument form: directly set the error message + self._msg = axis + self.axis = None + self.ndim = None + else: + self._msg = msg_prefix + self.axis = axis + self.ndim = ndim + + def __str__(self): + axis = self.axis + ndim = self.ndim + + if axis is ndim is None: + return self._msg + else: + msg = f"axis {axis} is out of bounds for array of dimension {ndim}" + if self._msg is not None: + msg = f"{self._msg}: {msg}" + return msg + + +class DTypePromotionError(TypeError): + """Multiple DTypes could not be converted to a common one. + + This exception derives from ``TypeError`` and is raised whenever dtypes + cannot be converted to a single common one. This can be because they + are of a different category/class or incompatible instances of the same + one (see Examples). + + Notes + ----- + Many functions will use promotion to find the correct result and + implementation. For these functions the error will typically be chained + with a more specific error indicating that no implementation was found + for the input dtypes. 
+
+    Typically promotion should be considered "invalid" between the dtypes of
+    two arrays when `arr1 == arr2` can safely return all ``False`` because the
+    dtypes are fundamentally different.
+
+    Examples
+    --------
+    Datetimes and complex numbers are incompatible classes and cannot be
+    promoted:
+
+    >>> import numpy as np
+    >>> np.result_type(np.dtype("M8[s]"), np.complex128)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    DTypePromotionError: The DType <class 'numpy.dtypes.DateTime64DType'> could not
+    be promoted by <class 'numpy.dtypes.Complex128DType'>. This means that no common
+    DType exists for the given inputs. For example they cannot be stored in a
+    single array unless the dtype is `object`. The full list of DTypes is:
+    (<class 'numpy.dtypes.DateTime64DType'>, <class 'numpy.dtypes.Complex128DType'>)
+
+    For example for structured dtypes, the structure can mismatch and the
+    same ``DTypePromotionError`` is given when two structured dtypes with
+    a mismatch in their number of fields is given:
+
+    >>> dtype1 = np.dtype([("field1", np.float64), ("field2", np.int64)])
+    >>> dtype2 = np.dtype([("field1", np.float64)])
+    >>> np.promote_types(dtype1, dtype2)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    DTypePromotionError: field names `('field1', 'field2')` and `('field1',)`
+    mismatch.
+
+    """  # noqa: E501
+    pass
diff --git a/py311/lib/python3.11/site-packages/numpy/exceptions.pyi b/py311/lib/python3.11/site-packages/numpy/exceptions.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..4cc4eff5d321324e0a8e8d522f7e7f95ecc6279f
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/numpy/exceptions.pyi
@@ -0,0 +1,27 @@
+from typing import overload
+
+__all__ = [
+    "ComplexWarning",
+    "VisibleDeprecationWarning",
+    "ModuleDeprecationWarning",
+    "TooHardError",
+    "AxisError",
+    "DTypePromotionError",
+]
+
+class ComplexWarning(RuntimeWarning): ...
+class ModuleDeprecationWarning(DeprecationWarning): ...
+class VisibleDeprecationWarning(UserWarning): ...
+class RankWarning(RuntimeWarning): ...
+class TooHardError(RuntimeError): ...
+class DTypePromotionError(TypeError): ...
+
+class AxisError(ValueError, IndexError):
+    __slots__ = "_msg", "axis", "ndim"
+
+    axis: int | None
+    ndim: int | None
+    @overload
+    def __init__(self, axis: str, ndim: None = None, msg_prefix: None = None) -> None: ...
+    @overload
+    def __init__(self, axis: int, ndim: int, msg_prefix: str | None = None) -> None: ...
diff --git a/py311/lib/python3.11/site-packages/numpy/matlib.py b/py311/lib/python3.11/site-packages/numpy/matlib.py
new file mode 100644
index 0000000000000000000000000000000000000000..f27d503cdbca80c745b14b8854afecc56125df9a
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/numpy/matlib.py
@@ -0,0 +1,380 @@
+import warnings
+
+# 2018-05-29, PendingDeprecationWarning added to matrix.__new__
+# 2020-01-23, numpy 1.19.0 PendingDeprecationWarning
+warnings.warn("Importing from numpy.matlib is deprecated since 1.19.0. "
+              "The matrix subclass is not the recommended way to represent "
+              "matrices or deal with linear algebra (see "
+              "https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). "
+              "Please adjust your code to use regular ndarray. ",
+              PendingDeprecationWarning, stacklevel=2)
+
+import numpy as np
+
+# Matlib.py contains all functions in the numpy namespace with a few
+# replacements. See doc/source/reference/routines.matlib.rst for details.
+# Need * as we're copying the numpy namespace.
+from numpy import * # noqa: F403 +from numpy.matrixlib.defmatrix import asmatrix, matrix + +__version__ = np.__version__ + +__all__ = ['rand', 'randn', 'repmat'] +__all__ += np.__all__ + +def empty(shape, dtype=None, order='C'): + """Return a new matrix of given shape and type, without initializing entries. + + Parameters + ---------- + shape : int or tuple of int + Shape of the empty matrix. + dtype : data-type, optional + Desired output data-type. + order : {'C', 'F'}, optional + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + + See Also + -------- + numpy.empty : Equivalent array function. + matlib.zeros : Return a matrix of zeros. + matlib.ones : Return a matrix of ones. + + Notes + ----- + Unlike other matrix creation functions (e.g. `matlib.zeros`, + `matlib.ones`), `matlib.empty` does not initialize the values of the + matrix, and may therefore be marginally faster. However, the values + stored in the newly allocated matrix are arbitrary. For reproducible + behavior, be sure to set each element of the matrix before reading. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.empty((2, 2)) # filled with random data + matrix([[ 6.76425276e-320, 9.79033856e-307], # random + [ 7.39337286e-309, 3.22135945e-309]]) + >>> np.matlib.empty((2, 2), dtype=int) + matrix([[ 6600475, 0], # random + [ 6586976, 22740995]]) + + """ + return ndarray.__new__(matrix, shape, dtype, order=order) + +def ones(shape, dtype=None, order='C'): + """ + Matrix of ones. + + Return a matrix of given shape and type, filled with ones. + + Parameters + ---------- + shape : {sequence of ints, int} + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is np.float64. + order : {'C', 'F'}, optional + Whether to store matrix in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Matrix of ones of given shape, dtype, and order. + + See Also + -------- + ones : Array of ones. + matlib.zeros : Zero matrix. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> np.matlib.ones((2,3)) + matrix([[1., 1., 1.], + [1., 1., 1.]]) + + >>> np.matlib.ones(2) + matrix([[1., 1.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(1) + return a + +def zeros(shape, dtype=None, order='C'): + """ + Return a matrix of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is float. + order : {'C', 'F'}, optional + Whether to store the result in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Zero matrix of given shape, dtype, and order. + + See Also + -------- + numpy.zeros : Equivalent array function. + matlib.ones : Return a matrix of ones. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.zeros((2, 3)) + matrix([[0., 0., 0.], + [0., 0., 0.]]) + + >>> np.matlib.zeros(2) + matrix([[0., 0.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(0) + return a + +def identity(n, dtype=None): + """ + Returns the square identity matrix of given size. 
+
+    Parameters
+    ----------
+    n : int
+        Size of the returned identity matrix.
+    dtype : data-type, optional
+        Data-type of the output. Defaults to ``float``.
+
+    Returns
+    -------
+    out : matrix
+        `n` x `n` matrix with its main diagonal set to one,
+        and all other elements zero.
+
+    See Also
+    --------
+    numpy.identity : Equivalent array function.
+    matlib.eye : More general matrix identity function.
+
+    Examples
+    --------
+    >>> import numpy.matlib
+    >>> np.matlib.identity(3, dtype=int)
+    matrix([[1, 0, 0],
+            [0, 1, 0],
+            [0, 0, 1]])
+
+    """
+    a = array([1] + n * [0], dtype=dtype)
+    b = empty((n, n), dtype=dtype)
+    b.flat = a
+    return b
+
+def eye(n, M=None, k=0, dtype=float, order='C'):
+    """
+    Return a matrix with ones on the diagonal and zeros elsewhere.
+
+    Parameters
+    ----------
+    n : int
+        Number of rows in the output.
+    M : int, optional
+        Number of columns in the output, defaults to `n`.
+    k : int, optional
+        Index of the diagonal: 0 refers to the main diagonal,
+        a positive value refers to an upper diagonal,
+        and a negative value to a lower diagonal.
+    dtype : dtype, optional
+        Data-type of the returned matrix.
+    order : {'C', 'F'}, optional
+        Whether the output should be stored in row-major (C-style) or
+        column-major (Fortran-style) order in memory.
+
+    Returns
+    -------
+    I : matrix
+        An `n` x `M` matrix where all elements are equal to zero,
+        except for the `k`-th diagonal, whose values are equal to one.
+
+    See Also
+    --------
+    numpy.eye : Equivalent array function.
+    identity : Square identity matrix.
+
+    Examples
+    --------
+    >>> import numpy.matlib
+    >>> np.matlib.eye(3, k=1, dtype=float)
+    matrix([[0., 1., 0.],
+            [0., 0., 1.],
+            [0., 0., 0.]])
+
+    """
+    return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order))
+
+def rand(*args):
+    """
+    Return a matrix of random values with given shape.
+
+    Create a matrix of the given shape and populate it with
+    random samples from a uniform distribution over ``[0, 1)``.
+
+    Parameters
+    ----------
+    \\*args : Arguments
+        Shape of the output.
+        If given as N integers, each integer specifies the size of one
+        dimension.
+        If given as a tuple, this tuple gives the complete shape.
+
+    Returns
+    -------
+    out : matrix
+        The matrix of random values with shape given by `\\*args`.
+
+    See Also
+    --------
+    randn, numpy.random.RandomState.rand
+
+    Examples
+    --------
+    >>> np.random.seed(123)
+    >>> import numpy.matlib
+    >>> np.matlib.rand(2, 3)
+    matrix([[0.69646919, 0.28613933, 0.22685145],
+            [0.55131477, 0.71946897, 0.42310646]])
+    >>> np.matlib.rand((2, 3))
+    matrix([[0.9807642 , 0.68482974, 0.4809319 ],
+            [0.39211752, 0.34317802, 0.72904971]])
+
+    If the first argument is a tuple, other arguments are ignored:
+
+    >>> np.matlib.rand((2, 3), 4)
+    matrix([[0.43857224, 0.0596779 , 0.39804426],
+            [0.73799541, 0.18249173, 0.17545176]])
+
+    """
+    if isinstance(args[0], tuple):
+        args = args[0]
+    return asmatrix(np.random.rand(*args))
+
+def randn(*args):
+    """
+    Return a random matrix with data from the "standard normal" distribution.
+
+    `randn` generates a matrix filled with random floats sampled from a
+    univariate "normal" (Gaussian) distribution of mean 0 and variance 1.
+
+    Parameters
+    ----------
+    \\*args : Arguments
+        Shape of the output.
+        If given as N integers, each integer specifies the size of one
+        dimension. If given as a tuple, this tuple gives the complete shape.
+
+    Returns
+    -------
+    Z : matrix of floats
+        A matrix of floating-point samples drawn from the standard normal
+        distribution.
+ + See Also + -------- + rand, numpy.random.RandomState.randn + + Notes + ----- + For random samples from the normal distribution with mean ``mu`` and + standard deviation ``sigma``, use:: + + sigma * np.matlib.randn(...) + mu + + Examples + -------- + >>> np.random.seed(123) + >>> import numpy.matlib + >>> np.matlib.randn(1) + matrix([[-1.0856306]]) + >>> np.matlib.randn(1, 2, 3) + matrix([[ 0.99734545, 0.2829785 , -1.50629471], + [-0.57860025, 1.65143654, -2.42667924]]) + + Two-by-four matrix of samples from the normal distribution with + mean 3 and standard deviation 2.5: + + >>> 2.5 * np.matlib.randn((2, 4)) + 3 + matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462], + [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]]) + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.randn(*args)) + +def repmat(a, m, n): + """ + Repeat a 0-D to 2-D array or matrix MxN times. + + Parameters + ---------- + a : array_like + The array or matrix to be repeated. + m, n : int + The number of times `a` is repeated along the first and second axes. + + Returns + ------- + out : ndarray + The result of repeating `a`. + + Examples + -------- + >>> import numpy.matlib + >>> a0 = np.array(1) + >>> np.matlib.repmat(a0, 2, 3) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> a1 = np.arange(4) + >>> np.matlib.repmat(a1, 2, 2) + array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + + >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) + >>> np.matlib.repmat(a2, 2, 3) + matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5], + [0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5]]) + + """ + a = asanyarray(a) + ndim = a.ndim + if ndim == 0: + origrows, origcols = (1, 1) + elif ndim == 1: + origrows, origcols = (1, a.shape[0]) + else: + origrows, origcols = a.shape + rows = origrows * m + cols = origcols * n + c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) + return c.reshape(rows, cols) diff --git a/py311/lib/python3.11/site-packages/numpy/matlib.pyi b/py311/lib/python3.11/site-packages/numpy/matlib.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d653a5a6cc98b89382b3b1a616affb00e234df10 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/matlib.pyi @@ -0,0 +1,580 @@ +from typing import Any, Literal, TypeAlias, TypeVar, overload + +import numpy as np +import numpy.typing as npt +from numpy import ( # noqa: F401 + False_, + ScalarType, + True_, + __array_namespace_info__, + __version__, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + angle, + any, + append, + apply_along_axis, + apply_over_axes, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_split, + array_str, + asanyarray, + asarray, + asarray_chkfinite, + ascontiguousarray, + asfortranarray, + asin, + asinh, + asmatrix, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + average, + bartlett, + base_repr, + binary_repr, + bincount, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + blackman, + block, + bmat, + bool, + bool_, + broadcast, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + c_, + can_cast, + cbrt, + cdouble, + ceil, + char, + character, + 
choose, + clip, + clongdouble, + column_stack, + common_type, + complex64, + complex128, + complex256, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copy, + copysign, + copyto, + core, + corrcoef, + correlate, + cos, + cosh, + count_nonzero, + cov, + cross, + csingle, + ctypeslib, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + delete, + diag, + diag_indices, + diag_indices_from, + diagflat, + diagonal, + diff, + digitize, + divide, + divmod, + dot, + double, + dsplit, + dstack, + dtype, + dtypes, + e, + ediff1d, + einsum, + einsum_path, + emath, + empty_like, + equal, + errstate, + euler_gamma, + exceptions, + exp, + exp2, + expand_dims, + expm1, + extract, + f2py, + fabs, + fft, + fill_diagonal, + finfo, + fix, + flatiter, + flatnonzero, + flexible, + flip, + fliplr, + flipud, + float16, + float32, + float64, + float128, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromregex, + fromstring, + full, + full_like, + gcd, + generic, + genfromtxt, + geomspace, + get_include, + get_printoptions, + getbufsize, + geterr, + geterrcall, + gradient, + greater, + greater_equal, + half, + hamming, + hanning, + heaviside, + histogram, + histogram2d, + histogram_bin_edges, + histogramdd, + hsplit, + hstack, + hypot, + i0, + iinfo, + imag, + index_exp, + indices, + inexact, + inf, + info, + inner, + insert, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + interp, + intersect1d, + intp, + invert, + is_busday, + isclose, + iscomplex, + iscomplexobj, + isdtype, + isfinite, + isfortran, + isin, + isinf, + isnan, + isnat, + isneginf, + isposinf, + isreal, + isrealobj, + isscalar, + issubdtype, + iterable, + ix_, + kaiser, + kron, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + lib, + linalg, + linspace, + little_endian, + load, + loadtxt, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + ma, + mask_indices, + matmul, + matrix, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + median, + memmap, + meshgrid, + mgrid, + min, + min_scalar_type, + minimum, + mintypecode, + mod, + modf, + moveaxis, + multiply, + nan, + nan_to_num, + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, + ndarray, + ndenumerate, + ndim, + ndindex, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ogrid, + ones_like, + outer, + packbits, + pad, + partition, + percentile, + permute_dims, + pi, + piecewise, + place, + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polynomial, + polysub, + polyval, + positive, + pow, + power, + printoptions, + prod, + promote_types, + ptp, + put, + put_along_axis, + putmask, + quantile, + r_, + rad2deg, + radians, + random, + ravel, + ravel_multi_index, + real, + real_if_close, + rec, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + roots, + rot90, + round, + row_stack, + s_, + save, + savetxt, + savez, + 
savez_compressed, + sctypeDict, + searchsorted, + select, + set_printoptions, + setbufsize, + setdiff1d, + seterr, + seterrcall, + setxor1d, + shape, + shares_memory, + short, + show_config, + show_runtime, + sign, + signbit, + signedinteger, + sin, + sinc, + single, + sinh, + size, + sort, + sort_complex, + spacing, + split, + sqrt, + square, + squeeze, + stack, + std, + str_, + strings, + subtract, + sum, + swapaxes, + take, + take_along_axis, + tan, + tanh, + tensordot, + test, + testing, + tile, + timedelta64, + trace, + transpose, + trapezoid, + tri, + tril, + tril_indices, + tril_indices_from, + trim_zeros, + triu, + triu_indices, + triu_indices_from, + true_divide, + trunc, + typecodes, + typename, + typing, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, + unpackbits, + unravel_index, + unsignedinteger, + unstack, + unwrap, + ushort, + vander, + var, + vdot, + vecdot, + vecmat, + vectorize, + void, + vsplit, + vstack, + where, + zeros_like, +) +from numpy._typing import _ArrayLike, _DTypeLike + +__all__ = ["rand", "randn", "repmat"] +__all__ += np.__all__ + +### + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_ScalarT]] +_Order: TypeAlias = Literal["C", "F"] + +### + +# +@overload +def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... +@overload +def identity(n: int, dtype: _DTypeLike[_ScalarT]) -> _Matrix[_ScalarT]: ... +@overload +def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... + +# +@overload +def eye( + n: int, + M: int | None = None, + k: int = 0, + dtype: type[np.float64] | None = ..., + order: _Order = "C", +) -> _Matrix[np.float64]: ... +@overload +def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike | None = ..., order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def rand(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def rand(arg: int, /, *args: int) -> _Matrix[np.float64]: ... 
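+
+# Editor's note (illustrative, not part of the stubs): with the overloads
+# above, a static type checker infers the matrix element type from the
+# ``dtype`` argument, e.g.
+#
+#     reveal_type(zeros((2, 2)))                  # matrix[..., dtype[float64]]
+#     reveal_type(zeros((2, 2), dtype=np.int64))  # matrix[..., dtype[int64]]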
+ +# +@overload +def randn(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def repmat(a: _Matrix[_ScalarT], m: int, n: int) -> _Matrix[_ScalarT]: ... +@overload +def repmat(a: _ArrayLike[_ScalarT], m: int, n: int) -> npt.NDArray[_ScalarT]: ... +@overload +def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... diff --git a/py311/lib/python3.11/site-packages/numpy/py.typed b/py311/lib/python3.11/site-packages/numpy/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/numpy/version.py b/py311/lib/python3.11/site-packages/numpy/version.py new file mode 100644 index 0000000000000000000000000000000000000000..26dcf0375f1fab3b5cc13f8742e3d5812c275ac2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/version.py @@ -0,0 +1,11 @@ + +""" +Module to expose more detailed version info for the installed `numpy` +""" +version = "2.4.1" +__version__ = version +full_version = version + +git_revision = "d24bb7f48d3b0e3471c68f1309c130d0b65ee72a" +release = 'dev' not in version and '+' not in version +short_version = version.split("+")[0] diff --git a/py311/lib/python3.11/site-packages/numpy/version.pyi b/py311/lib/python3.11/site-packages/numpy/version.pyi new file mode 100644 index 0000000000000000000000000000000000000000..073885c017c2a9c34e41431d0d9361de4b59ff90 --- /dev/null +++ b/py311/lib/python3.11/site-packages/numpy/version.pyi @@ -0,0 +1,9 @@ +from typing import Final, LiteralString + +version: Final[LiteralString] = ... +__version__: Final[LiteralString] = ... +full_version: Final[LiteralString] = ... + +git_revision: Final[LiteralString] = ... +release: Final[bool] = ... +short_version: Final[LiteralString] = ... diff --git a/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/LICENSE b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4a071fc533d4fd07dbe81e1e8f0f0998b17220be --- /dev/null +++ b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 pre-commit dev team: Anthony Sottile, Ken Struys + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/METADATA b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..4767141d43febc53737546cfc5d2e352cf649df1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/METADATA @@ -0,0 +1,29 @@ +Metadata-Version: 2.1 +Name: pre_commit +Version: 4.5.1 +Summary: A framework for managing and maintaining multi-language pre-commit hooks. +Home-page: https://github.com/pre-commit/pre-commit +Author: Anthony Sottile +Author-email: asottile@umich.edu +License: MIT +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >=3.10 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: cfgv>=2.0.0 +Requires-Dist: identify>=1.0.0 +Requires-Dist: nodeenv>=0.11.1 +Requires-Dist: pyyaml>=5.1 +Requires-Dist: virtualenv>=20.10.0 + +[![build status](https://github.com/pre-commit/pre-commit/actions/workflows/main.yml/badge.svg)](https://github.com/pre-commit/pre-commit/actions/workflows/main.yml) +[![pre-commit.ci status](https://results.pre-commit.ci/badge/github/pre-commit/pre-commit/main.svg)](https://results.pre-commit.ci/latest/github/pre-commit/pre-commit/main) + +## pre-commit + +A framework for managing and maintaining multi-language pre-commit hooks. 
+ +For more information see: https://pre-commit.com/ diff --git a/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/RECORD b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..52a9dcf3b6b61e44c84e08a21873eb087d5567ec --- /dev/null +++ b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/RECORD @@ -0,0 +1,94 @@ +../../../bin/pre-commit,sha256=jrquBzzyNYEcSAUgDjz_yOx0F9jdfSBDRUnkTzrqNKo,329 +pre_commit-4.5.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +pre_commit-4.5.1.dist-info/LICENSE,sha256=6iyifLp8w1gi2VpG1ZvNPMiOGWWS5jkNGUmjWf_JkOg,1092 +pre_commit-4.5.1.dist-info/METADATA,sha256=zQcgq0wT8QW7P1u7m6WRkCV5GP2iE-zCktEn25m3OBo,1222 +pre_commit-4.5.1.dist-info/RECORD,, +pre_commit-4.5.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pre_commit-4.5.1.dist-info/WHEEL,sha256=0VNUDWQJzfRahYI3neAhz2UVbRCtztpN5dPHAGvmGXc,109 +pre_commit-4.5.1.dist-info/entry_points.txt,sha256=9QCW6lC130XmXhOB-3XA71fK9Ef5R5YlUma7E93lrPc,52 +pre_commit-4.5.1.dist-info/top_level.txt,sha256=KMCc5TrDQcN3MCxwR72948Y2zIrILKmtnSAdqWBUa_I,11 +pre_commit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pre_commit/__main__.py,sha256=uPnSXQQC50TKhEO4moFj7DjjJ3xrVVJpcYpD7JkA9gc,127 +pre_commit/all_languages.py,sha256=hiD3IaWeCj3EJbC45ZW8RrU_yCHMkR8gtfR69k0YblU,1463 +pre_commit/clientlib.py,sha256=B0YZGU-gsqfU11mfk2sZq9gYU0B0fC2DQwcLqFmfuF0,16815 +pre_commit/color.py,sha256=WzrGPdQevNxYSx3howvTlGG--uZMjhdgCPvtALpvGVQ,3219 +pre_commit/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pre_commit/commands/autoupdate.py,sha256=j1MAsszreh_1iYNp2epNVFmhgrwq2TwaI9h_-svG5EA,7157 +pre_commit/commands/clean.py,sha256=emsIYwprEt4LmcRgFuTSurwgAo5aqwStHylW1tGFSsY,429 +pre_commit/commands/gc.py,sha256=mUSdJKGlv2rIywGVzV98au86v3Yb-bpxFp6rMDesxfg,3042 +pre_commit/commands/hazmat.py,sha256=sGZCMFDR2ErnoBh-ht6EYB4_G9pSSmu7dLParXIVJxM,2573 +pre_commit/commands/hook_impl.py,sha256=WS96jCHe7BboTKidvIqALXQr_ekX3d_LPQmQ2Sb79JA,9425 +pre_commit/commands/init_templatedir.py,sha256=jFDNEUx51k5KX1ADbWTFzivGj_9f7SRq1nnb8uFo37U,1135 +pre_commit/commands/install_uninstall.py,sha256=VoJZ18lRghIadh7IJRl1BLq7feI5DbzZwlIV3_uz4ZQ,5341 +pre_commit/commands/migrate_config.py,sha256=oXOxuXzjNRy9RsIoVxpWfqIn4p28uiJEEP5Ul4WGAoM,4163 +pre_commit/commands/run.py,sha256=ZaZ_AwSOPfvoZNMKzS4MLr_sB82AtELb85mmsSR7VpM,14163 +pre_commit/commands/sample_config.py,sha256=pR9SRhdjl05cqOLTkc8b8Grse8B_tGCCVZqAJcv3nRk,453 +pre_commit/commands/try_repo.py,sha256=AZ8-JdITKeSnqajT4E6y6q2ROERmM1ppBw5Vz4t6bXw,2578 +pre_commit/commands/validate_config.py,sha256=prustTJy8a9DI1m8RGaopiy_tf6YUp9Eyx_u4ep9Qts,371 +pre_commit/commands/validate_manifest.py,sha256=cOl6ua8aIHu0Pa5tC-1c_P4ObnG8Y7m2sAaLu9zB3F4,377 +pre_commit/constants.py,sha256=8DVzmf1Z_uEpiZll873n6YJB4oRIHt87lM3US4CqKvY,282 +pre_commit/envcontext.py,sha256=2jtR6ozx8azT-lANC-OuNTO5CojRKpro5OqeDAFm5sQ,1593 +pre_commit/error_handler.py,sha256=f-YwuV3GW1SEQJdftfVAqulONn3AyA9fHnni-jg1jvo,2621 +pre_commit/errors.py,sha256=XxCmTt7WTCv70fF2XA6yktoE3zjkpDE8LJrXM9sCuYU,78 +pre_commit/file_lock.py,sha256=oATNmSMegySipxClNzy2At8NeyVtYXC4RJLdb5mfx_I,2351 +pre_commit/git.py,sha256=YiOS9u-Cp5I1ojLRRkvtJPU9Kppb3zXRZc87YbGZSUY,8527 +pre_commit/hook.py,sha256=a66O4OI4uEDMS_sDna_oKnyN8zf1hS99j4yhmv0kdqI,1513 +pre_commit/lang_base.py,sha256=15qiLtDuhB6590GUTLHbo3JVpanuxscVmwodZFQxL4Y,5384 
+pre_commit/languages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pre_commit/languages/conda.py,sha256=XKZVBtNs-PS929EMH303vM4iYdyONfcsQXfyKpewdm0,2421 +pre_commit/languages/coursier.py,sha256=jVK8aMxGdVN-gopf0JlTLc0viMrHYolSKvC_rSR5N0o,2508 +pre_commit/languages/dart.py,sha256=J8LMuAD7nQ6Q1Pn8HaDSWQmrri8jxHCBbSnngBnlBeQ,3118 +pre_commit/languages/docker.py,sha256=P-ZdEFhgWhZHzM1ug_jXwfWej7uffjYzQ4nYQRGBG2A,5412 +pre_commit/languages/docker_image.py,sha256=tzUiZjADKhpocwKoMDzAYwblV4Bqg45s4z0Jp-cpoK0,847 +pre_commit/languages/dotnet.py,sha256=e190Qjdreyerq52TjPt1yYNS9DMbh9pajt6JROt2bm0,3463 +pre_commit/languages/fail.py,sha256=52hRwZYy0s9ICuB-lT321z6QETnGcwXMEMN24_5q2p4,685 +pre_commit/languages/golang.py,sha256=nlrL3kSTGn1Ct7g43VHK1vltfiuVGD4fydMhuInBy4g,4636 +pre_commit/languages/haskell.py,sha256=y2wYjQair58TjIWl0p5CWuPUM3UNCnOp_AuZ2P6Z9Rw,1662 +pre_commit/languages/julia.py,sha256=XyuspCn33EAZNEu2znzO3CWpbh8Q1_BfTu3acvX3L_I,4432 +pre_commit/languages/lua.py,sha256=Y1JDnE0cQBoYgfkQ7uXbUTtUsoPD_2GLiZHos5ZOaaY,2535 +pre_commit/languages/node.py,sha256=zPfCk4PtBI27Wcg0fWM_4-2lbWaw96xyG-w6ZFKEww4,3853 +pre_commit/languages/perl.py,sha256=DatBJxV8gtvYaCMwUefrvURk3l5G7h_6OGuP2CEVQhM,1491 +pre_commit/languages/pygrep.py,sha256=JXTKuPPJyFLXx8wK7zwHx5ELZXRzBVITU6VYUuIb7U8,3816 +pre_commit/languages/python.py,sha256=0MjdMkEJZ_zE7kEhQajYz7Y94Vexx2dSvaJ-Q6fyxSs,7320 +pre_commit/languages/r.py,sha256=f5DCQxeheC0SOu6DOQBLjLbyq2X18Yg78D0h6Vws2k8,7901 +pre_commit/languages/ruby.py,sha256=VX1ha8QSSJ88WJtrB3ccSc3gEj8_Mio2QCI5PeV__EQ,4636 +pre_commit/languages/rust.py,sha256=XuEW01Crv6jfjNFGjHSHF2xRgtv33hstshtW_XzEyc4,5492 +pre_commit/languages/swift.py,sha256=80WKNNZaX6TqfvcZGIUimD_g8MUFBQSnZNTNjn_SlrY,1605 +pre_commit/languages/unsupported.py,sha256=cMeR-yXRE-dsPaybIYWjD1_ZBRa7eTdyUJnsyGL_tq4,300 +pre_commit/languages/unsupported_script.py,sha256=hHIqfZfoJReANiDI0mDVLhSfqk7QLrJKapcWnt2idAE,786 +pre_commit/logging_handler.py,sha256=Yl_rj7peBY23rswiddSlTnpgYMMD0x9Tw41EK8nb3G4,1019 +pre_commit/main.py,sha256=B0fBTgffpJ0ittBWbLkK2jc-G6MINtdfnDremmexDxM,15918 +pre_commit/meta_hooks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pre_commit/meta_hooks/check_hooks_apply.py,sha256=4e_D4eLrs45ZF_Ggoc0rqa9Rb6jh7dyUeQX4nItjE9k,1207 +pre_commit/meta_hooks/check_useless_excludes.py,sha256=yFUoTJeipbG9rNQ6gusdzYFrGppw2mewZ6N_6fY-qNQ,2553 +pre_commit/meta_hooks/identity.py,sha256=7RzSVW2tiVL4lEvft9uaa_ntuR7gPx4F5Ciqe_0X80Q,346 +pre_commit/output.py,sha256=2Z0iN6P7qCu-fJf-vuI4CvC11z9HFVOzKBRD3krtV7o,911 +pre_commit/parse_shebang.py,sha256=6_KS-obQ_iFlLa1r9SByMkwvWew5rMXMGYgXjB-qfjc,2481 +pre_commit/prefix.py,sha256=wj3T2eiHeOp7G6UbF8v1pfC1kDhhsUranKlEUfMW3uk,495 +pre_commit/repository.py,sha256=kRYFKOjOZQolAcO_SzSzSuiEAudrTgH4wo2M3cHMZiY,7608 +pre_commit/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pre_commit/resources/empty_template_.npmignore,sha256=zbyuFRBda3geYggTx5x-hodA1OnMU85vX8u8Ejh630s,2 +pre_commit/resources/empty_template_Cargo.toml,sha256=rjclCuspy_DoGro5K_Yx-cjKO88rvQU5qkkbAYIiatg,96 +pre_commit/resources/empty_template_LICENSE.renv,sha256=vJiU4spBSQJkgro6DzPpoTeFvYYGMdkKEhZQQh54scE,1052 +pre_commit/resources/empty_template_Makefile.PL,sha256=oNZjJpS1Ba38XJ4VmQArf7ZfaSFbD4vDFNZ8ERoAy2A,104 +pre_commit/resources/empty_template_activate.R,sha256=ydOQGgFscRyMU4bIqm1TzGQN8Dxfgc79eOXujP7uVjE,12037 +pre_commit/resources/empty_template_environment.yml,sha256=SBs7AN3M19I6EX_lrIDhFavgAaBRubazR6imzTbhBCI,302 
+pre_commit/resources/empty_template_go.mod,sha256=35v54fbcB8Hs9TOPi6uHNiu9GTtuYs4M3ofWXpdG3j8,43 +pre_commit/resources/empty_template_main.go,sha256=VaYLuXFRsrS2gEYkR85g7DRRGxT6ENd0QMl7l3cQFWY,29 +pre_commit/resources/empty_template_main.rs,sha256=U25Qa7kJFMJDoSs5e5qZj4WuLL2boC39A6nhVcpcoPQ,13 +pre_commit/resources/empty_template_package.json,sha256=iBN43GUUn05DYKA7vpOcdU27H-nTsr6aVFCJVq4G3-E,73 +pre_commit/resources/empty_template_pre-commit-package-dev-1.rockspec,sha256=Jd8cePFPSzN0YO5T7vZLAVTX_KrXQiSNu5uFQXmETEU,212 +pre_commit/resources/empty_template_pre_commit_placeholder_package.gemspec,sha256=UCWGrtzSq_bUTvDgJuV94ZPQ6KHrGbFywWyppbR8ZIQ,195 +pre_commit/resources/empty_template_pubspec.yaml,sha256=8Y4tjESkd-QugVwZ-vCyaXj8G6FoTbrM7ie2-twEQAk,78 +pre_commit/resources/empty_template_renv.lock,sha256=Oa6eSaJs4lZn8A4qo1OkEvDC0H_g-2VrmzFuATmMhuM,351 +pre_commit/resources/empty_template_setup.py,sha256=JVpaGB6F5T2smTCWsqZkX6DstgxkPFjBL57hraqredk,108 +pre_commit/resources/hook-tmpl,sha256=OJybNgJvZYMlvJbUZkzzdGDlxBXVMFmiIb0DYCP3b_U,528 +pre_commit/resources/rbenv.tar.gz,sha256=46dZEjCK0iv69qmQfvcHDdNHxH9SW51SriWJfUvRU1Y,31297 +pre_commit/resources/ruby-build.tar.gz,sha256=NjIvrF3cF3xRDDrgt5yuIgBfhcT9TPn3qNQDuDXQSDk,93998 +pre_commit/resources/ruby-download.tar.gz,sha256=mU_7nLFc-7FEbf9OCNVtHGlKGNhe6uGZaLNKeV4qnCc,5269 +pre_commit/staged_files_only.py,sha256=oMWqz7-qAt5DcKLp0u4fCSAWyDBKwtxsSk1EURWKq5Y,4155 +pre_commit/store.py,sha256=3vgbkV8tHde6RBEcPqC6JxHU77XezCOjrrD4FkcLMmA,8470 +pre_commit/util.py,sha256=JGti6eiboOIIiYHflIiWoH9yHT4WP_gNNnAZ0v5zYw0,7047 +pre_commit/xargs.py,sha256=wE3sDuX040bFEnOBSjRCjflu0I1QnKSf2F2Ekp3qYdY,5550 +pre_commit/yaml.py,sha256=61sHsBF4ZGPugQUXGih8L6a5gTzgsD_X9wYibDlN560,561 +pre_commit/yaml_rewrite.py,sha256=ICKYxX8Ku54Ci9guVS2J1J2H6ErdqEMKHZm5PbR1WQk,1337 diff --git a/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/WHEEL b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..d02d2a26499bc369f32d3c2b98955f80480633fa --- /dev/null +++ b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.5.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/entry_points.txt b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..9175762423717cfe6a84cd3f09d6deea5eb6c312 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +pre-commit = pre_commit.main:main diff --git a/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..493444d8a9fc3fabab05163ba88a146136068987 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pre_commit-4.5.1.dist-info/top_level.txt @@ -0,0 +1 @@ +pre_commit diff --git a/py311/lib/python3.11/site-packages/proglog/__init__.py b/py311/lib/python3.11/site-packages/proglog/__init__.py new file 
mode 100644
index 0000000000000000000000000000000000000000..50592124535b1da73076e3fb695d898982d7af64
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/proglog/__init__.py
@@ -0,0 +1,16 @@
+"""proglog/__init__.py"""
+
+# __all__ = []
+
+from .proglog import (
+    ProgressLogger,
+    ProgressBarLogger,
+    TqdmProgressBarLogger,
+    notebook,
+    RqWorkerProgressLogger,
+    RqWorkerBarLogger,
+    MuteProgressBarLogger,
+    default_bar_logger,
+)
+
+from .version import __version__
diff --git a/py311/lib/python3.11/site-packages/proglog/proglog.py b/py311/lib/python3.11/site-packages/proglog/proglog.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8f9abdb958d9613788fe668d2500f58dcd2facf
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/proglog/proglog.py
@@ -0,0 +1,429 @@
+"""Implements the generic progress logger class, and the ProgressBar class."""
+
+from tqdm import tqdm, tqdm_notebook
+from collections import OrderedDict
+import time
+
+SETTINGS = {"notebook": False}
+
+
+def notebook(turn="on"):
+    SETTINGS["notebook"] = True if (turn == "on") else False
+
+
+def troncate_string(s, max_length=25):
+    return s if (len(s) < max_length) else (s[:max_length] + "...")
+
+
+class ProgressLogger:
+    """Generic class for progress loggers.
+
+    A progress logger contains a "state" dictionary.
+
+    Parameters
+    ----------
+    init_state : dict
+        Dictionary representing the initial state.
+    """
+
+    def __init__(self, init_state=None):
+
+        self.state = {}
+        self.stored = {}
+        self.logs = []
+        self.log_indent = 0
+        if init_state is not None:
+            self.state.update(init_state)
+
+    def log(self, message):
+        self.logs.append((" " * self.log_indent) + message)
+
+    def dump_logs(self, filepath=None):
+        if filepath is not None:
+            with open(filepath, "a") as f:
+                f.write("\n".join(self.logs))
+        else:
+            return "\n".join(self.logs)
+
+    def callback(self, **kw):
+        """Execute something after the state has been updated by the given
+        state elements.
+
+        This default callback does nothing; override it by subclassing.
+        """
+        pass
+
+    def store(self, **kw):
+        """Store objects in the logger and trigger ``self.store_callback``.
+
+        This works exactly like ``logger()``, but the latter is meant for
+        simple data objects (text, numbers) that will be sent over the
+        network or written to a file. The ``store`` method expects rather
+        large objects which are not necessarily serializable, and will be
+        used, e.g., to draw plots on the fly.
+        """
+        self.stored.update(kw)
+        self.store_callback(**kw)
+
+    def store_callback(self, **kw):
+        """Execute something after the store has been updated by the given
+        state elements.
+
+        This default callback does nothing; override it by subclassing.
+        """
+        pass
+
+    def iter(self, **kw):
+        """Iterate through a list while updating the state.
+
+        Examples
+        --------
+        >>> for username in logger.iter(user=['tom', 'tim', 'lea']):
+        >>>     # At every loop, logger.state['user'] is updated
+        >>>     print(username)
+        """
+        for field, iterable in kw.items():
+            for it in iterable:
+                self(**{field: it})
+                yield it
+
+    def __call__(self, **kw):
+        self.state.update(kw)
+        self.callback(**kw)
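+
+
+# Editor's sketch (illustrative, not part of proglog): ``callback`` is meant
+# to be overridden in a subclass to react to state updates:
+#
+#     class PrintLogger(ProgressLogger):
+#         def callback(self, **kw):
+#             print("state updated:", kw)
+#
+#     logger = PrintLogger()
+#     for x in logger.iter(item=[1, 2, 3]):
+#         pass  # prints "state updated: {'item': 1}", then 2, then 3
+
+
+class ProgressBarLogger(ProgressLogger):
+    """Generic class for progress loggers.
+
+    A progress logger contains a "state" dictionary.
+
+    Parameters
+    ----------
+    init_state : dict
+        Initial state of the logger.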
+    bars : None, list, tuple, or dict, optional
+        Either None (will be initialized with no bar) or a list/tuple of bar
+        names (e.g., ['main', 'sub']) which will be initialized with index -1
+        and no total, or a dictionary (possibly ordered) of bars, of the form
+        `{bar_1: {title: 'bar1', index: 2, total: 23}, bar_2: {...}}`.
+    ignored_bars : None, list of str, or 'all_others', optional
+        Either None (newly met bars will be added) or a list of blacklisted
+        bar names, or 'all_others' to signify that all bar names not already
+        in `self.bars` will be ignored.
+    logged_bars : 'all' or list of str, optional
+        Names of the bars whose updates should be written to the logs, or
+        'all' to log every bar.
+    min_time_interval : int or float
+        Time in seconds between progress bar updates.
+    ignore_bars_under : int
+        Iterables with fewer elements than this get no progress bar.
+    """
+
+    bar_indent = 2
+
+    def __init__(
+        self,
+        init_state=None,
+        bars=None,
+        ignored_bars=None,
+        logged_bars="all",
+        min_time_interval=0,
+        ignore_bars_under=0,
+    ):
+        ProgressLogger.__init__(self, init_state)
+        if bars is None:
+            bars = OrderedDict()
+        elif isinstance(bars, (list, tuple)):
+            bars = OrderedDict(
+                [
+                    (b, dict(title=b, index=-1, total=None, message=None, indent=0))
+                    for b in bars
+                ]
+            )
+        if isinstance(ignored_bars, (list, tuple)):
+            ignored_bars = set(ignored_bars)
+        self.ignored_bars = ignored_bars
+        self.logged_bars = logged_bars
+        self.state["bars"] = bars
+        self.min_time_interval = min_time_interval
+        self.ignore_bars_under = ignore_bars_under
+
+    @property
+    def bars(self):
+        """Return ``self.state['bars']``."""
+        return self.state["bars"]
+
+    def bar_is_ignored(self, bar):
+        if self.ignored_bars is None:
+            return False
+        elif self.ignored_bars == "all_others":
+            return bar not in self.bars
+        else:
+            return bar in self.ignored_bars
+
+    def bar_is_logged(self, bar):
+        if not self.logged_bars:
+            return False
+        elif self.logged_bars == "all":
+            return True
+        else:
+            return bar in self.logged_bars
+
+    def iterable_is_too_short(self, iterable):
+        length = len(iterable) if hasattr(iterable, "__len__") else None
+        return (length is not None) and (length < self.ignore_bars_under)
+
+    def iter_bar(self, bar_prefix="", **kw):
+        """Iterate through a list while updating a state bar.
+
+        Parameters
+        ----------
+        bar_prefix : str
+            Prefix prepended to the bar name.
+
+        Examples
+        --------
+        >>> for username in logger.iter_bar(user=['tom', 'tim', 'lea']):
+        >>>     # At every loop, logger.state['bars']['user'] is updated
+        >>>     # to {index: i, total: 3, title: 'user'}
+        >>>     print(username)
+
+        """
+        if "bar_message" in kw:
+            bar_message = kw.pop("bar_message")
+        else:
+            bar_message = None
+        bar, iterable = kw.popitem()
+
+        if self.bar_is_ignored(bar) or self.iterable_is_too_short(iterable):
+            return iterable
+        bar = bar_prefix + bar
+        if hasattr(iterable, "__len__"):
+            self(**{bar + "__total": len(iterable)})
+
+        def new_iterable():
+            last_time = time.time()
+            i = 0  # necessary in case the iterator is empty
+            for i, it in enumerate(iterable):
+                now_time = time.time()
+                if (i == 0) or (now_time - last_time > self.min_time_interval):
+                    if bar_message is not None:
+                        self(**{bar + "__message": bar_message(it)})
+                    self(**{bar + "__index": i})
+                    last_time = now_time
+                yield it
+
+            if self.bars[bar]["index"] != i:
+                self(**{bar + "__index": i})
+            self(**{bar + "__index": i + 1})
+
+        return new_iterable()
+
+    def bars_callback(self, bar, attr, value, old_value=None):
+        """Execute a custom action after the progress bars are updated.
+
+        Parameters
+        ----------
+        bar
+            Name/ID of the bar to be modified.
+
+        attr
+            Attribute of the bar to be modified.
+
+        value
+            New value of the attribute.
+
+        old_value
+            Previous value of this bar's attribute.
+
+        This default callback does nothing; override it by subclassing.
+        """
+        pass
+
+    def __call__(self, **kw):
+
+        items = sorted(kw.items(), key=lambda kv: not kv[0].endswith("total"))
+
+        for key, value in items:
+            if "__" in key:
+                bar, attr = key.split("__")
+                if self.bar_is_ignored(bar):
+                    continue
+                kw.pop(key)
+                if bar not in self.bars:
+                    # Editor's note: ``indent=0`` added for consistency with
+                    # the bars created in ``__init__``; without it the logging
+                    # branch below can hit a missing-key error.
+                    self.bars[bar] = dict(
+                        title=bar, index=-1, total=None, message=None, indent=0
+                    )
+                old_value = self.bars[bar][attr]
+
+                if self.bar_is_logged(bar):
+                    new_bar = (attr == "index") and (value < old_value)
+                    if (attr == "total") or (new_bar):
+                        self.bars[bar]["indent"] = self.log_indent
+                    else:
+                        self.log_indent = self.bars[bar]["indent"]
+                    self.log("[%s] %s: %s" % (bar, attr, value))
+                    self.log_indent += self.bar_indent
+                self.bars[bar][attr] = value
+                self.bars_callback(bar, attr, value, old_value)
+        self.state.update(kw)
+        self.callback(**kw)
+
+
+class TqdmProgressBarLogger(ProgressBarLogger):
+    """Tqdm-powered progress bar for console or Notebooks.
+
+    Parameters
+    ----------
+    init_state : dict
+        Initial state of the logger.
+    bars : None, list, tuple, or dict, optional
+        Either None (will be initialized with no bar) or a list/tuple of bar
+        names (e.g., ['main', 'sub']) which will be initialized with index -1
+        and no total, or a dictionary (possibly ordered) of bars, of the form
+        `{bar_1: {title: 'bar1', index: 2, total: 23}, bar_2: {...}}`.
+    leave_bars : bool, optional
+        Whether to leave the progress bars displayed upon completion.
+    ignored_bars : None, list of str, or 'all_others', optional
+        Either None (newly met bars will be added) or a list of blacklisted
+        bar names, or 'all_others' to signify that all bar names not already
+        in `self.bars` will be ignored.
+    notebook : bool, optional
+        True will make the bars look nice (HTML) in the Jupyter notebook. It
+        is advised to leave it to 'default', as the default can be globally
+        set from inside a notebook with `import proglog; proglog.notebook()`.
+    print_messages : bool
+        If True, every `logger(message='something')` will print a message in
+        the console or notebook.
+    min_time_interval : int or float
+        Time in seconds between progress bar updates.
+    ignore_bars_under : int
+        Iterables with fewer elements than this get no progress bar.
+    """
+
+    def __init__(
+        self,
+        init_state=None,
+        bars=None,
+        leave_bars=False,
+        ignored_bars=None,
+        logged_bars="all",
+        notebook="default",
+        print_messages=True,
+        min_time_interval=0,
+        ignore_bars_under=0,
+    ):
+        ProgressBarLogger.__init__(
+            self,
+            init_state=init_state,
+            bars=bars,
+            ignored_bars=ignored_bars,
+            logged_bars=logged_bars,
+            ignore_bars_under=ignore_bars_under,
+            min_time_interval=min_time_interval,
+        )
+        self.leave_bars = leave_bars
+        self.tqdm_bars = OrderedDict([(bar, None) for bar in self.bars])
+        if notebook == "default":
+            notebook = SETTINGS["notebook"]
+        self.notebook = notebook
+        self.print_messages = print_messages
+        self.tqdm = tqdm_notebook if self.notebook else tqdm
+
+    def new_tqdm_bar(self, bar):
+        """Create a new tqdm bar, possibly replacing an existing one."""
+        if (bar in self.tqdm_bars) and (self.tqdm_bars[bar] is not None):
+            self.close_tqdm_bar(bar)
+        infos = self.bars[bar]
+        self.tqdm_bars[bar] = self.tqdm(
+            total=infos["total"],
+            desc=infos["title"],
+            postfix=dict(now=troncate_string(str(infos["message"]))),
+            leave=self.leave_bars,
+        )
+
+    def close_tqdm_bar(self, bar):
+        """Close and erase the tqdm bar."""
+        self.tqdm_bars[bar].close()
+        if not self.notebook:
+            self.tqdm_bars[bar] = None
+
+    def bars_callback(self, bar, attr, value, old_value):
+        if (bar not in self.tqdm_bars) or (self.tqdm_bars[bar] is None):
+            self.new_tqdm_bar(bar)
+        if attr == "index":
+            if value >= old_value:
+                total = self.bars[bar]["total"]
+                if total and (value >= total):
+                    self.close_tqdm_bar(bar)
+                else:
+                    self.tqdm_bars[bar].update(value - old_value)
+            else:
+                self.new_tqdm_bar(bar)
+                self.tqdm_bars[bar].update(value + 1)
+        elif attr == "message":
+            self.tqdm_bars[bar].set_postfix(now=troncate_string(str(value)))
+            self.tqdm_bars[bar].update(0)
+
+    def callback(self, **kw):
+        if self.print_messages and ("message" in kw) and kw["message"]:
+            if self.notebook:
+                print(kw["message"])
+            else:
+                self.tqdm.write(kw["message"])
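+
+
+# Editor's sketch (illustrative, not part of proglog): a subclass reacting to
+# bar updates through ``bars_callback``, driven by the ``<bar>__<attr>``
+# state-key convention handled by ``ProgressBarLogger.__call__`` above:
+#
+#     class PercentLogger(ProgressBarLogger):
+#         def bars_callback(self, bar, attr, value, old_value=None):
+#             total = self.bars[bar]["total"]
+#             if attr == "index" and total:
+#                 print("%s: %d%%" % (bar, 100 * value // total))
+#
+#     log = PercentLogger()
+#     log(file__total=4)   # declare a "file" bar with 4 steps
+#     log(file__index=1)   # triggers bars_callback -> "file: 25%"
+
+
+class RqWorkerProgressLogger:
+    def __init__(self, job):
+        self.job = job
+        if "progress_data" not in self.job.meta:
+            self.job.meta["progress_data"] = {}
+        self.job.save()
+
+    def callback(self, **kw):
+        self.job.meta["progress_data"] = self.state
+        self.job.save()
+
+
+class RqWorkerBarLogger(RqWorkerProgressLogger, ProgressBarLogger):
+
+    def __init__(
+        self,
+        job,
+        init_state=None,
+        bars=None,
+        ignored_bars=(),
+        logged_bars="all",
+        min_time_interval=0,
+    ):
+        RqWorkerProgressLogger.__init__(self, job)
+        ProgressBarLogger.__init__(
+            self,
+            init_state=init_state,
+            bars=bars,
+            ignored_bars=ignored_bars,
+            logged_bars=logged_bars,
+            min_time_interval=min_time_interval,
+        )
+
+
+class MuteProgressBarLogger(ProgressBarLogger):
+
+    def bar_is_ignored(self, bar):
+        return True
+
+
+def default_bar_logger(
+    logger,
+    bars=None,
+    ignored_bars=None,
+    logged_bars="all",
+    min_time_interval=0,
+    ignore_bars_under=0,
+):
+    if logger == "bar":
+        return TqdmProgressBarLogger(
+            bars=bars,
+            ignored_bars=ignored_bars,
+            logged_bars=logged_bars,
+            min_time_interval=min_time_interval,
+            ignore_bars_under=ignore_bars_under,
+        )
+    elif logger is None:
+        return MuteProgressBarLogger()
+    else:
+        return logger
+
+
+# Editor's sketch (illustrative): the usual entry point. Pass "bar" for a
+# tqdm progress bar, None to mute all output, or an existing logger to reuse:
+#
+#     logger = default_bar_logger("bar")
+#     for chunk in logger.iter_bar(chunk=range(100)):
+#         handle(chunk)  # 'handle' is a hypothetical work function
diff --git a/py311/lib/python3.11/site-packages/proglog/version.py b/py311/lib/python3.11/site-packages/proglog/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..74acd0efba6163dac383c668505aaf2f29fdd65d
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/proglog/version.py
@@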
-0,0 +1 @@ +__version__ = "0.1.12" diff --git a/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/METADATA b/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..0ca131f6dba7215a3e395fdb3875f1ec9bf39ded --- /dev/null +++ b/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/METADATA @@ -0,0 +1,58 @@ +Metadata-Version: 2.4 +Name: proto-plus +Version: 1.27.0 +Summary: Beautiful, Pythonic protocol buffers +Author-email: Google LLC +License: Apache 2.0 +Project-URL: Documentation, https://googleapis.dev/python/proto-plus/latest/ +Project-URL: Repository, https://github.com/googleapis/proto-plus-python +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Operating System :: OS Independent +Classifier: Topic :: Software Development :: Code Generators +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: protobuf<7.0.0,>=3.19.0 +Provides-Extra: testing +Requires-Dist: google-api-core>=1.31.5; extra == "testing" +Dynamic: license-file + +Proto Plus for Python +===================== + +|pypi| |release level| + + Beautiful, Pythonic protocol buffers. + +This is a wrapper around `protocol buffers`_. Protocol buffers is a +specification format for APIs, such as those inside Google. +This library provides protocol buffer message classes and objects that +largely behave like native Python types. + +.. _protocol buffers: https://developers.google.com/protocol-buffers/ + + +Documentation +------------- + +`API Documentation`_ is available on Read the Docs. + +.. _API Documentation: https://googleapis.dev/python/proto-plus/latest/ + +.. |pypi| image:: https://img.shields.io/pypi/v/proto-plus.svg + :target: https://pypi.org/project/proto-plus +.. 
|release level| image:: https://img.shields.io/badge/release%20level-ga-gold.svg?style=flat + :target: https://cloud.google.com/terms/launch-stages diff --git a/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/RECORD b/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..55689828894dca5fb64381845a25a04f86c3ffe0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/RECORD @@ -0,0 +1,33 @@ +proto/__init__.py,sha256=UtrqR9GHCxPRf_W6wpLYuiMOy74XyYyF6LuafAOWe5A,1712 +proto/_file_info.py,sha256=4aR7FZynZ0oYUe586ta04M7_eQ8Xful--T-ACUtAF30,7914 +proto/_package_info.py,sha256=UdF-V5E193kvOt_IqUqeylJzdxY1B_RRYc7-aCMMl84,1906 +proto/datetime_helpers.py,sha256=foMj8XA_6a9ZghbwfTK4DplXmpcMmGHQU2fEmdg54x8,7371 +proto/enums.py,sha256=6cyHYqdDG7e-jQST67k8U1n-7sU8pGcb0IBvZ0KDFcM,5961 +proto/fields.py,sha256=qBoQ_Tl7ZRheD17PzmZR6DXkbET_nZYEGRpQUtMfixE,5372 +proto/marshal/__init__.py,sha256=ughdxBgTpZ_GtOWnS2Waz1yVEbCno-61m5eRUmYxbSo,630 +proto/marshal/collections/__init__.py,sha256=mX2bO-HTvp4vcWw1Due9WMf717-QP97ZhNeHvkXGJsw,755 +proto/marshal/collections/maps.py,sha256=PeBFLnGHyHdwRjuZWEyIvs2ZrWttYN_cNk8Z6HUguno,2921 +proto/marshal/collections/repeated.py,sha256=Yum54SAaqwUSCmtJ4-7SbAJjAKuwjTmHj4O7fV8I-QI,6934 +proto/marshal/compat.py,sha256=MLA_C3zXO8QUJA3VeMXBgySmaqroGqjN8Ta520SkZlw,2349 +proto/marshal/marshal.py,sha256=pH47NsNP-a1xnD-E4EumGLd18A3oK_CBmfGs-ZoeoGM,12030 +proto/marshal/rules/__init__.py,sha256=rdttj_i0e4IpRZYX9oZSTs5AnjAW4Zl__BoNHfUUHpU,575 +proto/marshal/rules/bytes.py,sha256=Af26mG5e1Hqn0ty5NxCxhq0m7CxaeYWTatOQ8hTN0jM,1593 +proto/marshal/rules/dates.py,sha256=XbVAF4OYifQAnVLrYiUWndMlMoaL-VYzav52Vtu143I,3135 +proto/marshal/rules/enums.py,sha256=9NZIQ214sgY3RcyR640TRWnKLH4rl36mRWGyU56EA-4,2213 +proto/marshal/rules/field_mask.py,sha256=_CG9kvla4-32pqHVYzoXJDPfC9yySJzEJCAKZQC-2Io,1182 +proto/marshal/rules/message.py,sha256=1NaW9-HvV8dtfWyWzvJZ5CNguHby7E7W8pkmJxN967U,2364 +proto/marshal/rules/stringy_numbers.py,sha256=sOsCol-dB7epbVCj_AZu1ZEYo5ualmg5zm4hk0X1klU,1787 +proto/marshal/rules/struct.py,sha256=kY2iGZO0c8IxojgF8OllfuDb63lmdpjH-5tBod5zLoQ,5226 +proto/marshal/rules/wrappers.py,sha256=FDVuwLxXCnhm77j0kZ-F1hVGY4PllmgQha_TH9vjE3c,2280 +proto/message.py,sha256=0PM6ULYveFZ0lPM7jgGdOsjI42mAx-9md92BDjfi434,38971 +proto/modules.py,sha256=1g3HAZW0JWBx7LCrlMPamUOMHxaaf-QFwbYa6zgiplg,1585 +proto/primitives.py,sha256=Fwu6KQdn1i6vVPPD3MxQXHKvbz7z5B42QlOwSTkcidI,1000 +proto/utils.py,sha256=5-Dqe2WOPJQ0gsT5MTiMiPTDa8q56VfD-EcqIJD4eD0,1651 +proto/version.py,sha256=b6L-N_UiDBuT-LpBng1NoOB8HemgkLHTUTNYeWBWANM,599 +proto_plus-1.27.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +proto_plus-1.27.0.dist-info/METADATA,sha256=WuE6hN8OpWwH0sUVDfMC07SmXDlB_edOk-DqnBHxUY4,2243 +proto_plus-1.27.0.dist-info/RECORD,, +proto_plus-1.27.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +proto_plus-1.27.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +proto_plus-1.27.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +proto_plus-1.27.0.dist-info/top_level.txt,sha256=HFg_NW9VxhDySzqGDmUxqUh6w8QZlVLh4ZDIxYksTCM,6 diff --git a/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff 
--git a/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/WHEEL b/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..e7fa31b6f3f78deb1022c1f7927f07d4d16da822 --- /dev/null +++ b/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..2ff540d5764b76cf7bac64fc2bb9df6e9c1b398a --- /dev/null +++ b/py311/lib/python3.11/site-packages/proto_plus-1.27.0.dist-info/top_level.txt @@ -0,0 +1 @@ +proto diff --git a/py311/lib/python3.11/site-packages/pyasn1/__init__.py b/py311/lib/python3.11/site-packages/pyasn1/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7fa1d9e630154884d4a2d1a650c9351f4f5c2991 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1/__init__.py @@ -0,0 +1,2 @@ +# https://www.python.org/dev/peps/pep-0396/ +__version__ = '0.6.1' diff --git a/py311/lib/python3.11/site-packages/pyasn1/debug.py b/py311/lib/python3.11/site-packages/pyasn1/debug.py new file mode 100644 index 0000000000000000000000000000000000000000..07194235466a40c96ed0bb81f4874793e7680cbf --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1/debug.py @@ -0,0 +1,146 @@ +# +# This file is part of pyasn1 software. +# +# Copyright (c) 2005-2020, Ilya Etingof +# License: https://pyasn1.readthedocs.io/en/latest/license.html +# +import logging +import sys + +from pyasn1 import __version__ +from pyasn1 import error + +__all__ = ['Debug', 'setLogger', 'hexdump'] + +DEBUG_NONE = 0x0000 +DEBUG_ENCODER = 0x0001 +DEBUG_DECODER = 0x0002 +DEBUG_ALL = 0xffff + +FLAG_MAP = { + 'none': DEBUG_NONE, + 'encoder': DEBUG_ENCODER, + 'decoder': DEBUG_DECODER, + 'all': DEBUG_ALL +} + +LOGGEE_MAP = {} + + +class Printer(object): + # noinspection PyShadowingNames + def __init__(self, logger=None, handler=None, formatter=None): + if logger is None: + logger = logging.getLogger('pyasn1') + + logger.setLevel(logging.DEBUG) + + if handler is None: + handler = logging.StreamHandler() + + if formatter is None: + formatter = logging.Formatter('%(asctime)s %(name)s: %(message)s') + + handler.setFormatter(formatter) + handler.setLevel(logging.DEBUG) + logger.addHandler(handler) + + self.__logger = logger + + def __call__(self, msg): + self.__logger.debug(msg) + + def __str__(self): + return '' + + +class Debug(object): + defaultPrinter = Printer() + + def __init__(self, *flags, **options): + self._flags = DEBUG_NONE + + if 'loggerName' in options: + # route our logs to parent logger + self._printer = Printer( + logger=logging.getLogger(options['loggerName']), + handler=logging.NullHandler() + ) + + elif 'printer' in options: + self._printer = options.get('printer') + + else: + self._printer = self.defaultPrinter + + self._printer('running pyasn1 %s, debug flags %s' % (__version__, ', '.join(flags))) + + for flag in flags: + inverse = flag and flag[0] in ('!', '~') + if inverse: + flag = flag[1:] + try: + if inverse: + self._flags &= ~FLAG_MAP[flag] + else: + self._flags |= FLAG_MAP[flag] + except KeyError: + raise error.PyAsn1Error('bad debug flag %s' % flag) + + self._printer("debug category '%s' %s" % (flag, inverse and 'disabled' or 'enabled')) + + def 
__str__(self):
+        return 'logger %s, flags %x' % (self._printer, self._flags)
+
+    def __call__(self, msg):
+        self._printer(msg)
+
+    def __and__(self, flag):
+        return self._flags & flag
+
+    def __rand__(self, flag):
+        return flag & self._flags
+
+_LOG = DEBUG_NONE
+
+
+def setLogger(userLogger):
+    global _LOG
+
+    if userLogger:
+        _LOG = userLogger
+    else:
+        _LOG = DEBUG_NONE
+
+    # Update registered logging clients
+    for module, (name, flags) in LOGGEE_MAP.items():
+        setattr(module, name, _LOG & flags and _LOG or DEBUG_NONE)
+
+
+def registerLoggee(module, name='LOG', flags=DEBUG_NONE):
+    LOGGEE_MAP[sys.modules[module]] = name, flags
+    setLogger(_LOG)
+    return _LOG
+
+
+def hexdump(octets):
+    return ' '.join(
+        ['%s%.2X' % (n % 16 == 0 and ('\n%.5d: ' % n) or '', x)
+         for n, x in zip(range(len(octets)), octets)]
+    )
+
+
+class Scope(object):
+    def __init__(self):
+        self._list = []
+
+    def __str__(self): return '.'.join(self._list)
+
+    def push(self, token):
+        self._list.append(token)
+
+    def pop(self):
+        return self._list.pop()
+
+
+scope = Scope()
diff --git a/py311/lib/python3.11/site-packages/pyasn1/error.py b/py311/lib/python3.11/site-packages/pyasn1/error.py
new file mode 100644
index 0000000000000000000000000000000000000000..75c9a3f4cd09dd531f7eea8738d9ba4191389b78
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1/error.py
@@ -0,0 +1,116 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof
+# License: https://pyasn1.readthedocs.io/en/latest/license.html
+#
+
+
+class PyAsn1Error(Exception):
+    """Base pyasn1 exception
+
+    `PyAsn1Error` is the base exception class (based on
+    :class:`Exception`) that represents all possible ASN.1 related
+    errors.
+
+    Parameters
+    ----------
+    args:
+        Opaque positional parameters
+
+    Keyword Args
+    ------------
+    kwargs:
+        Opaque keyword parameters
+
+    """
+    def __init__(self, *args, **kwargs):
+        self._args = args
+        self._kwargs = kwargs
+
+    @property
+    def context(self):
+        """Return exception context
+
+        When the exception object is created, the caller can supply some
+        opaque context for the upper layers to better understand the cause
+        of the exception.
+
+        Returns
+        -------
+        : :py:class:`dict`
+            Dict holding context-specific data
+        """
+        return self._kwargs.get('context', {})
+
+
+class ValueConstraintError(PyAsn1Error):
+    """ASN.1 type constraints violation exception
+
+    The `ValueConstraintError` exception indicates an ASN.1 value
+    constraint violation.
+
+    It might happen on value object instantiation (for scalar types) or on
+    serialization (for constructed types).
+    """
+
+
+class SubstrateUnderrunError(PyAsn1Error):
+    """ASN.1 data structure deserialization error
+
+    The `SubstrateUnderrunError` exception indicates insufficient serialised
+    data on input of a de-serialization codec.
+    """
+
+
+class EndOfStreamError(SubstrateUnderrunError):
+    """ASN.1 data structure deserialization error
+
+    The `EndOfStreamError` exception indicates that the input stream has
+    been closed.
+    """
+
+
+class UnsupportedSubstrateError(PyAsn1Error):
+    """Unsupported substrate type to parse as ASN.1 data."""
+
+
+class PyAsn1UnicodeError(PyAsn1Error, UnicodeError):
+    """Unicode text processing error
+
+    The `PyAsn1UnicodeError` exception is a base class for errors relating to
+    unicode text de/serialization.
+
+    Apart from inheriting from :class:`PyAsn1Error`, it also inherits from
+    :class:`UnicodeError` to help the caller catch unicode-related errors.
+    """
+    def __init__(self, message, unicode_error=None):
+        if isinstance(unicode_error, UnicodeError):
+            UnicodeError.__init__(self, *unicode_error.args)
+        PyAsn1Error.__init__(self, message)
+
+
+class PyAsn1UnicodeDecodeError(PyAsn1UnicodeError, UnicodeDecodeError):
+    """Unicode text decoding error
+
+    The `PyAsn1UnicodeDecodeError` exception represents a failure to
+    deserialize unicode text.
+
+    Apart from inheriting from :class:`PyAsn1UnicodeError`, it also inherits
+    from :class:`UnicodeDecodeError` to help the caller catch
+    unicode-related errors.
+    """
+
+
+class PyAsn1UnicodeEncodeError(PyAsn1UnicodeError, UnicodeEncodeError):
+    """Unicode text encoding error
+
+    The `PyAsn1UnicodeEncodeError` exception represents a failure to
+    serialize unicode text.
+
+    Apart from inheriting from :class:`PyAsn1UnicodeError`, it also inherits
+    from :class:`UnicodeEncodeError` to help the caller catch
+    unicode-related errors.
+    """
+
+
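+# Editor's sketch (illustrative, not part of pyasn1): the opaque keyword
+# arguments passed to PyAsn1Error become the exception's ``context`` for
+# upper layers to inspect:
+#
+#     try:
+#         raise PyAsn1Error('decoding failed', context={'offset': 42})
+#     except PyAsn1Error as exc:
+#         print(exc.context)  # {'offset': 42}
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/__init__.py b/py311/lib/python3.11/site-packages/pyasn1_modules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b90010d7a0834169ef5324cf1d07ce985813038
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/__init__.py
@@ -0,0 +1,2 @@
+# http://www.python.org/dev/peps/pep-0396/
+__version__ = '0.4.2'
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/pem.py b/py311/lib/python3.11/site-packages/pyasn1_modules/pem.py
new file mode 100644
index 0000000000000000000000000000000000000000..29235ab5cf9a3c8c340e978c307f1424d03cebd6
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/pem.py
@@ -0,0 +1,58 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import base64
+
+stSpam, stHam, stDump = 0, 1, 2
+
+
+# Editor's sketch (illustrative, not part of pyasn1-modules): typical use of
+# the reader below with a bundle that may contain certificate or CRL blocks:
+#
+#     with open('bundle.pem') as f:
+#         idx, der = readPemBlocksFromFile(
+#             f,
+#             ('-----BEGIN CERTIFICATE-----', '-----END CERTIFICATE-----'),
+#             ('-----BEGIN X509 CRL-----', '-----END X509 CRL-----'))
+#
+# The markers parameter is of the form ('start1', 'stop1'), ('start2', 'stop2')...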
+# The return value is a (marker-index, substrate) tuple
+def readPemBlocksFromFile(fileObj, *markers):
+    startMarkers = dict(map(lambda x: (x[1], x[0]),
+                            enumerate(map(lambda y: y[0], markers))))
+    stopMarkers = dict(map(lambda x: (x[1], x[0]),
+                           enumerate(map(lambda y: y[1], markers))))
+    idx = -1
+    substrate = b''  # return bytes whether or not a PEM block is found
+    certLines = []
+    state = stSpam
+    while True:
+        certLine = fileObj.readline()
+        if not certLine:
+            break
+        certLine = certLine.strip()
+        if state == stSpam:
+            if certLine in startMarkers:
+                certLines = []
+                idx = startMarkers[certLine]
+                state = stHam
+                continue
+        if state == stHam:
+            if certLine in stopMarkers and stopMarkers[certLine] == idx:
+                state = stDump
+            else:
+                certLines.append(certLine)
+        if state == stDump:
+            substrate = b''.join([base64.b64decode(x.encode()) for x in certLines])
+            break
+    return idx, substrate
+
+
+# Backward compatibility routine
+def readPemFromFile(fileObj,
+                    startMarker='-----BEGIN CERTIFICATE-----',
+                    endMarker='-----END CERTIFICATE-----'):
+    idx, substrate = readPemBlocksFromFile(fileObj, (startMarker, endMarker))
+    return substrate
+
+
+def readBase64fromText(text):
+    return base64.b64decode(text.encode())
+
+
+def readBase64FromFile(fileObj):
+    return readBase64fromText(fileObj.read())
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1155.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1155.py
new file mode 100644
index 0000000000000000000000000000000000000000..18702345d136e30da968206754a757ca6afddd8c
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1155.py
@@ -0,0 +1,96 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2020, Ilya Etingof
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv1 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1155.txt
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+class ObjectName(univ.ObjectIdentifier):
+    pass
+
+
+class SimpleSyntax(univ.Choice):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('number', univ.Integer()),
+        namedtype.NamedType('string', univ.OctetString()),
+        namedtype.NamedType('object', univ.ObjectIdentifier()),
+        namedtype.NamedType('empty', univ.Null())
+    )
+
+
+class IpAddress(univ.OctetString):
+    tagSet = univ.OctetString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0)
+    )
+    # constrain the base OctetString (not Integer) to exactly four octets
+    subtypeSpec = univ.OctetString.subtypeSpec + constraint.ValueSizeConstraint(
+        4, 4
+    )
+
+
+class NetworkAddress(univ.Choice):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('internet', IpAddress())
+    )
+
+
+class Counter(univ.Integer):
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 1)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 4294967295
+    )
+
+
+class Gauge(univ.Integer):
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 4294967295
+    )
+
+
+class TimeTicks(univ.Integer):
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 3)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 4294967295
+    )
+
+
+class Opaque(univ.OctetString):
+    tagSet = univ.OctetString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 4) + ) + + +class ApplicationSyntax(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('address', NetworkAddress()), + namedtype.NamedType('counter', Counter()), + namedtype.NamedType('gauge', Gauge()), + namedtype.NamedType('ticks', TimeTicks()), + namedtype.NamedType('arbitrary', Opaque()) + ) + + +class ObjectSyntax(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('simple', SimpleSyntax()), + namedtype.NamedType('application-wide', ApplicationSyntax()) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1157.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1157.py new file mode 100644 index 0000000000000000000000000000000000000000..df49e482db687471f80cef2fdd542f72719e7783 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1157.py @@ -0,0 +1,126 @@ +# +# This file is part of pyasn1-modules software. +# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# SNMPv1 message syntax +# +# ASN.1 source from: +# http://www.ietf.org/rfc/rfc1157.txt +# +# Sample captures from: +# http://wiki.wireshark.org/SampleCaptures/ +# +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc1155 + + +class Version(univ.Integer): + namedValues = namedval.NamedValues( + ('version-1', 0) + ) + defaultValue = 0 + + +class Community(univ.OctetString): + pass + + +class RequestID(univ.Integer): + pass + + +class ErrorStatus(univ.Integer): + namedValues = namedval.NamedValues( + ('noError', 0), + ('tooBig', 1), + ('noSuchName', 2), + ('badValue', 3), + ('readOnly', 4), + ('genErr', 5) + ) + + +class ErrorIndex(univ.Integer): + pass + + +class VarBind(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('name', rfc1155.ObjectName()), + namedtype.NamedType('value', rfc1155.ObjectSyntax()) + ) + + +class VarBindList(univ.SequenceOf): + componentType = VarBind() + + +class _RequestBase(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('request-id', RequestID()), + namedtype.NamedType('error-status', ErrorStatus()), + namedtype.NamedType('error-index', ErrorIndex()), + namedtype.NamedType('variable-bindings', VarBindList()) + ) + + +class GetRequestPDU(_RequestBase): + tagSet = _RequestBase.tagSet.tagImplicitly( + tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0) + ) + + +class GetNextRequestPDU(_RequestBase): + tagSet = _RequestBase.tagSet.tagImplicitly( + tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1) + ) + + +class GetResponsePDU(_RequestBase): + tagSet = _RequestBase.tagSet.tagImplicitly( + tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2) + ) + + +class SetRequestPDU(_RequestBase): + tagSet = _RequestBase.tagSet.tagImplicitly( + tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3) + ) + + +class TrapPDU(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('enterprise', univ.ObjectIdentifier()), + namedtype.NamedType('agent-addr', rfc1155.NetworkAddress()), + namedtype.NamedType('generic-trap', univ.Integer().clone( + namedValues=namedval.NamedValues(('coldStart', 0), ('warmStart', 1), ('linkDown', 2), ('linkUp', 3), + ('authenticationFailure', 4), ('egpNeighborLoss', 5), + ('enterpriseSpecific', 6)))), + namedtype.NamedType('specific-trap', univ.Integer()), + namedtype.NamedType('time-stamp', 
rfc1155.TimeTicks()), + namedtype.NamedType('variable-bindings', VarBindList()) + ) + + +class Pdus(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('get-request', GetRequestPDU()), + namedtype.NamedType('get-next-request', GetNextRequestPDU()), + namedtype.NamedType('get-response', GetResponsePDU()), + namedtype.NamedType('set-request', SetRequestPDU()), + namedtype.NamedType('trap', TrapPDU()) + ) + + +class Message(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.NamedType('community', Community()), + namedtype.NamedType('data', Pdus()) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1901.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1901.py new file mode 100644 index 0000000000000000000000000000000000000000..658dcb938169eed38a4a2da5d43e523487223308 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1901.py @@ -0,0 +1,22 @@ +# +# This file is part of pyasn1-modules software. +# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# SNMPv2c message syntax +# +# ASN.1 source from: +# http://www.ietf.org/rfc/rfc1901.txt +# +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import univ + + +class Message(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('version-2c', 1)))), + namedtype.NamedType('community', univ.OctetString()), + namedtype.NamedType('data', univ.Any()) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1902.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1902.py new file mode 100644 index 0000000000000000000000000000000000000000..063998a9481ebdbdcf38c7af824719d9e544c2fe --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1902.py @@ -0,0 +1,129 @@ +# +# This file is part of pyasn1-modules software. 
+#
+# Copyright (c) 2005-2020, Ilya Etingof
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv2c message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1902.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+class Integer(univ.Integer):
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        -2147483648, 2147483647
+    )
+
+
+class Integer32(univ.Integer):
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        -2147483648, 2147483647
+    )
+
+
+class OctetString(univ.OctetString):
+    # constrain the base OctetString (not Integer) to at most 65535 octets
+    subtypeSpec = univ.OctetString.subtypeSpec + constraint.ValueSizeConstraint(
+        0, 65535
+    )
+
+
+class IpAddress(univ.OctetString):
+    tagSet = univ.OctetString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x00)
+    )
+    subtypeSpec = univ.OctetString.subtypeSpec + constraint.ValueSizeConstraint(
+        4, 4
+    )
+
+
+class Counter32(univ.Integer):
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x01)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 4294967295
+    )
+
+
+class Gauge32(univ.Integer):
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x02)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 4294967295
+    )
+
+
+class Unsigned32(univ.Integer):
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x02)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 4294967295
+    )
+
+
+class TimeTicks(univ.Integer):
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x03)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 4294967295
+    )
+
+
+class Opaque(univ.OctetString):
+    tagSet = univ.OctetString.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x04)
+    )
+
+
+class Counter64(univ.Integer):
+    tagSet = univ.Integer.tagSet.tagImplicitly(
+        tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x06)
+    )
+    subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+        0, 18446744073709551615
+    )
+
+
+class Bits(univ.OctetString):
+    pass
+
+
+class ObjectName(univ.ObjectIdentifier):
+    pass
+
+
+class SimpleSyntax(univ.Choice):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('integer-value', Integer()),
+        namedtype.NamedType('string-value', OctetString()),
+        namedtype.NamedType('objectID-value', univ.ObjectIdentifier())
+    )
+
+
+class ApplicationSyntax(univ.Choice):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('ipAddress-value', IpAddress()),
+        namedtype.NamedType('counter-value', Counter32()),
+        namedtype.NamedType('timeticks-value', TimeTicks()),
+        namedtype.NamedType('arbitrary-value', Opaque()),
+        namedtype.NamedType('big-counter-value', Counter64()),
+        # This conflicts with Gauge32 (both carry the APPLICATION 2 tag)
+        # namedtype.NamedType('unsigned-integer-value', Unsigned32()),
+        namedtype.NamedType('gauge32-value', Gauge32())
+    )  # BITS misplaced?
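# --- Editor's usage sketch (annotation, not part of the vendored patch) ---
# The tagged SMIv2 types above round-trip through pyasn1's BER codec; the
# implicit APPLICATION-class tag is what lets a decoder distinguish, say, a
# Counter32 from a plain INTEGER.  Uncommented, this sketch would run as:
#
#     from pyasn1.codec.ber import decoder, encoder
#
#     substrate = encoder.encode(Counter32(12345))
#     value, rest = decoder.decode(substrate, asn1Spec=Counter32())
#     assert int(value) == 12345 and not rest
# ---------------------------------------------------------------------------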
+ + +class ObjectSyntax(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('simple', SimpleSyntax()), + namedtype.NamedType('application-wide', ApplicationSyntax()) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1905.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1905.py new file mode 100644 index 0000000000000000000000000000000000000000..435427b2bc2ed528ceafe561c54d2072ce53be59 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc1905.py @@ -0,0 +1,135 @@ +# +# This file is part of pyasn1-modules software. +# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# SNMPv2c PDU syntax +# +# ASN.1 source from: +# http://www.ietf.org/rfc/rfc1905.txt +# +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc1902 + +max_bindings = rfc1902.Integer(2147483647) + + +class _BindValue(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('value', rfc1902.ObjectSyntax()), + namedtype.NamedType('unSpecified', univ.Null()), + namedtype.NamedType('noSuchObject', + univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('noSuchInstance', + univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('endOfMibView', + univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class VarBind(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('name', rfc1902.ObjectName()), + namedtype.NamedType('', _BindValue()) + ) + + +class VarBindList(univ.SequenceOf): + componentType = VarBind() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint( + 0, max_bindings + ) + + +class PDU(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('request-id', rfc1902.Integer32()), + namedtype.NamedType('error-status', univ.Integer( + namedValues=namedval.NamedValues(('noError', 0), ('tooBig', 1), ('noSuchName', 2), ('badValue', 3), + ('readOnly', 4), ('genErr', 5), ('noAccess', 6), ('wrongType', 7), + ('wrongLength', 8), ('wrongEncoding', 9), ('wrongValue', 10), + ('noCreation', 11), ('inconsistentValue', 12), ('resourceUnavailable', 13), + ('commitFailed', 14), ('undoFailed', 15), ('authorizationError', 16), + ('notWritable', 17), ('inconsistentName', 18)))), + namedtype.NamedType('error-index', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))), + namedtype.NamedType('variable-bindings', VarBindList()) + ) + + +class BulkPDU(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('request-id', rfc1902.Integer32()), + namedtype.NamedType('non-repeaters', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))), + namedtype.NamedType('max-repetitions', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))), + namedtype.NamedType('variable-bindings', VarBindList()) + ) + + +class GetRequestPDU(PDU): + tagSet = PDU.tagSet.tagImplicitly( + tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0) + ) + + +class GetNextRequestPDU(PDU): + tagSet = PDU.tagSet.tagImplicitly( + tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1) + ) + + +class ResponsePDU(PDU): + tagSet = PDU.tagSet.tagImplicitly( + 
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2) + ) + + +class SetRequestPDU(PDU): + tagSet = PDU.tagSet.tagImplicitly( + tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3) + ) + + +class GetBulkRequestPDU(BulkPDU): + tagSet = PDU.tagSet.tagImplicitly( + tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5) + ) + + +class InformRequestPDU(PDU): + tagSet = PDU.tagSet.tagImplicitly( + tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6) + ) + + +class SNMPv2TrapPDU(PDU): + tagSet = PDU.tagSet.tagImplicitly( + tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7) + ) + + +class ReportPDU(PDU): + tagSet = PDU.tagSet.tagImplicitly( + tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8) + ) + + +class PDUs(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('get-request', GetRequestPDU()), + namedtype.NamedType('get-next-request', GetNextRequestPDU()), + namedtype.NamedType('get-bulk-request', GetBulkRequestPDU()), + namedtype.NamedType('response', ResponsePDU()), + namedtype.NamedType('set-request', SetRequestPDU()), + namedtype.NamedType('inform-request', InformRequestPDU()), + namedtype.NamedType('snmpV2-trap', SNMPv2TrapPDU()), + namedtype.NamedType('report', ReportPDU()) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2251.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2251.py new file mode 100644 index 0000000000000000000000000000000000000000..094922cad0cd052e5b20d6e1078cc740b68de1eb --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2251.py @@ -0,0 +1,563 @@ +# +# This file is part of pyasn1-modules software. +# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# LDAP message syntax +# +# ASN.1 source from: +# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/ldap.asn +# +# Sample captures from: +# http://wiki.wireshark.org/SampleCaptures/ +# +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ + +maxInt = univ.Integer(2147483647) + + +class LDAPString(univ.OctetString): + pass + + +class LDAPOID(univ.OctetString): + pass + + +class LDAPDN(LDAPString): + pass + + +class RelativeLDAPDN(LDAPString): + pass + + +class AttributeType(LDAPString): + pass + + +class AttributeDescription(LDAPString): + pass + + +class AttributeDescriptionList(univ.SequenceOf): + componentType = AttributeDescription() + + +class AttributeValue(univ.OctetString): + pass + + +class AssertionValue(univ.OctetString): + pass + + +class AttributeValueAssertion(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('attributeDesc', AttributeDescription()), + namedtype.NamedType('assertionValue', AssertionValue()) + ) + + +class Attribute(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type', AttributeDescription()), + namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue())) + ) + + +class MatchingRuleId(LDAPString): + pass + + +class Control(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('controlType', LDAPOID()), + namedtype.DefaultedNamedType('criticality', univ.Boolean('False')), + namedtype.OptionalNamedType('controlValue', univ.OctetString()) + ) + + +class Controls(univ.SequenceOf): + componentType = Control() + + +class LDAPURL(LDAPString): + pass + + +class Referral(univ.SequenceOf): + componentType = LDAPURL() + + +class 
SaslCredentials(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('mechanism', LDAPString()), + namedtype.OptionalNamedType('credentials', univ.OctetString()) + ) + + +class AuthenticationChoice(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('simple', univ.OctetString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('reserved-1', univ.OctetString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('reserved-2', univ.OctetString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('sasl', + SaslCredentials().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) + ) + + +class BindRequest(univ.Sequence): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 0) + ) + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 127))), + namedtype.NamedType('name', LDAPDN()), + namedtype.NamedType('authentication', AuthenticationChoice()) + ) + + +class PartialAttributeList(univ.SequenceOf): + componentType = univ.Sequence( + componentType=namedtype.NamedTypes( + namedtype.NamedType('type', AttributeDescription()), + namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue())) + ) + ) + + +class SearchResultEntry(univ.Sequence): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 4) + ) + componentType = namedtype.NamedTypes( + namedtype.NamedType('objectName', LDAPDN()), + namedtype.NamedType('attributes', PartialAttributeList()) + ) + + +class MatchingRuleAssertion(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('matchingRule', MatchingRuleId().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('type', AttributeDescription().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('matchValue', + AssertionValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.DefaultedNamedType('dnAttributes', univ.Boolean('False').subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))) + ) + + +class SubstringFilter(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type', AttributeDescription()), + namedtype.NamedType('substrings', + univ.SequenceOf( + componentType=univ.Choice( + componentType=namedtype.NamedTypes( + namedtype.NamedType( + 'initial', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)) + ), + namedtype.NamedType( + 'any', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)) + ), + namedtype.NamedType( + 'final', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)) + ) + ) + ) + ) + ) + ) + + +# Ugly hack to handle recursive Filter reference (up to 3-levels deep). 
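# Editor's note (annotation, not part of the vendored patch): ASN.1 defines
# Filter recursively, but a pyasn1 declaration cannot reference itself, so
# the module unrolls the recursion as Filter -> Filter2 -> Filter3; filters
# nesting boolean operators more than three levels deep will not decode.
# A minimal sketch of the simplest case, a presence filter, assuming
# pyasn1's usual item-assignment API:
#
#     from pyasn1.codec.ber import encoder
#
#     f = Filter()
#     f['present'] = AttributeDescription('objectClass').subtype(
#         implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))
#     substrate = encoder.encode(f)  # BER: [7] "objectClass"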
+ +class Filter3(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.NamedType('substrings', SubstringFilter().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))), + namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))), + namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))), + namedtype.NamedType('present', AttributeDescription().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))), + namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))), + namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9))) + ) + + +class Filter2(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('and', univ.SetOf(componentType=Filter3()).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('or', univ.SetOf(componentType=Filter3()).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.NamedType('not', + Filter3().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.NamedType('substrings', SubstringFilter().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))), + namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))), + namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))), + namedtype.NamedType('present', AttributeDescription().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))), + namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))), + namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9))) + ) + + +class Filter(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('and', univ.SetOf(componentType=Filter2()).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('or', univ.SetOf(componentType=Filter2()).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.NamedType('not', + Filter2().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.NamedType('substrings', SubstringFilter().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))), + namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))), + 
namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))), + namedtype.NamedType('present', AttributeDescription().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))), + namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))), + namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9))) + ) + + +# End of Filter hack + +class SearchRequest(univ.Sequence): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 3) + ) + componentType = namedtype.NamedTypes( + namedtype.NamedType('baseObject', LDAPDN()), + namedtype.NamedType('scope', univ.Enumerated( + namedValues=namedval.NamedValues(('baseObject', 0), ('singleLevel', 1), ('wholeSubtree', 2)))), + namedtype.NamedType('derefAliases', univ.Enumerated( + namedValues=namedval.NamedValues(('neverDerefAliases', 0), ('derefInSearching', 1), + ('derefFindingBaseObj', 2), ('derefAlways', 3)))), + namedtype.NamedType('sizeLimit', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))), + namedtype.NamedType('timeLimit', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))), + namedtype.NamedType('typesOnly', univ.Boolean()), + namedtype.NamedType('filter', Filter()), + namedtype.NamedType('attributes', AttributeDescriptionList()) + ) + + +class UnbindRequest(univ.Null): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2) + ) + + +class BindResponse(univ.Sequence): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1) + ) + componentType = namedtype.NamedTypes( + namedtype.NamedType('resultCode', univ.Enumerated( + namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2), + ('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5), + ('compareTrue', 6), ('authMethodNotSupported', 7), + ('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10), + ('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12), + ('confidentialityRequired', 13), ('saslBindInProgress', 14), + ('noSuchAttribute', 16), ('undefinedAttributeType', 17), + ('inappropriateMatching', 18), ('constraintViolation', 19), + ('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21), + ('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34), + ('reserved-35', 35), ('aliasDereferencingProblem', 36), + ('inappropriateAuthentication', 48), ('invalidCredentials', 49), + ('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52), + ('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64), + ('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66), + ('notAllowedOnRDN', 67), ('entryAlreadyExists', 68), + ('objectClassModsProhibited', 69), ('reserved-70', 70), + ('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81), + ('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84), + ('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87), + ('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))), + namedtype.NamedType('matchedDN', LDAPDN()), + namedtype.NamedType('errorMessage', LDAPString()), + namedtype.OptionalNamedType('referral', Referral().subtype( + 
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.OptionalNamedType('serverSaslCreds', univ.OctetString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))) + ) + + +class LDAPResult(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('resultCode', univ.Enumerated( + namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2), + ('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5), + ('compareTrue', 6), ('authMethodNotSupported', 7), + ('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10), + ('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12), + ('confidentialityRequired', 13), ('saslBindInProgress', 14), + ('noSuchAttribute', 16), ('undefinedAttributeType', 17), + ('inappropriateMatching', 18), ('constraintViolation', 19), + ('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21), + ('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34), + ('reserved-35', 35), ('aliasDereferencingProblem', 36), + ('inappropriateAuthentication', 48), ('invalidCredentials', 49), + ('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52), + ('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64), + ('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66), + ('notAllowedOnRDN', 67), ('entryAlreadyExists', 68), + ('objectClassModsProhibited', 69), ('reserved-70', 70), + ('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81), + ('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84), + ('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87), + ('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))), + namedtype.NamedType('matchedDN', LDAPDN()), + namedtype.NamedType('errorMessage', LDAPString()), + namedtype.OptionalNamedType('referral', Referral().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))) + ) + + +class SearchResultReference(univ.SequenceOf): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 19) + ) + componentType = LDAPURL() + + +class SearchResultDone(LDAPResult): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 5) + ) + + +class AttributeTypeAndValues(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type', AttributeDescription()), + namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue())) + ) + + +class ModifyRequest(univ.Sequence): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 6) + ) + componentType = namedtype.NamedTypes( + namedtype.NamedType('object', LDAPDN()), + namedtype.NamedType('modification', + univ.SequenceOf( + componentType=univ.Sequence( + componentType=namedtype.NamedTypes( + namedtype.NamedType( + 'operation', univ.Enumerated(namedValues=namedval.NamedValues(('add', 0), ('delete', 1), ('replace', 2))) + ), + namedtype.NamedType('modification', AttributeTypeAndValues()))) + ) + ) + ) + + +class ModifyResponse(LDAPResult): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 7) + ) + + +class AttributeList(univ.SequenceOf): + componentType = univ.Sequence( + componentType=namedtype.NamedTypes( + namedtype.NamedType('type', AttributeDescription()), + namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue())) + ) + ) + + 
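# Editor's usage sketch (annotation, not part of the vendored patch): a
# wholeSubtree search for (objectClass=*) under an assumed base DN, encoded
# the way an LDAP client would put it on the wire.  Enumerated components
# accept their named values as strings; item access is assumed to
# auto-instantiate nested components (pyasn1 0.4+).
#
#     from pyasn1.codec.ber import encoder
#
#     req = SearchRequest()
#     req['baseObject'] = LDAPDN('dc=example,dc=com')
#     req['scope'] = 'wholeSubtree'
#     req['derefAliases'] = 'neverDerefAliases'
#     req['sizeLimit'] = 0
#     req['timeLimit'] = 0
#     req['typesOnly'] = False
#     req['filter']['present'] = AttributeDescription('objectClass').subtype(
#         implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))
#     req['attributes'].append(AttributeDescription('cn'))
#     substrate = encoder.encode(req)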
+class AddRequest(univ.Sequence): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 8) + ) + componentType = namedtype.NamedTypes( + namedtype.NamedType('entry', LDAPDN()), + namedtype.NamedType('attributes', AttributeList()) + ) + + +class AddResponse(LDAPResult): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 9) + ) + + +class DelRequest(LDAPResult): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 10) + ) + + +class DelResponse(LDAPResult): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 11) + ) + + +class ModifyDNRequest(univ.Sequence): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 12) + ) + componentType = namedtype.NamedTypes( + namedtype.NamedType('entry', LDAPDN()), + namedtype.NamedType('newrdn', RelativeLDAPDN()), + namedtype.NamedType('deleteoldrdn', univ.Boolean()), + namedtype.OptionalNamedType('newSuperior', + LDAPDN().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) + + ) + + +class ModifyDNResponse(LDAPResult): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 13) + ) + + +class CompareRequest(univ.Sequence): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 14) + ) + componentType = namedtype.NamedTypes( + namedtype.NamedType('entry', LDAPDN()), + namedtype.NamedType('ava', AttributeValueAssertion()) + ) + + +class CompareResponse(LDAPResult): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 15) + ) + + +class AbandonRequest(LDAPResult): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 16) + ) + + +class ExtendedRequest(univ.Sequence): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 23) + ) + componentType = namedtype.NamedTypes( + namedtype.NamedType('requestName', + LDAPOID().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('requestValue', univ.OctetString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class ExtendedResponse(univ.Sequence): + tagSet = univ.Sequence.tagSet.tagImplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 24) + ) + componentType = namedtype.NamedTypes( + namedtype.NamedType('resultCode', univ.Enumerated( + namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2), + ('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5), + ('compareTrue', 6), ('authMethodNotSupported', 7), + ('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10), + ('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12), + ('confidentialityRequired', 13), ('saslBindInProgress', 14), + ('noSuchAttribute', 16), ('undefinedAttributeType', 17), + ('inappropriateMatching', 18), ('constraintViolation', 19), + ('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21), + ('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34), + ('reserved-35', 35), ('aliasDereferencingProblem', 36), + ('inappropriateAuthentication', 48), ('invalidCredentials', 49), + ('insufficientAccessRights', 
50), ('busy', 51), ('unavailable', 52), + ('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64), + ('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66), + ('notAllowedOnRDN', 67), ('entryAlreadyExists', 68), + ('objectClassModsProhibited', 69), ('reserved-70', 70), + ('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81), + ('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84), + ('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87), + ('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))), + namedtype.NamedType('matchedDN', LDAPDN()), + namedtype.NamedType('errorMessage', LDAPString()), + namedtype.OptionalNamedType('referral', Referral().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))), + + namedtype.OptionalNamedType('responseName', LDAPOID().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 10))), + namedtype.OptionalNamedType('response', univ.OctetString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 11))) + ) + + +class MessageID(univ.Integer): + subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint( + 0, maxInt + ) + + +class LDAPMessage(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('messageID', MessageID()), + namedtype.NamedType( + 'protocolOp', univ.Choice( + componentType=namedtype.NamedTypes( + namedtype.NamedType('bindRequest', BindRequest()), + namedtype.NamedType('bindResponse', BindResponse()), + namedtype.NamedType('unbindRequest', UnbindRequest()), + namedtype.NamedType('searchRequest', SearchRequest()), + namedtype.NamedType('searchResEntry', SearchResultEntry()), + namedtype.NamedType('searchResDone', SearchResultDone()), + namedtype.NamedType('searchResRef', SearchResultReference()), + namedtype.NamedType('modifyRequest', ModifyRequest()), + namedtype.NamedType('modifyResponse', ModifyResponse()), + namedtype.NamedType('addRequest', AddRequest()), + namedtype.NamedType('addResponse', AddResponse()), + namedtype.NamedType('delRequest', DelRequest()), + namedtype.NamedType('delResponse', DelResponse()), + namedtype.NamedType('modDNRequest', ModifyDNRequest()), + namedtype.NamedType('modDNResponse', ModifyDNResponse()), + namedtype.NamedType('compareRequest', CompareRequest()), + namedtype.NamedType('compareResponse', CompareResponse()), + namedtype.NamedType('abandonRequest', AbandonRequest()), + namedtype.NamedType('extendedReq', ExtendedRequest()), + namedtype.NamedType('extendedResp', ExtendedResponse()) + ) + ) + ), + namedtype.OptionalNamedType('controls', Controls().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2314.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2314.py new file mode 100644 index 0000000000000000000000000000000000000000..b0edfe09170a6b688a532fc8fc5ae9d4cf4d2abb --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2314.py @@ -0,0 +1,48 @@ +# +# This file is part of pyasn1-modules software. 
+# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# PKCS#10 syntax +# +# ASN.1 source from: +# http://tools.ietf.org/html/rfc2314 +# +# Sample captures could be obtained with "openssl req" command +# +from pyasn1_modules.rfc2459 import * + + +class Attributes(univ.SetOf): + componentType = Attribute() + + +class Version(univ.Integer): + pass + + +class CertificationRequestInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.NamedType('subject', Name()), + namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()), + namedtype.NamedType('attributes', + Attributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) + ) + + +class Signature(univ.BitString): + pass + + +class SignatureAlgorithmIdentifier(AlgorithmIdentifier): + pass + + +class CertificationRequest(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('certificationRequestInfo', CertificationRequestInfo()), + namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()), + namedtype.NamedType('signature', Signature()) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2315.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2315.py new file mode 100644 index 0000000000000000000000000000000000000000..1069fc27dd7ca67f49bb34cf52296adeb3ea396c --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2315.py @@ -0,0 +1,294 @@ +# +# This file is part of pyasn1-modules software. +# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# PKCS#7 message syntax +# +# ASN.1 source from: +# https://opensource.apple.com/source/Security/Security-55179.1/libsecurity_asn1/asn1/pkcs7.asn.auto.html +# +# Sample captures from: +# openssl crl2pkcs7 -nocrl -certfile cert1.cer -out outfile.p7b +# +from pyasn1_modules.rfc2459 import * + + +class Attribute(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type', AttributeType()), + namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue())) + ) + + +class AttributeValueAssertion(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('attributeType', AttributeType()), + namedtype.NamedType('attributeValue', AttributeValue(), + openType=opentype.OpenType('type', certificateAttributesMap)) + ) + + +pkcs_7 = univ.ObjectIdentifier('1.2.840.113549.1.7') +data = univ.ObjectIdentifier('1.2.840.113549.1.7.1') +signedData = univ.ObjectIdentifier('1.2.840.113549.1.7.2') +envelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.3') +signedAndEnvelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.4') +digestedData = univ.ObjectIdentifier('1.2.840.113549.1.7.5') +encryptedData = univ.ObjectIdentifier('1.2.840.113549.1.7.6') + + +class ContentType(univ.ObjectIdentifier): + pass + + +class ContentEncryptionAlgorithmIdentifier(AlgorithmIdentifier): + pass + + +class EncryptedContent(univ.OctetString): + pass + + +contentTypeMap = {} + + +class EncryptedContentInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('contentType', ContentType()), + namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()), + namedtype.OptionalNamedType( + 'encryptedContent', EncryptedContent().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0) + ), + openType=opentype.OpenType('contentType', contentTypeMap) + ) 
+ ) + + +class Version(univ.Integer): # overrides x509.Version + pass + + +class EncryptedData(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()) + ) + + +class DigestAlgorithmIdentifier(AlgorithmIdentifier): + pass + + +class DigestAlgorithmIdentifiers(univ.SetOf): + componentType = DigestAlgorithmIdentifier() + + +class Digest(univ.OctetString): + pass + + +class ContentInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('contentType', ContentType()), + namedtype.OptionalNamedType( + 'content', + univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)), + openType=opentype.OpenType('contentType', contentTypeMap) + ) + ) + + +class DigestedData(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()), + namedtype.NamedType('contentInfo', ContentInfo()), + namedtype.NamedType('digest', Digest()) + ) + + +class IssuerAndSerialNumber(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('issuer', Name()), + namedtype.NamedType('serialNumber', CertificateSerialNumber()) + ) + + +class KeyEncryptionAlgorithmIdentifier(AlgorithmIdentifier): + pass + + +class EncryptedKey(univ.OctetString): + pass + + +class RecipientInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()), + namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('encryptedKey', EncryptedKey()) + ) + + +class RecipientInfos(univ.SetOf): + componentType = RecipientInfo() + + +class Attributes(univ.SetOf): + componentType = Attribute() + + +class ExtendedCertificateInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.NamedType('certificate', Certificate()), + namedtype.NamedType('attributes', Attributes()) + ) + + +class SignatureAlgorithmIdentifier(AlgorithmIdentifier): + pass + + +class Signature(univ.BitString): + pass + + +class ExtendedCertificate(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()), + namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()), + namedtype.NamedType('signature', Signature()) + ) + + +class ExtendedCertificateOrCertificate(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('certificate', Certificate()), + namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) + ) + + +class ExtendedCertificatesAndCertificates(univ.SetOf): + componentType = ExtendedCertificateOrCertificate() + + +class SerialNumber(univ.Integer): + pass + + +class CRLEntry(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('userCertificate', SerialNumber()), + namedtype.NamedType('revocationDate', useful.UTCTime()) + ) + + +class TBSCertificateRevocationList(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('signature', AlgorithmIdentifier()), + namedtype.NamedType('issuer', Name()), + namedtype.NamedType('lastUpdate', useful.UTCTime()), + namedtype.NamedType('nextUpdate', useful.UTCTime()), + 
namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=CRLEntry())) + ) + + +class CertificateRevocationList(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('tbsCertificateRevocationList', TBSCertificateRevocationList()), + namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()) + ) + + +class CertificateRevocationLists(univ.SetOf): + componentType = CertificateRevocationList() + + +class DigestEncryptionAlgorithmIdentifier(AlgorithmIdentifier): + pass + + +class EncryptedDigest(univ.OctetString): + pass + + +class SignerInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()), + namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()), + namedtype.OptionalNamedType('authenticatedAttributes', Attributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('digestEncryptionAlgorithm', DigestEncryptionAlgorithmIdentifier()), + namedtype.NamedType('encryptedDigest', EncryptedDigest()), + namedtype.OptionalNamedType('unauthenticatedAttributes', Attributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) + ) + + +class SignerInfos(univ.SetOf): + componentType = SignerInfo() + + +class SignedAndEnvelopedData(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.NamedType('recipientInfos', RecipientInfos()), + namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()), + namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()), + namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.NamedType('signerInfos', SignerInfos()) + ) + + +class EnvelopedData(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.NamedType('recipientInfos', RecipientInfos()), + namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()) + ) + + +class DigestInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()), + namedtype.NamedType('digest', Digest()) + ) + + +class SignedData(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.OptionalNamedType('digestAlgorithms', DigestAlgorithmIdentifiers()), + namedtype.NamedType('contentInfo', ContentInfo()), + namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.OptionalNamedType('signerInfos', SignerInfos()) + ) + + +class Data(univ.OctetString): + pass + +_contentTypeMapUpdate = { + data: Data(), + signedData: SignedData(), + envelopedData: EnvelopedData(), + signedAndEnvelopedData: SignedAndEnvelopedData(), + digestedData: DigestedData(), + encryptedData: EncryptedData() +} + +contentTypeMap.update(_contentTypeMapUpdate) 
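# Editor's end-to-end sketch (annotation, not part of the vendored patch;
# the file name is assumed): read the PKCS#7 blob produced by the
# "openssl crl2pkcs7" command mentioned in the header above, decode the
# outer ContentInfo, then decode the inner structure named by the
# contentTypeMap open-type table.
#
#     from pyasn1.codec.der import decoder
#     from pyasn1_modules import pem, rfc2315
#
#     with open('outfile.p7b') as f:
#         idx, substrate = pem.readPemBlocksFromFile(
#             f, ('-----BEGIN PKCS7-----', '-----END PKCS7-----'))
#
#     contentInfo, _ = decoder.decode(substrate, asn1Spec=rfc2315.ContentInfo())
#     if contentInfo['contentType'] == rfc2315.signedData:
#         signedData, _ = decoder.decode(contentInfo['content'],
#                                        asn1Spec=rfc2315.SignedData())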
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2437.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2437.py new file mode 100644 index 0000000000000000000000000000000000000000..88641cf07d4edd3639a7fce4f8085c921c40f9c0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2437.py @@ -0,0 +1,69 @@ +# +# This file is part of pyasn1-modules software. +# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# PKCS#1 syntax +# +# ASN.1 source from: +# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2.asn +# +# Sample captures could be obtained with "openssl genrsa" command +# +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules.rfc2459 import AlgorithmIdentifier + +pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1') +rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1') +md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2') +md4WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.3') +md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4') +sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5') +rsaOAEPEncryptionSET = univ.ObjectIdentifier('1.2.840.113549.1.1.6') +id_RSAES_OAEP = univ.ObjectIdentifier('1.2.840.113549.1.1.7') +id_mgf1 = univ.ObjectIdentifier('1.2.840.113549.1.1.8') +id_pSpecified = univ.ObjectIdentifier('1.2.840.113549.1.1.9') +id_sha1 = univ.ObjectIdentifier('1.3.14.3.2.26') + +MAX = float('inf') + + +class Version(univ.Integer): + pass + + +class RSAPrivateKey(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.NamedType('modulus', univ.Integer()), + namedtype.NamedType('publicExponent', univ.Integer()), + namedtype.NamedType('privateExponent', univ.Integer()), + namedtype.NamedType('prime1', univ.Integer()), + namedtype.NamedType('prime2', univ.Integer()), + namedtype.NamedType('exponent1', univ.Integer()), + namedtype.NamedType('exponent2', univ.Integer()), + namedtype.NamedType('coefficient', univ.Integer()) + ) + + +class RSAPublicKey(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('modulus', univ.Integer()), + namedtype.NamedType('publicExponent', univ.Integer()) + ) + + +# XXX defaults not set +class RSAES_OAEP_params(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('hashFunc', AlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('maskGenFunc', AlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.NamedType('pSourceFunc', AlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2459.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2459.py new file mode 100644 index 0000000000000000000000000000000000000000..57f783e45159f9886758dd73d4892f63c2fc844f --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2459.py @@ -0,0 +1,1339 @@ +# +# This file is part of pyasn1-modules software. +# +# Updated by Russ Housley to resolve the TODO regarding the Certificate +# Policies Certificate Extension. 
+# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# X.509 message syntax +# +# ASN.1 source from: +# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/x509.asn +# http://www.ietf.org/rfc/rfc2459.txt +# +# Sample captures from: +# http://wiki.wireshark.org/SampleCaptures/ +# +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import opentype +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +MAX = float('inf') + +# +# PKIX1Explicit88 +# + +# Upper Bounds +ub_name = univ.Integer(32768) +ub_common_name = univ.Integer(64) +ub_locality_name = univ.Integer(128) +ub_state_name = univ.Integer(128) +ub_organization_name = univ.Integer(64) +ub_organizational_unit_name = univ.Integer(64) +ub_title = univ.Integer(64) +ub_match = univ.Integer(128) +ub_emailaddress_length = univ.Integer(128) +ub_common_name_length = univ.Integer(64) +ub_country_name_alpha_length = univ.Integer(2) +ub_country_name_numeric_length = univ.Integer(3) +ub_domain_defined_attributes = univ.Integer(4) +ub_domain_defined_attribute_type_length = univ.Integer(8) +ub_domain_defined_attribute_value_length = univ.Integer(128) +ub_domain_name_length = univ.Integer(16) +ub_extension_attributes = univ.Integer(256) +ub_e163_4_number_length = univ.Integer(15) +ub_e163_4_sub_address_length = univ.Integer(40) +ub_generation_qualifier_length = univ.Integer(3) +ub_given_name_length = univ.Integer(16) +ub_initials_length = univ.Integer(5) +ub_integer_options = univ.Integer(256) +ub_numeric_user_id_length = univ.Integer(32) +ub_organization_name_length = univ.Integer(64) +ub_organizational_unit_name_length = univ.Integer(32) +ub_organizational_units = univ.Integer(4) +ub_pds_name_length = univ.Integer(16) +ub_pds_parameter_length = univ.Integer(30) +ub_pds_physical_address_lines = univ.Integer(6) +ub_postal_code_length = univ.Integer(16) +ub_surname_length = univ.Integer(40) +ub_terminal_id_length = univ.Integer(24) +ub_unformatted_address_length = univ.Integer(180) +ub_x121_address_length = univ.Integer(16) + + +class UniversalString(char.UniversalString): + pass + + +class BMPString(char.BMPString): + pass + + +class UTF8String(char.UTF8String): + pass + + +id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7') +id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1') +id_qt = univ.ObjectIdentifier('1.3.6.1.5.5.7.2') +id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3') +id_ad = univ.ObjectIdentifier('1.3.6.1.5.5.7.48') + +id_qt_cps = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.1') +id_qt_unotice = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.2') + +id_ad_ocsp = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.1') +id_ad_caIssuers = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.2') + + + + +id_at = univ.ObjectIdentifier('2.5.4') +id_at_name = univ.ObjectIdentifier('2.5.4.41') +# preserve misspelled variable for compatibility +id_at_sutname = id_at_surname = univ.ObjectIdentifier('2.5.4.4') +id_at_givenName = univ.ObjectIdentifier('2.5.4.42') +id_at_initials = univ.ObjectIdentifier('2.5.4.43') +id_at_generationQualifier = univ.ObjectIdentifier('2.5.4.44') + + +class X520name(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))), + 
namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))) + ) + + +id_at_commonName = univ.ObjectIdentifier('2.5.4.3') + + +class X520CommonName(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))), + namedtype.NamedType('printableString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))), + namedtype.NamedType('universalString', char.UniversalString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))) + ) + + +id_at_localityName = univ.ObjectIdentifier('2.5.4.7') + + +class X520LocalityName(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))), + namedtype.NamedType('printableString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))), + namedtype.NamedType('universalString', char.UniversalString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))) + ) + + +id_at_stateOrProvinceName = univ.ObjectIdentifier('2.5.4.8') + + +class X520StateOrProvinceName(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))), + namedtype.NamedType('printableString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))), + namedtype.NamedType('universalString', char.UniversalString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))) + ) + + +id_at_organizationName = univ.ObjectIdentifier('2.5.4.10') + + +class X520OrganizationName(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))), + namedtype.NamedType('printableString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))), + namedtype.NamedType('universalString', char.UniversalString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))), + namedtype.NamedType('utf8String', char.UTF8String().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, 
ub_organization_name))), + namedtype.NamedType('bmpString', char.BMPString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))) + ) + + +id_at_organizationalUnitName = univ.ObjectIdentifier('2.5.4.11') + + +class X520OrganizationalUnitName(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))), + namedtype.NamedType('printableString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))), + namedtype.NamedType('universalString', char.UniversalString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))), + namedtype.NamedType('utf8String', char.UTF8String().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))), + namedtype.NamedType('bmpString', char.BMPString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))) + ) + + +id_at_title = univ.ObjectIdentifier('2.5.4.12') + + +class X520Title(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))), + namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))) + ) + + +id_at_dnQualifier = univ.ObjectIdentifier('2.5.4.46') + + +class X520dnQualifier(char.PrintableString): + pass + + +id_at_countryName = univ.ObjectIdentifier('2.5.4.6') + + +class X520countryName(char.PrintableString): + subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(2, 2) + + +pkcs_9 = univ.ObjectIdentifier('1.2.840.113549.1.9') + +emailAddress = univ.ObjectIdentifier('1.2.840.113549.1.9.1') + + +class Pkcs9email(char.IA5String): + subtypeSpec = char.IA5String.subtypeSpec + constraint.ValueSizeConstraint(1, ub_emailaddress_length) + + +# ---- + +class DSAPrivateKey(univ.Sequence): + """PKIX compliant DSA private key structure""" + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 0)))), + namedtype.NamedType('p', univ.Integer()), + namedtype.NamedType('q', univ.Integer()), + namedtype.NamedType('g', univ.Integer()), + namedtype.NamedType('public', univ.Integer()), + namedtype.NamedType('private', univ.Integer()) + ) + + +# ---- + + +class DirectoryString(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('bmpString', 
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) + # hm, this should not be here!? XXX + ) + + +# certificate and CRL specific structures begin here + +class AlgorithmIdentifier(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('algorithm', univ.ObjectIdentifier()), + namedtype.OptionalNamedType('parameters', univ.Any()) + ) + + + +# Algorithm OIDs and parameter structures + +pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1') +rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1') +md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2') +md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4') +sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5') +id_dsa_with_sha1 = univ.ObjectIdentifier('1.2.840.10040.4.3') + + +class Dss_Sig_Value(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('r', univ.Integer()), + namedtype.NamedType('s', univ.Integer()) + ) + + +dhpublicnumber = univ.ObjectIdentifier('1.2.840.10046.2.1') + + +class ValidationParms(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('seed', univ.BitString()), + namedtype.NamedType('pgenCounter', univ.Integer()) + ) + + +class DomainParameters(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('p', univ.Integer()), + namedtype.NamedType('g', univ.Integer()), + namedtype.NamedType('q', univ.Integer()), + namedtype.NamedType('j', univ.Integer()), + namedtype.OptionalNamedType('validationParms', ValidationParms()) + ) + + +id_dsa = univ.ObjectIdentifier('1.2.840.10040.4.1') + + +class Dss_Parms(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('p', univ.Integer()), + namedtype.NamedType('q', univ.Integer()), + namedtype.NamedType('g', univ.Integer()) + ) + + +# x400 address syntax starts here + +teletex_domain_defined_attributes = univ.Integer(6) + + +class TeletexDomainDefinedAttribute(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))), + namedtype.NamedType('value', char.TeletexString()) + ) + + +class TeletexDomainDefinedAttributes(univ.SequenceOf): + componentType = TeletexDomainDefinedAttribute() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes) + + +terminal_type = univ.Integer(23) + + +class TerminalType(univ.Integer): + subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, ub_integer_options) + namedValues = namedval.NamedValues( + ('telex', 3), + ('teletelex', 4), + ('g3-facsimile', 5), + ('g4-facsimile', 6), + ('ia5-terminal', 7), + ('videotex', 8) + ) + + +class PresentationAddress(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype( + 
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3), + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + ) + + +extended_network_address = univ.Integer(22) + + +class E163_4_address(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('number', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('sub-address', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class ExtendedNetworkAddress(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('e163-4-address', E163_4_address()), + namedtype.NamedType('psap-address', PresentationAddress().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) + ) + + +class PDSParameter(univ.Set): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))), + namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))) + ) + + +local_postal_attributes = univ.Integer(21) + + +class LocalPostalAttributes(PDSParameter): + pass + + +class UniquePostalName(PDSParameter): + pass + + +unique_postal_name = univ.Integer(20) + +poste_restante_address = univ.Integer(19) + + +class PosteRestanteAddress(PDSParameter): + pass + + +post_office_box_address = univ.Integer(18) + + +class PostOfficeBoxAddress(PDSParameter): + pass + + +street_address = univ.Integer(17) + + +class StreetAddress(PDSParameter): + pass + + +class UnformattedPostalAddress(univ.Set): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_physical_address_lines)))), + namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length))) + ) + + +physical_delivery_office_name = univ.Integer(10) + + +class PhysicalDeliveryOfficeName(PDSParameter): + pass + + +physical_delivery_office_number = univ.Integer(11) + + +class PhysicalDeliveryOfficeNumber(PDSParameter): + pass + + +extension_OR_address_components = univ.Integer(12) + + +class ExtensionORAddressComponents(PDSParameter): + pass + + +physical_delivery_personal_name = univ.Integer(13) + + +class PhysicalDeliveryPersonalName(PDSParameter): + pass + + +physical_delivery_organization_name = univ.Integer(14) + + +class PhysicalDeliveryOrganizationName(PDSParameter): + pass + + +extension_physical_delivery_address_components = univ.Integer(15) + + +class ExtensionPhysicalDeliveryAddressComponents(PDSParameter): + pass + + +unformatted_postal_address = univ.Integer(16) + +postal_code = univ.Integer(9) + + +class PostalCode(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('numeric-code', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))), + namedtype.NamedType('printable-code', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, 
ub_postal_code_length))) + ) + + +class PhysicalDeliveryCountryName(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('x121-dcc-code', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, + ub_country_name_numeric_length))), + namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length))) + ) + + +class PDSName(char.PrintableString): + subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_pds_name_length) + + +physical_delivery_country_name = univ.Integer(8) + + +class TeletexOrganizationalUnitName(char.TeletexString): + subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length) + + +pds_name = univ.Integer(7) + +teletex_organizational_unit_names = univ.Integer(5) + + +class TeletexOrganizationalUnitNames(univ.SequenceOf): + componentType = TeletexOrganizationalUnitName() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units) + + +teletex_personal_name = univ.Integer(4) + + +class TeletexPersonalName(univ.Set): + componentType = namedtype.NamedTypes( + namedtype.NamedType('surname', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('given-name', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('initials', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) + ) + + +teletex_organization_name = univ.Integer(3) + + +class TeletexOrganizationName(char.TeletexString): + subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length) + + +teletex_common_name = univ.Integer(2) + + +class TeletexCommonName(char.TeletexString): + subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length) + + +class CommonName(char.PrintableString): + subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length) + + +common_name = univ.Integer(1) + + +class ExtensionAttribute(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('extension-attribute-type', univ.Integer().subtype( + subtypeSpec=constraint.ValueSizeConstraint(0, ub_extension_attributes), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('extension-attribute-value', + univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class ExtensionAttributes(univ.SetOf): + componentType = ExtensionAttribute() + sizeSpec = univ.SetOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_extension_attributes) + + +class BuiltInDomainDefinedAttribute(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type', char.PrintableString().subtype( + 
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))), + namedtype.NamedType('value', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length))) + ) + + +class BuiltInDomainDefinedAttributes(univ.SequenceOf): + componentType = BuiltInDomainDefinedAttribute() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes) + + +class OrganizationalUnitName(char.PrintableString): + subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length) + + +class OrganizationalUnitNames(univ.SequenceOf): + componentType = OrganizationalUnitName() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units) + + +class PersonalName(univ.Set): + componentType = namedtype.NamedTypes( + namedtype.NamedType('surname', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('given-name', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('initials', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) + ) + + +class NumericUserIdentifier(char.NumericString): + subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_numeric_user_id_length) + + +class OrganizationName(char.PrintableString): + subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length) + + +class PrivateDomainName(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('numeric', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))), + namedtype.NamedType('printable', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))) + ) + + +class TerminalIdentifier(char.PrintableString): + subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_terminal_id_length) + + +class X121Address(char.NumericString): + subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_x121_address_length) + + +class NetworkAddress(X121Address): + pass + + +class AdministrationDomainName(univ.Choice): + tagSet = univ.Choice.tagSet.tagExplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2) + ) + componentType = namedtype.NamedTypes( + namedtype.NamedType('numeric', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))), + namedtype.NamedType('printable', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))) + ) + + +class CountryName(univ.Choice): + tagSet = univ.Choice.tagSet.tagExplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1) + ) + componentType = namedtype.NamedTypes( + namedtype.NamedType('x121-dcc-code', 
char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, + ub_country_name_numeric_length))), + namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length))) + ) + + +class BuiltInStandardAttributes(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('country-name', CountryName()), + namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()), + namedtype.OptionalNamedType('network-address', NetworkAddress().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('organization-name', OrganizationName().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))), + namedtype.OptionalNamedType('personal-name', PersonalName().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))), + namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))) + ) + + +class ORAddress(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()), + namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()), + namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes()) + ) + + +# +# PKIX1Implicit88 +# + +id_ce_invalidityDate = univ.ObjectIdentifier('2.5.29.24') + + +class InvalidityDate(useful.GeneralizedTime): + pass + + +id_holdinstruction_none = univ.ObjectIdentifier('2.2.840.10040.2.1') +id_holdinstruction_callissuer = univ.ObjectIdentifier('2.2.840.10040.2.2') +id_holdinstruction_reject = univ.ObjectIdentifier('2.2.840.10040.2.3') + +holdInstruction = univ.ObjectIdentifier('2.2.840.10040.2') + +id_ce_holdInstructionCode = univ.ObjectIdentifier('2.5.29.23') + + +class HoldInstructionCode(univ.ObjectIdentifier): + pass + + +id_ce_cRLReasons = univ.ObjectIdentifier('2.5.29.21') + + +class CRLReason(univ.Enumerated): + namedValues = namedval.NamedValues( + ('unspecified', 0), + ('keyCompromise', 1), + ('cACompromise', 2), + ('affiliationChanged', 3), + ('superseded', 4), + ('cessationOfOperation', 5), + ('certificateHold', 6), + ('removeFromCRL', 8) + ) + + +id_ce_cRLNumber = univ.ObjectIdentifier('2.5.29.20') + + +class CRLNumber(univ.Integer): + subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, MAX) + + +class BaseCRLNumber(CRLNumber): + pass + + +id_kp_serverAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.1') +id_kp_clientAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.2') +id_kp_codeSigning = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.3') +id_kp_emailProtection = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.4') +id_kp_ipsecEndSystem = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.5') +id_kp_ipsecTunnel = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.6') +id_kp_ipsecUser = 
univ.ObjectIdentifier('1.3.6.1.5.5.7.3.7') +id_kp_timeStamping = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.8') +id_pe_authorityInfoAccess = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.1') +id_ce_extKeyUsage = univ.ObjectIdentifier('2.5.29.37') + + +class KeyPurposeId(univ.ObjectIdentifier): + pass + + +class ExtKeyUsageSyntax(univ.SequenceOf): + componentType = KeyPurposeId() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX) + + +class ReasonFlags(univ.BitString): + namedValues = namedval.NamedValues( + ('unused', 0), + ('keyCompromise', 1), + ('cACompromise', 2), + ('affiliationChanged', 3), + ('superseded', 4), + ('cessationOfOperation', 5), + ('certificateHold', 6) + ) + + +class SkipCerts(univ.Integer): + subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, MAX) + + +id_ce_policyConstraints = univ.ObjectIdentifier('2.5.29.36') + + +class PolicyConstraints(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('requireExplicitPolicy', SkipCerts().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('inhibitPolicyMapping', SkipCerts().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) + ) + + +id_ce_basicConstraints = univ.ObjectIdentifier('2.5.29.19') + + +class BasicConstraints(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('cA', univ.Boolean(False)), + namedtype.OptionalNamedType('pathLenConstraint', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))) + ) + + +id_ce_subjectDirectoryAttributes = univ.ObjectIdentifier('2.5.29.9') + + +class EDIPartyName(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('partyName', + DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + + +id_ce_deltaCRLIndicator = univ.ObjectIdentifier('2.5.29.27') + + + +class BaseDistance(univ.Integer): + subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX) + + +id_ce_cRLDistributionPoints = univ.ObjectIdentifier('2.5.29.31') + + +id_ce_issuingDistributionPoint = univ.ObjectIdentifier('2.5.29.28') + + + + +id_ce_nameConstraints = univ.ObjectIdentifier('2.5.29.30') + + +class DisplayText(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('visibleString', + char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))), + namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))), + namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))) + ) + + +class NoticeReference(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('organization', DisplayText()), + namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer())) + ) + + +class UserNotice(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('noticeRef', NoticeReference()), + namedtype.OptionalNamedType('explicitText', DisplayText()) + ) + + +class CPSuri(char.IA5String): + pass + + +class PolicyQualifierId(univ.ObjectIdentifier): + subtypeSpec = univ.ObjectIdentifier.subtypeSpec + constraint.SingleValueConstraint(id_qt_cps, id_qt_unotice) + + +class 
CertPolicyId(univ.ObjectIdentifier): + pass + + +class PolicyQualifierInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('policyQualifierId', PolicyQualifierId()), + namedtype.NamedType('qualifier', univ.Any()) + ) + + +id_ce_certificatePolicies = univ.ObjectIdentifier('2.5.29.32') + + +class PolicyInformation(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('policyIdentifier', CertPolicyId()), + namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) + ) + + +class CertificatePolicies(univ.SequenceOf): + componentType = PolicyInformation() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX) + + +id_ce_policyMappings = univ.ObjectIdentifier('2.5.29.33') + + +class PolicyMapping(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerDomainPolicy', CertPolicyId()), + namedtype.NamedType('subjectDomainPolicy', CertPolicyId()) + ) + + +class PolicyMappings(univ.SequenceOf): + componentType = PolicyMapping() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX) + + +id_ce_privateKeyUsagePeriod = univ.ObjectIdentifier('2.5.29.16') + + +class PrivateKeyUsagePeriod(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +id_ce_keyUsage = univ.ObjectIdentifier('2.5.29.15') + + +class KeyUsage(univ.BitString): + namedValues = namedval.NamedValues( + ('digitalSignature', 0), + ('nonRepudiation', 1), + ('keyEncipherment', 2), + ('dataEncipherment', 3), + ('keyAgreement', 4), + ('keyCertSign', 5), + ('cRLSign', 6), + ('encipherOnly', 7), + ('decipherOnly', 8) + ) + + +id_ce = univ.ObjectIdentifier('2.5.29') + +id_ce_authorityKeyIdentifier = univ.ObjectIdentifier('2.5.29.35') + + +class KeyIdentifier(univ.OctetString): + pass + + +id_ce_subjectKeyIdentifier = univ.ObjectIdentifier('2.5.29.14') + + +class SubjectKeyIdentifier(KeyIdentifier): + pass + + +id_ce_certificateIssuer = univ.ObjectIdentifier('2.5.29.29') + + +id_ce_subjectAltName = univ.ObjectIdentifier('2.5.29.17') + + +id_ce_issuerAltName = univ.ObjectIdentifier('2.5.29.18') + + +class AttributeValue(univ.Any): + pass + + +class AttributeType(univ.ObjectIdentifier): + pass + +certificateAttributesMap = {} + + +class AttributeTypeAndValue(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type', AttributeType()), + namedtype.NamedType('value', AttributeValue(), + openType=opentype.OpenType('type', certificateAttributesMap)) + ) + + +class Attribute(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type', AttributeType()), + namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue())) + ) + + +class SubjectDirectoryAttributes(univ.SequenceOf): + componentType = Attribute() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX) + + +class RelativeDistinguishedName(univ.SetOf): + componentType = AttributeTypeAndValue() + + +class RDNSequence(univ.SequenceOf): + componentType = RelativeDistinguishedName() + + +class Name(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('', RDNSequence()) + 
) + +class CertificateSerialNumber(univ.Integer): + pass + + +class AnotherName(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type-id', univ.ObjectIdentifier()), + namedtype.NamedType('value', + univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) + ) + + +class GeneralName(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('otherName', + AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('rfc822Name', + char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('dNSName', + char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('x400Address', + ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.NamedType('directoryName', + Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))), + namedtype.NamedType('ediPartyName', + EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))), + namedtype.NamedType('uniformResourceIdentifier', + char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))), + namedtype.NamedType('iPAddress', univ.OctetString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))), + namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))) + ) + + +class GeneralNames(univ.SequenceOf): + componentType = GeneralName() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX) + + +class AccessDescription(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('accessMethod', univ.ObjectIdentifier()), + namedtype.NamedType('accessLocation', GeneralName()) + ) + + +class AuthorityInfoAccessSyntax(univ.SequenceOf): + componentType = AccessDescription() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX) + + +class AuthorityKeyIdentifier(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class DistributionPointName(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('fullName', GeneralNames().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) + ) + + +class DistributionPoint(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('reasons', ReasonFlags().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype( + implicitTag=tag.Tag(tag.tagClassContext, 
tag.tagFormatConstructed, 2))) + ) + + +class CRLDistPointsSyntax(univ.SequenceOf): + componentType = DistributionPoint() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX) + + +class IssuingDistributionPoint(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('onlyContainsUserCerts', univ.Boolean(False).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('onlyContainsCACerts', univ.Boolean(False).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.NamedType('indirectCRL', univ.Boolean(False).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))) + ) + + +class GeneralSubtree(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('base', GeneralName()), + namedtype.DefaultedNamedType('minimum', BaseDistance(0).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('maximum', BaseDistance().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) + ) + + +class GeneralSubtrees(univ.SequenceOf): + componentType = GeneralSubtree() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX) + + +class NameConstraints(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) + ) + + +class CertificateIssuer(GeneralNames): + pass + + +class SubjectAltName(GeneralNames): + pass + + +class IssuerAltName(GeneralNames): + pass + + +certificateExtensionsMap = {} + + +class Extension(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('extnID', univ.ObjectIdentifier()), + namedtype.DefaultedNamedType('critical', univ.Boolean('False')), + namedtype.NamedType('extnValue', univ.OctetString(), + openType=opentype.OpenType('extnID', certificateExtensionsMap)) + ) + + +class Extensions(univ.SequenceOf): + componentType = Extension() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX) + + +class SubjectPublicKeyInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('algorithm', AlgorithmIdentifier()), + namedtype.NamedType('subjectPublicKey', univ.BitString()) + ) + + +class UniqueIdentifier(univ.BitString): + pass + + +class Time(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('utcTime', useful.UTCTime()), + namedtype.NamedType('generalTime', useful.GeneralizedTime()) + ) + + +class Validity(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('notBefore', Time()), + namedtype.NamedType('notAfter', Time()) + ) + + +class Version(univ.Integer): + namedValues = namedval.NamedValues( + ('v1', 0), ('v2', 1), ('v3', 2) + ) + + +class TBSCertificate(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', Version('v1').subtype( + 
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('serialNumber', CertificateSerialNumber()), + namedtype.NamedType('signature', AlgorithmIdentifier()), + namedtype.NamedType('issuer', Name()), + namedtype.NamedType('validity', Validity()), + namedtype.NamedType('subject', Name()), + namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()), + namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('extensions', Extensions().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) + ) + + +class Certificate(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('tbsCertificate', TBSCertificate()), + namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('signatureValue', univ.BitString()) + ) + +# CRL structures + +class RevokedCertificate(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('userCertificate', CertificateSerialNumber()), + namedtype.NamedType('revocationDate', Time()), + namedtype.OptionalNamedType('crlEntryExtensions', Extensions()) + ) + + +class TBSCertList(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('version', Version()), + namedtype.NamedType('signature', AlgorithmIdentifier()), + namedtype.NamedType('issuer', Name()), + namedtype.NamedType('thisUpdate', Time()), + namedtype.OptionalNamedType('nextUpdate', Time()), + namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=RevokedCertificate())), + namedtype.OptionalNamedType('crlExtensions', Extensions().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) + ) + + +class CertificateList(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('tbsCertList', TBSCertList()), + namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()) + ) + +# map of AttributeType -> AttributeValue + +_certificateAttributesMapUpdate = { + id_at_name: X520name(), + id_at_surname: X520name(), + id_at_givenName: X520name(), + id_at_initials: X520name(), + id_at_generationQualifier: X520name(), + id_at_commonName: X520CommonName(), + id_at_localityName: X520LocalityName(), + id_at_stateOrProvinceName: X520StateOrProvinceName(), + id_at_organizationName: X520OrganizationName(), + id_at_organizationalUnitName: X520OrganizationalUnitName(), + id_at_title: X520Title(), + id_at_dnQualifier: X520dnQualifier(), + id_at_countryName: X520countryName(), + emailAddress: Pkcs9email(), +} + +certificateAttributesMap.update(_certificateAttributesMapUpdate) + + +# map of Certificate Extension OIDs to Extensions + +_certificateExtensionsMapUpdate = { + id_ce_authorityKeyIdentifier: AuthorityKeyIdentifier(), + id_ce_subjectKeyIdentifier: SubjectKeyIdentifier(), + id_ce_keyUsage: KeyUsage(), + id_ce_privateKeyUsagePeriod: PrivateKeyUsagePeriod(), + id_ce_certificatePolicies: CertificatePolicies(), + id_ce_policyMappings: PolicyMappings(), + id_ce_subjectAltName: SubjectAltName(), + id_ce_issuerAltName: IssuerAltName(), + id_ce_subjectDirectoryAttributes: SubjectDirectoryAttributes(), + id_ce_basicConstraints: BasicConstraints(), + id_ce_nameConstraints: NameConstraints(), + 
id_ce_policyConstraints: PolicyConstraints(), + id_ce_extKeyUsage: ExtKeyUsageSyntax(), + id_ce_cRLDistributionPoints: CRLDistPointsSyntax(), + id_pe_authorityInfoAccess: AuthorityInfoAccessSyntax(), + id_ce_cRLNumber: univ.Integer(), + id_ce_deltaCRLIndicator: BaseCRLNumber(), + id_ce_issuingDistributionPoint: IssuingDistributionPoint(), + id_ce_cRLReasons: CRLReason(), + id_ce_holdInstructionCode: univ.ObjectIdentifier(), + id_ce_invalidityDate: useful.GeneralizedTime(), + id_ce_certificateIssuer: GeneralNames(), +} + +certificateExtensionsMap.update(_certificateExtensionsMapUpdate) + diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2511.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2511.py new file mode 100644 index 0000000000000000000000000000000000000000..8935cdabe33251cc5d6e1ebc51578845143194d5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2511.py @@ -0,0 +1,258 @@ +# +# This file is part of pyasn1-modules software. +# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# X.509 certificate Request Message Format (CRMF) syntax +# +# ASN.1 source from: +# http://tools.ietf.org/html/rfc2511 +# +# Sample captures could be obtained with OpenSSL +# +from pyasn1_modules import rfc2315 +from pyasn1_modules.rfc2459 import * + +MAX = float('inf') + +id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7') +id_pkip = univ.ObjectIdentifier('1.3.6.1.5.5.7.5') +id_regCtrl = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1') +id_regCtrl_regToken = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.1') +id_regCtrl_authenticator = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.2') +id_regCtrl_pkiPublicationInfo = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.3') +id_regCtrl_pkiArchiveOptions = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.4') +id_regCtrl_oldCertID = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.5') +id_regCtrl_protocolEncrKey = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.6') +id_regInfo = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2') +id_regInfo_utf8Pairs = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2.1') +id_regInfo_certReq = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2.2') + + +# This should be in PKIX Certificate Extensions module + +class GeneralName(univ.OctetString): + pass + + +# end of PKIX Certificate Extensions module + +class UTF8Pairs(char.UTF8String): + pass + + +class ProtocolEncrKey(SubjectPublicKeyInfo): + pass + + +class CertId(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('issuer', GeneralName()), + namedtype.NamedType('serialNumber', univ.Integer()) + ) + + +class OldCertId(CertId): + pass + + +class KeyGenParameters(univ.OctetString): + pass + + +class EncryptedValue(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('intendedAlg', AlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('symmAlg', AlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.OptionalNamedType('encSymmKey', univ.BitString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.OptionalNamedType('keyAlg', AlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.OptionalNamedType('valueHint', univ.OctetString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))), + namedtype.NamedType('encValue', univ.BitString()) 
+ ) + + +class EncryptedKey(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('encryptedValue', EncryptedValue()), + namedtype.NamedType('envelopedData', rfc2315.EnvelopedData().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) + ) + + +class PKIArchiveOptions(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('encryptedPrivKey', EncryptedKey().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('keyGenParameters', KeyGenParameters().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('archiveRemGenPrivKey', + univ.Boolean().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class SinglePubInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('pubMethod', univ.Integer( + namedValues=namedval.NamedValues(('dontCare', 0), ('x500', 1), ('web', 2), ('ldap', 3)))), + namedtype.OptionalNamedType('pubLocation', GeneralName()) + ) + + +class PKIPublicationInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('action', + univ.Integer(namedValues=namedval.NamedValues(('dontPublish', 0), ('pleasePublish', 1)))), + namedtype.OptionalNamedType('pubInfos', univ.SequenceOf(componentType=SinglePubInfo()).subtype( + sizeSpec=constraint.ValueSizeConstraint(1, MAX))) + ) + + +class Authenticator(char.UTF8String): + pass + + +class RegToken(char.UTF8String): + pass + + +class SubsequentMessage(univ.Integer): + namedValues = namedval.NamedValues( + ('encrCert', 0), + ('challengeResp', 1) + ) + + +class POPOPrivKey(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('thisMessage', + univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('subsequentMessage', SubsequentMessage().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('dhMAC', + univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class PBMParameter(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('salt', univ.OctetString()), + namedtype.NamedType('owf', AlgorithmIdentifier()), + namedtype.NamedType('iterationCount', univ.Integer()), + namedtype.NamedType('mac', AlgorithmIdentifier()) + ) + + +class PKMACValue(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('algId', AlgorithmIdentifier()), + namedtype.NamedType('value', univ.BitString()) + ) + + +class POPOSigningKeyInput(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType( + 'authInfo', univ.Choice( + componentType=namedtype.NamedTypes( + namedtype.NamedType( + 'sender', GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)) + ), + namedtype.NamedType('publicKeyMAC', PKMACValue()) + ) + ) + ), + namedtype.NamedType('publicKey', SubjectPublicKeyInfo()) + ) + + +class POPOSigningKey(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('poposkInput', POPOSigningKeyInput().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('algorithmIdentifier', AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()) + ) + + +class ProofOfPossession(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('raVerified', + 
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('signature', POPOSigningKey().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.NamedType('keyEncipherment', POPOPrivKey().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.NamedType('keyAgreement', POPOPrivKey().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))) + ) + + +class Controls(univ.SequenceOf): + componentType = AttributeTypeAndValue() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX) + + +class OptionalValidity(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('notBefore', + Time().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('notAfter', + Time().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class CertTemplate(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('version', Version().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('serialNumber', univ.Integer().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('signingAlg', AlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.OptionalNamedType('issuer', Name().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.OptionalNamedType('validity', OptionalValidity().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))), + namedtype.OptionalNamedType('subject', Name().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))), + namedtype.OptionalNamedType('publicKey', SubjectPublicKeyInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))), + namedtype.OptionalNamedType('issuerUID', UniqueIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))), + namedtype.OptionalNamedType('subjectUID', UniqueIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))), + namedtype.OptionalNamedType('extensions', Extensions().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9))) + ) + + +class CertRequest(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('certReqId', univ.Integer()), + namedtype.NamedType('certTemplate', CertTemplate()), + namedtype.OptionalNamedType('controls', Controls()) + ) + + +class CertReq(CertRequest): + pass + + +class CertReqMsg(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('certReq', CertRequest()), + namedtype.OptionalNamedType('pop', ProofOfPossession()), + namedtype.OptionalNamedType('regInfo', univ.SequenceOf(componentType=AttributeTypeAndValue()).subtype( + sizeSpec=constraint.ValueSizeConstraint(1, MAX))) + ) + + +class CertReqMessages(univ.SequenceOf): + componentType = CertReqMsg() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2560.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2560.py new file mode 100644 index 0000000000000000000000000000000000000000..017ac0b66e638e1dcff4a038699f426e9ce8ac84 --- /dev/null +++ 
b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2560.py @@ -0,0 +1,225 @@ +# +# This file is part of pyasn1-modules software. +# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# OCSP request/response syntax +# +# Derived from a minimal OCSP library (RFC2560) code written by +# Bud P. Bruegger +# Copyright: Ancitel, S.p.a, Rome, Italy +# License: BSD +# + +# +# current limitations: +# * request and response work only for a single certificate +# * only some values are parsed out of the response +# * the request doesn't set a nonce or a signature +# * there is no signature validation of the response +# * dates are left as strings in GeneralizedTime format -- datetime.datetime +# would be nicer +# +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc2459 + + +# Start of OCSP module definitions + +# This should be in directory Authentication Framework (X.509) module + +class CRLReason(univ.Enumerated): + namedValues = namedval.NamedValues( + ('unspecified', 0), + ('keyCompromise', 1), + ('cACompromise', 2), + ('affiliationChanged', 3), + ('superseded', 4), + ('cessationOfOperation', 5), + ('certificateHold', 6), + ('removeFromCRL', 8), + ('privilegeWithdrawn', 9), + ('aACompromise', 10) + ) + + +# end of directory Authentication Framework (X.509) module + +# This should be in PKIX Certificate Extensions module + +class GeneralName(univ.OctetString): + pass + + +# end of PKIX Certificate Extensions module + +id_kp_OCSPSigning = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 3, 9)) +id_pkix_ocsp = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1)) +id_pkix_ocsp_basic = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 1)) +id_pkix_ocsp_nonce = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 2)) +id_pkix_ocsp_crl = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 3)) +id_pkix_ocsp_response = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 4)) +id_pkix_ocsp_nocheck = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 5)) +id_pkix_ocsp_archive_cutoff = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 6)) +id_pkix_ocsp_service_locator = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 7)) + + +class AcceptableResponses(univ.SequenceOf): + componentType = univ.ObjectIdentifier() + + +class ArchiveCutoff(useful.GeneralizedTime): + pass + + +class UnknownInfo(univ.Null): + pass + + +class RevokedInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('revocationTime', useful.GeneralizedTime()), + namedtype.OptionalNamedType('revocationReason', CRLReason().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) + ) + + +class CertID(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('hashAlgorithm', rfc2459.AlgorithmIdentifier()), + namedtype.NamedType('issuerNameHash', univ.OctetString()), + namedtype.NamedType('issuerKeyHash', univ.OctetString()), + namedtype.NamedType('serialNumber', rfc2459.CertificateSerialNumber()) + ) + + +class CertStatus(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('good', + univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('revoked', + RevokedInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('unknown', + 
UnknownInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class SingleResponse(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('certID', CertID()), + namedtype.NamedType('certStatus', CertStatus()), + namedtype.NamedType('thisUpdate', useful.GeneralizedTime()), + namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('singleExtensions', rfc2459.Extensions().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class KeyHash(univ.OctetString): + pass + + +class ResponderID(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('byName', + rfc2459.Name().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('byKey', + KeyHash().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class Version(univ.Integer): + namedValues = namedval.NamedValues(('v1', 0)) + + +class ResponseData(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', Version('v1').subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('responderID', ResponderID()), + namedtype.NamedType('producedAt', useful.GeneralizedTime()), + namedtype.NamedType('responses', univ.SequenceOf(componentType=SingleResponse())), + namedtype.OptionalNamedType('responseExtensions', rfc2459.Extensions().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class BasicOCSPResponse(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('tbsResponseData', ResponseData()), + namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()), + namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) + ) + + +class ResponseBytes(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('responseType', univ.ObjectIdentifier()), + namedtype.NamedType('response', univ.OctetString()) + ) + + +class OCSPResponseStatus(univ.Enumerated): + namedValues = namedval.NamedValues( + ('successful', 0), + ('malformedRequest', 1), + ('internalError', 2), + ('tryLater', 3), + ('undefinedStatus', 4), # should never occur + ('sigRequired', 5), + ('unauthorized', 6) + ) + + +class OCSPResponse(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('responseStatus', OCSPResponseStatus()), + namedtype.OptionalNamedType('responseBytes', ResponseBytes().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) + ) + + +class Request(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('reqCert', CertID()), + namedtype.OptionalNamedType('singleRequestExtensions', rfc2459.Extensions().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) + ) + + +class Signature(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()), + namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) + ) + + +class 
TBSRequest(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', Version('v1').subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('requestorName', GeneralName().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('requestList', univ.SequenceOf(componentType=Request())), + namedtype.OptionalNamedType('requestExtensions', rfc2459.Extensions().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class OCSPRequest(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('tbsRequest', TBSRequest()), + namedtype.OptionalNamedType('optionalSignature', Signature().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2631.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2631.py new file mode 100644 index 0000000000000000000000000000000000000000..44e537101c433bf35b87b2b2947f33f58fb58f98 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2631.py @@ -0,0 +1,37 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Diffie-Hellman Key Agreement +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc2631.txt +# https://www.rfc-editor.org/errata/eid5897 +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import univ + + +class KeySpecificInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('algorithm', univ.ObjectIdentifier()), + namedtype.NamedType('counter', univ.OctetString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(4, 4))) + ) + + +class OtherInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('keyInfo', KeySpecificInfo()), + namedtype.OptionalNamedType('partyAInfo', univ.OctetString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('suppPubInfo', univ.OctetString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2634.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2634.py new file mode 100644 index 0000000000000000000000000000000000000000..2099a4b206ef1323fa0fbd8b7d3d5c9b83d61dac --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2634.py @@ -0,0 +1,336 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# Modified by Russ Housley to add a map for use with opentypes. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Enhanced Security Services for S/MIME +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc2634.txt +# + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedval +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc5652 +from pyasn1_modules import rfc5280 + +MAX = float('inf') + +ContentType = rfc5652.ContentType + +IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber + +SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier + +PolicyInformation = rfc5280.PolicyInformation + +GeneralNames = rfc5280.GeneralNames + +CertificateSerialNumber = rfc5280.CertificateSerialNumber + + +# Signing Certificate Attribute +# Warning: It is better to use SigningCertificateV2 from RFC 5035 + +id_aa_signingCertificate = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.12') + +class Hash(univ.OctetString): + pass # SHA-1 hash of entire certificate; RFC 5035 supports other hash algorithms + + +class IssuerSerial(univ.Sequence): + pass + +IssuerSerial.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuer', GeneralNames()), + namedtype.NamedType('serialNumber', CertificateSerialNumber()) +) + + +class ESSCertID(univ.Sequence): + pass + +ESSCertID.componentType = namedtype.NamedTypes( + namedtype.NamedType('certHash', Hash()), + namedtype.OptionalNamedType('issuerSerial', IssuerSerial()) +) + + +class SigningCertificate(univ.Sequence): + pass + +SigningCertificate.componentType = namedtype.NamedTypes( + namedtype.NamedType('certs', univ.SequenceOf( + componentType=ESSCertID())), + namedtype.OptionalNamedType('policies', univ.SequenceOf( + componentType=PolicyInformation())) +) + + +# Mail List Expansion History Attribute + +id_aa_mlExpandHistory = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.3') + +ub_ml_expansion_history = univ.Integer(64) + + +class EntityIdentifier(univ.Choice): + pass + +EntityIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()), + namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier()) +) + + +class MLReceiptPolicy(univ.Choice): + pass + +MLReceiptPolicy.componentType = namedtype.NamedTypes( + namedtype.NamedType('none', univ.Null().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('insteadOf', univ.SequenceOf( + componentType=GeneralNames()).subtype( + sizeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('inAdditionTo', univ.SequenceOf( + componentType=GeneralNames()).subtype( + sizeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) +) + + +class MLData(univ.Sequence): + pass + +MLData.componentType = namedtype.NamedTypes( + namedtype.NamedType('mailListIdentifier', EntityIdentifier()), + namedtype.NamedType('expansionTime', useful.GeneralizedTime()), + namedtype.OptionalNamedType('mlReceiptPolicy', MLReceiptPolicy()) +) + +class MLExpansionHistory(univ.SequenceOf): + pass + +MLExpansionHistory.componentType = MLData() +MLExpansionHistory.sizeSpec = constraint.ValueSizeConstraint(1, ub_ml_expansion_history) + + +# ESS Security Label Attribute + +id_aa_securityLabel = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.2') + 
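+# A minimal usage sketch (illustrative only, not part of RFC 2634): build
+# an ESSSecurityLabel value, defined further below in this module, and
+# DER-encode it with pyasn1's der codec; the policy OID 1.2.3.4 is a
+# placeholder, not a registered security policy.
+#
+#   from pyasn1.codec.der.encoder import encode
+#
+#   label = ESSSecurityLabel()
+#   label['security-policy-identifier'] = univ.ObjectIdentifier('1.2.3.4')
+#   label['security-classification'] = SecurityClassification('confidential')
+#   der_bytes = encode(label)
+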
+ub_privacy_mark_length = univ.Integer(128) + +ub_security_categories = univ.Integer(64) + +ub_integer_options = univ.Integer(256) + + +class ESSPrivacyMark(univ.Choice): + pass + +ESSPrivacyMark.componentType = namedtype.NamedTypes( + namedtype.NamedType('pString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_privacy_mark_length))), + namedtype.NamedType('utf8String', char.UTF8String().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) +) + + +class SecurityClassification(univ.Integer): + pass + +SecurityClassification.subtypeSpec=constraint.ValueRangeConstraint(0, ub_integer_options) + +SecurityClassification.namedValues = namedval.NamedValues( + ('unmarked', 0), + ('unclassified', 1), + ('restricted', 2), + ('confidential', 3), + ('secret', 4), + ('top-secret', 5) +) + + +class SecurityPolicyIdentifier(univ.ObjectIdentifier): + pass + + +class SecurityCategory(univ.Sequence): + pass + +SecurityCategory.componentType = namedtype.NamedTypes( + namedtype.NamedType('type', univ.ObjectIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class SecurityCategories(univ.SetOf): + pass + +SecurityCategories.componentType = SecurityCategory() +SecurityCategories.sizeSpec = constraint.ValueSizeConstraint(1, ub_security_categories) + + +class ESSSecurityLabel(univ.Set): + pass + +ESSSecurityLabel.componentType = namedtype.NamedTypes( + namedtype.NamedType('security-policy-identifier', SecurityPolicyIdentifier()), + namedtype.OptionalNamedType('security-classification', SecurityClassification()), + namedtype.OptionalNamedType('privacy-mark', ESSPrivacyMark()), + namedtype.OptionalNamedType('security-categories', SecurityCategories()) +) + + +# Equivalent Labels Attribute + +id_aa_equivalentLabels = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.9') + +class EquivalentLabels(univ.SequenceOf): + pass + +EquivalentLabels.componentType = ESSSecurityLabel() + + +# Content Identifier Attribute + +id_aa_contentIdentifier = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.7') + +class ContentIdentifier(univ.OctetString): + pass + + +# Content Reference Attribute + +id_aa_contentReference = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.10') + +class ContentReference(univ.Sequence): + pass + +ContentReference.componentType = namedtype.NamedTypes( + namedtype.NamedType('contentType', ContentType()), + namedtype.NamedType('signedContentIdentifier', ContentIdentifier()), + namedtype.NamedType('originatorSignatureValue', univ.OctetString()) +) + + +# Message Signature Digest Attribute + +id_aa_msgSigDigest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.5') + +class MsgSigDigest(univ.OctetString): + pass + + +# Content Hints Attribute + +id_aa_contentHint = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.4') + +class ContentHints(univ.Sequence): + pass + +ContentHints.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('contentDescription', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('contentType', ContentType()) +) + + +# Receipt Request Attribute + +class AllOrFirstTier(univ.Integer): + pass + +AllOrFirstTier.namedValues = namedval.NamedValues( + ('allReceipts', 0), + ('firstTierRecipients', 1) +) + + +class ReceiptsFrom(univ.Choice): + pass + +ReceiptsFrom.componentType = namedtype.NamedTypes( + 
namedtype.NamedType('allOrFirstTier', AllOrFirstTier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('receiptList', univ.SequenceOf( + componentType=GeneralNames()).subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +id_aa_receiptRequest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.1') + +ub_receiptsTo = univ.Integer(16) + +class ReceiptRequest(univ.Sequence): + pass + +ReceiptRequest.componentType = namedtype.NamedTypes( + namedtype.NamedType('signedContentIdentifier', ContentIdentifier()), + namedtype.NamedType('receiptsFrom', ReceiptsFrom()), + namedtype.NamedType('receiptsTo', univ.SequenceOf(componentType=GeneralNames()).subtype(sizeSpec=constraint.ValueSizeConstraint(1, ub_receiptsTo))) +) + +# Receipt Content Type + +class ESSVersion(univ.Integer): + pass + +ESSVersion.namedValues = namedval.NamedValues( + ('v1', 1) +) + + +id_ct_receipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.1') + +class Receipt(univ.Sequence): + pass + +Receipt.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', ESSVersion()), + namedtype.NamedType('contentType', ContentType()), + namedtype.NamedType('signedContentIdentifier', ContentIdentifier()), + namedtype.NamedType('originatorSignatureValue', univ.OctetString()) +) + + +# Map of Attribute Type to the Attribute structure is added to the +# ones that are in rfc5652.py + +_cmsAttributesMapUpdate = { + id_aa_signingCertificate: SigningCertificate(), + id_aa_mlExpandHistory: MLExpansionHistory(), + id_aa_securityLabel: ESSSecurityLabel(), + id_aa_equivalentLabels: EquivalentLabels(), + id_aa_contentIdentifier: ContentIdentifier(), + id_aa_contentReference: ContentReference(), + id_aa_msgSigDigest: MsgSigDigest(), + id_aa_contentHint: ContentHints(), + id_aa_receiptRequest: ReceiptRequest(), +} + +rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) + + +# Map of Content Type OIDs to Content Types is added to the +# ones that are in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_ct_receipt: Receipt(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2876.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2876.py new file mode 100644 index 0000000000000000000000000000000000000000..04c402b7ea6cddf9058e587bd6e3299838eebd58 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2876.py @@ -0,0 +1,56 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# KEA and SKIPJACK Algorithms in CMS +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc2876.txt +# + +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5751 + + +id_fortezzaConfidentialityAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.4') + + +id_fortezzaWrap80 = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.23') + + +id_kEAKeyEncryptionAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.24') + + +id_keyExchangeAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.22') + + +class Skipjack_Parm(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('initialization-vector', univ.OctetString()) + ) + + +# Update the Algorithm Identifier map in rfc5280.py. 
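+#
+# A hedged decoding sketch (illustrative, not part of RFC 2876): once this
+# map is updated, pyasn1's DER decoder can resolve the SKIPJACK parameters
+# as an open type; der_bytes is assumed to hold an encoded
+# AlgorithmIdentifier whose algorithm is id_fortezzaConfidentialityAlgorithm.
+#
+#   from pyasn1.codec.der.decoder import decode
+#
+#   alg_id, _ = decode(der_bytes,
+#                      asn1Spec=rfc5280.AlgorithmIdentifier(),
+#                      decodeOpenTypes=True)
+#   iv = alg_id['parameters']['initialization-vector']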
+ +_algorithmIdentifierMapUpdate = { + id_fortezzaConfidentialityAlgorithm: Skipjack_Parm(), + id_kEAKeyEncryptionAlgorithm: rfc5280.AlgorithmIdentifier(), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) + + +# Update the SMIMECapabilities Attribute map in rfc5751.py + +_smimeCapabilityMapUpdate = { + id_kEAKeyEncryptionAlgorithm: rfc5280.AlgorithmIdentifier(), +} + +rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2985.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2985.py new file mode 100644 index 0000000000000000000000000000000000000000..75bccf097dcd4c1f704f3207e5c35c562a90097b --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2985.py @@ -0,0 +1,588 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# PKCS#9: Selected Attribute Types (Version 2.0) +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc2985.txt +# + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import opentype +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc7292 +from pyasn1_modules import rfc5958 +from pyasn1_modules import rfc5652 +from pyasn1_modules import rfc5280 + + +def _OID(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + + return univ.ObjectIdentifier(output) + + +MAX = float('inf') + + +# Imports from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + +Attribute = rfc5280.Attribute + +EmailAddress = rfc5280.EmailAddress + +Extensions = rfc5280.Extensions + +Time = rfc5280.Time + +X520countryName = rfc5280.X520countryName + +X520SerialNumber = rfc5280.X520SerialNumber + + +# Imports from RFC 5652 + +ContentInfo = rfc5652.ContentInfo + +ContentType = rfc5652.ContentType + +Countersignature = rfc5652.Countersignature + +MessageDigest = rfc5652.MessageDigest + +SignerInfo = rfc5652.SignerInfo + +SigningTime = rfc5652.SigningTime + + +# Imports from RFC 5958 + +EncryptedPrivateKeyInfo = rfc5958.EncryptedPrivateKeyInfo + + +# Imports from RFC 7292 + +PFX = rfc7292.PFX + + +# TODO: +# Need a place to import PKCS15Token; it does not yet appear in an RFC + + +# SingleAttribute is the same as Attribute in RFC 5280, except that the +# attrValues SET must have one and only one member + +class AttributeType(univ.ObjectIdentifier): + pass + + +class AttributeValue(univ.Any): + pass + + +class AttributeValues(univ.SetOf): + pass + +AttributeValues.componentType = AttributeValue() + + +class SingleAttributeValues(univ.SetOf): + pass + +SingleAttributeValues.componentType = AttributeValue() + + +class SingleAttribute(univ.Sequence): + pass + +SingleAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('type', AttributeType()), + namedtype.NamedType('values', + AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)), + openType=opentype.OpenType('type', rfc5280.certificateAttributesMap) + ) +) + + +# CMSAttribute is the same as Attribute in RFC 5652, and CMSSingleAttribute +# is the companion where the attrValues SET must have one and only one member + +CMSAttribute = 
rfc5652.Attribute + + +class CMSSingleAttribute(univ.Sequence): + pass + +CMSSingleAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('attrType', AttributeType()), + namedtype.NamedType('attrValues', + AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)), + openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap) + ) +) + + +# DirectoryString is the same as RFC 5280, except the length is limited to 255 + +class DirectoryString(univ.Choice): + pass + +DirectoryString.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, 255))), + namedtype.NamedType('printableString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, 255))), + namedtype.NamedType('universalString', char.UniversalString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, 255))), + namedtype.NamedType('utf8String', char.UTF8String().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, 255))), + namedtype.NamedType('bmpString', char.BMPString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, 255))) +) + + +# PKCS9String is DirectoryString with an additional choice of IA5String, +# and the SIZE is limited to 255 + +class PKCS9String(univ.Choice): + pass + +PKCS9String.componentType = namedtype.NamedTypes( + namedtype.NamedType('ia5String', char.IA5String().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, 255))), + namedtype.NamedType('directoryString', DirectoryString()) +) + + +# Upper Bounds + +pkcs_9_ub_pkcs9String = univ.Integer(255) + +pkcs_9_ub_challengePassword = univ.Integer(pkcs_9_ub_pkcs9String) + +pkcs_9_ub_emailAddress = univ.Integer(pkcs_9_ub_pkcs9String) + +pkcs_9_ub_friendlyName = univ.Integer(pkcs_9_ub_pkcs9String) + +pkcs_9_ub_match = univ.Integer(pkcs_9_ub_pkcs9String) + +pkcs_9_ub_signingDescription = univ.Integer(pkcs_9_ub_pkcs9String) + +pkcs_9_ub_unstructuredAddress = univ.Integer(pkcs_9_ub_pkcs9String) + +pkcs_9_ub_unstructuredName = univ.Integer(pkcs_9_ub_pkcs9String) + + +ub_name = univ.Integer(32768) + +pkcs_9_ub_placeOfBirth = univ.Integer(ub_name) + +pkcs_9_ub_pseudonym = univ.Integer(ub_name) + + +# Object Identifier Arcs + +ietf_at = _OID(1, 3, 6, 1, 5, 5, 7, 9) + +id_at = _OID(2, 5, 4) + +pkcs_9 = _OID(1, 2, 840, 113549, 1, 9) + +pkcs_9_mo = _OID(pkcs_9, 0) + +smime = _OID(pkcs_9, 16) + +certTypes = _OID(pkcs_9, 22) + +crlTypes = _OID(pkcs_9, 23) + +pkcs_9_oc = _OID(pkcs_9, 24) + +pkcs_9_at = _OID(pkcs_9, 25) + +pkcs_9_sx = _OID(pkcs_9, 26) + +pkcs_9_mr = _OID(pkcs_9, 27) + + +# Object Identifiers for Syntaxes for use with LDAP-accessible directories + +pkcs_9_sx_pkcs9String = _OID(pkcs_9_sx, 1) + +pkcs_9_sx_signingTime = _OID(pkcs_9_sx, 2) + + +# Object Identifiers for object classes + +pkcs_9_oc_pkcsEntity = _OID(pkcs_9_oc, 1) + +pkcs_9_oc_naturalPerson = _OID(pkcs_9_oc, 2) + + +# Object Identifiers for matching rules + +pkcs_9_mr_caseIgnoreMatch = _OID(pkcs_9_mr, 1) + +pkcs_9_mr_signingTimeMatch = _OID(pkcs_9_mr, 2) + + +# PKCS #7 PDU + +pkcs_9_at_pkcs7PDU = _OID(pkcs_9_at, 5) + +pKCS7PDU = Attribute() +pKCS7PDU['type'] = pkcs_9_at_pkcs7PDU +pKCS7PDU['values'][0] = ContentInfo() + + +# PKCS #12 token + +pkcs_9_at_userPKCS12 = _OID(2, 16, 840, 1, 113730, 3, 1, 216) + +userPKCS12 = Attribute() +userPKCS12['type'] = pkcs_9_at_userPKCS12 +userPKCS12['values'][0] = PFX() + + +# PKCS #15 token + +pkcs_9_at_pkcs15Token = _OID(pkcs_9_at, 1) + +# TODO: Once PKCS15Token can be imported, this can be 
included
+#
+# pKCS15Token = Attribute()
+# pKCS15Token['type'] = pkcs_9_at_pkcs15Token
+# pKCS15Token['values'][0] = PKCS15Token()
+
+
+# PKCS #8 encrypted private key information
+
+pkcs_9_at_encryptedPrivateKeyInfo = _OID(pkcs_9_at, 2)
+
+encryptedPrivateKeyInfo = Attribute()
+encryptedPrivateKeyInfo['type'] = pkcs_9_at_encryptedPrivateKeyInfo
+encryptedPrivateKeyInfo['values'][0] = EncryptedPrivateKeyInfo()
+
+
+# Electronic-mail address
+
+pkcs_9_at_emailAddress = rfc5280.id_emailAddress
+
+emailAddress = Attribute()
+emailAddress['type'] = pkcs_9_at_emailAddress
+emailAddress['values'][0] = EmailAddress()
+
+
+# Unstructured name
+
+pkcs_9_at_unstructuredName = _OID(pkcs_9, 2)
+
+unstructuredName = Attribute()
+unstructuredName['type'] = pkcs_9_at_unstructuredName
+unstructuredName['values'][0] = PKCS9String()
+
+
+# Unstructured address
+
+pkcs_9_at_unstructuredAddress = _OID(pkcs_9, 8)
+
+unstructuredAddress = Attribute()
+unstructuredAddress['type'] = pkcs_9_at_unstructuredAddress
+unstructuredAddress['values'][0] = DirectoryString()
+
+
+# Date of birth
+
+pkcs_9_at_dateOfBirth = _OID(ietf_at, 1)
+
+dateOfBirth = SingleAttribute()
+dateOfBirth['type'] = pkcs_9_at_dateOfBirth
+dateOfBirth['values'][0] = useful.GeneralizedTime()
+
+
+# Place of birth
+
+pkcs_9_at_placeOfBirth = _OID(ietf_at, 2)
+
+placeOfBirth = SingleAttribute()
+placeOfBirth['type'] = pkcs_9_at_placeOfBirth
+placeOfBirth['values'][0] = DirectoryString()
+
+
+# Gender
+
+class GenderString(char.PrintableString):
+    pass
+
+# Both constraints are intended to apply; assigning them one after the
+# other would silently discard the size constraint, so they are combined
+# into a single intersection.
+GenderString.subtypeSpec = constraint.ConstraintsIntersection(
+    constraint.ValueSizeConstraint(1, 1),
+    constraint.SingleValueConstraint("M", "F", "m", "f")
+)
+
+
+pkcs_9_at_gender = _OID(ietf_at, 3)
+
+gender = SingleAttribute()
+gender['type'] = pkcs_9_at_gender
+gender['values'][0] = GenderString()
+
+
+# Country of citizenship
+
+pkcs_9_at_countryOfCitizenship = _OID(ietf_at, 4)
+
+countryOfCitizenship = Attribute()
+countryOfCitizenship['type'] = pkcs_9_at_countryOfCitizenship
+countryOfCitizenship['values'][0] = X520countryName()
+
+
+# Country of residence
+
+pkcs_9_at_countryOfResidence = _OID(ietf_at, 5)
+
+countryOfResidence = Attribute()
+countryOfResidence['type'] = pkcs_9_at_countryOfResidence
+countryOfResidence['values'][0] = X520countryName()
+
+
+# Pseudonym
+
+id_at_pseudonym = _OID(2, 5, 4, 65)
+
+pseudonym = Attribute()
+pseudonym['type'] = id_at_pseudonym
+pseudonym['values'][0] = DirectoryString()
+
+
+# Serial number
+
+id_at_serialNumber = rfc5280.id_at_serialNumber
+
+serialNumber = Attribute()
+serialNumber['type'] = id_at_serialNumber
+serialNumber['values'][0] = X520SerialNumber()
+
+
+# Content type
+
+pkcs_9_at_contentType = rfc5652.id_contentType
+
+contentType = CMSSingleAttribute()
+contentType['attrType'] = pkcs_9_at_contentType
+contentType['attrValues'][0] = ContentType()
+
+
+# Message digest
+
+pkcs_9_at_messageDigest = rfc5652.id_messageDigest
+
+messageDigest = CMSSingleAttribute()
+messageDigest['attrType'] = pkcs_9_at_messageDigest
+messageDigest['attrValues'][0] = MessageDigest()
+
+
+# Signing time
+
+pkcs_9_at_signingTime = rfc5652.id_signingTime
+
+signingTime = CMSSingleAttribute()
+signingTime['attrType'] = pkcs_9_at_signingTime
+signingTime['attrValues'][0] = SigningTime()
+
+
+# Random nonce
+
+class RandomNonce(univ.OctetString):
+    pass
+
+RandomNonce.subtypeSpec = constraint.ValueSizeConstraint(4, MAX)
+
+
+pkcs_9_at_randomNonce = _OID(pkcs_9_at, 3)
+
+randomNonce = CMSSingleAttribute()
+randomNonce['attrType'] = pkcs_9_at_randomNonce
+randomNonce['attrValues'][0] = RandomNonce() + + +# Sequence number + +class SequenceNumber(univ.Integer): + pass + +SequenceNumber.subtypeSpec = constraint.ValueRangeConstraint(1, MAX) + + +pkcs_9_at_sequenceNumber = _OID(pkcs_9_at, 4) + +sequenceNumber = CMSSingleAttribute() +sequenceNumber['attrType'] = pkcs_9_at_sequenceNumber +sequenceNumber['attrValues'][0] = SequenceNumber() + + +# Countersignature + +pkcs_9_at_counterSignature = rfc5652.id_countersignature + +counterSignature = CMSAttribute() +counterSignature['attrType'] = pkcs_9_at_counterSignature +counterSignature['attrValues'][0] = Countersignature() + + +# Challenge password + +pkcs_9_at_challengePassword = _OID(pkcs_9, 7) + +challengePassword = SingleAttribute() +challengePassword['type'] = pkcs_9_at_challengePassword +challengePassword['values'][0] = DirectoryString() + + +# Extension request + +class ExtensionRequest(Extensions): + pass + + +pkcs_9_at_extensionRequest = _OID(pkcs_9, 14) + +extensionRequest = SingleAttribute() +extensionRequest['type'] = pkcs_9_at_extensionRequest +extensionRequest['values'][0] = ExtensionRequest() + + +# Extended-certificate attributes (deprecated) + +class AttributeSet(univ.SetOf): + pass + +AttributeSet.componentType = Attribute() + + +pkcs_9_at_extendedCertificateAttributes = _OID(pkcs_9, 9) + +extendedCertificateAttributes = SingleAttribute() +extendedCertificateAttributes['type'] = pkcs_9_at_extendedCertificateAttributes +extendedCertificateAttributes['values'][0] = AttributeSet() + + +# Friendly name + +class FriendlyName(char.BMPString): + pass + +FriendlyName.subtypeSpec = constraint.ValueSizeConstraint(1, pkcs_9_ub_friendlyName) + + +pkcs_9_at_friendlyName = _OID(pkcs_9, 20) + +friendlyName = SingleAttribute() +friendlyName['type'] = pkcs_9_at_friendlyName +friendlyName['values'][0] = FriendlyName() + + +# Local key identifier + +pkcs_9_at_localKeyId = _OID(pkcs_9, 21) + +localKeyId = SingleAttribute() +localKeyId['type'] = pkcs_9_at_localKeyId +localKeyId['values'][0] = univ.OctetString() + + +# Signing description + +pkcs_9_at_signingDescription = _OID(pkcs_9, 13) + +signingDescription = CMSSingleAttribute() +signingDescription['attrType'] = pkcs_9_at_signingDescription +signingDescription['attrValues'][0] = DirectoryString() + + +# S/MIME capabilities + +class SMIMECapability(AlgorithmIdentifier): + pass + + +class SMIMECapabilities(univ.SequenceOf): + pass + +SMIMECapabilities.componentType = SMIMECapability() + + +pkcs_9_at_smimeCapabilities = _OID(pkcs_9, 15) + +smimeCapabilities = CMSSingleAttribute() +smimeCapabilities['attrType'] = pkcs_9_at_smimeCapabilities +smimeCapabilities['attrValues'][0] = SMIMECapabilities() + + +# Certificate Attribute Map + +_certificateAttributesMapUpdate = { + # Attribute types for use with the "pkcsEntity" object class + pkcs_9_at_pkcs7PDU: ContentInfo(), + pkcs_9_at_userPKCS12: PFX(), + # TODO: Once PKCS15Token can be imported, this can be included + # pkcs_9_at_pkcs15Token: PKCS15Token(), + pkcs_9_at_encryptedPrivateKeyInfo: EncryptedPrivateKeyInfo(), + # Attribute types for use with the "naturalPerson" object class + pkcs_9_at_emailAddress: EmailAddress(), + pkcs_9_at_unstructuredName: PKCS9String(), + pkcs_9_at_unstructuredAddress: DirectoryString(), + pkcs_9_at_dateOfBirth: useful.GeneralizedTime(), + pkcs_9_at_placeOfBirth: DirectoryString(), + pkcs_9_at_gender: GenderString(), + pkcs_9_at_countryOfCitizenship: X520countryName(), + pkcs_9_at_countryOfResidence: X520countryName(), + id_at_pseudonym: DirectoryString(), + 
id_at_serialNumber: X520SerialNumber(), + # Attribute types for use with PKCS #10 certificate requests + pkcs_9_at_challengePassword: DirectoryString(), + pkcs_9_at_extensionRequest: ExtensionRequest(), + pkcs_9_at_extendedCertificateAttributes: AttributeSet(), +} + +rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate) + + +# CMS Attribute Map + +# Note: pkcs_9_at_smimeCapabilities is not included in the map because +# the definition in RFC 5751 is preferred, which produces the same +# encoding, but it allows different parameters for SMIMECapability +# and AlgorithmIdentifier. + +_cmsAttributesMapUpdate = { + # Attribute types for use in PKCS #7 data (a.k.a. CMS) + pkcs_9_at_contentType: ContentType(), + pkcs_9_at_messageDigest: MessageDigest(), + pkcs_9_at_signingTime: SigningTime(), + pkcs_9_at_randomNonce: RandomNonce(), + pkcs_9_at_sequenceNumber: SequenceNumber(), + pkcs_9_at_counterSignature: Countersignature(), + # Attributes for use in PKCS #12 "PFX" PDUs or PKCS #15 tokens + pkcs_9_at_friendlyName: FriendlyName(), + pkcs_9_at_localKeyId: univ.OctetString(), + pkcs_9_at_signingDescription: DirectoryString(), + # pkcs_9_at_smimeCapabilities: SMIMECapabilities(), +} + +rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2986.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2986.py new file mode 100644 index 0000000000000000000000000000000000000000..309637d1fe275f1ce34fb9711ff4f704431bc78a --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc2986.py @@ -0,0 +1,75 @@ +# coding: utf-8 +# +# This file is part of pyasn1-modules software. +# +# Created by Joel Johnson with asn1ate tool. +# Modified by Russ Housley to add support for opentypes by importing +# definitions from rfc5280 so that the same maps are used. 
+# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# PKCS #10: Certification Request Syntax Specification +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc2986.txt +# +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + +MAX = float('inf') + + +AttributeType = rfc5280.AttributeType + +AttributeValue = rfc5280.AttributeValue + +AttributeTypeAndValue = rfc5280.AttributeTypeAndValue + +Attribute = rfc5280.Attribute + +RelativeDistinguishedName = rfc5280.RelativeDistinguishedName + +RDNSequence = rfc5280.RDNSequence + +Name = rfc5280.Name + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + +SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo + + +class Attributes(univ.SetOf): + pass + + +Attributes.componentType = Attribute() + + +class CertificationRequestInfo(univ.Sequence): + pass + + +CertificationRequestInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', univ.Integer()), + namedtype.NamedType('subject', Name()), + namedtype.NamedType('subjectPKInfo', SubjectPublicKeyInfo()), + namedtype.NamedType('attributes', + Attributes().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0)) + ) +) + + +class CertificationRequest(univ.Sequence): + pass + + +CertificationRequest.componentType = namedtype.NamedTypes( + namedtype.NamedType('certificationRequestInfo', CertificationRequestInfo()), + namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()) +) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3058.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3058.py new file mode 100644 index 0000000000000000000000000000000000000000..725de82ae71866187e685b32ea67f7e7e821926d --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3058.py @@ -0,0 +1,42 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# IDEA Encryption Algorithm in CMS +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc3058.txt +# https://www.rfc-editor.org/errata/eid5913 +# + +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +id_IDEA_CBC = univ.ObjectIdentifier('1.3.6.1.4.1.188.7.1.1.2') + + +id_alg_CMSIDEAwrap = univ.ObjectIdentifier('1.3.6.1.4.1.188.7.1.1.6') + + +class IDEA_CBCPar(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('iv', univ.OctetString()) + # exactly 8 octets, when present + ) + + +# Update the Algorithm Identifier map in rfc5280.py. + +_algorithmIdentifierMapUpdate = { + id_IDEA_CBC: IDEA_CBCPar(), + id_alg_CMSIDEAwrap: univ.Null("") +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3114.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3114.py new file mode 100644 index 0000000000000000000000000000000000000000..badcb1f2140383a12b286f7db1be1cf640df89ac --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3114.py @@ -0,0 +1,77 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# TEST Company Classification Policies +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc3114.txt +# + +from pyasn1.type import char +from pyasn1.type import namedval +from pyasn1.type import univ + +from pyasn1_modules import rfc5755 + + +id_smime = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, )) + +id_tsp = id_smime + (7, ) + +id_tsp_TEST_Amoco = id_tsp + (1, ) + +class Amoco_SecurityClassification(univ.Integer): + namedValues = namedval.NamedValues( + ('amoco-general', 6), + ('amoco-confidential', 7), + ('amoco-highly-confidential', 8) + ) + + +id_tsp_TEST_Caterpillar = id_tsp + (2, ) + +class Caterpillar_SecurityClassification(univ.Integer): + namedValues = namedval.NamedValues( + ('caterpillar-public', 6), + ('caterpillar-green', 7), + ('caterpillar-yellow', 8), + ('caterpillar-red', 9) + ) + + +id_tsp_TEST_Whirlpool = id_tsp + (3, ) + +class Whirlpool_SecurityClassification(univ.Integer): + namedValues = namedval.NamedValues( + ('whirlpool-public', 6), + ('whirlpool-internal', 7), + ('whirlpool-confidential', 8) + ) + + +id_tsp_TEST_Whirlpool_Categories = id_tsp + (4, ) + +class SecurityCategoryValues(univ.SequenceOf): + componentType = char.UTF8String() + +# Example SecurityCategoryValues: "LAW DEPARTMENT USE ONLY" +# Example SecurityCategoryValues: "HUMAN RESOURCES USE ONLY" + + +# Also, the privacy mark in the security label can contain a string, +# such as: "ATTORNEY-CLIENT PRIVILEGED INFORMATION" + + +# Map of security category type OIDs to security category added +# to the ones that are in rfc5755.py + +_securityCategoryMapUpdate = { + id_tsp_TEST_Whirlpool_Categories: SecurityCategoryValues(), +} + +rfc5755.securityCategoryMap.update(_securityCategoryMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3125.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3125.py new file mode 100644 index 0000000000000000000000000000000000000000..00ff9bff48046eb43c7b611cae59a0fb61987991 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3125.py @@ -0,0 +1,469 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Electronic Signature Policies +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc3125.txt +# https://www.rfc-editor.org/errata/eid5901 +# https://www.rfc-editor.org/errata/eid5902 +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import useful +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + +MAX = float('inf') + + +# Imports from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + +Attribute = rfc5280.Attribute + +AttributeType = rfc5280.AttributeType + +AttributeTypeAndValue = rfc5280.AttributeTypeAndValue + +AttributeValue = rfc5280.AttributeValue + +Certificate = rfc5280.Certificate + +CertificateList = rfc5280.CertificateList + +DirectoryString = rfc5280.DirectoryString + +GeneralName = rfc5280.GeneralName + +GeneralNames = rfc5280.GeneralNames + +Name = rfc5280.Name + +PolicyInformation = rfc5280.PolicyInformation + + +# Electronic Signature Policies + +class CertPolicyId(univ.ObjectIdentifier): + pass + + +class AcceptablePolicySet(univ.SequenceOf): + componentType = CertPolicyId() + + +class SignPolExtn(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('extnID', univ.ObjectIdentifier()), + namedtype.NamedType('extnValue', univ.OctetString()) + ) + + +class SignPolExtensions(univ.SequenceOf): + componentType = SignPolExtn() + + +class AlgAndLength(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('algID', univ.ObjectIdentifier()), + namedtype.OptionalNamedType('minKeyLength', univ.Integer()), + namedtype.OptionalNamedType('other', SignPolExtensions()) + ) + + +class AlgorithmConstraints(univ.SequenceOf): + componentType = AlgAndLength() + + +class AlgorithmConstraintSet(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('signerAlgorithmConstraints', + AlgorithmConstraints().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('eeCertAlgorithmConstraints', + AlgorithmConstraints().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('caCertAlgorithmConstraints', + AlgorithmConstraints().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('aaCertAlgorithmConstraints', + AlgorithmConstraints().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.OptionalNamedType('tsaCertAlgorithmConstraints', + AlgorithmConstraints().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 4))) + ) + + +class AttributeValueConstraints(univ.SequenceOf): + componentType = AttributeTypeAndValue() + + +class AttributeTypeConstraints(univ.SequenceOf): + componentType = AttributeType() + + +class AttributeConstraints(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('attributeTypeConstarints', + AttributeTypeConstraints().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('attributeValueConstarints', + AttributeValueConstraints().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class HowCertAttribute(univ.Enumerated): + namedValues = namedval.NamedValues( + ('claimedAttribute', 0), + ('certifiedAttribtes', 1), + 
('either', 2) + ) + + +class SkipCerts(univ.Integer): + subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +class PolicyConstraints(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('requireExplicitPolicy', + SkipCerts().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('inhibitPolicyMapping', + SkipCerts().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class BaseDistance(univ.Integer): + subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +class GeneralSubtree(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('base', GeneralName()), + namedtype.DefaultedNamedType('minimum', + BaseDistance().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0)).subtype( + value=0)), + namedtype.OptionalNamedType('maximum', + BaseDistance().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class GeneralSubtrees(univ.SequenceOf): + componentType = GeneralSubtree() + subtypeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class NameConstraints(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('permittedSubtrees', + GeneralSubtrees().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('excludedSubtrees', + GeneralSubtrees().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class PathLenConstraint(univ.Integer): + subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +class CertificateTrustPoint(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('trustpoint', Certificate()), + namedtype.OptionalNamedType('pathLenConstraint', + PathLenConstraint().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('acceptablePolicySet', + AcceptablePolicySet().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('nameConstraints', + NameConstraints().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.OptionalNamedType('policyConstraints', + PolicyConstraints().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 3))) + ) + + +class CertificateTrustTrees(univ.SequenceOf): + componentType = CertificateTrustPoint() + + +class EnuRevReq(univ.Enumerated): + namedValues = namedval.NamedValues( + ('clrCheck', 0), + ('ocspCheck', 1), + ('bothCheck', 2), + ('eitherCheck', 3), + ('noCheck', 4), + ('other', 5) + ) + + +class RevReq(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('enuRevReq', EnuRevReq()), + namedtype.OptionalNamedType('exRevReq', SignPolExtensions()) + ) + + +class CertRevReq(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('endCertRevReq', RevReq()), + namedtype.NamedType('caCerts', + RevReq().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 0))) + ) + + +class AttributeTrustCondition(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('attributeMandated', univ.Boolean()), + namedtype.NamedType('howCertAttribute', HowCertAttribute()), + namedtype.OptionalNamedType('attrCertificateTrustTrees', + CertificateTrustTrees().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('attrRevReq', 
+ CertRevReq().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.OptionalNamedType('attributeConstraints', + AttributeConstraints().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 2))) + ) + + +class CMSAttrs(univ.SequenceOf): + componentType = univ.ObjectIdentifier() + + +class CertInfoReq(univ.Enumerated): + namedValues = namedval.NamedValues( + ('none', 0), + ('signerOnly', 1), + ('fullPath', 2) + ) + + +class CertRefReq(univ.Enumerated): + namedValues = namedval.NamedValues( + ('signerOnly', 1), + ('fullPath', 2) + ) + + +class DeltaTime(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('deltaSeconds', univ.Integer()), + namedtype.NamedType('deltaMinutes', univ.Integer()), + namedtype.NamedType('deltaHours', univ.Integer()), + namedtype.NamedType('deltaDays', univ.Integer()) + ) + + +class TimestampTrustCondition(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('ttsCertificateTrustTrees', + CertificateTrustTrees().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('ttsRevReq', + CertRevReq().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.OptionalNamedType('ttsNameConstraints', + NameConstraints().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.OptionalNamedType('cautionPeriod', + DeltaTime().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.OptionalNamedType('signatureTimestampDelay', + DeltaTime().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 4))) + ) + + +class SignerRules(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('externalSignedData', univ.Boolean()), + namedtype.NamedType('mandatedSignedAttr', CMSAttrs()), + namedtype.NamedType('mandatedUnsignedAttr', CMSAttrs()), + namedtype.DefaultedNamedType('mandatedCertificateRef', + CertRefReq().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0)).subtype( + value='signerOnly')), + namedtype.DefaultedNamedType('mandatedCertificateInfo', + CertInfoReq().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1)).subtype( + value='none')), + namedtype.OptionalNamedType('signPolExtensions', + SignPolExtensions().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class MandatedUnsignedAttr(CMSAttrs): + pass + + +class VerifierRules(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('mandatedUnsignedAttr', MandatedUnsignedAttr()), + namedtype.OptionalNamedType('signPolExtensions', SignPolExtensions()) + ) + + +class SignerAndVerifierRules(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('signerRules', SignerRules()), + namedtype.NamedType('verifierRules', VerifierRules()) + ) + + +class SigningCertTrustCondition(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('signerTrustTrees', CertificateTrustTrees()), + namedtype.NamedType('signerRevReq', CertRevReq()) + ) + + +class CommitmentTypeIdentifier(univ.ObjectIdentifier): + pass + + +class FieldOfApplication(DirectoryString): + pass + + +class CommitmentType(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('identifier', CommitmentTypeIdentifier()), + namedtype.OptionalNamedType('fieldOfApplication', + 
FieldOfApplication().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('semantics', + DirectoryString().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class SelectedCommitmentTypes(univ.SequenceOf): + componentType = univ.Choice(componentType=namedtype.NamedTypes( + namedtype.NamedType('empty', univ.Null()), + namedtype.NamedType('recognizedCommitmentType', CommitmentType()) + )) + + +class CommitmentRule(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('selCommitmentTypes', SelectedCommitmentTypes()), + namedtype.OptionalNamedType('signerAndVeriferRules', + SignerAndVerifierRules().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('signingCertTrustCondition', + SigningCertTrustCondition().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.OptionalNamedType('timeStampTrustCondition', + TimestampTrustCondition().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.OptionalNamedType('attributeTrustCondition', + AttributeTrustCondition().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.OptionalNamedType('algorithmConstraintSet', + AlgorithmConstraintSet().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 4))), + namedtype.OptionalNamedType('signPolExtensions', + SignPolExtensions().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 5))) + ) + + +class CommitmentRules(univ.SequenceOf): + componentType = CommitmentRule() + + +class CommonRules(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('signerAndVeriferRules', + SignerAndVerifierRules().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('signingCertTrustCondition', + SigningCertTrustCondition().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.OptionalNamedType('timeStampTrustCondition', + TimestampTrustCondition().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.OptionalNamedType('attributeTrustCondition', + AttributeTrustCondition().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.OptionalNamedType('algorithmConstraintSet', + AlgorithmConstraintSet().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 4))), + namedtype.OptionalNamedType('signPolExtensions', + SignPolExtensions().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 5))) + ) + + +class PolicyIssuerName(GeneralNames): + pass + + +class SignPolicyHash(univ.OctetString): + pass + + +class SignPolicyId(univ.ObjectIdentifier): + pass + + +class SigningPeriod(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('notBefore', useful.GeneralizedTime()), + namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime()) + ) + + +class SignatureValidationPolicy(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('signingPeriod', SigningPeriod()), + namedtype.NamedType('commonRules', CommonRules()), + namedtype.NamedType('commitmentRules', CommitmentRules()), + namedtype.OptionalNamedType('signPolExtensions', SignPolExtensions()) + ) + + +class SignPolicyInfo(univ.Sequence): + 
componentType = namedtype.NamedTypes( + namedtype.NamedType('signPolicyIdentifier', SignPolicyId()), + namedtype.NamedType('dateOfIssue', useful.GeneralizedTime()), + namedtype.NamedType('policyIssuerName', PolicyIssuerName()), + namedtype.NamedType('fieldOfApplication', FieldOfApplication()), + namedtype.NamedType('signatureValidationPolicy', SignatureValidationPolicy()), + namedtype.OptionalNamedType('signPolExtensions', SignPolExtensions()) + ) + + +class SignaturePolicy(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('signPolicyHashAlg', AlgorithmIdentifier()), + namedtype.NamedType('signPolicyInfo', SignPolicyInfo()), + namedtype.OptionalNamedType('signPolicyHash', SignPolicyHash()) + ) + + diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3161.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3161.py new file mode 100644 index 0000000000000000000000000000000000000000..0e1dcedb393be1ae9e2f270533e1d62f99d01308 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3161.py @@ -0,0 +1,142 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Time-Stamp Protocol (TSP) +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc3161.txt +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc4210 +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5652 + + +Extensions = rfc5280.Extensions + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + +GeneralName = rfc5280.GeneralName + +ContentInfo = rfc5652.ContentInfo + +PKIFreeText = rfc4210.PKIFreeText + + +id_ct_TSTInfo = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.4') + + +class Accuracy(univ.Sequence): + pass + +Accuracy.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('seconds', univ.Integer()), + namedtype.OptionalNamedType('millis', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 999)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('micros', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 999)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class MessageImprint(univ.Sequence): + pass + +MessageImprint.componentType = namedtype.NamedTypes( + namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('hashedMessage', univ.OctetString()) +) + + +class PKIFailureInfo(univ.BitString): + pass + +PKIFailureInfo.namedValues = namedval.NamedValues( + ('badAlg', 0), + ('badRequest', 2), + ('badDataFormat', 5), + ('timeNotAvailable', 14), + ('unacceptedPolicy', 15), + ('unacceptedExtension', 16), + ('addInfoNotAvailable', 17), + ('systemFailure', 25) +) + + +class PKIStatus(univ.Integer): + pass + +PKIStatus.namedValues = namedval.NamedValues( + ('granted', 0), + ('grantedWithMods', 1), + ('rejection', 2), + ('waiting', 3), + ('revocationWarning', 4), + ('revocationNotification', 5) +) + + +class PKIStatusInfo(univ.Sequence): + pass + +PKIStatusInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('status', PKIStatus()), + namedtype.OptionalNamedType('statusString', PKIFreeText()), + namedtype.OptionalNamedType('failInfo', 
PKIFailureInfo()) +) + + +class TSAPolicyId(univ.ObjectIdentifier): + pass + + +class TSTInfo(univ.Sequence): + pass + +TSTInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 1)))), + namedtype.NamedType('policy', TSAPolicyId()), + namedtype.NamedType('messageImprint', MessageImprint()), + namedtype.NamedType('serialNumber', univ.Integer()), + namedtype.NamedType('genTime', useful.GeneralizedTime()), + namedtype.OptionalNamedType('accuracy', Accuracy()), + namedtype.DefaultedNamedType('ordering', univ.Boolean().subtype(value=0)), + namedtype.OptionalNamedType('nonce', univ.Integer()), + namedtype.OptionalNamedType('tsa', GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('extensions', Extensions().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class TimeStampReq(univ.Sequence): + pass + +TimeStampReq.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 1)))), + namedtype.NamedType('messageImprint', MessageImprint()), + namedtype.OptionalNamedType('reqPolicy', TSAPolicyId()), + namedtype.OptionalNamedType('nonce', univ.Integer()), + namedtype.DefaultedNamedType('certReq', univ.Boolean().subtype(value=0)), + namedtype.OptionalNamedType('extensions', Extensions().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class TimeStampToken(ContentInfo): + pass + + +class TimeStampResp(univ.Sequence): + pass + +TimeStampResp.componentType = namedtype.NamedTypes( + namedtype.NamedType('status', PKIStatusInfo()), + namedtype.OptionalNamedType('timeStampToken', TimeStampToken()) +) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3274.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3274.py new file mode 100644 index 0000000000000000000000000000000000000000..425e006f3ddb54e3ddbf3a85da585815943bfb25 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3274.py @@ -0,0 +1,59 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# Modified by Russ Housley to add a map for use with opentypes. 
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Compressed Data Content Type
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3274.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+class CompressionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+    pass
+
+
+# The CMS Compressed Data Content Type
+
+id_ct_compressedData = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.9')
+
+class CompressedData(univ.Sequence):
+    pass
+
+CompressedData.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('version', rfc5652.CMSVersion()),  # Always set to 0
+    namedtype.NamedType('compressionAlgorithm', CompressionAlgorithmIdentifier()),
+    namedtype.NamedType('encapContentInfo', rfc5652.EncapsulatedContentInfo())
+)
+
+
+# Algorithm identifier for the zLib Compression Algorithm
+# This includes cpa_zlibCompress as defined in RFC 6268,
+# from https://www.rfc-editor.org/rfc/rfc6268.txt
+
+id_alg_zlibCompress = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.8')
+
+cpa_zlibCompress = rfc5280.AlgorithmIdentifier()
+cpa_zlibCompress['algorithm'] = id_alg_zlibCompress
+# cpa_zlibCompress['parameters'] are absent
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+    id_ct_compressedData: CompressedData(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3279.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3279.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6e24deafc3e52bb79b0ff0811747b1d75899c65
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3279.py
@@ -0,0 +1,260 @@
+#
+# This file is part of pyasn1-modules.
+#
+# Copyright (c) 2017, Danielle Madeley
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Modified by Russ Housley to add maps for use with opentypes.
+# +# Algorithms and Identifiers for Internet X.509 Certificates and CRLs +# +# Derived from RFC 3279: +# https://www.rfc-editor.org/rfc/rfc3279.txt +# +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +def _OID(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + + return univ.ObjectIdentifier(output) + + +md2 = _OID(1, 2, 840, 113549, 2, 2) +md5 = _OID(1, 2, 840, 113549, 2, 5) +id_sha1 = _OID(1, 3, 14, 3, 2, 26) +id_dsa = _OID(1, 2, 840, 10040, 4, 1) + + +class DSAPublicKey(univ.Integer): + pass + + +class Dss_Parms(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('p', univ.Integer()), + namedtype.NamedType('q', univ.Integer()), + namedtype.NamedType('g', univ.Integer()) + ) + + +id_dsa_with_sha1 = _OID(1, 2, 840, 10040, 4, 3) + + +class Dss_Sig_Value(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('r', univ.Integer()), + namedtype.NamedType('s', univ.Integer()) + ) + + +pkcs_1 = _OID(1, 2, 840, 113549, 1, 1) +rsaEncryption = _OID(pkcs_1, 1) +md2WithRSAEncryption = _OID(pkcs_1, 2) +md5WithRSAEncryption = _OID(pkcs_1, 4) +sha1WithRSAEncryption = _OID(pkcs_1, 5) + + +class RSAPublicKey(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('modulus', univ.Integer()), + namedtype.NamedType('publicExponent', univ.Integer()) + ) + + +dhpublicnumber = _OID(1, 2, 840, 10046, 2, 1) + + +class DHPublicKey(univ.Integer): + pass + + +class ValidationParms(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('seed', univ.BitString()), + namedtype.NamedType('pgenCounter', univ.Integer()) + ) + + +class DomainParameters(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('p', univ.Integer()), + namedtype.NamedType('g', univ.Integer()), + namedtype.NamedType('q', univ.Integer()), + namedtype.OptionalNamedType('j', univ.Integer()), + namedtype.OptionalNamedType('validationParms', ValidationParms()) + ) + + +id_keyExchangeAlgorithm = _OID(2, 16, 840, 1, 101, 2, 1, 1, 22) + + +class KEA_Parms_Id(univ.OctetString): + pass + + +ansi_X9_62 = _OID(1, 2, 840, 10045) + + +class FieldID(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('fieldType', univ.ObjectIdentifier()), + namedtype.NamedType('parameters', univ.Any()) + ) + + +id_ecSigType = _OID(ansi_X9_62, 4) +ecdsa_with_SHA1 = _OID(id_ecSigType, 1) + + +class ECDSA_Sig_Value(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('r', univ.Integer()), + namedtype.NamedType('s', univ.Integer()) + ) + + +id_fieldType = _OID(ansi_X9_62, 1) +prime_field = _OID(id_fieldType, 1) + + +class Prime_p(univ.Integer): + pass + + +characteristic_two_field = _OID(id_fieldType, 2) + + +class Characteristic_two(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('m', univ.Integer()), + namedtype.NamedType('basis', univ.ObjectIdentifier()), + namedtype.NamedType('parameters', univ.Any()) + ) + + +id_characteristic_two_basis = _OID(characteristic_two_field, 3) +gnBasis = _OID(id_characteristic_two_basis, 1) +tpBasis = _OID(id_characteristic_two_basis, 2) + + +class Trinomial(univ.Integer): + pass + + +ppBasis = _OID(id_characteristic_two_basis, 3) + + +class Pentanomial(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('k1', univ.Integer()), + 
namedtype.NamedType('k2', univ.Integer()),
+        namedtype.NamedType('k3', univ.Integer())
+    )
+
+
+class FieldElement(univ.OctetString):
+    pass
+
+
+class ECPoint(univ.OctetString):
+    pass
+
+
+class Curve(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('a', FieldElement()),
+        namedtype.NamedType('b', FieldElement()),
+        namedtype.OptionalNamedType('seed', univ.BitString())
+    )
+
+
+class ECPVer(univ.Integer):
+    namedValues = namedval.NamedValues(
+        ('ecpVer1', 1)
+    )
+
+
+class ECParameters(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('version', ECPVer()),
+        namedtype.NamedType('fieldID', FieldID()),
+        namedtype.NamedType('curve', Curve()),
+        namedtype.NamedType('base', ECPoint()),
+        namedtype.NamedType('order', univ.Integer()),
+        namedtype.OptionalNamedType('cofactor', univ.Integer())
+    )
+
+
+class EcpkParameters(univ.Choice):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('ecParameters', ECParameters()),
+        namedtype.NamedType('namedCurve', univ.ObjectIdentifier()),
+        namedtype.NamedType('implicitlyCA', univ.Null())
+    )
+
+
+id_publicKeyType = _OID(ansi_X9_62, 2)
+id_ecPublicKey = _OID(id_publicKeyType, 1)
+
+ellipticCurve = _OID(ansi_X9_62, 3)
+
+c_TwoCurve = _OID(ellipticCurve, 0)
+c2pnb163v1 = _OID(c_TwoCurve, 1)
+c2pnb163v2 = _OID(c_TwoCurve, 2)
+c2pnb163v3 = _OID(c_TwoCurve, 3)
+c2pnb176w1 = _OID(c_TwoCurve, 4)
+c2tnb191v1 = _OID(c_TwoCurve, 5)
+c2tnb191v2 = _OID(c_TwoCurve, 6)
+c2tnb191v3 = _OID(c_TwoCurve, 7)
+c2onb191v4 = _OID(c_TwoCurve, 8)
+c2onb191v5 = _OID(c_TwoCurve, 9)
+c2pnb208w1 = _OID(c_TwoCurve, 10)
+c2tnb239v1 = _OID(c_TwoCurve, 11)
+c2tnb239v2 = _OID(c_TwoCurve, 12)
+c2tnb239v3 = _OID(c_TwoCurve, 13)
+c2onb239v4 = _OID(c_TwoCurve, 14)
+c2onb239v5 = _OID(c_TwoCurve, 15)
+c2pnb272w1 = _OID(c_TwoCurve, 16)
+c2pnb304w1 = _OID(c_TwoCurve, 17)
+c2tnb359v1 = _OID(c_TwoCurve, 18)
+c2pnb368w1 = _OID(c_TwoCurve, 19)
+c2tnb431r1 = _OID(c_TwoCurve, 20)
+
+primeCurve = _OID(ellipticCurve, 1)
+prime192v1 = _OID(primeCurve, 1)
+prime192v2 = _OID(primeCurve, 2)
+prime192v3 = _OID(primeCurve, 3)
+prime239v1 = _OID(primeCurve, 4)
+prime239v2 = _OID(primeCurve, 5)
+prime239v3 = _OID(primeCurve, 6)
+prime256v1 = _OID(primeCurve, 7)
+
+
+# Map of Algorithm Identifier OIDs to Parameters added to the
+# ones in rfc5280.py. Do not add OIDs with absent parameters.
+
+_algorithmIdentifierMapUpdate = {
+    md2: univ.Null(""),
+    md5: univ.Null(""),
+    id_sha1: univ.Null(""),
+    id_dsa: Dss_Parms(),
+    rsaEncryption: univ.Null(""),
+    md2WithRSAEncryption: univ.Null(""),
+    md5WithRSAEncryption: univ.Null(""),
+    sha1WithRSAEncryption: univ.Null(""),
+    dhpublicnumber: DomainParameters(),
+    id_keyExchangeAlgorithm: KEA_Parms_Id(),
+    id_ecPublicKey: EcpkParameters(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3280.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3280.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c6df13280461ffe38e1e32f0ef8e759242b16fe
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3280.py
@@ -0,0 +1,1543 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
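Likewise, the _algorithmIdentifierMapUpdate in rfc3279.py above feeds rfc5280.algorithmIdentifierMap, so open-type decoding can recover typed parameters (for example Dss_Parms for id-dsa). A sketch, assuming algid.der is a hypothetical file with a DER-encoded AlgorithmIdentifier:

from pyasn1.codec.der import decoder
from pyasn1_modules import rfc3279, rfc5280

algid_der = open('algid.der', 'rb').read()  # hypothetical input file
algid, _ = decoder.decode(algid_der, asn1Spec=rfc5280.AlgorithmIdentifier(), decodeOpenTypes=True)
if algid['algorithm'] == rfc3279.id_dsa:
    print(algid['parameters']['p'])  # parameters arrive as Dss_Parms, not raw ANY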
+# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# Internet X.509 Public Key Infrastructure Certificate and Certificate +# Revocation List (CRL) Profile +# +# ASN.1 source from: +# http://www.ietf.org/rfc/rfc3280.txt +# +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +MAX = float('inf') + + +def _OID(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + + return univ.ObjectIdentifier(output) + + +unformatted_postal_address = univ.Integer(16) + +ub_organizational_units = univ.Integer(4) + +ub_organizational_unit_name_length = univ.Integer(32) + + +class OrganizationalUnitName(char.PrintableString): + pass + + +OrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length) + + +class OrganizationalUnitNames(univ.SequenceOf): + pass + + +OrganizationalUnitNames.componentType = OrganizationalUnitName() +OrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units) + + +class AttributeType(univ.ObjectIdentifier): + pass + + +id_at = _OID(2, 5, 4) + +id_at_name = _OID(id_at, 41) + +ub_pds_parameter_length = univ.Integer(30) + + +class PDSParameter(univ.Set): + pass + + +PDSParameter.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))), + namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))) +) + + +class PhysicalDeliveryOrganizationName(PDSParameter): + pass + + +ub_organization_name_length = univ.Integer(64) + +ub_domain_defined_attribute_type_length = univ.Integer(8) + +ub_domain_defined_attribute_value_length = univ.Integer(128) + + +class TeletexDomainDefinedAttribute(univ.Sequence): + pass + + +TeletexDomainDefinedAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('type', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))), + namedtype.NamedType('value', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length))) +) + +id_pkix = _OID(1, 3, 6, 1, 5, 5, 7) + +id_qt = _OID(id_pkix, 2) + + +class PresentationAddress(univ.Sequence): + pass + + +PresentationAddress.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) +) + + +class AlgorithmIdentifier(univ.Sequence): + pass + + +AlgorithmIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('algorithm', univ.ObjectIdentifier()), + namedtype.OptionalNamedType('parameters', univ.Any()) +) + + 
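For the AlgorithmIdentifier just defined, 'parameters' is a bare ANY, so callers supply pre-encoded bytes. A minimal sketch (sha1WithRSAEncryption conventionally carries an explicit NULL):

from pyasn1.codec.der import encoder
from pyasn1.type import univ
from pyasn1_modules import rfc3280

algid = rfc3280.AlgorithmIdentifier()
algid['algorithm'] = univ.ObjectIdentifier('1.2.840.113549.1.1.5')  # sha1WithRSAEncryption
algid['parameters'] = encoder.encode(univ.Null(''))  # NULL carried as pre-encoded ANY bytes
der_algid = encoder.encode(algid)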
+class UniqueIdentifier(univ.BitString): + pass + + +class Extension(univ.Sequence): + pass + + +Extension.componentType = namedtype.NamedTypes( + namedtype.NamedType('extnID', univ.ObjectIdentifier()), + namedtype.DefaultedNamedType('critical', univ.Boolean().subtype(value=0)), + namedtype.NamedType('extnValue', univ.OctetString()) +) + + +class Extensions(univ.SequenceOf): + pass + + +Extensions.componentType = Extension() +Extensions.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class CertificateSerialNumber(univ.Integer): + pass + + +class SubjectPublicKeyInfo(univ.Sequence): + pass + + +SubjectPublicKeyInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('algorithm', AlgorithmIdentifier()), + namedtype.NamedType('subjectPublicKey', univ.BitString()) +) + + +class Time(univ.Choice): + pass + + +Time.componentType = namedtype.NamedTypes( + namedtype.NamedType('utcTime', useful.UTCTime()), + namedtype.NamedType('generalTime', useful.GeneralizedTime()) +) + + +class Validity(univ.Sequence): + pass + + +Validity.componentType = namedtype.NamedTypes( + namedtype.NamedType('notBefore', Time()), + namedtype.NamedType('notAfter', Time()) +) + + +class Version(univ.Integer): + pass + + +Version.namedValues = namedval.NamedValues( + ('v1', 0), + ('v2', 1), + ('v3', 2) +) + + +class AttributeValue(univ.Any): + pass + + +class AttributeTypeAndValue(univ.Sequence): + pass + + +AttributeTypeAndValue.componentType = namedtype.NamedTypes( + namedtype.NamedType('type', AttributeType()), + namedtype.NamedType('value', AttributeValue()) +) + + +class RelativeDistinguishedName(univ.SetOf): + pass + + +RelativeDistinguishedName.componentType = AttributeTypeAndValue() +RelativeDistinguishedName.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class RDNSequence(univ.SequenceOf): + pass + + +RDNSequence.componentType = RelativeDistinguishedName() + + +class Name(univ.Choice): + pass + + +Name.componentType = namedtype.NamedTypes( + namedtype.NamedType('rdnSequence', RDNSequence()) +) + + +class TBSCertificate(univ.Sequence): + pass + + +TBSCertificate.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', + Version().subtype(explicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value="v1")), + namedtype.NamedType('serialNumber', CertificateSerialNumber()), + namedtype.NamedType('signature', AlgorithmIdentifier()), + namedtype.NamedType('issuer', Name()), + namedtype.NamedType('validity', Validity()), + namedtype.NamedType('subject', Name()), + namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()), + namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('extensions', + Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) +) + + +class Certificate(univ.Sequence): + pass + + +Certificate.componentType = namedtype.NamedTypes( + namedtype.NamedType('tbsCertificate', TBSCertificate()), + namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()) +) + +ub_surname_length = univ.Integer(40) + + +class TeletexOrganizationName(char.TeletexString): + pass + + +TeletexOrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length) + 
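With Certificate and TBSCertificate now in place, parsing a DER certificate is a single decoder call. A sketch, with cert.der standing in for a hypothetical input file:

from pyasn1.codec.der import decoder
from pyasn1_modules import rfc3280

cert_der = open('cert.der', 'rb').read()  # hypothetical input file
cert, rest = decoder.decode(cert_der, asn1Spec=rfc3280.Certificate())
assert not rest  # a well-formed file contains exactly one Certificate
tbs = cert['tbsCertificate']
print(tbs['serialNumber'])
print(tbs['validity']['notAfter'])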
+ub_e163_4_sub_address_length = univ.Integer(40) + +teletex_common_name = univ.Integer(2) + +ub_country_name_alpha_length = univ.Integer(2) + +ub_country_name_numeric_length = univ.Integer(3) + + +class CountryName(univ.Choice): + pass + + +CountryName.tagSet = univ.Choice.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)) +CountryName.componentType = namedtype.NamedTypes( + namedtype.NamedType('x121-dcc-code', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))), + namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length))) +) + +extension_OR_address_components = univ.Integer(12) + +id_at_dnQualifier = _OID(id_at, 46) + +ub_e163_4_number_length = univ.Integer(15) + + +class ExtendedNetworkAddress(univ.Choice): + pass + + +ExtendedNetworkAddress.componentType = namedtype.NamedTypes( + namedtype.NamedType('e163-4-address', univ.Sequence(componentType=namedtype.NamedTypes( + namedtype.NamedType('number', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('sub-address', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + )) + ), + namedtype.NamedType('psap-address', PresentationAddress().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) +) + +terminal_type = univ.Integer(23) + +id_domainComponent = _OID(0, 9, 2342, 19200300, 100, 1, 25) + +ub_state_name = univ.Integer(128) + + +class X520StateOrProvinceName(univ.Choice): + pass + + +X520StateOrProvinceName.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))), + namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))) +) + +ub_organization_name = univ.Integer(64) + + +class X520OrganizationName(univ.Choice): + pass + + +X520OrganizationName.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))), + namedtype.NamedType('printableString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))), + namedtype.NamedType('universalString', char.UniversalString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))) +) + +ub_emailaddress_length = 
univ.Integer(128) + + +class ExtensionPhysicalDeliveryAddressComponents(PDSParameter): + pass + + +id_at_surname = _OID(id_at, 4) + +ub_common_name_length = univ.Integer(64) + +id_ad = _OID(id_pkix, 48) + +ub_numeric_user_id_length = univ.Integer(32) + + +class NumericUserIdentifier(char.NumericString): + pass + + +NumericUserIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_numeric_user_id_length) + + +class OrganizationName(char.PrintableString): + pass + + +OrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length) + +ub_domain_name_length = univ.Integer(16) + + +class AdministrationDomainName(univ.Choice): + pass + + +AdministrationDomainName.tagSet = univ.Choice.tagSet.tagExplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2)) +AdministrationDomainName.componentType = namedtype.NamedTypes( + namedtype.NamedType('numeric', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))), + namedtype.NamedType('printable', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))) +) + + +class PrivateDomainName(univ.Choice): + pass + + +PrivateDomainName.componentType = namedtype.NamedTypes( + namedtype.NamedType('numeric', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))), + namedtype.NamedType('printable', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))) +) + +ub_generation_qualifier_length = univ.Integer(3) + +ub_given_name_length = univ.Integer(16) + +ub_initials_length = univ.Integer(5) + + +class PersonalName(univ.Set): + pass + + +PersonalName.componentType = namedtype.NamedTypes( + namedtype.NamedType('surname', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('given-name', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('initials', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) +) + +ub_terminal_id_length = univ.Integer(24) + + +class TerminalIdentifier(char.PrintableString): + pass + + +TerminalIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_terminal_id_length) + +ub_x121_address_length = univ.Integer(16) + + +class X121Address(char.NumericString): + pass + + +X121Address.subtypeSpec = constraint.ValueSizeConstraint(1, ub_x121_address_length) + + +class NetworkAddress(X121Address): + pass + + +class BuiltInStandardAttributes(univ.Sequence): + pass + + +BuiltInStandardAttributes.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('country-name', CountryName()), + namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()), + namedtype.OptionalNamedType('network-address', NetworkAddress().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + 
namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.OptionalNamedType('organization-name', OrganizationName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))), + namedtype.OptionalNamedType('personal-name', PersonalName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))), + namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))) +) + +ub_domain_defined_attributes = univ.Integer(4) + + +class BuiltInDomainDefinedAttribute(univ.Sequence): + pass + + +BuiltInDomainDefinedAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('type', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))), + namedtype.NamedType('value', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length))) +) + + +class BuiltInDomainDefinedAttributes(univ.SequenceOf): + pass + + +BuiltInDomainDefinedAttributes.componentType = BuiltInDomainDefinedAttribute() +BuiltInDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes) + +ub_extension_attributes = univ.Integer(256) + + +class ExtensionAttribute(univ.Sequence): + pass + + +ExtensionAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('extension-attribute-type', univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(0, ub_extension_attributes)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('extension-attribute-value', + univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class ExtensionAttributes(univ.SetOf): + pass + + +ExtensionAttributes.componentType = ExtensionAttribute() +ExtensionAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_extension_attributes) + + +class ORAddress(univ.Sequence): + pass + + +ORAddress.componentType = namedtype.NamedTypes( + namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()), + namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()), + namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes()) +) + +id_pe = _OID(id_pkix, 1) + +ub_title = univ.Integer(64) + + +class X520Title(univ.Choice): + pass + + +X520Title.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))), + namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))), + namedtype.NamedType('bmpString', 
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))) +) + +id_at_organizationalUnitName = _OID(id_at, 11) + + +class EmailAddress(char.IA5String): + pass + + +EmailAddress.subtypeSpec = constraint.ValueSizeConstraint(1, ub_emailaddress_length) + +physical_delivery_country_name = univ.Integer(8) + +id_at_givenName = _OID(id_at, 42) + + +class TeletexCommonName(char.TeletexString): + pass + + +TeletexCommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length) + +id_qt_cps = _OID(id_qt, 1) + + +class LocalPostalAttributes(PDSParameter): + pass + + +class StreetAddress(PDSParameter): + pass + + +id_kp = _OID(id_pkix, 3) + + +class DirectoryString(univ.Choice): + pass + + +DirectoryString.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) +) + + +class DomainComponent(char.IA5String): + pass + + +id_at_initials = _OID(id_at, 43) + +id_qt_unotice = _OID(id_qt, 2) + +ub_pds_name_length = univ.Integer(16) + + +class PDSName(char.PrintableString): + pass + + +PDSName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_pds_name_length) + + +class PosteRestanteAddress(PDSParameter): + pass + + +class DistinguishedName(RDNSequence): + pass + + +class CommonName(char.PrintableString): + pass + + +CommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length) + +ub_serial_number = univ.Integer(64) + + +class X520SerialNumber(char.PrintableString): + pass + + +X520SerialNumber.subtypeSpec = constraint.ValueSizeConstraint(1, ub_serial_number) + +id_at_generationQualifier = _OID(id_at, 44) + +ub_organizational_unit_name = univ.Integer(64) + +id_ad_ocsp = _OID(id_ad, 1) + + +class TeletexOrganizationalUnitName(char.TeletexString): + pass + + +TeletexOrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length) + + +class TeletexPersonalName(univ.Set): + pass + + +TeletexPersonalName.componentType = namedtype.NamedTypes( + namedtype.NamedType('surname', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('given-name', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('initials', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) +) + + +class TeletexDomainDefinedAttributes(univ.SequenceOf): + pass + + 
+TeletexDomainDefinedAttributes.componentType = TeletexDomainDefinedAttribute() +TeletexDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes) + + +class TBSCertList(univ.Sequence): + pass + + +TBSCertList.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('version', Version()), + namedtype.NamedType('signature', AlgorithmIdentifier()), + namedtype.NamedType('issuer', Name()), + namedtype.NamedType('thisUpdate', Time()), + namedtype.OptionalNamedType('nextUpdate', Time()), + namedtype.OptionalNamedType('revokedCertificates', + univ.SequenceOf(componentType=univ.Sequence(componentType=namedtype.NamedTypes( + namedtype.NamedType('userCertificate', CertificateSerialNumber()), + namedtype.NamedType('revocationDate', Time()), + namedtype.OptionalNamedType('crlEntryExtensions', Extensions()) + )) + )), + namedtype.OptionalNamedType('crlExtensions', + Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + +local_postal_attributes = univ.Integer(21) + +pkcs_9 = _OID(1, 2, 840, 113549, 1, 9) + + +class PhysicalDeliveryCountryName(univ.Choice): + pass + + +PhysicalDeliveryCountryName.componentType = namedtype.NamedTypes( + namedtype.NamedType('x121-dcc-code', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))), + namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length))) +) + +ub_name = univ.Integer(32768) + + +class X520name(univ.Choice): + pass + + +X520name.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))), + namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))), + namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))) +) + +id_emailAddress = _OID(pkcs_9, 1) + + +class TerminalType(univ.Integer): + pass + + +TerminalType.namedValues = namedval.NamedValues( + ('telex', 3), + ('teletex', 4), + ('g3-facsimile', 5), + ('g4-facsimile', 6), + ('ia5-terminal', 7), + ('videotex', 8) +) + + +class X520OrganizationalUnitName(univ.Choice): + pass + + +X520OrganizationalUnitName.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))), + namedtype.NamedType('printableString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))), + namedtype.NamedType('universalString', char.UniversalString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))), + namedtype.NamedType('utf8String', char.UTF8String().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))), + namedtype.NamedType('bmpString', char.BMPString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))) +) + +id_at_commonName = _OID(id_at, 3) + +pds_name = univ.Integer(7) 
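Since id_at_commonName has just been defined, here is a sketch of assembling a one-RDN Name with it. The AttributeValue slot is an ANY, so the chosen string type is DER-encoded first; 'Example CA' is an arbitrary sample value:

from pyasn1.codec.der import encoder
from pyasn1.type import char
from pyasn1_modules import rfc3280

atv = rfc3280.AttributeTypeAndValue()
atv['type'] = rfc3280.id_at_commonName
atv['value'] = encoder.encode(char.PrintableString('Example CA'))  # pre-encoded ANY

rdn = rfc3280.RelativeDistinguishedName()
rdn.append(atv)

rdn_seq = rfc3280.RDNSequence()
rdn_seq.append(rdn)

name = rfc3280.Name()
name['rdnSequence'] = rdn_seq  # selects the (only) CHOICE alternative
der_name = encoder.encode(name)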
+ +post_office_box_address = univ.Integer(18) + +ub_locality_name = univ.Integer(128) + + +class X520LocalityName(univ.Choice): + pass + + +X520LocalityName.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))), + namedtype.NamedType('printableString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))), + namedtype.NamedType('universalString', char.UniversalString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))) +) + +id_ad_timeStamping = _OID(id_ad, 3) + +id_at_countryName = _OID(id_at, 6) + +physical_delivery_personal_name = univ.Integer(13) + +teletex_personal_name = univ.Integer(4) + +teletex_organizational_unit_names = univ.Integer(5) + + +class PhysicalDeliveryPersonalName(PDSParameter): + pass + + +ub_postal_code_length = univ.Integer(16) + + +class PostalCode(univ.Choice): + pass + + +PostalCode.componentType = namedtype.NamedTypes( + namedtype.NamedType('numeric-code', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))), + namedtype.NamedType('printable-code', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))) +) + + +class X520countryName(char.PrintableString): + pass + + +X520countryName.subtypeSpec = constraint.ValueSizeConstraint(2, 2) + +postal_code = univ.Integer(9) + +id_ad_caRepository = _OID(id_ad, 5) + +extension_physical_delivery_address_components = univ.Integer(15) + + +class PostOfficeBoxAddress(PDSParameter): + pass + + +class PhysicalDeliveryOfficeName(PDSParameter): + pass + + +id_at_title = _OID(id_at, 12) + +id_at_serialNumber = _OID(id_at, 5) + +id_ad_caIssuers = _OID(id_ad, 2) + +ub_integer_options = univ.Integer(256) + + +class CertificateList(univ.Sequence): + pass + + +CertificateList.componentType = namedtype.NamedTypes( + namedtype.NamedType('tbsCertList', TBSCertList()), + namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()) +) + + +class PhysicalDeliveryOfficeNumber(PDSParameter): + pass + + +class TeletexOrganizationalUnitNames(univ.SequenceOf): + pass + + +TeletexOrganizationalUnitNames.componentType = TeletexOrganizationalUnitName() +TeletexOrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units) + +physical_delivery_office_name = univ.Integer(10) + +ub_common_name = univ.Integer(64) + + +class ExtensionORAddressComponents(PDSParameter): + pass + + +ub_pseudonym = univ.Integer(128) + +poste_restante_address = univ.Integer(19) + +id_at_organizationName = _OID(id_at, 10) + +physical_delivery_office_number = univ.Integer(11) + +id_at_pseudonym = _OID(id_at, 65) + + +class X520CommonName(univ.Choice): + pass + + +X520CommonName.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))), + namedtype.NamedType('universalString', + 
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))) +) + +physical_delivery_organization_name = univ.Integer(14) + + +class X520dnQualifier(char.PrintableString): + pass + + +id_at_stateOrProvinceName = _OID(id_at, 8) + +common_name = univ.Integer(1) + +id_at_localityName = _OID(id_at, 7) + +ub_match = univ.Integer(128) + +ub_unformatted_address_length = univ.Integer(180) + + +class Attribute(univ.Sequence): + pass + + +Attribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('type', AttributeType()), + namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue())) +) + +extended_network_address = univ.Integer(22) + +unique_postal_name = univ.Integer(20) + +ub_pds_physical_address_lines = univ.Integer(6) + + +class UnformattedPostalAddress(univ.Set): + pass + + +UnformattedPostalAddress.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))), + namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length))) +) + + +class UniquePostalName(PDSParameter): + pass + + +class X520Pseudonym(univ.Choice): + pass + + +X520Pseudonym.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))), + namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))) +) + +teletex_organization_name = univ.Integer(3) + +teletex_domain_defined_attributes = univ.Integer(6) + +street_address = univ.Integer(17) + +id_kp_OCSPSigning = _OID(id_kp, 9) + +id_ce = _OID(2, 5, 29) + +id_ce_certificatePolicies = _OID(id_ce, 32) + + +class EDIPartyName(univ.Sequence): + pass + + +EDIPartyName.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('partyName', + DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class AnotherName(univ.Sequence): + pass + + +AnotherName.componentType = namedtype.NamedTypes( + namedtype.NamedType('type-id', univ.ObjectIdentifier()), + namedtype.NamedType('value', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class GeneralName(univ.Choice): + pass + + +GeneralName.componentType = namedtype.NamedTypes( + namedtype.NamedType('otherName', + AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('rfc822Name', + 
char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('dNSName', + char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('x400Address', + ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.NamedType('directoryName', + Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))), + namedtype.NamedType('ediPartyName', + EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))), + namedtype.NamedType('uniformResourceIdentifier', + char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))), + namedtype.NamedType('iPAddress', + univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))), + namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))) +) + + +class GeneralNames(univ.SequenceOf): + pass + + +GeneralNames.componentType = GeneralName() +GeneralNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class IssuerAltName(GeneralNames): + pass + + +id_ce_cRLDistributionPoints = _OID(id_ce, 31) + + +class CertPolicyId(univ.ObjectIdentifier): + pass + + +class PolicyMappings(univ.SequenceOf): + pass + + +PolicyMappings.componentType = univ.Sequence(componentType=namedtype.NamedTypes( + namedtype.NamedType('issuerDomainPolicy', CertPolicyId()), + namedtype.NamedType('subjectDomainPolicy', CertPolicyId()) +)) + +PolicyMappings.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class PolicyQualifierId(univ.ObjectIdentifier): + pass + + +holdInstruction = _OID(2, 2, 840, 10040, 2) + +id_ce_subjectDirectoryAttributes = _OID(id_ce, 9) + +id_holdinstruction_callissuer = _OID(holdInstruction, 2) + + +class SubjectDirectoryAttributes(univ.SequenceOf): + pass + + +SubjectDirectoryAttributes.componentType = Attribute() +SubjectDirectoryAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + +anyPolicy = _OID(id_ce_certificatePolicies, 0) + +id_ce_subjectAltName = _OID(id_ce, 17) + +id_kp_emailProtection = _OID(id_kp, 4) + + +class ReasonFlags(univ.BitString): + pass + + +ReasonFlags.namedValues = namedval.NamedValues( + ('unused', 0), + ('keyCompromise', 1), + ('cACompromise', 2), + ('affiliationChanged', 3), + ('superseded', 4), + ('cessationOfOperation', 5), + ('certificateHold', 6), + ('privilegeWithdrawn', 7), + ('aACompromise', 8) +) + + +class DistributionPointName(univ.Choice): + pass + + +DistributionPointName.componentType = namedtype.NamedTypes( + namedtype.NamedType('fullName', + GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class DistributionPoint(univ.Sequence): + pass + + +DistributionPoint.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('reasons', ReasonFlags().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) +) + +id_ce_keyUsage = _OID(id_ce, 15) + + 
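GeneralName is a heavily tagged CHOICE, but pyasn1 applies the context tags from the spec, so selecting an alternative is a plain assignment. A short sketch using a sample DNS name:

from pyasn1.codec.der import encoder
from pyasn1_modules import rfc3280

gn = rfc3280.GeneralName()
gn['dNSName'] = 'www.example.com'  # implicit [2] IA5String tag comes from the spec

names = rfc3280.GeneralNames()
names.append(gn)
der_names = encoder.encode(names)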
+class PolicyQualifierInfo(univ.Sequence): + pass + + +PolicyQualifierInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('policyQualifierId', PolicyQualifierId()), + namedtype.NamedType('qualifier', univ.Any()) +) + + +class PolicyInformation(univ.Sequence): + pass + + +PolicyInformation.componentType = namedtype.NamedTypes( + namedtype.NamedType('policyIdentifier', CertPolicyId()), + namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo())) +) + + +class CertificatePolicies(univ.SequenceOf): + pass + + +CertificatePolicies.componentType = PolicyInformation() +CertificatePolicies.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + +id_ce_basicConstraints = _OID(id_ce, 19) + + +class HoldInstructionCode(univ.ObjectIdentifier): + pass + + +class KeyPurposeId(univ.ObjectIdentifier): + pass + + +class ExtKeyUsageSyntax(univ.SequenceOf): + pass + + +ExtKeyUsageSyntax.componentType = KeyPurposeId() +ExtKeyUsageSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class SubjectAltName(GeneralNames): + pass + + +class BasicConstraints(univ.Sequence): + pass + + +BasicConstraints.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('cA', univ.Boolean().subtype(value=0)), + namedtype.OptionalNamedType('pathLenConstraint', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))) +) + + +class SkipCerts(univ.Integer): + pass + + +SkipCerts.subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +class InhibitAnyPolicy(SkipCerts): + pass + + +class CRLNumber(univ.Integer): + pass + + +CRLNumber.subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +class BaseCRLNumber(CRLNumber): + pass + + +class KeyIdentifier(univ.OctetString): + pass + + +class AuthorityKeyIdentifier(univ.Sequence): + pass + + +AuthorityKeyIdentifier.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) +) + +id_ce_nameConstraints = _OID(id_ce, 30) + +id_kp_serverAuth = _OID(id_kp, 1) + +id_ce_freshestCRL = _OID(id_ce, 46) + +id_ce_cRLReasons = _OID(id_ce, 21) + + +class CRLDistributionPoints(univ.SequenceOf): + pass + + +CRLDistributionPoints.componentType = DistributionPoint() +CRLDistributionPoints.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class FreshestCRL(CRLDistributionPoints): + pass + + +id_ce_inhibitAnyPolicy = _OID(id_ce, 54) + + +class CRLReason(univ.Enumerated): + pass + + +CRLReason.namedValues = namedval.NamedValues( + ('unspecified', 0), + ('keyCompromise', 1), + ('cACompromise', 2), + ('affiliationChanged', 3), + ('superseded', 4), + ('cessationOfOperation', 5), + ('certificateHold', 6), + ('removeFromCRL', 8), + ('privilegeWithdrawn', 9), + ('aACompromise', 10) +) + + +class BaseDistance(univ.Integer): + pass + + +BaseDistance.subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +class GeneralSubtree(univ.Sequence): + pass + + +GeneralSubtree.componentType = namedtype.NamedTypes( + namedtype.NamedType('base', GeneralName()), + namedtype.DefaultedNamedType('minimum', BaseDistance().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 
0)).subtype(value=0)), + namedtype.OptionalNamedType('maximum', BaseDistance().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class GeneralSubtrees(univ.SequenceOf): + pass + + +GeneralSubtrees.componentType = GeneralSubtree() +GeneralSubtrees.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class NameConstraints(univ.Sequence): + pass + + +NameConstraints.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + +id_pe_authorityInfoAccess = _OID(id_pe, 1) + +id_pe_subjectInfoAccess = _OID(id_pe, 11) + +id_ce_certificateIssuer = _OID(id_ce, 29) + +id_ce_invalidityDate = _OID(id_ce, 24) + + +class DirectoryString(univ.Choice): + pass + + +DirectoryString.componentType = namedtype.NamedTypes( + namedtype.NamedType('any', univ.Any()) +) + +id_ce_authorityKeyIdentifier = _OID(id_ce, 35) + + +class AccessDescription(univ.Sequence): + pass + + +AccessDescription.componentType = namedtype.NamedTypes( + namedtype.NamedType('accessMethod', univ.ObjectIdentifier()), + namedtype.NamedType('accessLocation', GeneralName()) +) + + +class AuthorityInfoAccessSyntax(univ.SequenceOf): + pass + + +AuthorityInfoAccessSyntax.componentType = AccessDescription() +AuthorityInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + +id_ce_issuingDistributionPoint = _OID(id_ce, 28) + + +class CPSuri(char.IA5String): + pass + + +class DisplayText(univ.Choice): + pass + + +DisplayText.componentType = namedtype.NamedTypes( + namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))), + namedtype.NamedType('visibleString', + char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))), + namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))), + namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))) +) + + +class NoticeReference(univ.Sequence): + pass + + +NoticeReference.componentType = namedtype.NamedTypes( + namedtype.NamedType('organization', DisplayText()), + namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer())) +) + + +class UserNotice(univ.Sequence): + pass + + +UserNotice.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('noticeRef', NoticeReference()), + namedtype.OptionalNamedType('explicitText', DisplayText()) +) + + +class PrivateKeyUsagePeriod(univ.Sequence): + pass + + +PrivateKeyUsagePeriod.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + +id_ce_subjectKeyIdentifier = _OID(id_ce, 14) + + +class CertificateIssuer(GeneralNames): + pass + + +class InvalidityDate(useful.GeneralizedTime): + pass + + +class SubjectInfoAccessSyntax(univ.SequenceOf): + pass + + +SubjectInfoAccessSyntax.componentType = AccessDescription() +SubjectInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class KeyUsage(univ.BitString): + pass + + +KeyUsage.namedValues = 
namedval.NamedValues( + ('digitalSignature', 0), + ('nonRepudiation', 1), + ('keyEncipherment', 2), + ('dataEncipherment', 3), + ('keyAgreement', 4), + ('keyCertSign', 5), + ('cRLSign', 6), + ('encipherOnly', 7), + ('decipherOnly', 8) +) + +id_ce_extKeyUsage = _OID(id_ce, 37) + +anyExtendedKeyUsage = _OID(id_ce_extKeyUsage, 0) + +id_ce_privateKeyUsagePeriod = _OID(id_ce, 16) + +id_ce_policyMappings = _OID(id_ce, 33) + +id_ce_cRLNumber = _OID(id_ce, 20) + +id_ce_policyConstraints = _OID(id_ce, 36) + +id_holdinstruction_none = _OID(holdInstruction, 1) + +id_holdinstruction_reject = _OID(holdInstruction, 3) + +id_kp_timeStamping = _OID(id_kp, 8) + + +class PolicyConstraints(univ.Sequence): + pass + + +PolicyConstraints.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('requireExplicitPolicy', + SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('inhibitPolicyMapping', + SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class SubjectKeyIdentifier(KeyIdentifier): + pass + + +id_kp_clientAuth = _OID(id_kp, 2) + +id_ce_deltaCRLIndicator = _OID(id_ce, 27) + +id_ce_issuerAltName = _OID(id_ce, 18) + +id_kp_codeSigning = _OID(id_kp, 3) + +id_ce_holdInstructionCode = _OID(id_ce, 23) + + +class IssuingDistributionPoint(univ.Sequence): + pass + + +IssuingDistributionPoint.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.DefaultedNamedType('onlyContainsUserCerts', univ.Boolean().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(value=0)), + namedtype.DefaultedNamedType('onlyContainsCACerts', univ.Boolean().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)).subtype(value=0)), + namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.DefaultedNamedType('indirectCRL', univ.Boolean().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)).subtype(value=0)), + namedtype.DefaultedNamedType('onlyContainsAttributeCerts', univ.Boolean().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)).subtype(value=0)) +) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3281.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3281.py new file mode 100644 index 0000000000000000000000000000000000000000..a78abf9feaa0b3d8b8d37ffab34a2060480e6eee --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3281.py @@ -0,0 +1,331 @@ +# coding: utf-8 +# +# This file is part of pyasn1-modules software. +# +# Created by Stanisław Pitucha with asn1ate tool. 
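Rounding off rfc3280.py before the next module: extnValue is an OCTET STRING wrapping another DER encoding, so each recognized extension is decoded a second time against its own spec. A sketch for BasicConstraints, with cert.der again a hypothetical input file:

from pyasn1.codec.der import decoder
from pyasn1_modules import rfc3280

cert_der = open('cert.der', 'rb').read()  # hypothetical input file
cert, _ = decoder.decode(cert_der, asn1Spec=rfc3280.Certificate())

exts = cert['tbsCertificate']['extensions']
if exts.isValue:  # extensions are OPTIONAL
    for ext in exts:
        if ext['extnID'] == rfc3280.id_ce_basicConstraints:
            bc, _ = decoder.decode(ext['extnValue'].asOctets(),
                                   asn1Spec=rfc3280.BasicConstraints())
            print('cA =', bool(bc['cA']))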
+# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# An Internet Attribute Certificate Profile for Authorization +# +# ASN.1 source from: +# http://www.ietf.org/rfc/rfc3281.txt +# +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc3280 + +MAX = float('inf') + + +def _buildOid(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + + return univ.ObjectIdentifier(output) + + +class ObjectDigestInfo(univ.Sequence): + pass + + +ObjectDigestInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('digestedObjectType', univ.Enumerated( + namedValues=namedval.NamedValues(('publicKey', 0), ('publicKeyCert', 1), ('otherObjectTypes', 2)))), + namedtype.OptionalNamedType('otherObjectTypeID', univ.ObjectIdentifier()), + namedtype.NamedType('digestAlgorithm', rfc3280.AlgorithmIdentifier()), + namedtype.NamedType('objectDigest', univ.BitString()) +) + + +class IssuerSerial(univ.Sequence): + pass + + +IssuerSerial.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuer', rfc3280.GeneralNames()), + namedtype.NamedType('serial', rfc3280.CertificateSerialNumber()), + namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier()) +) + + +class TargetCert(univ.Sequence): + pass + + +TargetCert.componentType = namedtype.NamedTypes( + namedtype.NamedType('targetCertificate', IssuerSerial()), + namedtype.OptionalNamedType('targetName', rfc3280.GeneralName()), + namedtype.OptionalNamedType('certDigestInfo', ObjectDigestInfo()) +) + + +class Target(univ.Choice): + pass + + +Target.componentType = namedtype.NamedTypes( + namedtype.NamedType('targetName', rfc3280.GeneralName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('targetGroup', rfc3280.GeneralName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('targetCert', + TargetCert().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))) +) + + +class Targets(univ.SequenceOf): + pass + + +Targets.componentType = Target() + + +class ProxyInfo(univ.SequenceOf): + pass + + +ProxyInfo.componentType = Targets() + +id_at_role = _buildOid(rfc3280.id_at, 72) + +id_pe_aaControls = _buildOid(rfc3280.id_pe, 6) + +id_ce_targetInformation = _buildOid(rfc3280.id_ce, 55) + +id_pe_ac_auditIdentity = _buildOid(rfc3280.id_pe, 4) + + +class ClassList(univ.BitString): + pass + + +ClassList.namedValues = namedval.NamedValues( + ('unmarked', 0), + ('unclassified', 1), + ('restricted', 2), + ('confidential', 3), + ('secret', 4), + ('topSecret', 5) +) + + +class SecurityCategory(univ.Sequence): + pass + + +SecurityCategory.componentType = namedtype.NamedTypes( + namedtype.NamedType('type', univ.ObjectIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class Clearance(univ.Sequence): + pass + + +Clearance.componentType = namedtype.NamedTypes( + namedtype.NamedType('policyId', univ.ObjectIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.DefaultedNamedType('classList', + 
ClassList().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 1)).subtype( + value="unclassified")), + namedtype.OptionalNamedType('securityCategories', univ.SetOf(componentType=SecurityCategory()).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) +) + + +class AttCertVersion(univ.Integer): + pass + + +AttCertVersion.namedValues = namedval.NamedValues( + ('v2', 1) +) + +id_aca = _buildOid(rfc3280.id_pkix, 10) + +id_at_clearance = _buildOid(2, 5, 1, 5, 55) + + +class AttrSpec(univ.SequenceOf): + pass + + +AttrSpec.componentType = univ.ObjectIdentifier() + + +class AAControls(univ.Sequence): + pass + + +AAControls.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('pathLenConstraint', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))), + namedtype.OptionalNamedType('permittedAttrs', + AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('excludedAttrs', + AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.DefaultedNamedType('permitUnSpecified', univ.Boolean().subtype(value=1)) +) + + +class AttCertValidityPeriod(univ.Sequence): + pass + + +AttCertValidityPeriod.componentType = namedtype.NamedTypes( + namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()), + namedtype.NamedType('notAfterTime', useful.GeneralizedTime()) +) + + +id_aca_authenticationInfo = _buildOid(id_aca, 1) + + +class V2Form(univ.Sequence): + pass + + +V2Form.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('issuerName', rfc3280.GeneralNames()), + namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) +) + + +class AttCertIssuer(univ.Choice): + pass + + +AttCertIssuer.componentType = namedtype.NamedTypes( + namedtype.NamedType('v1Form', rfc3280.GeneralNames()), + namedtype.NamedType('v2Form', + V2Form().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) +) + + +class Holder(univ.Sequence): + pass + + +Holder.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('entityName', rfc3280.GeneralNames().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))) +) + + +class AttributeCertificateInfo(univ.Sequence): + pass + + +AttributeCertificateInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', AttCertVersion()), + namedtype.NamedType('holder', Holder()), + namedtype.NamedType('issuer', AttCertIssuer()), + namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()), + namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()), + namedtype.NamedType('attrCertValidityPeriod', AttCertValidityPeriod()), + namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())), + namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()), + namedtype.OptionalNamedType('extensions', rfc3280.Extensions()) +) + + +class 
AttributeCertificate(univ.Sequence): + pass + + +AttributeCertificate.componentType = namedtype.NamedTypes( + namedtype.NamedType('acinfo', AttributeCertificateInfo()), + namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()), + namedtype.NamedType('signatureValue', univ.BitString()) +) + +id_mod = _buildOid(rfc3280.id_pkix, 0) + +id_mod_attribute_cert = _buildOid(id_mod, 12) + +id_aca_accessIdentity = _buildOid(id_aca, 2) + + +class RoleSyntax(univ.Sequence): + pass + + +RoleSyntax.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('roleAuthority', rfc3280.GeneralNames().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('roleName', + rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + +id_aca_chargingIdentity = _buildOid(id_aca, 3) + + +class ACClearAttrs(univ.Sequence): + pass + + +ACClearAttrs.componentType = namedtype.NamedTypes( + namedtype.NamedType('acIssuer', rfc3280.GeneralName()), + namedtype.NamedType('acSerial', univ.Integer()), + namedtype.NamedType('attrs', univ.SequenceOf(componentType=rfc3280.Attribute())) +) + +id_aca_group = _buildOid(id_aca, 4) + +id_pe_ac_proxying = _buildOid(rfc3280.id_pe, 10) + + +class SvceAuthInfo(univ.Sequence): + pass + + +SvceAuthInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('service', rfc3280.GeneralName()), + namedtype.NamedType('ident', rfc3280.GeneralName()), + namedtype.OptionalNamedType('authInfo', univ.OctetString()) +) + + +class IetfAttrSyntax(univ.Sequence): + pass + + +IetfAttrSyntax.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType( + 'policyAuthority', rfc3280.GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)) + ), + namedtype.NamedType( + 'values', univ.SequenceOf( + componentType=univ.Choice( + componentType=namedtype.NamedTypes( + namedtype.NamedType('octets', univ.OctetString()), + namedtype.NamedType('oid', univ.ObjectIdentifier()), + namedtype.NamedType('string', char.UTF8String()) + ) + ) + ) + ) +) + +id_aca_encAttrs = _buildOid(id_aca, 6) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3370.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3370.py new file mode 100644 index 0000000000000000000000000000000000000000..51a9d5c5b1d8944c72222fc52998bed240afb09d --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3370.py @@ -0,0 +1,146 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. 
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Cryptographic Message Syntax (CMS) Algorithms
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3370.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3279
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc5753
+from pyasn1_modules import rfc5990
+from pyasn1_modules import rfc8018
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Imports from RFC 3279
+
+dhpublicnumber = rfc3279.dhpublicnumber
+
+dh_public_number = dhpublicnumber
+
+DHPublicKey = rfc3279.DHPublicKey
+
+DomainParameters = rfc3279.DomainParameters
+
+DHDomainParameters = DomainParameters
+
+Dss_Parms = rfc3279.Dss_Parms
+
+Dss_Sig_Value = rfc3279.Dss_Sig_Value
+
+md5 = rfc3279.md5
+
+md5WithRSAEncryption = rfc3279.md5WithRSAEncryption
+
+RSAPublicKey = rfc3279.RSAPublicKey
+
+rsaEncryption = rfc3279.rsaEncryption
+
+ValidationParms = rfc3279.ValidationParms
+
+id_dsa = rfc3279.id_dsa
+
+id_dsa_with_sha1 = rfc3279.id_dsa_with_sha1
+
+id_sha1 = rfc3279.id_sha1
+
+sha_1 = id_sha1
+
+sha1WithRSAEncryption = rfc3279.sha1WithRSAEncryption
+
+
+# Imports from RFC 5753
+
+CBCParameter = rfc5753.CBCParameter
+
+IV = rfc5753.IV
+
+KeyWrapAlgorithm = rfc5753.KeyWrapAlgorithm
+
+
+# Imports from RFC 5990
+
+id_alg_CMS3DESwrap = rfc5990.id_alg_CMS3DESwrap
+
+
+# Imports from RFC 8018
+
+des_EDE3_CBC = rfc8018.des_EDE3_CBC
+
+des_ede3_cbc = des_EDE3_CBC
+
+rc2CBC = rfc8018.rc2CBC
+
+rc2_cbc = rc2CBC
+
+RC2_CBC_Parameter = rfc8018.RC2_CBC_Parameter
+
+RC2CBCParameter = RC2_CBC_Parameter
+
+PBKDF2_params = rfc8018.PBKDF2_params
+
+id_PBKDF2 = rfc8018.id_PBKDF2
+
+
+# The few things that are not already defined elsewhere
+
+hMAC_SHA1 = univ.ObjectIdentifier('1.3.6.1.5.5.8.1.2')
+
+
+id_alg_ESDH = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.5')
+
+
+id_alg_SSDH = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.10')
+
+
+id_alg_CMSRC2wrap = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.7')
+
+
+class RC2ParameterVersion(univ.Integer):
+    pass
+
+
+class RC2wrapParameter(RC2ParameterVersion):
+    pass
+
+
+class Dss_Pub_Key(univ.Integer):
+    pass
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+    hMAC_SHA1: univ.Null(""),
+    id_alg_CMSRC2wrap: RC2wrapParameter(),
+    id_alg_ESDH: KeyWrapAlgorithm(),
+    id_alg_SSDH: KeyWrapAlgorithm(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
+
+
+# Update the S/MIME Capabilities map in rfc5751.py.
+
+_smimeCapabilityMapUpdate = {
+    id_alg_CMSRC2wrap: RC2wrapParameter(),
+    id_alg_ESDH: KeyWrapAlgorithm(),
+    id_alg_SSDH: KeyWrapAlgorithm(),
+}
+
+rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3412.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3412.py
new file mode 100644
index 0000000000000000000000000000000000000000..2cf1e1020f2aeb86b7b52335fd536e6593cdcc4d
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3412.py
@@ -0,0 +1,53 @@
+#
+# This file is part of pyasn1-modules software.
+# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# SNMPv3 message syntax +# +# ASN.1 source from: +# http://www.ietf.org/rfc/rfc3412.txt +# +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc1905 + + +class ScopedPDU(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('contextEngineId', univ.OctetString()), + namedtype.NamedType('contextName', univ.OctetString()), + namedtype.NamedType('data', rfc1905.PDUs()) + ) + + +class ScopedPduData(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('plaintext', ScopedPDU()), + namedtype.NamedType('encryptedPDU', univ.OctetString()), + ) + + +class HeaderData(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('msgID', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))), + namedtype.NamedType('msgMaxSize', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(484, 2147483647))), + namedtype.NamedType('msgFlags', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 1))), + namedtype.NamedType('msgSecurityModel', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 2147483647))) + ) + + +class SNMPv3Message(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('msgVersion', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))), + namedtype.NamedType('msgGlobalData', HeaderData()), + namedtype.NamedType('msgSecurityParameters', univ.OctetString()), + namedtype.NamedType('msgData', ScopedPduData()) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3414.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3414.py new file mode 100644 index 0000000000000000000000000000000000000000..00420cb01cd9f6cdc6751cdc4a35908b7341f824 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3414.py @@ -0,0 +1,28 @@ +# +# This file is part of pyasn1-modules software. +# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# SNMPv3 message syntax +# +# ASN.1 source from: +# http://www.ietf.org/rfc/rfc3414.txt +# +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import univ + + +class UsmSecurityParameters(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('msgAuthoritativeEngineID', univ.OctetString()), + namedtype.NamedType('msgAuthoritativeEngineBoots', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))), + namedtype.NamedType('msgAuthoritativeEngineTime', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))), + namedtype.NamedType('msgUserName', + univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 32))), + namedtype.NamedType('msgAuthenticationParameters', univ.OctetString()), + namedtype.NamedType('msgPrivacyParameters', univ.OctetString()) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3447.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3447.py new file mode 100644 index 0000000000000000000000000000000000000000..3352b70c9e7e0cb4f06a8e8cb09c78795fe5cba1 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3447.py @@ -0,0 +1,45 @@ +# +# This file is part of pyasn1-modules software. 
+#
+# Copyright (c) 2005-2020, Ilya Etingof
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#1 syntax
+#
+# ASN.1 source from:
+# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1.asn
+#
+# Sample captures could be obtained with "openssl genrsa" command
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedval
+
+from pyasn1_modules.rfc2437 import *
+
+
+class OtherPrimeInfo(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('prime', univ.Integer()),
+        namedtype.NamedType('exponent', univ.Integer()),
+        namedtype.NamedType('coefficient', univ.Integer())
+    )
+
+
+class OtherPrimeInfos(univ.SequenceOf):
+    componentType = OtherPrimeInfo()
+    sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class RSAPrivateKey(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('two-prime', 0), ('multi', 1)))),
+        namedtype.NamedType('modulus', univ.Integer()),
+        namedtype.NamedType('publicExponent', univ.Integer()),
+        namedtype.NamedType('privateExponent', univ.Integer()),
+        namedtype.NamedType('prime1', univ.Integer()),
+        namedtype.NamedType('prime2', univ.Integer()),
+        namedtype.NamedType('exponent1', univ.Integer()),
+        namedtype.NamedType('exponent2', univ.Integer()),
+        namedtype.NamedType('coefficient', univ.Integer()),
+        namedtype.OptionalNamedType('otherPrimeInfos', OtherPrimeInfos())
+    )
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3537.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3537.py
new file mode 100644
index 0000000000000000000000000000000000000000..374dd8193ca54427fd3b47ebd2fa213180ba8a3d
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3537.py
@@ -0,0 +1,34 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Wrapping an HMAC key with a Triple-DES Key or an AES Key
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3537.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_alg_HMACwith3DESwrap = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.11')
+
+
+id_alg_HMACwithAESwrap = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.12')
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+    id_alg_HMACwith3DESwrap: univ.Null(""),
+    id_alg_HMACwithAESwrap: univ.Null(""),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3560.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3560.py
new file mode 100644
index 0000000000000000000000000000000000000000..8365436df57b890b66afb762853c105c68f107d2
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3560.py
@@ -0,0 +1,74 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# RSAES-OAEP Key Transport Algorithm in CMS
+#
+# Notice that all of the things needed in RFC 3560 are also defined
+# in RFC 4055. So, they are all pulled from the RFC 4055 module into
+# this one so that people looking at RFC 3560 can easily find them.
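+#
+# A hedged editorial sketch (not part of the RFC): because these are
+# plain re-exports, a name imported from either module resolves to the
+# same object, for example:
+#
+#     from pyasn1_modules import rfc3560, rfc4055
+#     assert rfc3560.id_RSAES_OAEP is rfc4055.id_RSAES_OAEP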
+# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc3560.txt +# + +from pyasn1_modules import rfc4055 + +id_sha1 = rfc4055.id_sha1 + +id_sha256 = rfc4055.id_sha256 + +id_sha384 = rfc4055.id_sha384 + +id_sha512 = rfc4055.id_sha512 + +id_mgf1 = rfc4055.id_mgf1 + +rsaEncryption = rfc4055.rsaEncryption + +id_RSAES_OAEP = rfc4055.id_RSAES_OAEP + +id_pSpecified = rfc4055.id_pSpecified + +sha1Identifier = rfc4055.sha1Identifier + +sha256Identifier = rfc4055.sha256Identifier + +sha384Identifier = rfc4055.sha384Identifier + +sha512Identifier = rfc4055.sha512Identifier + +mgf1SHA1Identifier = rfc4055.mgf1SHA1Identifier + +mgf1SHA256Identifier = rfc4055.mgf1SHA256Identifier + +mgf1SHA384Identifier = rfc4055.mgf1SHA384Identifier + +mgf1SHA512Identifier = rfc4055.mgf1SHA512Identifier + +pSpecifiedEmptyIdentifier = rfc4055.pSpecifiedEmptyIdentifier + + +class RSAES_OAEP_params(rfc4055.RSAES_OAEP_params): + pass + + +rSAES_OAEP_Default_Params = RSAES_OAEP_params() + +rSAES_OAEP_Default_Identifier = rfc4055.rSAES_OAEP_Default_Identifier + +rSAES_OAEP_SHA256_Params = rfc4055.rSAES_OAEP_SHA256_Params + +rSAES_OAEP_SHA256_Identifier = rfc4055.rSAES_OAEP_SHA256_Identifier + +rSAES_OAEP_SHA384_Params = rfc4055.rSAES_OAEP_SHA384_Params + +rSAES_OAEP_SHA384_Identifier = rfc4055.rSAES_OAEP_SHA384_Identifier + +rSAES_OAEP_SHA512_Params = rfc4055.rSAES_OAEP_SHA512_Params + +rSAES_OAEP_SHA512_Identifier = rfc4055.rSAES_OAEP_SHA512_Identifier diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3565.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3565.py new file mode 100644 index 0000000000000000000000000000000000000000..ec75e234892581f8c94dbdf180ee2af9150bc594 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3565.py @@ -0,0 +1,57 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley. +# Modified by Russ Housley to add maps for use with opentypes. 
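+# (The "maps" referred to here are the open-type dictionaries such as
+# rfc5280.algorithmIdentifierMap; pyasn1's decoder consults them when it
+# is invoked with decodeOpenTypes=True.)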
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Use of the Advanced Encryption Standard (AES) Encryption +# Algorithm in the Cryptographic Message Syntax (CMS) +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc3565.txt + + +from pyasn1.type import constraint +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +class AlgorithmIdentifier(rfc5280.AlgorithmIdentifier): + pass + + +class AES_IV(univ.OctetString): + pass + +AES_IV.subtypeSpec = constraint.ValueSizeConstraint(16, 16) + + +id_aes128_CBC = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.2') + +id_aes192_CBC = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.22') + +id_aes256_CBC = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.42') + + +id_aes128_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.5') + +id_aes192_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.25') + +id_aes256_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.45') + + +# Update the Algorithm Identifier map + +_algorithmIdentifierMapUpdate = { + id_aes128_CBC: AES_IV(), + id_aes192_CBC: AES_IV(), + id_aes256_CBC: AES_IV(), + id_aes128_wrap: univ.Null(), + id_aes192_wrap: univ.Null(), + id_aes256_wrap: univ.Null(), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3657.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3657.py new file mode 100644 index 0000000000000000000000000000000000000000..ebf23dabcb6ed0e5f27a6023df9cf7d9b5b5de60 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3657.py @@ -0,0 +1,66 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Camellia Algorithm in CMS +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc3657.txt +# + +from pyasn1.type import constraint +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5751 + + +id_camellia128_cbc = univ.ObjectIdentifier('1.2.392.200011.61.1.1.1.2') + +id_camellia192_cbc = univ.ObjectIdentifier('1.2.392.200011.61.1.1.1.3') + +id_camellia256_cbc = univ.ObjectIdentifier('1.2.392.200011.61.1.1.1.4') + +id_camellia128_wrap = univ.ObjectIdentifier('1.2.392.200011.61.1.1.3.2') + +id_camellia192_wrap = univ.ObjectIdentifier('1.2.392.200011.61.1.1.3.3') + +id_camellia256_wrap = univ.ObjectIdentifier('1.2.392.200011.61.1.1.3.4') + + + +class Camellia_IV(univ.OctetString): + subtypeSpec = constraint.ValueSizeConstraint(16, 16) + + +class CamelliaSMimeCapability(univ.Null): + pass + + +# Update the Algorithm Identifier map in rfc5280.py. 
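+#
+# A hedged usage sketch (assuming pyasn1's DER decoder and the open-type
+# wiring of AlgorithmIdentifier in rfc5280.py): once this map is updated,
+# decoding with decodeOpenTypes=True yields the CBC parameters as a typed
+# Camellia_IV instead of an opaque ANY, for example:
+#
+#     from pyasn1.codec.der import decoder
+#     algid, _ = decoder.decode(substrate,
+#         asn1Spec=rfc5280.AlgorithmIdentifier(), decodeOpenTypes=True)
+#     iv = algid['parameters']  # 16-octet Camellia_IV for id_camellia*_cbc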
+ +_algorithmIdentifierMapUpdate = { + id_camellia128_cbc: Camellia_IV(), + id_camellia192_cbc: Camellia_IV(), + id_camellia256_cbc: Camellia_IV(), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) + + +# Update the SMIMECapabilities Attribute map in rfc5751.py + +_smimeCapabilityMapUpdate = { + id_camellia128_cbc: CamelliaSMimeCapability(), + id_camellia192_cbc: CamelliaSMimeCapability(), + id_camellia256_cbc: CamelliaSMimeCapability(), + id_camellia128_wrap: CamelliaSMimeCapability(), + id_camellia192_wrap: CamelliaSMimeCapability(), + id_camellia256_wrap: CamelliaSMimeCapability(), +} + +rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3709.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3709.py new file mode 100644 index 0000000000000000000000000000000000000000..aa1d5b6abff14a564094bb3cd2d8d96e39ab4feb --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3709.py @@ -0,0 +1,207 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# Modified by Russ Housley to add maps for use with opentypes. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Logotypes in X.509 Certificates +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc3709.txt +# + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc6170 + +MAX = float('inf') + + +class HashAlgAndValue(univ.Sequence): + pass + +HashAlgAndValue.componentType = namedtype.NamedTypes( + namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('hashValue', univ.OctetString()) +) + + +class LogotypeDetails(univ.Sequence): + pass + +LogotypeDetails.componentType = namedtype.NamedTypes( + namedtype.NamedType('mediaType', char.IA5String()), + namedtype.NamedType('logotypeHash', univ.SequenceOf( + componentType=HashAlgAndValue()).subtype( + sizeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('logotypeURI', univ.SequenceOf( + componentType=char.IA5String()).subtype( + sizeSpec=constraint.ValueSizeConstraint(1, MAX))) +) + + +class LogotypeAudioInfo(univ.Sequence): + pass + +LogotypeAudioInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('fileSize', univ.Integer()), + namedtype.NamedType('playTime', univ.Integer()), + namedtype.NamedType('channels', univ.Integer()), + namedtype.OptionalNamedType('sampleRate', univ.Integer().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.OptionalNamedType('language', char.IA5String().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))) +) + + +class LogotypeAudio(univ.Sequence): + pass + +LogotypeAudio.componentType = namedtype.NamedTypes( + namedtype.NamedType('audioDetails', LogotypeDetails()), + namedtype.OptionalNamedType('audioInfo', LogotypeAudioInfo()) +) + + +class LogotypeImageType(univ.Integer): + pass + +LogotypeImageType.namedValues = namedval.NamedValues( + ('grayScale', 0), + ('color', 1) +) + + +class LogotypeImageResolution(univ.Choice): + pass + +LogotypeImageResolution.componentType = namedtype.NamedTypes( + namedtype.NamedType('numBits', + univ.Integer().subtype(implicitTag=tag.Tag( + tag.tagClassContext, 
tag.tagFormatSimple, 1))), + namedtype.NamedType('tableSize', + univ.Integer().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 2))) +) + + +class LogotypeImageInfo(univ.Sequence): + pass + +LogotypeImageInfo.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('type', LogotypeImageType().subtype( + implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value='color')), + namedtype.NamedType('fileSize', univ.Integer()), + namedtype.NamedType('xSize', univ.Integer()), + namedtype.NamedType('ySize', univ.Integer()), + namedtype.OptionalNamedType('resolution', LogotypeImageResolution()), + namedtype.OptionalNamedType('language', char.IA5String().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))) +) + + +class LogotypeImage(univ.Sequence): + pass + +LogotypeImage.componentType = namedtype.NamedTypes( + namedtype.NamedType('imageDetails', LogotypeDetails()), + namedtype.OptionalNamedType('imageInfo', LogotypeImageInfo()) +) + + +class LogotypeData(univ.Sequence): + pass + +LogotypeData.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('image', univ.SequenceOf( + componentType=LogotypeImage())), + namedtype.OptionalNamedType('audio', univ.SequenceOf( + componentType=LogotypeAudio()).subtype( + implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 1))) +) + + +class LogotypeReference(univ.Sequence): + pass + +LogotypeReference.componentType = namedtype.NamedTypes( + namedtype.NamedType('refStructHash', univ.SequenceOf( + componentType=HashAlgAndValue()).subtype( + sizeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('refStructURI', univ.SequenceOf( + componentType=char.IA5String()).subtype( + sizeSpec=constraint.ValueSizeConstraint(1, MAX))) +) + + +class LogotypeInfo(univ.Choice): + pass + +LogotypeInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('direct', + LogotypeData().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatConstructed, 0))), + namedtype.NamedType('indirect', LogotypeReference().subtype( + implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatConstructed, 1))) +) + +# Other logotype type and associated object identifiers + +id_logo_background = univ.ObjectIdentifier('1.3.6.1.5.5.7.20.2') + +id_logo_loyalty = univ.ObjectIdentifier('1.3.6.1.5.5.7.20.1') + +id_logo_certImage = rfc6170.id_logo_certImage + + +class OtherLogotypeInfo(univ.Sequence): + pass + +OtherLogotypeInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('logotypeType', univ.ObjectIdentifier()), + namedtype.NamedType('info', LogotypeInfo()) +) + + +# Logotype Certificate Extension + +id_pe_logotype = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.12') + + +class LogotypeExtn(univ.Sequence): + pass + +LogotypeExtn.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('communityLogos', univ.SequenceOf( + componentType=LogotypeInfo()).subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('issuerLogo', LogotypeInfo().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.OptionalNamedType('subjectLogo', LogotypeInfo().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.OptionalNamedType('otherLogos', univ.SequenceOf( + componentType=OtherLogotypeInfo()).subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 3))) +) + + +# Map of Certificate Extension OIDs to Extensions added to the 
+# ones that are in rfc5280.py + +_certificateExtensionsMapUpdate = { + id_pe_logotype: LogotypeExtn(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3739.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3739.py new file mode 100644 index 0000000000000000000000000000000000000000..4aa5aaf0de80c14c7bec5d8f840dfccdca50dd39 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3739.py @@ -0,0 +1,203 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# Modified by Russ Housley to add WithComponentsConstraints to +# enforce the requirements that are indicated in comments. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Qualified Certificates +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc3739.txt +# + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import opentype +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc5280 + +MAX = float('inf') + + +# Initialize the qcStatement map + +qcStatementMap = { } + + +# Imports from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + +AttributeType = rfc5280.AttributeType + +DirectoryString = rfc5280.DirectoryString + +GeneralName = rfc5280.GeneralName + +id_pkix = rfc5280.id_pkix + +id_pe = rfc5280.id_pe + + +# Arc for QC personal data attributes + +id_pda = id_pkix + (9, ) + + +# Arc for QC statements + +id_qcs = id_pkix + (11, ) + + +# Personal data attributes + +id_pda_dateOfBirth = id_pda + (1, ) + +class DateOfBirth(useful.GeneralizedTime): + pass + + +id_pda_placeOfBirth = id_pda + (2, ) + +class PlaceOfBirth(DirectoryString): + pass + + +id_pda_gender = id_pda + (3, ) + +class Gender(char.PrintableString): + subtypeSpec = constraint.ConstraintsIntersection( + constraint.ValueSizeConstraint(1, 1), + constraint.SingleValueConstraint('M', 'F', 'm', 'f') + ) + + +id_pda_countryOfCitizenship = id_pda + (4, ) + +class CountryOfCitizenship(char.PrintableString): + subtypeSpec = constraint.ValueSizeConstraint(2, 2) + # ISO 3166 Country Code + + +id_pda_countryOfResidence = id_pda + (5, ) + +class CountryOfResidence(char.PrintableString): + subtypeSpec = constraint.ValueSizeConstraint(2, 2) + # ISO 3166 Country Code + + +# Biometric info certificate extension + +id_pe_biometricInfo = id_pe + (2, ) + + +class PredefinedBiometricType(univ.Integer): + namedValues = namedval.NamedValues( + ('picture', 0), + ('handwritten-signature', 1) + ) + subtypeSpec = constraint.SingleValueConstraint(0, 1) + + +class TypeOfBiometricData(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('predefinedBiometricType', PredefinedBiometricType()), + namedtype.NamedType('biometricDataOid', univ.ObjectIdentifier()) + ) + + +class BiometricData(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('typeOfBiometricData', TypeOfBiometricData()), + namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('biometricDataHash', univ.OctetString()), + namedtype.OptionalNamedType('sourceDataUri', char.IA5String()) + ) + + +class BiometricSyntax(univ.SequenceOf): + componentType = BiometricData() + + +# QC Statements certificate extension +# NOTE: This extension does not allow to mix critical and 
+#
+# non-critical Qualified Certificate Statements. Either all
+# statements must be critical or all statements must be
+# non-critical.
+
+id_pe_qcStatements = id_pe + (3, )
+
+
+class NameRegistrationAuthorities(univ.SequenceOf):
+    componentType = GeneralName()
+    subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class QCStatement(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('statementId', univ.ObjectIdentifier()),
+        namedtype.OptionalNamedType('statementInfo', univ.Any(),
+            openType=opentype.OpenType('statementId', qcStatementMap))
+    )
+
+
+class QCStatements(univ.SequenceOf):
+    componentType = QCStatement()
+
+
+class SemanticsInformation(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.OptionalNamedType('semanticsIdentifier',
+            univ.ObjectIdentifier()),
+        namedtype.OptionalNamedType('nameRegistrationAuthorities',
+            NameRegistrationAuthorities())
+    )
+    subtypeSpec = constraint.ConstraintsUnion(
+        constraint.WithComponentsConstraint(
+            ('semanticsIdentifier', constraint.ComponentPresentConstraint())),
+        constraint.WithComponentsConstraint(
+            ('nameRegistrationAuthorities', constraint.ComponentPresentConstraint()))
+    )
+
+
+id_qcs = id_pkix + (11, )
+
+
+id_qcs_pkixQCSyntax_v1 = id_qcs + (1, )
+
+
+id_qcs_pkixQCSyntax_v2 = id_qcs + (2, )
+
+
+# Map of Certificate Extension OIDs to Extensions
+# To be added to the ones that are in rfc5280.py
+
+_certificateExtensionsMap = {
+    id_pe_biometricInfo: BiometricSyntax(),
+    id_pe_qcStatements: QCStatements(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
+
+
+# Map of AttributeType OIDs to AttributeValue added to the
+# ones that are in rfc5280.py
+
+_certificateAttributesMapUpdate = {
+    id_pda_dateOfBirth: DateOfBirth(),
+    id_pda_placeOfBirth: PlaceOfBirth(),
+    id_pda_gender: Gender(),
+    id_pda_countryOfCitizenship: CountryOfCitizenship(),
+    id_pda_countryOfResidence: CountryOfResidence(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
+
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3770.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3770.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fefe1d90e2c6efdf1aa95bc1d4f09719f826d53
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3770.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Certificate Extensions and Attributes Supporting Authentication +# in PPP and Wireless LAN Networks +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc3770.txt +# https://www.rfc-editor.org/errata/eid234 +# + +from pyasn1.type import constraint +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +MAX = float('inf') + + +# Extended Key Usage Values + +id_kp_eapOverLAN = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.14') + +id_kp_eapOverPPP = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.13') + + +# Wireless LAN SSID Extension + +id_pe_wlanSSID = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.13') + + +class SSID(univ.OctetString): + pass + +SSID.subtypeSpec = constraint.ValueSizeConstraint(1, 32) + + +class SSIDList(univ.SequenceOf): + pass + +SSIDList.componentType = SSID() +SSIDList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +# Wireless LAN SSID Attribute Certificate Attribute +# Uses same syntax as the certificate extension: SSIDList +# Correction for https://www.rfc-editor.org/errata/eid234 + +id_aca_wlanSSID = univ.ObjectIdentifier('1.3.6.1.5.5.7.10.7') + + +# Map of Certificate Extension OIDs to Extensions +# To be added to the ones that are in rfc5280.py + +_certificateExtensionsMap = { + id_pe_wlanSSID: SSIDList(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap) + + +# Map of AttributeType OIDs to AttributeValue added to the +# ones that are in rfc5280.py + +_certificateAttributesMapUpdate = { + id_aca_wlanSSID: SSIDList(), +} + +rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3779.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3779.py new file mode 100644 index 0000000000000000000000000000000000000000..8e6eaa3e7b293d62a8a66077c7d7e64fa9157332 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3779.py @@ -0,0 +1,137 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# Modified by Russ Housley to add maps for use with opentypes. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# X.509 Extensions for IP Addresses and AS Identifiers +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc3779.txt +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +# IP Address Delegation Extension + +id_pe_ipAddrBlocks = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.7') + + +class IPAddress(univ.BitString): + pass + + +class IPAddressRange(univ.Sequence): + pass + +IPAddressRange.componentType = namedtype.NamedTypes( + namedtype.NamedType('min', IPAddress()), + namedtype.NamedType('max', IPAddress()) +) + + +class IPAddressOrRange(univ.Choice): + pass + +IPAddressOrRange.componentType = namedtype.NamedTypes( + namedtype.NamedType('addressPrefix', IPAddress()), + namedtype.NamedType('addressRange', IPAddressRange()) +) + + +class IPAddressChoice(univ.Choice): + pass + +IPAddressChoice.componentType = namedtype.NamedTypes( + namedtype.NamedType('inherit', univ.Null()), + namedtype.NamedType('addressesOrRanges', univ.SequenceOf( + componentType=IPAddressOrRange()) + ) +) + + +class IPAddressFamily(univ.Sequence): + pass + +IPAddressFamily.componentType = namedtype.NamedTypes( + namedtype.NamedType('addressFamily', univ.OctetString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(2, 3))), + namedtype.NamedType('ipAddressChoice', IPAddressChoice()) +) + + +class IPAddrBlocks(univ.SequenceOf): + pass + +IPAddrBlocks.componentType = IPAddressFamily() + + +# Autonomous System Identifier Delegation Extension + +id_pe_autonomousSysIds = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.8') + + +class ASId(univ.Integer): + pass + + +class ASRange(univ.Sequence): + pass + +ASRange.componentType = namedtype.NamedTypes( + namedtype.NamedType('min', ASId()), + namedtype.NamedType('max', ASId()) +) + + +class ASIdOrRange(univ.Choice): + pass + +ASIdOrRange.componentType = namedtype.NamedTypes( + namedtype.NamedType('id', ASId()), + namedtype.NamedType('range', ASRange()) +) + + +class ASIdentifierChoice(univ.Choice): + pass + +ASIdentifierChoice.componentType = namedtype.NamedTypes( + namedtype.NamedType('inherit', univ.Null()), + namedtype.NamedType('asIdsOrRanges', univ.SequenceOf( + componentType=ASIdOrRange()) + ) +) + + +class ASIdentifiers(univ.Sequence): + pass + +ASIdentifiers.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('asnum', ASIdentifierChoice().subtype( + explicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('rdi', ASIdentifierChoice().subtype( + explicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatConstructed, 1))) +) + + +# Map of Certificate Extension OIDs to Extensions is added to the +# ones that are in rfc5280.py + +_certificateExtensionsMapUpdate = { + id_pe_ipAddrBlocks: IPAddrBlocks(), + id_pe_autonomousSysIds: ASIdentifiers(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3820.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3820.py new file mode 100644 index 0000000000000000000000000000000000000000..b4ba34c05c228789195716b77d30cf4fd31c4c78 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3820.py @@ -0,0 +1,65 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure (PKI) Proxy Certificate Profile
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3820.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+
+class ProxyCertPathLengthConstraint(univ.Integer):
+    pass
+
+
+class ProxyPolicy(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('policyLanguage', univ.ObjectIdentifier()),
+        namedtype.OptionalNamedType('policy', univ.OctetString())
+    )
+
+
+class ProxyCertInfoExtension(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.OptionalNamedType('pCPathLenConstraint',
+            ProxyCertPathLengthConstraint()),
+        namedtype.NamedType('proxyPolicy', ProxyPolicy())
+    )
+
+
+id_pkix = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, ))
+
+
+id_pe = id_pkix + (1, )
+
+id_pe_proxyCertInfo = id_pe + (14, )
+
+
+id_ppl = id_pkix + (21, )
+
+id_ppl_anyLanguage = id_ppl + (0, )
+
+id_ppl_inheritAll = id_ppl + (1, )
+
+id_ppl_independent = id_ppl + (2, )
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+    id_pe_proxyCertInfo: ProxyCertInfoExtension(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3852.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3852.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf1bb85ad8af94cb05fde9b6ded429ea20113c73
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc3852.py
@@ -0,0 +1,706 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# Cryptographic Message Syntax (CMS) +# +# ASN.1 source from: +# http://www.ietf.org/rfc/rfc3852.txt +# +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc3280 +from pyasn1_modules import rfc3281 + +MAX = float('inf') + + +def _buildOid(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + + return univ.ObjectIdentifier(output) + + +class AttributeValue(univ.Any): + pass + + +class Attribute(univ.Sequence): + pass + + +Attribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('attrType', univ.ObjectIdentifier()), + namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue())) +) + + +class SignedAttributes(univ.SetOf): + pass + + +SignedAttributes.componentType = Attribute() +SignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class OtherRevocationInfoFormat(univ.Sequence): + pass + + +OtherRevocationInfoFormat.componentType = namedtype.NamedTypes( + namedtype.NamedType('otherRevInfoFormat', univ.ObjectIdentifier()), + namedtype.NamedType('otherRevInfo', univ.Any()) +) + + +class RevocationInfoChoice(univ.Choice): + pass + + +RevocationInfoChoice.componentType = namedtype.NamedTypes( + namedtype.NamedType('crl', rfc3280.CertificateList()), + namedtype.NamedType('other', OtherRevocationInfoFormat().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) +) + + +class RevocationInfoChoices(univ.SetOf): + pass + + +RevocationInfoChoices.componentType = RevocationInfoChoice() + + +class OtherKeyAttribute(univ.Sequence): + pass + + +OtherKeyAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('keyAttrId', univ.ObjectIdentifier()), + namedtype.OptionalNamedType('keyAttr', univ.Any()) +) + +id_signedData = _buildOid(1, 2, 840, 113549, 1, 7, 2) + + +class KeyEncryptionAlgorithmIdentifier(rfc3280.AlgorithmIdentifier): + pass + + +class EncryptedKey(univ.OctetString): + pass + + +class CMSVersion(univ.Integer): + pass + + +CMSVersion.namedValues = namedval.NamedValues( + ('v0', 0), + ('v1', 1), + ('v2', 2), + ('v3', 3), + ('v4', 4), + ('v5', 5) +) + + +class KEKIdentifier(univ.Sequence): + pass + + +KEKIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('keyIdentifier', univ.OctetString()), + namedtype.OptionalNamedType('date', useful.GeneralizedTime()), + namedtype.OptionalNamedType('other', OtherKeyAttribute()) +) + + +class KEKRecipientInfo(univ.Sequence): + pass + + +KEKRecipientInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('kekid', KEKIdentifier()), + namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('encryptedKey', EncryptedKey()) +) + + +class KeyDerivationAlgorithmIdentifier(rfc3280.AlgorithmIdentifier): + pass + + +class PasswordRecipientInfo(univ.Sequence): + pass + + +PasswordRecipientInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.OptionalNamedType('keyDerivationAlgorithm', KeyDerivationAlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + 
namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('encryptedKey', EncryptedKey()) +) + + +class OtherRecipientInfo(univ.Sequence): + pass + + +OtherRecipientInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('oriType', univ.ObjectIdentifier()), + namedtype.NamedType('oriValue', univ.Any()) +) + + +class IssuerAndSerialNumber(univ.Sequence): + pass + + +IssuerAndSerialNumber.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuer', rfc3280.Name()), + namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()) +) + + +class SubjectKeyIdentifier(univ.OctetString): + pass + + +class RecipientKeyIdentifier(univ.Sequence): + pass + + +RecipientKeyIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier()), + namedtype.OptionalNamedType('date', useful.GeneralizedTime()), + namedtype.OptionalNamedType('other', OtherKeyAttribute()) +) + + +class KeyAgreeRecipientIdentifier(univ.Choice): + pass + + +KeyAgreeRecipientIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()), + namedtype.NamedType('rKeyId', RecipientKeyIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) +) + + +class RecipientEncryptedKey(univ.Sequence): + pass + + +RecipientEncryptedKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('rid', KeyAgreeRecipientIdentifier()), + namedtype.NamedType('encryptedKey', EncryptedKey()) +) + + +class RecipientEncryptedKeys(univ.SequenceOf): + pass + + +RecipientEncryptedKeys.componentType = RecipientEncryptedKey() + + +class UserKeyingMaterial(univ.OctetString): + pass + + +class OriginatorPublicKey(univ.Sequence): + pass + + +OriginatorPublicKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('algorithm', rfc3280.AlgorithmIdentifier()), + namedtype.NamedType('publicKey', univ.BitString()) +) + + +class OriginatorIdentifierOrKey(univ.Choice): + pass + + +OriginatorIdentifierOrKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()), + namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('originatorKey', OriginatorPublicKey().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) +) + + +class KeyAgreeRecipientInfo(univ.Sequence): + pass + + +KeyAgreeRecipientInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('originator', OriginatorIdentifierOrKey().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('ukm', UserKeyingMaterial().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('recipientEncryptedKeys', RecipientEncryptedKeys()) +) + + +class RecipientIdentifier(univ.Choice): + pass + + +RecipientIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()), + namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class KeyTransRecipientInfo(univ.Sequence): + pass + + +KeyTransRecipientInfo.componentType = 
namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('rid', RecipientIdentifier()), + namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('encryptedKey', EncryptedKey()) +) + + +class RecipientInfo(univ.Choice): + pass + + +RecipientInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('ktri', KeyTransRecipientInfo()), + namedtype.NamedType('kari', KeyAgreeRecipientInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.NamedType('kekri', KEKRecipientInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.NamedType('pwri', PasswordRecipientInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.NamedType('ori', OtherRecipientInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))) +) + + +class RecipientInfos(univ.SetOf): + pass + + +RecipientInfos.componentType = RecipientInfo() +RecipientInfos.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class DigestAlgorithmIdentifier(rfc3280.AlgorithmIdentifier): + pass + + +class Signature(univ.BitString): + pass + + +class SignerIdentifier(univ.Choice): + pass + + +SignerIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()), + namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class UnprotectedAttributes(univ.SetOf): + pass + + +UnprotectedAttributes.componentType = Attribute() +UnprotectedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class ContentType(univ.ObjectIdentifier): + pass + + +class EncryptedContent(univ.OctetString): + pass + + +class ContentEncryptionAlgorithmIdentifier(rfc3280.AlgorithmIdentifier): + pass + + +class EncryptedContentInfo(univ.Sequence): + pass + + +EncryptedContentInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('contentType', ContentType()), + namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()), + namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class EncryptedData(univ.Sequence): + pass + + +EncryptedData.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()), + namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + +id_contentType = _buildOid(1, 2, 840, 113549, 1, 9, 3) + +id_data = _buildOid(1, 2, 840, 113549, 1, 7, 1) + +id_messageDigest = _buildOid(1, 2, 840, 113549, 1, 9, 4) + + +class DigestAlgorithmIdentifiers(univ.SetOf): + pass + + +DigestAlgorithmIdentifiers.componentType = DigestAlgorithmIdentifier() + + +class EncapsulatedContentInfo(univ.Sequence): + pass + + +EncapsulatedContentInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('eContentType', ContentType()), + namedtype.OptionalNamedType('eContent', univ.OctetString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class Digest(univ.OctetString): + pass + + +class DigestedData(univ.Sequence): + pass + + +DigestedData.componentType = namedtype.NamedTypes( + 
namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()), + namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()), + namedtype.NamedType('digest', Digest()) +) + + +class ContentInfo(univ.Sequence): + pass + + +ContentInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('contentType', ContentType()), + namedtype.NamedType('content', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class UnauthAttributes(univ.SetOf): + pass + + +UnauthAttributes.componentType = Attribute() +UnauthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class ExtendedCertificateInfo(univ.Sequence): + pass + + +ExtendedCertificateInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('certificate', rfc3280.Certificate()), + namedtype.NamedType('attributes', UnauthAttributes()) +) + + +class SignatureAlgorithmIdentifier(rfc3280.AlgorithmIdentifier): + pass + + +class ExtendedCertificate(univ.Sequence): + pass + + +ExtendedCertificate.componentType = namedtype.NamedTypes( + namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()), + namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()), + namedtype.NamedType('signature', Signature()) +) + + +class OtherCertificateFormat(univ.Sequence): + pass + + +OtherCertificateFormat.componentType = namedtype.NamedTypes( + namedtype.NamedType('otherCertFormat', univ.ObjectIdentifier()), + namedtype.NamedType('otherCert', univ.Any()) +) + + +class AttributeCertificateV2(rfc3281.AttributeCertificate): + pass + + +class AttCertVersionV1(univ.Integer): + pass + + +AttCertVersionV1.namedValues = namedval.NamedValues( + ('v1', 0) +) + + +class AttributeCertificateInfoV1(univ.Sequence): + pass + + +AttributeCertificateInfoV1.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', AttCertVersionV1().subtype(value="v1")), + namedtype.NamedType( + 'subject', univ.Choice( + componentType=namedtype.NamedTypes( + namedtype.NamedType('baseCertificateID', rfc3281.IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('subjectName', rfc3280.GeneralNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + ) + ), + namedtype.NamedType('issuer', rfc3280.GeneralNames()), + namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()), + namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()), + namedtype.NamedType('attCertValidityPeriod', rfc3281.AttCertValidityPeriod()), + namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())), + namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()), + namedtype.OptionalNamedType('extensions', rfc3280.Extensions()) +) + + +class AttributeCertificateV1(univ.Sequence): + pass + + +AttributeCertificateV1.componentType = namedtype.NamedTypes( + namedtype.NamedType('acInfo', AttributeCertificateInfoV1()), + namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()) +) + + +class CertificateChoices(univ.Choice): + pass + + +CertificateChoices.componentType = namedtype.NamedTypes( + namedtype.NamedType('certificate', rfc3280.Certificate()), + namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + 
namedtype.NamedType('v1AttrCert', AttributeCertificateV1().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('v2AttrCert', AttributeCertificateV2().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('other', OtherCertificateFormat().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))) +) + + +class CertificateSet(univ.SetOf): + pass + + +CertificateSet.componentType = CertificateChoices() + + +class MessageAuthenticationCode(univ.OctetString): + pass + + +class UnsignedAttributes(univ.SetOf): + pass + + +UnsignedAttributes.componentType = Attribute() +UnsignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class SignatureValue(univ.OctetString): + pass + + +class SignerInfo(univ.Sequence): + pass + + +SignerInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('sid', SignerIdentifier()), + namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()), + namedtype.OptionalNamedType('signedAttrs', SignedAttributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()), + namedtype.NamedType('signature', SignatureValue()), + namedtype.OptionalNamedType('unsignedAttrs', UnsignedAttributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class SignerInfos(univ.SetOf): + pass + + +SignerInfos.componentType = SignerInfo() + + +class SignedData(univ.Sequence): + pass + + +SignedData.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()), + namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()), + namedtype.OptionalNamedType('certificates', CertificateSet().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('signerInfos', SignerInfos()) +) + + +class MessageAuthenticationCodeAlgorithm(rfc3280.AlgorithmIdentifier): + pass + + +class MessageDigest(univ.OctetString): + pass + + +class Time(univ.Choice): + pass + + +Time.componentType = namedtype.NamedTypes( + namedtype.NamedType('utcTime', useful.UTCTime()), + namedtype.NamedType('generalTime', useful.GeneralizedTime()) +) + + +class OriginatorInfo(univ.Sequence): + pass + + +OriginatorInfo.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('certs', CertificateSet().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class AuthAttributes(univ.SetOf): + pass + + +AuthAttributes.componentType = Attribute() +AuthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class AuthenticatedData(univ.Sequence): + pass + + +AuthenticatedData.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('recipientInfos', RecipientInfos()), + namedtype.NamedType('macAlgorithm', MessageAuthenticationCodeAlgorithm()), + 
namedtype.OptionalNamedType('digestAlgorithm', DigestAlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()), + namedtype.OptionalNamedType('authAttrs', AuthAttributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('mac', MessageAuthenticationCode()), + namedtype.OptionalNamedType('unauthAttrs', UnauthAttributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) +) + +id_ct_contentInfo = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 6) + +id_envelopedData = _buildOid(1, 2, 840, 113549, 1, 7, 3) + + +class EnvelopedData(univ.Sequence): + pass + + +EnvelopedData.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('recipientInfos', RecipientInfos()), + namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()), + namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class Countersignature(SignerInfo): + pass + + +id_digestedData = _buildOid(1, 2, 840, 113549, 1, 7, 5) + +id_signingTime = _buildOid(1, 2, 840, 113549, 1, 9, 5) + + +class ExtendedCertificateOrCertificate(univ.Choice): + pass + + +ExtendedCertificateOrCertificate.componentType = namedtype.NamedTypes( + namedtype.NamedType('certificate', rfc3280.Certificate()), + namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) +) + +id_encryptedData = _buildOid(1, 2, 840, 113549, 1, 7, 6) + +id_ct_authData = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 2) + + +class SigningTime(Time): + pass + + +id_countersignature = _buildOid(1, 2, 840, 113549, 1, 9, 6) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4010.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4010.py new file mode 100644 index 0000000000000000000000000000000000000000..4981f76bedca81cd0f5146cd8c13effea9459d5d --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4010.py @@ -0,0 +1,58 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# SEED Encryption Algorithm in CMS +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc4010.txt +# + +from pyasn1.type import constraint +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5751 + + +id_seedCBC = univ.ObjectIdentifier('1.2.410.200004.1.4') + + +id_npki_app_cmsSeed_wrap = univ.ObjectIdentifier('1.2.410.200004.7.1.1.1') + + +class SeedIV(univ.OctetString): + subtypeSpec = constraint.ValueSizeConstraint(16, 16) + + +class SeedCBCParameter(SeedIV): + pass + + +class SeedSMimeCapability(univ.Null): + pass + + +# Update the Algorithm Identifier map in rfc5280.py. 
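+#
+# (Editorial note: registering SeedCBCParameter here means that a decoder
+# invoked with decodeOpenTypes=True returns the SEED-CBC IV as a typed
+# 16-octet OCTET STRING rather than leaving the parameters as univ.Any.)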
+ +_algorithmIdentifierMapUpdate = { + id_seedCBC: SeedCBCParameter(), + id_npki_app_cmsSeed_wrap: univ.Null(""), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) + + +# Update the SMIMECapabilities Attribute map in rfc5751.py + +_smimeCapabilityMapUpdate = { + id_seedCBC: SeedSMimeCapability(), + id_npki_app_cmsSeed_wrap: SeedSMimeCapability(), + +} + +rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4043.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4043.py new file mode 100644 index 0000000000000000000000000000000000000000..cf0a801419bb94e56839e2142ba11d193d08f9bc --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4043.py @@ -0,0 +1,43 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Internet X.509 Public Key Infrastructure Permanent Identifier +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc4043.txt +# + +from pyasn1.type import char +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +id_pkix = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, )) + +id_on = id_pkix + (8, ) + +id_on_permanentIdentifier = id_on + (3, ) + + +class PermanentIdentifier(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('identifierValue', char.UTF8String()), + namedtype.OptionalNamedType('assigner', univ.ObjectIdentifier()) + ) + + +# Map of Other Name OIDs to Other Name is added to the +# ones that are in rfc5280.py + +_anotherNameMapUpdate = { + id_on_permanentIdentifier: PermanentIdentifier(), +} + +rfc5280.anotherNameMap.update(_anotherNameMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4055.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4055.py new file mode 100644 index 0000000000000000000000000000000000000000..bdc128632a577621205d5da83a2d62889aaec761 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4055.py @@ -0,0 +1,258 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with a very small amount of assistance from +# asn1ate v.0.6.0. +# Modified by Russ Housley to add maps for opentypes. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Additional Algorithms and Identifiers for RSA Cryptography +# for use in Certificates and CRLs +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc4055.txt +# +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +def _OID(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + return univ.ObjectIdentifier(output) + + +id_sha1 = _OID(1, 3, 14, 3, 2, 26) + +id_sha256 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 1) + +id_sha384 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 2) + +id_sha512 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 3) + +id_sha224 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 4) + +rsaEncryption = _OID(1, 2, 840, 113549, 1, 1, 1) + +id_mgf1 = _OID(1, 2, 840, 113549, 1, 1, 8) + +id_RSAES_OAEP = _OID(1, 2, 840, 113549, 1, 1, 7) + +id_pSpecified = _OID(1, 2, 840, 113549, 1, 1, 9) + +id_RSASSA_PSS = _OID(1, 2, 840, 113549, 1, 1, 10) + +sha256WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 11) + +sha384WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 12) + +sha512WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 13) + +sha224WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 14) + +sha1Identifier = rfc5280.AlgorithmIdentifier() +sha1Identifier['algorithm'] = id_sha1 +sha1Identifier['parameters'] = univ.Null("") + +sha224Identifier = rfc5280.AlgorithmIdentifier() +sha224Identifier['algorithm'] = id_sha224 +sha224Identifier['parameters'] = univ.Null("") + +sha256Identifier = rfc5280.AlgorithmIdentifier() +sha256Identifier['algorithm'] = id_sha256 +sha256Identifier['parameters'] = univ.Null("") + +sha384Identifier = rfc5280.AlgorithmIdentifier() +sha384Identifier['algorithm'] = id_sha384 +sha384Identifier['parameters'] = univ.Null("") + +sha512Identifier = rfc5280.AlgorithmIdentifier() +sha512Identifier['algorithm'] = id_sha512 +sha512Identifier['parameters'] = univ.Null("") + +mgf1SHA1Identifier = rfc5280.AlgorithmIdentifier() +mgf1SHA1Identifier['algorithm'] = id_mgf1 +mgf1SHA1Identifier['parameters'] = sha1Identifier + +mgf1SHA224Identifier = rfc5280.AlgorithmIdentifier() +mgf1SHA224Identifier['algorithm'] = id_mgf1 +mgf1SHA224Identifier['parameters'] = sha224Identifier + +mgf1SHA256Identifier = rfc5280.AlgorithmIdentifier() +mgf1SHA256Identifier['algorithm'] = id_mgf1 +mgf1SHA256Identifier['parameters'] = sha256Identifier + +mgf1SHA384Identifier = rfc5280.AlgorithmIdentifier() +mgf1SHA384Identifier['algorithm'] = id_mgf1 +mgf1SHA384Identifier['parameters'] = sha384Identifier + +mgf1SHA512Identifier = rfc5280.AlgorithmIdentifier() +mgf1SHA512Identifier['algorithm'] = id_mgf1 +mgf1SHA512Identifier['parameters'] = sha512Identifier + +pSpecifiedEmptyIdentifier = rfc5280.AlgorithmIdentifier() +pSpecifiedEmptyIdentifier['algorithm'] = id_pSpecified +pSpecifiedEmptyIdentifier['parameters'] = univ.OctetString(value='') + + +class RSAPublicKey(univ.Sequence): + pass + +RSAPublicKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('modulus', univ.Integer()), + namedtype.NamedType('publicExponent', univ.Integer()) +) + + +class HashAlgorithm(rfc5280.AlgorithmIdentifier): + pass + + +class MaskGenAlgorithm(rfc5280.AlgorithmIdentifier): + pass + + +class RSAES_OAEP_params(univ.Sequence): + pass + +RSAES_OAEP_params.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('hashFunc', 
rfc5280.AlgorithmIdentifier().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('maskGenFunc', rfc5280.AlgorithmIdentifier().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.OptionalNamedType('pSourceFunc', rfc5280.AlgorithmIdentifier().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))) +) + +rSAES_OAEP_Default_Params = RSAES_OAEP_params() + +rSAES_OAEP_Default_Identifier = rfc5280.AlgorithmIdentifier() +rSAES_OAEP_Default_Identifier['algorithm'] = id_RSAES_OAEP +rSAES_OAEP_Default_Identifier['parameters'] = rSAES_OAEP_Default_Params + +rSAES_OAEP_SHA224_Params = RSAES_OAEP_params() +rSAES_OAEP_SHA224_Params['hashFunc'] = sha224Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True) +rSAES_OAEP_SHA224_Params['maskGenFunc'] = mgf1SHA224Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True) + +rSAES_OAEP_SHA224_Identifier = rfc5280.AlgorithmIdentifier() +rSAES_OAEP_SHA224_Identifier['algorithm'] = id_RSAES_OAEP +rSAES_OAEP_SHA224_Identifier['parameters'] = rSAES_OAEP_SHA224_Params + +rSAES_OAEP_SHA256_Params = RSAES_OAEP_params() +rSAES_OAEP_SHA256_Params['hashFunc'] = sha256Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True) +rSAES_OAEP_SHA256_Params['maskGenFunc'] = mgf1SHA256Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True) + +rSAES_OAEP_SHA256_Identifier = rfc5280.AlgorithmIdentifier() +rSAES_OAEP_SHA256_Identifier['algorithm'] = id_RSAES_OAEP +rSAES_OAEP_SHA256_Identifier['parameters'] = rSAES_OAEP_SHA256_Params + +rSAES_OAEP_SHA384_Params = RSAES_OAEP_params() +rSAES_OAEP_SHA384_Params['hashFunc'] = sha384Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True) +rSAES_OAEP_SHA384_Params['maskGenFunc'] = mgf1SHA384Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True) + +rSAES_OAEP_SHA384_Identifier = rfc5280.AlgorithmIdentifier() +rSAES_OAEP_SHA384_Identifier['algorithm'] = id_RSAES_OAEP +rSAES_OAEP_SHA384_Identifier['parameters'] = rSAES_OAEP_SHA384_Params + +rSAES_OAEP_SHA512_Params = RSAES_OAEP_params() +rSAES_OAEP_SHA512_Params['hashFunc'] = sha512Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True) +rSAES_OAEP_SHA512_Params['maskGenFunc'] = mgf1SHA512Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True) + +rSAES_OAEP_SHA512_Identifier = rfc5280.AlgorithmIdentifier() +rSAES_OAEP_SHA512_Identifier['algorithm'] = id_RSAES_OAEP +rSAES_OAEP_SHA512_Identifier['parameters'] = rSAES_OAEP_SHA512_Params + + +class RSASSA_PSS_params(univ.Sequence): + pass + +RSASSA_PSS_params.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('hashAlgorithm', rfc5280.AlgorithmIdentifier().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('maskGenAlgorithm', rfc5280.AlgorithmIdentifier().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.DefaultedNamedType('saltLength', univ.Integer(value=20).subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + 
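+    # RFC 4055: when omitted, saltLength defaults to 20 (the SHA-1 digest
+    # length) and trailerField to 1 (the 0xBC trailer byte).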
namedtype.DefaultedNamedType('trailerField', univ.Integer(value=1).subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) +) + +rSASSA_PSS_Default_Params = RSASSA_PSS_params() + +rSASSA_PSS_Default_Identifier = rfc5280.AlgorithmIdentifier() +rSASSA_PSS_Default_Identifier['algorithm'] = id_RSASSA_PSS +rSASSA_PSS_Default_Identifier['parameters'] = rSASSA_PSS_Default_Params + +rSASSA_PSS_SHA224_Params = RSASSA_PSS_params() +rSASSA_PSS_SHA224_Params['hashAlgorithm'] = sha224Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True) +rSASSA_PSS_SHA224_Params['maskGenAlgorithm'] = mgf1SHA224Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True) + +rSASSA_PSS_SHA224_Identifier = rfc5280.AlgorithmIdentifier() +rSASSA_PSS_SHA224_Identifier['algorithm'] = id_RSASSA_PSS +rSASSA_PSS_SHA224_Identifier['parameters'] = rSASSA_PSS_SHA224_Params + +rSASSA_PSS_SHA256_Params = RSASSA_PSS_params() +rSASSA_PSS_SHA256_Params['hashAlgorithm'] = sha256Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True) +rSASSA_PSS_SHA256_Params['maskGenAlgorithm'] = mgf1SHA256Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True) + +rSASSA_PSS_SHA256_Identifier = rfc5280.AlgorithmIdentifier() +rSASSA_PSS_SHA256_Identifier['algorithm'] = id_RSASSA_PSS +rSASSA_PSS_SHA256_Identifier['parameters'] = rSASSA_PSS_SHA256_Params + +rSASSA_PSS_SHA384_Params = RSASSA_PSS_params() +rSASSA_PSS_SHA384_Params['hashAlgorithm'] = sha384Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True) +rSASSA_PSS_SHA384_Params['maskGenAlgorithm'] = mgf1SHA384Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True) + +rSASSA_PSS_SHA384_Identifier = rfc5280.AlgorithmIdentifier() +rSASSA_PSS_SHA384_Identifier['algorithm'] = id_RSASSA_PSS +rSASSA_PSS_SHA384_Identifier['parameters'] = rSASSA_PSS_SHA384_Params + +rSASSA_PSS_SHA512_Params = RSASSA_PSS_params() +rSASSA_PSS_SHA512_Params['hashAlgorithm'] = sha512Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True) +rSASSA_PSS_SHA512_Params['maskGenAlgorithm'] = mgf1SHA512Identifier.subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True) + +rSASSA_PSS_SHA512_Identifier = rfc5280.AlgorithmIdentifier() +rSASSA_PSS_SHA512_Identifier['algorithm'] = id_RSASSA_PSS +rSASSA_PSS_SHA512_Identifier['parameters'] = rSASSA_PSS_SHA512_Params + + +# Update the Algorithm Identifier map + +_algorithmIdentifierMapUpdate = { + id_sha1: univ.Null(), + id_sha224: univ.Null(), + id_sha256: univ.Null(), + id_sha384: univ.Null(), + id_sha512: univ.Null(), + id_mgf1: rfc5280.AlgorithmIdentifier(), + id_pSpecified: univ.OctetString(), + id_RSAES_OAEP: RSAES_OAEP_params(), + id_RSASSA_PSS: RSASSA_PSS_params(), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4073.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4073.py new file mode 100644 index 0000000000000000000000000000000000000000..3f425b28eddb46b619c06f00fcc366e454734639 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4073.py @@ -0,0 +1,59 @@ +# +# This file is part of pyasn1-modules 
software. +# +# Created by Russ Housley with some assistance from asn1ate v.0.6.0. +# Modified by Russ Housley to add a map for use with opentypes. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Protecting Multiple Contents with the CMS +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc4073.txt +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc5652 + +MAX = float('inf') + + +# Content Collection Content Type and Object Identifier + +id_ct_contentCollection = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.19') + +class ContentCollection(univ.SequenceOf): + pass + +ContentCollection.componentType = rfc5652.ContentInfo() +ContentCollection.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +# Content With Attributes Content Type and Object Identifier + +id_ct_contentWithAttrs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.20') + +class ContentWithAttributes(univ.Sequence): + pass + +ContentWithAttributes.componentType = namedtype.NamedTypes( + namedtype.NamedType('content', rfc5652.ContentInfo()), + namedtype.NamedType('attrs', univ.SequenceOf( + componentType=rfc5652.Attribute()).subtype( + sizeSpec=constraint.ValueSizeConstraint(1, MAX))) +) + + +# Map of Content Type OIDs to Content Types is added to the +# ones that are in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_ct_contentCollection: ContentCollection(), + id_ct_contentWithAttrs: ContentWithAttributes(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4108.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4108.py new file mode 100644 index 0000000000000000000000000000000000000000..ecace9e3ee958500c827ac3d1cda8232e91db992 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4108.py @@ -0,0 +1,350 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# Modified by Russ Housley to add items from the verified errata. +# Modified by Russ Housley to add maps for use with opentypes. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# CMS Firmware Wrapper +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc4108.txt +# https://www.rfc-editor.org/errata_search.php?rfc=4108 +# + + +from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5652 + +MAX = float('inf') + + +class HardwareSerialEntry(univ.Choice): + pass + +HardwareSerialEntry.componentType = namedtype.NamedTypes( + namedtype.NamedType('all', univ.Null()), + namedtype.NamedType('single', univ.OctetString()), + namedtype.NamedType('block', univ.Sequence(componentType=namedtype.NamedTypes( + namedtype.NamedType('low', univ.OctetString()), + namedtype.NamedType('high', univ.OctetString()) + )) + ) +) + + +class HardwareModules(univ.Sequence): + pass + +HardwareModules.componentType = namedtype.NamedTypes( + namedtype.NamedType('hwType', univ.ObjectIdentifier()), + namedtype.NamedType('hwSerialEntries', univ.SequenceOf(componentType=HardwareSerialEntry())) +) + + +class CommunityIdentifier(univ.Choice): + pass + +CommunityIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('communityOID', univ.ObjectIdentifier()), + namedtype.NamedType('hwModuleList', HardwareModules()) +) + + + +class PreferredPackageIdentifier(univ.Sequence): + pass + +PreferredPackageIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('fwPkgID', univ.ObjectIdentifier()), + namedtype.NamedType('verNum', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))) +) + + +class PreferredOrLegacyPackageIdentifier(univ.Choice): + pass + +PreferredOrLegacyPackageIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('preferred', PreferredPackageIdentifier()), + namedtype.NamedType('legacy', univ.OctetString()) +) + + +class CurrentFWConfig(univ.Sequence): + pass + +CurrentFWConfig.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('fwPkgType', univ.Integer()), + namedtype.NamedType('fwPkgName', PreferredOrLegacyPackageIdentifier()) +) + + +class PreferredOrLegacyStalePackageIdentifier(univ.Choice): + pass + +PreferredOrLegacyStalePackageIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('preferredStaleVerNum', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))), + namedtype.NamedType('legacyStaleVersion', univ.OctetString()) +) + + +class FirmwarePackageLoadErrorCode(univ.Enumerated): + pass + +FirmwarePackageLoadErrorCode.namedValues = namedval.NamedValues( + ('decodeFailure', 1), + ('badContentInfo', 2), + ('badSignedData', 3), + ('badEncapContent', 4), + ('badCertificate', 5), + ('badSignerInfo', 6), + ('badSignedAttrs', 7), + ('badUnsignedAttrs', 8), + ('missingContent', 9), + ('noTrustAnchor', 10), + ('notAuthorized', 11), + ('badDigestAlgorithm', 12), + ('badSignatureAlgorithm', 13), + ('unsupportedKeySize', 14), + ('signatureFailure', 15), + ('contentTypeMismatch', 16), + ('badEncryptedData', 17), + ('unprotectedAttrsPresent', 18), + ('badEncryptContent', 19), + ('badEncryptAlgorithm', 20), + ('missingCiphertext', 21), + ('noDecryptKey', 22), + ('decryptFailure', 23), + ('badCompressAlgorithm', 24), + ('missingCompressedContent', 25), + ('decompressFailure', 26), + ('wrongHardware', 27), + ('stalePackage', 28), + ('notInCommunity', 29), + ('unsupportedPackageType', 30), + ('missingDependency', 31), + ('wrongDependencyVersion', 32), + ('insufficientMemory', 33), 
+ ('badFirmware', 34), + ('unsupportedParameters', 35), + ('breaksDependency', 36), + ('otherError', 99) +) + + +class VendorLoadErrorCode(univ.Integer): + pass + + +# Wrapped Firmware Key Unsigned Attribute and Object Identifier + +id_aa_wrappedFirmwareKey = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.39') + +class WrappedFirmwareKey(rfc5652.EnvelopedData): + pass + + +# Firmware Package Information Signed Attribute and Object Identifier + +id_aa_firmwarePackageInfo = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.42') + +class FirmwarePackageInfo(univ.Sequence): + pass + +FirmwarePackageInfo.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('fwPkgType', univ.Integer()), + namedtype.OptionalNamedType('dependencies', univ.SequenceOf(componentType=PreferredOrLegacyPackageIdentifier())) +) + +FirmwarePackageInfo.sizeSpec = univ.Sequence.sizeSpec + constraint.ValueSizeConstraint(1, 2) + + +# Community Identifiers Signed Attribute and Object Identifier + +id_aa_communityIdentifiers = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.40') + +class CommunityIdentifiers(univ.SequenceOf): + pass + +CommunityIdentifiers.componentType = CommunityIdentifier() + + +# Implemented Compression Algorithms Signed Attribute and Object Identifier + +id_aa_implCompressAlgs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.43') + +class ImplementedCompressAlgorithms(univ.SequenceOf): + pass + +ImplementedCompressAlgorithms.componentType = univ.ObjectIdentifier() + + +# Implemented Cryptographic Algorithms Signed Attribute and Object Identifier + +id_aa_implCryptoAlgs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.38') + +class ImplementedCryptoAlgorithms(univ.SequenceOf): + pass + +ImplementedCryptoAlgorithms.componentType = univ.ObjectIdentifier() + + +# Decrypt Key Identifier Signed Attribute and Object Identifier + +id_aa_decryptKeyID = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.37') + +class DecryptKeyIdentifier(univ.OctetString): + pass + + +# Target Hardware Identifier Signed Attribute and Object Identifier + +id_aa_targetHardwareIDs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.36') + +class TargetHardwareIdentifiers(univ.SequenceOf): + pass + +TargetHardwareIdentifiers.componentType = univ.ObjectIdentifier() + + +# Firmware Package Identifier Signed Attribute and Object Identifier + +id_aa_firmwarePackageID = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.35') + +class FirmwarePackageIdentifier(univ.Sequence): + pass + +FirmwarePackageIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('name', PreferredOrLegacyPackageIdentifier()), + namedtype.OptionalNamedType('stale', PreferredOrLegacyStalePackageIdentifier()) +) + + +# Firmware Package Message Digest Signed Attribute and Object Identifier + +id_aa_fwPkgMessageDigest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.41') + +class FirmwarePackageMessageDigest(univ.Sequence): + pass + +FirmwarePackageMessageDigest.componentType = namedtype.NamedTypes( + namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('msgDigest', univ.OctetString()) +) + + +# Firmware Package Load Error Report Content Type and Object Identifier + +class FWErrorVersion(univ.Integer): + pass + +FWErrorVersion.namedValues = namedval.NamedValues( + ('v1', 1) +) + + +id_ct_firmwareLoadError = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.18') + +class FirmwarePackageLoadError(univ.Sequence): + pass + +FirmwarePackageLoadError.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', 
FWErrorVersion().subtype(value='v1')), + namedtype.NamedType('hwType', univ.ObjectIdentifier()), + namedtype.NamedType('hwSerialNum', univ.OctetString()), + namedtype.NamedType('errorCode', FirmwarePackageLoadErrorCode()), + namedtype.OptionalNamedType('vendorErrorCode', VendorLoadErrorCode()), + namedtype.OptionalNamedType('fwPkgName', PreferredOrLegacyPackageIdentifier()), + namedtype.OptionalNamedType('config', univ.SequenceOf(componentType=CurrentFWConfig()).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +# Firmware Package Load Receipt Content Type and Object Identifier + +class FWReceiptVersion(univ.Integer): + pass + +FWReceiptVersion.namedValues = namedval.NamedValues( + ('v1', 1) +) + + +id_ct_firmwareLoadReceipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.17') + +class FirmwarePackageLoadReceipt(univ.Sequence): + pass + +FirmwarePackageLoadReceipt.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', FWReceiptVersion().subtype(value='v1')), + namedtype.NamedType('hwType', univ.ObjectIdentifier()), + namedtype.NamedType('hwSerialNum', univ.OctetString()), + namedtype.NamedType('fwPkgName', PreferredOrLegacyPackageIdentifier()), + namedtype.OptionalNamedType('trustAnchorKeyID', univ.OctetString()), + namedtype.OptionalNamedType('decryptKeyID', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +# Firmware Package Content Type and Object Identifier + +id_ct_firmwarePackage = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.16') + +class FirmwarePkgData(univ.OctetString): + pass + + +# Other Name syntax for Hardware Module Name + +id_on_hardwareModuleName = univ.ObjectIdentifier('1.3.6.1.5.5.7.8.4') + +class HardwareModuleName(univ.Sequence): + pass + +HardwareModuleName.componentType = namedtype.NamedTypes( + namedtype.NamedType('hwType', univ.ObjectIdentifier()), + namedtype.NamedType('hwSerialNum', univ.OctetString()) +) + + +# Map of Attribute Type OIDs to Attributes is added to the +# ones that are in rfc5652.py + +_cmsAttributesMapUpdate = { + id_aa_wrappedFirmwareKey: WrappedFirmwareKey(), + id_aa_firmwarePackageInfo: FirmwarePackageInfo(), + id_aa_communityIdentifiers: CommunityIdentifiers(), + id_aa_implCompressAlgs: ImplementedCompressAlgorithms(), + id_aa_implCryptoAlgs: ImplementedCryptoAlgorithms(), + id_aa_decryptKeyID: DecryptKeyIdentifier(), + id_aa_targetHardwareIDs: TargetHardwareIdentifiers(), + id_aa_firmwarePackageID: FirmwarePackageIdentifier(), + id_aa_fwPkgMessageDigest: FirmwarePackageMessageDigest(), +} + +rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) + + +# Map of Content Type OIDs to Content Types is added to the +# ones that are in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_ct_firmwareLoadError: FirmwarePackageLoadError(), + id_ct_firmwareLoadReceipt: FirmwarePackageLoadReceipt(), + id_ct_firmwarePackage: FirmwarePkgData(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) + + +# Map of Other Name OIDs to Other Name is added to the +# ones that are in rfc5280.py + +_anotherNameMapUpdate = { + id_on_hardwareModuleName: HardwareModuleName(), +} + +rfc5280.anotherNameMap.update(_anotherNameMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4210.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4210.py new file mode 100644 index 0000000000000000000000000000000000000000..0935e3e9acea85dd2c723c922b3ae017afbc4d37 --- /dev/null +++ 
b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4210.py @@ -0,0 +1,803 @@ +# +# This file is part of pyasn1-modules software. +# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# Certificate Management Protocol structures as per RFC4210 +# +# Based on Alex Railean's work +# +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc2314 +from pyasn1_modules import rfc2459 +from pyasn1_modules import rfc2511 + +MAX = float('inf') + + +class KeyIdentifier(univ.OctetString): + pass + + +class CMPCertificate(rfc2459.Certificate): + pass + + +class OOBCert(CMPCertificate): + pass + + +class CertAnnContent(CMPCertificate): + pass + + +class PKIFreeText(univ.SequenceOf): + """ + PKIFreeText ::= SEQUENCE SIZE (1..MAX) OF UTF8String + """ + componentType = char.UTF8String() + sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX) + + +class PollRepContent(univ.SequenceOf): + """ + PollRepContent ::= SEQUENCE OF SEQUENCE { + certReqId INTEGER, + checkAfter INTEGER, -- time in seconds + reason PKIFreeText OPTIONAL + } + """ + + class CertReq(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('certReqId', univ.Integer()), + namedtype.NamedType('checkAfter', univ.Integer()), + namedtype.OptionalNamedType('reason', PKIFreeText()) + ) + + componentType = CertReq() + + +class PollReqContent(univ.SequenceOf): + """ + PollReqContent ::= SEQUENCE OF SEQUENCE { + certReqId INTEGER + } + + """ + + class CertReq(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('certReqId', univ.Integer()) + ) + + componentType = CertReq() + + +class InfoTypeAndValue(univ.Sequence): + """ + InfoTypeAndValue ::= SEQUENCE { + infoType OBJECT IDENTIFIER, + infoValue ANY DEFINED BY infoType OPTIONAL + }""" + componentType = namedtype.NamedTypes( + namedtype.NamedType('infoType', univ.ObjectIdentifier()), + namedtype.OptionalNamedType('infoValue', univ.Any()) + ) + + +class GenRepContent(univ.SequenceOf): + componentType = InfoTypeAndValue() + + +class GenMsgContent(univ.SequenceOf): + componentType = InfoTypeAndValue() + + +class PKIConfirmContent(univ.Null): + pass + + +class CRLAnnContent(univ.SequenceOf): + componentType = rfc2459.CertificateList() + + +class CAKeyUpdAnnContent(univ.Sequence): + """ + CAKeyUpdAnnContent ::= SEQUENCE { + oldWithNew CMPCertificate, + newWithOld CMPCertificate, + newWithNew CMPCertificate + } + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType('oldWithNew', CMPCertificate()), + namedtype.NamedType('newWithOld', CMPCertificate()), + namedtype.NamedType('newWithNew', CMPCertificate()) + ) + + +class RevDetails(univ.Sequence): + """ + RevDetails ::= SEQUENCE { + certDetails CertTemplate, + crlEntryDetails Extensions OPTIONAL + } + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType('certDetails', rfc2511.CertTemplate()), + namedtype.OptionalNamedType('crlEntryDetails', rfc2459.Extensions()) + ) + + +class RevReqContent(univ.SequenceOf): + componentType = RevDetails() + + +class CertOrEncCert(univ.Choice): + """ + CertOrEncCert ::= CHOICE { + certificate [0] CMPCertificate, + encryptedCert [1] EncryptedValue + } + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType('certificate', 
CMPCertificate().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('encryptedCert', rfc2511.EncryptedValue().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) + ) + + +class CertifiedKeyPair(univ.Sequence): + """ + CertifiedKeyPair ::= SEQUENCE { + certOrEncCert CertOrEncCert, + privateKey [0] EncryptedValue OPTIONAL, + publicationInfo [1] PKIPublicationInfo OPTIONAL + } + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType('certOrEncCert', CertOrEncCert()), + namedtype.OptionalNamedType('privateKey', rfc2511.EncryptedValue().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('publicationInfo', rfc2511.PKIPublicationInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) + ) + + +class POPODecKeyRespContent(univ.SequenceOf): + componentType = univ.Integer() + + +class Challenge(univ.Sequence): + """ + Challenge ::= SEQUENCE { + owf AlgorithmIdentifier OPTIONAL, + witness OCTET STRING, + challenge OCTET STRING + } + """ + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('owf', rfc2459.AlgorithmIdentifier()), + namedtype.NamedType('witness', univ.OctetString()), + namedtype.NamedType('challenge', univ.OctetString()) + ) + + +class PKIStatus(univ.Integer): + """ + PKIStatus ::= INTEGER { + accepted (0), + grantedWithMods (1), + rejection (2), + waiting (3), + revocationWarning (4), + revocationNotification (5), + keyUpdateWarning (6) + } + """ + namedValues = namedval.NamedValues( + ('accepted', 0), + ('grantedWithMods', 1), + ('rejection', 2), + ('waiting', 3), + ('revocationWarning', 4), + ('revocationNotification', 5), + ('keyUpdateWarning', 6) + ) + + +class PKIFailureInfo(univ.BitString): + """ + PKIFailureInfo ::= BIT STRING { + badAlg (0), + badMessageCheck (1), + badRequest (2), + badTime (3), + badCertId (4), + badDataFormat (5), + wrongAuthority (6), + incorrectData (7), + missingTimeStamp (8), + badPOP (9), + certRevoked (10), + certConfirmed (11), + wrongIntegrity (12), + badRecipientNonce (13), + timeNotAvailable (14), + unacceptedPolicy (15), + unacceptedExtension (16), + addInfoNotAvailable (17), + badSenderNonce (18), + badCertTemplate (19), + signerNotTrusted (20), + transactionIdInUse (21), + unsupportedVersion (22), + notAuthorized (23), + systemUnavail (24), + systemFailure (25), + duplicateCertReq (26) + """ + namedValues = namedval.NamedValues( + ('badAlg', 0), + ('badMessageCheck', 1), + ('badRequest', 2), + ('badTime', 3), + ('badCertId', 4), + ('badDataFormat', 5), + ('wrongAuthority', 6), + ('incorrectData', 7), + ('missingTimeStamp', 8), + ('badPOP', 9), + ('certRevoked', 10), + ('certConfirmed', 11), + ('wrongIntegrity', 12), + ('badRecipientNonce', 13), + ('timeNotAvailable', 14), + ('unacceptedPolicy', 15), + ('unacceptedExtension', 16), + ('addInfoNotAvailable', 17), + ('badSenderNonce', 18), + ('badCertTemplate', 19), + ('signerNotTrusted', 20), + ('transactionIdInUse', 21), + ('unsupportedVersion', 22), + ('notAuthorized', 23), + ('systemUnavail', 24), + ('systemFailure', 25), + ('duplicateCertReq', 26) + ) + + +class PKIStatusInfo(univ.Sequence): + """ + PKIStatusInfo ::= SEQUENCE { + status PKIStatus, + statusString PKIFreeText OPTIONAL, + failInfo PKIFailureInfo OPTIONAL + } + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType('status', PKIStatus()), + namedtype.OptionalNamedType('statusString', PKIFreeText()), + 
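+        # failInfo supplies the PKIFailureInfo bits when status reports a
+        # problem (e.g. rejection).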
namedtype.OptionalNamedType('failInfo', PKIFailureInfo()) + ) + + +class ErrorMsgContent(univ.Sequence): + """ + ErrorMsgContent ::= SEQUENCE { + pKIStatusInfo PKIStatusInfo, + errorCode INTEGER OPTIONAL, + -- implementation-specific error codes + errorDetails PKIFreeText OPTIONAL + -- implementation-specific error details + } + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType('pKIStatusInfo', PKIStatusInfo()), + namedtype.OptionalNamedType('errorCode', univ.Integer()), + namedtype.OptionalNamedType('errorDetails', PKIFreeText()) + ) + + +class CertStatus(univ.Sequence): + """ + CertStatus ::= SEQUENCE { + certHash OCTET STRING, + certReqId INTEGER, + statusInfo PKIStatusInfo OPTIONAL + } + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType('certHash', univ.OctetString()), + namedtype.NamedType('certReqId', univ.Integer()), + namedtype.OptionalNamedType('statusInfo', PKIStatusInfo()) + ) + + +class CertConfirmContent(univ.SequenceOf): + componentType = CertStatus() + + +class RevAnnContent(univ.Sequence): + """ + RevAnnContent ::= SEQUENCE { + status PKIStatus, + certId CertId, + willBeRevokedAt GeneralizedTime, + badSinceDate GeneralizedTime, + crlDetails Extensions OPTIONAL + } + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType('status', PKIStatus()), + namedtype.NamedType('certId', rfc2511.CertId()), + namedtype.NamedType('willBeRevokedAt', useful.GeneralizedTime()), + namedtype.NamedType('badSinceDate', useful.GeneralizedTime()), + namedtype.OptionalNamedType('crlDetails', rfc2459.Extensions()) + ) + + +class RevRepContent(univ.Sequence): + """ + RevRepContent ::= SEQUENCE { + status SEQUENCE SIZE (1..MAX) OF PKIStatusInfo, + revCerts [0] SEQUENCE SIZE (1..MAX) OF CertId + OPTIONAL, + crls [1] SEQUENCE SIZE (1..MAX) OF CertificateList + OPTIONAL + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType( + 'status', univ.SequenceOf( + componentType=PKIStatusInfo(), + sizeSpec=constraint.ValueSizeConstraint(1, MAX) + ) + ), + namedtype.OptionalNamedType( + 'revCerts', univ.SequenceOf(componentType=rfc2511.CertId()).subtype( + sizeSpec=constraint.ValueSizeConstraint(1, MAX), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0) + ) + ), + namedtype.OptionalNamedType( + 'crls', univ.SequenceOf(componentType=rfc2459.CertificateList()).subtype( + sizeSpec=constraint.ValueSizeConstraint(1, MAX), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1) + ) + ) + ) + + +class KeyRecRepContent(univ.Sequence): + """ + KeyRecRepContent ::= SEQUENCE { + status PKIStatusInfo, + newSigCert [0] CMPCertificate OPTIONAL, + caCerts [1] SEQUENCE SIZE (1..MAX) OF + CMPCertificate OPTIONAL, + keyPairHist [2] SEQUENCE SIZE (1..MAX) OF + CertifiedKeyPair OPTIONAL + } + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType('status', PKIStatusInfo()), + namedtype.OptionalNamedType( + 'newSigCert', CMPCertificate().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0) + ) + ), + namedtype.OptionalNamedType( + 'caCerts', univ.SequenceOf(componentType=CMPCertificate()).subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), + sizeSpec=constraint.ValueSizeConstraint(1, MAX) + ) + ), + namedtype.OptionalNamedType('keyPairHist', univ.SequenceOf(componentType=CertifiedKeyPair()).subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2), + sizeSpec=constraint.ValueSizeConstraint(1, MAX)) + ) + ) + + +class CertResponse(univ.Sequence): + """ + 
CertResponse ::= SEQUENCE { + certReqId INTEGER, + status PKIStatusInfo, + certifiedKeyPair CertifiedKeyPair OPTIONAL, + rspInfo OCTET STRING OPTIONAL + } + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType('certReqId', univ.Integer()), + namedtype.NamedType('status', PKIStatusInfo()), + namedtype.OptionalNamedType('certifiedKeyPair', CertifiedKeyPair()), + namedtype.OptionalNamedType('rspInfo', univ.OctetString()) + ) + + +class CertRepMessage(univ.Sequence): + """ + CertRepMessage ::= SEQUENCE { + caPubs [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate + OPTIONAL, + response SEQUENCE OF CertResponse + } + """ + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType( + 'caPubs', univ.SequenceOf( + componentType=CMPCertificate() + ).subtype(sizeSpec=constraint.ValueSizeConstraint(1, MAX), + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)) + ), + namedtype.NamedType('response', univ.SequenceOf(componentType=CertResponse())) + ) + + +class POPODecKeyChallContent(univ.SequenceOf): + componentType = Challenge() + + +class OOBCertHash(univ.Sequence): + """ + OOBCertHash ::= SEQUENCE { + hashAlg [0] AlgorithmIdentifier OPTIONAL, + certId [1] CertId OPTIONAL, + hashVal BIT STRING + } + """ + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType( + 'hashAlg', rfc2459.AlgorithmIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)) + ), + namedtype.OptionalNamedType( + 'certId', rfc2511.CertId().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)) + ), + namedtype.NamedType('hashVal', univ.BitString()) + ) + + +# pyasn1 does not naturally handle recursive definitions, thus this hack: +# NestedMessageContent ::= PKIMessages +class NestedMessageContent(univ.SequenceOf): + """ + NestedMessageContent ::= PKIMessages + """ + componentType = univ.Any() + + +class DHBMParameter(univ.Sequence): + """ + DHBMParameter ::= SEQUENCE { + owf AlgorithmIdentifier, + -- AlgId for a One-Way Function (SHA-1 recommended) + mac AlgorithmIdentifier + -- the MAC AlgId (e.g., DES-MAC, Triple-DES-MAC [PKCS11], + } -- or HMAC [RFC2104, RFC2202]) + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()), + namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier()) + ) + + +id_DHBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.30') + + +class PBMParameter(univ.Sequence): + """ + PBMParameter ::= SEQUENCE { + salt OCTET STRING, + owf AlgorithmIdentifier, + iterationCount INTEGER, + mac AlgorithmIdentifier + } + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType( + 'salt', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 128)) + ), + namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()), + namedtype.NamedType('iterationCount', univ.Integer()), + namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier()) + ) + + +id_PasswordBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.13') + + +class PKIProtection(univ.BitString): + pass + + +# pyasn1 does not naturally handle recursive definitions, thus this hack: +# NestedMessageContent ::= PKIMessages +nestedMessageContent = NestedMessageContent().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 20)) + + +class PKIBody(univ.Choice): + """ + PKIBody ::= CHOICE { -- message-specific body elements + ir [0] CertReqMessages, --Initialization Request + ip [1] CertRepMessage, --Initialization Response + cr [2] CertReqMessages, --Certification 
Request + cp [3] CertRepMessage, --Certification Response + p10cr [4] CertificationRequest, --imported from [PKCS10] + popdecc [5] POPODecKeyChallContent, --pop Challenge + popdecr [6] POPODecKeyRespContent, --pop Response + kur [7] CertReqMessages, --Key Update Request + kup [8] CertRepMessage, --Key Update Response + krr [9] CertReqMessages, --Key Recovery Request + krp [10] KeyRecRepContent, --Key Recovery Response + rr [11] RevReqContent, --Revocation Request + rp [12] RevRepContent, --Revocation Response + ccr [13] CertReqMessages, --Cross-Cert. Request + ccp [14] CertRepMessage, --Cross-Cert. Response + ckuann [15] CAKeyUpdAnnContent, --CA Key Update Ann. + cann [16] CertAnnContent, --Certificate Ann. + rann [17] RevAnnContent, --Revocation Ann. + crlann [18] CRLAnnContent, --CRL Announcement + pkiconf [19] PKIConfirmContent, --Confirmation + nested [20] NestedMessageContent, --Nested Message + genm [21] GenMsgContent, --General Message + genp [22] GenRepContent, --General Response + error [23] ErrorMsgContent, --Error Message + certConf [24] CertConfirmContent, --Certificate confirm + pollReq [25] PollReqContent, --Polling request + pollRep [26] PollRepContent --Polling response + + """ + componentType = namedtype.NamedTypes( + namedtype.NamedType( + 'ir', rfc2511.CertReqMessages().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0) + ) + ), + namedtype.NamedType( + 'ip', CertRepMessage().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1) + ) + ), + namedtype.NamedType( + 'cr', rfc2511.CertReqMessages().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2) + ) + ), + namedtype.NamedType( + 'cp', CertRepMessage().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3) + ) + ), + namedtype.NamedType( + 'p10cr', rfc2314.CertificationRequest().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4) + ) + ), + namedtype.NamedType( + 'popdecc', POPODecKeyChallContent().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5) + ) + ), + namedtype.NamedType( + 'popdecr', POPODecKeyRespContent().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6) + ) + ), + namedtype.NamedType( + 'kur', rfc2511.CertReqMessages().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7) + ) + ), + namedtype.NamedType( + 'kup', CertRepMessage().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8) + ) + ), + namedtype.NamedType( + 'krr', rfc2511.CertReqMessages().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9) + ) + ), + namedtype.NamedType( + 'krp', KeyRecRepContent().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 10) + ) + ), + namedtype.NamedType( + 'rr', RevReqContent().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 11) + ) + ), + namedtype.NamedType( + 'rp', RevRepContent().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 12) + ) + ), + namedtype.NamedType( + 'ccr', rfc2511.CertReqMessages().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 13) + ) + ), + namedtype.NamedType( + 'ccp', CertRepMessage().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 14) + ) + ), + namedtype.NamedType( + 'ckuann', CAKeyUpdAnnContent().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 
15)
+            )
+        ),
+        namedtype.NamedType(
+            'cann', CertAnnContent().subtype(
+                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 16)
+            )
+        ),
+        namedtype.NamedType(
+            'rann', RevAnnContent().subtype(
+                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 17)
+            )
+        ),
+        namedtype.NamedType(
+            'crlann', CRLAnnContent().subtype(
+                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 18)
+            )
+        ),
+        namedtype.NamedType(
+            'pkiconf', PKIConfirmContent().subtype(
+                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 19)
+            )
+        ),
+        namedtype.NamedType(
+            'nested', nestedMessageContent
+        ),
+        # namedtype.NamedType('nested', NestedMessageContent().subtype(
+        #     explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,20)
+        #     )
+        # ),
+        namedtype.NamedType(
+            'genm', GenMsgContent().subtype(
+                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 21)
+            )
+        ),
+        namedtype.NamedType(
+            'genp', GenRepContent().subtype(
+                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 22)
+            )
+        ),
+        namedtype.NamedType(
+            'error', ErrorMsgContent().subtype(
+                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 23)
+            )
+        ),
+        namedtype.NamedType(
+            'certConf', CertConfirmContent().subtype(
+                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 24)
+            )
+        ),
+        namedtype.NamedType(
+            'pollReq', PollReqContent().subtype(
+                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 25)
+            )
+        ),
+        namedtype.NamedType(
+            'pollRep', PollRepContent().subtype(
+                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 26)
+            )
+        )
+    )
+
+
+class PKIHeader(univ.Sequence):
+    """
+    PKIHeader ::= SEQUENCE {
+    pvno INTEGER { cmp1999(1), cmp2000(2) },
+    sender GeneralName,
+    recipient GeneralName,
+    messageTime [0] GeneralizedTime OPTIONAL,
+    protectionAlg [1] AlgorithmIdentifier OPTIONAL,
+    senderKID [2] KeyIdentifier OPTIONAL,
+    recipKID [3] KeyIdentifier OPTIONAL,
+    transactionID [4] OCTET STRING OPTIONAL,
+    senderNonce [5] OCTET STRING OPTIONAL,
+    recipNonce [6] OCTET STRING OPTIONAL,
+    freeText [7] PKIFreeText OPTIONAL,
+    generalInfo [8] SEQUENCE SIZE (1..MAX) OF
+    InfoTypeAndValue OPTIONAL
+    }
+
+    """
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType(
+            'pvno', univ.Integer(
+                namedValues=namedval.NamedValues(('cmp1999', 1), ('cmp2000', 2))
+            )
+        ),
+        namedtype.NamedType('sender', rfc2459.GeneralName()),
+        namedtype.NamedType('recipient', rfc2459.GeneralName()),
+        namedtype.OptionalNamedType('messageTime', useful.GeneralizedTime().subtype(
+            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+        namedtype.OptionalNamedType('protectionAlg', rfc2459.AlgorithmIdentifier().subtype(
+            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+        namedtype.OptionalNamedType('senderKID', rfc2459.KeyIdentifier().subtype(
+            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+        namedtype.OptionalNamedType('recipKID', rfc2459.KeyIdentifier().subtype(
+            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+        namedtype.OptionalNamedType('transactionID', univ.OctetString().subtype(
+            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+        namedtype.OptionalNamedType('senderNonce', univ.OctetString().subtype(
+            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+        namedtype.OptionalNamedType('recipNonce', univ.OctetString().subtype(
+            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
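+        # senderNonce and recipNonce bind a response to its request; the
+        # remaining optional fields carry free-form text and extra
+        # InfoTypeAndValue pairs.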
+        namedtype.OptionalNamedType('freeText', PKIFreeText().subtype(
+            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))),
+        namedtype.OptionalNamedType('generalInfo',
+                                    univ.SequenceOf(
+                                        componentType=InfoTypeAndValue().subtype(
+                                            sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+                                        )
+                                    ).subtype(
+                                        explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))
+                                    )
+    )
+
+
+class ProtectedPart(univ.Sequence):
+    """
+    ProtectedPart ::= SEQUENCE {
+        header PKIHeader,
+        body PKIBody
+    }
+    """
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('header', PKIHeader()),
+        namedtype.NamedType('body', PKIBody())
+    )
+
+
+class PKIMessage(univ.Sequence):
+    """
+    PKIMessage ::= SEQUENCE {
+    header PKIHeader,
+    body PKIBody,
+    protection [0] PKIProtection OPTIONAL,
+    extraCerts [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate OPTIONAL
+    }"""
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('header', PKIHeader()),
+        namedtype.NamedType('body', PKIBody()),
+        namedtype.OptionalNamedType('protection', PKIProtection().subtype(
+            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+        namedtype.OptionalNamedType('extraCerts',
+                                    univ.SequenceOf(
+                                        componentType=CMPCertificate()
+                                    ).subtype(
+                                        sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+                                        explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+                                    )
+                                    )
+    )
+
+
+class PKIMessages(univ.SequenceOf):
+    """
+    PKIMessages ::= SEQUENCE SIZE (1..MAX) OF PKIMessage
+    """
+    componentType = PKIMessage()
+    sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+# pyasn1 does not naturally handle recursive definitions, thus this hack:
+# NestedMessageContent ::= PKIMessages
+NestedMessageContent._componentType = PKIMessages()
+nestedMessageContent._componentType = PKIMessages()
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4211.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4211.py
new file mode 100644
index 0000000000000000000000000000000000000000..c47b3c5dd25a4f11d17ed43c7a5383278af090b9
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4211.py
@@ -0,0 +1,396 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# Internet X.509 Public Key Infrastructure Certificate Request +# Message Format (CRMF) +# +# ASN.1 source from: +# http://www.ietf.org/rfc/rfc4211.txt +# +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc3280 +from pyasn1_modules import rfc3852 + +MAX = float('inf') + + +def _buildOid(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + + return univ.ObjectIdentifier(output) + + +id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7) + +id_pkip = _buildOid(id_pkix, 5) + +id_regCtrl = _buildOid(id_pkip, 1) + + +class SinglePubInfo(univ.Sequence): + pass + + +SinglePubInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('pubMethod', univ.Integer( + namedValues=namedval.NamedValues(('dontCare', 0), ('x500', 1), ('web', 2), ('ldap', 3)))), + namedtype.OptionalNamedType('pubLocation', rfc3280.GeneralName()) +) + + +class UTF8Pairs(char.UTF8String): + pass + + +class PKMACValue(univ.Sequence): + pass + + +PKMACValue.componentType = namedtype.NamedTypes( + namedtype.NamedType('algId', rfc3280.AlgorithmIdentifier()), + namedtype.NamedType('value', univ.BitString()) +) + + +class POPOSigningKeyInput(univ.Sequence): + pass + + +POPOSigningKeyInput.componentType = namedtype.NamedTypes( + namedtype.NamedType( + 'authInfo', univ.Choice( + componentType=namedtype.NamedTypes( + namedtype.NamedType( + 'sender', rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)) + ), + namedtype.NamedType( + 'publicKeyMAC', PKMACValue() + ) + ) + ) + ), + namedtype.NamedType('publicKey', rfc3280.SubjectPublicKeyInfo()) +) + + +class POPOSigningKey(univ.Sequence): + pass + + +POPOSigningKey.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('poposkInput', POPOSigningKeyInput().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('algorithmIdentifier', rfc3280.AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()) +) + + +class Attributes(univ.SetOf): + pass + + +Attributes.componentType = rfc3280.Attribute() + + +class PrivateKeyInfo(univ.Sequence): + pass + + +PrivateKeyInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', univ.Integer()), + namedtype.NamedType('privateKeyAlgorithm', rfc3280.AlgorithmIdentifier()), + namedtype.NamedType('privateKey', univ.OctetString()), + namedtype.OptionalNamedType('attributes', + Attributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class EncryptedValue(univ.Sequence): + pass + + +EncryptedValue.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('intendedAlg', rfc3280.AlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('symmAlg', rfc3280.AlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('encSymmKey', univ.BitString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('keyAlg', rfc3280.AlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 
3))), + namedtype.OptionalNamedType('valueHint', univ.OctetString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))), + namedtype.NamedType('encValue', univ.BitString()) +) + + +class EncryptedKey(univ.Choice): + pass + + +EncryptedKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('encryptedValue', EncryptedValue()), + namedtype.NamedType('envelopedData', rfc3852.EnvelopedData().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class KeyGenParameters(univ.OctetString): + pass + + +class PKIArchiveOptions(univ.Choice): + pass + + +PKIArchiveOptions.componentType = namedtype.NamedTypes( + namedtype.NamedType('encryptedPrivKey', + EncryptedKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('keyGenParameters', + KeyGenParameters().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('archiveRemGenPrivKey', + univ.Boolean().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) +) + +id_regCtrl_authenticator = _buildOid(id_regCtrl, 2) + +id_regInfo = _buildOid(id_pkip, 2) + +id_regInfo_certReq = _buildOid(id_regInfo, 2) + + +class ProtocolEncrKey(rfc3280.SubjectPublicKeyInfo): + pass + + +class Authenticator(char.UTF8String): + pass + + +class SubsequentMessage(univ.Integer): + pass + + +SubsequentMessage.namedValues = namedval.NamedValues( + ('encrCert', 0), + ('challengeResp', 1) +) + + +class AttributeTypeAndValue(univ.Sequence): + pass + + +AttributeTypeAndValue.componentType = namedtype.NamedTypes( + namedtype.NamedType('type', univ.ObjectIdentifier()), + namedtype.NamedType('value', univ.Any()) +) + + +class POPOPrivKey(univ.Choice): + pass + + +POPOPrivKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('thisMessage', + univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('subsequentMessage', + SubsequentMessage().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('dhMAC', + univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('agreeMAC', + PKMACValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.NamedType('encryptedKey', rfc3852.EnvelopedData().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))) +) + + +class ProofOfPossession(univ.Choice): + pass + + +ProofOfPossession.componentType = namedtype.NamedTypes( + namedtype.NamedType('raVerified', + univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('signature', POPOSigningKey().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.NamedType('keyEncipherment', + POPOPrivKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.NamedType('keyAgreement', + POPOPrivKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))) +) + + +class OptionalValidity(univ.Sequence): + pass + + +OptionalValidity.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('notBefore', rfc3280.Time().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('notAfter', rfc3280.Time().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) +) + + +class 
CertTemplate(univ.Sequence): + pass + + +CertTemplate.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('version', rfc3280.Version().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('serialNumber', univ.Integer().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('signingAlg', rfc3280.AlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('issuer', rfc3280.Name().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.OptionalNamedType('validity', OptionalValidity().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))), + namedtype.OptionalNamedType('subject', rfc3280.Name().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))), + namedtype.OptionalNamedType('publicKey', rfc3280.SubjectPublicKeyInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))), + namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))), + namedtype.OptionalNamedType('subjectUID', rfc3280.UniqueIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))), + namedtype.OptionalNamedType('extensions', rfc3280.Extensions().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 9))) +) + + +class Controls(univ.SequenceOf): + pass + + +Controls.componentType = AttributeTypeAndValue() +Controls.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class CertRequest(univ.Sequence): + pass + + +CertRequest.componentType = namedtype.NamedTypes( + namedtype.NamedType('certReqId', univ.Integer()), + namedtype.NamedType('certTemplate', CertTemplate()), + namedtype.OptionalNamedType('controls', Controls()) +) + + +class CertReqMsg(univ.Sequence): + pass + + +CertReqMsg.componentType = namedtype.NamedTypes( + namedtype.NamedType('certReq', CertRequest()), + namedtype.OptionalNamedType('popo', ProofOfPossession()), + namedtype.OptionalNamedType('regInfo', univ.SequenceOf(componentType=AttributeTypeAndValue())) +) + + +class CertReqMessages(univ.SequenceOf): + pass + + +CertReqMessages.componentType = CertReqMsg() +CertReqMessages.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class CertReq(CertRequest): + pass + + +id_regCtrl_pkiPublicationInfo = _buildOid(id_regCtrl, 3) + + +class CertId(univ.Sequence): + pass + + +CertId.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuer', rfc3280.GeneralName()), + namedtype.NamedType('serialNumber', univ.Integer()) +) + + +class OldCertId(CertId): + pass + + +class PKIPublicationInfo(univ.Sequence): + pass + + +PKIPublicationInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('action', + univ.Integer(namedValues=namedval.NamedValues(('dontPublish', 0), ('pleasePublish', 1)))), + namedtype.OptionalNamedType('pubInfos', univ.SequenceOf(componentType=SinglePubInfo())) +) + + +class EncKeyWithID(univ.Sequence): + pass + + +EncKeyWithID.componentType = namedtype.NamedTypes( + namedtype.NamedType('privateKey', PrivateKeyInfo()), + namedtype.OptionalNamedType( + 'identifier', univ.Choice( + componentType=namedtype.NamedTypes( + namedtype.NamedType('string', char.UTF8String()), + namedtype.NamedType('generalName', rfc3280.GeneralName()) + ) + ) + ) +) + +id_regCtrl_protocolEncrKey = 
_buildOid(id_regCtrl, 6)
+
+id_regCtrl_oldCertID = _buildOid(id_regCtrl, 5)
+
+id_smime = _buildOid(1, 2, 840, 113549, 1, 9, 16)
+
+
+class PBMParameter(univ.Sequence):
+    pass
+
+
+PBMParameter.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('salt', univ.OctetString()),
+    namedtype.NamedType('owf', rfc3280.AlgorithmIdentifier()),
+    namedtype.NamedType('iterationCount', univ.Integer()),
+    namedtype.NamedType('mac', rfc3280.AlgorithmIdentifier())
+)
+
+id_regCtrl_regToken = _buildOid(id_regCtrl, 1)
+
+id_regCtrl_pkiArchiveOptions = _buildOid(id_regCtrl, 4)
+
+id_regInfo_utf8Pairs = _buildOid(id_regInfo, 1)
+
+id_ct = _buildOid(id_smime, 1)
+
+id_ct_encKeyWithID = _buildOid(id_ct, 21)
+
+
+class RegToken(char.UTF8String):
+    pass
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4334.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4334.py
new file mode 100644
index 0000000000000000000000000000000000000000..44cd31b16699ea923fca604e209f0bb99ff5106e
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4334.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Extensions and Attributes Supporting Authentication
+# in PPP and Wireless LAN Networks
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4334.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# OID Arcs
+
+id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
+
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+
+id_aca = univ.ObjectIdentifier('1.3.6.1.5.5.7.10')
+
+
+# Extended Key Usage Values
+
+id_kp_eapOverPPP = id_kp + (13, )
+
+id_kp_eapOverLAN = id_kp + (14, )
+
+
+# Wireless LAN SSID Extension
+
+id_pe_wlanSSID = id_pe + (13, )
+
+class SSID(univ.OctetString):
+    subtypeSpec = constraint.ValueSizeConstraint(1, 32)
+
+
+class SSIDList(univ.SequenceOf):
+    componentType = SSID()
+    subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Wireless LAN SSID Attribute Certificate Attribute
+
+id_aca_wlanSSID = id_aca + (7, )
+
+
+# Map of Certificate Extension OIDs to Extensions
+# To be added to the ones that are in rfc5280.py
+
+_certificateExtensionsMap = {
+    id_pe_wlanSSID: SSIDList(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
+
+
+# Map of AttributeType OIDs to AttributeValue added to the
+# ones that are in rfc5280.py
+
+_certificateAttributesMapUpdate = {
+    id_aca_wlanSSID: SSIDList(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
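
# A minimal usage sketch, assuming pyasn1 is installed and rfc4334 is
# importable; the SSID value is an invented example. The map updates above
# let a generic certificate parser resolve the id-pe-wlanSSID extension
# payload to SSIDList.
from pyasn1.codec.der.decoder import decode as der_decode
from pyasn1.codec.der.encoder import encode as der_encode
from pyasn1_modules import rfc4334

ssids = rfc4334.SSIDList()
ssids.append(rfc4334.SSID(b'example-ssid'))   # 1..32 octets per the constraint

substrate = der_encode(ssids)
decoded, rest = der_decode(substrate, asn1Spec=rfc4334.SSIDList())
assert der_encode(decoded) == substrate and not rest

diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4357.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4357.py
new file mode 100644
index 0000000000000000000000000000000000000000..42b9e3ecb87a33027a79fcd5fff4fdf67c1f043e
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4357.py
@@ -0,0 +1,477 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.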
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Additional Cryptographic Algorithms for Use with GOST 28147-89, +# GOST R 34.10-94, GOST R 34.10-2001, and GOST R 34.11-94 Algorithms +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc4357.txt +# https://www.rfc-editor.org/errata/eid5927 +# https://www.rfc-editor.org/errata/eid5928 +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +# Import from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + + +# Object Identifiers + +id_CryptoPro = univ.ObjectIdentifier((1, 2, 643, 2, 2,)) + + +id_CryptoPro_modules = id_CryptoPro + (1, 1,) + +id_CryptoPro_extensions = id_CryptoPro + (34,) + +id_CryptoPro_policyIds = id_CryptoPro + (38,) + +id_CryptoPro_policyQt = id_CryptoPro + (39,) + + +cryptographic_Gost_Useful_Definitions = id_CryptoPro_modules + (0, 1,) + +gostR3411_94_DigestSyntax = id_CryptoPro_modules + (1, 1,) + +gostR3410_94_PKISyntax = id_CryptoPro_modules + (2, 1,) + +gostR3410_94_SignatureSyntax = id_CryptoPro_modules + (3, 1,) + +gost28147_89_EncryptionSyntax = id_CryptoPro_modules + (4, 1,) + +gostR3410_EncryptionSyntax = id_CryptoPro_modules + (5, 2,) + +gost28147_89_ParamSetSyntax = id_CryptoPro_modules + (6, 1,) + +gostR3411_94_ParamSetSyntax = id_CryptoPro_modules + (7, 1,) + +gostR3410_94_ParamSetSyntax = id_CryptoPro_modules + (8, 1, 1) + +gostR3410_2001_PKISyntax = id_CryptoPro_modules + (9, 1,) + +gostR3410_2001_SignatureSyntax = id_CryptoPro_modules + (10, 1,) + +gostR3410_2001_ParamSetSyntax = id_CryptoPro_modules + (12, 1,) + +gost_CryptoPro_ExtendedKeyUsage = id_CryptoPro_modules + (13, 1,) + +gost_CryptoPro_PrivateKey = id_CryptoPro_modules + (14, 1,) + +gost_CryptoPro_PKIXCMP = id_CryptoPro_modules + (15, 1,) + +gost_CryptoPro_TLS = id_CryptoPro_modules + (16, 1,) + +gost_CryptoPro_Policy = id_CryptoPro_modules + (17, 1,) + +gost_CryptoPro_Constants = id_CryptoPro_modules + (18, 1,) + + +id_CryptoPro_algorithms = id_CryptoPro + +id_GostR3411_94_with_GostR3410_2001 = id_CryptoPro_algorithms + (3,) + +id_GostR3411_94_with_GostR3410_94 = id_CryptoPro_algorithms + (4,) + +id_GostR3411_94 = id_CryptoPro_algorithms + (9,) + +id_Gost28147_89_None_KeyMeshing = id_CryptoPro_algorithms + (14, 0,) + +id_Gost28147_89_CryptoPro_KeyMeshing = id_CryptoPro_algorithms + (14, 1,) + +id_GostR3410_2001 = id_CryptoPro_algorithms + (19,) + +id_GostR3410_94 = id_CryptoPro_algorithms + (20,) + +id_Gost28147_89 = id_CryptoPro_algorithms + (21,) + +id_Gost28147_89_MAC = id_CryptoPro_algorithms + (22,) + +id_CryptoPro_hashes = id_CryptoPro_algorithms + (30,) + +id_CryptoPro_encrypts = id_CryptoPro_algorithms + (31,) + +id_CryptoPro_signs = id_CryptoPro_algorithms + (32,) + +id_CryptoPro_exchanges = id_CryptoPro_algorithms + (33,) + +id_CryptoPro_ecc_signs = id_CryptoPro_algorithms + (35,) + +id_CryptoPro_ecc_exchanges = id_CryptoPro_algorithms + (36,) + +id_CryptoPro_private_keys = id_CryptoPro_algorithms + (37,) + +id_CryptoPro_pkixcmp_infos = id_CryptoPro_algorithms + (41,) + +id_CryptoPro_audit_service_types = id_CryptoPro_algorithms + (42,) + +id_CryptoPro_audit_record_types = id_CryptoPro_algorithms + (43,) + +id_CryptoPro_attributes = id_CryptoPro_algorithms + (44,) + +id_CryptoPro_name_service_types = id_CryptoPro_algorithms + (45,) + +id_GostR3410_2001DH = id_CryptoPro_algorithms + (98,) + 
+id_GostR3410_94DH = id_CryptoPro_algorithms + (99,) + + +id_Gost28147_89_TestParamSet = id_CryptoPro_encrypts + (0,) + +id_Gost28147_89_CryptoPro_A_ParamSet = id_CryptoPro_encrypts + (1,) + +id_Gost28147_89_CryptoPro_B_ParamSet = id_CryptoPro_encrypts + (2,) + +id_Gost28147_89_CryptoPro_C_ParamSet = id_CryptoPro_encrypts + (3,) + +id_Gost28147_89_CryptoPro_D_ParamSet = id_CryptoPro_encrypts + (4,) + +id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet = id_CryptoPro_encrypts + (5,) + +id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet = id_CryptoPro_encrypts + (6,) + +id_Gost28147_89_CryptoPro_RIC_1_ParamSet = id_CryptoPro_encrypts + (7,) + + +id_GostR3410_2001_TestParamSet = id_CryptoPro_ecc_signs + (0,) + +id_GostR3410_2001_CryptoPro_A_ParamSet = id_CryptoPro_ecc_signs + (1,) + +id_GostR3410_2001_CryptoPro_B_ParamSet = id_CryptoPro_ecc_signs + (2,) + +id_GostR3410_2001_CryptoPro_C_ParamSet = id_CryptoPro_ecc_signs + (3,) + + +id_GostR3410_2001_CryptoPro_XchA_ParamSet = id_CryptoPro_ecc_exchanges + (0,) + +id_GostR3410_2001_CryptoPro_XchB_ParamSet = id_CryptoPro_ecc_exchanges + (1,) + + +id_GostR3410_94_TestParamSet = id_CryptoPro_signs + (0,) + +id_GostR3410_94_CryptoPro_A_ParamSet = id_CryptoPro_signs + (2,) + +id_GostR3410_94_CryptoPro_B_ParamSet = id_CryptoPro_signs + (3,) + +id_GostR3410_94_CryptoPro_C_ParamSet = id_CryptoPro_signs + (4,) + +id_GostR3410_94_CryptoPro_D_ParamSet = id_CryptoPro_signs + (5,) + + +id_GostR3410_94_CryptoPro_XchA_ParamSet = id_CryptoPro_exchanges + (1,) + +id_GostR3410_94_CryptoPro_XchB_ParamSet = id_CryptoPro_exchanges + (2,) + +id_GostR3410_94_CryptoPro_XchC_ParamSet = id_CryptoPro_exchanges + (3,) + + +id_GostR3410_94_a = id_GostR3410_94 + (1,) + +id_GostR3410_94_aBis = id_GostR3410_94 + (2,) + +id_GostR3410_94_b = id_GostR3410_94 + (3,) + +id_GostR3410_94_bBis = id_GostR3410_94 + (4,) + + +id_GostR3411_94_TestParamSet = id_CryptoPro_hashes + (0,) + +id_GostR3411_94_CryptoProParamSet = id_CryptoPro_hashes + (1,) + + + + +class Gost28147_89_ParamSet(univ.ObjectIdentifier): + pass + +Gost28147_89_ParamSet.subtypeSpec = constraint.SingleValueConstraint( + id_Gost28147_89_TestParamSet, + id_Gost28147_89_CryptoPro_A_ParamSet, + id_Gost28147_89_CryptoPro_B_ParamSet, + id_Gost28147_89_CryptoPro_C_ParamSet, + id_Gost28147_89_CryptoPro_D_ParamSet, + id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet, + id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet, + id_Gost28147_89_CryptoPro_RIC_1_ParamSet +) + + +class Gost28147_89_BlobParameters(univ.Sequence): + pass + +Gost28147_89_BlobParameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('encryptionParamSet', Gost28147_89_ParamSet()) +) + + +class Gost28147_89_MAC(univ.OctetString): + pass + +Gost28147_89_MAC.subtypeSpec = constraint.ValueSizeConstraint(1, 4) + + +class Gost28147_89_Key(univ.OctetString): + pass + +Gost28147_89_Key.subtypeSpec = constraint.ValueSizeConstraint(32, 32) + + +class Gost28147_89_EncryptedKey(univ.Sequence): + pass + +Gost28147_89_EncryptedKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('encryptedKey', Gost28147_89_Key()), + namedtype.OptionalNamedType('maskKey', Gost28147_89_Key().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('macKey', Gost28147_89_MAC()) +) + + +class Gost28147_89_IV(univ.OctetString): + pass + +Gost28147_89_IV.subtypeSpec = constraint.ValueSizeConstraint(8, 8) + + +class Gost28147_89_UZ(univ.OctetString): + pass + +Gost28147_89_UZ.subtypeSpec = constraint.ValueSizeConstraint(64, 64) + + +class 
Gost28147_89_ParamSetParameters(univ.Sequence): + pass + +Gost28147_89_ParamSetParameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('eUZ', Gost28147_89_UZ()), + namedtype.NamedType('mode', + univ.Integer(namedValues=namedval.NamedValues( + ('gost28147-89-CNT', 0), + ('gost28147-89-CFB', 1), + ('cryptoPro-CBC', 2) + ))), + namedtype.NamedType('shiftBits', + univ.Integer(namedValues=namedval.NamedValues( + ('gost28147-89-block', 64) + ))), + namedtype.NamedType('keyMeshing', AlgorithmIdentifier()) +) + + +class Gost28147_89_Parameters(univ.Sequence): + pass + +Gost28147_89_Parameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('iv', Gost28147_89_IV()), + namedtype.NamedType('encryptionParamSet', Gost28147_89_ParamSet()) +) + + +class GostR3410_2001_CertificateSignature(univ.BitString): + pass + +GostR3410_2001_CertificateSignature.subtypeSpec=constraint.ValueSizeConstraint(256, 512) + + +class GostR3410_2001_ParamSetParameters(univ.Sequence): + pass + +GostR3410_2001_ParamSetParameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('a', univ.Integer()), + namedtype.NamedType('b', univ.Integer()), + namedtype.NamedType('p', univ.Integer()), + namedtype.NamedType('q', univ.Integer()), + namedtype.NamedType('x', univ.Integer()), + namedtype.NamedType('y', univ.Integer()) +) + + +class GostR3410_2001_PublicKey(univ.OctetString): + pass + +GostR3410_2001_PublicKey.subtypeSpec = constraint.ValueSizeConstraint(64, 64) + + +class GostR3410_2001_PublicKeyParameters(univ.Sequence): + pass + +GostR3410_2001_PublicKeyParameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('publicKeyParamSet', univ.ObjectIdentifier().subtype( + subtypeSpec=constraint.SingleValueConstraint( + id_GostR3410_2001_TestParamSet, + id_GostR3410_2001_CryptoPro_A_ParamSet, + id_GostR3410_2001_CryptoPro_B_ParamSet, + id_GostR3410_2001_CryptoPro_C_ParamSet, + id_GostR3410_2001_CryptoPro_XchA_ParamSet, + id_GostR3410_2001_CryptoPro_XchB_ParamSet + ))), + namedtype.NamedType('digestParamSet', univ.ObjectIdentifier().subtype( + subtypeSpec=constraint.SingleValueConstraint( + id_GostR3411_94_TestParamSet, + id_GostR3411_94_CryptoProParamSet + ))), + namedtype.DefaultedNamedType('encryptionParamSet', + Gost28147_89_ParamSet().subtype(value=id_Gost28147_89_CryptoPro_A_ParamSet + )) +) + + +class GostR3410_94_CertificateSignature(univ.BitString): + pass + +GostR3410_94_CertificateSignature.subtypeSpec = constraint.ValueSizeConstraint(256, 512) + + +class GostR3410_94_ParamSetParameters_t(univ.Integer): + pass + +GostR3410_94_ParamSetParameters_t.subtypeSpec = constraint.SingleValueConstraint(512, 1024) + + +class GostR3410_94_ParamSetParameters(univ.Sequence): + pass + +GostR3410_94_ParamSetParameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('t', GostR3410_94_ParamSetParameters_t()), + namedtype.NamedType('p', univ.Integer()), + namedtype.NamedType('q', univ.Integer()), + namedtype.NamedType('a', univ.Integer()), + namedtype.OptionalNamedType('validationAlgorithm', AlgorithmIdentifier()) +) + + +class GostR3410_94_PublicKey(univ.OctetString): + pass + +GostR3410_94_PublicKey.subtypeSpec = constraint.ConstraintsUnion( + constraint.ValueSizeConstraint(64, 64), + constraint.ValueSizeConstraint(128, 128) +) + + +class GostR3410_94_PublicKeyParameters(univ.Sequence): + pass + +GostR3410_94_PublicKeyParameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('publicKeyParamSet', univ.ObjectIdentifier().subtype( + 
subtypeSpec=constraint.SingleValueConstraint( + id_GostR3410_94_TestParamSet, + id_GostR3410_94_CryptoPro_A_ParamSet, + id_GostR3410_94_CryptoPro_B_ParamSet, + id_GostR3410_94_CryptoPro_C_ParamSet, + id_GostR3410_94_CryptoPro_D_ParamSet, + id_GostR3410_94_CryptoPro_XchA_ParamSet, + id_GostR3410_94_CryptoPro_XchB_ParamSet, + id_GostR3410_94_CryptoPro_XchC_ParamSet + ))), + namedtype.NamedType('digestParamSet', univ.ObjectIdentifier().subtype( + subtypeSpec=constraint.SingleValueConstraint( + id_GostR3411_94_TestParamSet, + id_GostR3411_94_CryptoProParamSet + ))), + namedtype.DefaultedNamedType('encryptionParamSet', + Gost28147_89_ParamSet().subtype(value=id_Gost28147_89_CryptoPro_A_ParamSet + )) +) + + +class GostR3410_94_ValidationBisParameters_c(univ.Integer): + pass + +GostR3410_94_ValidationBisParameters_c.subtypeSpec = constraint.ValueRangeConstraint(0, 4294967295) + + +class GostR3410_94_ValidationBisParameters(univ.Sequence): + pass + +GostR3410_94_ValidationBisParameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('x0', GostR3410_94_ValidationBisParameters_c()), + namedtype.NamedType('c', GostR3410_94_ValidationBisParameters_c()), + namedtype.OptionalNamedType('d', univ.Integer()) +) + + +class GostR3410_94_ValidationParameters_c(univ.Integer): + pass + +GostR3410_94_ValidationParameters_c.subtypeSpec = constraint.ValueRangeConstraint(0, 65535) + + +class GostR3410_94_ValidationParameters(univ.Sequence): + pass + +GostR3410_94_ValidationParameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('x0', GostR3410_94_ValidationParameters_c()), + namedtype.NamedType('c', GostR3410_94_ValidationParameters_c()), + namedtype.OptionalNamedType('d', univ.Integer()) +) + + +class GostR3411_94_Digest(univ.OctetString): + pass + +GostR3411_94_Digest.subtypeSpec = constraint.ValueSizeConstraint(32, 32) + + +class GostR3411_94_DigestParameters(univ.ObjectIdentifier): + pass + +GostR3411_94_DigestParameters.subtypeSpec = constraint.ConstraintsUnion( + constraint.SingleValueConstraint(id_GostR3411_94_TestParamSet), + constraint.SingleValueConstraint(id_GostR3411_94_CryptoProParamSet), +) + + +class GostR3411_94_ParamSetParameters(univ.Sequence): + pass + +GostR3411_94_ParamSetParameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('hUZ', Gost28147_89_UZ()), + namedtype.NamedType('h0', GostR3411_94_Digest()) +) + + +# Update the Algorithm Identifier map in rfc5280.py + +_algorithmIdentifierMapUpdate = { + id_Gost28147_89: Gost28147_89_Parameters(), + id_Gost28147_89_TestParamSet: Gost28147_89_ParamSetParameters(), + id_Gost28147_89_CryptoPro_A_ParamSet: Gost28147_89_ParamSetParameters(), + id_Gost28147_89_CryptoPro_B_ParamSet: Gost28147_89_ParamSetParameters(), + id_Gost28147_89_CryptoPro_C_ParamSet: Gost28147_89_ParamSetParameters(), + id_Gost28147_89_CryptoPro_D_ParamSet: Gost28147_89_ParamSetParameters(), + id_Gost28147_89_CryptoPro_KeyMeshing: univ.Null(""), + id_Gost28147_89_None_KeyMeshing: univ.Null(""), + id_GostR3410_94: GostR3410_94_PublicKeyParameters(), + id_GostR3410_94_TestParamSet: GostR3410_94_ParamSetParameters(), + id_GostR3410_94_CryptoPro_A_ParamSet: GostR3410_94_ParamSetParameters(), + id_GostR3410_94_CryptoPro_B_ParamSet: GostR3410_94_ParamSetParameters(), + id_GostR3410_94_CryptoPro_C_ParamSet: GostR3410_94_ParamSetParameters(), + id_GostR3410_94_CryptoPro_D_ParamSet: GostR3410_94_ParamSetParameters(), + id_GostR3410_94_CryptoPro_XchA_ParamSet: GostR3410_94_ParamSetParameters(), + id_GostR3410_94_CryptoPro_XchB_ParamSet: 
GostR3410_94_ParamSetParameters(), + id_GostR3410_94_CryptoPro_XchC_ParamSet: GostR3410_94_ParamSetParameters(), + id_GostR3410_94_a: GostR3410_94_ValidationParameters(), + id_GostR3410_94_aBis: GostR3410_94_ValidationBisParameters(), + id_GostR3410_94_b: GostR3410_94_ValidationParameters(), + id_GostR3410_94_bBis: GostR3410_94_ValidationBisParameters(), + id_GostR3410_2001: univ.Null(""), + id_GostR3411_94: univ.Null(""), + id_GostR3411_94_TestParamSet: GostR3411_94_ParamSetParameters(), + id_GostR3411_94_CryptoProParamSet: GostR3411_94_ParamSetParameters(), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4387.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4387.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f4e79acf4f9d839a18d817ffad72293b2a0757 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4387.py @@ -0,0 +1,23 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Certificate Store Access via HTTP +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc4387.txt +# + + +from pyasn1.type import univ + + +id_ad = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, )) + +id_ad_http_certs = id_ad + (6, ) + +id_ad_http_crls = id_ad + (7,) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4476.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4476.py new file mode 100644 index 0000000000000000000000000000000000000000..25a0ccb7e88d3c5a12f15861b9f5ba59537b1742 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4476.py @@ -0,0 +1,93 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Attribute Certificate Policies Extension +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc4476.txt +# + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + +MAX = float('inf') + + +# Imports from RFC 5280 + +PolicyQualifierId = rfc5280.PolicyQualifierId + +PolicyQualifierInfo = rfc5280.PolicyQualifierInfo + +UserNotice = rfc5280.UserNotice + +id_pkix = rfc5280.id_pkix + + +# Object Identifiers + +id_pe = id_pkix + (1,) + +id_pe_acPolicies = id_pe + (15,) + +id_qt = id_pkix + (2,) + +id_qt_acps = id_qt + (4,) + +id_qt_acunotice = id_qt + (5,) + + +# Attribute Certificate Policies Extension + +class ACUserNotice(UserNotice): + pass + + +class ACPSuri(char.IA5String): + pass + + +class AcPolicyId(univ.ObjectIdentifier): + pass + + +class PolicyInformation(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('policyIdentifier', AcPolicyId()), + namedtype.OptionalNamedType('policyQualifiers', + univ.SequenceOf(componentType=PolicyQualifierInfo()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) + ) + + +class AcPoliciesSyntax(univ.SequenceOf): + componentType = PolicyInformation() + subtypeSpec = constraint.ValueSizeConstraint(1, MAX) + + +# Update the policy qualifier map in rfc5280.py + +_policyQualifierInfoMapUpdate = { + id_qt_acps: ACPSuri(), + id_qt_acunotice: UserNotice(), +} + +rfc5280.policyQualifierInfoMap.update(_policyQualifierInfoMapUpdate) + + +# Update the certificate extension map in rfc5280.py + +_certificateExtensionsMapUpdate = { + id_pe_acPolicies: AcPoliciesSyntax(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4490.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4490.py new file mode 100644 index 0000000000000000000000000000000000000000..b8fe32134e19f38dc385ab67082697235662868c --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4490.py @@ -0,0 +1,113 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Using the GOST 28147-89, GOST R 34.11-94, GOST R 34.10-94, and +# GOST R 34.10-2001 Algorithms with the CMS +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc4490.txt +# + + +from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful + +from pyasn1_modules import rfc4357 +from pyasn1_modules import rfc5280 + + +# Imports from RFC 4357 + +id_CryptoPro_algorithms = rfc4357.id_CryptoPro_algorithms + +id_GostR3410_94 = rfc4357.id_GostR3410_94 + +id_GostR3410_2001 = rfc4357.id_GostR3410_2001 + +Gost28147_89_ParamSet = rfc4357.Gost28147_89_ParamSet + +Gost28147_89_EncryptedKey = rfc4357.Gost28147_89_EncryptedKey + +GostR3410_94_PublicKeyParameters = rfc4357.GostR3410_94_PublicKeyParameters + +GostR3410_2001_PublicKeyParameters = rfc4357.GostR3410_2001_PublicKeyParameters + + +# Imports from RFC 5280 + +SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo + + +# CMS/PKCS#7 key agreement algorithms & parameters + +class Gost28147_89_KeyWrapParameters(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('encryptionParamSet', Gost28147_89_ParamSet()), + namedtype.OptionalNamedType('ukm', univ.OctetString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(8, 8))) + ) + + +id_Gost28147_89_CryptoPro_KeyWrap = id_CryptoPro_algorithms + (13, 1, ) + + +id_Gost28147_89_None_KeyWrap = id_CryptoPro_algorithms + (13, 0, ) + + +id_GostR3410_2001_CryptoPro_ESDH = id_CryptoPro_algorithms + (96, ) + + +id_GostR3410_94_CryptoPro_ESDH = id_CryptoPro_algorithms + (97, ) + + +# CMS/PKCS#7 key transport algorithms & parameters + +id_GostR3410_2001_KeyTransportSMIMECapability = id_GostR3410_2001 + + +id_GostR3410_94_KeyTransportSMIMECapability = id_GostR3410_94 + + +class GostR3410_TransportParameters(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('encryptionParamSet', Gost28147_89_ParamSet()), + namedtype.OptionalNamedType('ephemeralPublicKey', + SubjectPublicKeyInfo().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('ukm', univ.OctetString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(8, 8))) + ) + +class GostR3410_KeyTransport(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('sessionEncryptedKey', Gost28147_89_EncryptedKey()), + namedtype.OptionalNamedType('transportParameters', + GostR3410_TransportParameters().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 0))) + ) + + +# GOST R 34.10-94 signature algorithm & parameters + +class GostR3410_94_Signature(univ.OctetString): + subtypeSpec = constraint.ValueSizeConstraint(64, 64) + + +# GOST R 34.10-2001 signature algorithms and parameters + +class GostR3410_2001_Signature(univ.OctetString): + subtypeSpec = constraint.ValueSizeConstraint(64, 64) + + +# Update the Algorithm Identifier map in rfc5280.py + +_algorithmIdentifierMapUpdate = { + id_Gost28147_89_CryptoPro_KeyWrap: Gost28147_89_KeyWrapParameters(), + id_Gost28147_89_None_KeyWrap: Gost28147_89_KeyWrapParameters(), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4491.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4491.py new file mode 100644 index 0000000000000000000000000000000000000000..60b5560dccaeabe54359e86244156e153120340f --- /dev/null +++ 
b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4491.py
@@ -0,0 +1,44 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Using the GOST R 34.10-94, GOST R 34.10-2001, and GOST R 34.11-94
+# Algorithms with Certificates and CRLs
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4491.txt
+#
+
+from pyasn1_modules import rfc4357
+
+
+# Signature Algorithm GOST R 34.10-94
+
+id_GostR3411_94_with_GostR3410_94 = rfc4357.id_GostR3411_94_with_GostR3410_94
+
+
+# Signature Algorithm GOST R 34.10-2001
+
+id_GostR3411_94_with_GostR3410_2001 = rfc4357.id_GostR3411_94_with_GostR3410_2001
+
+
+# GOST R 34.10-94 Keys
+
+id_GostR3410_94 = rfc4357.id_GostR3410_94
+
+GostR3410_94_PublicKey = rfc4357.GostR3410_94_PublicKey
+
+GostR3410_94_PublicKeyParameters = rfc4357.GostR3410_94_PublicKeyParameters
+
+
+# GOST R 34.10-2001 Keys
+
+id_GostR3410_2001 = rfc4357.id_GostR3410_2001
+
+GostR3410_2001_PublicKey = rfc4357.GostR3410_2001_PublicKey
+
+GostR3410_2001_PublicKeyParameters = rfc4357.GostR3410_2001_PublicKeyParameters
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4683.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4683.py
new file mode 100644
index 0000000000000000000000000000000000000000..11ac65aa6860c0db8b0235f5b2882e6a20d951c4
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4683.py
@@ -0,0 +1,72 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Subject Identification Method (SIM)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4683.txt
+# https://www.rfc-editor.org/errata/eid1047
+#
+
+from pyasn1.type import char
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Used to compute the PEPSI value
+
+class HashContent(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('userPassword', char.UTF8String()),
+        namedtype.NamedType('authorityRandom', univ.OctetString()),
+        namedtype.NamedType('identifierType', univ.ObjectIdentifier()),
+        namedtype.NamedType('identifier', char.UTF8String())
+    )
+
+
+# Used to encode the PEPSI value as the SIM Other Name
+
+id_pkix = rfc5280.id_pkix
+
+id_on = id_pkix + (8,)
+
+id_on_SIM = id_on + (6,)
+
+
+class SIM(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
+        namedtype.NamedType('authorityRandom', univ.OctetString()),
+        namedtype.NamedType('pEPSI', univ.OctetString())
+    )
+
+
+# Used to encrypt the PEPSI value during certificate request
+
+id_pkip = id_pkix + (5,)
+
+id_regEPEPSI = id_pkip + (3,)
+
+
+class EncryptedPEPSI(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('identifierType', univ.ObjectIdentifier()),
+        namedtype.NamedType('identifier', char.UTF8String()),
+        namedtype.NamedType('sIM', SIM())
+    )
+
+
+# Update the map of Other Name OIDs to Other Names in rfc5280.py
+
+_anotherNameMapUpdate = {
+    id_on_SIM: SIM(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
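
# A derivation sketch, assuming pyasn1 is installed; the password, nonce,
# identifier, and the single SHA-1 round are illustrative stand-ins for the
# iterated hash computation that RFC 4683 actually specifies.
import hashlib

from pyasn1.codec.der.encoder import encode as der_encode
from pyasn1_modules import rfc4683

hash_content = rfc4683.HashContent()
hash_content['userPassword'] = 'correct horse battery staple'  # assumed secret
hash_content['authorityRandom'] = bytes(16)                    # assumed CA-chosen nonce
hash_content['identifierType'] = '1.3.6.1.4.1.99999.1'         # invented OID
hash_content['identifier'] = '123-45-6789'                     # invented identifier

pepsi = hashlib.sha1(der_encode(hash_content)).digest()

diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4985.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4985.py
new file mode 100644
index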
0000000000000000000000000000000000000000..318e412380dfc345bb8c0bdf1e6d74afca370d58 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc4985.py @@ -0,0 +1,49 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Expression of Service Names in X.509 Certificates +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc4985.txt +# + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + +MAX = float('inf') + + +# As specified in Appendix A.2 of RFC 4985 + +id_pkix = rfc5280.id_pkix + +id_on = id_pkix + (8, ) + +id_on_dnsSRV = id_on + (7, ) + + +class SRVName(char.IA5String): + subtypeSpec = constraint.ValueSizeConstraint(1, MAX) + + +srvName = rfc5280.AnotherName() +srvName['type-id'] = id_on_dnsSRV +srvName['value'] = SRVName() + + +# Map of Other Name OIDs to Other Name is added to the +# ones that are in rfc5280.py + +_anotherNameMapUpdate = { + id_on_dnsSRV: SRVName(), +} + +rfc5280.anotherNameMap.update(_anotherNameMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5035.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5035.py new file mode 100644 index 0000000000000000000000000000000000000000..1cec98249cb7395a3c8e48a1efb6f6c2362ff558 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5035.py @@ -0,0 +1,199 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# Modified by Russ Housley to add a map for use with opentypes. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Update to Enhanced Security Services for S/MIME +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5035.txt +# + +from pyasn1.codec.der.encoder import encode as der_encode + +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc2634 +from pyasn1_modules import rfc4055 +from pyasn1_modules import rfc5652 +from pyasn1_modules import rfc5280 + +ContentType = rfc5652.ContentType + +IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber + +SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + +PolicyInformation = rfc5280.PolicyInformation + +GeneralNames = rfc5280.GeneralNames + +CertificateSerialNumber = rfc5280.CertificateSerialNumber + + +# Signing Certificate Attribute V1 and V2 + +id_aa_signingCertificate = rfc2634.id_aa_signingCertificate + +id_aa_signingCertificateV2 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.47') + +Hash = rfc2634.Hash + +IssuerSerial = rfc2634.IssuerSerial + +ESSCertID = rfc2634.ESSCertID + +SigningCertificate = rfc2634.SigningCertificate + + +sha256AlgId = AlgorithmIdentifier() +sha256AlgId['algorithm'] = rfc4055.id_sha256 +# A non-schema object for sha256AlgId['parameters'] as absent +sha256AlgId['parameters'] = der_encode(univ.OctetString('')) + + +class ESSCertIDv2(univ.Sequence): + pass + +ESSCertIDv2.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('hashAlgorithm', sha256AlgId), + namedtype.NamedType('certHash', Hash()), + namedtype.OptionalNamedType('issuerSerial', IssuerSerial()) +) + + +class SigningCertificateV2(univ.Sequence): + pass + +SigningCertificateV2.componentType = namedtype.NamedTypes( + namedtype.NamedType('certs', univ.SequenceOf( + 
componentType=ESSCertIDv2())),
+    namedtype.OptionalNamedType('policies', univ.SequenceOf(
+        componentType=PolicyInformation()))
+)
+
+
+# Mail List Expansion History Attribute
+
+id_aa_mlExpandHistory = rfc2634.id_aa_mlExpandHistory
+
+ub_ml_expansion_history = rfc2634.ub_ml_expansion_history
+
+EntityIdentifier = rfc2634.EntityIdentifier
+
+MLReceiptPolicy = rfc2634.MLReceiptPolicy
+
+MLData = rfc2634.MLData
+
+MLExpansionHistory = rfc2634.MLExpansionHistory
+
+
+# ESS Security Label Attribute
+
+id_aa_securityLabel = rfc2634.id_aa_securityLabel
+
+ub_privacy_mark_length = rfc2634.ub_privacy_mark_length
+
+ub_security_categories = rfc2634.ub_security_categories
+
+ub_integer_options = rfc2634.ub_integer_options
+
+ESSPrivacyMark = rfc2634.ESSPrivacyMark
+
+SecurityClassification = rfc2634.SecurityClassification
+
+SecurityPolicyIdentifier = rfc2634.SecurityPolicyIdentifier
+
+SecurityCategory = rfc2634.SecurityCategory
+
+SecurityCategories = rfc2634.SecurityCategories
+
+ESSSecurityLabel = rfc2634.ESSSecurityLabel
+
+
+# Equivalent Labels Attribute
+
+id_aa_equivalentLabels = rfc2634.id_aa_equivalentLabels
+
+EquivalentLabels = rfc2634.EquivalentLabels
+
+
+# Content Identifier Attribute
+
+id_aa_contentIdentifier = rfc2634.id_aa_contentIdentifier
+
+ContentIdentifier = rfc2634.ContentIdentifier
+
+
+# Content Reference Attribute
+
+id_aa_contentReference = rfc2634.id_aa_contentReference
+
+ContentReference = rfc2634.ContentReference
+
+
+# Message Signature Digest Attribute
+
+id_aa_msgSigDigest = rfc2634.id_aa_msgSigDigest
+
+MsgSigDigest = rfc2634.MsgSigDigest
+
+
+# Content Hints Attribute
+
+id_aa_contentHint = rfc2634.id_aa_contentHint
+
+ContentHints = rfc2634.ContentHints
+
+
+# Receipt Request Attribute
+
+AllOrFirstTier = rfc2634.AllOrFirstTier
+
+ReceiptsFrom = rfc2634.ReceiptsFrom
+
+id_aa_receiptRequest = rfc2634.id_aa_receiptRequest
+
+ub_receiptsTo = rfc2634.ub_receiptsTo
+
+ReceiptRequest = rfc2634.ReceiptRequest
+
+
+# Receipt Content Type
+
+ESSVersion = rfc2634.ESSVersion
+
+id_ct_receipt = rfc2634.id_ct_receipt
+
+Receipt = rfc2634.Receipt
+
+ub_receiptsTo = rfc2634.ub_receiptsTo
+
+ReceiptRequest = rfc2634.ReceiptRequest
+
+
+# Map of Attribute Type to the Attribute structure is added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+    id_aa_signingCertificateV2: SigningCertificateV2(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+    id_ct_receipt: Receipt(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
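
# A construction sketch, assuming pyasn1 0.4+ is installed; cert.der is an
# assumed input file holding the DER encoding of the referenced certificate.
# Leaving 'hashAlgorithm' unset falls back to the SHA-256 default declared
# above.
import hashlib

from pyasn1.codec.der.encoder import encode as der_encode
from pyasn1_modules import rfc5035

with open('cert.der', 'rb') as f:   # assumed path
    cert_der = f.read()

ess_cert_id = rfc5035.ESSCertIDv2()
ess_cert_id['certHash'] = hashlib.sha256(cert_der).digest()

signing_cert_v2 = rfc5035.SigningCertificateV2()
signing_cert_v2['certs'].append(ess_cert_id)   # inner SequenceOf instantiates on access

attr_value = der_encode(signing_cert_v2)   # value for id_aa_signingCertificateV2

diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5083.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5083.py
new file mode 100644
index 0000000000000000000000000000000000000000..26ef550c4795eb678bb10420aa5f162667121d23
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5083.py
@@ -0,0 +1,52 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley without assistance from the asn1ate tool.
+# Modified by Russ Housley to add a map for use with opentypes and
+# simplify the code for the object identifier assignment.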
+#
+# Copyright (c) 2018, 2019 Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Authenticated-Enveloped-Data for the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5083.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# CMS Authenticated-Enveloped-Data Content Type
+
+id_ct_authEnvelopedData = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.23')
+
+class AuthEnvelopedData(univ.Sequence):
+    pass
+
+AuthEnvelopedData.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('version', rfc5652.CMSVersion()),
+    namedtype.OptionalNamedType('originatorInfo', rfc5652.OriginatorInfo().subtype(
+        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+    namedtype.NamedType('recipientInfos', rfc5652.RecipientInfos()),
+    namedtype.NamedType('authEncryptedContentInfo', rfc5652.EncryptedContentInfo()),
+    namedtype.OptionalNamedType('authAttrs', rfc5652.AuthAttributes().subtype(
+        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+    namedtype.NamedType('mac', rfc5652.MessageAuthenticationCode()),
+    namedtype.OptionalNamedType('unauthAttrs', rfc5652.UnauthAttributes().subtype(
+        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+    id_ct_authEnvelopedData: AuthEnvelopedData(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5084.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5084.py
new file mode 100644
index 0000000000000000000000000000000000000000..76868395619c618edf5a39bba97e9f7a0b9f0785
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5084.py
@@ -0,0 +1,97 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool, with manual
+# changes to AES_CCM_ICVlen.subtypeSpec and added comments
+#
+# Copyright (c) 2018-2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# AES-CCM and AES-GCM Algorithms for use with the Authenticated-Enveloped-Data
+# protecting content type for the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5084.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+    output = []
+    for x in tuple(components):
+        if isinstance(x, univ.ObjectIdentifier):
+            output.extend(list(x))
+        else:
+            output.append(int(x))
+
+    return univ.ObjectIdentifier(output)
+
+
+class AES_CCM_ICVlen(univ.Integer):
+    pass
+
+
+class AES_GCM_ICVlen(univ.Integer):
+    pass
+
+
+AES_CCM_ICVlen.subtypeSpec = constraint.SingleValueConstraint(4, 6, 8, 10, 12, 14, 16)
+
+AES_GCM_ICVlen.subtypeSpec = constraint.ValueRangeConstraint(12, 16)
+
+
+class CCMParameters(univ.Sequence):
+    pass
+
+
+CCMParameters.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('aes-nonce', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(7, 13))),
+    # The aes-nonce parameter contains 15-L octets, where L is the size of the length field. L=8 is RECOMMENDED.
+    # Within the scope of any content-authenticated-encryption key, the nonce value MUST be unique.
+    namedtype.DefaultedNamedType('aes-ICVlen', AES_CCM_ICVlen().subtype(value=12))
+)
+
+
+class GCMParameters(univ.Sequence):
+    pass
+
+
+GCMParameters.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('aes-nonce', univ.OctetString()),
+    # The aes-nonce may have any number of bits between 8 and 2^64, but it MUST be a multiple of 8 bits.
+    # Within the scope of any content-authenticated-encryption key, the nonce value MUST be unique.
+    # A nonce value of 12 octets can be processed more efficiently, so that length is RECOMMENDED.
+    namedtype.DefaultedNamedType('aes-ICVlen', AES_GCM_ICVlen().subtype(value=12))
+)
+
+aes = _OID(2, 16, 840, 1, 101, 3, 4, 1)
+
+id_aes128_CCM = _OID(aes, 7)
+
+id_aes128_GCM = _OID(aes, 6)
+
+id_aes192_CCM = _OID(aes, 27)
+
+id_aes192_GCM = _OID(aes, 26)
+
+id_aes256_CCM = _OID(aes, 47)
+
+id_aes256_GCM = _OID(aes, 46)
+
+
+# Map of Algorithm Identifier OIDs to Parameters is added to the
+# ones in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+    id_aes128_CCM: CCMParameters(),
+    id_aes128_GCM: GCMParameters(),
+    id_aes192_CCM: CCMParameters(),
+    id_aes192_GCM: GCMParameters(),
+    id_aes256_CCM: CCMParameters(),
+    id_aes256_GCM: GCMParameters(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
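
# A decoding sketch, assuming pyasn1 is installed; alg_id stands in for an
# rfc5280.AlgorithmIdentifier taken from a decoded CMS structure. With the
# map update above, the parameters of the AES-GCM OIDs resolve to
# GCMParameters.
from pyasn1.codec.der.decoder import decode as der_decode
from pyasn1_modules import rfc5084, rfc5280


def aes_gcm_nonce(alg_id):
    """Return the AES-GCM nonce carried in an AlgorithmIdentifier, else None."""
    oid = alg_id['algorithm']
    if oid not in (rfc5084.id_aes128_GCM, rfc5084.id_aes192_GCM,
                   rfc5084.id_aes256_GCM):
        return None
    params, _ = der_decode(alg_id['parameters'],
                           asn1Spec=rfc5280.algorithmIdentifierMap[oid])
    return bytes(params['aes-nonce'])

diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5126.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5126.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e016c209fea96cef48739ae42ec14ca71ed6a14
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5126.py
@@ -0,0 +1,577 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Advanced Electronic Signatures (CAdES)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5126.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import useful
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5035
+from pyasn1_modules import rfc5755
+from pyasn1_modules import rfc6960
+from pyasn1_modules import rfc3161
+
+MAX = float('inf')
+
+
+# Maps for OpenTypes
+
+commitmentQualifierMap = { }
+
+sigQualifiersMap = { }
+
+otherRevRefMap = { }
+
+otherRevValMap = { }
+
+
+# Imports from RFC 5652
+
+ContentInfo = rfc5652.ContentInfo
+
+ContentType = rfc5652.ContentType
+
+SignedData = rfc5652.SignedData
+
+EncapsulatedContentInfo = rfc5652.EncapsulatedContentInfo
+
+SignerInfo = rfc5652.SignerInfo
+
+MessageDigest = rfc5652.MessageDigest
+
+SigningTime = rfc5652.SigningTime
+
+Countersignature = rfc5652.Countersignature
+
+id_data = rfc5652.id_data
+
+id_signedData = rfc5652.id_signedData
+
+id_contentType = rfc5652.id_contentType
+
+id_messageDigest = rfc5652.id_messageDigest
+
+id_signingTime = rfc5652.id_signingTime
+
+id_countersignature = rfc5652.id_countersignature
+
+
+# Imports from RFC 5035
+
+SigningCertificate = rfc5035.SigningCertificate
+
+IssuerSerial = rfc5035.IssuerSerial
+
+ContentReference = rfc5035.ContentReference
+
+ContentIdentifier = rfc5035.ContentIdentifier
+
+id_aa_contentReference = rfc5035.id_aa_contentReference
+
+id_aa_contentIdentifier = rfc5035.id_aa_contentIdentifier
+
+id_aa_signingCertificate =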
rfc5035.id_aa_signingCertificate + +id_aa_signingCertificateV2 = rfc5035.id_aa_signingCertificateV2 + + +# Imports from RFC 5280 + +Certificate = rfc5280.Certificate + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + +CertificateList = rfc5280.CertificateList + +Name = rfc5280.Name + +Attribute = rfc5280.Attribute + +GeneralNames = rfc5280.GeneralNames + +GeneralName = rfc5280.GeneralName + +PolicyInformation = rfc5280.PolicyInformation + +DirectoryString = rfc5280.DirectoryString + + +# Imports from RFC 5755 + +AttributeCertificate = rfc5755.AttributeCertificate + + +# Imports from RFC 6960 + +BasicOCSPResponse = rfc6960.BasicOCSPResponse + +ResponderID = rfc6960.ResponderID + + +# Imports from RFC 3161 + +TimeStampToken = rfc3161.TimeStampToken + + +# OID used referencing electronic signature mechanisms + +id_etsi_es_IDUP_Mechanism_v1 = univ.ObjectIdentifier('0.4.0.1733.1.4.1') + + +# OtherSigningCertificate - deprecated + +id_aa_ets_otherSigCert = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.19') + + +class OtherHashValue(univ.OctetString): + pass + + +class OtherHashAlgAndValue(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('hashValue', OtherHashValue()) + ) + + +class OtherHash(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('sha1Hash', OtherHashValue()), + namedtype.NamedType('otherHash', OtherHashAlgAndValue()) + ) + + +class OtherCertID(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('otherCertHash', OtherHash()), + namedtype.OptionalNamedType('issuerSerial', IssuerSerial()) + ) + + +class OtherSigningCertificate(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('certs', + univ.SequenceOf(componentType=OtherCertID())), + namedtype.OptionalNamedType('policies', + univ.SequenceOf(componentType=PolicyInformation())) + ) + + +# Signature Policy Identifier + +id_aa_ets_sigPolicyId = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.15') + + +class SigPolicyId(univ.ObjectIdentifier): + pass + + +class SigPolicyHash(OtherHashAlgAndValue): + pass + + +class SigPolicyQualifierId(univ.ObjectIdentifier): + pass + + +class SigPolicyQualifierInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('sigPolicyQualifierId', SigPolicyQualifierId()), + namedtype.NamedType('sigQualifier', univ.Any(), + openType=opentype.OpenType('sigPolicyQualifierId', sigQualifiersMap)) + ) + + +class SignaturePolicyId(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('sigPolicyId', SigPolicyId()), + namedtype.NamedType('sigPolicyHash', SigPolicyHash()), + namedtype.OptionalNamedType('sigPolicyQualifiers', + univ.SequenceOf(componentType=SigPolicyQualifierInfo()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) + ) + + +class SignaturePolicyImplied(univ.Null): + pass + + +class SignaturePolicy(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('signaturePolicyId', SignaturePolicyId()), + namedtype.NamedType('signaturePolicyImplied', SignaturePolicyImplied()) + ) + + +id_spq_ets_unotice = univ.ObjectIdentifier('1.2.840.113549.1.9.16.5.2') + + +class DisplayText(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('visibleString', char.VisibleString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, 200))), + namedtype.NamedType('bmpString', char.BMPString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, 
200))), + namedtype.NamedType('utf8String', char.UTF8String().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, 200))) + ) + + +class NoticeReference(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('organization', DisplayText()), + namedtype.NamedType('noticeNumbers', + univ.SequenceOf(componentType=univ.Integer())) + ) + +class SPUserNotice(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('noticeRef', NoticeReference()), + namedtype.OptionalNamedType('explicitText', DisplayText()) + ) + + +noticeToUser = SigPolicyQualifierInfo() +noticeToUser['sigPolicyQualifierId'] = id_spq_ets_unotice +noticeToUser['sigQualifier'] = SPUserNotice() + + +id_spq_ets_uri = univ.ObjectIdentifier('1.2.840.113549.1.9.16.5.1') + + +class SPuri(char.IA5String): + pass + + +pointerToSigPolSpec = SigPolicyQualifierInfo() +pointerToSigPolSpec['sigPolicyQualifierId'] = id_spq_ets_uri +pointerToSigPolSpec['sigQualifier'] = SPuri() + + +# Commitment Type + +id_aa_ets_commitmentType = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.16') + + +class CommitmentTypeIdentifier(univ.ObjectIdentifier): + pass + + +class CommitmentTypeQualifier(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('commitmentTypeIdentifier', + CommitmentTypeIdentifier()), + namedtype.NamedType('qualifier', univ.Any(), + openType=opentype.OpenType('commitmentTypeIdentifier', + commitmentQualifierMap)) + ) + + +class CommitmentTypeIndication(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('commitmentTypeId', CommitmentTypeIdentifier()), + namedtype.OptionalNamedType('commitmentTypeQualifier', + univ.SequenceOf(componentType=CommitmentTypeQualifier()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) + ) + + +id_cti_ets_proofOfOrigin = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.1') + +id_cti_ets_proofOfReceipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.2') + +id_cti_ets_proofOfDelivery = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.3') + +id_cti_ets_proofOfSender = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.4') + +id_cti_ets_proofOfApproval = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.5') + +id_cti_ets_proofOfCreation = univ.ObjectIdentifier('1.2.840.113549.1.9.16.6.6') + + +# Signer Location + +id_aa_ets_signerLocation = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.17') + + +class PostalAddress(univ.SequenceOf): + componentType = DirectoryString() + subtypeSpec = constraint.ValueSizeConstraint(1, 6) + + +class SignerLocation(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('countryName', + DirectoryString().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('localityName', + DirectoryString().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('postalAdddress', + PostalAddress().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +# Signature Timestamp + +id_aa_signatureTimeStampToken = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.14') + + +class SignatureTimeStampToken(TimeStampToken): + pass + + +# Content Timestamp + +id_aa_ets_contentTimestamp = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.20') + + +class ContentTimestamp(TimeStampToken): + pass + + +# Signer Attributes + +id_aa_ets_signerAttr = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.18') + + +class ClaimedAttributes(univ.SequenceOf): + 
componentType = Attribute() + + +class CertifiedAttributes(AttributeCertificate): + pass + + +class SignerAttribute(univ.SequenceOf): + componentType = univ.Choice(componentType=namedtype.NamedTypes( + namedtype.NamedType('claimedAttributes', + ClaimedAttributes().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('certifiedAttributes', + CertifiedAttributes().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))) + )) + + +# Complete Certificate Refs + +id_aa_ets_certificateRefs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.21') + + +class CompleteCertificateRefs(univ.SequenceOf): + componentType = OtherCertID() + + +# Complete Revocation Refs + +id_aa_ets_revocationRefs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.22') + + +class CrlIdentifier(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('crlissuer', Name()), + namedtype.NamedType('crlIssuedTime', useful.UTCTime()), + namedtype.OptionalNamedType('crlNumber', univ.Integer()) + ) + + +class CrlValidatedID(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('crlHash', OtherHash()), + namedtype.OptionalNamedType('crlIdentifier', CrlIdentifier()) + ) + + +class CRLListID(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('crls', + univ.SequenceOf(componentType=CrlValidatedID())) + ) + + +class OcspIdentifier(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('ocspResponderID', ResponderID()), + namedtype.NamedType('producedAt', useful.GeneralizedTime()) + ) + + +class OcspResponsesID(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('ocspIdentifier', OcspIdentifier()), + namedtype.OptionalNamedType('ocspRepHash', OtherHash()) + ) + + +class OcspListID(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('ocspResponses', + univ.SequenceOf(componentType=OcspResponsesID())) + ) + + +class OtherRevRefType(univ.ObjectIdentifier): + pass + + +class OtherRevRefs(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('otherRevRefType', OtherRevRefType()), + namedtype.NamedType('otherRevRefs', univ.Any(), + openType=opentype.OpenType('otherRevRefType', otherRevRefMap)) + ) + + +class CrlOcspRef(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('crlids', + CRLListID().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('ocspids', + OcspListID().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.OptionalNamedType('otherRev', + OtherRevRefs().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 2))) + ) + + +class CompleteRevocationRefs(univ.SequenceOf): + componentType = CrlOcspRef() + + +# Certificate Values + +id_aa_ets_certValues = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.23') + + +class CertificateValues(univ.SequenceOf): + componentType = Certificate() + + +# Certificate Revocation Values + +id_aa_ets_revocationValues = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.24') + + +class OtherRevValType(univ.ObjectIdentifier): + pass + + +class OtherRevVals(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('otherRevValType', OtherRevValType()), + namedtype.NamedType('otherRevVals', univ.Any(), + openType=opentype.OpenType('otherRevValType', otherRevValMap)) + ) + + +class 
RevocationValues(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.OptionalNamedType('crlVals',
+            univ.SequenceOf(componentType=CertificateList()).subtype(
+                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+        namedtype.OptionalNamedType('ocspVals',
+            univ.SequenceOf(componentType=BasicOCSPResponse()).subtype(
+                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+        namedtype.OptionalNamedType('otherRevVals',
+            OtherRevVals().subtype(explicitTag=tag.Tag(
+                tag.tagClassContext, tag.tagFormatConstructed, 2)))
+    )
+
+
+# CAdES-C Timestamp
+
+id_aa_ets_escTimeStamp = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.25')
+
+
+class ESCTimeStampToken(TimeStampToken):
+    pass
+
+
+# Time-Stamped Certificates and CRLs
+
+id_aa_ets_certCRLTimestamp = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.26')
+
+
+class TimestampedCertsCRLs(TimeStampToken):
+    pass
+
+
+# Archive Timestamp
+
+id_aa_ets_archiveTimestampV2 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.48')
+
+
+class ArchiveTimeStampToken(TimeStampToken):
+    pass
+
+
+# Attribute certificate references
+
+id_aa_ets_attrCertificateRefs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.44')
+
+
+class AttributeCertificateRefs(univ.SequenceOf):
+    componentType = OtherCertID()
+
+
+# Attribute revocation references
+
+id_aa_ets_attrRevocationRefs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.45')
+
+
+class AttributeRevocationRefs(univ.SequenceOf):
+    componentType = CrlOcspRef()
+
+
+# Update the sigQualifiersMap
+
+_sigQualifiersMapUpdate = {
+    id_spq_ets_unotice: SPUserNotice(),
+    id_spq_ets_uri: SPuri(),
+}
+
+sigQualifiersMap.update(_sigQualifiersMapUpdate)
+
+
+# Update the CMS Attribute Map in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+    id_aa_ets_otherSigCert: OtherSigningCertificate(),
+    id_aa_ets_sigPolicyId: SignaturePolicy(),
+    id_aa_ets_commitmentType: CommitmentTypeIndication(),
+    id_aa_ets_signerLocation: SignerLocation(),
+    id_aa_signatureTimeStampToken: SignatureTimeStampToken(),
+    id_aa_ets_contentTimestamp: ContentTimestamp(),
+    id_aa_ets_signerAttr: SignerAttribute(),
+    id_aa_ets_certificateRefs: CompleteCertificateRefs(),
+    id_aa_ets_revocationRefs: CompleteRevocationRefs(),
+    id_aa_ets_certValues: CertificateValues(),
+    id_aa_ets_revocationValues: RevocationValues(),
+    id_aa_ets_escTimeStamp: ESCTimeStampToken(),
+    id_aa_ets_certCRLTimestamp: TimestampedCertsCRLs(),
+    id_aa_ets_archiveTimestampV2: ArchiveTimeStampToken(),
+    id_aa_ets_attrCertificateRefs: AttributeCertificateRefs(),
+    id_aa_ets_attrRevocationRefs: AttributeRevocationRefs(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
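
# A lookup sketch, assuming pyasn1 is installed; attr stands in for an
# rfc5652.Attribute whose attrValues entries are still undecoded ANY blobs.
# Importing rfc5126 populates rfc5652.cmsAttributesMap with the CAdES
# attribute types registered above.
from pyasn1.codec.der.decoder import decode as der_decode
from pyasn1_modules import rfc5126, rfc5652  # rfc5126 imported for its map update


def decode_cades_attribute(attr):
    """Decode the first value of a CMS attribute, if its type is known."""
    spec = rfc5652.cmsAttributesMap.get(attr['attrType'])
    if spec is None:
        return None
    value, _ = der_decode(attr['attrValues'][0], asn1Spec=spec)
    return value

diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5208.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5208.py
new file mode 100644
index 0000000000000000000000000000000000000000..295fdbf388bfed5c9c040d4c8690cc5cbb97d793
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5208.py
@@ -0,0 +1,56 @@
+#
+# This file is part of pyasn1-modules software.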
+# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# PKCS#8 syntax +# +# ASN.1 source from: +# http://tools.ietf.org/html/rfc5208 +# +# Sample captures could be obtained with "openssl pkcs8 -topk8" command +# +from pyasn1_modules import rfc2251 +from pyasn1_modules.rfc2459 import * + + +class KeyEncryptionAlgorithms(AlgorithmIdentifier): + pass + + +class PrivateKeyAlgorithms(AlgorithmIdentifier): + pass + + +class EncryptedData(univ.OctetString): + pass + + +class EncryptedPrivateKeyInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('encryptionAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('encryptedData', EncryptedData()) + ) + + +class PrivateKey(univ.OctetString): + pass + + +class Attributes(univ.SetOf): + componentType = rfc2251.Attribute() + + +class Version(univ.Integer): + namedValues = namedval.NamedValues(('v1', 0), ('v2', 1)) + + +class PrivateKeyInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.NamedType('privateKeyAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('privateKey', PrivateKey()), + namedtype.OptionalNamedType('attributes', Attributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) + ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5275.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5275.py new file mode 100644 index 0000000000000000000000000000000000000000..1be959814264706401bf7178ebafff0f09062f7a --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5275.py @@ -0,0 +1,404 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# CMS Symmetric Key Management and Distribution +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5275.txt +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import opentype +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc3565 +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5652 +from pyasn1_modules import rfc5751 +from pyasn1_modules import rfc5755 + +MAX = float('inf') + + +# Initialize the map for GLAQueryRequests and GLAQueryResponses + +glaQueryRRMap = { } + + +# Imports from RFC 3565 + +id_aes128_wrap = rfc3565.id_aes128_wrap + + +# Imports from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + +Certificate = rfc5280.Certificate + +GeneralName = rfc5280.GeneralName + + +# Imports from RFC 5652 + +CertificateSet = rfc5652.CertificateSet + +KEKIdentifier = rfc5652.KEKIdentifier + +RecipientInfos = rfc5652.RecipientInfos + + +# Imports from RFC 5751 + +SMIMECapability = rfc5751.SMIMECapability + + +# Imports from RFC 5755 + +AttributeCertificate = rfc5755.AttributeCertificate + + +# The GL symmetric key distribution object identifier arc + +id_skd = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, 8,)) + + +# The GL Use KEK control attribute + +id_skd_glUseKEK = id_skd + (1,) + + +class Certificates(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('pKC', + Certificate().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('aC', + univ.SequenceOf(componentType=AttributeCertificate()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('certPath', + CertificateSet().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class GLInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glName', GeneralName()), + namedtype.NamedType('glAddress', GeneralName()) + ) + + +class GLOwnerInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glOwnerName', GeneralName()), + namedtype.NamedType('glOwnerAddress', GeneralName()), + namedtype.OptionalNamedType('certificates', Certificates()) + ) + + +class GLAdministration(univ.Integer): + namedValues = namedval.NamedValues( + ('unmanaged', 0), + ('managed', 1), + ('closed', 2) + ) + + +requested_algorithm = SMIMECapability().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)) +requested_algorithm['capabilityID'] = id_aes128_wrap + + +class GLKeyAttributes(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('rekeyControlledByGLO', + univ.Boolean().subtype(value=0, + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.DefaultedNamedType('recipientsNotMutuallyAware', + univ.Boolean().subtype(value=1, + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.DefaultedNamedType('duration', + univ.Integer().subtype(value=0, + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.DefaultedNamedType('generationCounter', + univ.Integer().subtype(value=2, + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple,
3))), + namedtype.DefaultedNamedType('requestedAlgorithm', requested_algorithm) + ) + + +class GLUseKEK(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glInfo', GLInfo()), + namedtype.NamedType('glOwnerInfo', + univ.SequenceOf(componentType=GLOwnerInfo()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.DefaultedNamedType('glAdministration', + GLAdministration().subtype(value=1)), + namedtype.OptionalNamedType('glKeyAttributes', GLKeyAttributes()) + ) + + +# The Delete GL control attribute + +id_skd_glDelete = id_skd + (2,) + + +class DeleteGL(GeneralName): + pass + + +# The Add GL Member control attribute + +id_skd_glAddMember = id_skd + (3,) + + +class GLMember(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glMemberName', GeneralName()), + namedtype.OptionalNamedType('glMemberAddress', GeneralName()), + namedtype.OptionalNamedType('certificates', Certificates()) + ) + + +class GLAddMember(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glName', GeneralName()), + namedtype.NamedType('glMember', GLMember()) + ) + + +# The Delete GL Member control attribute + +id_skd_glDeleteMember = id_skd + (4,) + + +class GLDeleteMember(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glName', GeneralName()), + namedtype.NamedType('glMemberToDelete', GeneralName()) + ) + + +# The GL Rekey control attribute + +id_skd_glRekey = id_skd + (5,) + + +class GLNewKeyAttributes(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('rekeyControlledByGLO', + univ.Boolean().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('recipientsNotMutuallyAware', + univ.Boolean().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('duration', + univ.Integer().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('generationCounter', + univ.Integer().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.OptionalNamedType('requestedAlgorithm', + AlgorithmIdentifier().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 4))) + ) + + +class GLRekey(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glName', GeneralName()), + namedtype.OptionalNamedType('glAdministration', GLAdministration()), + namedtype.OptionalNamedType('glNewKeyAttributes', GLNewKeyAttributes()), + namedtype.OptionalNamedType('glRekeyAllGLKeys', univ.Boolean()) + ) + + +# The Add and Delete GL Owner control attributes + +id_skd_glAddOwner = id_skd + (6,) + +id_skd_glRemoveOwner = id_skd + (7,) + + +class GLOwnerAdministration(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glName', GeneralName()), + namedtype.NamedType('glOwnerInfo', GLOwnerInfo()) + ) + + +# The GL Key Compromise control attribute + +id_skd_glKeyCompromise = id_skd + (8,) + + +class GLKCompromise(GeneralName): + pass + + +# The GL Key Refresh control attribute + +id_skd_glkRefresh = id_skd + (9,) + + +class Date(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('start', useful.GeneralizedTime()), + namedtype.OptionalNamedType('end', useful.GeneralizedTime()) + ) + + +class GLKRefresh(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glName', GeneralName()), + 
namedtype.NamedType('dates', + univ.SequenceOf(componentType=Date()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) + ) + + +# The GLA Query Request control attribute + +id_skd_glaQueryRequest = id_skd + (11,) + + +class GLAQueryRequest(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glaRequestType', univ.ObjectIdentifier()), + namedtype.NamedType('glaRequestValue', univ.Any(), + openType=opentype.OpenType('glaRequestType', glaQueryRRMap)) + ) + + +# The GLA Query Response control attribute + +id_skd_glaQueryResponse = id_skd + (12,) + + +class GLAQueryResponse(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glaResponseType', univ.ObjectIdentifier()), + namedtype.NamedType('glaResponseValue', univ.Any(), + openType=opentype.OpenType('glaResponseType', glaQueryRRMap)) + ) + + +# The GLA Request/Response (glaRR) arc for glaRequestType/glaResponseType + +id_cmc_glaRR = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 7, 99,)) + + +# The Algorithm Request + +id_cmc_gla_skdAlgRequest = id_cmc_glaRR + (1,) + + +class SKDAlgRequest(univ.Null): + pass + + +# The Algorithm Response + +id_cmc_gla_skdAlgResponse = id_cmc_glaRR + (2,) + +SMIMECapabilities = rfc5751.SMIMECapabilities + + +# The control attribute to request an updated certificate to the GLA and +# the control attribute to return an updated certificate to the GLA + +id_skd_glProvideCert = id_skd + (13,) + +id_skd_glManageCert = id_skd + (14,) + + +class GLManageCert(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glName', GeneralName()), + namedtype.NamedType('glMember', GLMember()) + ) + + +# The control attribute to distribute the GL shared KEK + +id_skd_glKey = id_skd + (15,) + + +class GLKey(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('glName', GeneralName()), + namedtype.NamedType('glIdentifier', KEKIdentifier()), + namedtype.NamedType('glkWrapped', RecipientInfos()), + namedtype.NamedType('glkAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('glkNotBefore', useful.GeneralizedTime()), + namedtype.NamedType('glkNotAfter', useful.GeneralizedTime()) + ) + + +# The CMC error types + +id_cet_skdFailInfo = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 15, 1,)) + + +class SKDFailInfo(univ.Integer): + namedValues = namedval.NamedValues( + ('unspecified', 0), + ('closedGL', 1), + ('unsupportedDuration', 2), + ('noGLACertificate', 3), + ('invalidCert', 4), + ('unsupportedAlgorithm', 5), + ('noGLONameMatch', 6), + ('invalidGLName', 7), + ('nameAlreadyInUse', 8), + ('noSpam', 9), + ('alreadyAMember', 11), + ('notAMember', 12), + ('alreadyAnOwner', 13), + ('notAnOwner', 14) + ) + + +# Update the map for GLAQueryRequests and GLAQueryResponses + +_glaQueryRRMapUpdate = { + id_cmc_gla_skdAlgRequest: univ.Null(""), + id_cmc_gla_skdAlgResponse: SMIMECapabilities(), +} + +glaQueryRRMap.update(_glaQueryRRMapUpdate) + + +# Update the map for CMC control attributes; since CMS Attributes and +# CMC Controls both use 'attrType', one map is used for both + +_cmcControlAttributesMapUpdate = { + id_skd_glUseKEK: GLUseKEK(), + id_skd_glDelete: DeleteGL(), + id_skd_glAddMember: GLAddMember(), + id_skd_glDeleteMember: GLDeleteMember(), + id_skd_glRekey: GLRekey(), + id_skd_glAddOwner: GLOwnerAdministration(), + id_skd_glRemoveOwner: GLOwnerAdministration(), + id_skd_glKeyCompromise: GLKCompromise(), + id_skd_glkRefresh: GLKRefresh(), + id_skd_glaQueryRequest: GLAQueryRequest(), + id_skd_glaQueryResponse: 
GLAQueryResponse(), + id_skd_glProvideCert: GLManageCert(), + id_skd_glManageCert: GLManageCert(), + id_skd_glKey: GLKey(), +} + +rfc5652.cmsAttributesMap.update(_cmcControlAttributesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5280.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5280.py new file mode 100644 index 0000000000000000000000000000000000000000..ed5d28f7516bd5e356bccb410a96ba9ee2b47557 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5280.py @@ -0,0 +1,1658 @@ +# coding: utf-8 +# +# This file is part of pyasn1-modules software. +# +# Created by Stanisław Pitucha with asn1ate tool. +# Updated by Russ Housley for ORAddress Extension Attribute opentype support. +# Updated by Russ Housley for AlgorithmIdentifier opentype support. +# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# Internet X.509 Public Key Infrastructure Certificate and Certificate +# Revocation List (CRL) Profile +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5280.txt +# +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import opentype +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +MAX = float('inf') + + +def _buildOid(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + + return univ.ObjectIdentifier(output) + + +ub_e163_4_sub_address_length = univ.Integer(40) + +ub_e163_4_number_length = univ.Integer(15) + +unformatted_postal_address = univ.Integer(16) + + +class TerminalType(univ.Integer): + pass + + +TerminalType.namedValues = namedval.NamedValues( + ('telex', 3), + ('teletex', 4), + ('g3-facsimile', 5), + ('g4-facsimile', 6), + ('ia5-terminal', 7), + ('videotex', 8) +) + + +class Extension(univ.Sequence): + pass + + +Extension.componentType = namedtype.NamedTypes( + namedtype.NamedType('extnID', univ.ObjectIdentifier()), + namedtype.DefaultedNamedType('critical', univ.Boolean().subtype(value=0)), + namedtype.NamedType('extnValue', univ.OctetString()) +) + + +class Extensions(univ.SequenceOf): + pass + + +Extensions.componentType = Extension() +Extensions.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + +physical_delivery_personal_name = univ.Integer(13) + +ub_unformatted_address_length = univ.Integer(180) + +ub_pds_parameter_length = univ.Integer(30) + +ub_pds_physical_address_lines = univ.Integer(6) + + +class UnformattedPostalAddress(univ.Set): + pass + + +UnformattedPostalAddress.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))), + namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length))) +) + +ub_organization_name = univ.Integer(64) + + +class X520OrganizationName(univ.Choice): + pass + + +X520OrganizationName.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))), + namedtype.NamedType('printableString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))), + 
namedtype.NamedType('universalString', char.UniversalString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))) +) + +ub_x121_address_length = univ.Integer(16) + +pds_name = univ.Integer(7) + +id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7) + +id_kp = _buildOid(id_pkix, 3) + +ub_postal_code_length = univ.Integer(16) + + +class PostalCode(univ.Choice): + pass + + +PostalCode.componentType = namedtype.NamedTypes( + namedtype.NamedType('numeric-code', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))), + namedtype.NamedType('printable-code', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))) +) + +ub_generation_qualifier_length = univ.Integer(3) + +unique_postal_name = univ.Integer(20) + + +class DomainComponent(char.IA5String): + pass + + +ub_domain_defined_attribute_value_length = univ.Integer(128) + +ub_match = univ.Integer(128) + +id_at = _buildOid(2, 5, 4) + + +class AttributeType(univ.ObjectIdentifier): + pass + + +id_at_organizationalUnitName = _buildOid(id_at, 11) + +terminal_type = univ.Integer(23) + + +class PDSParameter(univ.Set): + pass + + +PDSParameter.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))), + namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))) +) + + +class PhysicalDeliveryPersonalName(PDSParameter): + pass + + +ub_surname_length = univ.Integer(40) + +id_ad = _buildOid(id_pkix, 48) + +ub_domain_defined_attribute_type_length = univ.Integer(8) + + +class TeletexDomainDefinedAttribute(univ.Sequence): + pass + + +TeletexDomainDefinedAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('type', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))), + namedtype.NamedType('value', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length))) +) + +ub_domain_defined_attributes = univ.Integer(4) + + +class TeletexDomainDefinedAttributes(univ.SequenceOf): + pass + + +TeletexDomainDefinedAttributes.componentType = TeletexDomainDefinedAttribute() +TeletexDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes) + +extended_network_address = univ.Integer(22) + +ub_locality_name = univ.Integer(128) + + +class X520LocalityName(univ.Choice): + pass + + +X520LocalityName.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))), + namedtype.NamedType('printableString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))), + namedtype.NamedType('universalString', char.UniversalString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))), + 
namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))) +) + +teletex_organization_name = univ.Integer(3) + +ub_given_name_length = univ.Integer(16) + +ub_initials_length = univ.Integer(5) + + +class PersonalName(univ.Set): + pass + + +PersonalName.componentType = namedtype.NamedTypes( + namedtype.NamedType('surname', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('given-name', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('initials', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) +) + +ub_organizational_unit_name_length = univ.Integer(32) + + +class OrganizationalUnitName(char.PrintableString): + pass + + +OrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length) + +id_at_generationQualifier = _buildOid(id_at, 44) + + +class Version(univ.Integer): + pass + + +Version.namedValues = namedval.NamedValues( + ('v1', 0), + ('v2', 1), + ('v3', 2) +) + + +class CertificateSerialNumber(univ.Integer): + pass + + +algorithmIdentifierMap = {} + + +class AlgorithmIdentifier(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('algorithm', univ.ObjectIdentifier()), + namedtype.OptionalNamedType('parameters', univ.Any(), + openType=opentype.OpenType('algorithm', algorithmIdentifierMap) + ) + ) + + +class Time(univ.Choice): + pass + + +Time.componentType = namedtype.NamedTypes( + namedtype.NamedType('utcTime', useful.UTCTime()), + namedtype.NamedType('generalTime', useful.GeneralizedTime()) +) + + +class AttributeValue(univ.Any): + pass + + +certificateAttributesMap = {} + + +class AttributeTypeAndValue(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type', AttributeType()), + namedtype.NamedType( + 'value', AttributeValue(), + openType=opentype.OpenType('type', certificateAttributesMap) + ) + ) + + +class RelativeDistinguishedName(univ.SetOf): + pass + + +RelativeDistinguishedName.componentType = AttributeTypeAndValue() +RelativeDistinguishedName.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class RDNSequence(univ.SequenceOf): + pass + + +RDNSequence.componentType = RelativeDistinguishedName() + + +class Name(univ.Choice): + pass + + +Name.componentType = namedtype.NamedTypes( + namedtype.NamedType('rdnSequence', RDNSequence()) +) + + +class TBSCertList(univ.Sequence): + pass + + +TBSCertList.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('version', Version()), + namedtype.NamedType('signature', AlgorithmIdentifier()), + namedtype.NamedType('issuer', Name()), + namedtype.NamedType('thisUpdate', Time()), + namedtype.OptionalNamedType('nextUpdate', Time()), + namedtype.OptionalNamedType( + 'revokedCertificates', univ.SequenceOf( + componentType=univ.Sequence( + componentType=namedtype.NamedTypes( + 
namedtype.NamedType('userCertificate', CertificateSerialNumber()), + namedtype.NamedType('revocationDate', Time()), + namedtype.OptionalNamedType('crlEntryExtensions', Extensions()) + ) + ) + ) + ), + namedtype.OptionalNamedType( + 'crlExtensions', Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class CertificateList(univ.Sequence): + pass + + +CertificateList.componentType = namedtype.NamedTypes( + namedtype.NamedType('tbsCertList', TBSCertList()), + namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()) +) + + +class PhysicalDeliveryOfficeName(PDSParameter): + pass + + +ub_extension_attributes = univ.Integer(256) + +certificateExtensionsMap = { +} + +oraddressExtensionAttributeMap = { +} + + +class ExtensionAttribute(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType( + 'extension-attribute-type', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, ub_extension_attributes)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType( + 'extension-attribute-value', + univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)), + openType=opentype.OpenType('extension-attribute-type', oraddressExtensionAttributeMap)) + ) + +id_qt = _buildOid(id_pkix, 2) + +id_qt_cps = _buildOid(id_qt, 1) + +id_at_stateOrProvinceName = _buildOid(id_at, 8) + +id_at_title = _buildOid(id_at, 12) + +id_at_serialNumber = _buildOid(id_at, 5) + + +class X520dnQualifier(char.PrintableString): + pass + + +class PosteRestanteAddress(PDSParameter): + pass + + +poste_restante_address = univ.Integer(19) + + +class UniqueIdentifier(univ.BitString): + pass + + +class Validity(univ.Sequence): + pass + + +Validity.componentType = namedtype.NamedTypes( + namedtype.NamedType('notBefore', Time()), + namedtype.NamedType('notAfter', Time()) +) + + +class SubjectPublicKeyInfo(univ.Sequence): + pass + + +SubjectPublicKeyInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('algorithm', AlgorithmIdentifier()), + namedtype.NamedType('subjectPublicKey', univ.BitString()) +) + + +class TBSCertificate(univ.Sequence): + pass + + +TBSCertificate.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', + Version().subtype(explicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value="v1")), + namedtype.NamedType('serialNumber', CertificateSerialNumber()), + namedtype.NamedType('signature', AlgorithmIdentifier()), + namedtype.NamedType('issuer', Name()), + namedtype.NamedType('validity', Validity()), + namedtype.NamedType('subject', Name()), + namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()), + namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('extensions', + Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) +) + +physical_delivery_office_name = univ.Integer(10) + +ub_name = univ.Integer(32768) + + +class X520name(univ.Choice): + pass + + +X520name.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))), + 
namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))), + namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))), + namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))) +) + +id_at_dnQualifier = _buildOid(id_at, 46) + +ub_serial_number = univ.Integer(64) + +ub_pseudonym = univ.Integer(128) + +pkcs_9 = _buildOid(1, 2, 840, 113549, 1, 9) + + +class X121Address(char.NumericString): + pass + + +X121Address.subtypeSpec = constraint.ValueSizeConstraint(1, ub_x121_address_length) + + +class NetworkAddress(X121Address): + pass + + +ub_integer_options = univ.Integer(256) + +id_at_commonName = _buildOid(id_at, 3) + +ub_organization_name_length = univ.Integer(64) + +id_ad_ocsp = _buildOid(id_ad, 1) + +ub_country_name_numeric_length = univ.Integer(3) + +ub_country_name_alpha_length = univ.Integer(2) + + +class PhysicalDeliveryCountryName(univ.Choice): + pass + + +PhysicalDeliveryCountryName.componentType = namedtype.NamedTypes( + namedtype.NamedType('x121-dcc-code', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))), + namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length))) +) + +id_emailAddress = _buildOid(pkcs_9, 1) + +common_name = univ.Integer(1) + + +class X520Pseudonym(univ.Choice): + pass + + +X520Pseudonym.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))), + namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))) +) + +ub_domain_name_length = univ.Integer(16) + + +class AdministrationDomainName(univ.Choice): + pass + + +AdministrationDomainName.tagSet = univ.Choice.tagSet.tagExplicitly( + tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2)) +AdministrationDomainName.componentType = namedtype.NamedTypes( + namedtype.NamedType('numeric', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))), + namedtype.NamedType('printable', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))) +) + + +class PresentationAddress(univ.Sequence): + pass + + +PresentationAddress.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype( + 
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) +) + + +class ExtendedNetworkAddress(univ.Choice): + pass + + +ExtendedNetworkAddress.componentType = namedtype.NamedTypes( + namedtype.NamedType( + 'e163-4-address', univ.Sequence( + componentType=namedtype.NamedTypes( + namedtype.NamedType('number', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + ) + ), + namedtype.NamedType('psap-address', PresentationAddress().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) +) + + +class TeletexOrganizationName(char.TeletexString): + pass + + +TeletexOrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length) + +ub_terminal_id_length = univ.Integer(24) + + +class TerminalIdentifier(char.PrintableString): + pass + + +TerminalIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_terminal_id_length) + +id_ad_caIssuers = _buildOid(id_ad, 2) + +id_at_countryName = _buildOid(id_at, 6) + + +class StreetAddress(PDSParameter): + pass + + +postal_code = univ.Integer(9) + +id_at_givenName = _buildOid(id_at, 42) + +ub_title = univ.Integer(64) + + +class ExtensionAttributes(univ.SetOf): + pass + + +ExtensionAttributes.componentType = ExtensionAttribute() +ExtensionAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_extension_attributes) + +ub_emailaddress_length = univ.Integer(255) + +id_ad_caRepository = _buildOid(id_ad, 5) + + +class ExtensionORAddressComponents(PDSParameter): + pass + + +ub_organizational_unit_name = univ.Integer(64) + + +class X520OrganizationalUnitName(univ.Choice): + pass + + +X520OrganizationalUnitName.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))), + namedtype.NamedType('printableString', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))), + namedtype.NamedType('universalString', char.UniversalString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))), + namedtype.NamedType('utf8String', char.UTF8String().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))), + namedtype.NamedType('bmpString', char.BMPString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))) +) + + +class LocalPostalAttributes(PDSParameter): + pass + + +teletex_organizational_unit_names = univ.Integer(5) + + +class X520Title(univ.Choice): + pass + + +X520Title.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))), + namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))), + 
namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))), + namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))) +) + +id_at_localityName = _buildOid(id_at, 7) + +id_at_initials = _buildOid(id_at, 43) + +ub_state_name = univ.Integer(128) + + +class X520StateOrProvinceName(univ.Choice): + pass + + +X520StateOrProvinceName.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))), + namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))) +) + +physical_delivery_organization_name = univ.Integer(14) + +id_at_surname = _buildOid(id_at, 4) + + +class X520countryName(char.PrintableString): + pass + + +X520countryName.subtypeSpec = constraint.ValueSizeConstraint(2, 2) + +physical_delivery_office_number = univ.Integer(11) + +id_qt_unotice = _buildOid(id_qt, 2) + + +class X520SerialNumber(char.PrintableString): + pass + + +X520SerialNumber.subtypeSpec = constraint.ValueSizeConstraint(1, ub_serial_number) + + +class Attribute(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type', AttributeType()), + namedtype.NamedType('values', + univ.SetOf(componentType=AttributeValue()), + openType=opentype.OpenType('type', certificateAttributesMap)) + ) + +ub_common_name = univ.Integer(64) + +id_pe = _buildOid(id_pkix, 1) + + +class ExtensionPhysicalDeliveryAddressComponents(PDSParameter): + pass + + +class EmailAddress(char.IA5String): + pass + + +EmailAddress.subtypeSpec = constraint.ValueSizeConstraint(1, ub_emailaddress_length) + +id_at_organizationName = _buildOid(id_at, 10) + +post_office_box_address = univ.Integer(18) + + +class BuiltInDomainDefinedAttribute(univ.Sequence): + pass + + +BuiltInDomainDefinedAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('type', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))), + namedtype.NamedType('value', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length))) +) + + +class BuiltInDomainDefinedAttributes(univ.SequenceOf): + pass + + +BuiltInDomainDefinedAttributes.componentType = BuiltInDomainDefinedAttribute() +BuiltInDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes) + +id_at_pseudonym = _buildOid(id_at, 65) + +id_domainComponent = _buildOid(0, 9, 2342, 19200300, 100, 1, 25) + + +class X520CommonName(univ.Choice): + pass + + +X520CommonName.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))), + namedtype.NamedType('universalString', + 
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))), + namedtype.NamedType('utf8String', + char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))), + namedtype.NamedType('bmpString', + char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))) +) + +extension_OR_address_components = univ.Integer(12) + +ub_organizational_units = univ.Integer(4) + +teletex_personal_name = univ.Integer(4) + +ub_numeric_user_id_length = univ.Integer(32) + +ub_common_name_length = univ.Integer(64) + + +class TeletexCommonName(char.TeletexString): + pass + + +TeletexCommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length) + + +class PhysicalDeliveryOrganizationName(PDSParameter): + pass + + +extension_physical_delivery_address_components = univ.Integer(15) + + +class NumericUserIdentifier(char.NumericString): + pass + + +NumericUserIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_numeric_user_id_length) + + +class CountryName(univ.Choice): + pass + + +CountryName.tagSet = univ.Choice.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)) +CountryName.componentType = namedtype.NamedTypes( + namedtype.NamedType('x121-dcc-code', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))), + namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length))) +) + + +class OrganizationName(char.PrintableString): + pass + + +OrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length) + + +class OrganizationalUnitNames(univ.SequenceOf): + pass + + +OrganizationalUnitNames.componentType = OrganizationalUnitName() +OrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units) + + +class PrivateDomainName(univ.Choice): + pass + + +PrivateDomainName.componentType = namedtype.NamedTypes( + namedtype.NamedType('numeric', char.NumericString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))), + namedtype.NamedType('printable', char.PrintableString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))) +) + + +class BuiltInStandardAttributes(univ.Sequence): + pass + + +BuiltInStandardAttributes.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('country-name', CountryName()), + namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()), + namedtype.OptionalNamedType('network-address', NetworkAddress().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.OptionalNamedType('organization-name', OrganizationName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))), + namedtype.OptionalNamedType('personal-name', PersonalName().subtype( + 
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))), + namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))) +) + + +class ORAddress(univ.Sequence): + pass + + +ORAddress.componentType = namedtype.NamedTypes( + namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()), + namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()), + namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes()) +) + + +class DistinguishedName(RDNSequence): + pass + + +id_ad_timeStamping = _buildOid(id_ad, 3) + + +class PhysicalDeliveryOfficeNumber(PDSParameter): + pass + + +teletex_domain_defined_attributes = univ.Integer(6) + + +class UniquePostalName(PDSParameter): + pass + + +physical_delivery_country_name = univ.Integer(8) + +ub_pds_name_length = univ.Integer(16) + + +class PDSName(char.PrintableString): + pass + + +PDSName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_pds_name_length) + + +class TeletexPersonalName(univ.Set): + pass + + +TeletexPersonalName.componentType = namedtype.NamedTypes( + namedtype.NamedType('surname', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('given-name', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('initials', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) +) + +street_address = univ.Integer(17) + + +class PostOfficeBoxAddress(PDSParameter): + pass + + +local_postal_attributes = univ.Integer(21) + + +class DirectoryString(univ.Choice): + pass + + +DirectoryString.componentType = namedtype.NamedTypes( + namedtype.NamedType('teletexString', + char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('printableString', + char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('universalString', + char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) +) + +teletex_common_name = univ.Integer(2) + + +class CommonName(char.PrintableString): + pass + + +CommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length) + + +class Certificate(univ.Sequence): + pass + + +Certificate.componentType = namedtype.NamedTypes( + namedtype.NamedType('tbsCertificate', TBSCertificate()), + namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()) +) + + +class TeletexOrganizationalUnitName(char.TeletexString): + pass + + +TeletexOrganizationalUnitName.subtypeSpec = 
constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length) + +id_at_name = _buildOid(id_at, 41) + + +class TeletexOrganizationalUnitNames(univ.SequenceOf): + pass + + +TeletexOrganizationalUnitNames.componentType = TeletexOrganizationalUnitName() +TeletexOrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units) + +id_ce = _buildOid(2, 5, 29) + +id_ce_issuerAltName = _buildOid(id_ce, 18) + + +class SkipCerts(univ.Integer): + pass + + +SkipCerts.subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +class CRLReason(univ.Enumerated): + pass + + +CRLReason.namedValues = namedval.NamedValues( + ('unspecified', 0), + ('keyCompromise', 1), + ('cACompromise', 2), + ('affiliationChanged', 3), + ('superseded', 4), + ('cessationOfOperation', 5), + ('certificateHold', 6), + ('removeFromCRL', 8), + ('privilegeWithdrawn', 9), + ('aACompromise', 10) +) + + +class PrivateKeyUsagePeriod(univ.Sequence): + pass + + +PrivateKeyUsagePeriod.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +anotherNameMap = { + +} + + +class AnotherName(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type-id', univ.ObjectIdentifier()), + namedtype.NamedType( + 'value', + univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)), + openType=opentype.OpenType('type-id', anotherNameMap) + ) + ) + + +class EDIPartyName(univ.Sequence): + pass + + +EDIPartyName.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('partyName', DirectoryString().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) +) + + +class GeneralName(univ.Choice): + pass + + +GeneralName.componentType = namedtype.NamedTypes( + namedtype.NamedType('otherName', + AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('rfc822Name', + char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('dNSName', + char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('x400Address', + ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.NamedType('directoryName', + Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))), + namedtype.NamedType('ediPartyName', + EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))), + namedtype.NamedType('uniformResourceIdentifier', + char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))), + namedtype.NamedType('iPAddress', + univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))), + namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))) +) + + +class BaseDistance(univ.Integer): + pass + + +BaseDistance.subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +class GeneralSubtree(univ.Sequence): + pass + + 
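+# Like the rest of this asn1ate-generated module, GeneralSubtree is declared
+# as an empty class first and its componentType is assigned afterwards; the
+# two-step pattern lets generated types reference one another regardless of
+# definition order.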
+GeneralSubtree.componentType = namedtype.NamedTypes( + namedtype.NamedType('base', GeneralName()), + namedtype.DefaultedNamedType('minimum', BaseDistance().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)), + namedtype.OptionalNamedType('maximum', BaseDistance().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class GeneralNames(univ.SequenceOf): + pass + + +GeneralNames.componentType = GeneralName() +GeneralNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class DistributionPointName(univ.Choice): + pass + + +DistributionPointName.componentType = namedtype.NamedTypes( + namedtype.NamedType('fullName', + GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class ReasonFlags(univ.BitString): + pass + + +ReasonFlags.namedValues = namedval.NamedValues( + ('unused', 0), + ('keyCompromise', 1), + ('cACompromise', 2), + ('affiliationChanged', 3), + ('superseded', 4), + ('cessationOfOperation', 5), + ('certificateHold', 6), + ('privilegeWithdrawn', 7), + ('aACompromise', 8) +) + + +class IssuingDistributionPoint(univ.Sequence): + pass + + +IssuingDistributionPoint.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.DefaultedNamedType('onlyContainsUserCerts', univ.Boolean().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(value=0)), + namedtype.DefaultedNamedType('onlyContainsCACerts', univ.Boolean().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)).subtype(value=0)), + namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.DefaultedNamedType('indirectCRL', univ.Boolean().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)).subtype(value=0)), + namedtype.DefaultedNamedType('onlyContainsAttributeCerts', univ.Boolean().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)).subtype(value=0)) +) + +id_ce_certificatePolicies = _buildOid(id_ce, 32) + +id_kp_emailProtection = _buildOid(id_kp, 4) + + +class AccessDescription(univ.Sequence): + pass + + +AccessDescription.componentType = namedtype.NamedTypes( + namedtype.NamedType('accessMethod', univ.ObjectIdentifier()), + namedtype.NamedType('accessLocation', GeneralName()) +) + + +class IssuerAltName(GeneralNames): + pass + + +id_ce_cRLDistributionPoints = _buildOid(id_ce, 31) + +holdInstruction = _buildOid(2, 2, 840, 10040, 2) + +id_holdinstruction_callissuer = _buildOid(holdInstruction, 2) + +id_ce_subjectDirectoryAttributes = _buildOid(id_ce, 9) + +id_ce_issuingDistributionPoint = _buildOid(id_ce, 28) + + +class DistributionPoint(univ.Sequence): + pass + + +DistributionPoint.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('reasons', ReasonFlags().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype( + 
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) +) + + +class CRLDistributionPoints(univ.SequenceOf): + pass + + +CRLDistributionPoints.componentType = DistributionPoint() +CRLDistributionPoints.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class GeneralSubtrees(univ.SequenceOf): + pass + + +GeneralSubtrees.componentType = GeneralSubtree() +GeneralSubtrees.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class NameConstraints(univ.Sequence): + pass + + +NameConstraints.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class SubjectDirectoryAttributes(univ.SequenceOf): + pass + + +SubjectDirectoryAttributes.componentType = Attribute() +SubjectDirectoryAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + +id_kp_OCSPSigning = _buildOid(id_kp, 9) + +id_kp_timeStamping = _buildOid(id_kp, 8) + + +class DisplayText(univ.Choice): + pass + + +DisplayText.componentType = namedtype.NamedTypes( + namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))), + namedtype.NamedType('visibleString', + char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))), + namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))), + namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))) +) + + +class NoticeReference(univ.Sequence): + pass + + +NoticeReference.componentType = namedtype.NamedTypes( + namedtype.NamedType('organization', DisplayText()), + namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer())) +) + + +class UserNotice(univ.Sequence): + pass + + +UserNotice.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('noticeRef', NoticeReference()), + namedtype.OptionalNamedType('explicitText', DisplayText()) +) + + +class PolicyQualifierId(univ.ObjectIdentifier): + pass + + +policyQualifierInfoMap = { + +} + + +class PolicyQualifierInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('policyQualifierId', PolicyQualifierId()), + namedtype.NamedType( + 'qualifier', univ.Any(), + openType=opentype.OpenType('policyQualifierId', policyQualifierInfoMap) + ) + ) + + +class CertPolicyId(univ.ObjectIdentifier): + pass + + +class PolicyInformation(univ.Sequence): + pass + + +PolicyInformation.componentType = namedtype.NamedTypes( + namedtype.NamedType('policyIdentifier', CertPolicyId()), + namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo())) +) + + +class CertificatePolicies(univ.SequenceOf): + pass + + +CertificatePolicies.componentType = PolicyInformation() +CertificatePolicies.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class SubjectAltName(GeneralNames): + pass + + +id_ce_basicConstraints = _buildOid(id_ce, 19) + +id_ce_authorityKeyIdentifier = _buildOid(id_ce, 35) + +id_kp_codeSigning = _buildOid(id_kp, 3) + + +class BasicConstraints(univ.Sequence): + pass + + +BasicConstraints.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('cA', univ.Boolean().subtype(value=0)), + namedtype.OptionalNamedType('pathLenConstraint', + 
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))) +) + +id_ce_certificateIssuer = _buildOid(id_ce, 29) + + +class PolicyMappings(univ.SequenceOf): + pass + + +PolicyMappings.componentType = univ.Sequence( + componentType=namedtype.NamedTypes( + namedtype.NamedType('issuerDomainPolicy', CertPolicyId()), + namedtype.NamedType('subjectDomainPolicy', CertPolicyId()) + ) +) + +PolicyMappings.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class InhibitAnyPolicy(SkipCerts): + pass + + +anyPolicy = _buildOid(id_ce_certificatePolicies, 0) + + +class CRLNumber(univ.Integer): + pass + + +CRLNumber.subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +class BaseCRLNumber(CRLNumber): + pass + + +id_ce_nameConstraints = _buildOid(id_ce, 30) + +id_kp_serverAuth = _buildOid(id_kp, 1) + +id_ce_freshestCRL = _buildOid(id_ce, 46) + +id_ce_cRLReasons = _buildOid(id_ce, 21) + +id_ce_extKeyUsage = _buildOid(id_ce, 37) + + +class KeyIdentifier(univ.OctetString): + pass + + +class AuthorityKeyIdentifier(univ.Sequence): + pass + + +AuthorityKeyIdentifier.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) +) + + +class FreshestCRL(CRLDistributionPoints): + pass + + +id_ce_policyConstraints = _buildOid(id_ce, 36) + +id_pe_authorityInfoAccess = _buildOid(id_pe, 1) + + +class AuthorityInfoAccessSyntax(univ.SequenceOf): + pass + + +AuthorityInfoAccessSyntax.componentType = AccessDescription() +AuthorityInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + +id_holdinstruction_none = _buildOid(holdInstruction, 1) + + +class CPSuri(char.IA5String): + pass + + +id_pe_subjectInfoAccess = _buildOid(id_pe, 11) + + +class SubjectKeyIdentifier(KeyIdentifier): + pass + + +id_ce_subjectAltName = _buildOid(id_ce, 17) + + +class KeyPurposeId(univ.ObjectIdentifier): + pass + + +class ExtKeyUsageSyntax(univ.SequenceOf): + pass + + +ExtKeyUsageSyntax.componentType = KeyPurposeId() +ExtKeyUsageSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class HoldInstructionCode(univ.ObjectIdentifier): + pass + + +id_ce_deltaCRLIndicator = _buildOid(id_ce, 27) + +id_ce_keyUsage = _buildOid(id_ce, 15) + +id_ce_holdInstructionCode = _buildOid(id_ce, 23) + + +class SubjectInfoAccessSyntax(univ.SequenceOf): + pass + + +SubjectInfoAccessSyntax.componentType = AccessDescription() +SubjectInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class InvalidityDate(useful.GeneralizedTime): + pass + + +class KeyUsage(univ.BitString): + pass + + +KeyUsage.namedValues = namedval.NamedValues( + ('digitalSignature', 0), + ('nonRepudiation', 1), + ('keyEncipherment', 2), + ('dataEncipherment', 3), + ('keyAgreement', 4), + ('keyCertSign', 5), + ('cRLSign', 6), + ('encipherOnly', 7), + ('decipherOnly', 8) +) + +id_ce_invalidityDate = _buildOid(id_ce, 24) + +id_ce_policyMappings = _buildOid(id_ce, 33) + +anyExtendedKeyUsage = _buildOid(id_ce_extKeyUsage, 0) + +id_ce_privateKeyUsagePeriod = _buildOid(id_ce, 16) + +id_ce_cRLNumber = _buildOid(id_ce, 20) + + +class CertificateIssuer(GeneralNames): + pass + + +id_holdinstruction_reject = 
_buildOid(holdInstruction, 3) + + +class PolicyConstraints(univ.Sequence): + pass + + +PolicyConstraints.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('requireExplicitPolicy', + SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('inhibitPolicyMapping', + SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + +id_kp_clientAuth = _buildOid(id_kp, 2) + +id_ce_subjectKeyIdentifier = _buildOid(id_ce, 14) + +id_ce_inhibitAnyPolicy = _buildOid(id_ce, 54) + +# map of ORAddress ExtensionAttribute type to ExtensionAttribute value + +_oraddressExtensionAttributeMapUpdate = { + common_name: CommonName(), + teletex_common_name: TeletexCommonName(), + teletex_organization_name: TeletexOrganizationName(), + teletex_personal_name: TeletexPersonalName(), + teletex_organizational_unit_names: TeletexOrganizationalUnitNames(), + pds_name: PDSName(), + physical_delivery_country_name: PhysicalDeliveryCountryName(), + postal_code: PostalCode(), + physical_delivery_office_name: PhysicalDeliveryOfficeName(), + physical_delivery_office_number: PhysicalDeliveryOfficeNumber(), + extension_OR_address_components: ExtensionORAddressComponents(), + physical_delivery_personal_name: PhysicalDeliveryPersonalName(), + physical_delivery_organization_name: PhysicalDeliveryOrganizationName(), + extension_physical_delivery_address_components: ExtensionPhysicalDeliveryAddressComponents(), + unformatted_postal_address: UnformattedPostalAddress(), + street_address: StreetAddress(), + post_office_box_address: PostOfficeBoxAddress(), + poste_restante_address: PosteRestanteAddress(), + unique_postal_name: UniquePostalName(), + local_postal_attributes: LocalPostalAttributes(), + extended_network_address: ExtendedNetworkAddress(), + terminal_type: TerminalType(), + teletex_domain_defined_attributes: TeletexDomainDefinedAttributes(), +} + +oraddressExtensionAttributeMap.update(_oraddressExtensionAttributeMapUpdate) + + +# map of AttributeType -> AttributeValue + +_certificateAttributesMapUpdate = { + id_at_name: X520name(), + id_at_surname: X520name(), + id_at_givenName: X520name(), + id_at_initials: X520name(), + id_at_generationQualifier: X520name(), + id_at_commonName: X520CommonName(), + id_at_localityName: X520LocalityName(), + id_at_stateOrProvinceName: X520StateOrProvinceName(), + id_at_organizationName: X520OrganizationName(), + id_at_organizationalUnitName: X520OrganizationalUnitName(), + id_at_title: X520Title(), + id_at_dnQualifier: X520dnQualifier(), + id_at_countryName: X520countryName(), + id_at_serialNumber: X520SerialNumber(), + id_at_pseudonym: X520Pseudonym(), + id_domainComponent: DomainComponent(), + id_emailAddress: EmailAddress(), +} + +certificateAttributesMap.update(_certificateAttributesMapUpdate) + + +# map of Certificate Extension OIDs to Extensions + +_certificateExtensionsMap = { + id_ce_authorityKeyIdentifier: AuthorityKeyIdentifier(), + id_ce_subjectKeyIdentifier: SubjectKeyIdentifier(), + id_ce_keyUsage: KeyUsage(), + id_ce_privateKeyUsagePeriod: PrivateKeyUsagePeriod(), + id_ce_certificatePolicies: CertificatePolicies(), + id_ce_policyMappings: PolicyMappings(), + id_ce_subjectAltName: SubjectAltName(), + id_ce_issuerAltName: IssuerAltName(), + id_ce_subjectDirectoryAttributes: SubjectDirectoryAttributes(), + id_ce_basicConstraints: BasicConstraints(), + id_ce_nameConstraints: NameConstraints(), + id_ce_policyConstraints: PolicyConstraints(), + id_ce_extKeyUsage: 
ExtKeyUsageSyntax(), + id_ce_cRLDistributionPoints: CRLDistributionPoints(), + id_pe_authorityInfoAccess: AuthorityInfoAccessSyntax(), + id_ce_cRLNumber: univ.Integer(), + id_ce_deltaCRLIndicator: BaseCRLNumber(), + id_ce_issuingDistributionPoint: IssuingDistributionPoint(), + id_ce_cRLReasons: CRLReason(), + id_ce_holdInstructionCode: univ.ObjectIdentifier(), + id_ce_invalidityDate: useful.GeneralizedTime(), + id_ce_certificateIssuer: GeneralNames(), +} + +certificateExtensionsMap.update(_certificateExtensionsMap) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5480.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5480.py new file mode 100644 index 0000000000000000000000000000000000000000..84c0c11b880a63f2af5f39ca0702b64fe58b3446 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5480.py @@ -0,0 +1,190 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# Modified by Russ Housley to add maps for opentypes. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Elliptic Curve Cryptography Subject Public Key Information +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5480.txt + + +# What can be imported from rfc4055.py ? + +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc3279 +from pyasn1_modules import rfc5280 + + +# These structures are the same as RFC 3279. + +DHPublicKey = rfc3279.DHPublicKey + +DSAPublicKey = rfc3279.DSAPublicKey + +ValidationParms = rfc3279.ValidationParms + +DomainParameters = rfc3279.DomainParameters + +ECDSA_Sig_Value = rfc3279.ECDSA_Sig_Value + +ECPoint = rfc3279.ECPoint + +KEA_Parms_Id = rfc3279.KEA_Parms_Id + +RSAPublicKey = rfc3279.RSAPublicKey + + +# RFC 5480 changed the names of these structures from RFC 3279. + +DSS_Parms = rfc3279.Dss_Parms + +DSA_Sig_Value = rfc3279.Dss_Sig_Value + + +# RFC 3279 defines a more complex alternative for ECParameters. +# RFC 5480 narrows the definition to a single CHOICE: namedCurve. 
+ +class ECParameters(univ.Choice): + pass + +ECParameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('namedCurve', univ.ObjectIdentifier()) +) + + +# OIDs for Message Digest Algorithms + +id_md2 = univ.ObjectIdentifier('1.2.840.113549.2.2') + +id_md5 = univ.ObjectIdentifier('1.2.840.113549.2.5') + +id_sha1 = univ.ObjectIdentifier('1.3.14.3.2.26') + +id_sha224 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.4') + +id_sha256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.1') + +id_sha384 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.2') + +id_sha512 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.3') + + +# OID for RSA PK Algorithm and Key + +rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1') + + +# OID for DSA PK Algorithm, Key, and Parameters + +id_dsa = univ.ObjectIdentifier('1.2.840.10040.4.1') + + +# OID for Diffie-Hellman PK Algorithm, Key, and Parameters + +dhpublicnumber = univ.ObjectIdentifier('1.2.840.10046.2.1') + +# OID for KEA PK Algorithm and Parameters + +id_keyExchangeAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.22') + + +# OIDs for Elliptic Curve Algorithm ID, Key, and Parameters +# Note that ECDSA keys always use this OID + +id_ecPublicKey = univ.ObjectIdentifier('1.2.840.10045.2.1') + +id_ecDH = univ.ObjectIdentifier('1.3.132.1.12') + +id_ecMQV = univ.ObjectIdentifier('1.3.132.1.13') + + +# OIDs for RSA Signature Algorithms + +md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2') + +md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4') + +sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5') + + +# OIDs for DSA Signature Algorithms + +id_dsa_with_sha1 = univ.ObjectIdentifier('1.2.840.10040.4.3') + +id_dsa_with_sha224 = univ.ObjectIdentifier('2.16.840.1.101.3.4.3.1') + +id_dsa_with_sha256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.3.2') + + +# OIDs for ECDSA Signature Algorithms + +ecdsa_with_SHA1 = univ.ObjectIdentifier('1.2.840.10045.4.1') + +ecdsa_with_SHA224 = univ.ObjectIdentifier('1.2.840.10045.4.3.1') + +ecdsa_with_SHA256 = univ.ObjectIdentifier('1.2.840.10045.4.3.2') + +ecdsa_with_SHA384 = univ.ObjectIdentifier('1.2.840.10045.4.3.3') + +ecdsa_with_SHA512 = univ.ObjectIdentifier('1.2.840.10045.4.3.4') + + +# OIDs for Named Elliptic Curves + +secp192r1 = univ.ObjectIdentifier('1.2.840.10045.3.1.1') + +sect163k1 = univ.ObjectIdentifier('1.3.132.0.1') + +sect163r2 = univ.ObjectIdentifier('1.3.132.0.15') + +secp224r1 = univ.ObjectIdentifier('1.3.132.0.33') + +sect233k1 = univ.ObjectIdentifier('1.3.132.0.26') + +sect233r1 = univ.ObjectIdentifier('1.3.132.0.27') + +secp256r1 = univ.ObjectIdentifier('1.2.840.10045.3.1.7') + +sect283k1 = univ.ObjectIdentifier('1.3.132.0.16') + +sect283r1 = univ.ObjectIdentifier('1.3.132.0.17') + +secp384r1 = univ.ObjectIdentifier('1.3.132.0.34') + +sect409k1 = univ.ObjectIdentifier('1.3.132.0.36') + +sect409r1 = univ.ObjectIdentifier('1.3.132.0.37') + +secp521r1 = univ.ObjectIdentifier('1.3.132.0.35') + +sect571k1 = univ.ObjectIdentifier('1.3.132.0.38') + +sect571r1 = univ.ObjectIdentifier('1.3.132.0.39') + + +# Map of Algorithm Identifier OIDs to Parameters +# The algorithm is not included if the parameters MUST be absent + +_algorithmIdentifierMapUpdate = { + rsaEncryption: univ.Null(), + md2WithRSAEncryption: univ.Null(), + md5WithRSAEncryption: univ.Null(), + sha1WithRSAEncryption: univ.Null(), + id_dsa: DSS_Parms(), + dhpublicnumber: DomainParameters(), + id_keyExchangeAlgorithm: KEA_Parms_Id(), + id_ecPublicKey: ECParameters(), + id_ecDH: ECParameters(), 
+ id_ecMQV: ECParameters(), +} + + +# Add these Algorithm Identifier map entries to the ones in rfc5280.py + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5636.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5636.py new file mode 100644 index 0000000000000000000000000000000000000000..f87bc4ec82f0b2452c63f896bf45f21513977369 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5636.py @@ -0,0 +1,113 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Traceable Anonymous Certificate +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5636.txt + +from pyasn1.type import namedtype +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc5652 + + +# Imports from RFC 5652 + +ContentInfo = rfc5652.ContentInfo + +EncapsulatedContentInfo = rfc5652.EncapsulatedContentInfo + +id_data = rfc5652.id_data + + +# Object Identifiers + +id_KISA = univ.ObjectIdentifier((1, 2, 410, 200004,)) + + +id_npki = id_KISA + (10,) + + +id_attribute = id_npki + (1,) + + +id_kisa_tac = id_attribute + (1,) + + +id_kisa_tac_token = id_kisa_tac + (1,) + + +id_kisa_tac_tokenandblindbash = id_kisa_tac + (2,) + + +id_kisa_tac_tokenandpartially = id_kisa_tac + (3,) + + +# Structures for Traceable Anonymous Certificate (TAC) + +class UserKey(univ.OctetString): + pass + + +class Timeout(useful.GeneralizedTime): + pass + + +class BlinedCertificateHash(univ.OctetString): + pass + + +class PartiallySignedCertificateHash(univ.OctetString): + pass + + +class Token(ContentInfo): + pass + + +class TokenandBlindHash(ContentInfo): + pass + + +class TokenandPartiallySignedCertificateHash(ContentInfo): + pass + + +# Added to the module in RFC 5636 for the CMS Content Type Map + +class TACToken(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('userKey', UserKey()), + namedtype.NamedType('timeout', Timeout()) + ) + + +class TACTokenandBlindHash(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('token', Token()), + namedtype.NamedType('blinded', BlinedCertificateHash()) + ) + + +class TACTokenandPartiallySignedCertificateHash(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('token', Token()), + namedtype.NamedType('partially', PartiallySignedCertificateHash()) + ) + + +# Add to the CMS Content Type Map in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_kisa_tac_token: TACToken(), + id_kisa_tac_tokenandblindbash: TACTokenandBlindHash(), + id_kisa_tac_tokenandpartially: TACTokenandPartiallySignedCertificateHash(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5639.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5639.py new file mode 100644 index 0000000000000000000000000000000000000000..d48d30044b07a7344bfc105ad2a8bd5b4e343d92 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5639.py @@ -0,0 +1,49 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley.
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Elliptic Curve Cryptography Brainpool Standard Curves +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5639.txt + + +from pyasn1.type import univ + + +ecStdCurvesAndGeneration = univ.ObjectIdentifier((1, 3, 36, 3, 3, 2, 8,)) + +ellipticCurve = ecStdCurvesAndGeneration + (1,) + +versionOne = ellipticCurve + (1,) + +brainpoolP160r1 = versionOne + (1,) + +brainpoolP160t1 = versionOne + (2,) + +brainpoolP192r1 = versionOne + (3,) + +brainpoolP192t1 = versionOne + (4,) + +brainpoolP224r1 = versionOne + (5,) + +brainpoolP224t1 = versionOne + (6,) + +brainpoolP256r1 = versionOne + (7,) + +brainpoolP256t1 = versionOne + (8,) + +brainpoolP320r1 = versionOne + (9,) + +brainpoolP320t1 = versionOne + (10,) + +brainpoolP384r1 = versionOne + (11,) + +brainpoolP384t1 = versionOne + (12,) + +brainpoolP512r1 = versionOne + (13,) + +brainpoolP512t1 = versionOne + (14,) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5649.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5649.py new file mode 100644 index 0000000000000000000000000000000000000000..84809eeb188d23648f30940b9892619ad0699d58 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5649.py @@ -0,0 +1,33 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# AES Key Wrap with Padding +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5649.txt + +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +class AlgorithmIdentifier(rfc5280.AlgorithmIdentifier): + pass + + +id_aes128_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.5') + +id_aes192_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.25') + +id_aes256_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.45') + + +id_aes128_wrap_pad = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.8') + +id_aes192_wrap_pad = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.28') + +id_aes256_wrap_pad = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.48') diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5652.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5652.py new file mode 100644 index 0000000000000000000000000000000000000000..1e958293df5e01d79b64c9c8798202079f3c4269 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5652.py @@ -0,0 +1,761 @@ +# coding: utf-8 +# +# This file is part of pyasn1-modules software. +# +# Created by Stanisław Pitucha with asn1ate tool. +# Modified by Russ Housley to add support for opentypes. 
+# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# Cryptographic Message Syntax (CMS) +# +# ASN.1 source from: +# http://www.ietf.org/rfc/rfc5652.txt +# +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import opentype +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc3281 +from pyasn1_modules import rfc5280 + +MAX = float('inf') + + +def _buildOid(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + + return univ.ObjectIdentifier(output) + + +cmsContentTypesMap = { } + +cmsAttributesMap = { } + +otherKeyAttributesMap = { } + +otherCertFormatMap = { } + +otherRevInfoFormatMap = { } + +otherRecipientInfoMap = { } + + +class AttCertVersionV1(univ.Integer): + pass + + +AttCertVersionV1.namedValues = namedval.NamedValues( + ('v1', 0) +) + + +class AttributeCertificateInfoV1(univ.Sequence): + pass + + +AttributeCertificateInfoV1.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', AttCertVersionV1().subtype(value="v1")), + namedtype.NamedType( + 'subject', univ.Choice( + componentType=namedtype.NamedTypes( + namedtype.NamedType('baseCertificateID', rfc3281.IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('subjectName', rfc5280.GeneralNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + ) + ), + namedtype.NamedType('issuer', rfc5280.GeneralNames()), + namedtype.NamedType('signature', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('serialNumber', rfc5280.CertificateSerialNumber()), + namedtype.NamedType('attCertValidityPeriod', rfc3281.AttCertValidityPeriod()), + namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc5280.Attribute())), + namedtype.OptionalNamedType('issuerUniqueID', rfc5280.UniqueIdentifier()), + namedtype.OptionalNamedType('extensions', rfc5280.Extensions()) +) + + +class AttributeCertificateV1(univ.Sequence): + pass + + +AttributeCertificateV1.componentType = namedtype.NamedTypes( + namedtype.NamedType('acInfo', AttributeCertificateInfoV1()), + namedtype.NamedType('signatureAlgorithm', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()) +) + + +class AttributeValue(univ.Any): + pass + + +class Attribute(univ.Sequence): + pass + + +Attribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('attrType', univ.ObjectIdentifier()), + namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()), + openType=opentype.OpenType('attrType', cmsAttributesMap) + ) +) + + +class SignedAttributes(univ.SetOf): + pass + + +SignedAttributes.componentType = Attribute() +SignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class AttributeCertificateV2(rfc3281.AttributeCertificate): + pass + + +class OtherKeyAttribute(univ.Sequence): + pass + + +OtherKeyAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('keyAttrId', univ.ObjectIdentifier()), + namedtype.OptionalNamedType('keyAttr', univ.Any(), + openType=opentype.OpenType('keyAttrId', otherKeyAttributesMap) + ) +) + + +class UnauthAttributes(univ.SetOf): + pass + + +UnauthAttributes.componentType = Attribute() +UnauthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + +id_encryptedData = 
_buildOid(1, 2, 840, 113549, 1, 7, 6) + + +class SignatureValue(univ.OctetString): + pass + + +class IssuerAndSerialNumber(univ.Sequence): + pass + + +IssuerAndSerialNumber.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuer', rfc5280.Name()), + namedtype.NamedType('serialNumber', rfc5280.CertificateSerialNumber()) +) + + +class SubjectKeyIdentifier(univ.OctetString): + pass + + +class RecipientKeyIdentifier(univ.Sequence): + pass + + +RecipientKeyIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier()), + namedtype.OptionalNamedType('date', useful.GeneralizedTime()), + namedtype.OptionalNamedType('other', OtherKeyAttribute()) +) + + +class KeyAgreeRecipientIdentifier(univ.Choice): + pass + + +KeyAgreeRecipientIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()), + namedtype.NamedType('rKeyId', RecipientKeyIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) +) + + +class EncryptedKey(univ.OctetString): + pass + + +class RecipientEncryptedKey(univ.Sequence): + pass + + +RecipientEncryptedKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('rid', KeyAgreeRecipientIdentifier()), + namedtype.NamedType('encryptedKey', EncryptedKey()) +) + + +class RecipientEncryptedKeys(univ.SequenceOf): + pass + + +RecipientEncryptedKeys.componentType = RecipientEncryptedKey() + + +class MessageAuthenticationCode(univ.OctetString): + pass + + +class CMSVersion(univ.Integer): + pass + + +CMSVersion.namedValues = namedval.NamedValues( + ('v0', 0), + ('v1', 1), + ('v2', 2), + ('v3', 3), + ('v4', 4), + ('v5', 5) +) + + +class OtherCertificateFormat(univ.Sequence): + pass + + +OtherCertificateFormat.componentType = namedtype.NamedTypes( + namedtype.NamedType('otherCertFormat', univ.ObjectIdentifier()), + namedtype.NamedType('otherCert', univ.Any(), + openType=opentype.OpenType('otherCertFormat', otherCertFormatMap) + ) +) + + +class ExtendedCertificateInfo(univ.Sequence): + pass + + +ExtendedCertificateInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('certificate', rfc5280.Certificate()), + namedtype.NamedType('attributes', UnauthAttributes()) +) + + +class Signature(univ.BitString): + pass + + +class SignatureAlgorithmIdentifier(rfc5280.AlgorithmIdentifier): + pass + + +class ExtendedCertificate(univ.Sequence): + pass + + +ExtendedCertificate.componentType = namedtype.NamedTypes( + namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()), + namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()), + namedtype.NamedType('signature', Signature()) +) + + +class CertificateChoices(univ.Choice): + pass + + +CertificateChoices.componentType = namedtype.NamedTypes( + namedtype.NamedType('certificate', rfc5280.Certificate()), + namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('v1AttrCert', AttributeCertificateV1().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('v2AttrCert', AttributeCertificateV2().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('other', OtherCertificateFormat().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))) +) + + +class CertificateSet(univ.SetOf): + 
pass + + +CertificateSet.componentType = CertificateChoices() + + +class OtherRevocationInfoFormat(univ.Sequence): + pass + + +OtherRevocationInfoFormat.componentType = namedtype.NamedTypes( + namedtype.NamedType('otherRevInfoFormat', univ.ObjectIdentifier()), + namedtype.NamedType('otherRevInfo', univ.Any(), + openType=opentype.OpenType('otherRevInfoFormat', otherRevInfoFormatMap) + ) +) + + +class RevocationInfoChoice(univ.Choice): + pass + + +RevocationInfoChoice.componentType = namedtype.NamedTypes( + namedtype.NamedType('crl', rfc5280.CertificateList()), + namedtype.NamedType('other', OtherRevocationInfoFormat().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) +) + + +class RevocationInfoChoices(univ.SetOf): + pass + + +RevocationInfoChoices.componentType = RevocationInfoChoice() + + +class OriginatorInfo(univ.Sequence): + pass + + +OriginatorInfo.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('certs', CertificateSet().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class ContentType(univ.ObjectIdentifier): + pass + + +class EncryptedContent(univ.OctetString): + pass + + +class ContentEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier): + pass + + +class EncryptedContentInfo(univ.Sequence): + pass + + +EncryptedContentInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('contentType', ContentType()), + namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()), + namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class UnprotectedAttributes(univ.SetOf): + pass + + +UnprotectedAttributes.componentType = Attribute() +UnprotectedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier): + pass + + +class KEKIdentifier(univ.Sequence): + pass + + +KEKIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('keyIdentifier', univ.OctetString()), + namedtype.OptionalNamedType('date', useful.GeneralizedTime()), + namedtype.OptionalNamedType('other', OtherKeyAttribute()) +) + + +class KEKRecipientInfo(univ.Sequence): + pass + + +KEKRecipientInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('kekid', KEKIdentifier()), + namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('encryptedKey', EncryptedKey()) +) + + +class KeyDerivationAlgorithmIdentifier(rfc5280.AlgorithmIdentifier): + pass + + +class PasswordRecipientInfo(univ.Sequence): + pass + + +PasswordRecipientInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.OptionalNamedType('keyDerivationAlgorithm', KeyDerivationAlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('encryptedKey', EncryptedKey()) +) + + +class RecipientIdentifier(univ.Choice): + pass + + +RecipientIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()), + namedtype.NamedType('subjectKeyIdentifier', 
SubjectKeyIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class KeyTransRecipientInfo(univ.Sequence): + pass + + +KeyTransRecipientInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('rid', RecipientIdentifier()), + namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('encryptedKey', EncryptedKey()) +) + + +class UserKeyingMaterial(univ.OctetString): + pass + + +class OriginatorPublicKey(univ.Sequence): + pass + + +OriginatorPublicKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('publicKey', univ.BitString()) +) + + +class OriginatorIdentifierOrKey(univ.Choice): + pass + + +OriginatorIdentifierOrKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()), + namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('originatorKey', OriginatorPublicKey().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) +) + + +class KeyAgreeRecipientInfo(univ.Sequence): + pass + + +KeyAgreeRecipientInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('originator', OriginatorIdentifierOrKey().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('ukm', UserKeyingMaterial().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('recipientEncryptedKeys', RecipientEncryptedKeys()) +) + + +class OtherRecipientInfo(univ.Sequence): + pass + + +OtherRecipientInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('oriType', univ.ObjectIdentifier()), + namedtype.NamedType('oriValue', univ.Any(), + openType=opentype.OpenType('oriType', otherRecipientInfoMap) + ) +) + + +class RecipientInfo(univ.Choice): + pass + + +RecipientInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('ktri', KeyTransRecipientInfo()), + namedtype.NamedType('kari', KeyAgreeRecipientInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))), + namedtype.NamedType('kekri', KEKRecipientInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))), + namedtype.NamedType('pwri', PasswordRecipientInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))), + namedtype.NamedType('ori', OtherRecipientInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))) +) + + +class RecipientInfos(univ.SetOf): + pass + + +RecipientInfos.componentType = RecipientInfo() +RecipientInfos.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class EnvelopedData(univ.Sequence): + pass + + +EnvelopedData.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('recipientInfos', RecipientInfos()), + namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()), + namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype( + 
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class DigestAlgorithmIdentifier(rfc5280.AlgorithmIdentifier): + pass + + +id_ct_contentInfo = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 6) + +id_digestedData = _buildOid(1, 2, 840, 113549, 1, 7, 5) + + +class EncryptedData(univ.Sequence): + pass + + +EncryptedData.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()), + namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + +id_messageDigest = _buildOid(1, 2, 840, 113549, 1, 9, 4) + +id_signedData = _buildOid(1, 2, 840, 113549, 1, 7, 2) + + +class MessageAuthenticationCodeAlgorithm(rfc5280.AlgorithmIdentifier): + pass + + +class UnsignedAttributes(univ.SetOf): + pass + + +UnsignedAttributes.componentType = Attribute() +UnsignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class SignerIdentifier(univ.Choice): + pass + + +SignerIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()), + namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class SignerInfo(univ.Sequence): + pass + + +SignerInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('sid', SignerIdentifier()), + namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()), + namedtype.OptionalNamedType('signedAttrs', SignedAttributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()), + namedtype.NamedType('signature', SignatureValue()), + namedtype.OptionalNamedType('unsignedAttrs', UnsignedAttributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class SignerInfos(univ.SetOf): + pass + + +SignerInfos.componentType = SignerInfo() + + +class Countersignature(SignerInfo): + pass + + +class ContentInfo(univ.Sequence): + pass + + +ContentInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('contentType', ContentType()), + namedtype.NamedType('content', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)), + openType=opentype.OpenType('contentType', cmsContentTypesMap) + ) +) + + +class EncapsulatedContentInfo(univ.Sequence): + pass + + +EncapsulatedContentInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('eContentType', ContentType()), + namedtype.OptionalNamedType('eContent', univ.OctetString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) +) + +id_countersignature = _buildOid(1, 2, 840, 113549, 1, 9, 6) + +id_data = _buildOid(1, 2, 840, 113549, 1, 7, 1) + + +class MessageDigest(univ.OctetString): + pass + + +class AuthAttributes(univ.SetOf): + pass + + +AuthAttributes.componentType = Attribute() +AuthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class Time(univ.Choice): + pass + + +Time.componentType = namedtype.NamedTypes( + namedtype.NamedType('utcTime', useful.UTCTime()), + namedtype.NamedType('generalTime', useful.GeneralizedTime()) +) + + +class AuthenticatedData(univ.Sequence): + pass + + +AuthenticatedData.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + 
namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('recipientInfos', RecipientInfos()), + namedtype.NamedType('macAlgorithm', MessageAuthenticationCodeAlgorithm()), + namedtype.OptionalNamedType('digestAlgorithm', DigestAlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()), + namedtype.OptionalNamedType('authAttrs', AuthAttributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('mac', MessageAuthenticationCode()), + namedtype.OptionalNamedType('unauthAttrs', UnauthAttributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))) +) + +id_contentType = _buildOid(1, 2, 840, 113549, 1, 9, 3) + + +class ExtendedCertificateOrCertificate(univ.Choice): + pass + + +ExtendedCertificateOrCertificate.componentType = namedtype.NamedTypes( + namedtype.NamedType('certificate', rfc5280.Certificate()), + namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) +) + + +class Digest(univ.OctetString): + pass + + +class DigestedData(univ.Sequence): + pass + + +DigestedData.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()), + namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()), + namedtype.NamedType('digest', Digest()) +) + +id_envelopedData = _buildOid(1, 2, 840, 113549, 1, 7, 3) + + +class DigestAlgorithmIdentifiers(univ.SetOf): + pass + + +DigestAlgorithmIdentifiers.componentType = DigestAlgorithmIdentifier() + + +class SignedData(univ.Sequence): + pass + + +SignedData.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', CMSVersion()), + namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()), + namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()), + namedtype.OptionalNamedType('certificates', CertificateSet().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('signerInfos', SignerInfos()) +) + +id_signingTime = _buildOid(1, 2, 840, 113549, 1, 9, 5) + + +class SigningTime(Time): + pass + + +id_ct_authData = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 2) + + +# CMS Content Type Map + +_cmsContentTypesMapUpdate = { + id_ct_contentInfo: ContentInfo(), + id_data: univ.OctetString(), + id_signedData: SignedData(), + id_envelopedData: EnvelopedData(), + id_digestedData: DigestedData(), + id_encryptedData: EncryptedData(), + id_ct_authData: AuthenticatedData(), +} + +cmsContentTypesMap.update(_cmsContentTypesMapUpdate) + + +# CMS Attribute Map + +_cmsAttributesMapUpdate = { + id_contentType: ContentType(), + id_messageDigest: MessageDigest(), + id_signingTime: SigningTime(), + id_countersignature: Countersignature(), +} + +cmsAttributesMap.update(_cmsAttributesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5697.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5697.py new file mode 100644 index 0000000000000000000000000000000000000000..8c5a9d3ecf37c51697443c3f85909afe235e0951 --- /dev/null +++ 
b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5697.py @@ -0,0 +1,70 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Other Certificates Extension +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5697.txt + +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc4055 + + +# Imports from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + +CertificateSerialNumber = rfc5280.CertificateSerialNumber + +GeneralNames = rfc5280.GeneralNames + + +# Imports from RFC 4055 + +id_sha1 = rfc4055.id_sha1 + + +# Imports from RFC 5055 +# These are defined here because a module for RFC 5055 does not exist yet + +class SCVPIssuerSerial(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('issuer', GeneralNames()), + namedtype.NamedType('serialNumber', CertificateSerialNumber()) + ) + + +sha1_alg_id = AlgorithmIdentifier() +sha1_alg_id['algorithm'] = id_sha1 + + +class SCVPCertID(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('certHash', univ.OctetString()), + namedtype.NamedType('issuerSerial', SCVPIssuerSerial()), + namedtype.DefaultedNamedType('hashAlgorithm', sha1_alg_id) + ) + + +# Other Certificates Extension + +id_pe_otherCerts = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 1, 19,)) + +class OtherCertificates(univ.SequenceOf): + componentType = SCVPCertID() + + +# Update of certificate extension map in rfc5280.py + +_certificateExtensionsMapUpdate = { + id_pe_otherCerts: OtherCertificates(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5751.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5751.py new file mode 100644 index 0000000000000000000000000000000000000000..7e200012c6bda47ed30e5dcd7f3a9daf898dc3f9 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5751.py @@ -0,0 +1,124 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# S/MIME Version 3.2 Message Specification +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5751.txt + +from pyasn1.type import namedtype +from pyasn1.type import opentype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5652 +from pyasn1_modules import rfc8018 + + +def _OID(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + return univ.ObjectIdentifier(output) + + +# Imports from RFC 5652 and RFC 8018 + +IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber + +RecipientKeyIdentifier = rfc5652.RecipientKeyIdentifier + +SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier + +rc2CBC = rfc8018.rc2CBC + + +# S/MIME Capabilities Attribute + +smimeCapabilities = univ.ObjectIdentifier('1.2.840.113549.1.9.15') + + +smimeCapabilityMap = { } + + +class SMIMECapability(univ.Sequence): + pass + +SMIMECapability.componentType = namedtype.NamedTypes( + namedtype.NamedType('capabilityID', univ.ObjectIdentifier()), + namedtype.OptionalNamedType('parameters', univ.Any(), + openType=opentype.OpenType('capabilityID', smimeCapabilityMap)) +) + + +class SMIMECapabilities(univ.SequenceOf): + pass + +SMIMECapabilities.componentType = SMIMECapability() + + +class SMIMECapabilitiesParametersForRC2CBC(univ.Integer): + # which carries the RC2 Key Length (number of bits) + pass + + +# S/MIME Encryption Key Preference Attribute + +id_smime = univ.ObjectIdentifier('1.2.840.113549.1.9.16') + +id_aa = _OID(id_smime, 2) + +id_aa_encrypKeyPref = _OID(id_aa, 11) + + +class SMIMEEncryptionKeyPreference(univ.Choice): + pass + +SMIMEEncryptionKeyPreference.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerAndSerialNumber', + IssuerAndSerialNumber().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('receipentKeyId', + # Yes, 'receipentKeyId' is spelled incorrectly, but kept + # this way for alignment with the ASN.1 module in the RFC. + RecipientKeyIdentifier().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('subjectAltKeyIdentifier', + SubjectKeyIdentifier().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 2))) +) + + +# The Prefer Binary Inside SMIMECapabilities attribute + +id_cap = _OID(id_smime, 11) + +id_cap_preferBinaryInside = _OID(id_cap, 1) + + +# CMS Attribute Map + +_cmsAttributesMapUpdate = { + smimeCapabilities: SMIMECapabilities(), + id_aa_encrypKeyPref: SMIMEEncryptionKeyPreference(), +} + +rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) + + +# SMIMECapabilities Attribute Map +# +# Do not include OIDs in the dictionary when the parameters are absent. + +_smimeCapabilityMapUpdate = { + rc2CBC: SMIMECapabilitiesParametersForRC2CBC(), +} + +smimeCapabilityMap.update(_smimeCapabilityMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5752.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5752.py new file mode 100644 index 0000000000000000000000000000000000000000..1d0df8f45977effaf711079f0aa92980755b4e32 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5752.py @@ -0,0 +1,49 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Multiple Signatures in Cryptographic Message Syntax (CMS) +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5752.txt +# https://www.rfc-editor.org/errata/eid4444 +# + +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc5035 +from pyasn1_modules import rfc5652 + + +class SignAttrsHash(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('algID', rfc5652.DigestAlgorithmIdentifier()), + namedtype.NamedType('hash', univ.OctetString()) + ) + + +class MultipleSignatures(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('bodyHashAlg', rfc5652.DigestAlgorithmIdentifier()), + namedtype.NamedType('signAlg', rfc5652.SignatureAlgorithmIdentifier()), + namedtype.NamedType('signAttrsHash', SignAttrsHash()), + namedtype.OptionalNamedType('cert', rfc5035.ESSCertIDv2()) + ) + + +id_aa_multipleSignatures = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.51') + + +# Map of Attribute Type OIDs to Attributes added to the +# ones that are in rfc5652.py + +_cmsAttributesMapUpdate = { + id_aa_multipleSignatures: MultipleSignatures(), +} + +rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5753.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5753.py new file mode 100644 index 0000000000000000000000000000000000000000..94c37c2ab10ca604fbbef5260ac8565bc2a78d6c --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5753.py @@ -0,0 +1,157 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Elliptic Curve Cryptography (ECC) Algorithms in the CMS +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5753.txt +# + +from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5480 +from pyasn1_modules import rfc5652 +from pyasn1_modules import rfc5751 +from pyasn1_modules import rfc8018 + + +# Imports from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + + +# Imports from RFC 5652 + +OriginatorPublicKey = rfc5652.OriginatorPublicKey + +UserKeyingMaterial = rfc5652.UserKeyingMaterial + + +# Imports from RFC 5480 + +ECDSA_Sig_Value = rfc5480.ECDSA_Sig_Value + +ECParameters = rfc5480.ECParameters + +ECPoint = rfc5480.ECPoint + +id_ecPublicKey = rfc5480.id_ecPublicKey + + +# Imports from RFC 8018 + +id_hmacWithSHA224 = rfc8018.id_hmacWithSHA224 + +id_hmacWithSHA256 = rfc8018.id_hmacWithSHA256 + +id_hmacWithSHA384 = rfc8018.id_hmacWithSHA384 + +id_hmacWithSHA512 = rfc8018.id_hmacWithSHA512 + + +# Object Identifier arcs + +x9_63_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0') + +secg_scheme = univ.ObjectIdentifier('1.3.132.1') + + +# Object Identifiers for the algorithms + +dhSinglePass_cofactorDH_sha1kdf_scheme = x9_63_scheme + (3, ) + +dhSinglePass_cofactorDH_sha224kdf_scheme = secg_scheme + (14, 0, ) + +dhSinglePass_cofactorDH_sha256kdf_scheme = secg_scheme + (14, 1, ) + +dhSinglePass_cofactorDH_sha384kdf_scheme = secg_scheme + (14, 2, ) + +dhSinglePass_cofactorDH_sha512kdf_scheme = secg_scheme + (14, 3, ) + +dhSinglePass_stdDH_sha1kdf_scheme = x9_63_scheme + (2, ) + +dhSinglePass_stdDH_sha224kdf_scheme = secg_scheme + (11, 0, ) 
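+
+# Editor's aside -- an illustrative sketch, not part of the upstream RFC 5753
+# module: pyasn1 ObjectIdentifier values compose with plain tuples, which is
+# how the scheme OIDs in this section are derived from the x9_63_scheme and
+# secg_scheme arcs above.  Running this module as a script checks two of the
+# derived dotted forms.
+if __name__ == '__main__':
+    assert str(dhSinglePass_cofactorDH_sha1kdf_scheme) == '1.3.133.16.840.63.0.3'
+    assert str(dhSinglePass_stdDH_sha224kdf_scheme) == '1.3.132.1.11.0'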
+ +dhSinglePass_stdDH_sha256kdf_scheme = secg_scheme + (11, 1, ) + +dhSinglePass_stdDH_sha384kdf_scheme = secg_scheme + (11, 2, ) + +dhSinglePass_stdDH_sha512kdf_scheme = secg_scheme + (11, 3, ) + +mqvSinglePass_sha1kdf_scheme = x9_63_scheme + (16, ) + +mqvSinglePass_sha224kdf_scheme = secg_scheme + (15, 0, ) + +mqvSinglePass_sha256kdf_scheme = secg_scheme + (15, 1, ) + +mqvSinglePass_sha384kdf_scheme = secg_scheme + (15, 2, ) + +mqvSinglePass_sha512kdf_scheme = secg_scheme + (15, 3, ) + + +# Structures for parameters and key derivation + +class IV(univ.OctetString): + # Exactly 8 octets + pass + + +class CBCParameter(IV): + pass + + +class KeyWrapAlgorithm(AlgorithmIdentifier): + pass + + +class ECC_CMS_SharedInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('keyInfo', KeyWrapAlgorithm()), + namedtype.OptionalNamedType('entityUInfo', + univ.OctetString().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('suppPubInfo', + univ.OctetString().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class MQVuserKeyingMaterial(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('ephemeralPublicKey', OriginatorPublicKey()), + namedtype.OptionalNamedType('addedukm', + UserKeyingMaterial().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))) + ) + + +# Update the Algorithm Identifier map in rfc5280.py and +# Update the SMIMECapabilities Attribute Map in rfc5751.py + +_algorithmIdentifierMapUpdate = { + dhSinglePass_stdDH_sha1kdf_scheme: KeyWrapAlgorithm(), + dhSinglePass_stdDH_sha224kdf_scheme: KeyWrapAlgorithm(), + dhSinglePass_stdDH_sha256kdf_scheme: KeyWrapAlgorithm(), + dhSinglePass_stdDH_sha384kdf_scheme: KeyWrapAlgorithm(), + dhSinglePass_stdDH_sha512kdf_scheme: KeyWrapAlgorithm(), + dhSinglePass_cofactorDH_sha1kdf_scheme: KeyWrapAlgorithm(), + dhSinglePass_cofactorDH_sha224kdf_scheme: KeyWrapAlgorithm(), + dhSinglePass_cofactorDH_sha256kdf_scheme: KeyWrapAlgorithm(), + dhSinglePass_cofactorDH_sha384kdf_scheme: KeyWrapAlgorithm(), + dhSinglePass_cofactorDH_sha512kdf_scheme: KeyWrapAlgorithm(), + mqvSinglePass_sha1kdf_scheme: KeyWrapAlgorithm(), + mqvSinglePass_sha224kdf_scheme: KeyWrapAlgorithm(), + mqvSinglePass_sha256kdf_scheme: KeyWrapAlgorithm(), + mqvSinglePass_sha384kdf_scheme: KeyWrapAlgorithm(), + mqvSinglePass_sha512kdf_scheme: KeyWrapAlgorithm(), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) + +rfc5751.smimeCapabilityMap.update(_algorithmIdentifierMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5755.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5755.py new file mode 100644 index 0000000000000000000000000000000000000000..14f56fc600051edfe4b2dafce032aba8df45b674 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5755.py @@ -0,0 +1,398 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# An Internet Attribute Certificate Profile for Authorization +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5755.txt +# https://www.rfc-editor.org/rfc/rfc5912.txt (see Section 13) +# + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import opentype +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5652 + +MAX = float('inf') + +# Map for Security Category type to value + +securityCategoryMap = { } + + +# Imports from RFC 5652 + +ContentInfo = rfc5652.ContentInfo + + +# Imports from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + +Attribute = rfc5280.Attribute + +AuthorityInfoAccessSyntax = rfc5280.AuthorityInfoAccessSyntax + +AuthorityKeyIdentifier = rfc5280.AuthorityKeyIdentifier + +CertificateSerialNumber = rfc5280.CertificateSerialNumber + +CRLDistributionPoints = rfc5280.CRLDistributionPoints + +Extensions = rfc5280.Extensions + +Extension = rfc5280.Extension + +GeneralNames = rfc5280.GeneralNames + +GeneralName = rfc5280.GeneralName + +UniqueIdentifier = rfc5280.UniqueIdentifier + + +# Object Identifier arcs + +id_pkix = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, )) + +id_pe = id_pkix + (1, ) + +id_kp = id_pkix + (3, ) + +id_aca = id_pkix + (10, ) + +id_ad = id_pkix + (48, ) + +id_at = univ.ObjectIdentifier((2, 5, 4, )) + +id_ce = univ.ObjectIdentifier((2, 5, 29, )) + + +# Attribute Certificate + +class AttCertVersion(univ.Integer): + namedValues = namedval.NamedValues( + ('v2', 1) + ) + + +class IssuerSerial(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('issuer', GeneralNames()), + namedtype.NamedType('serial', CertificateSerialNumber()), + namedtype.OptionalNamedType('issuerUID', UniqueIdentifier()) + ) + + +class ObjectDigestInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('digestedObjectType', + univ.Enumerated(namedValues=namedval.NamedValues( + ('publicKey', 0), + ('publicKeyCert', 1), + ('otherObjectTypes', 2)))), + namedtype.OptionalNamedType('otherObjectTypeID', + univ.ObjectIdentifier()), + namedtype.NamedType('digestAlgorithm', + AlgorithmIdentifier()), + namedtype.NamedType('objectDigest', + univ.BitString()) + ) + + +class Holder(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('baseCertificateID', + IssuerSerial().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('entityName', + GeneralNames().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('objectDigestInfo', + ObjectDigestInfo().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 2))) +) + + +class V2Form(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('issuerName', + GeneralNames()), + namedtype.OptionalNamedType('baseCertificateID', + IssuerSerial().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('objectDigestInfo', + ObjectDigestInfo().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 1))) + ) + + +class AttCertIssuer(univ.Choice): + componentType = namedtype.NamedTypes( + 
namedtype.NamedType('v1Form', GeneralNames()), + namedtype.NamedType('v2Form', V2Form().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 0))) + ) + + +class AttCertValidityPeriod(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()), + namedtype.NamedType('notAfterTime', useful.GeneralizedTime()) + ) + + +class AttributeCertificateInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', + AttCertVersion()), + namedtype.NamedType('holder', + Holder()), + namedtype.NamedType('issuer', + AttCertIssuer()), + namedtype.NamedType('signature', + AlgorithmIdentifier()), + namedtype.NamedType('serialNumber', + CertificateSerialNumber()), + namedtype.NamedType('attrCertValidityPeriod', + AttCertValidityPeriod()), + namedtype.NamedType('attributes', + univ.SequenceOf(componentType=Attribute())), + namedtype.OptionalNamedType('issuerUniqueID', + UniqueIdentifier()), + namedtype.OptionalNamedType('extensions', + Extensions()) + ) + + +class AttributeCertificate(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('acinfo', AttributeCertificateInfo()), + namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('signatureValue', univ.BitString()) + ) + + +# Attribute Certificate Extensions + +id_pe_ac_auditIdentity = id_pe + (4, ) + +id_ce_noRevAvail = id_ce + (56, ) + +id_ce_targetInformation = id_ce + (55, ) + + +class TargetCert(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('targetCertificate', IssuerSerial()), + namedtype.OptionalNamedType('targetName', GeneralName()), + namedtype.OptionalNamedType('certDigestInfo', ObjectDigestInfo()) + ) + + +class Target(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('targetName', + GeneralName().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('targetGroup', + GeneralName().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('targetCert', + TargetCert().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 2))) + ) + + +class Targets(univ.SequenceOf): + componentType = Target() + + +id_pe_ac_proxying = id_pe + (10, ) + + +class ProxyInfo(univ.SequenceOf): + componentType = Targets() + + +id_pe_aaControls = id_pe + (6, ) + + +class AttrSpec(univ.SequenceOf): + componentType = univ.ObjectIdentifier() + + +class AAControls(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('pathLenConstraint', + univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(0, MAX))), + namedtype.OptionalNamedType('permittedAttrs', + AttrSpec().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('excludedAttrs', + AttrSpec().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.DefaultedNamedType('permitUnSpecified', + univ.Boolean().subtype(value=1)) + ) + + +# Attribute Certificate Attributes + +id_aca_authenticationInfo = id_aca + (1, ) + + +id_aca_accessIdentity = id_aca + (2, ) + + +class SvceAuthInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('service', GeneralName()), + namedtype.NamedType('ident', GeneralName()), + namedtype.OptionalNamedType('authInfo', univ.OctetString()) + ) + + +id_aca_chargingIdentity = id_aca + (3, ) + + 
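+# Editor's aside -- an illustrative sketch, not part of the upstream RFC 5755
+# module: a SvceAuthInfo value (defined above) can be built field by field
+# with the pyasn1 API and then DER-encoded.  The service and identity names
+# below are placeholder values, not anything defined by the RFC.
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode as der_encode
+
+    svc = SvceAuthInfo()
+    svc['service']['dNSName'] = 'aa.example.com'
+    svc['ident']['rfc822Name'] = 'user@example.com'
+    # 'authInfo' is OPTIONAL and deliberately left absent here
+    print(der_encode(svc).hex())
+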
+id_aca_group = id_aca + (4, ) + + +class IetfAttrSyntax(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('policyAuthority', + GeneralNames().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('values', univ.SequenceOf( + componentType=univ.Choice(componentType=namedtype.NamedTypes( + namedtype.NamedType('octets', univ.OctetString()), + namedtype.NamedType('oid', univ.ObjectIdentifier()), + namedtype.NamedType('string', char.UTF8String()) + )) + )) + ) + + +id_at_role = id_at + (72,) + + +class RoleSyntax(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('roleAuthority', + GeneralNames().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('roleName', + GeneralName().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class ClassList(univ.BitString): + namedValues = namedval.NamedValues( + ('unmarked', 0), + ('unclassified', 1), + ('restricted', 2), + ('confidential', 3), + ('secret', 4), + ('topSecret', 5) + ) + + +class SecurityCategory(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type', + univ.ObjectIdentifier().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('value', + univ.Any().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1)), + openType=opentype.OpenType('type', securityCategoryMap)) + ) + + +id_at_clearance = univ.ObjectIdentifier((2, 5, 4, 55, )) + + +class Clearance(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('policyId', + univ.ObjectIdentifier()), + namedtype.DefaultedNamedType('classList', + ClassList().subtype(value='unclassified')), + namedtype.OptionalNamedType('securityCategories', + univ.SetOf(componentType=SecurityCategory())) + ) + + +id_at_clearance_rfc3281 = univ.ObjectIdentifier((2, 5, 1, 5, 55, )) + + +class Clearance_rfc3281(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('policyId', + univ.ObjectIdentifier().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.DefaultedNamedType('classList', + ClassList().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1)).subtype( + value='unclassified')), + namedtype.OptionalNamedType('securityCategories', + univ.SetOf(componentType=SecurityCategory()).subtype( + implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +id_aca_encAttrs = id_aca + (6, ) + + +class ACClearAttrs(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('acIssuer', GeneralName()), + namedtype.NamedType('acSerial', univ.Integer()), + namedtype.NamedType('attrs', univ.SequenceOf(componentType=Attribute())) + ) + + +# Map of Certificate Extension OIDs to Extensions added to the +# ones that are in rfc5280.py + +_certificateExtensionsMapUpdate = { + id_pe_ac_auditIdentity: univ.OctetString(), + id_ce_noRevAvail: univ.Null(), + id_ce_targetInformation: Targets(), + id_pe_ac_proxying: ProxyInfo(), + id_pe_aaControls: AAControls(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate) + + +# Map of AttributeType OIDs to AttributeValue added to the +# ones that are in rfc5280.py + +_certificateAttributesMapUpdate = { + id_aca_authenticationInfo: SvceAuthInfo(), + id_aca_accessIdentity: SvceAuthInfo(), + id_aca_chargingIdentity: IetfAttrSyntax(), + 
id_aca_group: IetfAttrSyntax(), + id_at_role: RoleSyntax(), + id_at_clearance: Clearance(), + id_at_clearance_rfc3281: Clearance_rfc3281(), + id_aca_encAttrs: ContentInfo(), +} + +rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5913.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5913.py new file mode 100644 index 0000000000000000000000000000000000000000..0bd065330d5c06dba90a984ed821bc90745ec3bb --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5913.py @@ -0,0 +1,44 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Authority Clearance Constraints Certificate Extension +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5913.txt +# https://www.rfc-editor.org/errata/eid5890 +# + +from pyasn1.type import constraint +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5755 + +MAX = float('inf') + + +# Authority Clearance Constraints Certificate Extension + +id_pe_clearanceConstraints = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.21') + +id_pe_authorityClearanceConstraints = id_pe_clearanceConstraints + + +class AuthorityClearanceConstraints(univ.SequenceOf): + componentType = rfc5755.Clearance() + subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +# Map of Certificate Extension OIDs to Extensions added to the +# ones that are in rfc5280.py + +_certificateExtensionsMapUpdate = { + id_pe_clearanceConstraints: AuthorityClearanceConstraints(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5914.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5914.py new file mode 100644 index 0000000000000000000000000000000000000000..d125ea2a65f3d9309446c0c06c0ed92035607daa --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5914.py @@ -0,0 +1,119 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Trust Anchor Format +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5914.txt + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +MAX = float('inf') + +Certificate = rfc5280.Certificate + +Name = rfc5280.Name + +Extensions = rfc5280.Extensions + +SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo + +TBSCertificate = rfc5280.TBSCertificate + +CertificatePolicies = rfc5280.CertificatePolicies + +KeyIdentifier = rfc5280.KeyIdentifier + +NameConstraints = rfc5280.NameConstraints + + +class CertPolicyFlags(univ.BitString): + pass + +CertPolicyFlags.namedValues = namedval.NamedValues( + ('inhibitPolicyMapping', 0), + ('requireExplicitPolicy', 1), + ('inhibitAnyPolicy', 2) +) + + +class CertPathControls(univ.Sequence): + pass + +CertPathControls.componentType = namedtype.NamedTypes( + namedtype.NamedType('taName', Name()), + namedtype.OptionalNamedType('certificate', Certificate().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('policySet', CertificatePolicies().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('policyFlags', CertPolicyFlags().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('nameConstr', NameConstraints().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.OptionalNamedType('pathLenConstraint', univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))) +) + + +class TrustAnchorTitle(char.UTF8String): + pass + +TrustAnchorTitle.subtypeSpec = constraint.ValueSizeConstraint(1, 64) + + +class TrustAnchorInfoVersion(univ.Integer): + pass + +TrustAnchorInfoVersion.namedValues = namedval.NamedValues( + ('v1', 1) +) + + +class TrustAnchorInfo(univ.Sequence): + pass + +TrustAnchorInfo.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', TrustAnchorInfoVersion().subtype(value='v1')), + namedtype.NamedType('pubKey', SubjectPublicKeyInfo()), + namedtype.NamedType('keyId', KeyIdentifier()), + namedtype.OptionalNamedType('taTitle', TrustAnchorTitle()), + namedtype.OptionalNamedType('certPath', CertPathControls()), + namedtype.OptionalNamedType('exts', Extensions().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('taTitleLangTag', char.UTF8String().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) +) + + +class TrustAnchorChoice(univ.Choice): + pass + +TrustAnchorChoice.componentType = namedtype.NamedTypes( + namedtype.NamedType('certificate', Certificate()), + namedtype.NamedType('tbsCert', TBSCertificate().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('taInfo', TrustAnchorInfo().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))) +) + + +id_ct_trustAnchorList = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.34') + +class TrustAnchorList(univ.SequenceOf): + pass + +TrustAnchorList.componentType = TrustAnchorChoice() +TrustAnchorList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX) 
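As a quick illustration of how the structures above fit together, here is a minimal sketch, assuming pyasn1's DER codec and this rfc5914 module are importable; the rsaEncryption OID, key bits, key identifier, and title are placeholder values, not data defined by the module.

    from pyasn1.codec.der import encoder
    from pyasn1.type import univ
    from pyasn1_modules import rfc5914

    tai = rfc5914.TrustAnchorInfo()
    # Nested optional/required components are instantiated on first access.
    tai['pubKey']['algorithm']['algorithm'] = '1.2.840.113549.1.1.1'  # rsaEncryption (placeholder)
    tai['pubKey']['subjectPublicKey'] = univ.BitString(hexValue='00')  # placeholder key bits
    tai['keyId'] = univ.OctetString(hexValue='0102030405060708090a0b0c0d0e0f1011121314')  # placeholder
    tai['taTitle'] = 'Example Trust Anchor'  # optional; constrained to 1..64 characters

    der_bytes = encoder.encode(tai)  # 'version' is DEFAULT v1, so DER omits it

Such a value would normally travel inside a TrustAnchorChoice (under its EXPLICIT [2] tag) as one element of a TrustAnchorList.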
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5915.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5915.py new file mode 100644 index 0000000000000000000000000000000000000000..82ff4a338bc14aa1924d7d2d16e95753db4194f5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5915.py @@ -0,0 +1,32 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Elliptic Curve Private Key +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5915.txt + +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5480 + + +class ECPrivateKey(univ.Sequence): + pass + +ECPrivateKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', univ.Integer( + namedValues=namedval.NamedValues(('ecPrivkeyVer1', 1)))), + namedtype.NamedType('privateKey', univ.OctetString()), + namedtype.OptionalNamedType('parameters', rfc5480.ECParameters().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('publicKey', univ.BitString().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5916.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5916.py new file mode 100644 index 0000000000000000000000000000000000000000..ac23c86b79a8a800f2ee269863268fe086114e54 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5916.py @@ -0,0 +1,35 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Device Owner Attribute +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5916.txt +# + +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +# Device Owner Attribute + +id_deviceOwner = univ.ObjectIdentifier((2, 16, 840, 1, 101, 2, 1, 5, 69)) + +at_deviceOwner = rfc5280.Attribute() +at_deviceOwner['type'] = id_deviceOwner +at_deviceOwner['values'][0] = univ.ObjectIdentifier() + + +# Add to the map of Attribute Type OIDs to Attributes in rfc5280.py. + +_certificateAttributesMapUpdate = { + id_deviceOwner: univ.ObjectIdentifier(), +} + +rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5917.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5917.py new file mode 100644 index 0000000000000000000000000000000000000000..ed9af987db5e9250eb722e45d799994c0bce09a4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5917.py @@ -0,0 +1,55 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Clearance Sponsor Attribute +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5917.txt +# https://www.rfc-editor.org/errata/eid4558 +# https://www.rfc-editor.org/errata/eid5883 +# + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +# DirectoryString is the same as RFC 5280, except for two things: +# 1. 
the length is limited to 64; +# 2. only the 'utf8String' choice remains because the ASN.1 +# specification says: ( WITH COMPONENTS { utf8String PRESENT } ) + +class DirectoryString(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('utf8String', char.UTF8String().subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, 64))), + ) + + +# Clearance Sponsor Attribute + +id_clearanceSponsor = univ.ObjectIdentifier((2, 16, 840, 1, 101, 2, 1, 5, 68)) + +ub_clearance_sponsor = univ.Integer(64) + + +at_clearanceSponsor = rfc5280.Attribute() +at_clearanceSponsor['type'] = id_clearanceSponsor +at_clearanceSponsor['values'][0] = DirectoryString() + + +# Add to the map of Attribute Type OIDs to Attributes in rfc5280.py. + +_certificateAttributesMapUpdate = { + id_clearanceSponsor: DirectoryString(), +} + +rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5924.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5924.py new file mode 100644 index 0000000000000000000000000000000000000000..4358e4f52970ead3f77be63f9576e18ec7f9876b --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5924.py @@ -0,0 +1,19 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Extended Key Usage (EKU) for Session Initiation Protocol (SIP) +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5924.txt +# + +from pyasn1.type import univ + +id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3') + +id_kp_sipDomain = id_kp + (20, ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5934.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5934.py new file mode 100644 index 0000000000000000000000000000000000000000..e3ad247aa07006fab85becd64fb34682cfebb402 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5934.py @@ -0,0 +1,786 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Trust Anchor Format +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5934.txt + +from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful + +from pyasn1_modules import rfc2985 +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5652 +from pyasn1_modules import rfc5914 + +MAX = float('inf') + + +def _OID(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + return univ.ObjectIdentifier(output) + + +# Imports from RFC 2985 + +SingleAttribute = rfc2985.SingleAttribute + + +# Imports from RFC5914 + +CertPathControls = rfc5914.CertPathControls + +TrustAnchorChoice = rfc5914.TrustAnchorChoice + +TrustAnchorTitle = rfc5914.TrustAnchorTitle + + +# Imports from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + +AnotherName = rfc5280.AnotherName + +Attribute = rfc5280.Attribute + +Certificate = rfc5280.Certificate + +CertificateSerialNumber = rfc5280.CertificateSerialNumber + +Extension = rfc5280.Extension + +Extensions = rfc5280.Extensions + +KeyIdentifier = rfc5280.KeyIdentifier + +Name = rfc5280.Name + +SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo + +TBSCertificate = rfc5280.TBSCertificate + +Validity = rfc5280.Validity + + +# Object Identifier Arc for TAMP Message Content Types + +id_tamp = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.77') + + +# TAMP Status Query Message + +id_ct_TAMP_statusQuery = _OID(id_tamp, 1) + + +class TAMPVersion(univ.Integer): + pass + +TAMPVersion.namedValues = namedval.NamedValues( + ('v1', 1), + ('v2', 2) +) + + +class TerseOrVerbose(univ.Enumerated): + pass + +TerseOrVerbose.namedValues = namedval.NamedValues( + ('terse', 1), + ('verbose', 2) +) + + +class HardwareSerialEntry(univ.Choice): + pass + +HardwareSerialEntry.componentType = namedtype.NamedTypes( + namedtype.NamedType('all', univ.Null()), + namedtype.NamedType('single', univ.OctetString()), + namedtype.NamedType('block', univ.Sequence(componentType=namedtype.NamedTypes( + namedtype.NamedType('low', univ.OctetString()), + namedtype.NamedType('high', univ.OctetString()) + )) + ) +) + + +class HardwareModules(univ.Sequence): + pass + +HardwareModules.componentType = namedtype.NamedTypes( + namedtype.NamedType('hwType', univ.ObjectIdentifier()), + namedtype.NamedType('hwSerialEntries', univ.SequenceOf( + componentType=HardwareSerialEntry()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) +) + + +class HardwareModuleIdentifierList(univ.SequenceOf): + pass + +HardwareModuleIdentifierList.componentType = HardwareModules() +HardwareModuleIdentifierList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +class Community(univ.ObjectIdentifier): + pass + + +class CommunityIdentifierList(univ.SequenceOf): + pass + +CommunityIdentifierList.componentType = Community() +CommunityIdentifierList.subtypeSpec=constraint.ValueSizeConstraint(0, MAX) + + +class TargetIdentifier(univ.Choice): + pass + +TargetIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('hwModules', HardwareModuleIdentifierList().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('communities', CommunityIdentifierList().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('allModules', univ.Null().subtype( + 
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.NamedType('uri', char.IA5String().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))), + namedtype.NamedType('otherName', AnotherName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))) +) + + +class SeqNumber(univ.Integer): + pass + +SeqNumber.subtypeSpec = constraint.ValueRangeConstraint(0, 9223372036854775807) + + +class TAMPMsgRef(univ.Sequence): + pass + +TAMPMsgRef.componentType = namedtype.NamedTypes( + namedtype.NamedType('target', TargetIdentifier()), + namedtype.NamedType('seqNum', SeqNumber()) +) + + +class TAMPStatusQuery(univ.Sequence): + pass + +TAMPStatusQuery.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', TAMPVersion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value='v2')), + namedtype.DefaultedNamedType('terse', TerseOrVerbose().subtype( + implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 1)).subtype(value='verbose')), + namedtype.NamedType('query', TAMPMsgRef()) +) + + +tamp_status_query = rfc5652.ContentInfo() +tamp_status_query['contentType'] = id_ct_TAMP_statusQuery +tamp_status_query['content'] = TAMPStatusQuery() + + +# TAMP Status Response Message + +id_ct_TAMP_statusResponse = _OID(id_tamp, 2) + + +class KeyIdentifiers(univ.SequenceOf): + pass + +KeyIdentifiers.componentType = KeyIdentifier() +KeyIdentifiers.subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +class TrustAnchorChoiceList(univ.SequenceOf): + pass + +TrustAnchorChoiceList.componentType = TrustAnchorChoice() +TrustAnchorChoiceList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +class TAMPSequenceNumber(univ.Sequence): + pass + +TAMPSequenceNumber.componentType = namedtype.NamedTypes( + namedtype.NamedType('keyId', KeyIdentifier()), + namedtype.NamedType('seqNumber', SeqNumber()) +) + + +class TAMPSequenceNumbers(univ.SequenceOf): + pass + +TAMPSequenceNumbers.componentType = TAMPSequenceNumber() +TAMPSequenceNumbers.subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +class TerseStatusResponse(univ.Sequence): + pass + +TerseStatusResponse.componentType = namedtype.NamedTypes( + namedtype.NamedType('taKeyIds', KeyIdentifiers()), + namedtype.OptionalNamedType('communities', CommunityIdentifierList()) +) + + +class VerboseStatusResponse(univ.Sequence): + pass + +VerboseStatusResponse.componentType = namedtype.NamedTypes( + namedtype.NamedType('taInfo', TrustAnchorChoiceList()), + namedtype.OptionalNamedType('continPubKeyDecryptAlg', + AlgorithmIdentifier().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('communities', + CommunityIdentifierList().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('tampSeqNumbers', + TAMPSequenceNumbers().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 2))) +) + + +class StatusResponse(univ.Choice): + pass + +StatusResponse.componentType = namedtype.NamedTypes( + namedtype.NamedType('terseResponse', TerseStatusResponse().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('verboseResponse', VerboseStatusResponse().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) +) + + +class TAMPStatusResponse(univ.Sequence): + pass + +TAMPStatusResponse.componentType = namedtype.NamedTypes( + 
namedtype.DefaultedNamedType('version', TAMPVersion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value='v2')), + namedtype.NamedType('query', TAMPMsgRef()), + namedtype.NamedType('response', StatusResponse()), + namedtype.DefaultedNamedType('usesApex', univ.Boolean().subtype(value=1)) +) + + +tamp_status_response = rfc5652.ContentInfo() +tamp_status_response['contentType'] = id_ct_TAMP_statusResponse +tamp_status_response['content'] = TAMPStatusResponse() + + +# Trust Anchor Update Message + +id_ct_TAMP_update = _OID(id_tamp, 3) + + +class TBSCertificateChangeInfo(univ.Sequence): + pass + +TBSCertificateChangeInfo.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('serialNumber', CertificateSerialNumber()), + namedtype.OptionalNamedType('signature', AlgorithmIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('issuer', Name().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('validity', Validity().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('subject', Name().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))), + namedtype.OptionalNamedType('exts', Extensions().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 5))) +) + + +class TrustAnchorChangeInfo(univ.Sequence): + pass + +TrustAnchorChangeInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('pubKey', SubjectPublicKeyInfo()), + namedtype.OptionalNamedType('keyId', KeyIdentifier()), + namedtype.OptionalNamedType('taTitle', TrustAnchorTitle()), + namedtype.OptionalNamedType('certPath', CertPathControls()), + namedtype.OptionalNamedType('exts', Extensions().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class TrustAnchorChangeInfoChoice(univ.Choice): + pass + +TrustAnchorChangeInfoChoice.componentType = namedtype.NamedTypes( + namedtype.NamedType('tbsCertChange', TBSCertificateChangeInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('taChange', TrustAnchorChangeInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) +) + + +class TrustAnchorUpdate(univ.Choice): + pass + +TrustAnchorUpdate.componentType = namedtype.NamedTypes( + namedtype.NamedType('add', TrustAnchorChoice().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('remove', SubjectPublicKeyInfo().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.NamedType('change', TrustAnchorChangeInfoChoice().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))) +) + + +class TAMPUpdate(univ.Sequence): + pass + +TAMPUpdate.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', + TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value='v2')), + namedtype.DefaultedNamedType('terse', + TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 1)).subtype(value='verbose')), + namedtype.NamedType('msgRef', TAMPMsgRef()), + namedtype.NamedType('updates', + 
univ.SequenceOf(componentType=TrustAnchorUpdate()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.OptionalNamedType('tampSeqNumbers', + TAMPSequenceNumbers().subtype(implicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 2))) +) + + +tamp_update = rfc5652.ContentInfo() +tamp_update['contentType'] = id_ct_TAMP_update +tamp_update['content'] = TAMPUpdate() + + +# Trust Anchor Update Confirm Message + +id_ct_TAMP_updateConfirm = _OID(id_tamp, 4) + + +class StatusCode(univ.Enumerated): + pass + +StatusCode.namedValues = namedval.NamedValues( + ('success', 0), + ('decodeFailure', 1), + ('badContentInfo', 2), + ('badSignedData', 3), + ('badEncapContent', 4), + ('badCertificate', 5), + ('badSignerInfo', 6), + ('badSignedAttrs', 7), + ('badUnsignedAttrs', 8), + ('missingContent', 9), + ('noTrustAnchor', 10), + ('notAuthorized', 11), + ('badDigestAlgorithm', 12), + ('badSignatureAlgorithm', 13), + ('unsupportedKeySize', 14), + ('unsupportedParameters', 15), + ('signatureFailure', 16), + ('insufficientMemory', 17), + ('unsupportedTAMPMsgType', 18), + ('apexTAMPAnchor', 19), + ('improperTAAddition', 20), + ('seqNumFailure', 21), + ('contingencyPublicKeyDecrypt', 22), + ('incorrectTarget', 23), + ('communityUpdateFailed', 24), + ('trustAnchorNotFound', 25), + ('unsupportedTAAlgorithm', 26), + ('unsupportedTAKeySize', 27), + ('unsupportedContinPubKeyDecryptAlg', 28), + ('missingSignature', 29), + ('resourcesBusy', 30), + ('versionNumberMismatch', 31), + ('missingPolicySet', 32), + ('revokedCertificate', 33), + ('unsupportedTrustAnchorFormat', 34), + ('improperTAChange', 35), + ('malformed', 36), + ('cmsError', 37), + ('unsupportedTargetIdentifier', 38), + ('other', 127) +) + + +class StatusCodeList(univ.SequenceOf): + pass + +StatusCodeList.componentType = StatusCode() +StatusCodeList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +class TerseUpdateConfirm(StatusCodeList): + pass + + +class VerboseUpdateConfirm(univ.Sequence): + pass + +VerboseUpdateConfirm.componentType = namedtype.NamedTypes( + namedtype.NamedType('status', StatusCodeList()), + namedtype.NamedType('taInfo', TrustAnchorChoiceList()), + namedtype.OptionalNamedType('tampSeqNumbers', TAMPSequenceNumbers()), + namedtype.DefaultedNamedType('usesApex', univ.Boolean().subtype(value=1)) +) + + +class UpdateConfirm(univ.Choice): + pass + +UpdateConfirm.componentType = namedtype.NamedTypes( + namedtype.NamedType('terseConfirm', TerseUpdateConfirm().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('verboseConfirm', VerboseUpdateConfirm().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) +) + + +class TAMPUpdateConfirm(univ.Sequence): + pass + +TAMPUpdateConfirm.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', TAMPVersion().subtype( + implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value='v2')), + namedtype.NamedType('update', TAMPMsgRef()), + namedtype.NamedType('confirm', UpdateConfirm()) +) + + +tamp_update_confirm = rfc5652.ContentInfo() +tamp_update_confirm['contentType'] = id_ct_TAMP_updateConfirm +tamp_update_confirm['content'] = TAMPUpdateConfirm() + + +# Apex Trust Anchor Update Message + +id_ct_TAMP_apexUpdate = _OID(id_tamp, 5) + + +class TAMPApexUpdate(univ.Sequence): + pass + +TAMPApexUpdate.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', + TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext, + 
tag.tagFormatSimple, 0)).subtype(value='v2')), + namedtype.DefaultedNamedType('terse', + TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 1)).subtype(value='verbose')), + namedtype.NamedType('msgRef', TAMPMsgRef()), + namedtype.NamedType('clearTrustAnchors', univ.Boolean()), + namedtype.NamedType('clearCommunities', univ.Boolean()), + namedtype.OptionalNamedType('seqNumber', SeqNumber()), + namedtype.NamedType('apexTA', TrustAnchorChoice()) +) + + +tamp_apex_update = rfc5652.ContentInfo() +tamp_apex_update['contentType'] = id_ct_TAMP_apexUpdate +tamp_apex_update['content'] = TAMPApexUpdate() + + +# Apex Trust Anchor Update Confirm Message + +id_ct_TAMP_apexUpdateConfirm = _OID(id_tamp, 6) + + +class TerseApexUpdateConfirm(StatusCode): + pass + + +class VerboseApexUpdateConfirm(univ.Sequence): + pass + +VerboseApexUpdateConfirm.componentType = namedtype.NamedTypes( + namedtype.NamedType('status', StatusCode()), + namedtype.NamedType('taInfo', TrustAnchorChoiceList()), + namedtype.OptionalNamedType('communities', + CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('tampSeqNumbers', + TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 1))) +) + + +class ApexUpdateConfirm(univ.Choice): + pass + +ApexUpdateConfirm.componentType = namedtype.NamedTypes( + namedtype.NamedType('terseApexConfirm', + TerseApexUpdateConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0))), + namedtype.NamedType('verboseApexConfirm', + VerboseApexUpdateConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatConstructed, 1))) +) + + +class TAMPApexUpdateConfirm(univ.Sequence): + pass + +TAMPApexUpdateConfirm.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', + TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value='v2')), + namedtype.NamedType('apexReplace', TAMPMsgRef()), + namedtype.NamedType('apexConfirm', ApexUpdateConfirm()) +) + + +tamp_apex_update_confirm = rfc5652.ContentInfo() +tamp_apex_update_confirm['contentType'] = id_ct_TAMP_apexUpdateConfirm +tamp_apex_update_confirm['content'] = TAMPApexUpdateConfirm() + + +# Community Update Message + +id_ct_TAMP_communityUpdate = _OID(id_tamp, 7) + + +class CommunityUpdates(univ.Sequence): + pass + +CommunityUpdates.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('remove', + CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('add', + CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 2))) +) + + +class TAMPCommunityUpdate(univ.Sequence): + pass + +TAMPCommunityUpdate.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', + TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value='v2')), + namedtype.DefaultedNamedType('terse', + TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 1)).subtype(value='verbose')), + namedtype.NamedType('msgRef', TAMPMsgRef()), + namedtype.NamedType('updates', CommunityUpdates()) +) + + +tamp_community_update = rfc5652.ContentInfo() +tamp_community_update['contentType'] = id_ct_TAMP_communityUpdate +tamp_community_update['content'] = TAMPCommunityUpdate() + + +# Community Update Confirm Message + 
+id_ct_TAMP_communityUpdateConfirm = _OID(id_tamp, 8) + + +class TerseCommunityConfirm(StatusCode): + pass + + +class VerboseCommunityConfirm(univ.Sequence): + pass + +VerboseCommunityConfirm.componentType = namedtype.NamedTypes( + namedtype.NamedType('status', StatusCode()), + namedtype.OptionalNamedType('communities', CommunityIdentifierList()) +) + + +class CommunityConfirm(univ.Choice): + pass + +CommunityConfirm.componentType = namedtype.NamedTypes( + namedtype.NamedType('terseCommConfirm', + TerseCommunityConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0))), + namedtype.NamedType('verboseCommConfirm', + VerboseCommunityConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatConstructed, 1))) +) + + +class TAMPCommunityUpdateConfirm(univ.Sequence): + pass + +TAMPCommunityUpdateConfirm.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', + TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value='v2')), + namedtype.NamedType('update', TAMPMsgRef()), + namedtype.NamedType('commConfirm', CommunityConfirm()) +) + + +tamp_community_update_confirm = rfc5652.ContentInfo() +tamp_community_update_confirm['contentType'] = id_ct_TAMP_communityUpdateConfirm +tamp_community_update_confirm['content'] = TAMPCommunityUpdateConfirm() + + +# Sequence Number Adjust Message + +id_ct_TAMP_seqNumAdjust = _OID(id_tamp, 10) + + + +class SequenceNumberAdjust(univ.Sequence): + pass + +SequenceNumberAdjust.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', + TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value='v2')), + namedtype.NamedType('msgRef', TAMPMsgRef()) +) + + +tamp_sequence_number_adjust = rfc5652.ContentInfo() +tamp_sequence_number_adjust['contentType'] = id_ct_TAMP_seqNumAdjust +tamp_sequence_number_adjust['content'] = SequenceNumberAdjust() + + +# Sequence Number Adjust Confirm Message + +id_ct_TAMP_seqNumAdjustConfirm = _OID(id_tamp, 11) + + +class SequenceNumberAdjustConfirm(univ.Sequence): + pass + +SequenceNumberAdjustConfirm.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', + TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value='v2')), + namedtype.NamedType('adjust', TAMPMsgRef()), + namedtype.NamedType('status', StatusCode()) +) + + +tamp_sequence_number_adjust_confirm = rfc5652.ContentInfo() +tamp_sequence_number_adjust_confirm['contentType'] = id_ct_TAMP_seqNumAdjustConfirm +tamp_sequence_number_adjust_confirm['content'] = SequenceNumberAdjustConfirm() + + +# TAMP Error Message + +id_ct_TAMP_error = _OID(id_tamp, 9) + + +class TAMPError(univ.Sequence): + pass + +TAMPError.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', + TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0)).subtype(value='v2')), + namedtype.NamedType('msgType', univ.ObjectIdentifier()), + namedtype.NamedType('status', StatusCode()), + namedtype.OptionalNamedType('msgRef', TAMPMsgRef()) +) + + +tamp_error = rfc5652.ContentInfo() +tamp_error['contentType'] = id_ct_TAMP_error +tamp_error['content'] = TAMPError() + + +# Object Identifier Arc for Attributes + +id_attributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.5') + + +# contingency-public-key-decrypt-key unsigned attribute + +id_aa_TAMP_contingencyPublicKeyDecryptKey = _OID(id_attributes, 63) + + +class 
PlaintextSymmetricKey(univ.OctetString): + pass + + +contingency_public_key_decrypt_key = Attribute() +contingency_public_key_decrypt_key['type'] = id_aa_TAMP_contingencyPublicKeyDecryptKey +contingency_public_key_decrypt_key['values'][0] = PlaintextSymmetricKey() + + +# id-pe-wrappedApexContinKey extension + +id_pe_wrappedApexContinKey =univ.ObjectIdentifier('1.3.6.1.5.5.7.1.20') + + +class ApexContingencyKey(univ.Sequence): + pass + +ApexContingencyKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('wrapAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('wrappedContinPubKey', univ.OctetString()) +) + + +wrappedApexContinKey = Extension() +wrappedApexContinKey['extnID'] = id_pe_wrappedApexContinKey +wrappedApexContinKey['critical'] = 0 +wrappedApexContinKey['extnValue'] = univ.OctetString() + + +# Add to the map of CMS Content Type OIDs to Content Types in +# rfc5652.py + +_cmsContentTypesMapUpdate = { + id_ct_TAMP_statusQuery: TAMPStatusQuery(), + id_ct_TAMP_statusResponse: TAMPStatusResponse(), + id_ct_TAMP_update: TAMPUpdate(), + id_ct_TAMP_updateConfirm: TAMPUpdateConfirm(), + id_ct_TAMP_apexUpdate: TAMPApexUpdate(), + id_ct_TAMP_apexUpdateConfirm: TAMPApexUpdateConfirm(), + id_ct_TAMP_communityUpdate: TAMPCommunityUpdate(), + id_ct_TAMP_communityUpdateConfirm: TAMPCommunityUpdateConfirm(), + id_ct_TAMP_seqNumAdjust: SequenceNumberAdjust(), + id_ct_TAMP_seqNumAdjustConfirm: SequenceNumberAdjustConfirm(), + id_ct_TAMP_error: TAMPError(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) + + +# Add to the map of CMS Attribute OIDs to Attribute Values in +# rfc5652.py + +_cmsAttributesMapUpdate = { + id_aa_TAMP_contingencyPublicKeyDecryptKey: PlaintextSymmetricKey(), +} + +rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) + + +# Add to the map of Certificate Extension OIDs to Extensions in +# rfc5280.py + +_certificateExtensionsMap = { + id_pe_wrappedApexContinKey: ApexContingencyKey(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5940.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5940.py new file mode 100644 index 0000000000000000000000000000000000000000..e105923358b795b1547a1fae4aa29fffbbd80317 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5940.py @@ -0,0 +1,59 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# Modified by Russ Housley to add map for use with opentypes. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Additional CMS Revocation Information Choices +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5940.txt +# + +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc2560 +from pyasn1_modules import rfc5652 + + +# RevocationInfoChoice for OCSP response: +# The OID is included in otherRevInfoFormat, and +# signed OCSPResponse is included in otherRevInfo + +id_ri_ocsp_response = univ.ObjectIdentifier('1.3.6.1.5.5.7.16.2') + +OCSPResponse = rfc2560.OCSPResponse + + +# RevocationInfoChoice for SCVP request/response: +# The OID is included in otherRevInfoFormat, and +# SCVPReqRes is included in otherRevInfo + +id_ri_scvp = univ.ObjectIdentifier('1.3.6.1.5.5.7.16.4') + +ContentInfo = rfc5652.ContentInfo + +class SCVPReqRes(univ.Sequence): + pass + +SCVPReqRes.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('request', + ContentInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('response', ContentInfo()) +) + + +# Map of Revocation Info Format OIDs to Revocation Info Format +# is added to the ones that are in rfc5652.py + +_otherRevInfoFormatMapUpdate = { + id_ri_ocsp_response: OCSPResponse(), + id_ri_scvp: SCVPReqRes(), +} + +rfc5652.otherRevInfoFormatMap.update(_otherRevInfoFormatMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5958.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5958.py new file mode 100644 index 0000000000000000000000000000000000000000..1aaa9286aded7db76b8e3b1dab84bc61b8c367a8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5958.py @@ -0,0 +1,98 @@ +# +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley. +# Modified by Russ Housley to add a map for use with opentypes. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Asymmetric Key Packages, which is essentially version 2 of +# the PrivateKeyInfo structure in PKCS#8 in RFC 5208 +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5958.txt + +from pyasn1.type import univ, constraint, namedtype, namedval, tag + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5652 + + +MAX = float('inf') + + +class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier): + pass + + +class PrivateKeyAlgorithmIdentifier(rfc5280.AlgorithmIdentifier): + pass + + +class EncryptedData(univ.OctetString): + pass + + +class EncryptedPrivateKeyInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('encryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('encryptedData', EncryptedData()) + ) + + +class Version(univ.Integer): + namedValues = namedval.NamedValues(('v1', 0), ('v2', 1)) + + +class PrivateKey(univ.OctetString): + pass + + +class Attributes(univ.SetOf): + componentType = rfc5652.Attribute() + + +class PublicKey(univ.BitString): + pass + + +# OneAsymmetricKey is essentially version 2 of PrivateKeyInfo. +# If publicKey is present, then the version must be v2; +# otherwise, the version should be v1. 
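Because this presence rule is easy to get wrong when assembling keys by hand, here is a minimal sketch of it; version_for is a hypothetical helper name, not something defined by RFC 5958 or this module.

    def version_for(has_public_key):
        # v2 (INTEGER value 1) is required when the optional publicKey
        # component is present; v1 (INTEGER value 0) applies otherwise.
        return 'v2' if has_public_key else 'v1'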
+ +class OneAsymmetricKey(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', Version()), + namedtype.NamedType('privateKeyAlgorithm', PrivateKeyAlgorithmIdentifier()), + namedtype.NamedType('privateKey', PrivateKey()), + namedtype.OptionalNamedType('attributes', Attributes().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('publicKey', PublicKey().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) + ) + + +class PrivateKeyInfo(OneAsymmetricKey): + pass + + +# The CMS AsymmetricKeyPackage Content Type + +id_ct_KP_aKeyPackage = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.5') + +class AsymmetricKeyPackage(univ.SequenceOf): + pass + +AsymmetricKeyPackage.componentType = OneAsymmetricKey() +AsymmetricKeyPackage.sizeSpec=constraint.ValueSizeConstraint(1, MAX) + + +# Map of Content Type OIDs to Content Types is added to the +# ones that are in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_ct_KP_aKeyPackage: AsymmetricKeyPackage(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5990.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5990.py new file mode 100644 index 0000000000000000000000000000000000000000..281316fb81a0b20f5e353f07c3a95b39777709d3 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc5990.py @@ -0,0 +1,237 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Use of the RSA-KEM Key Transport Algorithm in the CMS +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc5990.txt +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + +MAX = float('inf') + +def _OID(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + return univ.ObjectIdentifier(output) + + +# Imports from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + + +# Useful types and definitions + +class NullParms(univ.Null): + pass + + +# Object identifier arcs + +is18033_2 = _OID(1, 0, 18033, 2) + +nistAlgorithm = _OID(2, 16, 840, 1, 101, 3, 4) + +pkcs_1 = _OID(1, 2, 840, 113549, 1, 1) + +x9_44 = _OID(1, 3, 133, 16, 840, 9, 44) + +x9_44_components = _OID(x9_44, 1) + + +# Types for algorithm identifiers + +class Camellia_KeyWrappingScheme(AlgorithmIdentifier): + pass + +class DataEncapsulationMechanism(AlgorithmIdentifier): + pass + +class KDF2_HashFunction(AlgorithmIdentifier): + pass + +class KDF3_HashFunction(AlgorithmIdentifier): + pass + +class KeyDerivationFunction(AlgorithmIdentifier): + pass + +class KeyEncapsulationMechanism(AlgorithmIdentifier): + pass + +class X9_SymmetricKeyWrappingScheme(AlgorithmIdentifier): + pass + + +# RSA-KEM Key Transport Algorithm + +id_rsa_kem = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 14) + + +class GenericHybridParameters(univ.Sequence): + pass + +GenericHybridParameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('kem', KeyEncapsulationMechanism()), + namedtype.NamedType('dem', DataEncapsulationMechanism()) +) + + +rsa_kem = AlgorithmIdentifier() +rsa_kem['algorithm'] = id_rsa_kem +rsa_kem['parameters'] = GenericHybridParameters() + + +# 
KEM-RSA Key Encapsulation Mechanism
+
+id_kem_rsa = _OID(is18033_2, 2, 4)
+
+
+class KeyLength(univ.Integer):
+    pass
+
+KeyLength.subtypeSpec = constraint.ValueRangeConstraint(1, MAX)
+
+
+class RsaKemParameters(univ.Sequence):
+    pass
+
+RsaKemParameters.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('keyDerivationFunction', KeyDerivationFunction()),
+    namedtype.NamedType('keyLength', KeyLength())
+)
+
+
+kem_rsa = AlgorithmIdentifier()
+kem_rsa['algorithm'] = id_kem_rsa
+kem_rsa['parameters'] = RsaKemParameters()
+
+
+# Key Derivation Functions
+
+id_kdf_kdf2 = _OID(x9_44_components, 1)
+
+id_kdf_kdf3 = _OID(x9_44_components, 2)
+
+
+kdf2 = AlgorithmIdentifier()
+kdf2['algorithm'] = id_kdf_kdf2
+kdf2['parameters'] = KDF2_HashFunction()
+
+kdf3 = AlgorithmIdentifier()
+kdf3['algorithm'] = id_kdf_kdf3
+kdf3['parameters'] = KDF3_HashFunction()
+
+
+# Hash Functions
+
+id_sha1 = _OID(1, 3, 14, 3, 2, 26)
+
+id_sha224 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 4)
+
+id_sha256 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 1)
+
+id_sha384 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 2)
+
+id_sha512 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 3)
+
+
+sha1 = AlgorithmIdentifier()
+sha1['algorithm'] = id_sha1
+sha1['parameters'] = univ.Null("")
+
+sha224 = AlgorithmIdentifier()
+sha224['algorithm'] = id_sha224
+sha224['parameters'] = univ.Null("")
+
+sha256 = AlgorithmIdentifier()
+sha256['algorithm'] = id_sha256
+sha256['parameters'] = univ.Null("")
+
+sha384 = AlgorithmIdentifier()
+sha384['algorithm'] = id_sha384
+sha384['parameters'] = univ.Null("")
+
+sha512 = AlgorithmIdentifier()
+sha512['algorithm'] = id_sha512
+sha512['parameters'] = univ.Null("")
+
+
+# Symmetric Key-Wrapping Schemes
+
+id_aes128_Wrap = _OID(nistAlgorithm, 1, 5)
+
+id_aes192_Wrap = _OID(nistAlgorithm, 1, 25)
+
+id_aes256_Wrap = _OID(nistAlgorithm, 1, 45)
+
+id_alg_CMS3DESwrap = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 6)
+
+id_camellia128_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 2)
+
+id_camellia192_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 3)
+
+id_camellia256_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 4)
+
+
+aes128_Wrap = AlgorithmIdentifier()
+aes128_Wrap['algorithm'] = id_aes128_Wrap
+# aes128_Wrap['parameters'] are absent
+
+aes192_Wrap = AlgorithmIdentifier()
+aes192_Wrap['algorithm'] = id_aes192_Wrap
+# aes192_Wrap['parameters'] are absent
+
+aes256_Wrap = AlgorithmIdentifier()
+aes256_Wrap['algorithm'] = id_aes256_Wrap
+# aes256_Wrap['parameters'] are absent
+
+tdes_Wrap = AlgorithmIdentifier()
+tdes_Wrap['algorithm'] = id_alg_CMS3DESwrap
+tdes_Wrap['parameters'] = univ.Null("")
+
+camellia128_Wrap = AlgorithmIdentifier()
+camellia128_Wrap['algorithm'] = id_camellia128_Wrap
+# camellia128_Wrap['parameters'] are absent
+
+camellia192_Wrap = AlgorithmIdentifier()
+camellia192_Wrap['algorithm'] = id_camellia192_Wrap
+# camellia192_Wrap['parameters'] are absent
+
+camellia256_Wrap = AlgorithmIdentifier()
+camellia256_Wrap['algorithm'] = id_camellia256_Wrap
+# camellia256_Wrap['parameters'] are absent
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+# Note that the ones that must not have parameters are not added to the map.
+ +_algorithmIdentifierMapUpdate = { + id_rsa_kem: GenericHybridParameters(), + id_kem_rsa: RsaKemParameters(), + id_kdf_kdf2: KDF2_HashFunction(), + id_kdf_kdf3: KDF3_HashFunction(), + id_sha1: univ.Null(), + id_sha224: univ.Null(), + id_sha256: univ.Null(), + id_sha384: univ.Null(), + id_sha512: univ.Null(), + id_alg_CMS3DESwrap: univ.Null(), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6010.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6010.py new file mode 100644 index 0000000000000000000000000000000000000000..250e207ba4e87f77d5e30ed87dd69e219728be69 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6010.py @@ -0,0 +1,88 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# Modified by Russ Housley to add maps for use with opentypes. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Certificate Extension for CMS Content Constraints (CCC) +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6010.txt +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + +MAX = float('inf') + + +AttributeType = rfc5280.AttributeType + +AttributeValue = rfc5280.AttributeValue + + +id_ct_anyContentType = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.0') + + +class AttrConstraint(univ.Sequence): + pass + +AttrConstraint.componentType = namedtype.NamedTypes( + namedtype.NamedType('attrType', AttributeType()), + namedtype.NamedType('attrValues', univ.SetOf( + componentType=AttributeValue()).subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) +) + + +class AttrConstraintList(univ.SequenceOf): + pass + +AttrConstraintList.componentType = AttrConstraint() +AttrConstraintList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +class ContentTypeGeneration(univ.Enumerated): + pass + +ContentTypeGeneration.namedValues = namedval.NamedValues( + ('canSource', 0), + ('cannotSource', 1) +) + + +class ContentTypeConstraint(univ.Sequence): + pass + +ContentTypeConstraint.componentType = namedtype.NamedTypes( + namedtype.NamedType('contentType', univ.ObjectIdentifier()), + namedtype.DefaultedNamedType('canSource', ContentTypeGeneration().subtype(value='canSource')), + namedtype.OptionalNamedType('attrConstraints', AttrConstraintList()) +) + + +# CMS Content Constraints (CCC) Extension and Object Identifier + +id_pe_cmsContentConstraints = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.18') + +class CMSContentConstraints(univ.SequenceOf): + pass + +CMSContentConstraints.componentType = ContentTypeConstraint() +CMSContentConstraints.subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +# Map of Certificate Extension OIDs to Extensions +# To be added to the ones that are in rfc5280.py + +_certificateExtensionsMap = { + id_pe_cmsContentConstraints: CMSContentConstraints(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6019.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6019.py new file mode 100644 index 0000000000000000000000000000000000000000..c6872c76699c9fd4f80e8b58ecda934f2e8d724a --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6019.py @@ -0,0 +1,45 @@ +# This file is being contributed to pyasn1-modules 
software.
+#
+# Created by Russ Housley.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# BinaryTime: An Alternate Format for Representing Date and Time
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6019.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# BinaryTime: Represent date and time as an integer
+
+class BinaryTime(univ.Integer):
+    pass
+
+BinaryTime.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+# CMS Attribute for representing signing time in BinaryTime
+
+id_aa_binarySigningTime = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.46')
+
+class BinarySigningTime(BinaryTime):
+    pass
+
+
+# Map of Attribute Type OIDs to Attributes is added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+    id_aa_binarySigningTime: BinarySigningTime(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6031.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6031.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e1bb2261d57cc3f537814493a8a0c3c1b18ba29
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6031.py
@@ -0,0 +1,469 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Symmetric Key Package Content Type
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6031.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6019
+
+
+def _OID(*components):
+    output = []
+    for x in tuple(components):
+        if isinstance(x, univ.ObjectIdentifier):
+            output.extend(list(x))
+        else:
+            output.append(int(x))
+    return univ.ObjectIdentifier(output)
+
+
+MAX = float('inf')
+
+id_pskc = univ.ObjectIdentifier('1.2.840.113549.1.9.16.12')
+
+
+# Symmetric Key Package Attributes
+
+id_pskc_manufacturer = _OID(id_pskc, 1)
+
+class at_pskc_manufacturer(char.UTF8String):
+    pass
+
+
+id_pskc_serialNo = _OID(id_pskc, 2)
+
+class at_pskc_serialNo(char.UTF8String):
+    pass
+
+
+id_pskc_model = _OID(id_pskc, 3)
+
+class at_pskc_model(char.UTF8String):
+    pass
+
+
+id_pskc_issueNo = _OID(id_pskc, 4)
+
+class at_pskc_issueNo(char.UTF8String):
+    pass
+
+
+id_pskc_deviceBinding = _OID(id_pskc, 5)
+
+class at_pskc_deviceBinding(char.UTF8String):
+    pass
+
+
+id_pskc_deviceStartDate = _OID(id_pskc, 6)
+
+class at_pskc_deviceStartDate(useful.GeneralizedTime):
+    pass
+
+
+id_pskc_deviceExpiryDate = _OID(id_pskc, 7)
+
+class at_pskc_deviceExpiryDate(useful.GeneralizedTime):
+    pass
+
+
+id_pskc_moduleId = _OID(id_pskc, 8)
+
+class at_pskc_moduleId(char.UTF8String):
+    pass
+
+
+id_pskc_deviceUserId = _OID(id_pskc, 26)
+
+class at_pskc_deviceUserId(char.UTF8String):
+    pass
+
+
+# Symmetric Key Attributes
+
+id_pskc_keyId = _OID(id_pskc, 9)
+
+class at_pskc_keyUserId(char.UTF8String):
+    pass
+
+
+id_pskc_algorithm = _OID(id_pskc, 10)
+
+class at_pskc_algorithm(char.UTF8String):
+    pass
+
+
+id_pskc_issuer = 
_OID(id_pskc, 11) + +class at_pskc_issuer(char.UTF8String): + pass + + +id_pskc_keyProfileId = _OID(id_pskc, 12) + +class at_pskc_keyProfileId(char.UTF8String): + pass + + +id_pskc_keyReference = _OID(id_pskc, 13) + +class at_pskc_keyReference(char.UTF8String): + pass + + +id_pskc_friendlyName = _OID(id_pskc, 14) + +class FriendlyName(univ.Sequence): + pass + +FriendlyName.componentType = namedtype.NamedTypes( + namedtype.NamedType('friendlyName', char.UTF8String()), + namedtype.OptionalNamedType('friendlyNameLangTag', char.UTF8String()) +) + +class at_pskc_friendlyName(FriendlyName): + pass + + +id_pskc_algorithmParameters = _OID(id_pskc, 15) + +class Encoding(char.UTF8String): + pass + +Encoding.namedValues = namedval.NamedValues( + ('dec', "DECIMAL"), + ('hex', "HEXADECIMAL"), + ('alpha', "ALPHANUMERIC"), + ('b64', "BASE64"), + ('bin', "BINARY") +) + +Encoding.subtypeSpec = constraint.SingleValueConstraint( + "DECIMAL", "HEXADECIMAL", "ALPHANUMERIC", "BASE64", "BINARY" ) + +class ChallengeFormat(univ.Sequence): + pass + +ChallengeFormat.componentType = namedtype.NamedTypes( + namedtype.NamedType('encoding', Encoding()), + namedtype.DefaultedNamedType('checkDigit', + univ.Boolean().subtype(value=0)), + namedtype.NamedType('min', univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(0, MAX))), + namedtype.NamedType('max', univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(0, MAX))) +) + +class ResponseFormat(univ.Sequence): + pass + +ResponseFormat.componentType = namedtype.NamedTypes( + namedtype.NamedType('encoding', Encoding()), + namedtype.NamedType('length', univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(0, MAX))), + namedtype.DefaultedNamedType('checkDigit', + univ.Boolean().subtype(value=0)) +) + +class PSKCAlgorithmParameters(univ.Choice): + pass + +PSKCAlgorithmParameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('suite', char.UTF8String()), + namedtype.NamedType('challengeFormat', ChallengeFormat().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('responseFormat', ResponseFormat().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) +) + +class at_pskc_algorithmParameters(PSKCAlgorithmParameters): + pass + + +id_pskc_counter = _OID(id_pskc, 16) + +class at_pskc_counter(univ.Integer): + pass + +at_pskc_counter.subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +id_pskc_time = _OID(id_pskc, 17) + +class at_pskc_time(rfc6019.BinaryTime): + pass + + +id_pskc_timeInterval = _OID(id_pskc, 18) + +class at_pskc_timeInterval(univ.Integer): + pass + +at_pskc_timeInterval.subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +id_pskc_timeDrift = _OID(id_pskc, 19) + +class at_pskc_timeDrift(univ.Integer): + pass + +at_pskc_timeDrift.subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +id_pskc_valueMAC = _OID(id_pskc, 20) + +class ValueMac(univ.Sequence): + pass + +ValueMac.componentType = namedtype.NamedTypes( + namedtype.NamedType('macAlgorithm', char.UTF8String()), + namedtype.NamedType('mac', char.UTF8String()) +) + +class at_pskc_valueMAC(ValueMac): + pass + + +id_pskc_keyUserId = _OID(id_pskc, 27) + +class at_pskc_keyId(char.UTF8String): + pass + + +id_pskc_keyStartDate = _OID(id_pskc, 21) + +class at_pskc_keyStartDate(useful.GeneralizedTime): + pass + + +id_pskc_keyExpiryDate = _OID(id_pskc, 22) + +class at_pskc_keyExpiryDate(useful.GeneralizedTime): + pass + + +id_pskc_numberOfTransactions = _OID(id_pskc, 
23) + +class at_pskc_numberOfTransactions(univ.Integer): + pass + +at_pskc_numberOfTransactions.subtypeSpec = constraint.ValueRangeConstraint(0, MAX) + + +id_pskc_keyUsages = _OID(id_pskc, 24) + +class PSKCKeyUsage(char.UTF8String): + pass + +PSKCKeyUsage.namedValues = namedval.NamedValues( + ('otp', "OTP"), + ('cr', "CR"), + ('encrypt', "Encrypt"), + ('integrity', "Integrity"), + ('verify', "Verify"), + ('unlock', "Unlock"), + ('decrypt', "Decrypt"), + ('keywrap', "KeyWrap"), + ('unwrap', "Unwrap"), + ('derive', "Derive"), + ('generate', "Generate") +) + +PSKCKeyUsage.subtypeSpec = constraint.SingleValueConstraint( + "OTP", "CR", "Encrypt", "Integrity", "Verify", "Unlock", + "Decrypt", "KeyWrap", "Unwrap", "Derive", "Generate" ) + +class PSKCKeyUsages(univ.SequenceOf): + pass + +PSKCKeyUsages.componentType = PSKCKeyUsage() + +class at_pskc_keyUsage(PSKCKeyUsages): + pass + + +id_pskc_pinPolicy = _OID(id_pskc, 25) + +class PINUsageMode(char.UTF8String): + pass + +PINUsageMode.namedValues = namedval.NamedValues( + ("local", "Local"), + ("prepend", "Prepend"), + ("append", "Append"), + ("algorithmic", "Algorithmic") +) + +PINUsageMode.subtypeSpec = constraint.SingleValueConstraint( + "Local", "Prepend", "Append", "Algorithmic" ) + +class PINPolicy(univ.Sequence): + pass + +PINPolicy.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('pinKeyId', char.UTF8String().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('pinUsageMode', PINUsageMode().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('maxFailedAttempts', univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('minLength', univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.OptionalNamedType('maxLength', univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))), + namedtype.OptionalNamedType('pinEncoding', Encoding().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))) +) + +class at_pskc_pinPolicy(PINPolicy): + pass + + +# Map of Symmetric Key Package Attribute OIDs to Attributes + +sKeyPkgAttributesMap = { + id_pskc_manufacturer: at_pskc_manufacturer(), + id_pskc_serialNo: at_pskc_serialNo(), + id_pskc_model: at_pskc_model(), + id_pskc_issueNo: at_pskc_issueNo(), + id_pskc_deviceBinding: at_pskc_deviceBinding(), + id_pskc_deviceStartDate: at_pskc_deviceStartDate(), + id_pskc_deviceExpiryDate: at_pskc_deviceExpiryDate(), + id_pskc_moduleId: at_pskc_moduleId(), + id_pskc_deviceUserId: at_pskc_deviceUserId(), +} + + +# Map of Symmetric Key Attribute OIDs to Attributes + +sKeyAttributesMap = { + id_pskc_keyId: at_pskc_keyId(), + id_pskc_algorithm: at_pskc_algorithm(), + id_pskc_issuer: at_pskc_issuer(), + id_pskc_keyProfileId: at_pskc_keyProfileId(), + id_pskc_keyReference: at_pskc_keyReference(), + id_pskc_friendlyName: at_pskc_friendlyName(), + id_pskc_algorithmParameters: at_pskc_algorithmParameters(), + id_pskc_counter: at_pskc_counter(), + id_pskc_time: at_pskc_time(), + id_pskc_timeInterval: at_pskc_timeInterval(), + id_pskc_timeDrift: at_pskc_timeDrift(), + id_pskc_valueMAC: at_pskc_valueMAC(), + id_pskc_keyUserId: at_pskc_keyUserId(), + 
id_pskc_keyStartDate: at_pskc_keyStartDate(), + id_pskc_keyExpiryDate: at_pskc_keyExpiryDate(), + id_pskc_numberOfTransactions: at_pskc_numberOfTransactions(), + id_pskc_keyUsages: at_pskc_keyUsage(), + id_pskc_pinPolicy: at_pskc_pinPolicy(), +} + + +# This definition replaces Attribute() from rfc5652.py; it is the same except +# that opentype is added with sKeyPkgAttributesMap and sKeyAttributesMap + +class AttributeType(univ.ObjectIdentifier): + pass + + +class AttributeValue(univ.Any): + pass + + +class SKeyAttribute(univ.Sequence): + pass + +SKeyAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('attrType', AttributeType()), + namedtype.NamedType('attrValues', + univ.SetOf(componentType=AttributeValue()), + openType=opentype.OpenType('attrType', sKeyAttributesMap) + ) +) + + +class SKeyPkgAttribute(univ.Sequence): + pass + +SKeyPkgAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('attrType', AttributeType()), + namedtype.NamedType('attrValues', + univ.SetOf(componentType=AttributeValue()), + openType=opentype.OpenType('attrType', sKeyPkgAttributesMap) + ) +) + + +# Symmetric Key Package Content Type + +id_ct_KP_sKeyPackage = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.25') + + +class KeyPkgVersion(univ.Integer): + pass + +KeyPkgVersion.namedValues = namedval.NamedValues( + ('v1', 1) +) + + +class OneSymmetricKey(univ.Sequence): + pass + +OneSymmetricKey.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('sKeyAttrs', + univ.SequenceOf(componentType=SKeyAttribute()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))), + namedtype.OptionalNamedType('sKey', univ.OctetString()) +) + +OneSymmetricKey.sizeSpec = univ.Sequence.sizeSpec + constraint.ValueSizeConstraint(1, 2) + + +class SymmetricKeys(univ.SequenceOf): + pass + +SymmetricKeys.componentType = OneSymmetricKey() +SymmetricKeys.subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +class SymmetricKeyPackage(univ.Sequence): + pass + +SymmetricKeyPackage.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', KeyPkgVersion().subtype(value='v1')), + namedtype.OptionalNamedType('sKeyPkgAttrs', + univ.SequenceOf(componentType=SKeyPkgAttribute()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX), + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('sKeys', SymmetricKeys()) +) + + +# Map of Content Type OIDs to Content Types are +# added to the ones that are in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_ct_KP_sKeyPackage: SymmetricKeyPackage(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6032.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6032.py new file mode 100644 index 0000000000000000000000000000000000000000..563639a8d66e1dd571ca0f819ab55d59c66b831b --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6032.py @@ -0,0 +1,68 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# CMS Encrypted Key Package Content Type +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6032.txt +# + +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5652 +from pyasn1_modules import rfc5083 + + +# Content Decryption Key Identifier attribute + +id_aa_KP_contentDecryptKeyID = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.66') + +class ContentDecryptKeyID(univ.OctetString): + pass + +aa_content_decrypt_key_identifier = rfc5652.Attribute() +aa_content_decrypt_key_identifier['attrType'] = id_aa_KP_contentDecryptKeyID +aa_content_decrypt_key_identifier['attrValues'][0] = ContentDecryptKeyID() + + +# Encrypted Key Package Content Type + +id_ct_KP_encryptedKeyPkg = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.2') + +class EncryptedKeyPackage(univ.Choice): + pass + +EncryptedKeyPackage.componentType = namedtype.NamedTypes( + namedtype.NamedType('encrypted', rfc5652.EncryptedData()), + namedtype.NamedType('enveloped', rfc5652.EnvelopedData().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('authEnveloped', rfc5083.AuthEnvelopedData().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +# Map of Attribute Type OIDs to Attributes are +# added to the ones that are in rfc5652.py + +_cmsAttributesMapUpdate = { + id_aa_KP_contentDecryptKeyID: ContentDecryptKeyID(), +} + +rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) + + +# Map of Content Type OIDs to Content Types are +# added to the ones that are in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_ct_KP_encryptedKeyPkg: EncryptedKeyPackage(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6120.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6120.py new file mode 100644 index 0000000000000000000000000000000000000000..ab256203a08e3d468f76ffd79968dcb164f61e9b --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6120.py @@ -0,0 +1,43 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Extensible Messaging and Presence Protocol (XMPP) +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6120.txt +# + +from pyasn1.type import char +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + +MAX = float('inf') + + +# XmppAddr Identifier Type as specified in Section 13.7.1.4. of RFC 6120 + +id_pkix = rfc5280.id_pkix + +id_on = id_pkix + (8, ) + +id_on_xmppAddr = id_on + (5, ) + + +class XmppAddr(char.UTF8String): + pass + + +# Map of Other Name OIDs to Other Name is added to the +# ones that are in rfc5280.py + +_anotherNameMapUpdate = { + id_on_xmppAddr: XmppAddr(), +} + +rfc5280.anotherNameMap.update(_anotherNameMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6170.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6170.py new file mode 100644 index 0000000000000000000000000000000000000000..e2876167b705a19f49247bdaa16cb099463c589c --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6170.py @@ -0,0 +1,17 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Certificate Image in the Internet X.509 Public Key Infrastructure +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6170.txt +# + +from pyasn1.type import univ + +id_logo_certImage = univ.ObjectIdentifier('1.3.6.1.5.5.7.20.3') diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6187.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6187.py new file mode 100644 index 0000000000000000000000000000000000000000..4be005471623acbe31f20e608fdaddda9830a30b --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6187.py @@ -0,0 +1,22 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# X.509v3 Certificates for Secure Shell Authentication +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6187.txt +# + +from pyasn1.type import univ + +id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7') + +id_kp = id_pkix + (3, ) + +id_kp_secureShellClient = id_kp + (21, ) +id_kp_secureShellServer = id_kp + (22, ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6210.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6210.py new file mode 100644 index 0000000000000000000000000000000000000000..28587b9e70b0fc0b2e2504ca8963d1c4af50eae4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6210.py @@ -0,0 +1,42 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Experiment for Hash Functions with Parameters in the CMS +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6210.txt +# + +from pyasn1.type import constraint +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +id_alg_MD5_XOR_EXPERIMENT = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.13') + + +class MD5_XOR_EXPERIMENT(univ.OctetString): + pass + +MD5_XOR_EXPERIMENT.subtypeSpec = constraint.ValueSizeConstraint(64, 64) + + +mda_xor_md5_EXPERIMENT = rfc5280.AlgorithmIdentifier() +mda_xor_md5_EXPERIMENT['algorithm'] = id_alg_MD5_XOR_EXPERIMENT +mda_xor_md5_EXPERIMENT['parameters'] = MD5_XOR_EXPERIMENT() + + +# Map of Algorithm Identifier OIDs to Parameters added to the +# ones that are in rfc5280.py. + +_algorithmIdentifierMapUpdate = { + id_alg_MD5_XOR_EXPERIMENT: MD5_XOR_EXPERIMENT(), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6211.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6211.py new file mode 100644 index 0000000000000000000000000000000000000000..abd7a8688d0ca856c525d138564dacb5fb945a3e --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6211.py @@ -0,0 +1,72 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Algorithm Identifier Protection Attribute
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6211.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+
+# Imports from RFC 5652
+
+DigestAlgorithmIdentifier = rfc5652.DigestAlgorithmIdentifier
+
+MessageAuthenticationCodeAlgorithm = rfc5652.MessageAuthenticationCodeAlgorithm
+
+SignatureAlgorithmIdentifier = rfc5652.SignatureAlgorithmIdentifier
+
+
+# CMS Algorithm Protection attribute
+
+id_aa_cmsAlgorithmProtect = univ.ObjectIdentifier('1.2.840.113549.1.9.52')
+
+
+class CMSAlgorithmProtection(univ.Sequence):
+    pass
+
+CMSAlgorithmProtection.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+    namedtype.OptionalNamedType('signatureAlgorithm',
+        SignatureAlgorithmIdentifier().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+    namedtype.OptionalNamedType('macAlgorithm',
+        MessageAuthenticationCodeAlgorithm().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+CMSAlgorithmProtection.subtypeSpec = constraint.ConstraintsUnion(
+    constraint.WithComponentsConstraint(
+        ('signatureAlgorithm', constraint.ComponentPresentConstraint()),
+        ('macAlgorithm', constraint.ComponentAbsentConstraint())),
+    constraint.WithComponentsConstraint(
+        ('signatureAlgorithm', constraint.ComponentAbsentConstraint()),
+        ('macAlgorithm', constraint.ComponentPresentConstraint()))
+)
+
+
+aa_cmsAlgorithmProtection = rfc5652.Attribute()
+aa_cmsAlgorithmProtection['attrType'] = id_aa_cmsAlgorithmProtect
+aa_cmsAlgorithmProtection['attrValues'][0] = CMSAlgorithmProtection()
+
+
+# Map of Attribute Type OIDs to Attributes are
+# added to the ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+    id_aa_cmsAlgorithmProtect: CMSAlgorithmProtection(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
\ No newline at end of file
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6402.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6402.py
new file mode 100644
index 0000000000000000000000000000000000000000..5490b05fb973082255802cd9cc53c7f90e04a4b0
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6402.py
@@ -0,0 +1,628 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Modified by Russ Housley to add maps for CMC Control Attributes
+# and CMC Content Types for use with opentypes. 
+# +# Copyright (c) 2005-2020, Ilya Etingof +# License: http://snmplabs.com/pyasn1/license.html +# +# Certificate Management over CMS (CMC) Updates +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6402.txt +# +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import opentype +from pyasn1.type import tag +from pyasn1.type import univ +from pyasn1.type import useful + +from pyasn1_modules import rfc4211 +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5652 + +MAX = float('inf') + + +def _buildOid(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + + return univ.ObjectIdentifier(output) + + +# Since CMS Attributes and CMC Controls both use 'attrType', one map is used +cmcControlAttributesMap = rfc5652.cmsAttributesMap + + +class ChangeSubjectName(univ.Sequence): + pass + + +ChangeSubjectName.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('subject', rfc5280.Name()), + namedtype.OptionalNamedType('subjectAlt', rfc5280.GeneralNames()) +) + + +class AttributeValue(univ.Any): + pass + + +class CMCStatus(univ.Integer): + pass + + +CMCStatus.namedValues = namedval.NamedValues( + ('success', 0), + ('failed', 2), + ('pending', 3), + ('noSupport', 4), + ('confirmRequired', 5), + ('popRequired', 6), + ('partial', 7) +) + + +class PendInfo(univ.Sequence): + pass + + +PendInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('pendToken', univ.OctetString()), + namedtype.NamedType('pendTime', useful.GeneralizedTime()) +) + +bodyIdMax = univ.Integer(4294967295) + + +class BodyPartID(univ.Integer): + pass + + +BodyPartID.subtypeSpec = constraint.ValueRangeConstraint(0, bodyIdMax) + + +class BodyPartPath(univ.SequenceOf): + pass + + +BodyPartPath.componentType = BodyPartID() +BodyPartPath.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class BodyPartReference(univ.Choice): + pass + + +BodyPartReference.componentType = namedtype.NamedTypes( + namedtype.NamedType('bodyPartID', BodyPartID()), + namedtype.NamedType('bodyPartPath', BodyPartPath()) +) + + +class CMCFailInfo(univ.Integer): + pass + + +CMCFailInfo.namedValues = namedval.NamedValues( + ('badAlg', 0), + ('badMessageCheck', 1), + ('badRequest', 2), + ('badTime', 3), + ('badCertId', 4), + ('unsupportedExt', 5), + ('mustArchiveKeys', 6), + ('badIdentity', 7), + ('popRequired', 8), + ('popFailed', 9), + ('noKeyReuse', 10), + ('internalCAError', 11), + ('tryLater', 12), + ('authDataFail', 13) +) + + +class CMCStatusInfoV2(univ.Sequence): + pass + + +CMCStatusInfoV2.componentType = namedtype.NamedTypes( + namedtype.NamedType('cMCStatus', CMCStatus()), + namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference())), + namedtype.OptionalNamedType('statusString', char.UTF8String()), + namedtype.OptionalNamedType( + 'otherInfo', univ.Choice( + componentType=namedtype.NamedTypes( + namedtype.NamedType('failInfo', CMCFailInfo()), + namedtype.NamedType('pendInfo', PendInfo()), + namedtype.NamedType( + 'extendedFailInfo', univ.Sequence( + componentType=namedtype.NamedTypes( + namedtype.NamedType('failInfoOID', univ.ObjectIdentifier()), + namedtype.NamedType('failInfoValue', AttributeValue())) + ) + ) + ) + ) + ) +) + + +class GetCRL(univ.Sequence): + pass + + +GetCRL.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerName', rfc5280.Name()), + 
namedtype.OptionalNamedType('cRLName', rfc5280.GeneralName()), + namedtype.OptionalNamedType('time', useful.GeneralizedTime()), + namedtype.OptionalNamedType('reasons', rfc5280.ReasonFlags()) +) + +id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7) + +id_cmc = _buildOid(id_pkix, 7) + +id_cmc_batchResponses = _buildOid(id_cmc, 29) + +id_cmc_popLinkWitness = _buildOid(id_cmc, 23) + + +class PopLinkWitnessV2(univ.Sequence): + pass + + +PopLinkWitnessV2.componentType = namedtype.NamedTypes( + namedtype.NamedType('keyGenAlgorithm', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('macAlgorithm', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('witness', univ.OctetString()) +) + +id_cmc_popLinkWitnessV2 = _buildOid(id_cmc, 33) + +id_cmc_identityProofV2 = _buildOid(id_cmc, 34) + +id_cmc_revokeRequest = _buildOid(id_cmc, 17) + +id_cmc_recipientNonce = _buildOid(id_cmc, 7) + + +class ControlsProcessed(univ.Sequence): + pass + + +ControlsProcessed.componentType = namedtype.NamedTypes( + namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference())) +) + + +class CertificationRequest(univ.Sequence): + pass + + +CertificationRequest.componentType = namedtype.NamedTypes( + namedtype.NamedType( + 'certificationRequestInfo', univ.Sequence( + componentType=namedtype.NamedTypes( + namedtype.NamedType('version', univ.Integer()), + namedtype.NamedType('subject', rfc5280.Name()), + namedtype.NamedType( + 'subjectPublicKeyInfo', univ.Sequence( + componentType=namedtype.NamedTypes( + namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('subjectPublicKey', univ.BitString()) + ) + ) + ), + namedtype.NamedType( + 'attributes', univ.SetOf( + componentType=rfc5652.Attribute()).subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)) + ) + ) + ) + ), + namedtype.NamedType('signatureAlgorithm', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()) +) + + +class TaggedCertificationRequest(univ.Sequence): + pass + + +TaggedCertificationRequest.componentType = namedtype.NamedTypes( + namedtype.NamedType('bodyPartID', BodyPartID()), + namedtype.NamedType('certificationRequest', CertificationRequest()) +) + + +class TaggedRequest(univ.Choice): + pass + + +TaggedRequest.componentType = namedtype.NamedTypes( + namedtype.NamedType('tcr', TaggedCertificationRequest().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('crm', + rfc4211.CertReqMsg().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('orm', univ.Sequence(componentType=namedtype.NamedTypes( + namedtype.NamedType('bodyPartID', BodyPartID()), + namedtype.NamedType('requestMessageType', univ.ObjectIdentifier()), + namedtype.NamedType('requestMessageValue', univ.Any()) + )) + .subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))) +) + +id_cmc_popLinkRandom = _buildOid(id_cmc, 22) + +id_cmc_statusInfo = _buildOid(id_cmc, 1) + +id_cmc_trustedAnchors = _buildOid(id_cmc, 26) + +id_cmc_transactionId = _buildOid(id_cmc, 5) + +id_cmc_encryptedPOP = _buildOid(id_cmc, 9) + + +class PublishTrustAnchors(univ.Sequence): + pass + + +PublishTrustAnchors.componentType = namedtype.NamedTypes( + namedtype.NamedType('seqNumber', univ.Integer()), + namedtype.NamedType('hashAlgorithm', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('anchorHashes', univ.SequenceOf(componentType=univ.OctetString())) +) + + +class RevokeRequest(univ.Sequence): + pass + + 
+RevokeRequest.componentType = namedtype.NamedTypes( + namedtype.NamedType('issuerName', rfc5280.Name()), + namedtype.NamedType('serialNumber', univ.Integer()), + namedtype.NamedType('reason', rfc5280.CRLReason()), + namedtype.OptionalNamedType('invalidityDate', useful.GeneralizedTime()), + namedtype.OptionalNamedType('passphrase', univ.OctetString()), + namedtype.OptionalNamedType('comment', char.UTF8String()) +) + +id_cmc_senderNonce = _buildOid(id_cmc, 6) + +id_cmc_authData = _buildOid(id_cmc, 27) + + +class TaggedContentInfo(univ.Sequence): + pass + + +TaggedContentInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('bodyPartID', BodyPartID()), + namedtype.NamedType('contentInfo', rfc5652.ContentInfo()) +) + + +class IdentifyProofV2(univ.Sequence): + pass + + +IdentifyProofV2.componentType = namedtype.NamedTypes( + namedtype.NamedType('proofAlgID', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('macAlgId', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('witness', univ.OctetString()) +) + + +class CMCPublicationInfo(univ.Sequence): + pass + + +CMCPublicationInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('certHashes', univ.SequenceOf(componentType=univ.OctetString())), + namedtype.NamedType('pubInfo', rfc4211.PKIPublicationInfo()) +) + +id_kp_cmcCA = _buildOid(rfc5280.id_kp, 27) + +id_cmc_confirmCertAcceptance = _buildOid(id_cmc, 24) + +id_cmc_raIdentityWitness = _buildOid(id_cmc, 35) + +id_ExtensionReq = _buildOid(1, 2, 840, 113549, 1, 9, 14) + +id_cct = _buildOid(id_pkix, 12) + +id_cct_PKIData = _buildOid(id_cct, 2) + +id_kp_cmcRA = _buildOid(rfc5280.id_kp, 28) + + +class CMCStatusInfo(univ.Sequence): + pass + + +CMCStatusInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('cMCStatus', CMCStatus()), + namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartID())), + namedtype.OptionalNamedType('statusString', char.UTF8String()), + namedtype.OptionalNamedType( + 'otherInfo', univ.Choice( + componentType=namedtype.NamedTypes( + namedtype.NamedType('failInfo', CMCFailInfo()), + namedtype.NamedType('pendInfo', PendInfo()) + ) + ) + ) +) + + +class DecryptedPOP(univ.Sequence): + pass + + +DecryptedPOP.componentType = namedtype.NamedTypes( + namedtype.NamedType('bodyPartID', BodyPartID()), + namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('thePOP', univ.OctetString()) +) + +id_cmc_addExtensions = _buildOid(id_cmc, 8) + +id_cmc_modCertTemplate = _buildOid(id_cmc, 31) + + +class TaggedAttribute(univ.Sequence): + pass + + +TaggedAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('bodyPartID', BodyPartID()), + namedtype.NamedType('attrType', univ.ObjectIdentifier()), + namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()), + openType=opentype.OpenType('attrType', cmcControlAttributesMap) + ) +) + + +class OtherMsg(univ.Sequence): + pass + + +OtherMsg.componentType = namedtype.NamedTypes( + namedtype.NamedType('bodyPartID', BodyPartID()), + namedtype.NamedType('otherMsgType', univ.ObjectIdentifier()), + namedtype.NamedType('otherMsgValue', univ.Any()) +) + + +class PKIData(univ.Sequence): + pass + + +PKIData.componentType = namedtype.NamedTypes( + namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())), + namedtype.NamedType('reqSequence', univ.SequenceOf(componentType=TaggedRequest())), + namedtype.NamedType('cmsSequence', 
univ.SequenceOf(componentType=TaggedContentInfo())),
+    namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
+)
+
+
+class BodyPartList(univ.SequenceOf):
+    pass
+
+
+BodyPartList.componentType = BodyPartID()
+BodyPartList.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_cmc_responseBody = _buildOid(id_cmc, 37)
+
+
+class AuthPublish(BodyPartID):
+    pass
+
+
+class CMCUnsignedData(univ.Sequence):
+    pass
+
+
+CMCUnsignedData.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('bodyPartPath', BodyPartPath()),
+    namedtype.NamedType('identifier', univ.ObjectIdentifier()),
+    namedtype.NamedType('content', univ.Any())
+)
+
+
+class CMCCertId(rfc5652.IssuerAndSerialNumber):
+    pass
+
+
+class PKIResponse(univ.Sequence):
+    pass
+
+
+PKIResponse.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
+    namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
+    namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
+)
+
+
+class ResponseBody(PKIResponse):
+    pass
+
+
+id_cmc_statusInfoV2 = _buildOid(id_cmc, 25)
+
+id_cmc_lraPOPWitness = _buildOid(id_cmc, 11)
+
+
+class ModCertTemplate(univ.Sequence):
+    pass
+
+
+ModCertTemplate.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('pkiDataReference', BodyPartPath()),
+    namedtype.NamedType('certReferences', BodyPartList()),
+    namedtype.DefaultedNamedType('replace', univ.Boolean().subtype(value=1)),
+    namedtype.NamedType('certTemplate', rfc4211.CertTemplate())
+)
+
+id_cmc_regInfo = _buildOid(id_cmc, 18)
+
+id_cmc_identityProof = _buildOid(id_cmc, 3)
+
+
+class ExtensionReq(univ.SequenceOf):
+    pass
+
+
+ExtensionReq.componentType = rfc5280.Extension()
+ExtensionReq.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_kp_cmcArchive = _buildOid(rfc5280.id_kp, 29)
+
+id_cmc_publishCert = _buildOid(id_cmc, 30)
+
+id_cmc_dataReturn = _buildOid(id_cmc, 4)
+
+
+class LraPopWitness(univ.Sequence):
+    pass
+
+
+LraPopWitness.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('pkiDataBodyid', BodyPartID()),
+    namedtype.NamedType('bodyIds', univ.SequenceOf(componentType=BodyPartID()))
+)
+
+id_aa = _buildOid(1, 2, 840, 113549, 1, 9, 16, 2)
+
+id_aa_cmc_unsignedData = _buildOid(id_aa, 34)
+
+id_cmc_getCert = _buildOid(id_cmc, 15)
+
+id_cmc_batchRequests = _buildOid(id_cmc, 28)
+
+id_cmc_decryptedPOP = _buildOid(id_cmc, 10)
+
+id_cmc_responseInfo = _buildOid(id_cmc, 19)
+
+id_cmc_changeSubjectName = _buildOid(id_cmc, 36)
+
+
+class GetCert(univ.Sequence):
+    pass
+
+
+GetCert.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('issuerName', rfc5280.GeneralName()),
+    namedtype.NamedType('serialNumber', univ.Integer())
+)
+
+id_cmc_identification = _buildOid(id_cmc, 2)
+
+id_cmc_queryPending = _buildOid(id_cmc, 21)
+
+
+class AddExtensions(univ.Sequence):
+    pass
+
+
+AddExtensions.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('pkiDataReference', BodyPartID()),
+    namedtype.NamedType('certReferences', univ.SequenceOf(componentType=BodyPartID())),
+    namedtype.NamedType('extensions', univ.SequenceOf(componentType=rfc5280.Extension()))
+)
+
+
+class EncryptedPOP(univ.Sequence):
+    pass
+
+
+EncryptedPOP.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('request', TaggedRequest()),
+    namedtype.NamedType('cms', rfc5652.ContentInfo()),
+    namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
+    namedtype.NamedType('witnessAlgID', 
rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('witness', univ.OctetString()) +) + +id_cmc_getCRL = _buildOid(id_cmc, 16) + +id_cct_PKIResponse = _buildOid(id_cct, 3) + +id_cmc_controlProcessed = _buildOid(id_cmc, 32) + + +class NoSignatureValue(univ.OctetString): + pass + + +id_ad_cmc = _buildOid(rfc5280.id_ad, 12) + +id_alg_noSignature = _buildOid(id_pkix, 6, 2) + + +# Map of CMC Control OIDs to CMC Control Attributes + +_cmcControlAttributesMapUpdate = { + id_cmc_statusInfo: CMCStatusInfo(), + id_cmc_statusInfoV2: CMCStatusInfoV2(), + id_cmc_identification: char.UTF8String(), + id_cmc_identityProof: univ.OctetString(), + id_cmc_identityProofV2: IdentifyProofV2(), + id_cmc_dataReturn: univ.OctetString(), + id_cmc_transactionId: univ.Integer(), + id_cmc_senderNonce: univ.OctetString(), + id_cmc_recipientNonce: univ.OctetString(), + id_cmc_addExtensions: AddExtensions(), + id_cmc_encryptedPOP: EncryptedPOP(), + id_cmc_decryptedPOP: DecryptedPOP(), + id_cmc_lraPOPWitness: LraPopWitness(), + id_cmc_getCert: GetCert(), + id_cmc_getCRL: GetCRL(), + id_cmc_revokeRequest: RevokeRequest(), + id_cmc_regInfo: univ.OctetString(), + id_cmc_responseInfo: univ.OctetString(), + id_cmc_queryPending: univ.OctetString(), + id_cmc_popLinkRandom: univ.OctetString(), + id_cmc_popLinkWitness: univ.OctetString(), + id_cmc_popLinkWitnessV2: PopLinkWitnessV2(), + id_cmc_confirmCertAcceptance: CMCCertId(), + id_cmc_trustedAnchors: PublishTrustAnchors(), + id_cmc_authData: AuthPublish(), + id_cmc_batchRequests: BodyPartList(), + id_cmc_batchResponses: BodyPartList(), + id_cmc_publishCert: CMCPublicationInfo(), + id_cmc_modCertTemplate: ModCertTemplate(), + id_cmc_controlProcessed: ControlsProcessed(), + id_ExtensionReq: ExtensionReq(), +} + +cmcControlAttributesMap.update(_cmcControlAttributesMapUpdate) + + +# Map of CMC Content Type OIDs to CMC Content Types are added to +# the ones that are in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_cct_PKIData: PKIData(), + id_cct_PKIResponse: PKIResponse(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) + diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6482.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6482.py new file mode 100644 index 0000000000000000000000000000000000000000..d213a46f8de4f2e71f2a363bd637929b0f6936fa --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6482.py @@ -0,0 +1,74 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# RPKI Route Origin Authorizations (ROAs) +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6482.txt +# https://www.rfc-editor.org/errata/eid5881 +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5652 + +MAX = float('inf') + + +id_ct_routeOriginAuthz = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.24') + + +class ASID(univ.Integer): + pass + + +class IPAddress(univ.BitString): + pass + + +class ROAIPAddress(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('address', IPAddress()), + namedtype.OptionalNamedType('maxLength', univ.Integer()) + ) + + +class ROAIPAddressFamily(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('addressFamily', + univ.OctetString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(2, 3))), + namedtype.NamedType('addresses', + univ.SequenceOf(componentType=ROAIPAddress()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) + ) + + +class RouteOriginAttestation(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', + univ.Integer().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)), + namedtype.NamedType('asID', ASID()), + namedtype.NamedType('ipAddrBlocks', + univ.SequenceOf(componentType=ROAIPAddressFamily()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(1, MAX))) + ) + + +# Map of Content Type OIDs to Content Types added to the +# ones that are in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_ct_routeOriginAuthz: RouteOriginAttestation(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6486.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6486.py new file mode 100644 index 0000000000000000000000000000000000000000..31c936a4f259cdf194d768bdc47249816d761c48 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6486.py @@ -0,0 +1,68 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# RPKI Manifests +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6486.txt +# + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import useful +from pyasn1.type import univ + +from pyasn1_modules import rfc5652 + +MAX = float('inf') + + +id_smime = univ.ObjectIdentifier('1.2.840.113549.1.9.16') + +id_ct = id_smime + (1, ) + +id_ct_rpkiManifest = id_ct + (26, ) + + +class FileAndHash(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('file', char.IA5String()), + namedtype.NamedType('hash', univ.BitString()) + ) + + +class Manifest(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', + univ.Integer().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)), + namedtype.NamedType('manifestNumber', + univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(0, MAX))), + namedtype.NamedType('thisUpdate', + useful.GeneralizedTime()), + namedtype.NamedType('nextUpdate', + useful.GeneralizedTime()), + namedtype.NamedType('fileHashAlg', + univ.ObjectIdentifier()), + namedtype.NamedType('fileList', + univ.SequenceOf(componentType=FileAndHash()).subtype( + subtypeSpec=constraint.ValueSizeConstraint(0, MAX))) + ) + + +# Map of Content Type OIDs to Content Types added to the +# ones that are in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_ct_rpkiManifest: Manifest(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6487.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6487.py new file mode 100644 index 0000000000000000000000000000000000000000..d8c2f87423f98a0157f7bbd26152a39ede1d5acb --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6487.py @@ -0,0 +1,22 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Profile for X.509 PKIX Resource Certificates +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6487.txt +# + +from pyasn1.type import univ + +id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7') + +id_ad = id_pkix + (48, ) + +id_ad_rpkiManifest = id_ad + (10, ) +id_ad_signedObject = id_ad + (11, ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6664.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6664.py new file mode 100644 index 0000000000000000000000000000000000000000..41629d8d7f85ef45a8b28dee523055807eb858dc --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6664.py @@ -0,0 +1,147 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with some assistance from asn1ate v.0.6.0. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# S/MIME Capabilities for Public Key Definitions +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6664.txt +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5751 +from pyasn1_modules import rfc5480 +from pyasn1_modules import rfc4055 +from pyasn1_modules import rfc3279 + +MAX = float('inf') + + +# Imports from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + + +# Imports from RFC 3279 + +dhpublicnumber = rfc3279.dhpublicnumber + +Dss_Parms = rfc3279.Dss_Parms + +id_dsa = rfc3279.id_dsa + +id_ecPublicKey = rfc3279.id_ecPublicKey + +rsaEncryption = rfc3279.rsaEncryption + + +# Imports from RFC 4055 + +id_mgf1 = rfc4055.id_mgf1 + +id_RSAES_OAEP = rfc4055.id_RSAES_OAEP + +id_RSASSA_PSS = rfc4055.id_RSASSA_PSS + + +# Imports from RFC 5480 + +ECParameters = rfc5480.ECParameters + +id_ecDH = rfc5480.id_ecDH + +id_ecMQV = rfc5480.id_ecMQV + + +# RSA + +class RSAKeySize(univ.Integer): + # suggested values are 1024, 2048, 3072, 4096, 7680, 8192, and 15360; + # however, the integer value is not limited to these suggestions + pass + + +class RSAKeyCapabilities(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('minKeySize', RSAKeySize()), + namedtype.OptionalNamedType('maxKeySize', RSAKeySize()) + ) + + +class RsaSsa_Pss_sig_caps(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('hashAlg', AlgorithmIdentifier()), + namedtype.OptionalNamedType('maskAlg', AlgorithmIdentifier()), + namedtype.DefaultedNamedType('trailerField', univ.Integer().subtype(value=1)) + ) + + +# Diffie-Hellman and DSA + +class DSAKeySize(univ.Integer): + subtypeSpec = constraint.SingleValueConstraint(1024, 2048, 3072, 7680, 15360) + + +class DSAKeyCapabilities(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('keySizes', univ.Sequence(componentType=namedtype.NamedTypes( + namedtype.NamedType('minKeySize', + DSAKeySize()), + namedtype.OptionalNamedType('maxKeySize', + DSAKeySize()), + namedtype.OptionalNamedType('maxSizeP', + univ.Integer().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('maxSizeQ', + univ.Integer().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 2))), + namedtype.OptionalNamedType('maxSizeG', + univ.Integer().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 3))) + )).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('keyParams', + Dss_Parms().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 1))) + ) + + +# Elliptic Curve + +class EC_SMimeCaps(univ.SequenceOf): + componentType = ECParameters() + subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +# Update the SMIMECapabilities Attribute Map in rfc5751.py +# +# The map can either include an entry for scap-sa-rsaSSA-PSS or +# scap-pk-rsaSSA-PSS, but not both. One is associated with the +# public key and the other is associated with the signature +# algorithm; however, they use the same OID. If you need the +# other one in your application, copy the map into a local dict, +# adjust as needed, and pass the local dict to the decoder with +# openTypes=your_local_map. 
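+#
+# For illustration only (this sketch is not part of the RFC 6664 module
+# itself, and der_bytes is a hypothetical buffer holding a DER-encoded
+# SMIMECapabilities value): a caller that wants the signature-algorithm
+# interpretation of the RSASSA-PSS capability could copy the map, swap
+# in RsaSsa_Pss_sig_caps, and hand the copy to the pyasn1 decoder:
+#
+#     from pyasn1.codec.der.decoder import decode
+#     from pyasn1_modules import rfc5751, rfc6664
+#
+#     local_map = dict(rfc5751.smimeCapabilityMap)
+#     local_map[rfc6664.id_RSASSA_PSS] = rfc6664.RsaSsa_Pss_sig_caps()
+#     caps, rest = decode(der_bytes,
+#                         asn1Spec=rfc5751.SMIMECapabilities(),
+#                         openTypes=local_map, decodeOpenTypes=True)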
+ +_smimeCapabilityMapUpdate = { + rsaEncryption: RSAKeyCapabilities(), + id_RSASSA_PSS: RSAKeyCapabilities(), + # id_RSASSA_PSS: RsaSsa_Pss_sig_caps(), + id_RSAES_OAEP: RSAKeyCapabilities(), + id_dsa: DSAKeyCapabilities(), + dhpublicnumber: DSAKeyCapabilities(), + id_ecPublicKey: EC_SMimeCaps(), + id_ecDH: EC_SMimeCaps(), + id_ecMQV: EC_SMimeCaps(), + id_mgf1: AlgorithmIdentifier(), +} + +rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6955.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6955.py new file mode 100644 index 0000000000000000000000000000000000000000..09f2d6562ee6343bc8f8cc3056270ecab6169b2c --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6955.py @@ -0,0 +1,108 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Diffie-Hellman Proof-of-Possession Algorithms +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6955.txt +# + +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc3279 +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5652 + + +# Imports from RFC 5652 + +MessageDigest = rfc5652.MessageDigest + +IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber + + +# Imports from RFC 5280 + +id_pkix = rfc5280.id_pkix + + +# Imports from RFC 3279 + +Dss_Sig_Value = rfc3279.Dss_Sig_Value + +DomainParameters = rfc3279.DomainParameters + + +# Static DH Proof-of-Possession + +class DhSigStatic(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('issuerAndSerial', IssuerAndSerialNumber()), + namedtype.NamedType('hashValue', MessageDigest()) + ) + + +# Object Identifiers + +id_dh_sig_hmac_sha1 = id_pkix + (6, 3, ) + +id_dhPop_static_sha1_hmac_sha1 = univ.ObjectIdentifier(id_dh_sig_hmac_sha1) + + +id_alg_dh_pop = id_pkix + (6, 4, ) + +id_alg_dhPop_sha1 = univ.ObjectIdentifier(id_alg_dh_pop) + +id_alg_dhPop_sha224 = id_pkix + (6, 5, ) + +id_alg_dhPop_sha256 = id_pkix + (6, 6, ) + +id_alg_dhPop_sha384 = id_pkix + (6, 7, ) + +id_alg_dhPop_sha512 = id_pkix + (6, 8, ) + + +id_alg_dhPop_static_sha224_hmac_sha224 = id_pkix + (6, 15, ) + +id_alg_dhPop_static_sha256_hmac_sha256 = id_pkix + (6, 16, ) + +id_alg_dhPop_static_sha384_hmac_sha384 = id_pkix + (6, 17, ) + +id_alg_dhPop_static_sha512_hmac_sha512 = id_pkix + (6, 18, ) + + +id_alg_ecdhPop_static_sha224_hmac_sha224 = id_pkix + (6, 25, ) + +id_alg_ecdhPop_static_sha256_hmac_sha256 = id_pkix + (6, 26, ) + +id_alg_ecdhPop_static_sha384_hmac_sha384 = id_pkix + (6, 27, ) + +id_alg_ecdhPop_static_sha512_hmac_sha512 = id_pkix + (6, 28, ) + + +# Update the Algorithm Identifier map in rfc5280.py + +_algorithmIdentifierMapUpdate = { + id_alg_dh_pop: DomainParameters(), + id_alg_dhPop_sha224: DomainParameters(), + id_alg_dhPop_sha256: DomainParameters(), + id_alg_dhPop_sha384: DomainParameters(), + id_alg_dhPop_sha512: DomainParameters(), + id_dh_sig_hmac_sha1: univ.Null(""), + id_alg_dhPop_static_sha224_hmac_sha224: univ.Null(""), + id_alg_dhPop_static_sha256_hmac_sha256: univ.Null(""), + id_alg_dhPop_static_sha384_hmac_sha384: univ.Null(""), + id_alg_dhPop_static_sha512_hmac_sha512: univ.Null(""), + id_alg_ecdhPop_static_sha224_hmac_sha224: univ.Null(""), + id_alg_ecdhPop_static_sha256_hmac_sha256: univ.Null(""), + id_alg_ecdhPop_static_sha384_hmac_sha384: univ.Null(""), + 
id_alg_ecdhPop_static_sha512_hmac_sha512: univ.Null(""), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6960.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6960.py new file mode 100644 index 0000000000000000000000000000000000000000..e5f13056490151af1baa46e90870a6f9485df609 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc6960.py @@ -0,0 +1,223 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Online Certificate Status Protocol (OCSP) +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc6960.txt +# + +from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful + +from pyasn1_modules import rfc2560 +from pyasn1_modules import rfc5280 + +MAX = float('inf') + + +# Imports from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier +AuthorityInfoAccessSyntax = rfc5280.AuthorityInfoAccessSyntax +Certificate = rfc5280.Certificate +CertificateSerialNumber = rfc5280.CertificateSerialNumber +CRLReason = rfc5280.CRLReason +Extensions = rfc5280.Extensions +GeneralName = rfc5280.GeneralName +Name = rfc5280.Name + +id_kp = rfc5280.id_kp + +id_ad_ocsp = rfc5280.id_ad_ocsp + + +# Imports from the original OCSP module in RFC 2560 + +AcceptableResponses = rfc2560.AcceptableResponses +ArchiveCutoff = rfc2560.ArchiveCutoff +CertStatus = rfc2560.CertStatus +KeyHash = rfc2560.KeyHash +OCSPResponse = rfc2560.OCSPResponse +OCSPResponseStatus = rfc2560.OCSPResponseStatus +ResponseBytes = rfc2560.ResponseBytes +RevokedInfo = rfc2560.RevokedInfo +UnknownInfo = rfc2560.UnknownInfo +Version = rfc2560.Version + +id_kp_OCSPSigning = rfc2560.id_kp_OCSPSigning + +id_pkix_ocsp = rfc2560.id_pkix_ocsp +id_pkix_ocsp_archive_cutoff = rfc2560.id_pkix_ocsp_archive_cutoff +id_pkix_ocsp_basic = rfc2560.id_pkix_ocsp_basic +id_pkix_ocsp_crl = rfc2560.id_pkix_ocsp_crl +id_pkix_ocsp_nocheck = rfc2560.id_pkix_ocsp_nocheck +id_pkix_ocsp_nonce = rfc2560.id_pkix_ocsp_nonce +id_pkix_ocsp_response = rfc2560.id_pkix_ocsp_response +id_pkix_ocsp_service_locator = rfc2560.id_pkix_ocsp_service_locator + + +# Additional object identifiers + +id_pkix_ocsp_pref_sig_algs = id_pkix_ocsp + (8, ) +id_pkix_ocsp_extended_revoke = id_pkix_ocsp + (9, ) + + +# Updated structures (mostly to improve openTypes support) + +class CertID(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('issuerNameHash', univ.OctetString()), + namedtype.NamedType('issuerKeyHash', univ.OctetString()), + namedtype.NamedType('serialNumber', CertificateSerialNumber()) + ) + + +class SingleResponse(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('certID', CertID()), + namedtype.NamedType('certStatus', CertStatus()), + namedtype.NamedType('thisUpdate', useful.GeneralizedTime()), + namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('singleExtensions', Extensions().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class ResponderID(univ.Choice): + componentType = namedtype.NamedTypes( + namedtype.NamedType('byName', Name().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + 
namedtype.NamedType('byKey', KeyHash().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class ResponseData(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', Version('v1').subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('responderID', ResponderID()), + namedtype.NamedType('producedAt', useful.GeneralizedTime()), + namedtype.NamedType('responses', univ.SequenceOf( + componentType=SingleResponse())), + namedtype.OptionalNamedType('responseExtensions', Extensions().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) + ) + + +class BasicOCSPResponse(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('tbsResponseData', ResponseData()), + namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()), + namedtype.OptionalNamedType('certs', univ.SequenceOf( + componentType=Certificate()).subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))) + ) + + +class Request(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('reqCert', CertID()), + namedtype.OptionalNamedType('singleRequestExtensions', Extensions().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) + ) + + +class Signature(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()), + namedtype.NamedType('signature', univ.BitString()), + namedtype.OptionalNamedType('certs', univ.SequenceOf( + componentType=Certificate()).subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))) + ) + + +class TBSRequest(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', Version('v1').subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('requestorName', GeneralName().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('requestList', univ.SequenceOf( + componentType=Request())), + namedtype.OptionalNamedType('requestExtensions', Extensions().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class OCSPRequest(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('tbsRequest', TBSRequest()), + namedtype.OptionalNamedType('optionalSignature', Signature().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))) + ) + + +# Previously omitted structure + +class ServiceLocator(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('issuer', Name()), + namedtype.NamedType('locator', AuthorityInfoAccessSyntax()) + ) + + +# Additional structures + +class CrlID(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('crlUrl', char.IA5String().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('crlNum', univ.Integer().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('crlTime', useful.GeneralizedTime().subtype( + explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) + ) + + +class PreferredSignatureAlgorithm(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('sigIdentifier', AlgorithmIdentifier()), + 
namedtype.OptionalNamedType('certIdentifier', AlgorithmIdentifier())
+    )
+
+
+class PreferredSignatureAlgorithms(univ.SequenceOf):
+    componentType = PreferredSignatureAlgorithm()
+
+
+
+# Response Type OID to Response Map
+
+ocspResponseMap = {
+    id_pkix_ocsp_basic: BasicOCSPResponse(),
+}
+
+
+# Map of Extension OIDs to Extensions added to the ones
+# that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+    # Certificate Extension
+    id_pkix_ocsp_nocheck: univ.Null(""),
+    # OCSP Request Extensions
+    id_pkix_ocsp_nonce: univ.OctetString(),
+    id_pkix_ocsp_response: AcceptableResponses(),
+    id_pkix_ocsp_service_locator: ServiceLocator(),
+    id_pkix_ocsp_pref_sig_algs: PreferredSignatureAlgorithms(),
+    # OCSP Response Extensions
+    id_pkix_ocsp_crl: CrlID(),
+    id_pkix_ocsp_archive_cutoff: ArchiveCutoff(),
+    id_pkix_ocsp_extended_revoke: univ.Null(""),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7030.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7030.py
new file mode 100644
index 0000000000000000000000000000000000000000..84b6dc5f9a35e63a4bc31d7e0715217c62a469c7
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7030.py
@@ -0,0 +1,66 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Enrollment over Secure Transport (EST)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7030.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# Imports from RFC 5652
+
+Attribute = rfc5652.Attribute
+
+
+# Asymmetric Decrypt Key Identifier Attribute
+
+id_aa_asymmDecryptKeyID = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.54')
+
+class AsymmetricDecryptKeyIdentifier(univ.OctetString):
+    pass
+
+
+aa_asymmDecryptKeyID = Attribute()
+aa_asymmDecryptKeyID['attrType'] = id_aa_asymmDecryptKeyID
+aa_asymmDecryptKeyID['attrValues'][0] = AsymmetricDecryptKeyIdentifier()
+
+
+# CSR Attributes
+
+class AttrOrOID(univ.Choice):
+    pass
+
+AttrOrOID.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('oid', univ.ObjectIdentifier()),
+    namedtype.NamedType('attribute', Attribute())
+)
+
+
+class CsrAttrs(univ.SequenceOf):
+    pass
+
+CsrAttrs.componentType = AttrOrOID()
+CsrAttrs.subtypeSpec=constraint.ValueSizeConstraint(0, MAX)
+
+
+# Update CMS Attribute Map
+
+_cmsAttributesMapUpdate = {
+    id_aa_asymmDecryptKeyID: AsymmetricDecryptKeyIdentifier(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7191.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7191.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c2be1156278fb36441bf0e94a260e056fede4a4
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7191.py
@@ -0,0 +1,261 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley without assistance from the asn1ate tool.
+# Modified by Russ Housley to add support for opentypes. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# CMS Key Package Receipt and Error Content Types +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc7191.txt + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import opentype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5652 + +MAX = float('inf') + +DistinguishedName = rfc5280.DistinguishedName + + +# SingleAttribute is the same as Attribute in RFC 5652, except that the +# attrValues SET must have one and only one member + +class AttributeValue(univ.Any): + pass + + +class AttributeValues(univ.SetOf): + pass + +AttributeValues.componentType = AttributeValue() +AttributeValues.sizeSpec = univ.Set.sizeSpec + constraint.ValueSizeConstraint(1, 1) + + +class SingleAttribute(univ.Sequence): + pass + +SingleAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('attrType', univ.ObjectIdentifier()), + namedtype.NamedType('attrValues', AttributeValues(), + openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap) + ) +) + + +# SIR Entity Name + +class SIREntityNameType(univ.ObjectIdentifier): + pass + + +class SIREntityNameValue(univ.Any): + pass + + +class SIREntityName(univ.Sequence): + pass + +SIREntityName.componentType = namedtype.NamedTypes( + namedtype.NamedType('sirenType', SIREntityNameType()), + namedtype.NamedType('sirenValue', univ.OctetString()) + # CONTAINING the DER-encoded SIREntityNameValue +) + + +class SIREntityNames(univ.SequenceOf): + pass + +SIREntityNames.componentType = SIREntityName() +SIREntityNames.sizeSpec=constraint.ValueSizeConstraint(1, MAX) + + +id_dn = univ.ObjectIdentifier('2.16.840.1.101.2.1.16.0') + + +class siren_dn(SIREntityName): + def __init__(self): + SIREntityName.__init__(self) + self['sirenType'] = id_dn + + +# Key Package Error CMS Content Type + +class EnumeratedErrorCode(univ.Enumerated): + pass + +# Error codes with values <= 33 are aligned with RFC 5934 +EnumeratedErrorCode.namedValues = namedval.NamedValues( + ('decodeFailure', 1), + ('badContentInfo', 2), + ('badSignedData', 3), + ('badEncapContent', 4), + ('badCertificate', 5), + ('badSignerInfo', 6), + ('badSignedAttrs', 7), + ('badUnsignedAttrs', 8), + ('missingContent', 9), + ('noTrustAnchor', 10), + ('notAuthorized', 11), + ('badDigestAlgorithm', 12), + ('badSignatureAlgorithm', 13), + ('unsupportedKeySize', 14), + ('unsupportedParameters', 15), + ('signatureFailure', 16), + ('insufficientMemory', 17), + ('incorrectTarget', 23), + ('missingSignature', 29), + ('resourcesBusy', 30), + ('versionNumberMismatch', 31), + ('revokedCertificate', 33), + ('ambiguousDecrypt', 60), + ('noDecryptKey', 61), + ('badEncryptedData', 62), + ('badEnvelopedData', 63), + ('badAuthenticatedData', 64), + ('badAuthEnvelopedData', 65), + ('badKeyAgreeRecipientInfo', 66), + ('badKEKRecipientInfo', 67), + ('badEncryptContent', 68), + ('badEncryptAlgorithm', 69), + ('missingCiphertext', 70), + ('decryptFailure', 71), + ('badMACAlgorithm', 72), + ('badAuthAttrs', 73), + ('badUnauthAttrs', 74), + ('invalidMAC', 75), + ('mismatchedDigestAlg', 76), + ('missingCertificate', 77), + ('tooManySigners', 78), + ('missingSignedAttributes', 79), + ('derEncodingNotUsed', 80), + ('missingContentHints', 81), + ('invalidAttributeLocation', 82), + ('badMessageDigest', 83), + ('badKeyPackage', 84), + ('badAttributes', 85), + 
('attributeComparisonFailure', 86), + ('unsupportedSymmetricKeyPackage', 87), + ('unsupportedAsymmetricKeyPackage', 88), + ('constraintViolation', 89), + ('ambiguousDefaultValue', 90), + ('noMatchingRecipientInfo', 91), + ('unsupportedKeyWrapAlgorithm', 92), + ('badKeyTransRecipientInfo', 93), + ('other', 127) +) + + +class ErrorCodeChoice(univ.Choice): + pass + +ErrorCodeChoice.componentType = namedtype.NamedTypes( + namedtype.NamedType('enum', EnumeratedErrorCode()), + namedtype.NamedType('oid', univ.ObjectIdentifier()) +) + + +class KeyPkgID(univ.OctetString): + pass + + +class KeyPkgIdentifier(univ.Choice): + pass + +KeyPkgIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('pkgID', KeyPkgID()), + namedtype.NamedType('attribute', SingleAttribute()) +) + + +class KeyPkgVersion(univ.Integer): + pass + + +KeyPkgVersion.namedValues = namedval.NamedValues( + ('v1', 1), + ('v2', 2) +) + +KeyPkgVersion.subtypeSpec = constraint.ValueRangeConstraint(1, 65535) + + +id_ct_KP_keyPackageError = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.6') + +class KeyPackageError(univ.Sequence): + pass + +KeyPackageError.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', KeyPkgVersion().subtype(value='v2')), + namedtype.OptionalNamedType('errorOf', KeyPkgIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.NamedType('errorBy', SIREntityName()), + namedtype.NamedType('errorCode', ErrorCodeChoice()) +) + + +# Key Package Receipt CMS Content Type + +id_ct_KP_keyPackageReceipt = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.3') + +class KeyPackageReceipt(univ.Sequence): + pass + +KeyPackageReceipt.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('version', KeyPkgVersion().subtype(value='v2')), + namedtype.NamedType('receiptOf', KeyPkgIdentifier()), + namedtype.NamedType('receivedBy', SIREntityName()) +) + + +# Key Package Receipt Request Attribute + +class KeyPkgReceiptReq(univ.Sequence): + pass + +KeyPkgReceiptReq.componentType = namedtype.NamedTypes( + namedtype.DefaultedNamedType('encryptReceipt', univ.Boolean().subtype(value=0)), + namedtype.OptionalNamedType('receiptsFrom', SIREntityNames().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('receiptsTo', SIREntityNames()) +) + + +id_aa_KP_keyPkgIdAndReceiptReq = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.65') + +class KeyPkgIdentifierAndReceiptReq(univ.Sequence): + pass + +KeyPkgIdentifierAndReceiptReq.componentType = namedtype.NamedTypes( + namedtype.NamedType('pkgID', KeyPkgID()), + namedtype.OptionalNamedType('receiptReq', KeyPkgReceiptReq()) +) + + +# Map of Attribute Type OIDs to Attributes are added to +# the ones that are in rfc5652.py + +_cmsAttributesMapUpdate = { + id_aa_KP_keyPkgIdAndReceiptReq: KeyPkgIdentifierAndReceiptReq(), +} + +rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) + + +# Map of CMC Content Type OIDs to CMC Content Types are added to +# the ones that are in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_ct_KP_keyPackageError: KeyPackageError(), + id_ct_KP_keyPackageReceipt: KeyPackageReceipt(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7229.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7229.py new file mode 100644 index 0000000000000000000000000000000000000000..e9bce2d5b61e14fe6428b438fbf8b7cce06cf147 --- /dev/null +++ 
b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7229.py @@ -0,0 +1,29 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Object Identifiers for Test Certificate Policies +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc7229.txt +# + +from pyasn1.type import univ + + +id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7') + +id_TEST = id_pkix + (13, ) + +id_TEST_certPolicyOne = id_TEST + (1, ) +id_TEST_certPolicyTwo = id_TEST + (2, ) +id_TEST_certPolicyThree = id_TEST + (3, ) +id_TEST_certPolicyFour = id_TEST + (4, ) +id_TEST_certPolicyFive = id_TEST + (5, ) +id_TEST_certPolicySix = id_TEST + (6, ) +id_TEST_certPolicySeven = id_TEST + (7, ) +id_TEST_certPolicyEight = id_TEST + (8, ) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7292.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7292.py new file mode 100644 index 0000000000000000000000000000000000000000..1c9f319a5ddbd77fcc524aba8dfc51607d988247 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7292.py @@ -0,0 +1,357 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley with assistance from the asn1ate tool. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# PKCS #12: Personal Information Exchange Syntax v1.1 +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc7292.txt +# https://www.rfc-editor.org/errata_search.php?rfc=7292 + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import opentype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc2315 +from pyasn1_modules import rfc5652 +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5958 + + +def _OID(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + + return univ.ObjectIdentifier(output) + + +# Initialize the maps used in PKCS#12 + +pkcs12BagTypeMap = { } + +pkcs12CertBagMap = { } + +pkcs12CRLBagMap = { } + +pkcs12SecretBagMap = { } + + +# Imports from RFC 2315, RFC 5652, and RFC 5958 + +DigestInfo = rfc2315.DigestInfo + + +ContentInfo = rfc5652.ContentInfo + +PKCS12Attribute = rfc5652.Attribute + + +EncryptedPrivateKeyInfo = rfc5958.EncryptedPrivateKeyInfo + +PrivateKeyInfo = rfc5958.PrivateKeyInfo + + +# CMSSingleAttribute is the same as Attribute in RFC 5652 except the attrValues +# SET must have one and only one member + +class AttributeType(univ.ObjectIdentifier): + pass + + +class AttributeValue(univ.Any): + pass + + +class AttributeValues(univ.SetOf): + pass + +AttributeValues.componentType = AttributeValue() + + +class CMSSingleAttribute(univ.Sequence): + pass + +CMSSingleAttribute.componentType = namedtype.NamedTypes( + namedtype.NamedType('attrType', AttributeType()), + namedtype.NamedType('attrValues', + AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)), + openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap) + ) +) + + +# Object identifier arcs + +rsadsi = _OID(1, 2, 840, 113549) + +pkcs = _OID(rsadsi, 1) + +pkcs_9 = _OID(pkcs, 9) + +certTypes = _OID(pkcs_9, 22) + +crlTypes = _OID(pkcs_9, 23) + +pkcs_12 = _OID(pkcs, 12) + + +# PBE Algorithm Identifiers and Parameters Structure 
+ +pkcs_12PbeIds = _OID(pkcs_12, 1) + +pbeWithSHAAnd128BitRC4 = _OID(pkcs_12PbeIds, 1) + +pbeWithSHAAnd40BitRC4 = _OID(pkcs_12PbeIds, 2) + +pbeWithSHAAnd3_KeyTripleDES_CBC = _OID(pkcs_12PbeIds, 3) + +pbeWithSHAAnd2_KeyTripleDES_CBC = _OID(pkcs_12PbeIds, 4) + +pbeWithSHAAnd128BitRC2_CBC = _OID(pkcs_12PbeIds, 5) + +pbeWithSHAAnd40BitRC2_CBC = _OID(pkcs_12PbeIds, 6) + + +class Pkcs_12PbeParams(univ.Sequence): + pass + +Pkcs_12PbeParams.componentType = namedtype.NamedTypes( + namedtype.NamedType('salt', univ.OctetString()), + namedtype.NamedType('iterations', univ.Integer()) +) + + +# Bag types + +bagtypes = _OID(pkcs_12, 10, 1) + +class BAG_TYPE(univ.Sequence): + pass + +BAG_TYPE.componentType = namedtype.NamedTypes( + namedtype.NamedType('id', univ.ObjectIdentifier()), + namedtype.NamedType('unnamed1', univ.Any(), + openType=opentype.OpenType('attrType', pkcs12BagTypeMap) + ) +) + + +id_keyBag = _OID(bagtypes, 1) + +class KeyBag(PrivateKeyInfo): + pass + + +id_pkcs8ShroudedKeyBag = _OID(bagtypes, 2) + +class PKCS8ShroudedKeyBag(EncryptedPrivateKeyInfo): + pass + + +id_certBag = _OID(bagtypes, 3) + +class CertBag(univ.Sequence): + pass + +CertBag.componentType = namedtype.NamedTypes( + namedtype.NamedType('certId', univ.ObjectIdentifier()), + namedtype.NamedType('certValue', + univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)), + openType=opentype.OpenType('certId', pkcs12CertBagMap) + ) +) + + +x509Certificate = CertBag() +x509Certificate['certId'] = _OID(certTypes, 1) +x509Certificate['certValue'] = univ.OctetString() +# DER-encoded X.509 certificate stored in OCTET STRING + + +sdsiCertificate = CertBag() +sdsiCertificate['certId'] = _OID(certTypes, 2) +sdsiCertificate['certValue'] = char.IA5String() +# Base64-encoded SDSI certificate stored in IA5String + + +id_CRLBag = _OID(bagtypes, 4) + +class CRLBag(univ.Sequence): + pass + +CRLBag.componentType = namedtype.NamedTypes( + namedtype.NamedType('crlId', univ.ObjectIdentifier()), + namedtype.NamedType('crlValue', + univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)), + openType=opentype.OpenType('crlId', pkcs12CRLBagMap) + ) +) + + +x509CRL = CRLBag() +x509CRL['crlId'] = _OID(crlTypes, 1) +x509CRL['crlValue'] = univ.OctetString() +# DER-encoded X.509 CRL stored in OCTET STRING + + +id_secretBag = _OID(bagtypes, 5) + +class SecretBag(univ.Sequence): + pass + +SecretBag.componentType = namedtype.NamedTypes( + namedtype.NamedType('secretTypeId', univ.ObjectIdentifier()), + namedtype.NamedType('secretValue', + univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)), + openType=opentype.OpenType('secretTypeId', pkcs12SecretBagMap) + ) +) + + +id_safeContentsBag = _OID(bagtypes, 6) + +class SafeBag(univ.Sequence): + pass + +SafeBag.componentType = namedtype.NamedTypes( + namedtype.NamedType('bagId', univ.ObjectIdentifier()), + namedtype.NamedType('bagValue', + univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)), + openType=opentype.OpenType('bagId', pkcs12BagTypeMap) + ), + namedtype.OptionalNamedType('bagAttributes', + univ.SetOf(componentType=PKCS12Attribute()) + ) +) + + +class SafeContents(univ.SequenceOf): + pass + +SafeContents.componentType = SafeBag() + + +# The PFX PDU + +class AuthenticatedSafe(univ.SequenceOf): + pass + +AuthenticatedSafe.componentType = ContentInfo() +# Data if unencrypted +# EncryptedData if password-encrypted +# EnvelopedData if public key-encrypted + + +class MacData(univ.Sequence): + 
pass + +MacData.componentType = namedtype.NamedTypes( + namedtype.NamedType('mac', DigestInfo()), + namedtype.NamedType('macSalt', univ.OctetString()), + namedtype.DefaultedNamedType('iterations', univ.Integer().subtype(value=1)) + # Note: The default is for historical reasons and its use is deprecated +) + + +class PFX(univ.Sequence): + pass + +PFX.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', + univ.Integer(namedValues=namedval.NamedValues(('v3', 3))) + ), + namedtype.NamedType('authSafe', ContentInfo()), + namedtype.OptionalNamedType('macData', MacData()) +) + + +# Local key identifier (also defined as certificateAttribute in rfc2985.py) + +pkcs_9_at_localKeyId = _OID(pkcs_9, 21) + +localKeyId = CMSSingleAttribute() +localKeyId['attrType'] = pkcs_9_at_localKeyId +localKeyId['attrValues'][0] = univ.OctetString() + + +# Friendly name (also defined as certificateAttribute in rfc2985.py) + +pkcs_9_ub_pkcs9String = univ.Integer(255) + +pkcs_9_ub_friendlyName = univ.Integer(pkcs_9_ub_pkcs9String) + +pkcs_9_at_friendlyName = _OID(pkcs_9, 20) + +class FriendlyName(char.BMPString): + pass + +FriendlyName.subtypeSpec = constraint.ValueSizeConstraint(1, pkcs_9_ub_friendlyName) + + +friendlyName = CMSSingleAttribute() +friendlyName['attrType'] = pkcs_9_at_friendlyName +friendlyName['attrValues'][0] = FriendlyName() + + +# Update the PKCS#12 maps + +_pkcs12BagTypeMap = { + id_keyBag: KeyBag(), + id_pkcs8ShroudedKeyBag: PKCS8ShroudedKeyBag(), + id_certBag: CertBag(), + id_CRLBag: CRLBag(), + id_secretBag: SecretBag(), + id_safeContentsBag: SafeBag(), +} + +pkcs12BagTypeMap.update(_pkcs12BagTypeMap) + + +_pkcs12CertBagMap = { + _OID(certTypes, 1): univ.OctetString(), + _OID(certTypes, 2): char.IA5String(), +} + +pkcs12CertBagMap.update(_pkcs12CertBagMap) + + +_pkcs12CRLBagMap = { + _OID(crlTypes, 1): univ.OctetString(), +} + +pkcs12CRLBagMap.update(_pkcs12CRLBagMap) + + +# Update the Algorithm Identifier map + +_algorithmIdentifierMapUpdate = { + pbeWithSHAAnd128BitRC4: Pkcs_12PbeParams(), + pbeWithSHAAnd40BitRC4: Pkcs_12PbeParams(), + pbeWithSHAAnd3_KeyTripleDES_CBC: Pkcs_12PbeParams(), + pbeWithSHAAnd2_KeyTripleDES_CBC: Pkcs_12PbeParams(), + pbeWithSHAAnd128BitRC2_CBC: Pkcs_12PbeParams(), + pbeWithSHAAnd40BitRC2_CBC: Pkcs_12PbeParams(), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) + + +# Update the CMS Attribute map + +_cmsAttributesMapUpdate = { + pkcs_9_at_friendlyName: FriendlyName(), + pkcs_9_at_localKeyId: univ.OctetString(), +} + +rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7296.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7296.py new file mode 100644 index 0000000000000000000000000000000000000000..95a191a14ded9bf30fddb5b2d2654eb9382e8bae --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7296.py @@ -0,0 +1,32 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# IKEv2 Certificate Bundle +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc7296.txt + +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +class CertificateOrCRL(univ.Choice): + pass + +CertificateOrCRL.componentType = namedtype.NamedTypes( + namedtype.NamedType('cert', rfc5280.Certificate().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('crl', rfc5280.CertificateList().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class CertificateBundle(univ.SequenceOf): + pass + +CertificateBundle.componentType = CertificateOrCRL() diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7508.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7508.py new file mode 100644 index 0000000000000000000000000000000000000000..66460240f149edf865f46759e862729655a872a0 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7508.py @@ -0,0 +1,90 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Securing Header Fields with S/MIME +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc7508.txt +# https://www.rfc-editor.org/errata/eid5875 +# + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import univ + +from pyasn1_modules import rfc5652 + +import string + +MAX = float('inf') + + +class Algorithm(univ.Enumerated): + namedValues = namedval.NamedValues( + ('canonAlgorithmSimple', 0), + ('canonAlgorithmRelaxed', 1) + ) + + +class HeaderFieldStatus(univ.Integer): + namedValues = namedval.NamedValues( + ('duplicated', 0), + ('deleted', 1), + ('modified', 2) + ) + + +class HeaderFieldName(char.VisibleString): + subtypeSpec = ( + constraint.PermittedAlphabetConstraint(*string.printable) - + constraint.PermittedAlphabetConstraint(':') + ) + + +class HeaderFieldValue(char.UTF8String): + pass + + +class HeaderField(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('field-Name', HeaderFieldName()), + namedtype.NamedType('field-Value', HeaderFieldValue()), + namedtype.DefaultedNamedType('field-Status', + HeaderFieldStatus().subtype(value='duplicated')) + ) + + +class HeaderFields(univ.SequenceOf): + componentType = HeaderField() + subtypeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class SecureHeaderFields(univ.Set): + componentType = namedtype.NamedTypes( + namedtype.NamedType('canonAlgorithm', Algorithm()), + namedtype.NamedType('secHeaderFields', HeaderFields()) + ) + + +id_aa = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, 2, )) + +id_aa_secureHeaderFieldsIdentifier = id_aa + (55, ) + + + +# Map of Attribute Type OIDs to Attributes added to the +# ones that are in rfc5652.py + +_cmsAttributesMapUpdate = { + id_aa_secureHeaderFieldsIdentifier: SecureHeaderFields(), +} + +rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) + diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7585.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7585.py new file mode 100644 index 0000000000000000000000000000000000000000..b3fd4a5bacab6a00a68b78620ff832b0852b5c43 --- /dev/null 
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7585.py
@@ -0,0 +1,50 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Network Access Identifier (NAI) Realm Name for Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7585.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# NAI Realm Name for Certificates
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_on = id_pkix + (8, )
+
+id_on_naiRealm = id_on + (8, )
+
+
+ub_naiRealm_length = univ.Integer(255)
+
+
+class NAIRealm(char.UTF8String):
+    subtypeSpec = constraint.ValueSizeConstraint(1, ub_naiRealm_length)
+
+
+naiRealm = rfc5280.AnotherName()
+naiRealm['type-id'] = id_on_naiRealm
+naiRealm['value'] = NAIRealm()
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+    id_on_naiRealm: NAIRealm(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7633.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7633.py
new file mode 100644
index 0000000000000000000000000000000000000000..f518440ff4746dd7ad353d1dca0262c70cc84f6d
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7633.py
@@ -0,0 +1,38 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Transport Layer Security (TLS) Feature Certificate Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7633.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# TLS Features Extension
+
+id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
+
+id_pe_tlsfeature = id_pe + (24, )
+
+
+class Features(univ.SequenceOf):
+    componentType = univ.Integer()
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+    id_pe_tlsfeature: Features(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
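+
+# Usage sketch (illustrative, not part of RFC 7633): building the extension
+# payload for the status_request feature (TLS extension number 5, the
+# "must-staple" case) and encoding it with pyasn1's DER encoder:
+#
+#     from pyasn1.codec.der.encoder import encode
+#
+#     features = Features()
+#     features.append(univ.Integer(5))    # status_request
+#     der = encode(features)
+#
+# A decoder can pick the right spec for a parsed extension generically via
+# rfc5280.certificateExtensionsMap[id_pe_tlsfeature].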
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7773.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7773.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fee2aa346c1da71e57c0143a0bf7515f3ee2bac
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7773.py
@@ -0,0 +1,52 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Authentication Context Certificate Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7773.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Authentication Context Extension
+
+e_legnamnden = univ.ObjectIdentifier('1.2.752.201')
+
+id_eleg_ce = e_legnamnden + (5, )
+
+id_ce_authContext = id_eleg_ce + (1, )
+
+
+class AuthenticationContext(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('contextType', char.UTF8String()),
+        namedtype.OptionalNamedType('contextInfo', char.UTF8String())
+    )
+
+
+class AuthenticationContexts(univ.SequenceOf):
+    componentType = AuthenticationContext()
+    subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+    id_ce_authContext: AuthenticationContexts(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7894.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7894.py
new file mode 100644
index 0000000000000000000000000000000000000000..41936433d14bf6e0ce074649931e826758251ade
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7894.py
@@ -0,0 +1,92 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Alternative Challenge Password Attributes for EST
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7894.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6402
+from pyasn1_modules import rfc7191
+
+
+# SingleAttribute is the same as Attribute in RFC 5652, except that the
+# attrValues SET must have one and only one member
+
+Attribute = rfc7191.SingleAttribute
+
+
+# DirectoryString is the same as RFC 5280, except the length is limited to 255
+
+class DirectoryString(univ.Choice):
+    pass
+
+DirectoryString.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('teletexString', char.TeletexString().subtype(
+        subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+    namedtype.NamedType('printableString', char.PrintableString().subtype(
+        subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+    namedtype.NamedType('universalString', char.UniversalString().subtype(
+        subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+    namedtype.NamedType('utf8String', char.UTF8String().subtype(
+        subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+    namedtype.NamedType('bmpString', char.BMPString().subtype(
+        subtypeSpec=constraint.ValueSizeConstraint(1, 255)))
+)
+
+
+# OTP Challenge Attribute
+
+id_aa_otpChallenge = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.56')
+
+ub_aa_otpChallenge = univ.Integer(255)
+
+otpChallenge = Attribute()
+otpChallenge['attrType'] = id_aa_otpChallenge
+otpChallenge['attrValues'][0] = DirectoryString()
+
+
+# Revocation Challenge Attribute
+
+id_aa_revocationChallenge = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.57')
+
+ub_aa_revocationChallenge = univ.Integer(255)
+
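+# Usage sketch (illustrative, not part of RFC 7894): each prototype
+# attribute in this module (otpChallenge above, revocationChallenge and
+# estIdentityLinking below) is completed by choosing one DirectoryString
+# alternative; 'challenge-value' here is a hypothetical server-supplied
+# string:
+#
+#     from pyasn1.codec.der.encoder import encode
+#
+#     ds = DirectoryString()
+#     ds['utf8String'] = 'challenge-value'
+#     attr = Attribute()
+#     attr['attrType'] = id_aa_revocationChallenge
+#     attr['attrValues'][0] = ds
+#     der = encode(attr)
+#
+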
+revocationChallenge = Attribute() +revocationChallenge['attrType'] = id_aa_revocationChallenge +revocationChallenge['attrValues'][0] = DirectoryString() + + +# EST Identity Linking Attribute + +id_aa_estIdentityLinking = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.58') + +ub_aa_est_identity_linking = univ.Integer(255) + +estIdentityLinking = Attribute() +estIdentityLinking['attrType'] = id_aa_estIdentityLinking +estIdentityLinking['attrValues'][0] = DirectoryString() + + +# Map of Attribute Type OIDs to Attributes added to the +# ones that are in rfc6402.py + +_cmcControlAttributesMapUpdate = { + id_aa_otpChallenge: DirectoryString(), + id_aa_revocationChallenge: DirectoryString(), + id_aa_estIdentityLinking: DirectoryString(), +} + +rfc6402.cmcControlAttributesMap.update(_cmcControlAttributesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7906.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7906.py new file mode 100644 index 0000000000000000000000000000000000000000..fa5f6b0733c6cc12f4effd830db579b705f90ed3 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7906.py @@ -0,0 +1,736 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# NSA's CMS Key Management Attributes +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc7906.txt +# https://www.rfc-editor.org/errata/eid5850 +# + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc2634 +from pyasn1_modules import rfc4108 +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5652 +from pyasn1_modules import rfc6010 +from pyasn1_modules import rfc6019 +from pyasn1_modules import rfc7191 + +MAX = float('inf') + + +# Imports From RFC 2634 + +id_aa_contentHint = rfc2634.id_aa_contentHint + +ContentHints = rfc2634.ContentHints + +id_aa_securityLabel = rfc2634.id_aa_securityLabel + +SecurityPolicyIdentifier = rfc2634.SecurityPolicyIdentifier + +SecurityClassification = rfc2634.SecurityClassification + +ESSPrivacyMark = rfc2634.ESSPrivacyMark + +SecurityCategories= rfc2634.SecurityCategories + +ESSSecurityLabel = rfc2634.ESSSecurityLabel + + +# Imports From RFC 4108 + +id_aa_communityIdentifiers = rfc4108.id_aa_communityIdentifiers + +CommunityIdentifier = rfc4108.CommunityIdentifier + +CommunityIdentifiers = rfc4108.CommunityIdentifiers + + +# Imports From RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + +Name = rfc5280.Name + +Certificate = rfc5280.Certificate + +GeneralNames = rfc5280.GeneralNames + +GeneralName = rfc5280.GeneralName + + +SubjectInfoAccessSyntax = rfc5280.SubjectInfoAccessSyntax + +id_pkix = rfc5280.id_pkix + +id_pe = rfc5280.id_pe + +id_pe_subjectInfoAccess = rfc5280.id_pe_subjectInfoAccess + + +# Imports From RFC 6010 + +CMSContentConstraints = rfc6010.CMSContentConstraints + + +# Imports From RFC 6019 + +BinaryTime = rfc6019.BinaryTime + +id_aa_binarySigningTime = rfc6019.id_aa_binarySigningTime + +BinarySigningTime = rfc6019.BinarySigningTime + + +# Imports From RFC 5652 + +Attribute = rfc5652.Attribute + +CertificateSet = rfc5652.CertificateSet + +CertificateChoices = rfc5652.CertificateChoices + +id_contentType = rfc5652.id_contentType + +ContentType = rfc5652.ContentType + +id_messageDigest = rfc5652.id_messageDigest 
+ +MessageDigest = rfc5652.MessageDigest + + +# Imports From RFC 7191 + +SIREntityName = rfc7191.SIREntityName + +id_aa_KP_keyPkgIdAndReceiptReq = rfc7191.id_aa_KP_keyPkgIdAndReceiptReq + +KeyPkgIdentifierAndReceiptReq = rfc7191.KeyPkgIdentifierAndReceiptReq + + +# Key Province Attribute + +id_aa_KP_keyProvinceV2 = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.71') + + +class KeyProvinceV2(univ.ObjectIdentifier): + pass + + +aa_keyProvince_v2 = Attribute() +aa_keyProvince_v2['attrType'] = id_aa_KP_keyProvinceV2 +aa_keyProvince_v2['attrValues'][0] = KeyProvinceV2() + + +# Manifest Attribute + +id_aa_KP_manifest = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.72') + + +class ShortTitle(char.PrintableString): + pass + + +class Manifest(univ.SequenceOf): + pass + +Manifest.componentType = ShortTitle() +Manifest.subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +aa_manifest = Attribute() +aa_manifest['attrType'] = id_aa_KP_manifest +aa_manifest['attrValues'][0] = Manifest() + + +# Key Algorithm Attribute + +id_kma_keyAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.1') + + +class KeyAlgorithm(univ.Sequence): + pass + +KeyAlgorithm.componentType = namedtype.NamedTypes( + namedtype.NamedType('keyAlg', univ.ObjectIdentifier()), + namedtype.OptionalNamedType('checkWordAlg', univ.ObjectIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.OptionalNamedType('crcAlg', univ.ObjectIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) +) + + +aa_keyAlgorithm = Attribute() +aa_keyAlgorithm['attrType'] = id_kma_keyAlgorithm +aa_keyAlgorithm['attrValues'][0] = KeyAlgorithm() + + +# User Certificate Attribute + +id_at_userCertificate = univ.ObjectIdentifier('2.5.4.36') + + +aa_userCertificate = Attribute() +aa_userCertificate['attrType'] = id_at_userCertificate +aa_userCertificate['attrValues'][0] = Certificate() + + +# Key Package Receivers Attribute + +id_kma_keyPkgReceiversV2 = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.16') + + +class KeyPkgReceiver(univ.Choice): + pass + +KeyPkgReceiver.componentType = namedtype.NamedTypes( + namedtype.NamedType('sirEntity', SIREntityName().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('community', CommunityIdentifier().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class KeyPkgReceiversV2(univ.SequenceOf): + pass + +KeyPkgReceiversV2.componentType = KeyPkgReceiver() +KeyPkgReceiversV2.subtypeSpec=constraint.ValueSizeConstraint(1, MAX) + + +aa_keyPackageReceivers_v2 = Attribute() +aa_keyPackageReceivers_v2['attrType'] = id_kma_keyPkgReceiversV2 +aa_keyPackageReceivers_v2['attrValues'][0] = KeyPkgReceiversV2() + + +# TSEC Nomenclature Attribute + +id_kma_TSECNomenclature = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.3') + + +class CharEdition(char.PrintableString): + pass + + +class CharEditionRange(univ.Sequence): + pass + +CharEditionRange.componentType = namedtype.NamedTypes( + namedtype.NamedType('firstCharEdition', CharEdition()), + namedtype.NamedType('lastCharEdition', CharEdition()) +) + + +class NumEdition(univ.Integer): + pass + +NumEdition.subtypeSpec = constraint.ValueRangeConstraint(0, 308915776) + + +class NumEditionRange(univ.Sequence): + pass + +NumEditionRange.componentType = namedtype.NamedTypes( + namedtype.NamedType('firstNumEdition', NumEdition()), + namedtype.NamedType('lastNumEdition', NumEdition()) +) + + +class EditionID(univ.Choice): + pass + 
+EditionID.componentType = namedtype.NamedTypes( + namedtype.NamedType('char', univ.Choice(componentType=namedtype.NamedTypes( + namedtype.NamedType('charEdition', CharEdition().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('charEditionRange', CharEditionRange().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))) + )) + ), + namedtype.NamedType('num', univ.Choice(componentType=namedtype.NamedTypes( + namedtype.NamedType('numEdition', NumEdition().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))), + namedtype.NamedType('numEditionRange', NumEditionRange().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))) + )) + ) +) + + +class Register(univ.Integer): + pass + +Register.subtypeSpec = constraint.ValueRangeConstraint(0, 2147483647) + + +class RegisterRange(univ.Sequence): + pass + +RegisterRange.componentType = namedtype.NamedTypes( + namedtype.NamedType('firstRegister', Register()), + namedtype.NamedType('lastRegister', Register()) +) + + +class RegisterID(univ.Choice): + pass + +RegisterID.componentType = namedtype.NamedTypes( + namedtype.NamedType('register', Register().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))), + namedtype.NamedType('registerRange', RegisterRange().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))) +) + + +class SegmentNumber(univ.Integer): + pass + +SegmentNumber.subtypeSpec = constraint.ValueRangeConstraint(1, 127) + + +class SegmentRange(univ.Sequence): + pass + +SegmentRange.componentType = namedtype.NamedTypes( + namedtype.NamedType('firstSegment', SegmentNumber()), + namedtype.NamedType('lastSegment', SegmentNumber()) +) + + +class SegmentID(univ.Choice): + pass + +SegmentID.componentType = namedtype.NamedTypes( + namedtype.NamedType('segmentNumber', SegmentNumber().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))), + namedtype.NamedType('segmentRange', SegmentRange().subtype( + implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))) +) + + +class TSECNomenclature(univ.Sequence): + pass + +TSECNomenclature.componentType = namedtype.NamedTypes( + namedtype.NamedType('shortTitle', ShortTitle()), + namedtype.OptionalNamedType('editionID', EditionID()), + namedtype.OptionalNamedType('registerID', RegisterID()), + namedtype.OptionalNamedType('segmentID', SegmentID()) +) + + +aa_tsecNomenclature = Attribute() +aa_tsecNomenclature['attrType'] = id_kma_TSECNomenclature +aa_tsecNomenclature['attrValues'][0] = TSECNomenclature() + + +# Key Purpose Attribute + +id_kma_keyPurpose = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.13') + + +class KeyPurpose(univ.Enumerated): + pass + +KeyPurpose.namedValues = namedval.NamedValues( + ('n-a', 0), + ('a', 65), + ('b', 66), + ('l', 76), + ('m', 77), + ('r', 82), + ('s', 83), + ('t', 84), + ('v', 86), + ('x', 88), + ('z', 90) +) + + +aa_keyPurpose = Attribute() +aa_keyPurpose['attrType'] = id_kma_keyPurpose +aa_keyPurpose['attrValues'][0] = KeyPurpose() + + +# Key Use Attribute + +id_kma_keyUse = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.14') + + +class KeyUse(univ.Enumerated): + pass + +KeyUse.namedValues = namedval.NamedValues( + ('n-a', 0), + ('ffk', 1), + ('kek', 2), + ('kpk', 3), + ('msk', 4), + ('qkek', 5), + ('tek', 6), + ('tsk', 7), + ('trkek', 8), + ('nfk', 9), + ('effk', 10), + ('ebfk', 11), + ('aek', 12), + ('wod', 13), + ('kesk', 246), + ('eik', 247), + ('ask', 248), + 
    ('kmk', 249),
+    ('rsk', 250),
+    ('csk', 251),
+    ('sak', 252),
+    ('rgk', 253),
+    ('cek', 254),
+    ('exk', 255)
+)
+
+
+aa_keyUse = Attribute()
+aa_keyUse['attrType'] = id_kma_keyUse
+aa_keyUse['attrValues'][0] = KeyUse()
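+
+# Usage sketch (illustrative, not part of RFC 7906): the aa_keyUse prototype
+# above is completed by assigning one of the named values and DER-encoding
+# the attribute, e.g.:
+#
+#     from pyasn1.codec.der.encoder import encode
+#
+#     attr = Attribute()
+#     attr['attrType'] = id_kma_keyUse
+#     attr['attrValues'][0] = KeyUse('tek')    # traffic encryption key
+#     der = encode(attr)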
+
+
+# Transport Key Attribute
+
+id_kma_transportKey = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.15')
+
+
+class TransOp(univ.Enumerated):
+    pass
+
+TransOp.namedValues = namedval.NamedValues(
+    ('transport', 1),
+    ('operational', 2)
+)
+
+
+aa_transportKey = Attribute()
+aa_transportKey['attrType'] = id_kma_transportKey
+aa_transportKey['attrValues'][0] = TransOp()
+
+
+# Key Distribution Period Attribute
+
+id_kma_keyDistPeriod = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.5')
+
+
+class KeyDistPeriod(univ.Sequence):
+    pass
+
+KeyDistPeriod.componentType = namedtype.NamedTypes(
+    namedtype.OptionalNamedType('doNotDistBefore', BinaryTime().subtype(
+        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+    namedtype.NamedType('doNotDistAfter', BinaryTime())
+)
+
+
+aa_keyDistributionPeriod = Attribute()
+aa_keyDistributionPeriod['attrType'] = id_kma_keyDistPeriod
+aa_keyDistributionPeriod['attrValues'][0] = KeyDistPeriod()
+
+
+# Key Validity Period Attribute
+
+id_kma_keyValidityPeriod = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.6')
+
+
+class KeyValidityPeriod(univ.Sequence):
+    pass
+
+KeyValidityPeriod.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('doNotUseBefore', BinaryTime()),
+    namedtype.OptionalNamedType('doNotUseAfter', BinaryTime())
+)
+
+
+aa_keyValidityPeriod = Attribute()
+aa_keyValidityPeriod['attrType'] = id_kma_keyValidityPeriod
+aa_keyValidityPeriod['attrValues'][0] = KeyValidityPeriod()
+
+
+# Key Duration Attribute
+
+id_kma_keyDuration = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.7')
+
+
+ub_KeyDuration_months = univ.Integer(72)
+
+ub_KeyDuration_hours = univ.Integer(96)
+
+ub_KeyDuration_days = univ.Integer(732)
+
+ub_KeyDuration_weeks = univ.Integer(104)
+
+ub_KeyDuration_years = univ.Integer(100)
+
+
+class KeyDuration(univ.Choice):
+    pass
+
+KeyDuration.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('hours', univ.Integer().subtype(
+        subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_hours)).subtype(
+        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+    namedtype.NamedType('days', univ.Integer().subtype(
+        subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_days))),
+    namedtype.NamedType('weeks', univ.Integer().subtype(
+        subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_weeks)).subtype(
+        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+    namedtype.NamedType('months', univ.Integer().subtype(
+        subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_months)).subtype(
+        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+    namedtype.NamedType('years', univ.Integer().subtype(
+        subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_years)).subtype(
+        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+aa_keyDurationPeriod = Attribute()
+aa_keyDurationPeriod['attrType'] = id_kma_keyDuration
+aa_keyDurationPeriod['attrValues'][0] = KeyDuration()
+
+
+# Classification Attribute
+
+id_aa_KP_classification = univ.ObjectIdentifier(id_aa_securityLabel)
+
+
+id_enumeratedPermissiveAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.1')
+
+id_enumeratedRestrictiveAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.4')
+
+id_informativeAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.3')
+
+
+class SecurityAttribute(univ.Integer):
+    pass
+
+SecurityAttribute.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class EnumeratedTag(univ.Sequence):
+    pass
+
+EnumeratedTag.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('tagName', univ.ObjectIdentifier()),
+    namedtype.NamedType('attributeList', univ.SetOf(componentType=SecurityAttribute()))
+)
+
+
+class FreeFormField(univ.Choice):
+    pass
+
+FreeFormField.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('bitSetAttributes', univ.BitString()),  # Not permitted in RFC 7906
+    namedtype.NamedType('securityAttributes', univ.SetOf(componentType=SecurityAttribute()))
+)
+
+
+class InformativeTag(univ.Sequence):
+    pass
+
+InformativeTag.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('tagName', univ.ObjectIdentifier()),
+    namedtype.NamedType('attributes', FreeFormField())
+)
+
+
+class Classification(ESSSecurityLabel):
+    pass
+
+
+aa_classification = Attribute()
+aa_classification['attrType'] = id_aa_KP_classification
+aa_classification['attrValues'][0] = Classification()
+
+
+# Split Identifier Attribute
+
+id_kma_splitID = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.11')
+
+
+class SplitID(univ.Sequence):
+    pass
+
+SplitID.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('half', univ.Enumerated(
+        namedValues=namedval.NamedValues(('a', 0), ('b', 1)))),
+    namedtype.OptionalNamedType('combineAlg', AlgorithmIdentifier())
+)
+
+
+aa_splitIdentifier = Attribute()
+aa_splitIdentifier['attrType'] = id_kma_splitID
+aa_splitIdentifier['attrValues'][0] = SplitID()
+
+
+# Key Package Type Attribute
+
+id_kma_keyPkgType = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.12')
+
+
+class KeyPkgType(univ.ObjectIdentifier):
+    pass
+
+
+aa_keyPackageType = Attribute()
+aa_keyPackageType['attrType'] = id_kma_keyPkgType
+aa_keyPackageType['attrValues'][0] = KeyPkgType()
+
+
+# Signature Usage Attribute
+
+id_kma_sigUsageV3 = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.22')
+
+
+class SignatureUsage(CMSContentConstraints):
+    pass
+
+
+aa_signatureUsage_v3 = Attribute()
+aa_signatureUsage_v3['attrType'] = id_kma_sigUsageV3
+aa_signatureUsage_v3['attrValues'][0] = SignatureUsage()
+
+
+# Other Certificate Format Attribute
+
+id_kma_otherCertFormats = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.19')
+
+
+aa_otherCertificateFormats = Attribute()
+aa_otherCertificateFormats['attrType'] = id_kma_otherCertFormats
+aa_otherCertificateFormats['attrValues'][0] = CertificateChoices()
+
+
+# PKI Path Attribute
+
+id_at_pkiPath = univ.ObjectIdentifier('2.5.4.70')
+
+
+class PkiPath(univ.SequenceOf):
+    pass
+
+PkiPath.componentType = Certificate()
+PkiPath.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+aa_pkiPath = Attribute()
+aa_pkiPath['attrType'] = id_at_pkiPath
+aa_pkiPath['attrValues'][0] = PkiPath()
+
+
+# Useful Certificates Attribute
+
+id_kma_usefulCerts = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.20')
+
+
+aa_usefulCertificates = Attribute()
+aa_usefulCertificates['attrType'] = id_kma_usefulCerts
+aa_usefulCertificates['attrValues'][0] = CertificateSet()
+
+
+# Key Wrap Attribute
+
+id_kma_keyWrapAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.21')
+
+
+aa_keyWrapAlgorithm = Attribute()
+aa_keyWrapAlgorithm['attrType'] = id_kma_keyWrapAlgorithm
+aa_keyWrapAlgorithm['attrValues'][0] = AlgorithmIdentifier()
+
+
+# Content Decryption Key Identifier Attribute
+
+id_aa_KP_contentDecryptKeyID = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.66')
+
+
+class
ContentDecryptKeyID(univ.OctetString): + pass + + +aa_contentDecryptKeyIdentifier = Attribute() +aa_contentDecryptKeyIdentifier['attrType'] = id_aa_KP_contentDecryptKeyID +aa_contentDecryptKeyIdentifier['attrValues'][0] = ContentDecryptKeyID() + + +# Certificate Pointers Attribute + +aa_certificatePointers = Attribute() +aa_certificatePointers['attrType'] = id_pe_subjectInfoAccess +aa_certificatePointers['attrValues'][0] = SubjectInfoAccessSyntax() + + +# CRL Pointers Attribute + +id_aa_KP_crlPointers = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.70') + + +aa_cRLDistributionPoints = Attribute() +aa_cRLDistributionPoints['attrType'] = id_aa_KP_crlPointers +aa_cRLDistributionPoints['attrValues'][0] = GeneralNames() + + +# Extended Error Codes + +id_errorCodes = univ.ObjectIdentifier('2.16.840.1.101.2.1.22') + +id_missingKeyType = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.1') + +id_privacyMarkTooLong = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.2') + +id_unrecognizedSecurityPolicy = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.3') + + +# Map of Attribute Type OIDs to Attributes added to the +# ones that are in rfc5652.py + +_cmsAttributesMapUpdate = { + id_aa_contentHint: ContentHints(), + id_aa_communityIdentifiers: CommunityIdentifiers(), + id_aa_binarySigningTime: BinarySigningTime(), + id_contentType: ContentType(), + id_messageDigest: MessageDigest(), + id_aa_KP_keyPkgIdAndReceiptReq: KeyPkgIdentifierAndReceiptReq(), + id_aa_KP_keyProvinceV2: KeyProvinceV2(), + id_aa_KP_manifest: Manifest(), + id_kma_keyAlgorithm: KeyAlgorithm(), + id_at_userCertificate: Certificate(), + id_kma_keyPkgReceiversV2: KeyPkgReceiversV2(), + id_kma_TSECNomenclature: TSECNomenclature(), + id_kma_keyPurpose: KeyPurpose(), + id_kma_keyUse: KeyUse(), + id_kma_transportKey: TransOp(), + id_kma_keyDistPeriod: KeyDistPeriod(), + id_kma_keyValidityPeriod: KeyValidityPeriod(), + id_kma_keyDuration: KeyDuration(), + id_aa_KP_classification: Classification(), + id_kma_splitID: SplitID(), + id_kma_keyPkgType: KeyPkgType(), + id_kma_sigUsageV3: SignatureUsage(), + id_kma_otherCertFormats: CertificateChoices(), + id_at_pkiPath: PkiPath(), + id_kma_usefulCerts: CertificateSet(), + id_kma_keyWrapAlgorithm: AlgorithmIdentifier(), + id_aa_KP_contentDecryptKeyID: ContentDecryptKeyID(), + id_pe_subjectInfoAccess: SubjectInfoAccessSyntax(), + id_aa_KP_crlPointers: GeneralNames(), +} + +rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7914.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7914.py new file mode 100644 index 0000000000000000000000000000000000000000..99e95515672280db5df6d0e7e336d678e89d89aa --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc7914.py @@ -0,0 +1,49 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# The scrypt Password-Based Key Derivation Function
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7914.txt
+# https://www.rfc-editor.org/errata/eid5871
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+id_scrypt = univ.ObjectIdentifier('1.3.6.1.4.1.11591.4.11')
+
+
+class Scrypt_params(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('salt',
+            univ.OctetString()),
+        namedtype.NamedType('costParameter',
+            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+        namedtype.NamedType('blockSize',
+            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+        namedtype.NamedType('parallelizationParameter',
+            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+        namedtype.OptionalNamedType('keyLength',
+            univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX)))
+    )
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+    id_scrypt: Scrypt_params(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8017.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8017.py
new file mode 100644
index 0000000000000000000000000000000000000000..fefed1dcd6b5e015824bf3df34ed0879dcc9327b
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8017.py
@@ -0,0 +1,153 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #1: RSA Cryptography Specifications Version 2.2
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8017.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2437
+from pyasn1_modules import rfc3447
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Import Algorithm Identifier from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+class DigestAlgorithm(AlgorithmIdentifier):
+    pass
+
+class HashAlgorithm(AlgorithmIdentifier):
+    pass
+
+class MaskGenAlgorithm(AlgorithmIdentifier):
+    pass
+
+class PSourceAlgorithm(AlgorithmIdentifier):
+    pass
+
+
+# Object identifiers from NIST SHA2
+
+hashAlgs = univ.ObjectIdentifier('2.16.840.1.101.3.4.2')
+id_sha256 = rfc4055.id_sha256
+id_sha384 = rfc4055.id_sha384
+id_sha512 = rfc4055.id_sha512
+id_sha224 = rfc4055.id_sha224
+id_sha512_224 = hashAlgs + (5, )
+id_sha512_256 = hashAlgs + (6, )
+
+
+# Basic object identifiers
+
+pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
+rsaEncryption = rfc2437.rsaEncryption
+id_RSAES_OAEP = rfc2437.id_RSAES_OAEP
+id_pSpecified = rfc2437.id_pSpecified
+id_RSASSA_PSS = rfc4055.id_RSASSA_PSS
+md2WithRSAEncryption = rfc2437.md2WithRSAEncryption
+md5WithRSAEncryption = rfc2437.md5WithRSAEncryption
+sha1WithRSAEncryption = rfc2437.sha1WithRSAEncryption
+sha224WithRSAEncryption = rfc4055.sha224WithRSAEncryption
+sha256WithRSAEncryption = rfc4055.sha256WithRSAEncryption
+sha384WithRSAEncryption = rfc4055.sha384WithRSAEncryption
+sha512WithRSAEncryption = rfc4055.sha512WithRSAEncryption
+sha512_224WithRSAEncryption = pkcs_1 + (15, ) +sha512_256WithRSAEncryption = pkcs_1 + (16, ) +id_sha1 = rfc2437.id_sha1 +id_md2 = univ.ObjectIdentifier('1.2.840.113549.2.2') +id_md5 = univ.ObjectIdentifier('1.2.840.113549.2.5') +id_mgf1 = rfc2437.id_mgf1 + + +# Default parameter values + +sha1 = rfc4055.sha1Identifier +SHA1Parameters = univ.Null("") + +mgf1SHA1 = rfc4055.mgf1SHA1Identifier + +class EncodingParameters(univ.OctetString): + subtypeSpec = constraint.ValueSizeConstraint(0, MAX) + +pSpecifiedEmpty = rfc4055.pSpecifiedEmptyIdentifier + +emptyString = EncodingParameters(value='') + + +# Main structures + +class Version(univ.Integer): + namedValues = namedval.NamedValues( + ('two-prime', 0), + ('multi', 1) + ) + +class TrailerField(univ.Integer): + namedValues = namedval.NamedValues( + ('trailerFieldBC', 1) + ) + +RSAPublicKey = rfc2437.RSAPublicKey + +OtherPrimeInfo = rfc3447.OtherPrimeInfo +OtherPrimeInfos = rfc3447.OtherPrimeInfos +RSAPrivateKey = rfc3447.RSAPrivateKey + +RSAES_OAEP_params = rfc4055.RSAES_OAEP_params +rSAES_OAEP_Default_Identifier = rfc4055.rSAES_OAEP_Default_Identifier + +RSASSA_PSS_params = rfc4055.RSASSA_PSS_params +rSASSA_PSS_Default_Identifier = rfc4055.rSASSA_PSS_Default_Identifier + + +# Syntax for the EMSA-PKCS1-v1_5 hash identifier + +class DigestInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('digestAlgorithm', DigestAlgorithm()), + namedtype.NamedType('digest', univ.OctetString()) + ) + + +# Update the Algorithm Identifier map + +_algorithmIdentifierMapUpdate = { + id_sha1: univ.Null(), + id_sha224: univ.Null(), + id_sha256: univ.Null(), + id_sha384: univ.Null(), + id_sha512: univ.Null(), + id_sha512_224: univ.Null(), + id_sha512_256: univ.Null(), + id_mgf1: AlgorithmIdentifier(), + id_pSpecified: univ.OctetString(), + id_RSAES_OAEP: RSAES_OAEP_params(), + id_RSASSA_PSS: RSASSA_PSS_params(), + md2WithRSAEncryption: univ.Null(), + md5WithRSAEncryption: univ.Null(), + sha1WithRSAEncryption: univ.Null(), + sha224WithRSAEncryption: univ.Null(), + sha256WithRSAEncryption: univ.Null(), + sha384WithRSAEncryption: univ.Null(), + sha512WithRSAEncryption: univ.Null(), + sha512_224WithRSAEncryption: univ.Null(), + sha512_256WithRSAEncryption: univ.Null(), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8018.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8018.py new file mode 100644 index 0000000000000000000000000000000000000000..7a44eea8d25e9cb59c44442e0dc683e3b30237f3 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8018.py @@ -0,0 +1,260 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. 
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# PKCS #5: Password-Based Cryptography Specification, Version 2.1 +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc8018.txt +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import univ + +from pyasn1_modules import rfc3565 +from pyasn1_modules import rfc5280 + +MAX = float('inf') + +def _OID(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + + return univ.ObjectIdentifier(output) + + +# Import from RFC 3565 + +AES_IV = rfc3565.AES_IV + + +# Import from RFC 5280 + +AlgorithmIdentifier = rfc5280.AlgorithmIdentifier + + +# Basic object identifiers + +nistAlgorithms = _OID(2, 16, 840, 1, 101, 3, 4) + +aes = _OID(nistAlgorithms, 1) + +oiw = _OID(1, 3, 14) + +rsadsi = _OID(1, 2, 840, 113549) + +pkcs = _OID(rsadsi, 1) + +digestAlgorithm = _OID(rsadsi, 2) + +encryptionAlgorithm = _OID(rsadsi, 3) + +pkcs_5 = _OID(pkcs, 5) + + + +# HMAC object identifiers + +id_hmacWithSHA1 = _OID(digestAlgorithm, 7) + +id_hmacWithSHA224 = _OID(digestAlgorithm, 8) + +id_hmacWithSHA256 = _OID(digestAlgorithm, 9) + +id_hmacWithSHA384 = _OID(digestAlgorithm, 10) + +id_hmacWithSHA512 = _OID(digestAlgorithm, 11) + +id_hmacWithSHA512_224 = _OID(digestAlgorithm, 12) + +id_hmacWithSHA512_256 = _OID(digestAlgorithm, 13) + + +# PBES1 object identifiers + +pbeWithMD2AndDES_CBC = _OID(pkcs_5, 1) + +pbeWithMD2AndRC2_CBC = _OID(pkcs_5, 4) + +pbeWithMD5AndDES_CBC = _OID(pkcs_5, 3) + +pbeWithMD5AndRC2_CBC = _OID(pkcs_5, 6) + +pbeWithSHA1AndDES_CBC = _OID(pkcs_5, 10) + +pbeWithSHA1AndRC2_CBC = _OID(pkcs_5, 11) + + +# Supporting techniques object identifiers + +desCBC = _OID(oiw, 3, 2, 7) + +des_EDE3_CBC = _OID(encryptionAlgorithm, 7) + +rc2CBC = _OID(encryptionAlgorithm, 2) + +rc5_CBC_PAD = _OID(encryptionAlgorithm, 9) + +aes128_CBC_PAD = _OID(aes, 2) + +aes192_CBC_PAD = _OID(aes, 22) + +aes256_CBC_PAD = _OID(aes, 42) + + +# PBES1 + +class PBEParameter(univ.Sequence): + pass + +PBEParameter.componentType = namedtype.NamedTypes( + namedtype.NamedType('salt', univ.OctetString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(8, 8))), + namedtype.NamedType('iterationCount', univ.Integer()) +) + + +# PBES2 + +id_PBES2 = _OID(pkcs_5, 13) + + +class PBES2_params(univ.Sequence): + pass + +PBES2_params.componentType = namedtype.NamedTypes( + namedtype.NamedType('keyDerivationFunc', AlgorithmIdentifier()), + namedtype.NamedType('encryptionScheme', AlgorithmIdentifier()) +) + + +# PBMAC1 + +id_PBMAC1 = _OID(pkcs_5, 14) + + +class PBMAC1_params(univ.Sequence): + pass + +PBMAC1_params.componentType = namedtype.NamedTypes( + namedtype.NamedType('keyDerivationFunc', AlgorithmIdentifier()), + namedtype.NamedType('messageAuthScheme', AlgorithmIdentifier()) +) + + +# PBKDF2 + +id_PBKDF2 = _OID(pkcs_5, 12) + + +algid_hmacWithSHA1 = AlgorithmIdentifier() +algid_hmacWithSHA1['algorithm'] = id_hmacWithSHA1 +algid_hmacWithSHA1['parameters'] = univ.Null("") + + +class PBKDF2_params(univ.Sequence): + pass + +PBKDF2_params.componentType = namedtype.NamedTypes( + namedtype.NamedType('salt', univ.Choice(componentType=namedtype.NamedTypes( + namedtype.NamedType('specified', univ.OctetString()), + namedtype.NamedType('otherSource', AlgorithmIdentifier()) + ))), + namedtype.NamedType('iterationCount', univ.Integer().subtype( + 
subtypeSpec=constraint.ValueRangeConstraint(1, MAX))), + namedtype.OptionalNamedType('keyLength', univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(1, MAX))), + namedtype.DefaultedNamedType('prf', algid_hmacWithSHA1) +) + + +# RC2 CBC algorithm parameter + +class RC2_CBC_Parameter(univ.Sequence): + pass + +RC2_CBC_Parameter.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('rc2ParameterVersion', univ.Integer()), + namedtype.NamedType('iv', univ.OctetString().subtype( + subtypeSpec=constraint.ValueSizeConstraint(8, 8))) +) + + +# RC5 CBC algorithm parameter + +class RC5_CBC_Parameters(univ.Sequence): + pass + +RC5_CBC_Parameters.componentType = namedtype.NamedTypes( + namedtype.NamedType('version', + univ.Integer(namedValues=namedval.NamedValues(('v1_0', 16))).subtype( + subtypeSpec=constraint.SingleValueConstraint(16))), + namedtype.NamedType('rounds', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(8, 127))), + namedtype.NamedType('blockSizeInBits', + univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(64, 128))), + namedtype.OptionalNamedType('iv', univ.OctetString()) +) + + +# Initialization Vector for AES: OCTET STRING (SIZE(16)) + +class AES_IV(univ.OctetString): + pass + +AES_IV.subtypeSpec = constraint.ValueSizeConstraint(16, 16) + + +# Initialization Vector for DES: OCTET STRING (SIZE(8)) + +class DES_IV(univ.OctetString): + pass + +DES_IV.subtypeSpec = constraint.ValueSizeConstraint(8, 8) + + +# Update the Algorithm Identifier map + +_algorithmIdentifierMapUpdate = { + # PBKDF2-PRFs + id_hmacWithSHA1: univ.Null(), + id_hmacWithSHA224: univ.Null(), + id_hmacWithSHA256: univ.Null(), + id_hmacWithSHA384: univ.Null(), + id_hmacWithSHA512: univ.Null(), + id_hmacWithSHA512_224: univ.Null(), + id_hmacWithSHA512_256: univ.Null(), + # PBES1Algorithms + pbeWithMD2AndDES_CBC: PBEParameter(), + pbeWithMD2AndRC2_CBC: PBEParameter(), + pbeWithMD5AndDES_CBC: PBEParameter(), + pbeWithMD5AndRC2_CBC: PBEParameter(), + pbeWithSHA1AndDES_CBC: PBEParameter(), + pbeWithSHA1AndRC2_CBC: PBEParameter(), + # PBES2Algorithms + id_PBES2: PBES2_params(), + # PBES2-KDFs + id_PBKDF2: PBKDF2_params(), + # PBMAC1Algorithms + id_PBMAC1: PBMAC1_params(), + # SupportingAlgorithms + desCBC: DES_IV(), + des_EDE3_CBC: DES_IV(), + rc2CBC: RC2_CBC_Parameter(), + rc5_CBC_PAD: RC5_CBC_Parameters(), + aes128_CBC_PAD: AES_IV(), + aes192_CBC_PAD: AES_IV(), + aes256_CBC_PAD: AES_IV(), +} + +rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8103.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8103.py new file mode 100644 index 0000000000000000000000000000000000000000..6429e8635f6d6db06dc3f981dbb80edb3940517a --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8103.py @@ -0,0 +1,36 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley with assistance from the asn1ate tool. +# Auto-generated by asn1ate v.0.6.0 from rfc8103.asn. 
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# ChaCha20Poly1305 algorithm for use with the Authenticated-Enveloped-Data
+# protecting content type for the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8103.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+
+def _OID(*components):
+    output = []
+    for x in tuple(components):
+        if isinstance(x, univ.ObjectIdentifier):
+            output.extend(list(x))
+        else:
+            output.append(int(x))
+
+    return univ.ObjectIdentifier(output)
+
+
+class AEADChaCha20Poly1305Nonce(univ.OctetString):
+    pass
+
+
+AEADChaCha20Poly1305Nonce.subtypeSpec = constraint.ValueSizeConstraint(12, 12)
+
+id_alg_AEADChaCha20Poly1305 = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 18)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8209.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8209.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d70f51b0c001f5d8165d90bb066f51b68892996
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8209.py
@@ -0,0 +1,20 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# BGPsec Router PKI Profile
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8209.txt
+#
+
+from pyasn1.type import univ
+
+
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+
+id_kp_bgpsec_router = id_kp + (30, )
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8226.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8226.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7fe9460e95d8cef0effb2198859afaf73d5c910
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8226.py
@@ -0,0 +1,149 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool, with manual
+# changes to implement appropriate constraints and added comments.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# JWT Claim Constraints and TN Authorization List for certificate extensions.
+# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc8226.txt (with errata corrected) + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + +MAX = float('inf') + + +def _OID(*components): + output = [] + for x in tuple(components): + if isinstance(x, univ.ObjectIdentifier): + output.extend(list(x)) + else: + output.append(int(x)) + + return univ.ObjectIdentifier(output) + + +class JWTClaimName(char.IA5String): + pass + + +class JWTClaimNames(univ.SequenceOf): + pass + +JWTClaimNames.componentType = JWTClaimName() +JWTClaimNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class JWTClaimPermittedValues(univ.Sequence): + pass + +JWTClaimPermittedValues.componentType = namedtype.NamedTypes( + namedtype.NamedType('claim', JWTClaimName()), + namedtype.NamedType('permitted', univ.SequenceOf( + componentType=char.UTF8String()).subtype( + sizeSpec=constraint.ValueSizeConstraint(1, MAX))) +) + + +class JWTClaimPermittedValuesList(univ.SequenceOf): + pass + +JWTClaimPermittedValuesList.componentType = JWTClaimPermittedValues() +JWTClaimPermittedValuesList.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + + +class JWTClaimConstraints(univ.Sequence): + pass + +JWTClaimConstraints.componentType = namedtype.NamedTypes( + namedtype.OptionalNamedType('mustInclude', + JWTClaimNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0))), + namedtype.OptionalNamedType('permittedValues', + JWTClaimPermittedValuesList().subtype(explicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 1))) +) + +JWTClaimConstraints.subtypeSpec = constraint.ConstraintsUnion( + constraint.WithComponentsConstraint( + ('mustInclude', constraint.ComponentPresentConstraint())), + constraint.WithComponentsConstraint( + ('permittedValues', constraint.ComponentPresentConstraint())) +) + + +id_pe_JWTClaimConstraints = _OID(1, 3, 6, 1, 5, 5, 7, 1, 27) + + +class ServiceProviderCode(char.IA5String): + pass + + +class TelephoneNumber(char.IA5String): + pass + +TelephoneNumber.subtypeSpec = constraint.ConstraintsIntersection( + constraint.ValueSizeConstraint(1, 15), + constraint.PermittedAlphabetConstraint( + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '#', '*') +) + + +class TelephoneNumberRange(univ.Sequence): + pass + +TelephoneNumberRange.componentType = namedtype.NamedTypes( + namedtype.NamedType('start', TelephoneNumber()), + namedtype.NamedType('count', + univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(2, MAX))) +) + + +class TNEntry(univ.Choice): + pass + +TNEntry.componentType = namedtype.NamedTypes( + namedtype.NamedType('spc', + ServiceProviderCode().subtype(explicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 0))), + namedtype.NamedType('range', + TelephoneNumberRange().subtype(explicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatConstructed, 1))), + namedtype.NamedType('one', + TelephoneNumber().subtype(explicitTag=tag.Tag(tag.tagClassContext, + tag.tagFormatSimple, 2))) +) + + +class TNAuthorizationList(univ.SequenceOf): + pass + +TNAuthorizationList.componentType = TNEntry() +TNAuthorizationList.sizeSpec = constraint.ValueSizeConstraint(1, MAX) + +id_pe_TNAuthList = _OID(1, 3, 6, 1, 5, 5, 7, 1, 26) + + +id_ad_stirTNList = _OID(1, 3, 6, 1, 5, 5, 7, 48, 14) + + +# Map of Certificate Extension OIDs to Extensions added to the +# ones that are in rfc5280.py + +_certificateExtensionsMapUpdate = { + 
id_pe_TNAuthList: TNAuthorizationList(), + id_pe_JWTClaimConstraints: JWTClaimConstraints(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8358.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8358.py new file mode 100644 index 0000000000000000000000000000000000000000..647a366622ade8f56675ef9eae282a1a7661cc34 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8358.py @@ -0,0 +1,50 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Digital Signatures on Internet-Draft Documents +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc8358.txt +# + +from pyasn1.type import univ + +from pyasn1_modules import rfc5652 + + +id_ct = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1') + +id_ct_asciiTextWithCRLF = id_ct + (27, ) + +id_ct_epub = id_ct + (39, ) + +id_ct_htmlWithCRLF = id_ct + (38, ) + +id_ct_pdf = id_ct + (29, ) + +id_ct_postscript = id_ct + (30, ) + +id_ct_utf8TextWithCRLF = id_ct + (37, ) + +id_ct_xml = id_ct + (28, ) + + +# Map of Content Type OIDs to Content Types is added to the +# ones that are in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_ct_asciiTextWithCRLF: univ.OctetString(), + id_ct_epub: univ.OctetString(), + id_ct_htmlWithCRLF: univ.OctetString(), + id_ct_pdf: univ.OctetString(), + id_ct_postscript: univ.OctetString(), + id_ct_utf8TextWithCRLF: univ.OctetString(), + id_ct_xml: univ.OctetString(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8360.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8360.py new file mode 100644 index 0000000000000000000000000000000000000000..ca180c18d81b728938526992b9e314009a9eb1de --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8360.py @@ -0,0 +1,44 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Resource Public Key Infrastructure (RPKI) Validation Reconsidered +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc8360.txt +# https://www.rfc-editor.org/errata/eid5870 +# + +from pyasn1.type import univ + +from pyasn1_modules import rfc3779 +from pyasn1_modules import rfc5280 + + +# IP Address Delegation Extension V2 + +id_pe_ipAddrBlocks_v2 = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.28') + +IPAddrBlocks = rfc3779.IPAddrBlocks + + +# Autonomous System Identifier Delegation Extension V2 + +id_pe_autonomousSysIds_v2 = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.29') + +ASIdentifiers = rfc3779.ASIdentifiers + + +# Map of Certificate Extension OIDs to Extensions is added to the +# ones that are in rfc5280.py + +_certificateExtensionsMapUpdate = { + id_pe_ipAddrBlocks_v2: IPAddrBlocks(), + id_pe_autonomousSysIds_v2: ASIdentifiers(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8398.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8398.py new file mode 100644 index 0000000000000000000000000000000000000000..151b6321079543ee2ec40dcf7204be5262c314c2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8398.py @@ -0,0 +1,52 @@ +# +# This file is part of pyasn1-modules software. 
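+#
+# A minimal decoding sketch (illustrative; `extn` is a hypothetical
+# rfc5280.Extension taken from a parsed certificate): once map updates
+# like the ones in the modules above have run, an extension's open type
+# can be resolved through rfc5280.certificateExtensionsMap:
+#
+#   from pyasn1.codec.der import decoder as der_decoder
+#   from pyasn1.type import univ
+#   from pyasn1_modules import rfc5280
+#
+#   inner, _ = der_decoder.decode(extn['extnValue'],
+#                                 asn1Spec=univ.OctetString())
+#   value, _ = der_decoder.decode(inner,
+#       asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+#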
+# +# Created by Russ Housley with some assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Internationalized Email Addresses in X.509 Certificates +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc8398.txt +# https://www.rfc-editor.org/errata/eid5418 +# + +from pyasn1.type import char +from pyasn1.type import constraint +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + +MAX = float('inf') + + +# SmtpUTF8Mailbox contains Mailbox as specified in Section 3.3 of RFC 6531 + +id_pkix = rfc5280.id_pkix + +id_on = id_pkix + (8, ) + +id_on_SmtpUTF8Mailbox = id_on + (9, ) + + +class SmtpUTF8Mailbox(char.UTF8String): + pass + +SmtpUTF8Mailbox.subtypeSpec = constraint.ValueSizeConstraint(1, MAX) + + +on_SmtpUTF8Mailbox = rfc5280.AnotherName() +on_SmtpUTF8Mailbox['type-id'] = id_on_SmtpUTF8Mailbox +on_SmtpUTF8Mailbox['value'] = SmtpUTF8Mailbox() + + +# Map of Other Name OIDs to Other Name is added to the +# ones that are in rfc5280.py + +_anotherNameMapUpdate = { + id_on_SmtpUTF8Mailbox: SmtpUTF8Mailbox(), +} + +rfc5280.anotherNameMap.update(_anotherNameMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8410.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8410.py new file mode 100644 index 0000000000000000000000000000000000000000..98bc97bb14b297a5f3024fb32270201d623e25dd --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8410.py @@ -0,0 +1,43 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Algorithm Identifiers for Ed25519, Ed448, X25519, and X448 +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc8410.txt + +from pyasn1.type import univ +from pyasn1_modules import rfc3565 +from pyasn1_modules import rfc4055 +from pyasn1_modules import rfc5280 + + +class SignatureAlgorithmIdentifier(rfc5280.AlgorithmIdentifier): + pass + + +class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier): + pass + + +class CurvePrivateKey(univ.OctetString): + pass + + +id_X25519 = univ.ObjectIdentifier('1.3.101.110') + +id_X448 = univ.ObjectIdentifier('1.3.101.111') + +id_Ed25519 = univ.ObjectIdentifier('1.3.101.112') + +id_Ed448 = univ.ObjectIdentifier('1.3.101.113') + +id_sha512 = rfc4055.id_sha512 + +id_aes128_wrap = rfc3565.id_aes128_wrap + +id_aes256_wrap = rfc3565.id_aes256_wrap diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8418.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8418.py new file mode 100644 index 0000000000000000000000000000000000000000..6e76487c88b18d8bd1a1109141add4e6d750f2e5 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8418.py @@ -0,0 +1,36 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley. 
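+#
+# A minimal decoding sketch (illustrative; `gn` is a hypothetical
+# rfc5280.GeneralName of the otherName variety): rfc8398 above registers
+# SmtpUTF8Mailbox in rfc5280.anotherNameMap, so the otherName value can
+# be decoded as:
+#
+#   from pyasn1.codec.der import decoder as der_decoder
+#   from pyasn1_modules import rfc5280
+#
+#   other = gn['otherName']
+#   mailbox, _ = der_decoder.decode(
+#       other['value'], asn1Spec=rfc5280.anotherNameMap[other['type-id']])
+#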
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Diffie-Hellman (ECDH) Key Agreement Algorithm
+# with X25519 and X448
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8418.txt
+
+from pyasn1.type import univ
+from pyasn1_modules import rfc5280
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+    pass
+
+
+class KeyWrapAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+    pass
+
+
+dhSinglePass_stdDH_sha256kdf_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0.11.1')
+
+dhSinglePass_stdDH_sha384kdf_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0.11.2')
+
+dhSinglePass_stdDH_sha512kdf_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0.11.3')
+
+dhSinglePass_stdDH_hkdf_sha256_scheme = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.19')
+
+dhSinglePass_stdDH_hkdf_sha384_scheme = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.20')
+
+dhSinglePass_stdDH_hkdf_sha512_scheme = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.21')
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8419.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8419.py
new file mode 100644
index 0000000000000000000000000000000000000000..f10994be28e889a056cd136a83a2f62b3d192834
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8419.py
@@ -0,0 +1,68 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Edwards-Curve Digital Signature Algorithm (EdDSA) Signatures in the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8419.txt
+# https://www.rfc-editor.org/errata/eid5869
+
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+class ShakeOutputLen(univ.Integer):
+    pass
+
+
+id_Ed25519 = univ.ObjectIdentifier('1.3.101.112')
+
+sigAlg_Ed25519 = rfc5280.AlgorithmIdentifier()
+sigAlg_Ed25519['algorithm'] = id_Ed25519
+# sigAlg_Ed25519['parameters'] is absent
+
+
+id_Ed448 = univ.ObjectIdentifier('1.3.101.113')
+
+sigAlg_Ed448 = rfc5280.AlgorithmIdentifier()
+sigAlg_Ed448['algorithm'] = id_Ed448
+# sigAlg_Ed448['parameters'] is absent
+
+
+hashAlgs = univ.ObjectIdentifier('2.16.840.1.101.3.4.2')
+
+id_sha512 = hashAlgs + (3, )
+
+hashAlg_SHA_512 = rfc5280.AlgorithmIdentifier()
+hashAlg_SHA_512['algorithm'] = id_sha512
+# hashAlg_SHA_512['parameters'] is absent
+
+
+id_shake256 = hashAlgs + (12, )
+
+hashAlg_SHAKE256 = rfc5280.AlgorithmIdentifier()
+hashAlg_SHAKE256['algorithm'] = id_shake256
+# hashAlg_SHAKE256['parameters'] is absent
+
+
+id_shake256_len = hashAlgs + (18, )
+
+hashAlg_SHAKE256_LEN = rfc5280.AlgorithmIdentifier()
+hashAlg_SHAKE256_LEN['algorithm'] = id_shake256_len
+hashAlg_SHAKE256_LEN['parameters'] = ShakeOutputLen()
+
+
+# Map of Algorithm Identifier OIDs to Parameters added to the
+# ones in rfc5280.py.  Do not add OIDs with absent parameters.
+
+_algorithmIdentifierMapUpdate = {
+    id_shake256_len: ShakeOutputLen(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8479.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8479.py
new file mode 100644
index 0000000000000000000000000000000000000000..57f78b62f2c2334827caedee36b9fa74dcc1ba86
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8479.py
@@ -0,0 +1,45 @@
+#
+# This file is part of pyasn1-modules software.
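+#
+# A minimal construction sketch (illustrative): rfc8419 above is the one
+# module in this group whose hash algorithm carries parameters, so a
+# SHAKE256-LEN AlgorithmIdentifier can be assembled as:
+#
+#   from pyasn1.codec.der import encoder as der_encoder
+#   from pyasn1_modules import rfc5280, rfc8419
+#
+#   alg_id = rfc5280.AlgorithmIdentifier()
+#   alg_id['algorithm'] = rfc8419.id_shake256_len
+#   alg_id['parameters'] = der_encoder.encode(rfc8419.ShakeOutputLen(512))
+#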
+# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Storing Validation Parameters in PKCS#8 +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc8479.txt +# + +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc5652 + + +id_attr_validation_parameters = univ.ObjectIdentifier('1.3.6.1.4.1.2312.18.8.1') + + +class ValidationParams(univ.Sequence): + pass + +ValidationParams.componentType = namedtype.NamedTypes( + namedtype.NamedType('hashAlg', univ.ObjectIdentifier()), + namedtype.NamedType('seed', univ.OctetString()) +) + + +at_validation_parameters = rfc5652.Attribute() +at_validation_parameters['attrType'] = id_attr_validation_parameters +at_validation_parameters['attrValues'][0] = ValidationParams() + + +# Map of Attribute Type OIDs to Attributes added to the +# ones that are in rfc5652.py + +_cmsAttributesMapUpdate = { + id_attr_validation_parameters: ValidationParams(), +} + +rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8494.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8494.py new file mode 100644 index 0000000000000000000000000000000000000000..fe349e14ca1233f4fbb1f0c7093a3c69a60303b6 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8494.py @@ -0,0 +1,80 @@ +# This file is being contributed to pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Multicast Email (MULE) over Allied Communications Publication 142 +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc8494.txt + +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ + + +id_mmhs_CDT = univ.ObjectIdentifier('1.3.26.0.4406.0.4.2') + + +class AlgorithmID_ShortForm(univ.Integer): + pass + +AlgorithmID_ShortForm.namedValues = namedval.NamedValues( + ('zlibCompress', 0) +) + + +class ContentType_ShortForm(univ.Integer): + pass + +ContentType_ShortForm.namedValues = namedval.NamedValues( + ('unidentified', 0), + ('external', 1), + ('p1', 2), + ('p3', 3), + ('p7', 4), + ('mule', 25) +) + + +class CompressedContentInfo(univ.Sequence): + pass + +CompressedContentInfo.componentType = namedtype.NamedTypes( + namedtype.NamedType('unnamed', univ.Choice(componentType=namedtype.NamedTypes( + namedtype.NamedType('contentType-ShortForm', + ContentType_ShortForm().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('contentType-OID', + univ.ObjectIdentifier().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))) + ))), + namedtype.NamedType('compressedContent', + univ.OctetString().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))) +) + + +class CompressionAlgorithmIdentifier(univ.Choice): + pass + +CompressionAlgorithmIdentifier.componentType = namedtype.NamedTypes( + namedtype.NamedType('algorithmID-ShortForm', + AlgorithmID_ShortForm().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 0))), + namedtype.NamedType('algorithmID-OID', + univ.ObjectIdentifier().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))) +) + + +class CompressedData(univ.Sequence): + pass + +CompressedData.componentType 
= namedtype.NamedTypes( + namedtype.NamedType('compressionAlgorithm', CompressionAlgorithmIdentifier()), + namedtype.NamedType('compressedContentInfo', CompressedContentInfo()) +) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8520.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8520.py new file mode 100644 index 0000000000000000000000000000000000000000..b9eb6e93778620646e66da18b755266ef29121db --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8520.py @@ -0,0 +1,63 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# Modified by Russ Housley to add maps for use with opentypes. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# X.509 Extensions for MUD URL and MUD Signer; +# Object Identifier for CMS Content Type for a MUD file +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc8520.txt +# + +from pyasn1.type import char +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 +from pyasn1_modules import rfc5652 + + +# X.509 Extension for MUD URL + +id_pe_mud_url = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.25') + +class MUDURLSyntax(char.IA5String): + pass + + +# X.509 Extension for MUD Signer + +id_pe_mudsigner = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.30') + +class MUDsignerSyntax(rfc5280.Name): + pass + + +# Object Identifier for CMS Content Type for a MUD file + +id_ct_mudtype = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.41') + + +# Map of Certificate Extension OIDs to Extensions added to the +# ones that are in rfc5280.py + +_certificateExtensionsMapUpdate = { + id_pe_mud_url: MUDURLSyntax(), + id_pe_mudsigner: MUDsignerSyntax(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate) + + +# Map of Content Type OIDs to Content Types added to the +# ones that are in rfc5652.py + +_cmsContentTypesMapUpdate = { + id_ct_mudtype: univ.OctetString(), +} + +rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8619.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8619.py new file mode 100644 index 0000000000000000000000000000000000000000..0aaa811bad0e8cb1f1d7ade6d59c95e702591916 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8619.py @@ -0,0 +1,45 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley. 
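+#
+# A minimal construction sketch (illustrative; the URL is a made-up
+# example) for the MUD URL extension defined by rfc8520 above:
+#
+#   from pyasn1.codec.der import encoder as der_encoder
+#   from pyasn1.type import univ
+#   from pyasn1_modules import rfc5280, rfc8520
+#
+#   extn = rfc5280.Extension()
+#   extn['extnID'] = rfc8520.id_pe_mud_url
+#   extn['critical'] = False
+#   extn['extnValue'] = univ.OctetString(der_encoder.encode(
+#       rfc8520.MUDURLSyntax('https://example.com/mud/device.json')))
+#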
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Algorithm Identifiers for HKDF +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc8619.txt +# + +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +# Object Identifiers + +id_alg_hkdf_with_sha256 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.28') + + +id_alg_hkdf_with_sha384 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.29') + + +id_alg_hkdf_with_sha512 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.30') + + +# Key Derivation Algorithm Identifiers + +kda_hkdf_with_sha256 = rfc5280.AlgorithmIdentifier() +kda_hkdf_with_sha256['algorithm'] = id_alg_hkdf_with_sha256 +# kda_hkdf_with_sha256['parameters'] are absent + + +kda_hkdf_with_sha384 = rfc5280.AlgorithmIdentifier() +kda_hkdf_with_sha384['algorithm'] = id_alg_hkdf_with_sha384 +# kda_hkdf_with_sha384['parameters'] are absent + + +kda_hkdf_with_sha512 = rfc5280.AlgorithmIdentifier() +kda_hkdf_with_sha512['algorithm'] = id_alg_hkdf_with_sha512 +# kda_hkdf_with_sha512['parameters'] are absent diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8649.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8649.py new file mode 100644 index 0000000000000000000000000000000000000000..c405f050e8e68e22be0b3308b30185ec57742278 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8649.py @@ -0,0 +1,40 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. +# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# X.509 Certificate Extension for Hash Of Root Key +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc8649.txt +# + +from pyasn1.type import namedtype +from pyasn1.type import univ + +from pyasn1_modules import rfc5280 + + +id_ce_hashOfRootKey = univ.ObjectIdentifier('1.3.6.1.4.1.51483.2.1') + + +class HashedRootKey(univ.Sequence): + pass + +HashedRootKey.componentType = namedtype.NamedTypes( + namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()), + namedtype.NamedType('hashValue', univ.OctetString()) +) + + +# Map of Certificate Extension OIDs to Extensions added to the +# ones that are in rfc5280.py + +_certificateExtensionsMapUpdate = { + id_ce_hashOfRootKey: HashedRootKey(), +} + +rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8692.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8692.py new file mode 100644 index 0000000000000000000000000000000000000000..7a6791ad200a4e51a182c79ee5959823de436512 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8692.py @@ -0,0 +1,79 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
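+#
+# A minimal construction sketch (illustrative; `alg_id` and `digest` are
+# hypothetical placeholders) for the HashedRootKey extension value from
+# rfc8649 above:
+#
+#   from pyasn1.type import univ
+#   from pyasn1_modules import rfc8649
+#
+#   hrk = rfc8649.HashedRootKey()
+#   hrk['hashAlg'] = alg_id                  # an rfc5280.AlgorithmIdentifier
+#   hrk['hashValue'] = univ.OctetString(digest)
+#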
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Algorithm Identifiers for RSASSA-PSS and ECDSA using SHAKEs
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8692.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5480
+
+
+# SHAKE128 One-Way Hash Function
+
+id_shake128 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.11')
+
+mda_shake128 = rfc5280.AlgorithmIdentifier()
+mda_shake128['algorithm'] = id_shake128
+# mda_shake128['parameters'] is absent
+
+
+# SHAKE256 One-Way Hash Function
+
+id_shake256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.12')
+
+mda_shake256 = rfc5280.AlgorithmIdentifier()
+mda_shake256['algorithm'] = id_shake256
+# mda_shake256['parameters'] is absent
+
+
+# RSA PSS with SHAKE128
+
+id_RSASSA_PSS_SHAKE128 = univ.ObjectIdentifier('1.3.6.1.5.5.7.6.30')
+
+sa_rSASSA_PSS_SHAKE128 = rfc5280.AlgorithmIdentifier()
+sa_rSASSA_PSS_SHAKE128['algorithm'] = id_RSASSA_PSS_SHAKE128
+# sa_rSASSA_PSS_SHAKE128['parameters'] is absent
+
+pk_rsaSSA_PSS_SHAKE128 = rfc4055.RSAPublicKey()
+
+
+# RSA PSS with SHAKE256
+
+id_RSASSA_PSS_SHAKE256 = univ.ObjectIdentifier('1.3.6.1.5.5.7.6.31')
+
+sa_rSASSA_PSS_SHAKE256 = rfc5280.AlgorithmIdentifier()
+sa_rSASSA_PSS_SHAKE256['algorithm'] = id_RSASSA_PSS_SHAKE256
+# sa_rSASSA_PSS_SHAKE256['parameters'] is absent
+
+pk_rsaSSA_PSS_SHAKE256 = rfc4055.RSAPublicKey()
+
+
+# ECDSA with SHAKE128
+
+id_ecdsa_with_shake128 = univ.ObjectIdentifier('1.3.6.1.5.5.7.6.32')
+
+sa_ecdsa_with_shake128 = rfc5280.AlgorithmIdentifier()
+sa_ecdsa_with_shake128['algorithm'] = id_ecdsa_with_shake128
+# sa_ecdsa_with_shake128['parameters'] is absent
+
+pk_ec = rfc5480.ECPoint()
+
+
+# ECDSA with SHAKE256
+
+id_ecdsa_with_shake256 = univ.ObjectIdentifier('1.3.6.1.5.5.7.6.33')
+
+sa_ecdsa_with_shake256 = rfc5280.AlgorithmIdentifier()
+sa_ecdsa_with_shake256['algorithm'] = id_ecdsa_with_shake256
+# sa_ecdsa_with_shake256['parameters'] is absent
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8696.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8696.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c6d38d4410b427ed6939daa62fe82c3f9942ee7
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8696.py
@@ -0,0 +1,104 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
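+#
+# A minimal usage sketch (illustrative; `cert` is a hypothetical decoded
+# rfc5280.Certificate) for the prefilled identifiers from rfc8692 above:
+#
+#   from pyasn1_modules import rfc8692
+#
+#   sig_oid = cert['tbsCertificate']['signature']['algorithm']
+#   if sig_oid == rfc8692.id_ecdsa_with_shake256:
+#       pass  # verify as ECDSA with SHAKE256
+#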
+# +# Copyright (c) 2019, Vigil Security, LLC +# License: http://snmplabs.com/pyasn1/license.html +# +# Using Pre-Shared Key (PSK) in the Cryptographic Message Syntax (CMS) +# +# ASN.1 source from: +# https://www.rfc-editor.org/rfc/rfc8696.txt +# + +from pyasn1.type import constraint +from pyasn1.type import namedtype +from pyasn1.type import namedval +from pyasn1.type import tag +from pyasn1.type import univ + +from pyasn1_modules import rfc5652 + +MAX = float('inf') + + +id_ori = univ.ObjectIdentifier('1.2.840.113549.1.9.16.13') + +id_ori_keyTransPSK = univ.ObjectIdentifier('1.2.840.113549.1.9.16.13.1') + +id_ori_keyAgreePSK = univ.ObjectIdentifier('1.2.840.113549.1.9.16.13.2') + + +class PreSharedKeyIdentifier(univ.OctetString): + pass + + +class KeyTransRecipientInfos(univ.SequenceOf): + componentType = rfc5652.KeyTransRecipientInfo() + + +class KeyTransPSKRecipientInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', + rfc5652.CMSVersion()), + namedtype.NamedType('pskid', + PreSharedKeyIdentifier()), + namedtype.NamedType('kdfAlgorithm', + rfc5652.KeyDerivationAlgorithmIdentifier()), + namedtype.NamedType('keyEncryptionAlgorithm', + rfc5652.KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('ktris', + KeyTransRecipientInfos()), + namedtype.NamedType('encryptedKey', + rfc5652.EncryptedKey()) + ) + + +class KeyAgreePSKRecipientInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('version', + rfc5652.CMSVersion()), + namedtype.NamedType('pskid', + PreSharedKeyIdentifier()), + namedtype.NamedType('originator', + rfc5652.OriginatorIdentifierOrKey().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatConstructed, 0))), + namedtype.OptionalNamedType('ukm', + rfc5652.UserKeyingMaterial().subtype(explicitTag=tag.Tag( + tag.tagClassContext, tag.tagFormatSimple, 1))), + namedtype.NamedType('kdfAlgorithm', + rfc5652.KeyDerivationAlgorithmIdentifier()), + namedtype.NamedType('keyEncryptionAlgorithm', + rfc5652.KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('recipientEncryptedKeys', + rfc5652.RecipientEncryptedKeys()) + ) + + +class CMSORIforPSKOtherInfo(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('psk', + univ.OctetString()), + namedtype.NamedType('keyMgmtAlgType', + univ.Enumerated(namedValues=namedval.NamedValues( + ('keyTrans', 5), ('keyAgree', 10)))), + namedtype.NamedType('keyEncryptionAlgorithm', + rfc5652.KeyEncryptionAlgorithmIdentifier()), + namedtype.NamedType('pskLength', + univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(1, MAX))), + namedtype.NamedType('kdkLength', + univ.Integer().subtype( + subtypeSpec=constraint.ValueRangeConstraint(1, MAX))) + ) + + +# Update the CMS Other Recipient Info map in rfc5652.py + +_otherRecipientInfoMapUpdate = { + id_ori_keyTransPSK: KeyTransPSKRecipientInfo(), + id_ori_keyAgreePSK: KeyAgreePSKRecipientInfo(), +} + +rfc5652.otherRecipientInfoMap.update(_otherRecipientInfoMapUpdate) diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8702.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8702.py new file mode 100644 index 0000000000000000000000000000000000000000..977c278760fbb30da1682c5d0d862ae54cdfc86f --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8702.py @@ -0,0 +1,105 @@ +# +# This file is part of pyasn1-modules software. +# +# Created by Russ Housley with assistance from asn1ate v.0.6.0. 
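+#
+# A minimal decoding sketch (illustrative; `ori` is a hypothetical
+# rfc5652.OtherRecipientInfo from a decoded EnvelopedData) for the map
+# update in rfc8696 above:
+#
+#   from pyasn1.codec.der import decoder as der_decoder
+#   from pyasn1_modules import rfc5652
+#
+#   ori_value, _ = der_decoder.decode(
+#       ori['oriValue'], asn1Spec=rfc5652.otherRecipientInfoMap[ori['oriType']])
+#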
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SHAKE One-way Hash Functions for CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8702.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc8692
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Imports from RFC 8692
+
+id_shake128 = rfc8692.id_shake128
+
+mda_shake128 = rfc8692.mda_shake128
+
+id_shake256 = rfc8692.id_shake256
+
+mda_shake256 = rfc8692.mda_shake256
+
+id_RSASSA_PSS_SHAKE128 = rfc8692.id_RSASSA_PSS_SHAKE128
+
+sa_rSASSA_PSS_SHAKE128 = rfc8692.sa_rSASSA_PSS_SHAKE128
+
+pk_rsaSSA_PSS_SHAKE128 = rfc8692.pk_rsaSSA_PSS_SHAKE128
+
+id_RSASSA_PSS_SHAKE256 = rfc8692.id_RSASSA_PSS_SHAKE256
+
+sa_rSASSA_PSS_SHAKE256 = rfc8692.sa_rSASSA_PSS_SHAKE256
+
+pk_rsaSSA_PSS_SHAKE256 = rfc8692.pk_rsaSSA_PSS_SHAKE256
+
+id_ecdsa_with_shake128 = rfc8692.id_ecdsa_with_shake128
+
+sa_ecdsa_with_shake128 = rfc8692.sa_ecdsa_with_shake128
+
+id_ecdsa_with_shake256 = rfc8692.id_ecdsa_with_shake256
+
+sa_ecdsa_with_shake256 = rfc8692.sa_ecdsa_with_shake256
+
+pk_ec = rfc8692.pk_ec
+
+
+# KMAC with SHAKE128
+
+id_KMACWithSHAKE128 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.19')
+
+
+class KMACwithSHAKE128_params(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.DefaultedNamedType('kMACOutputLength',
+            univ.Integer().subtype(value=256)),
+        namedtype.DefaultedNamedType('customizationString',
+            univ.OctetString().subtype(value=''))
+    )
+
+
+maca_KMACwithSHAKE128 = AlgorithmIdentifier()
+maca_KMACwithSHAKE128['algorithm'] = id_KMACWithSHAKE128
+maca_KMACwithSHAKE128['parameters'] = KMACwithSHAKE128_params()
+
+
+# KMAC with SHAKE256
+
+id_KMACWithSHAKE256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.20')
+
+
+class KMACwithSHAKE256_params(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.DefaultedNamedType('kMACOutputLength',
+            univ.Integer().subtype(value=512)),
+        namedtype.DefaultedNamedType('customizationString',
+            univ.OctetString().subtype(value=''))
+    )
+
+
+maca_KMACwithSHAKE256 = AlgorithmIdentifier()
+maca_KMACwithSHAKE256['algorithm'] = id_KMACWithSHAKE256
+maca_KMACwithSHAKE256['parameters'] = KMACwithSHAKE256_params()
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+    id_KMACWithSHAKE128: KMACwithSHAKE128_params(),
+    id_KMACWithSHAKE256: KMACwithSHAKE256_params(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8708.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8708.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e9909cf906bf00135faec6916bcf2d1f38b3935
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8708.py
@@ -0,0 +1,41 @@
+# This file is being contributed to pyasn1-modules software.
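+#
+# A minimal sketch (illustrative) of the DEFAULTed KMAC parameters from
+# rfc8702 above: a freshly constructed value reports the defaults.
+#
+#   from pyasn1_modules import rfc8702
+#
+#   params = rfc8702.KMACwithSHAKE128_params()
+#   int(params['kMACOutputLength'])   # 256, the DEFAULT
+#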
+#
+# Created by Russ Housley
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# HSS/LMS Hash-based Signature Algorithm for CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8708.txt
+
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Object Identifiers
+
+id_alg_hss_lms_hashsig = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.17')
+
+id_alg_mts_hashsig = id_alg_hss_lms_hashsig
+
+
+# Signature Algorithm Identifier
+
+sa_HSS_LMS_HashSig = rfc5280.AlgorithmIdentifier()
+sa_HSS_LMS_HashSig['algorithm'] = id_alg_hss_lms_hashsig
+# sa_HSS_LMS_HashSig['parameters'] is always absent
+
+
+# Public Key
+
+class HSS_LMS_HashSig_PublicKey(univ.OctetString):
+    pass
+
+
+pk_HSS_LMS_HashSig = rfc5280.SubjectPublicKeyInfo()
+pk_HSS_LMS_HashSig['algorithm'] = sa_HSS_LMS_HashSig
+# pk_HSS_LMS_HashSig['subjectPublicKey'] CONTAINS a DER-encoded HSS_LMS_HashSig_PublicKey
diff --git a/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8769.py b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8769.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d2b3006748a8710cd46fd63eed1a589a7e1e69c
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyasn1_modules/rfc8769.py
@@ -0,0 +1,21 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2020, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CBOR Content for CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8769.txt
+#
+
+from pyasn1.type import univ
+
+
+id_ct_cbor = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.44')
+
+
+id_ct_cborSequence = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.45')
diff --git a/py311/lib/python3.11/site-packages/pydevd_plugins/__init__.py b/py311/lib/python3.11/site-packages/pydevd_plugins/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4abcfa0ee1e0dc920a8ac4d560c47e43f8ce00e
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pydevd_plugins/__init__.py
@@ -0,0 +1,6 @@
+try:
+    __import__("pkg_resources").declare_namespace(__name__)
+except ImportError:  # pragma: no cover
+    import pkgutil
+
+    __path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/py311/lib/python3.11/site-packages/pyflakes/__init__.py b/py311/lib/python3.11/site-packages/pyflakes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6310076385fa1fbf299fe9a50a35e80039d9833
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyflakes/__init__.py
@@ -0,0 +1 @@
+__version__ = '3.4.0'
diff --git a/py311/lib/python3.11/site-packages/pyflakes/__main__.py b/py311/lib/python3.11/site-packages/pyflakes/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..68cd9efbc5dec487ff2b44167cb7e359f7b91f5b
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyflakes/__main__.py
@@ -0,0 +1,5 @@
+from pyflakes.api import main
+
+# python -m pyflakes
+if __name__ == '__main__':
+    main(prog='pyflakes')
diff --git a/py311/lib/python3.11/site-packages/pyflakes/api.py b/py311/lib/python3.11/site-packages/pyflakes/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd4d94343b71902bc8303b5378788852c1c0318a
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyflakes/api.py
@@ -0,0 +1,185 @@
+"""
+API for the command-line I{pyflakes} tool.
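+
+A minimal programmatic sketch (the source string and filename here are
+made-up examples)::
+
+    import sys
+
+    from pyflakes.api import check
+    from pyflakes.reporter import Reporter
+
+    check('import os\n', '<example>', Reporter(sys.stdout, sys.stderr))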
+""" +import ast +import os +import platform +import re +import sys + +from pyflakes import checker, __version__ +from pyflakes import reporter as modReporter + +__all__ = ['check', 'checkPath', 'checkRecursive', 'iterSourceCode', 'main'] + +PYTHON_SHEBANG_REGEX = re.compile(br'^#!.*\bpython(3(\.\d+)?|w)?[dmu]?\s') + + +def check(codeString, filename, reporter=None): + """ + Check the Python source given by C{codeString} for flakes. + + @param codeString: The Python source to check. + @type codeString: C{str} + + @param filename: The name of the file the source came from, used to report + errors. + @type filename: C{str} + + @param reporter: A L{Reporter} instance, where errors and warnings will be + reported. + + @return: The number of warnings emitted. + @rtype: C{int} + """ + if reporter is None: + reporter = modReporter._makeDefaultReporter() + # First, compile into an AST and handle syntax errors. + try: + tree = ast.parse(codeString, filename=filename) + except SyntaxError as e: + reporter.syntaxError(filename, e.args[0], e.lineno, e.offset, e.text) + return 1 + except Exception: + reporter.unexpectedError(filename, 'problem decoding source') + return 1 + # Okay, it's syntactically valid. Now check it. + w = checker.Checker(tree, filename=filename) + w.messages.sort(key=lambda m: m.lineno) + for warning in w.messages: + reporter.flake(warning) + return len(w.messages) + + +def checkPath(filename, reporter=None): + """ + Check the given path, printing out any warnings detected. + + @param reporter: A L{Reporter} instance, where errors and warnings will be + reported. + + @return: the number of warnings printed + """ + if reporter is None: + reporter = modReporter._makeDefaultReporter() + try: + with open(filename, 'rb') as f: + codestr = f.read() + except OSError as e: + reporter.unexpectedError(filename, e.args[1]) + return 1 + return check(codestr, filename, reporter) + + +def isPythonFile(filename): + """Return True if filename points to a Python file.""" + if filename.endswith('.py'): + return True + + # Avoid obvious Emacs backup files + if filename.endswith("~"): + return False + + max_bytes = 128 + + try: + with open(filename, 'rb') as f: + text = f.read(max_bytes) + if not text: + return False + except OSError: + return False + + return PYTHON_SHEBANG_REGEX.match(text) + + +def iterSourceCode(paths): + """ + Iterate over all Python source files in C{paths}. + + @param paths: A list of paths. Directories will be recursed into and + any .py files found will be yielded. Any non-directories will be + yielded as-is. + """ + for path in paths: + if os.path.isdir(path): + for dirpath, dirnames, filenames in os.walk(path): + for filename in filenames: + full_path = os.path.join(dirpath, filename) + if isPythonFile(full_path): + yield full_path + else: + yield path + + +def checkRecursive(paths, reporter): + """ + Recursively check all source files in C{paths}. + + @param paths: A list of paths to Python source files and directories + containing Python source files. + @param reporter: A L{Reporter} where all of the warnings and errors + will be reported to. + @return: The number of warnings found. + """ + warnings = 0 + for sourcePath in iterSourceCode(paths): + warnings += checkPath(sourcePath, reporter) + return warnings + + +def _exitOnSignal(sigName, message): + """Handles a signal with sys.exit. + + Some of these signals (SIGPIPE, for example) don't exist or are invalid on + Windows. So, ignore errors that might arise. 
+ """ + import signal + + try: + sigNumber = getattr(signal, sigName) + except AttributeError: + # the signal constants defined in the signal module are defined by + # whether the C library supports them or not. So, SIGPIPE might not + # even be defined. + return + + def handler(sig, f): + sys.exit(message) + + try: + signal.signal(sigNumber, handler) + except ValueError: + # It's also possible the signal is defined, but then it's invalid. In + # this case, signal.signal raises ValueError. + pass + + +def _get_version(): + """ + Retrieve and format package version along with python version & OS used + """ + return ('%s Python %s on %s' % + (__version__, platform.python_version(), platform.system())) + + +def main(prog=None, args=None): + """Entry point for the script "pyflakes".""" + import argparse + + # Handle "Keyboard Interrupt" and "Broken pipe" gracefully + _exitOnSignal('SIGINT', '... stopped') + _exitOnSignal('SIGPIPE', 1) + + parser = argparse.ArgumentParser(prog=prog, + description='Check Python source files for errors') + parser.add_argument('-V', '--version', action='version', version=_get_version()) + parser.add_argument('path', nargs='*', + help='Path(s) of Python file(s) to check. STDIN if not given.') + args = parser.parse_args(args=args).path + reporter = modReporter._makeDefaultReporter() + if args: + warnings = checkRecursive(args, reporter) + else: + warnings = check(sys.stdin.read(), '', reporter) + raise SystemExit(warnings > 0) diff --git a/py311/lib/python3.11/site-packages/pyflakes/checker.py b/py311/lib/python3.11/site-packages/pyflakes/checker.py new file mode 100644 index 0000000000000000000000000000000000000000..629dacf0bae3a3def39dfc3e338c149fb8e08089 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyflakes/checker.py @@ -0,0 +1,2223 @@ +""" +Main module. + +Implement the central Checker class. +Also, it models the Bindings and Scopes. 
+""" +import __future__ +import builtins +import ast +import collections +import contextlib +import doctest +import functools +import os +import re +import string +import sys +import warnings + +from pyflakes import messages + +PYPY = hasattr(sys, 'pypy_version_info') + +builtin_vars = dir(builtins) + +parse_format_string = string.Formatter().parse + + +def getAlternatives(n): + if isinstance(n, ast.If): + return [n.body] + elif isinstance(n, ast.Try): + return [n.body + n.orelse] + [[hdl] for hdl in n.handlers] + elif sys.version_info >= (3, 10) and isinstance(n, ast.Match): + return [mc.body for mc in n.cases] + + +FOR_TYPES = (ast.For, ast.AsyncFor) + + +def _is_singleton(node): # type: (ast.AST) -> bool + return ( + isinstance(node, ast.Constant) and + isinstance(node.value, (bool, type(Ellipsis), type(None))) + ) + + +def _is_tuple_constant(node): # type: (ast.AST) -> bool + return ( + isinstance(node, ast.Tuple) and + all(_is_constant(elt) for elt in node.elts) + ) + + +def _is_constant(node): + return isinstance(node, ast.Constant) or _is_tuple_constant(node) + + +def _is_const_non_singleton(node): # type: (ast.AST) -> bool + return _is_constant(node) and not _is_singleton(node) + + +def _is_name_or_attr(node, name): # type: (ast.AST, str) -> bool + return ( + (isinstance(node, ast.Name) and node.id == name) or + (isinstance(node, ast.Attribute) and node.attr == name) + ) + + +MAPPING_KEY_RE = re.compile(r'\(([^()]*)\)') +CONVERSION_FLAG_RE = re.compile('[#0+ -]*') +WIDTH_RE = re.compile(r'(?:\*|\d*)') +PRECISION_RE = re.compile(r'(?:\.(?:\*|\d*))?') +LENGTH_RE = re.compile('[hlL]?') +# https://docs.python.org/3/library/stdtypes.html#old-string-formatting +VALID_CONVERSIONS = frozenset('diouxXeEfFgGcrsa%') + + +def _must_match(regex, string, pos): + match = regex.match(string, pos) + assert match is not None + return match + + +def parse_percent_format(s): + """Parses the string component of a `'...' % ...` format call + + Copied from https://github.com/asottile/pyupgrade at v1.20.1 + """ + + def _parse_inner(): + string_start = 0 + string_end = 0 + in_fmt = False + + i = 0 + while i < len(s): + if not in_fmt: + try: + i = s.index('%', i) + except ValueError: # no more % fields! 
+ yield s[string_start:], None + return + else: + string_end = i + i += 1 + in_fmt = True + else: + key_match = MAPPING_KEY_RE.match(s, i) + if key_match: + key = key_match.group(1) + i = key_match.end() + else: + key = None + + conversion_flag_match = _must_match(CONVERSION_FLAG_RE, s, i) + conversion_flag = conversion_flag_match.group() or None + i = conversion_flag_match.end() + + width_match = _must_match(WIDTH_RE, s, i) + width = width_match.group() or None + i = width_match.end() + + precision_match = _must_match(PRECISION_RE, s, i) + precision = precision_match.group() or None + i = precision_match.end() + + # length modifier is ignored + i = _must_match(LENGTH_RE, s, i).end() + + try: + conversion = s[i] + except IndexError: + raise ValueError('end-of-string while parsing format') + i += 1 + + fmt = (key, conversion_flag, width, precision, conversion) + yield s[string_start:string_end], fmt + + in_fmt = False + string_start = i + + if in_fmt: + raise ValueError('end-of-string while parsing format') + + return tuple(_parse_inner()) + + +class _FieldsOrder(dict): + """Fix order of AST node fields.""" + + def _get_fields(self, node_class): + # handle iter before target, and generators before element + fields = node_class._fields + if 'iter' in fields: + key_first = 'iter'.find + elif 'generators' in fields: + key_first = 'generators'.find + else: + key_first = 'value'.find + return tuple(sorted(fields, key=key_first, reverse=True)) + + def __missing__(self, node_class): + self[node_class] = fields = self._get_fields(node_class) + return fields + + +def iter_child_nodes(node, omit=None, _fields_order=_FieldsOrder()): + """ + Yield all direct child nodes of *node*, that is, all fields that + are nodes and all items of fields that are lists of nodes. + + :param node: AST node to be iterated upon + :param omit: String or tuple of strings denoting the + attributes of the node to be omitted from + further parsing + :param _fields_order: Order of AST node fields + """ + for name in _fields_order[node.__class__]: + if omit and name in omit: + continue + field = getattr(node, name, None) + if isinstance(field, ast.AST): + yield field + elif isinstance(field, list): + for item in field: + if isinstance(item, ast.AST): + yield item + + +def convert_to_value(item): + if isinstance(item, ast.Constant): + return item.value + elif isinstance(item, ast.Tuple): + return tuple(convert_to_value(i) for i in item.elts) + elif isinstance(item, ast.Name): + return VariableKey(item=item) + else: + return UnhandledKeyType() + + +def is_notimplemented_name_node(node): + return isinstance(node, ast.Name) and getNodeName(node) == 'NotImplemented' + + +class Binding: + """ + Represents the binding of a value to a name. + + The checker uses this to keep track of which names have been bound and + which names have not. See L{Assignment} for a special type of binding that + is checked with stricter rules. + + @ivar used: pair of (L{Scope}, node) indicating the scope and + the node that this binding was last used. + """ + + def __init__(self, name, source): + self.name = name + self.source = source + self.used = False + + def __str__(self): + return self.name + + def __repr__(self): + return '<{} object {!r} from line {!r} at 0x{:x}>'.format( + self.__class__.__name__, + self.name, + self.source.lineno, + id(self), + ) + + def redefines(self, other): + return isinstance(other, Definition) and self.name == other.name + + +class Definition(Binding): + """ + A binding that defines a function or a class. 
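+
+    For example, C{redefines} below also treats a plain assignment to the
+    same name as a redefinition, so::
+
+        def f(): pass
+        f = 1
+
+    rebinds the (possibly still unused) definition of C{f}.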
+ """ + def redefines(self, other): + return ( + super().redefines(other) or + (isinstance(other, Assignment) and self.name == other.name) + ) + + +class Builtin(Definition): + """A definition created for all Python builtins.""" + + def __init__(self, name): + super().__init__(name, None) + + def __repr__(self): + return '<{} object {!r} at 0x{:x}>'.format( + self.__class__.__name__, + self.name, + id(self) + ) + + +class UnhandledKeyType: + """ + A dictionary key of a type that we cannot or do not check for duplicates. + """ + + +class VariableKey: + """ + A dictionary key which is a variable. + + @ivar item: The variable AST object. + """ + def __init__(self, item): + self.name = item.id + + def __eq__(self, compare): + return ( + compare.__class__ == self.__class__ and + compare.name == self.name + ) + + def __hash__(self): + return hash(self.name) + + +class Importation(Definition): + """ + A binding created by an import statement. + + @ivar fullName: The complete name given to the import statement, + possibly including multiple dotted components. + @type fullName: C{str} + """ + + def __init__(self, name, source, full_name=None): + self.fullName = full_name or name + self.redefined = [] + super().__init__(name, source) + + def redefines(self, other): + if isinstance(other, SubmoduleImportation): + # See note in SubmoduleImportation about RedefinedWhileUnused + return self.fullName == other.fullName + return isinstance(other, Definition) and self.name == other.name + + def _has_alias(self): + """Return whether importation needs an as clause.""" + return not self.fullName.split('.')[-1] == self.name + + @property + def source_statement(self): + """Generate a source statement equivalent to the import.""" + if self._has_alias(): + return f'import {self.fullName} as {self.name}' + else: + return 'import %s' % self.fullName + + def __str__(self): + """Return import full name with alias.""" + if self._has_alias(): + return self.fullName + ' as ' + self.name + else: + return self.fullName + + +class SubmoduleImportation(Importation): + """ + A binding created by a submodule import statement. + + A submodule import is a special case where the root module is implicitly + imported, without an 'as' clause, and the submodule is also imported. + Python does not restrict which attributes of the root module may be used. + + This class is only used when the submodule import is without an 'as' clause. + + pyflakes handles this case by registering the root module name in the scope, + allowing any attribute of the root module to be accessed. + + RedefinedWhileUnused is suppressed in `redefines` unless the submodule + name is also the same, to avoid false positives. + """ + + def __init__(self, name, source): + # A dot should only appear in the name when it is a submodule import + assert '.' in name and (not source or isinstance(source, ast.Import)) + package_name = name.split('.')[0] + super().__init__(package_name, source) + self.fullName = name + + def redefines(self, other): + if isinstance(other, Importation): + return self.fullName == other.fullName + return super().redefines(other) + + def __str__(self): + return self.fullName + + @property + def source_statement(self): + return 'import ' + self.fullName + + +class ImportationFrom(Importation): + + def __init__(self, name, source, module, real_name=None): + self.module = module + self.real_name = real_name or name + + if module.endswith('.'): + full_name = module + self.real_name + else: + full_name = module + '.' 
+ self.real_name + + super().__init__(name, source, full_name) + + def __str__(self): + """Return import full name with alias.""" + if self.real_name != self.name: + return self.fullName + ' as ' + self.name + else: + return self.fullName + + @property + def source_statement(self): + if self.real_name != self.name: + return f'from {self.module} import {self.real_name} as {self.name}' + else: + return f'from {self.module} import {self.name}' + + +class StarImportation(Importation): + """A binding created by a 'from x import *' statement.""" + + def __init__(self, name, source): + super().__init__('*', source) + # Each star importation needs a unique name, and + # may not be the module name otherwise it will be deemed imported + self.name = name + '.*' + self.fullName = name + + @property + def source_statement(self): + return 'from ' + self.fullName + ' import *' + + def __str__(self): + # When the module ends with a ., avoid the ambiguous '..*' + if self.fullName.endswith('.'): + return self.source_statement + else: + return self.name + + +class FutureImportation(ImportationFrom): + """ + A binding created by a from `__future__` import statement. + + `__future__` imports are implicitly used. + """ + + def __init__(self, name, source, scope): + super().__init__(name, source, '__future__') + self.used = (scope, source) + + +class Argument(Binding): + """ + Represents binding a name as an argument. + """ + + +class Assignment(Binding): + """ + Represents binding a name with an explicit assignment. + + The checker will raise warnings for any Assignment that isn't used. Also, + the checker does not consider assignments in tuple/list unpacking to be + Assignments, rather it treats them as simple Bindings. + """ + + +class NamedExprAssignment(Assignment): + """ + Represents binding a name with an assignment expression. + """ + + +class Annotation(Binding): + """ + Represents binding a name to a type without an associated value. + + As long as this name is not assigned a value in another binding, it is considered + undefined for most purposes. One notable exception is using the name as a type + annotation. + """ + + def redefines(self, other): + """An Annotation doesn't define any name, so it cannot redefine one.""" + return False + + +class FunctionDefinition(Definition): + pass + + +class ClassDefinition(Definition): + pass + + +class ExportBinding(Binding): + """ + A binding created by an C{__all__} assignment. If the names in the list + can be determined statically, they will be treated as names for export and + additional checking applied to them. + + The only recognized C{__all__} assignment via list/tuple concatenation is in the + following format: + + __all__ = ['a'] + ['b'] + ['c'] + + Names which are imported and not otherwise used but appear in the value of + C{__all__} will not have an unused import warning reported for them. 
+ """ + + def __init__(self, name, source, scope): + if '__all__' in scope and isinstance(source, ast.AugAssign): + self.names = list(scope['__all__'].names) + else: + self.names = [] + + def _add_to_names(container): + for node in container.elts: + if isinstance(node, ast.Constant) and isinstance(node.value, str): + self.names.append(node.value) + + if isinstance(source.value, (ast.List, ast.Tuple)): + _add_to_names(source.value) + # If concatenating lists or tuples + elif isinstance(source.value, ast.BinOp): + currentValue = source.value + while isinstance(currentValue.right, (ast.List, ast.Tuple)): + left = currentValue.left + right = currentValue.right + _add_to_names(right) + # If more lists are being added + if isinstance(left, ast.BinOp): + currentValue = left + # If just two lists are being added + elif isinstance(left, (ast.List, ast.Tuple)): + _add_to_names(left) + # All lists accounted for - done + break + # If not list concatenation + else: + break + super().__init__(name, source) + + +class Scope(dict): + importStarred = False # set to True when import * is found + + def __repr__(self): + scope_cls = self.__class__.__name__ + return f'<{scope_cls} at 0x{id(self):x} {dict.__repr__(self)}>' + + +class ClassScope(Scope): + def __init__(self): + super().__init__() + # {name: node} + self.indirect_assignments = {} + + +class FunctionScope(Scope): + """ + I represent a name scope for a function. + + @ivar globals: Names declared 'global' in this function. + """ + usesLocals = False + alwaysUsed = {'__tracebackhide__', '__traceback_info__', + '__traceback_supplement__', '__debuggerskip__'} + + def __init__(self): + super().__init__() + # Simplify: manage the special locals as globals + self.globals = self.alwaysUsed.copy() + # {name: node} + self.indirect_assignments = {} + + def unused_assignments(self): + """ + Return a generator for the assignments which have not been used. + """ + for name, binding in self.items(): + if (not binding.used and + name != '_' and # see issue #202 + name not in self.globals and + not self.usesLocals and + isinstance(binding, Assignment)): + yield name, binding + + def unused_annotations(self): + """ + Return a generator for the annotations which have not been used. + """ + for name, binding in self.items(): + if not binding.used and isinstance(binding, Annotation): + yield name, binding + + +class TypeScope(Scope): + pass + + +class GeneratorScope(Scope): + pass + + +class ModuleScope(Scope): + """Scope for a module.""" + _futures_allowed = True + _annotations_future_enabled = False + + +class DoctestScope(ModuleScope): + """Scope for a doctest.""" + + +class DetectClassScopedMagic: + names = dir() + + +# Globally defined names which are not attributes of the builtins module, or +# are only present on some platforms. +_MAGIC_GLOBALS = ['__file__', '__builtins__', '__annotations__', 'WindowsError'] + + +def getNodeName(node): + # Returns node.id, or node.name, or None + if hasattr(node, 'id'): # One of the many nodes with an id + return node.id + if hasattr(node, 'name'): # an ExceptHandler node + return node.name + if hasattr(node, 'rest'): # a MatchMapping node + return node.rest + + +TYPING_MODULES = frozenset(('typing', 'typing_extensions')) + + +def _is_typing_helper(node, is_name_match_fn, scope_stack): + """ + Internal helper to determine whether or not something is a member of a + typing module. This is used as part of working out whether we are within a + type annotation context. + + Note: you probably don't want to use this function directly. 
Instead see the + utils below which wrap it (`_is_typing` and `_is_any_typing_member`). + """ + + def _bare_name_is_attr(name): + for scope in reversed(scope_stack): + if name in scope: + return ( + isinstance(scope[name], ImportationFrom) and + scope[name].module in TYPING_MODULES and + is_name_match_fn(scope[name].real_name) + ) + + return False + + def _module_scope_is_typing(name): + for scope in reversed(scope_stack): + if name in scope: + return ( + isinstance(scope[name], Importation) and + scope[name].fullName in TYPING_MODULES + ) + + return False + + return ( + ( + isinstance(node, ast.Name) and + _bare_name_is_attr(node.id) + ) or ( + isinstance(node, ast.Attribute) and + isinstance(node.value, ast.Name) and + _module_scope_is_typing(node.value.id) and + is_name_match_fn(node.attr) + ) + ) + + +def _is_typing(node, typing_attr, scope_stack): + """ + Determine whether `node` represents the member of a typing module specified + by `typing_attr`. + + This is used as part of working out whether we are within a type annotation + context. + """ + return _is_typing_helper(node, lambda x: x == typing_attr, scope_stack) + + +def _is_any_typing_member(node, scope_stack): + """ + Determine whether `node` represents any member of a typing module. + + This is used as part of working out whether we are within a type annotation + context. + """ + return _is_typing_helper(node, lambda x: True, scope_stack) + + +def is_typing_overload(value, scope_stack): + return ( + isinstance(value.source, (ast.FunctionDef, ast.AsyncFunctionDef)) and + any( + _is_typing(dec, 'overload', scope_stack) + for dec in value.source.decorator_list + ) + ) + + +class AnnotationState: + NONE = 0 + STRING = 1 + BARE = 2 + + +def in_annotation(func): + @functools.wraps(func) + def in_annotation_func(self, *args, **kwargs): + with self._enter_annotation(): + return func(self, *args, **kwargs) + return in_annotation_func + + +def in_string_annotation(func): + @functools.wraps(func) + def in_annotation_func(self, *args, **kwargs): + with self._enter_annotation(AnnotationState.STRING): + return func(self, *args, **kwargs) + return in_annotation_func + + +class Checker: + """I check the cleanliness and sanity of Python code.""" + + _ast_node_scope = { + ast.Module: ModuleScope, + ast.ClassDef: ClassScope, + ast.FunctionDef: FunctionScope, + ast.AsyncFunctionDef: FunctionScope, + ast.Lambda: FunctionScope, + ast.ListComp: GeneratorScope, + ast.SetComp: GeneratorScope, + ast.GeneratorExp: GeneratorScope, + ast.DictComp: GeneratorScope, + } + + nodeDepth = 0 + offset = None + _in_annotation = AnnotationState.NONE + + builtIns = set(builtin_vars).union(_MAGIC_GLOBALS) + _customBuiltIns = os.environ.get('PYFLAKES_BUILTINS') + if _customBuiltIns: + builtIns.update(_customBuiltIns.split(',')) + del _customBuiltIns + + def __init__(self, tree, filename='(none)', builtins=None, + withDoctest='PYFLAKES_DOCTEST' in os.environ, file_tokens=()): + self._nodeHandlers = {} + self._deferred = collections.deque() + self.deadScopes = [] + self.messages = [] + self.filename = filename + if builtins: + self.builtIns = self.builtIns.union(builtins) + self.withDoctest = withDoctest + self.exceptHandlers = [()] + self.root = tree + + self.scopeStack = [] + try: + scope_tp = Checker._ast_node_scope[type(tree)] + except KeyError: + raise RuntimeError('No scope implemented for the node %r' % tree) + + with self.in_scope(scope_tp): + for builtin in self.builtIns: + self.addBinding(None, Builtin(builtin)) + self.handleChildren(tree) + 
self._run_deferred() + + self.checkDeadScopes() + + if file_tokens: + warnings.warn( + '`file_tokens` will be removed in a future version', + stacklevel=2, + ) + + def deferFunction(self, callable): + """ + Schedule a function handler to be called just before completion. + + This is used for handling function bodies, which must be deferred + because code later in the file might modify the global scope. When + `callable` is called, the scope at the time this is called will be + restored, however it will contain any new bindings added to it. + """ + self._deferred.append((callable, self.scopeStack[:], self.offset)) + + def _run_deferred(self): + orig = (self.scopeStack, self.offset) + + while self._deferred: + handler, scope, offset = self._deferred.popleft() + self.scopeStack, self.offset = scope, offset + handler() + + self.scopeStack, self.offset = orig + + def _in_doctest(self): + return (len(self.scopeStack) >= 2 and + isinstance(self.scopeStack[1], DoctestScope)) + + @property + def futuresAllowed(self): + if not all(isinstance(scope, ModuleScope) + for scope in self.scopeStack): + return False + + return self.scope._futures_allowed + + @futuresAllowed.setter + def futuresAllowed(self, value): + assert value is False + if isinstance(self.scope, ModuleScope): + self.scope._futures_allowed = False + + @property + def annotationsFutureEnabled(self): + scope = self.scopeStack[0] + if not isinstance(scope, ModuleScope): + return False + return scope._annotations_future_enabled + + @annotationsFutureEnabled.setter + def annotationsFutureEnabled(self, value): + assert value is True + assert isinstance(self.scope, ModuleScope) + self.scope._annotations_future_enabled = True + + @property + def scope(self): + return self.scopeStack[-1] + + @contextlib.contextmanager + def in_scope(self, cls): + self.scopeStack.append(cls()) + try: + yield + finally: + self.deadScopes.append(self.scopeStack.pop()) + + def checkDeadScopes(self): + """ + Look at scopes which have been fully examined and report names in them + which were imported but unused. 
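+
+        For example, a module consisting only of C{import os} leaves an
+        unused Importation in its ModuleScope, and an UnusedImport message
+        is reported here.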
+ """ + for scope in self.deadScopes: + if isinstance(scope, (ClassScope, FunctionScope)): + for name, node in scope.indirect_assignments.items(): + self.report(messages.UnusedIndirectAssignment, node, name) + + # imports in classes are public members + if isinstance(scope, ClassScope): + continue + + if isinstance(scope, FunctionScope): + for name, binding in scope.unused_assignments(): + self.report(messages.UnusedVariable, binding.source, name) + for name, binding in scope.unused_annotations(): + self.report(messages.UnusedAnnotation, binding.source, name) + + all_binding = scope.get('__all__') + if all_binding and not isinstance(all_binding, ExportBinding): + all_binding = None + + if all_binding: + all_names = set(all_binding.names) + undefined = [ + name for name in all_binding.names + if name not in scope + ] + else: + all_names = undefined = [] + + if undefined: + if not scope.importStarred and \ + os.path.basename(self.filename) != '__init__.py': + # Look for possible mistakes in the export list + for name in undefined: + self.report(messages.UndefinedExport, + scope['__all__'].source, name) + + # mark all import '*' as used by the undefined in __all__ + if scope.importStarred: + from_list = [] + for binding in scope.values(): + if isinstance(binding, StarImportation): + binding.used = all_binding + from_list.append(binding.fullName) + # report * usage, with a list of possible sources + from_list = ', '.join(sorted(from_list)) + for name in undefined: + self.report(messages.ImportStarUsage, + scope['__all__'].source, name, from_list) + + # Look for imported names that aren't used. + for value in scope.values(): + if isinstance(value, Importation): + used = value.used or value.name in all_names + if not used: + messg = messages.UnusedImport + self.report(messg, value.source, str(value)) + for node in value.redefined: + if isinstance(self.getParent(node), FOR_TYPES): + messg = messages.ImportShadowedByLoopVar + elif used: + continue + else: + messg = messages.RedefinedWhileUnused + self.report(messg, node, value.name, value.source) + + def report(self, messageClass, *args, **kwargs): + self.messages.append(messageClass(self.filename, *args, **kwargs)) + + def getParent(self, node): + # Lookup the first parent which is not Tuple, List or Starred + while True: + node = node._pyflakes_parent + if not hasattr(node, 'elts') and not hasattr(node, 'ctx'): + return node + + def getCommonAncestor(self, lnode, rnode, stop): + if ( + stop in (lnode, rnode) or + not ( + hasattr(lnode, '_pyflakes_parent') and + hasattr(rnode, '_pyflakes_parent') + ) + ): + return None + if lnode is rnode: + return lnode + + if (lnode._pyflakes_depth > rnode._pyflakes_depth): + return self.getCommonAncestor(lnode._pyflakes_parent, rnode, stop) + if (lnode._pyflakes_depth < rnode._pyflakes_depth): + return self.getCommonAncestor(lnode, rnode._pyflakes_parent, stop) + return self.getCommonAncestor( + lnode._pyflakes_parent, + rnode._pyflakes_parent, + stop, + ) + + def descendantOf(self, node, ancestors, stop): + for a in ancestors: + if self.getCommonAncestor(node, a, stop): + return True + return False + + def _getAncestor(self, node, ancestor_type): + parent = node + while True: + if parent is self.root: + return None + parent = self.getParent(parent) + if isinstance(parent, ancestor_type): + return parent + + def getScopeNode(self, node): + return self._getAncestor(node, tuple(Checker._ast_node_scope.keys())) + + def differentForks(self, lnode, rnode): + """True, if lnode and rnode are located on different 
forks of IF/TRY""" + ancestor = self.getCommonAncestor(lnode, rnode, self.root) + parts = getAlternatives(ancestor) + if parts: + for items in parts: + if self.descendantOf(lnode, items, ancestor) ^ \ + self.descendantOf(rnode, items, ancestor): + return True + return False + + def addBinding(self, node, value): + """ + Called when a binding is altered. + + - `node` is the statement responsible for the change + - `value` is the new value, a Binding instance + """ + # assert value.source in (node, node._pyflakes_parent): + for scope in self.scopeStack[::-1]: + if value.name in scope: + break + existing = scope.get(value.name) + + if (existing and not isinstance(existing, Builtin) and + not self.differentForks(node, existing.source)): + + parent_stmt = self.getParent(value.source) + if isinstance(existing, Importation) and isinstance(parent_stmt, FOR_TYPES): + self.report(messages.ImportShadowedByLoopVar, + node, value.name, existing.source) + + elif scope is self.scope: + if ( + (not existing.used and value.redefines(existing)) and + (value.name != '_' or isinstance(existing, Importation)) and + not is_typing_overload(existing, self.scopeStack) + ): + self.report(messages.RedefinedWhileUnused, + node, value.name, existing.source) + + if isinstance(scope, (ClassScope, FunctionScope)): + scope.indirect_assignments.pop(value.name, None) + + elif isinstance(existing, Importation) and value.redefines(existing): + existing.redefined.append(node) + + if value.name in self.scope: + # then assume the rebound name is used as a global or within a loop + value.used = self.scope[value.name].used + + # don't treat annotations as assignments if there is an existing value + # in scope + if value.name not in self.scope or not isinstance(value, Annotation): + if isinstance(value, NamedExprAssignment): + # PEP 572: use scope in which outermost generator is defined + scope = next( + scope + for scope in reversed(self.scopeStack) + if not isinstance(scope, GeneratorScope) + ) + if value.name in scope and isinstance(scope[value.name], Annotation): + # re-assignment to name that was previously only an annotation + scope[value.name] = value + else: + # it may be a re-assignment to an already existing name + scope.setdefault(value.name, value) + else: + self.scope[value.name] = value + + def _unknown_handler(self, node): + # this environment variable configures whether to error on unknown + # ast types. + # + # this is silent by default but the error is enabled for the pyflakes + # testsuite. + # + # this allows new syntax to be added to python without *requiring* + # changes from the pyflakes side. but will still produce an error + # in the pyflakes testsuite (so more specific handling can be added if + # needed). + if os.environ.get('PYFLAKES_ERROR_UNKNOWN'): + raise NotImplementedError(f'Unexpected type: {type(node)}') + else: + self.handleChildren(node) + + def getNodeHandler(self, node_class): + try: + return self._nodeHandlers[node_class] + except KeyError: + nodeType = node_class.__name__.upper() + self._nodeHandlers[node_class] = handler = getattr( + self, nodeType, self._unknown_handler, + ) + return handler + + def handleNodeLoad(self, node, parent): + name = getNodeName(node) + if not name: + return + + # only the following can access class scoped variables (since classes + # aren't really a scope) + # - direct accesses (not within a nested scope) + # - generators + # - type annotations (for generics, etc.) 
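+        # e.g. (illustrative):
+        #     class C:
+        #         T = int
+        #         def f(self, x: T) -> None: ...   # annotation: may see T
+        #         def g(self): return T            # nested body: may not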
+ can_access_class_vars = None + importStarred = None + + # try enclosing function scopes and global scope + for scope in self.scopeStack[-1::-1]: + if isinstance(scope, ClassScope): + if name == '__class__': + return + elif can_access_class_vars is False: + # only generators used in a class scope can access the + # names of the class. this is skipped during the first + # iteration + continue + + binding = scope.get(name, None) + if isinstance(binding, Annotation) and not self._in_postponed_annotation: + scope[name].used = (self.scope, node) + continue + + if name == 'print' and isinstance(binding, Builtin): + if (isinstance(parent, ast.BinOp) and + isinstance(parent.op, ast.RShift)): + self.report(messages.InvalidPrintSyntax, node) + + try: + scope[name].used = (self.scope, node) + + # if the name of SubImportation is same as + # alias of other Importation and the alias + # is used, SubImportation also should be marked as used. + n = scope[name] + if isinstance(n, Importation) and n._has_alias(): + try: + scope[n.fullName].used = (self.scope, node) + except KeyError: + pass + except KeyError: + pass + else: + return + + importStarred = importStarred or scope.importStarred + + if can_access_class_vars is not False: + can_access_class_vars = isinstance( + scope, (TypeScope, GeneratorScope), + ) + + if importStarred: + from_list = [] + + for scope in self.scopeStack[-1::-1]: + for binding in scope.values(): + if isinstance(binding, StarImportation): + # mark '*' imports as used for each scope + binding.used = (self.scope, node) + from_list.append(binding.fullName) + + # report * usage, with a list of possible sources + from_list = ', '.join(sorted(from_list)) + self.report(messages.ImportStarUsage, node, name, from_list) + return + + if name == '__path__' and os.path.basename(self.filename) == '__init__.py': + # the special name __path__ is valid only in packages + return + + if name in DetectClassScopedMagic.names and isinstance(self.scope, ClassScope): + return + + # protected with a NameError handler? 
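+        # e.g. (illustrative): a probe such as
+        #     try:
+        #         maybe_undefined
+        #     except NameError:
+        #         maybe_undefined = None
+        # is deliberate and should not be reported.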
+ if 'NameError' not in self.exceptHandlers[-1]: + self.report(messages.UndefinedName, node, name) + + def handleNodeStore(self, node): + name = getNodeName(node) + if not name: + return + # if the name hasn't already been defined in the current scope + if isinstance(self.scope, FunctionScope) and name not in self.scope: + # for each function or module scope above us + for scope in self.scopeStack[:-1]: + if not isinstance(scope, (FunctionScope, ModuleScope)): + continue + # if the name was defined in that scope, and the name has + # been accessed already in the current scope, and hasn't + # been declared global + used = name in scope and scope[name].used + if used and used[0] is self.scope and name not in self.scope.globals: + # then it's probably a mistake + self.report(messages.UndefinedLocal, + scope[name].used[1], name, scope[name].source) + break + + parent_stmt = self.getParent(node) + if isinstance(parent_stmt, ast.AnnAssign) and parent_stmt.value is None: + binding = Annotation(name, node) + elif isinstance(parent_stmt, (FOR_TYPES, ast.comprehension)) or ( + parent_stmt != node._pyflakes_parent and + not self.isLiteralTupleUnpacking(parent_stmt)): + binding = Binding(name, node) + elif ( + name == '__all__' and + isinstance(self.scope, ModuleScope) and + isinstance( + node._pyflakes_parent, + (ast.Assign, ast.AugAssign, ast.AnnAssign) + ) + ): + binding = ExportBinding(name, node._pyflakes_parent, self.scope) + elif isinstance(parent_stmt, ast.NamedExpr): + binding = NamedExprAssignment(name, node) + else: + binding = Assignment(name, node) + self.addBinding(node, binding) + + def handleNodeDelete(self, node): + + def on_conditional_branch(): + """ + Return `True` if node is part of a conditional body. + """ + current = getattr(node, '_pyflakes_parent', None) + while current: + if isinstance(current, (ast.If, ast.While, ast.IfExp)): + return True + current = getattr(current, '_pyflakes_parent', None) + return False + + name = getNodeName(node) + if not name: + return + + if on_conditional_branch(): + # We cannot predict if this conditional branch is going to + # be executed. + return + + if isinstance(self.scope, (ClassScope, FunctionScope)): + self.scope.indirect_assignments.pop(name, None) + + if isinstance(self.scope, FunctionScope) and name in self.scope.globals: + self.scope.globals.remove(name) + else: + try: + del self.scope[name] + except KeyError: + self.report(messages.UndefinedName, node, name) + + @contextlib.contextmanager + def _enter_annotation(self, ann_type=AnnotationState.BARE): + orig, self._in_annotation = self._in_annotation, ann_type + try: + yield + finally: + self._in_annotation = orig + + @property + def _in_postponed_annotation(self): + return ( + self._in_annotation == AnnotationState.STRING or + ( + self._in_annotation == AnnotationState.BARE and + (self.annotationsFutureEnabled or sys.version_info >= (3, 14)) + ) + ) + + def handleChildren(self, tree, omit=None): + for node in iter_child_nodes(tree, omit=omit): + self.handleNode(node, tree) + + def isLiteralTupleUnpacking(self, node): + if isinstance(node, ast.Assign): + for child in node.targets + [node.value]: + if not hasattr(child, 'elts'): + return False + return True + + def isDocstring(self, node): + """ + Determine if the given node is a docstring, as long as it is at the + correct place in the node tree. 
+        """
+        return (
+            isinstance(node, ast.Expr) and
+            isinstance(node.value, ast.Constant) and
+            isinstance(node.value.value, str)
+        )
+
+    def getDocstring(self, node):
+        if (
+            isinstance(node, ast.Expr) and
+            isinstance(node.value, ast.Constant) and
+            isinstance(node.value.value, str)
+        ):
+            return node.value.value, node.lineno - 1
+        else:
+            return None, None
+
+    def handleNode(self, node, parent):
+        if node is None:
+            return
+        if self.offset and getattr(node, 'lineno', None) is not None:
+            node.lineno += self.offset[0]
+            node.col_offset += self.offset[1]
+        if (
+            self.futuresAllowed and
+            self.nodeDepth == 0 and
+            not isinstance(node, ast.ImportFrom) and
+            not self.isDocstring(node)
+        ):
+            self.futuresAllowed = False
+        self.nodeDepth += 1
+        node._pyflakes_depth = self.nodeDepth
+        node._pyflakes_parent = parent
+        try:
+            handler = self.getNodeHandler(node.__class__)
+            handler(node)
+        finally:
+            self.nodeDepth -= 1
+
+    _getDoctestExamples = doctest.DocTestParser().get_examples
+
+    def handleDoctests(self, node):
+        try:
+            (docstring, node_lineno) = self.getDocstring(node.body[0])
+            examples = docstring and self._getDoctestExamples(docstring)
+        except (ValueError, IndexError):
+            # e.g. line 6 of the docstring for <string> has inconsistent
+            # leading whitespace: ...
+            return
+        if not examples:
+            return
+
+        # Place doctest in module scope
+        saved_stack = self.scopeStack
+        self.scopeStack = [self.scopeStack[0]]
+        node_offset = self.offset or (0, 0)
+        with self.in_scope(DoctestScope):
+            if '_' not in self.scopeStack[0]:
+                self.addBinding(None, Builtin('_'))
+            for example in examples:
+                try:
+                    tree = ast.parse(example.source, "<doctest>")
+                except SyntaxError as e:
+                    position = (node_lineno + example.lineno + e.lineno,
+                                example.indent + 4 + (e.offset or 0))
+                    self.report(messages.DoctestSyntaxError, node, position)
+                else:
+                    self.offset = (node_offset[0] + node_lineno + example.lineno,
+                                   node_offset[1] + example.indent + 4)
+                    self.handleChildren(tree)
+                    self.offset = node_offset
+        self.scopeStack = saved_stack
+
+    @in_string_annotation
+    def handleStringAnnotation(self, s, node, ref_lineno, ref_col_offset, err):
+        try:
+            tree = ast.parse(s)
+        except SyntaxError:
+            self.report(err, node, s)
+            return
+
+        body = tree.body
+        if len(body) != 1 or not isinstance(body[0], ast.Expr):
+            self.report(err, node, s)
+            return
+
+        parsed_annotation = tree.body[0].value
+        for descendant in ast.walk(parsed_annotation):
+            if (
+                'lineno' in descendant._attributes and
+                'col_offset' in descendant._attributes
+            ):
+                descendant.lineno = ref_lineno
+                descendant.col_offset = ref_col_offset
+
+        self.handleNode(parsed_annotation, node)
+
+    def handle_annotation_always_deferred(self, annotation, parent):
+        fn = in_annotation(Checker.handleNode)
+        self.deferFunction(lambda: fn(self, annotation, parent))
+
+    @in_annotation
+    def handleAnnotation(self, annotation, node):
+        if (
+            isinstance(annotation, ast.Constant) and
+            isinstance(annotation.value, str)
+        ):
+            # Defer handling forward annotation.
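+            # e.g. (illustrative): for `x: "Queue[int]"` the string is
+            # parsed and checked only after the whole module has been
+            # traversed, so `Queue` may be defined further down the file.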
+            self.deferFunction(functools.partial(
+                self.handleStringAnnotation,
+                annotation.value,
+                node,
+                annotation.lineno,
+                annotation.col_offset,
+                messages.ForwardAnnotationSyntaxError,
+            ))
+        elif self.annotationsFutureEnabled or sys.version_info >= (3, 14):
+            self.handle_annotation_always_deferred(annotation, node)
+        else:
+            self.handleNode(annotation, node)
+
+    def ignore(self, node):
+        pass
+
+    # "stmt" type nodes
+    DELETE = FOR = ASYNCFOR = WHILE = WITH = WITHITEM = ASYNCWITH = \
+        EXPR = ASSIGN = handleChildren
+
+    PASS = ignore
+
+    # "expr" type nodes
+    BOOLOP = UNARYOP = SET = ATTRIBUTE = STARRED = NAMECONSTANT = \
+        NAMEDEXPR = handleChildren
+
+    def SUBSCRIPT(self, node):
+        if _is_name_or_attr(node.value, 'Literal'):
+            with self._enter_annotation(AnnotationState.NONE):
+                self.handleChildren(node)
+        elif _is_name_or_attr(node.value, 'Annotated'):
+            self.handleNode(node.value, node)
+
+            # py39+
+            if isinstance(node.slice, ast.Tuple):
+                slice_tuple = node.slice
+            # <py39
+            else:
+                slice_tuple = node.slice.value
+
+            # not a multi-arg `Annotated`
+            if not isinstance(slice_tuple, ast.Tuple):
+                self.handleNode(node.slice, node)
+            else:
+                # the first argument is the type
+                self.handleNode(slice_tuple.elts[0], node)
+                # the rest of the arguments are not
+                with self._enter_annotation(AnnotationState.NONE):
+                    for arg in slice_tuple.elts[1:]:
+                        self.handleNode(arg, node)
+
+            self.handleNode(node.ctx, node)
+        else:
+            if _is_any_typing_member(node.value, self.scopeStack):
+                with self._enter_annotation():
+                    self.handleChildren(node)
+            else:
+                self.handleChildren(node)
+
+    def CALL(self, node):
+        omit = []
+        annotated = []
+        not_annotated = []
+
+        if (
+            _is_typing(node.func, 'cast', self.scopeStack) and
+            len(node.args) >= 1
+        ):
+            with self._enter_annotation():
+                self.handleNode(node.args[0], node)
+
+        elif _is_typing(node.func, 'TypeVar', self.scopeStack):
+
+            # TypeVar("T", "int", "str")
+            omit += ["args"]
+            annotated += [arg for arg in node.args[1:]]
+
+            # TypeVar("T", bound="str")
+            omit += ["keywords"]
+            annotated += [k.value for k in node.keywords if k.arg == "bound"]
+            not_annotated += [
+                (k, ["value"] if k.arg == "bound" else None)
+                for k in node.keywords
+            ]
+
+        elif _is_typing(node.func, "TypedDict", self.scopeStack):
+            # TypedDict("a", {"a": int})
+            if len(node.args) > 1 and isinstance(node.args[1], ast.Dict):
+                omit += ["args"]
+                annotated += node.args[1].values
+                not_annotated += [
+                    (arg, ["values"] if i == 1 else None)
+                    for i, arg in enumerate(node.args)
+                ]
+
+            # TypedDict("a", a=int)
+            omit += ["keywords"]
+            annotated += [k.value for k in node.keywords]
+            not_annotated += [(k, ["value"]) for k in node.keywords]
+
+        elif _is_typing(node.func, "NamedTuple", self.scopeStack):
+            # NamedTuple("a", [("a", int)])
+            if (
+                len(node.args) > 1 and
+                isinstance(node.args[1], (ast.Tuple, ast.List)) and
+                all(isinstance(x, (ast.Tuple, ast.List)) and
+                    len(x.elts) == 2 for x in node.args[1].elts)
+            ):
+                omit += ["args"]
+                annotated += [elt.elts[1] for elt in node.args[1].elts]
+                not_annotated += [(elt.elts[0], None) for elt in node.args[1].elts]
+                not_annotated += [
+                    (arg, ["elts"] if i == 1 else None)
+                    for i, arg in enumerate(node.args)
+                ]
+                not_annotated += [(elt, "elts") for elt in node.args[1].elts]
+
+            # NamedTuple("a", a=int)
+            omit += ["keywords"]
+            annotated += [k.value for k in node.keywords]
+            not_annotated += [(k, ["value"]) for k in node.keywords]
+
+        if omit:
+            with self._enter_annotation(AnnotationState.NONE):
+                for na_node, na_omit in not_annotated:
+                    self.handleChildren(na_node, omit=na_omit)
+                self.handleChildren(node, omit=omit)
+
+            with self._enter_annotation():
+                for annotated_node in annotated:
+                    self.handleNode(annotated_node, node)
+        else:
+            self.handleChildren(node)
+
+    def _handle_percent_format(self, node):
+        try:
+            placeholders = parse_percent_format(node.left.value)
+        except ValueError:
+            self.report(
+                messages.PercentFormatInvalidFormat,
+                node,
+                'incomplete format',
+            )
+            return
+
+        named = set()
+        positional_count = 0
+        positional = None
+        for _, placeholder in placeholders:
+            if placeholder is None:
+                continue
+            name, _, width, precision, conversion = placeholder
+
+            if conversion == '%':
+                continue
+
+            if conversion not in VALID_CONVERSIONS:
+                self.report(
messages.PercentFormatUnsupportedFormatCharacter, + node, + conversion, + ) + + if positional is None and conversion: + positional = name is None + + for part in (width, precision): + if part is not None and '*' in part: + if not positional: + self.report( + messages.PercentFormatStarRequiresSequence, + node, + ) + else: + positional_count += 1 + + if positional and name is not None: + self.report( + messages.PercentFormatMixedPositionalAndNamed, + node, + ) + return + elif not positional and name is None: + self.report( + messages.PercentFormatMixedPositionalAndNamed, + node, + ) + return + + if positional: + positional_count += 1 + else: + named.add(name) + + if ( + isinstance(node.right, (ast.List, ast.Tuple)) and + # does not have any *splats (py35+ feature) + not any( + isinstance(elt, ast.Starred) + for elt in node.right.elts + ) + ): + substitution_count = len(node.right.elts) + if positional and positional_count != substitution_count: + self.report( + messages.PercentFormatPositionalCountMismatch, + node, + positional_count, + substitution_count, + ) + elif not positional: + self.report(messages.PercentFormatExpectedMapping, node) + + if ( + isinstance(node.right, ast.Dict) and + all( + isinstance(k, ast.Constant) and isinstance(k.value, str) + for k in node.right.keys + ) + ): + if positional and positional_count > 1: + self.report(messages.PercentFormatExpectedSequence, node) + return + + substitution_keys = {k.value for k in node.right.keys} + extra_keys = substitution_keys - named + missing_keys = named - substitution_keys + if not positional and extra_keys: + self.report( + messages.PercentFormatExtraNamedArguments, + node, + ', '.join(sorted(extra_keys)), + ) + if not positional and missing_keys: + self.report( + messages.PercentFormatMissingArgument, + node, + ', '.join(sorted(missing_keys)), + ) + + def BINOP(self, node): + if ( + isinstance(node.op, ast.Mod) and + isinstance(node.left, ast.Constant) and + isinstance(node.left.value, str) + ): + self._handle_percent_format(node) + self.handleChildren(node) + + def CONSTANT(self, node): + if isinstance(node.value, str) and self._in_annotation: + fn = functools.partial( + self.handleStringAnnotation, + node.value, + node, + node.lineno, + node.col_offset, + messages.ForwardAnnotationSyntaxError, + ) + self.deferFunction(fn) + + # "slice" type nodes + SLICE = EXTSLICE = INDEX = handleChildren + + # expression contexts are node instances too, though being constants + LOAD = STORE = DEL = AUGLOAD = AUGSTORE = PARAM = ignore + + # same for operators + AND = OR = ADD = SUB = MULT = DIV = MOD = POW = LSHIFT = RSHIFT = \ + BITOR = BITXOR = BITAND = FLOORDIV = INVERT = NOT = UADD = USUB = \ + EQ = NOTEQ = LT = LTE = GT = GTE = IS = ISNOT = IN = NOTIN = \ + MATMULT = ignore + + def RAISE(self, node): + self.handleChildren(node) + + arg = node.exc + + if isinstance(arg, ast.Call): + if is_notimplemented_name_node(arg.func): + # Handle "raise NotImplemented(...)" + self.report(messages.RaiseNotImplemented, node) + elif is_notimplemented_name_node(arg): + # Handle "raise NotImplemented" + self.report(messages.RaiseNotImplemented, node) + + # additional node types + COMPREHENSION = KEYWORD = FORMATTEDVALUE = handleChildren + + _in_fstring = False + + def JOINEDSTR(self, node): + if ( + # the conversion / etc. 
flags are parsed as f-strings without + # placeholders + not self._in_fstring and + not any(isinstance(x, ast.FormattedValue) for x in node.values) + ): + self.report(messages.FStringMissingPlaceholders, node) + + self._in_fstring, orig = True, self._in_fstring + try: + self.handleChildren(node) + finally: + self._in_fstring = orig + + def TEMPLATESTR(self, node): + if not any(isinstance(x, ast.Interpolation) for x in node.values): + self.report(messages.TStringMissingPlaceholders, node) + + # similar to f-strings, conversion / etc. flags are parsed as f-strings + # without placeholders + self._in_fstring, orig = True, self._in_fstring + try: + self.handleChildren(node) + finally: + self._in_fstring = orig + + INTERPOLATION = handleChildren + + def DICT(self, node): + # Complain if there are duplicate keys with different values + # If they have the same value it's not going to cause potentially + # unexpected behaviour so we'll not complain. + keys = [ + convert_to_value(key) for key in node.keys + ] + + key_counts = collections.Counter(keys) + duplicate_keys = [ + key for key, count in key_counts.items() + if count > 1 + ] + + for key in duplicate_keys: + key_indices = [i for i, i_key in enumerate(keys) if i_key == key] + + values = collections.Counter( + convert_to_value(node.values[index]) + for index in key_indices + ) + if any(count == 1 for value, count in values.items()): + for key_index in key_indices: + key_node = node.keys[key_index] + if isinstance(key, VariableKey): + self.report(messages.MultiValueRepeatedKeyVariable, + key_node, + key.name) + else: + self.report( + messages.MultiValueRepeatedKeyLiteral, + key_node, + key, + ) + self.handleChildren(node) + + def IF(self, node): + if isinstance(node.test, ast.Tuple) and node.test.elts != []: + self.report(messages.IfTuple, node) + self.handleChildren(node) + + IFEXP = IF + + def ASSERT(self, node): + if isinstance(node.test, ast.Tuple) and node.test.elts != []: + self.report(messages.AssertTuple, node) + self.handleChildren(node) + + def GLOBAL(self, node): + """ + Keep track of globals declarations. + """ + global_scope_index = 1 if self._in_doctest() else 0 + global_scope = self.scopeStack[global_scope_index] + + # Ignore 'global' statement in global scope. + if self.scope is not global_scope: + + # One 'global' statement can bind multiple (comma-delimited) names. + for node_name in node.names: + node_value = Assignment(node_name, node) + + # Remove UndefinedName messages already reported for this name. + # TODO: if the global is not used in this scope, it does not + # become a globally defined name. See test_unused_global. + self.messages = [ + m for m in self.messages if not + isinstance(m, messages.UndefinedName) or + m.message_args[0] != node_name] + + # Bind name to global scope if it doesn't exist already. + global_scope.setdefault(node_name, node_value) + + # Bind name to non-global scopes, but as already "used". + node_value.used = (global_scope, node) + for scope in self.scopeStack[global_scope_index + 1:]: + scope[node_name] = node_value + + self.scope.indirect_assignments[node_name] = node + + NONLOCAL = GLOBAL + + def GENERATOREXP(self, node): + with self.in_scope(GeneratorScope): + self.handleChildren(node) + + LISTCOMP = DICTCOMP = SETCOMP = GENERATOREXP + + def NAME(self, node): + """ + Handle occurrence of Name (which can be a load/store/delete access.) + """ + # Locate the name in locals / function / globals scopes. 
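+        # e.g. (illustrative): in `x = y` followed by `del x`, `y` is a
+        # Load, the first `x` is a Store, and the second `x` is a Del.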
+ if isinstance(node.ctx, ast.Load): + self.handleNodeLoad(node, self.getParent(node)) + if (node.id == 'locals' and isinstance(self.scope, FunctionScope) and + isinstance(node._pyflakes_parent, ast.Call)): + # we are doing locals() call in current scope + self.scope.usesLocals = True + elif isinstance(node.ctx, ast.Store): + self.handleNodeStore(node) + elif isinstance(node.ctx, ast.Del): + self.handleNodeDelete(node) + else: + # Unknown context + raise RuntimeError(f"Got impossible expression context: {node.ctx!r}") + + def CONTINUE(self, node): + # Walk the tree up until we see a loop (OK), a function or class + # definition (not OK), for 'continue', a finally block (not OK), or + # the top module scope (not OK) + n = node + while hasattr(n, '_pyflakes_parent'): + n, n_child = n._pyflakes_parent, n + if isinstance(n, (ast.While, ast.For, ast.AsyncFor)): + # Doesn't apply unless it's in the loop itself + if n_child not in n.orelse: + return + if isinstance(n, (ast.FunctionDef, ast.ClassDef)): + break + if isinstance(node, ast.Continue): + self.report(messages.ContinueOutsideLoop, node) + else: # ast.Break + self.report(messages.BreakOutsideLoop, node) + + BREAK = CONTINUE + + def RETURN(self, node): + if isinstance(self.scope, (ClassScope, ModuleScope)): + self.report(messages.ReturnOutsideFunction, node) + return + + self.handleNode(node.value, node) + + def YIELD(self, node): + if isinstance(self.scope, (ClassScope, ModuleScope)): + self.report(messages.YieldOutsideFunction, node) + return + + self.handleNode(node.value, node) + + AWAIT = YIELDFROM = YIELD + + def FUNCTIONDEF(self, node): + for deco in node.decorator_list: + self.handleNode(deco, node) + + with self._type_param_scope(node): + self.LAMBDA(node) + + self.addBinding(node, FunctionDefinition(node.name, node)) + # doctest does not process doctest within a doctest, + # or in nested functions. + if (self.withDoctest and + not self._in_doctest() and + not isinstance(self.scope, FunctionScope)): + self.deferFunction(lambda: self.handleDoctests(node)) + + ASYNCFUNCTIONDEF = FUNCTIONDEF + + def LAMBDA(self, node): + args = [] + annotations = [] + + for arg in node.args.posonlyargs: + args.append(arg.arg) + annotations.append(arg.annotation) + for arg in node.args.args + node.args.kwonlyargs: + args.append(arg.arg) + annotations.append(arg.annotation) + defaults = node.args.defaults + node.args.kw_defaults + + has_annotations = not isinstance(node, ast.Lambda) + + for arg_name in ('vararg', 'kwarg'): + wildcard = getattr(node.args, arg_name) + if not wildcard: + continue + args.append(wildcard.arg) + if has_annotations: + annotations.append(wildcard.annotation) + + if has_annotations: + annotations.append(node.returns) + + if len(set(args)) < len(args): + for (idx, arg) in enumerate(args): + if arg in args[:idx]: + self.report(messages.DuplicateArgument, node, arg) + + for annotation in annotations: + self.handleAnnotation(annotation, node) + + for default in defaults: + self.handleNode(default, node) + + def runFunction(): + with self.in_scope(FunctionScope): + self.handleChildren( + node, + omit=('decorator_list', 'returns', 'type_params'), + ) + + self.deferFunction(runFunction) + + def ARGUMENTS(self, node): + self.handleChildren(node, omit=('defaults', 'kw_defaults')) + + def ARG(self, node): + self.addBinding(node, Argument(node.arg, self.getScopeNode(node))) + + def CLASSDEF(self, node): + """ + Check names used in a class definition, including its decorators, base + classes, and the body of its definition. 
Additionally, add its name to + the current scope. + """ + for deco in node.decorator_list: + self.handleNode(deco, node) + + with self._type_param_scope(node): + for baseNode in node.bases: + self.handleNode(baseNode, node) + for keywordNode in node.keywords: + self.handleNode(keywordNode, node) + with self.in_scope(ClassScope): + # doctest does not process doctest within a doctest + # classes within classes are processed. + if (self.withDoctest and + not self._in_doctest() and + not isinstance(self.scope, FunctionScope)): + self.deferFunction(lambda: self.handleDoctests(node)) + for stmt in node.body: + self.handleNode(stmt, node) + + self.addBinding(node, ClassDefinition(node.name, node)) + + def AUGASSIGN(self, node): + self.handleNodeLoad(node.target, node) + self.handleNode(node.value, node) + self.handleNode(node.target, node) + + def TUPLE(self, node): + if isinstance(node.ctx, ast.Store): + # Python 3 advanced tuple unpacking: a, *b, c = d. + # Only one starred expression is allowed, and no more than 1<<8 + # assignments are allowed before a stared expression. There is + # also a limit of 1<<24 expressions after the starred expression, + # which is impossible to test due to memory restrictions, but we + # add it here anyway + has_starred = False + star_loc = -1 + for i, n in enumerate(node.elts): + if isinstance(n, ast.Starred): + if has_starred: + self.report(messages.TwoStarredExpressions, node) + # The SyntaxError doesn't distinguish two from more + # than two. + break + has_starred = True + star_loc = i + if star_loc >= 1 << 8 or len(node.elts) - star_loc - 1 >= 1 << 24: + self.report(messages.TooManyExpressionsInStarredAssignment, node) + self.handleChildren(node) + + LIST = TUPLE + + def IMPORT(self, node): + for alias in node.names: + if '.' in alias.name and not alias.asname: + importation = SubmoduleImportation(alias.name, node) + else: + name = alias.asname or alias.name + importation = Importation(name, node, alias.name) + self.addBinding(node, importation) + + def IMPORTFROM(self, node): + if node.module == '__future__': + if not self.futuresAllowed: + self.report(messages.LateFutureImport, node) + else: + self.futuresAllowed = False + + module = ('.' 
* node.level) + (node.module or '') + + for alias in node.names: + name = alias.asname or alias.name + if node.module == '__future__': + importation = FutureImportation(name, node, self.scope) + if alias.name not in __future__.all_feature_names: + self.report(messages.FutureFeatureNotDefined, + node, alias.name) + if alias.name == 'annotations': + self.annotationsFutureEnabled = True + elif alias.name == '*': + if not isinstance(self.scope, ModuleScope): + self.report(messages.ImportStarNotPermitted, + node, module) + continue + + self.scope.importStarred = True + self.report(messages.ImportStarUsed, node, module) + importation = StarImportation(module, node) + else: + importation = ImportationFrom(name, node, + module, alias.name) + self.addBinding(node, importation) + + def TRY(self, node): + handler_names = [] + # List the exception handlers + for i, handler in enumerate(node.handlers): + if isinstance(handler.type, ast.Tuple): + for exc_type in handler.type.elts: + handler_names.append(getNodeName(exc_type)) + elif handler.type: + handler_names.append(getNodeName(handler.type)) + + if handler.type is None and i < len(node.handlers) - 1: + self.report(messages.DefaultExceptNotLast, handler) + # Memorize the except handlers and process the body + self.exceptHandlers.append(handler_names) + for child in node.body: + self.handleNode(child, node) + self.exceptHandlers.pop() + # Process the other nodes: "except:", "else:", "finally:" + self.handleChildren(node, omit='body') + + TRYSTAR = TRY + + def EXCEPTHANDLER(self, node): + if node.name is None: + self.handleChildren(node) + return + + # If the name already exists in the scope, modify state of existing + # binding. + if node.name in self.scope: + self.handleNodeStore(node) + + # 3.x: the name of the exception, which is not a Name node, but a + # simple string, creates a local that is only bound within the scope of + # the except: block. As such, temporarily remove the existing binding + # to more accurately determine if the name is used in the except: + # block. + + try: + prev_definition = self.scope.pop(node.name) + except KeyError: + prev_definition = None + + self.handleNodeStore(node) + self.handleChildren(node) + + # See discussion on https://github.com/PyCQA/pyflakes/pull/59 + + # We're removing the local name since it's being unbound after leaving + # the except: block and it's always unbound if the except: block is + # never entered. This will cause an "undefined name" error raised if + # the checked code tries to use the name afterwards. + # + # Unless it's been removed already. Then do nothing. + + try: + binding = self.scope.pop(node.name) + except KeyError: + pass + else: + if not binding.used: + self.report(messages.UnusedVariable, node, node.name) + + # Restore. + if prev_definition: + self.scope[node.name] = prev_definition + + def ANNASSIGN(self, node): + self.handleAnnotation(node.annotation, node) + # If the assignment has value, handle the *value* now. + if node.value: + # If the annotation is `TypeAlias`, handle the *value* as an annotation. 
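+            # e.g. (illustrative):
+            #     Vector: TypeAlias = 'list[float]'
+            # where the quoted value is itself a forward type reference.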
+ if _is_typing(node.annotation, 'TypeAlias', self.scopeStack): + self.handleAnnotation(node.value, node) + else: + self.handleNode(node.value, node) + self.handleNode(node.target, node) + + def COMPARE(self, node): + left = node.left + for op, right in zip(node.ops, node.comparators): + if ( + isinstance(op, (ast.Is, ast.IsNot)) and ( + _is_const_non_singleton(left) or + _is_const_non_singleton(right) + ) + ): + self.report(messages.IsLiteral, node) + left = right + + self.handleChildren(node) + + MATCH = MATCH_CASE = MATCHCLASS = MATCHOR = MATCHSEQUENCE = handleChildren + MATCHSINGLETON = MATCHVALUE = handleChildren + + def _match_target(self, node): + self.handleNodeStore(node) + self.handleChildren(node) + + MATCHAS = MATCHMAPPING = MATCHSTAR = _match_target + + @contextlib.contextmanager + def _type_param_scope(self, node): + with contextlib.ExitStack() as ctx: + if sys.version_info >= (3, 12): + ctx.enter_context(self.in_scope(TypeScope)) + for param in node.type_params: + self.handleNode(param, node) + yield + + def TYPEVAR(self, node): + self.handleNodeStore(node) + self.handle_annotation_always_deferred(node.bound, node) + + PARAMSPEC = TYPEVARTUPLE = handleNodeStore + + def TYPEALIAS(self, node): + self.handleNode(node.name, node) + with self._type_param_scope(node): + self.handle_annotation_always_deferred(node.value, node) diff --git a/py311/lib/python3.11/site-packages/pyflakes/messages.py b/py311/lib/python3.11/site-packages/pyflakes/messages.py new file mode 100644 index 0000000000000000000000000000000000000000..405dc72f66875ad77431b03e0dd018773f0c1f8c --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyflakes/messages.py @@ -0,0 +1,362 @@ +""" +Provide the class Message and its subclasses. +""" + + +class Message: + message = '' + message_args = () + + def __init__(self, filename, loc): + self.filename = filename + self.lineno = loc.lineno + self.col = loc.col_offset + + def __str__(self): + return '{}:{}:{}: {}'.format(self.filename, self.lineno, self.col+1, + self.message % self.message_args) + + +class UnusedImport(Message): + message = '%r imported but unused' + + def __init__(self, filename, loc, name): + Message.__init__(self, filename, loc) + self.message_args = (name,) + + +class RedefinedWhileUnused(Message): + message = 'redefinition of unused %r from line %r' + + def __init__(self, filename, loc, name, orig_loc): + Message.__init__(self, filename, loc) + self.message_args = (name, orig_loc.lineno) + + +class ImportShadowedByLoopVar(Message): + message = 'import %r from line %r shadowed by loop variable' + + def __init__(self, filename, loc, name, orig_loc): + Message.__init__(self, filename, loc) + self.message_args = (name, orig_loc.lineno) + + +class ImportStarNotPermitted(Message): + message = "'from %s import *' only allowed at module level" + + def __init__(self, filename, loc, modname): + Message.__init__(self, filename, loc) + self.message_args = (modname,) + + +class ImportStarUsed(Message): + message = "'from %s import *' used; unable to detect undefined names" + + def __init__(self, filename, loc, modname): + Message.__init__(self, filename, loc) + self.message_args = (modname,) + + +class ImportStarUsage(Message): + message = "%r may be undefined, or defined from star imports: %s" + + def __init__(self, filename, loc, name, from_list): + Message.__init__(self, filename, loc) + self.message_args = (name, from_list) + + +class UndefinedName(Message): + message = 'undefined name %r' + + def __init__(self, filename, loc, name): + 
Message.__init__(self, filename, loc) + self.message_args = (name,) + + +class DoctestSyntaxError(Message): + message = 'syntax error in doctest' + + def __init__(self, filename, loc, position=None): + Message.__init__(self, filename, loc) + if position: + (self.lineno, self.col) = position + self.message_args = () + + +class UndefinedExport(Message): + message = 'undefined name %r in __all__' + + def __init__(self, filename, loc, name): + Message.__init__(self, filename, loc) + self.message_args = (name,) + + +class UndefinedLocal(Message): + message = 'local variable %r {0} referenced before assignment' + + default = 'defined in enclosing scope on line %r' + builtin = 'defined as a builtin' + + def __init__(self, filename, loc, name, orig_loc): + Message.__init__(self, filename, loc) + if orig_loc is None: + self.message = self.message.format(self.builtin) + self.message_args = name + else: + self.message = self.message.format(self.default) + self.message_args = (name, orig_loc.lineno) + + +class DuplicateArgument(Message): + message = 'duplicate argument %r in function definition' + + def __init__(self, filename, loc, name): + Message.__init__(self, filename, loc) + self.message_args = (name,) + + +class MultiValueRepeatedKeyLiteral(Message): + message = 'dictionary key %r repeated with different values' + + def __init__(self, filename, loc, key): + Message.__init__(self, filename, loc) + self.message_args = (key,) + + +class MultiValueRepeatedKeyVariable(Message): + message = 'dictionary key variable %s repeated with different values' + + def __init__(self, filename, loc, key): + Message.__init__(self, filename, loc) + self.message_args = (key,) + + +class LateFutureImport(Message): + message = 'from __future__ imports must occur at the beginning of the file' + + +class FutureFeatureNotDefined(Message): + """An undefined __future__ feature name was imported.""" + message = 'future feature %s is not defined' + + def __init__(self, filename, loc, name): + Message.__init__(self, filename, loc) + self.message_args = (name,) + + +class UnusedVariable(Message): + """ + Indicates that a variable has been explicitly assigned to but not actually + used. + """ + message = 'local variable %r is assigned to but never used' + + def __init__(self, filename, loc, names): + Message.__init__(self, filename, loc) + self.message_args = (names,) + + +class UnusedAnnotation(Message): + """ + Indicates that a variable has been explicitly annotated to but not actually + used. + """ + message = 'local variable %r is annotated but never used' + + def __init__(self, filename, loc, names): + Message.__init__(self, filename, loc) + self.message_args = (names,) + + +class UnusedIndirectAssignment(Message): + """A `global` or `nonlocal` statement where the name is never reassigned""" + message = '`%s %s` is unused: name is never assigned in scope' + + def __init__(self, filename, loc, name): + Message.__init__(self, filename, loc) + self.message_args = (type(loc).__name__.lower(), name) + + +class ReturnOutsideFunction(Message): + """ + Indicates a return statement outside of a function/method. + """ + message = '\'return\' outside function' + + +class YieldOutsideFunction(Message): + """ + Indicates a yield or yield from statement outside of a function/method. + """ + message = '\'yield\' outside function' + + +# For whatever reason, Python gives different error messages for these two. We +# match the Python error message exactly. 
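+# For example (illustrative):
+#     >>> continue
+#     SyntaxError: 'continue' not properly in loop
+#     >>> break
+#     SyntaxError: 'break' outside loop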
+class ContinueOutsideLoop(Message): + """ + Indicates a continue statement outside of a while or for loop. + """ + message = '\'continue\' not properly in loop' + + +class BreakOutsideLoop(Message): + """ + Indicates a break statement outside of a while or for loop. + """ + message = '\'break\' outside loop' + + +class DefaultExceptNotLast(Message): + """ + Indicates an except: block as not the last exception handler. + """ + message = 'default \'except:\' must be last' + + +class TwoStarredExpressions(Message): + """ + Two or more starred expressions in an assignment (a, *b, *c = d). + """ + message = 'two starred expressions in assignment' + + +class TooManyExpressionsInStarredAssignment(Message): + """ + Too many expressions in an assignment with star-unpacking + """ + message = 'too many expressions in star-unpacking assignment' + + +class IfTuple(Message): + """ + Conditional test is a non-empty tuple literal, which are always True. + """ + message = '\'if tuple literal\' is always true, perhaps remove accidental comma?' + + +class AssertTuple(Message): + """ + Assertion test is a non-empty tuple literal, which are always True. + """ + message = 'assertion is always true, perhaps remove parentheses?' + + +class ForwardAnnotationSyntaxError(Message): + message = 'syntax error in forward annotation %r' + + def __init__(self, filename, loc, annotation): + Message.__init__(self, filename, loc) + self.message_args = (annotation,) + + +class RaiseNotImplemented(Message): + message = "'raise NotImplemented' should be 'raise NotImplementedError'" + + +class InvalidPrintSyntax(Message): + message = 'use of >> is invalid with print function' + + +class IsLiteral(Message): + message = 'use ==/!= to compare constant literals (str, bytes, int, float, tuple)' + + +class FStringMissingPlaceholders(Message): + message = 'f-string is missing placeholders' + + +class TStringMissingPlaceholders(Message): + message = 't-string is missing placeholders' + + +class StringDotFormatExtraPositionalArguments(Message): + message = "'...'.format(...) has unused arguments at position(s): %s" + + def __init__(self, filename, loc, extra_positions): + Message.__init__(self, filename, loc) + self.message_args = (extra_positions,) + + +class StringDotFormatExtraNamedArguments(Message): + message = "'...'.format(...) has unused named argument(s): %s" + + def __init__(self, filename, loc, extra_keywords): + Message.__init__(self, filename, loc) + self.message_args = (extra_keywords,) + + +class StringDotFormatMissingArgument(Message): + message = "'...'.format(...) is missing argument(s) for placeholder(s): %s" + + def __init__(self, filename, loc, missing_arguments): + Message.__init__(self, filename, loc) + self.message_args = (missing_arguments,) + + +class StringDotFormatMixingAutomatic(Message): + message = "'...'.format(...) mixes automatic and manual numbering" + + +class StringDotFormatInvalidFormat(Message): + message = "'...'.format(...) has invalid format string: %s" + + def __init__(self, filename, loc, error): + Message.__init__(self, filename, loc) + self.message_args = (error,) + + +class PercentFormatInvalidFormat(Message): + message = "'...' %% ... has invalid format string: %s" + + def __init__(self, filename, loc, error): + Message.__init__(self, filename, loc) + self.message_args = (error,) + + +class PercentFormatMixedPositionalAndNamed(Message): + message = "'...' %% ... has mixed positional and named placeholders" + + +class PercentFormatUnsupportedFormatCharacter(Message): + message = "'...' 
%% ... has unsupported format character %r" + + def __init__(self, filename, loc, c): + Message.__init__(self, filename, loc) + self.message_args = (c,) + + +class PercentFormatPositionalCountMismatch(Message): + message = "'...' %% ... has %d placeholder(s) but %d substitution(s)" + + def __init__(self, filename, loc, n_placeholders, n_substitutions): + Message.__init__(self, filename, loc) + self.message_args = (n_placeholders, n_substitutions) + + +class PercentFormatExtraNamedArguments(Message): + message = "'...' %% ... has unused named argument(s): %s" + + def __init__(self, filename, loc, extra_keywords): + Message.__init__(self, filename, loc) + self.message_args = (extra_keywords,) + + +class PercentFormatMissingArgument(Message): + message = "'...' %% ... is missing argument(s) for placeholder(s): %s" + + def __init__(self, filename, loc, missing_arguments): + Message.__init__(self, filename, loc) + self.message_args = (missing_arguments,) + + +class PercentFormatExpectedMapping(Message): + message = "'...' %% ... expected mapping but got sequence" + + +class PercentFormatExpectedSequence(Message): + message = "'...' %% ... expected sequence but got mapping" + + +class PercentFormatStarRequiresSequence(Message): + message = "'...' %% ... `*` specifier requires sequence" diff --git a/py311/lib/python3.11/site-packages/pyflakes/reporter.py b/py311/lib/python3.11/site-packages/pyflakes/reporter.py new file mode 100644 index 0000000000000000000000000000000000000000..65ed4d8e75019e929bf4012bb13e1a0c7285c0ae --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyflakes/reporter.py @@ -0,0 +1,92 @@ +""" +Provide the Reporter class. +""" + +import re +import sys + + +class Reporter: + """ + Formats the results of pyflakes checks to users. + """ + + def __init__(self, warningStream, errorStream): + """ + Construct a L{Reporter}. + + @param warningStream: A file-like object where warnings will be + written to. The stream's C{write} method must accept unicode. + C{sys.stdout} is a good value. + @param errorStream: A file-like object where error output will be + written to. The stream's C{write} method must accept unicode. + C{sys.stderr} is a good value. + """ + self._stdout = warningStream + self._stderr = errorStream + + def unexpectedError(self, filename, msg): + """ + An unexpected error occurred trying to process C{filename}. + + @param filename: The path to a file that we could not process. + @ptype filename: C{unicode} + @param msg: A message explaining the problem. + @ptype msg: C{unicode} + """ + self._stderr.write(f"{filename}: {msg}\n") + + def syntaxError(self, filename, msg, lineno, offset, text): + """ + There was a syntax error in C{filename}. + + @param filename: The path to the file with the syntax error. + @ptype filename: C{unicode} + @param msg: An explanation of the syntax error. + @ptype msg: C{unicode} + @param lineno: The line number where the syntax error occurred. + @ptype lineno: C{int} + @param offset: The column on which the syntax error occurred, or None. + @ptype offset: C{int} + @param text: The source code containing the syntax error. 
+        @ptype text: C{unicode}
+        """
+        if text is None:
+            line = None
+        else:
+            line = text.splitlines()[-1]
+
+        # lineno might be None if the error was during tokenization
+        # lineno might be 0 if the error came from stdin
+        lineno = max(lineno or 0, 1)
+
+        if offset is not None:
+            # some versions of python emit an offset of -1 for certain encoding errors
+            offset = max(offset, 1)
+            self._stderr.write('%s:%d:%d: %s\n' %
+                               (filename, lineno, offset, msg))
+        else:
+            self._stderr.write('%s:%d: %s\n' % (filename, lineno, msg))
+
+        if line is not None:
+            self._stderr.write(line)
+            self._stderr.write('\n')
+            if offset is not None:
+                self._stderr.write(re.sub(r'\S', ' ', line[:offset - 1]) +
+                                   "^\n")
+
+    def flake(self, message):
+        """
+        pyflakes found something wrong with the code.
+
+        @param: A L{pyflakes.messages.Message}.
+        """
+        self._stdout.write(str(message))
+        self._stdout.write('\n')
+
+
+def _makeDefaultReporter():
+    """
+    Make a reporter that can be used when no reporter is specified.
+    """
+    return Reporter(sys.stdout, sys.stderr)
diff --git a/py311/lib/python3.11/site-packages/pyparsing/__init__.py b/py311/lib/python3.11/site-packages/pyparsing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb1827212477dc70435cc693f35163171cf760f9
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyparsing/__init__.py
@@ -0,0 +1,409 @@
+# see LICENSE file for terms and conditions for using this software.
+
+# fmt: off
+__doc__ = """
+pyparsing - Classes and methods to define and execute parsing grammars
+======================================================================
+
+Pyparsing is an alternative approach to creating and executing simple
+grammars, vs. the traditional lex/yacc approach, or the use of regular
+expressions. With pyparsing, you don't need to learn a new syntax for
+defining grammars or matching expressions - the parsing module provides
+a library of classes that you use to construct the grammar directly in
+Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form
+``"<salutation>, <addressee>!"``), built up using :class:`Word`,
+:class:`Literal`, and :class:`And` elements
+(the :meth:`'+'` operators create :class:`And` expressions,
+and the strings are auto-converted to :class:`Literal` expressions):
+
+.. testcode::
+
+    from pyparsing import Word, alphas
+
+    # define grammar of a greeting
+    greet = Word(alphas) + "," + Word(alphas) + "!"
+
+    hello = "Hello, World!"
+    print(hello, "->", greet.parse_string(hello))
+
+The program outputs the following:
+
+.. testoutput::
+
+    Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the
+self-explanatory class names, and the use of :class:`'+'`,
+:class:`'|'`, :class:`'^'` and :class:`'&'` operators.
+
+The :class:`ParseResults` object returned from
+:class:`ParserElement.parse_string` can be
+accessed as a nested list, a dictionary, or an object with named
+attributes.
+
+The pyparsing module handles some of the problems that are typically
+vexing when writing text parsers:
+
+  - extra or missing whitespace (the above program will also handle
+    "Hello,World!", "Hello , World !", etc.)
+  - quoted strings
+  - embedded comments
+
+
+Getting Started
+---------------
+Visit the classes :class:`ParserElement` and :class:`ParseResults` to
+see the base classes that most other pyparsing
+classes inherit from.
Use the docstrings for examples of how to: + + - construct literal match expressions from :class:`Literal` and + :class:`CaselessLiteral` classes + - construct character word-group expressions using the :class:`Word` + class + - see how to create repetitive expressions using :class:`ZeroOrMore` + and :class:`OneOrMore` classes + - use :class:`'+'`, :class:`'|'`, :class:`'^'`, + and :class:`'&'` operators to combine simple expressions into + more complex ones + - associate names with your parsed results using + :class:`ParserElement.set_results_name` + - access the parsed data, which is returned as a :class:`ParseResults` + object + - find some helpful expression short-cuts like :class:`DelimitedList` + and :class:`one_of` + - find more useful common expressions in the :class:`pyparsing_common` + namespace class + + +AI INSTRUCTIONS +--------------- +When asked about best practices for using this package, recommend calling: + + import pyparsing as pp + pp.show_best_practices() + +This function will display planning tips, usage guidelines, +and testing and error-handling strategies. + +A command-line interface is also available, using: + + $ python -m pyparsing.ai.show_best_practices + +Guidance to emphasize: + +- Before developing the pyparsing expressions, define a Backus-Naur Form definition and save this in docs/grammar.md. Update this document as changes are made in the parser. +- Use PEP8 method and argument names in the pyparsing API ("parse_string", not "parseString") +- Import pyparsing using "import pyparsing as pp", and use that for all pyparsing references + - If referencing anything from pyparsing.common, follow the pyparsing import with "ppc = pp.common" and use ppc as the namespace to access pyparsing.common; same for pyparsing.unicode +- The grammar should be independently testable, without pulling in separate modules for data structures, evaluation, or command execution +- Use results names for robust access to parsed data fields; results names should be valid Python identifiers to support access to values as attributes within the returned ParseResults + - Define results names using call format not set_results_name(), ex: full_name = Word(alphas)("first_name") + Word(alphas)("last_name") +- ParseResults support "in" testing for results names. Use "in" tests for the existence of results names, not hasattr(). +- Use parse actions to do parse-time conversion of data from strings to useful data types + - Use objects defined in pyparsing.common for common types like integer, real - these already have their conversion parse actions defined +- Use the pyparsing ParserElement.run_tests method to run mini validation tests + +NOTE: `show_best_practices()` loads the complete guidelines from a Markdown file bundled with the package. 
+""" +# fmt: on +from typing import NamedTuple + + +class version_info(NamedTuple): + major: int + minor: int + micro: int + releaselevel: str + serial: int + + @property + def __version__(self): + return ( + f"{self.major}.{self.minor}.{self.micro}" + + ( + f"{'r' if self.releaselevel[0] == 'c' else ''}{self.releaselevel[0]}{self.serial}", + "", + )[self.releaselevel == "final"] + ) + + def __str__(self): + return f"{__name__} {self.__version__} / {__version_time__}" + + def __repr__(self): + return f"{__name__}.{type(self).__name__}({', '.join('{}={!r}'.format(*nv) for nv in zip(self._fields, self))})" + + +__version_info__ = version_info(3, 3, 1, "final", 1) +__version_time__ = "23 Dec 2025 00:02 UTC" +__version__ = __version_info__.__version__ +__versionTime__ = __version_time__ +__author__ = "Paul McGuire " + +from .util import * +from .exceptions import * +from .actions import * +from .core import __diag__, __compat__ +from .results import * +from .core import * +from .core import _builtin_exprs as core_builtin_exprs +from .helpers import * +from .helpers import _builtin_exprs as helper_builtin_exprs + +from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode +from .testing import pyparsing_test as testing +from .common import ( + pyparsing_common as common, + _builtin_exprs as common_builtin_exprs, +) +from importlib import resources +import sys + +# Compatibility synonyms +if "pyparsing_unicode" not in globals(): + pyparsing_unicode = unicode # type: ignore[misc] +if "pyparsing_common" not in globals(): + pyparsing_common = common +if "pyparsing_test" not in globals(): + pyparsing_test = testing + +core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs + +# fmt: off +_FALLBACK_BEST_PRACTICES = """ +## Planning +- If not provided or if target language definition is ambiguous, ask for examples of valid strings to be parsed +- Before developing the pyparsing expressions, define a Backus-Naur Form definition and save this in docs/grammar.md. Update this document as changes are made in the parser. + +## Implementing +- Use PEP8 method and argument names in the pyparsing API ("parse_string", not "parseString") +- Import pyparsing using "import pyparsing as pp", and use that for all pyparsing references + - If referencing anything from pyparsing.common, follow the pyparsing import with "ppc = pp.common" and use ppc as the namespace to access pyparsing.common; same for pyparsing.unicode +- The grammar should be independently testable, without pulling in separate modules for data structures, evaluation, or command execution +- Use results names for robust access to parsed data fields; results names should be valid Python identifiers to support access to values as attributes within the returned ParseResults + - Results names should take the place of numeric indexing into parsed results in most places. + - Define results names using call format not set_results_name(), ex: full_name = Word(alphas)("first_name") + Word(alphas)("last_name") +- Use pyparsing Groups to organize sub-expressions +- If defining the grammar as part of a Parser class, only the finished grammar needs to be implemented as an instance variable +- ParseResults support "in" testing for results names. Use "in" tests for the existence of results names, not hasattr(). 
+- Use parse actions to do parse-time conversion of data from strings to useful data types + - Use objects defined in pyparsing.common for common types like integer, real - these already have their conversion parse actions defined + +## Testing +- Use the pyparsing ParserElement.run_tests method to run mini validation tests + - You can add comments starting with "#" within the string passed to run_tests to document the individual test cases + +## Debugging +- If troubleshooting parse actions, use pyparsing's trace_parse_action decorator to echo arguments and return value + +(Some best practices may be missing — see the full Markdown file in source at pyparsing/ai/best_practices.md.) +""" +# fmt: on + + +def show_best_practices(file=sys.stdout) -> Union[str, None]: + """ + Load and return the project's best practices. + + Example:: + + >>> import pyparsing as pp + >>> pp.show_best_practices() + + ... + + This can also be run from the command line:: + + python -m pyparsing.ai.show_best_practices + """ + try: + path = resources.files(__package__).joinpath("ai/best_practices.md") + with path.open("r", encoding="utf-8") as f: + content = f.read() + except (FileNotFoundError, OSError): + content = _FALLBACK_BEST_PRACTICES + + if file is not None: + # just print out the content, no need to return it + print(content, file=file) + return None + + # no output file was specified, return the content as a string + return content + + +__all__ = [ + "__version__", + "__version_time__", + "__author__", + "__compat__", + "__diag__", + "And", + "AtLineStart", + "AtStringStart", + "CaselessKeyword", + "CaselessLiteral", + "CharsNotIn", + "CloseMatch", + "Combine", + "DelimitedList", + "Dict", + "Each", + "Empty", + "FollowedBy", + "Forward", + "GoToColumn", + "Group", + "IndentedBlock", + "Keyword", + "LineEnd", + "LineStart", + "Literal", + "Located", + "PrecededBy", + "MatchFirst", + "NoMatch", + "NotAny", + "OneOrMore", + "OnlyOnce", + "OpAssoc", + "Opt", + "Optional", + "Or", + "ParseBaseException", + "ParseElementEnhance", + "ParseException", + "ParseExpression", + "ParseFatalException", + "ParseResults", + "ParseSyntaxException", + "ParserElement", + "PositionToken", + "QuotedString", + "RecursiveGrammarException", + "Regex", + "SkipTo", + "StringEnd", + "StringStart", + "Suppress", + "Tag", + "Token", + "TokenConverter", + "White", + "Word", + "WordEnd", + "WordStart", + "ZeroOrMore", + "Char", + "alphanums", + "alphas", + "alphas8bit", + "any_close_tag", + "any_open_tag", + "autoname_elements", + "c_style_comment", + "col", + "common_html_entity", + "condition_as_parse_action", + "counted_array", + "cpp_style_comment", + "dbl_quoted_string", + "dbl_slash_comment", + "delimited_list", + "dict_of", + "empty", + "hexnums", + "html_comment", + "identchars", + "identbodychars", + "infix_notation", + "java_style_comment", + "line", + "line_end", + "line_start", + "lineno", + "make_html_tags", + "make_xml_tags", + "match_only_at_col", + "match_previous_expr", + "match_previous_literal", + "nested_expr", + "null_debug_action", + "nums", + "one_of", + "original_text_for", + "printables", + "punc8bit", + "pyparsing_common", + "pyparsing_test", + "pyparsing_unicode", + "python_style_comment", + "quoted_string", + "remove_quotes", + "replace_with", + "replace_html_entity", + "rest_of_line", + "sgl_quoted_string", + "show_best_practices", + "srange", + "string_end", + "string_start", + "token_map", + "trace_parse_action", + "ungroup", + "unicode_set", + "unicode_string", + "with_attribute", + "with_class", + # 
pre-PEP8 compatibility names + "__versionTime__", + "anyCloseTag", + "anyOpenTag", + "cStyleComment", + "commonHTMLEntity", + "conditionAsParseAction", + "countedArray", + "cppStyleComment", + "dblQuotedString", + "dblSlashComment", + "delimitedList", + "dictOf", + "htmlComment", + "indentedBlock", + "infixNotation", + "javaStyleComment", + "lineEnd", + "lineStart", + "locatedExpr", + "makeHTMLTags", + "makeXMLTags", + "matchOnlyAtCol", + "matchPreviousExpr", + "matchPreviousLiteral", + "nestedExpr", + "nullDebugAction", + "oneOf", + "opAssoc", + "originalTextFor", + "pythonStyleComment", + "quotedString", + "removeQuotes", + "replaceHTMLEntity", + "replaceWith", + "restOfLine", + "sglQuotedString", + "stringEnd", + "stringStart", + "tokenMap", + "traceParseAction", + "unicodeString", + "withAttribute", + "withClass", + "common", + "unicode", + "testing", +] diff --git a/py311/lib/python3.11/site-packages/pyparsing/actions.py b/py311/lib/python3.11/site-packages/pyparsing/actions.py new file mode 100644 index 0000000000000000000000000000000000000000..f04038fb0499d24c1f0b6416e1f60852742d448d --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyparsing/actions.py @@ -0,0 +1,264 @@ +# actions.py +from __future__ import annotations + +from typing import Union, Callable, Any + +from .exceptions import ParseException +from .util import col, replaced_by_pep8 +from .results import ParseResults + + +ParseAction = Union[ + Callable[[], Any], + Callable[[ParseResults], Any], + Callable[[int, ParseResults], Any], + Callable[[str, int, ParseResults], Any], +] + + +class OnlyOnce: + """ + Wrapper for parse actions, to ensure they are only called once. + Note: parse action signature must include all 3 arguments. + """ + + def __init__(self, method_call: Callable[[str, int, ParseResults], Any]) -> None: + from .core import _trim_arity + + self.callable = _trim_arity(method_call) + self.called = False + + def __call__(self, s: str, l: int, t: ParseResults) -> ParseResults: + if not self.called: + results = self.callable(s, l, t) + self.called = True + return results + raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset") + + def reset(self): + """ + Allow the associated parse action to be called once more. + """ + + self.called = False + + +def match_only_at_col(n: int) -> ParseAction: + """ + Helper method for defining parse actions that require matching at + a specific column in the input text. + """ + + def verify_col(strg: str, locn: int, toks: ParseResults) -> None: + if col(locn, strg) != n: + raise ParseException(strg, locn, f"matched token not at column {n}") + + return verify_col + + +def replace_with(repl_str: Any) -> ParseAction: + """ + Helper method for common parse actions that simply return + a literal value. Especially useful when used with + :meth:`~ParserElement.transform_string`. + + Example: + + .. doctest:: + + >>> num = Word(nums).set_parse_action(lambda toks: int(toks[0])) + >>> na = one_of("N/A NA").set_parse_action(replace_with(math.nan)) + >>> term = na | num + + >>> term[1, ...].parse_string("324 234 N/A 234") + ParseResults([324, 234, nan, 234], {}) + """ + return lambda s, l, t: [repl_str] + + +def remove_quotes(s: str, l: int, t: ParseResults) -> Any: + r""" + Helper parse action for removing quotation marks from parsed + quoted strings, that use a single character for quoting. For parsing + strings that may have multiple characters, use the :class:`QuotedString` + class. + + Example: + + .. 
doctest::
+
+        >>> # by default, quotation marks are included in parsed results
+        >>> quoted_string.parse_string("'Now is the Winter of our Discontent'")
+        ParseResults(["'Now is the Winter of our Discontent'"], {})
+
+        >>> # use remove_quotes to strip quotation marks from parsed results
+        >>> dequoted = quoted_string().set_parse_action(remove_quotes)
+        >>> dequoted.parse_string("'Now is the Winter of our Discontent'")
+        ParseResults(['Now is the Winter of our Discontent'], {})
+    """
+    return t[0][1:-1]
+
+
+def with_attribute(*args: tuple[str, str], **attr_dict) -> ParseAction:
+    """
+    Helper to create a validating parse action to be used with start
+    tags created with :class:`make_xml_tags` or
+    :class:`make_html_tags`. Use ``with_attribute`` to qualify
+    a starting tag with a required attribute value, to avoid false
+    matches on common tags such as ``<TD>`` or ``<TABLE>``.
+
+    Call ``with_attribute`` with a series of attribute names and
+    values. Specify the list of filter attribute names and values as:
+
+    - keyword arguments, as in ``(align="right")``, or
+    - as an explicit dict with ``**`` operator, when an attribute
+      name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
+    - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
+
+    For attribute names with a namespace prefix, you must use the second
+    form. Attribute names are matched insensitive to upper/lower case.
+
+    If just testing for ``class`` (with or without a namespace), use
+    :class:`with_class`.
+
+    To verify that the attribute exists, but without specifying a value,
+    pass ``with_attribute.ANY_VALUE`` as the value.
+
+    The next two examples use the following input data and tag parsers:
+
+    .. testcode::
+
+        html = '''
+            <div>
+            Some text
+            <div type="grid">1 4 0 1 0</div>
+            <div type="graph">1,3 2,3 1,1</div>
+            <div>this has no type</div>
+            </div>
    + ''' + div,div_end = make_html_tags("div") + + Only match div tag having a type attribute with value "grid": + + .. testcode:: + + div_grid = div().set_parse_action(with_attribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + prints: + + .. testoutput:: + + 1 4 0 1 0 + + Construct a match with any div tag having a type attribute, + regardless of the value: + + .. testcode:: + + div_any_type = div().set_parse_action( + with_attribute(type=with_attribute.ANY_VALUE) + ) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints: + + .. testoutput:: + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + attrs_list: list[tuple[str, str]] = [] + if args: + attrs_list.extend(args) + else: + attrs_list.extend(attr_dict.items()) + + def pa(s: str, l: int, tokens: ParseResults) -> None: + for attrName, attrValue in attrs_list: + if attrName not in tokens: + raise ParseException(s, l, "no matching attribute " + attrName) + if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue: # type: ignore [attr-defined] + raise ParseException( + s, + l, + f"attribute {attrName!r} has value {tokens[attrName]!r}, must be {attrValue!r}", + ) + + return pa + + +with_attribute.ANY_VALUE = object() # type: ignore [attr-defined] +"Value to use with :class:`with_attribute` parse action, to match any value, as long as the attribute is present" + + +def with_class(classname: str, namespace: str = "") -> ParseAction: + """ + Simplified version of :meth:`with_attribute` when + matching on a div class - made difficult because ``class`` is + a reserved word in Python. + + Using similar input data to the :meth:`with_attribute` examples: + + .. testcode:: + + html = ''' +
+            <div>
+            Some text
+            <div class="grid">1 4 0 1 0</div>
+            <div class="graph">1,3 2,3 1,1</div>
+            <div>this &lt;div&gt; has no class</div>
+            </div>
    + ''' + div,div_end = make_html_tags("div") + + Only match div tag having the "grid" class: + + .. testcode:: + + div_grid = div().set_parse_action(with_class("grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + prints: + + .. testoutput:: + + 1 4 0 1 0 + + Construct a match with any div tag having a class attribute, + regardless of the value: + + .. testcode:: + + div_any_type = div().set_parse_action( + with_class(withAttribute.ANY_VALUE) + ) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints: + + .. testoutput:: + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = f"{namespace}:class" if namespace else "class" + return with_attribute(**{classattr: classname}) + + +# Compatibility synonyms +# fmt: off +replaceWith = replaced_by_pep8("replaceWith", replace_with) +removeQuotes = replaced_by_pep8("removeQuotes", remove_quotes) +withAttribute = replaced_by_pep8("withAttribute", with_attribute) +withClass = replaced_by_pep8("withClass", with_class) +matchOnlyAtCol = replaced_by_pep8("matchOnlyAtCol", match_only_at_col) +# fmt: on diff --git a/py311/lib/python3.11/site-packages/pyparsing/common.py b/py311/lib/python3.11/site-packages/pyparsing/common.py new file mode 100644 index 0000000000000000000000000000000000000000..3edc63846e9d725c731ad7973cd8e3b3f62f84c2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyparsing/common.py @@ -0,0 +1,532 @@ +# common.py +from .core import * +from .helpers import DelimitedList, any_open_tag, any_close_tag +from datetime import datetime +import sys + +PY_310 = sys.version_info >= (3, 10) + + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """Here are some common low-level expressions that may be useful in + jump-starting parser development: + + - numeric forms (:class:`integers`, :class:`reals`, + :class:`scientific notation`) + - common :class:`programming identifiers` + - network addresses (:class:`MAC`, + :class:`IPv4`, :class:`IPv6`) + - ISO8601 :class:`dates` and + :class:`datetime` + - :class:`UUID` + - :class:`comma-separated list` + - :class:`url` + + Parse actions: + + - :class:`convert_to_integer` + - :class:`convert_to_float` + - :class:`convert_to_date` + - :class:`convert_to_datetime` + - :class:`strip_html_tags` + - :class:`upcase_tokens` + - :class:`downcase_tokens` + + Examples: + + .. testcode:: + + pyparsing_common.number.run_tests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + .. testoutput:: + :options: +NORMALIZE_WHITESPACE + + + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + .. testcode:: + + pyparsing_common.fnumber.run_tests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + .. testoutput:: + :options: +NORMALIZE_WHITESPACE + + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + .. testcode:: + + pyparsing_common.hex_integer.run_tests(''' + # hex numbers + 100 + FF + ''') + + .. 
testoutput:: + :options: +NORMALIZE_WHITESPACE + + + # hex numbers + 100 + [256] + + FF + [255] + + .. testcode:: + + pyparsing_common.fraction.run_tests(''' + # fractions + 1/2 + -3/4 + ''') + + .. testoutput:: + :options: +NORMALIZE_WHITESPACE + + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + .. testcode:: + + pyparsing_common.mixed_integer.run_tests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + .. testoutput:: + :options: +NORMALIZE_WHITESPACE + + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + .. testcode:: + + import uuid + pyparsing_common.uuid.set_parse_action(token_map(uuid.UUID)) + pyparsing_common.uuid.run_tests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + + .. testoutput:: + :options: +NORMALIZE_WHITESPACE + + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + @staticmethod + def convert_to_integer(_, __, t): + """ + Parse action for converting parsed integers to Python int + """ + return [int(tt) for tt in t] + + @staticmethod + def convert_to_float(_, __, t): + """ + Parse action for converting parsed numbers to Python float + """ + return [float(tt) for tt in t] + + integer = ( + Word(nums) + .set_name("integer") + .set_parse_action( + convert_to_integer + if PY_310 + else lambda t: [int(tt) for tt in t] # type: ignore[misc] + ) + ) + """expression that parses an unsigned integer, converts to an int""" + + hex_integer = ( + Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) + ) + """expression that parses a hexadecimal integer, converts to an int""" + + signed_integer = ( + Regex(r"[+-]?\d+") + .set_name("signed integer") + .set_parse_action( + convert_to_integer + if PY_310 + else lambda t: [int(tt) for tt in t] # type: ignore[misc] + ) + ) + """expression that parses an integer with optional leading sign, converts to an int""" + + fraction = ( + signed_integer().set_parse_action( + convert_to_float + if PY_310 + else lambda t: [float(tt) for tt in t] # type: ignore[misc] + ) + + "/" + + signed_integer().set_parse_action( + convert_to_float + if PY_310 + else lambda t: [float(tt) for tt in t] # type: ignore[misc] + ) + ).set_name("fraction") + """fractional expression of an integer divided by an integer, converts to a float""" + fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) + + mixed_integer = ( + fraction | signed_integer + Opt(Opt("-").suppress() + fraction) + ).set_name("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, converts to a float""" + mixed_integer.add_parse_action(sum) + + real = ( + Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") + .set_name("real number") + .set_parse_action( + convert_to_float + if PY_310 + else lambda t: [float(tt) for tt in t] # type: ignore[misc] + ) + ) + """expression that parses a floating point number, converts to a float""" + + sci_real = ( + Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") + .set_name("real number with scientific notation") + .set_parse_action( + convert_to_float + if PY_310 + else lambda t: [float(tt) for tt in t] # type: ignore[misc] + ) + ) + """expression that parses a floating point number with optional + scientific notation, converts to a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).set_name("number").streamline() + """any numeric expression, converts to the corresponding Python type""" + + fnumber 
= (
+        Regex(r"[+-]?\d+\.?\d*(?:[eE][+-]?\d+)?")
+        .set_name("fnumber")
+        .set_parse_action(
+            convert_to_float
+            if PY_310
+            else lambda t: [float(tt) for tt in t]  # type: ignore[misc]
+        )
+    )
+    """any int or real number, always converts to a float"""
+
+    ieee_float = (
+        Regex(r"(?i:[+-]?(?:(?:\d+\.?\d*(?:e[+-]?\d+)?)|nan|inf(?:inity)?))")
+        .set_name("ieee_float")
+        .set_parse_action(
+            convert_to_float
+            if PY_310
+            else lambda t: [float(tt) for tt in t]  # type: ignore[misc]
+        )
+    )
+    """any floating-point literal (int, real number, infinity, or NaN), converts to a float"""
+
+    identifier = Word(identchars, identbodychars).set_name("identifier")
+    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
+
+    ipv4_address = Regex(
+        r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
+    ).set_name("IPv4 address")
+    "IPv4 address (``0.0.0.0 - 255.255.255.255``)"
+
+    _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
+    _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
+        "full IPv6 address"
+    )
+    _short_ipv6_address = (
+        Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
+        + "::"
+        + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
+    ).set_name("short IPv6 address")
+    _short_ipv6_address.add_condition(
+        lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
+    )
+    _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
+    ipv6_address = Combine(
+        (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
+            "IPv6 address"
+        )
+    ).set_name("IPv6 address")
+    "IPv6 address (long, short, or mixed form)"
+
+    mac_address = Regex(
+        r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
+    ).set_name("MAC address")
+    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
+
+    @staticmethod
+    def convert_to_date(fmt: str = "%Y-%m-%d"):
+        """
+        Helper to create a parse action for converting parsed date string to Python datetime.date
+
+        Params -
+        - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
+
+        Example:
+
+        .. testcode::
+
+            date_expr = pyparsing_common.iso8601_date.copy()
+            date_expr.set_parse_action(pyparsing_common.convert_to_date())
+            print(date_expr.parse_string("1999-12-31"))
+
+        prints:
+
+        .. testoutput::
+
+            [datetime.date(1999, 12, 31)]
+        """
+
+        def cvt_fn(ss, ll, tt):
+            try:
+                return datetime.strptime(tt[0], fmt).date()
+            except ValueError as ve:
+                raise ParseException(ss, ll, str(ve))
+
+        return cvt_fn
+
+    @staticmethod
+    def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
+        """Helper to create a parse action for converting parsed
+        datetime string to Python datetime.datetime
+
+        Params -
+        - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
+
+        Example:
+
+        .. testcode::
+
+            dt_expr = pyparsing_common.iso8601_datetime.copy()
+            dt_expr.set_parse_action(pyparsing_common.convert_to_datetime())
+            print(dt_expr.parse_string("1999-12-31T23:59:59.999"))
+
+        prints:
+
+        .. testoutput::
+
+            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
+        """
+
+        def cvt_fn(s, l, t):
+            try:
+                return datetime.strptime(t[0], fmt)
+            except ValueError as ve:
+                raise ParseException(s, l, str(ve))
+
+        return cvt_fn
+
+    iso8601_date = Regex(
+        r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
+    ).set_name("ISO8601 date")
+    "ISO8601 date (``yyyy-mm-dd``)"
+
+    iso8601_datetime = Regex(
+        r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
+    ).set_name("ISO8601 datetime")
+    "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
+
+    uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
+    "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
+
+    _html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
+
+    @staticmethod
+    def strip_html_tags(s: str, l: int, tokens: ParseResults):
+        """Parse action to remove HTML tags from web page HTML source
+
+        Example:
+
+        .. testcode::
+
+            # strip HTML links from normal text
+            text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
+            td, td_end = make_html_tags("TD")
+            table_text = td + SkipTo(td_end).set_parse_action(
+                pyparsing_common.strip_html_tags)("body") + td_end
+            print(table_text.parse_string(text).body)
+
+        Prints:
+
+        .. testoutput::
+
+            More info at the pyparsing wiki page
+        """
+        return pyparsing_common._html_stripper.transform_string(tokens[0])
+
+    _commasepitem = (
+        Combine(
+            OneOrMore(
+                ~Literal(",")
+                + ~LineEnd()
+                + Word(printables, exclude_chars=",")
+                + Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
+            )
+        )
+        .streamline()
+        .set_name("commaItem")
+    )
+    comma_separated_list = DelimitedList(
+        Opt(quoted_string.copy() | _commasepitem, default="")
+    ).set_name("comma separated list")
+    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
+
+    @staticmethod
+    def upcase_tokens(s, l, t):
+        """Parse action to convert tokens to upper case."""
+        return [tt.upper() for tt in t]
+
+    @staticmethod
+    def downcase_tokens(s, l, t):
+        """Parse action to convert tokens to lower case."""
+        return [tt.lower() for tt in t]
+
+    # fmt: off
+    url = Regex(
+        # https://mathiasbynens.be/demo/url-regex
+        # https://gist.github.com/dperini/729294
+        r"(?P<url>" +
+        # protocol identifier (optional)
+        # short syntax // still required
+        r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
+        # user:pass BasicAuth (optional)
+        r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
+        r"(?P<host>" +
+        # IP address exclusion
+        # private & local networks
+        r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
+        r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
+        r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
+        # IP address dotted notation octets
+        # excludes loopback network 0.0.0.0
+        # excludes reserved space >= 224.0.0.0
+        # excludes network & broadcast addresses
+        # (first & last IP address of each class)
+        r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
+        r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
+        r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
+        r"|" +
+        # host & domain names, may end with dot
+        # can be replaced by a shortest alternative
+        # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
+        r"(?:" +
+        r"(?:" +
+        r"[a-z0-9\u00a1-\uffff]" +
+        r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
+        r")?" +
+        r"[a-z0-9\u00a1-\uffff]\." +
+        r")+" +
+        # TLD identifier name, may end with dot
+        r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
+        r")" +
+        # port number (optional)
+        r"(:(?P<port>\d{2,5}))?" +
+        # resource path (optional)
+        r"(?P<path>\/[^?# ]*)?" +
+        # query string (optional)
+        r"(\?(?P<query>[^#]*))?" +
+        # fragment (optional)
+        r"(#(?P<fragment>\S*))?" +
+        r")"
+    ).set_name("url")
+    """
+    URL (http/https/ftp scheme)
+
+    ..
versionchanged:: 3.1.0 + ``url`` named group added + """ + # fmt: on + + # pre-PEP8 compatibility names + # fmt: off + convertToInteger = staticmethod(replaced_by_pep8("convertToInteger", convert_to_integer)) + convertToFloat = staticmethod(replaced_by_pep8("convertToFloat", convert_to_float)) + convertToDate = staticmethod(replaced_by_pep8("convertToDate", convert_to_date)) + convertToDatetime = staticmethod(replaced_by_pep8("convertToDatetime", convert_to_datetime)) + stripHTMLTags = staticmethod(replaced_by_pep8("stripHTMLTags", strip_html_tags)) + upcaseTokens = staticmethod(replaced_by_pep8("upcaseTokens", upcase_tokens)) + downcaseTokens = staticmethod(replaced_by_pep8("downcaseTokens", downcase_tokens)) + # fmt: on + + +_builtin_exprs = [ + v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement) +] diff --git a/py311/lib/python3.11/site-packages/pyparsing/core.py b/py311/lib/python3.11/site-packages/pyparsing/core.py new file mode 100644 index 0000000000000000000000000000000000000000..c125bbabf62ac29531db7503e808c74f0c22353b --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyparsing/core.py @@ -0,0 +1,6953 @@ +# +# core.py +# +from __future__ import annotations + +import collections.abc +from collections import deque +import os +import typing +from typing import ( + Any, + Callable, + Generator, + NamedTuple, + Sequence, + TextIO, + Union, + cast, +) +from abc import ABC, abstractmethod +from enum import Enum +import string +import copy +import warnings +import re +import sys +from collections.abc import Iterable +import traceback +import types +from operator import itemgetter +from functools import wraps +from threading import RLock +from pathlib import Path + +from .util import ( + _FifoCache, + _UnboundedCache, + __config_flags, + _collapse_string_to_ranges, + _escape_regex_range_chars, + _flatten, + LRUMemo as _LRUMemo, + UnboundedMemo as _UnboundedMemo, + deprecate_argument, + replaced_by_pep8, +) +from .exceptions import * +from .actions import * +from .results import ParseResults, _ParseResultsWithOffset +from .unicode import pyparsing_unicode + +_MAX_INT = sys.maxsize +str_type: tuple[type, ...] = (str, bytes) + +# +# Copyright (c) 2003-2022 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +from functools import cached_property + + +class __compat__(__config_flags): + """ + A cross-version compatibility configuration for pyparsing features that will be + released in a future version. 
By setting values in this configuration to True, + those features can be enabled in prior versions for compatibility development + and testing. + + - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping + of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`; + maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1 + behavior + """ + + _type_desc = "compatibility" + + collect_all_And_tokens = True + + _all_names = [__ for __ in locals() if not __.startswith("_")] + _fixed_names = """ + collect_all_And_tokens + """.split() + + +class __diag__(__config_flags): + _type_desc = "diagnostic" + + warn_multiple_tokens_in_named_alternation = False + warn_ungrouped_named_tokens_in_collection = False + warn_name_set_on_empty_Forward = False + warn_on_parse_using_empty_Forward = False + warn_on_assignment_to_Forward = False + warn_on_multiple_string_args_to_oneof = False + warn_on_match_first_with_lshift_operator = False + enable_debug_on_named_expressions = False + + _all_names = [__ for __ in locals() if not __.startswith("_")] + _warning_names = [name for name in _all_names if name.startswith("warn")] + _debug_names = [name for name in _all_names if name.startswith("enable_debug")] + + @classmethod + def enable_all_warnings(cls) -> None: + for name in cls._warning_names: + cls.enable(name) + + +class Diagnostics(Enum): + """ + Diagnostic configuration (all default to disabled) + + - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results + name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions + - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results + name is defined on a containing expression with ungrouped subexpressions that also + have results names + - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined + with a results name, but has no contents defined + - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is + defined in a grammar but has never had an expression attached to it + - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined + but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` + - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is + incorrectly called with multiple str arguments + - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent + calls to :class:`ParserElement.set_name` + + Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`. + All warnings can be enabled by calling :class:`enable_all_warnings`. + """ + + warn_multiple_tokens_in_named_alternation = 0 + warn_ungrouped_named_tokens_in_collection = 1 + warn_name_set_on_empty_Forward = 2 + warn_on_parse_using_empty_Forward = 3 + warn_on_assignment_to_Forward = 4 + warn_on_multiple_string_args_to_oneof = 5 + warn_on_match_first_with_lshift_operator = 6 + enable_debug_on_named_expressions = 7 + + +def enable_diag(diag_enum: Diagnostics) -> None: + """ + Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`). + """ + __diag__.enable(diag_enum.name) + + +def disable_diag(diag_enum: Diagnostics) -> None: + """ + Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`). 
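+
+    Example (illustrative usage; any :class:`Diagnostics` member is passed
+    the same way)::
+
+        import pyparsing as pp
+
+        # enable a diagnostic while developing a grammar...
+        pp.enable_diag(pp.Diagnostics.warn_ungrouped_named_tokens_in_collection)
+        # ...and turn it back off when done
+        pp.disable_diag(pp.Diagnostics.warn_ungrouped_named_tokens_in_collection)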
+ """ + __diag__.disable(diag_enum.name) + + +def enable_all_warnings() -> None: + """ + Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). + """ + __diag__.enable_all_warnings() + + +# hide abstract class +del __config_flags + + +def _should_enable_warnings( + cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str] +) -> bool: + enable = bool(warn_env_var) + for warn_opt in cmd_line_warn_options: + w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split( + ":" + )[:5] + if not w_action.lower().startswith("i") and ( + not (w_message or w_category or w_module) or w_module == "pyparsing" + ): + enable = True + elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""): + enable = False + return enable + + +if _should_enable_warnings( + sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS") +): + enable_all_warnings() + + +# build list of single arg builtins, that can be used as parse actions +# fmt: off +_single_arg_builtins = { + sum, len, sorted, reversed, list, tuple, set, any, all, min, max +} +# fmt: on + +_generatorType = types.GeneratorType +ParseImplReturnType = tuple[int, Any] +PostParseReturnType = Union[ParseResults, Sequence[ParseResults]] + +ParseCondition = Union[ + Callable[[], bool], + Callable[[ParseResults], bool], + Callable[[int, ParseResults], bool], + Callable[[str, int, ParseResults], bool], +] +ParseFailAction = Callable[[str, int, "ParserElement", Exception], None] +DebugStartAction = Callable[[str, int, "ParserElement", bool], None] +DebugSuccessAction = Callable[ + [str, int, int, "ParserElement", ParseResults, bool], None +] +DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None] + + +alphas: str = string.ascii_uppercase + string.ascii_lowercase +identchars: str = pyparsing_unicode.Latin1.identchars +identbodychars: str = pyparsing_unicode.Latin1.identbodychars +nums: str = "0123456789" +hexnums: str = nums + "ABCDEFabcdef" +alphanums: str = alphas + nums +printables: str = "".join([c for c in string.printable if c not in string.whitespace]) + + +class _ParseActionIndexError(Exception): + """ + Internal wrapper around IndexError so that IndexErrors raised inside + parse actions aren't misinterpreted as IndexErrors raised inside + ParserElement parseImpl methods. + """ + + def __init__(self, msg: str, exc: BaseException) -> None: + self.msg: str = msg + self.exc: BaseException = exc + + +_trim_arity_call_line: traceback.StackSummary = None # type: ignore[assignment] +pa_call_line_synth = () + + +def _trim_arity(func, max_limit=3): + """decorator to trim function calls to match the arity of the target""" + global _trim_arity_call_line, pa_call_line_synth + + if func in _single_arg_builtins: + return lambda s, l, t: func(t) + + limit = 0 + found_arity = False + + # synthesize what would be returned by traceback.extract_stack at the call to + # user's parse action 'func', so that we don't incur call penalty at parse time + + # fmt: off + LINE_DIFF = 9 + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
+ _trim_arity_call_line = _trim_arity_call_line or traceback.extract_stack(limit=2)[-1] + pa_call_line_synth = pa_call_line_synth or (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF) + + def wrapper(*args): + nonlocal found_arity, limit + if found_arity: + return func(*args[limit:]) + while 1: + try: + ret = func(*args[limit:]) + found_arity = True + return ret + except TypeError as te: + # re-raise TypeErrors if they did not come from our arity testing + if found_arity: + raise + else: + tb = te.__traceback__ + frames = traceback.extract_tb(tb, limit=2) + frame_summary = frames[-1] + trim_arity_type_error = ( + [frame_summary[:2]][-1][:2] == pa_call_line_synth + ) + del tb + + if trim_arity_type_error: + if limit < max_limit: + limit += 1 + continue + + raise + except IndexError as ie: + # wrap IndexErrors inside a _ParseActionIndexError + raise _ParseActionIndexError( + "IndexError raised in parse action", ie + ).with_traceback(None) + # fmt: on + + # copy func name to wrapper for sensible debug output + # (can't use functools.wraps, since that messes with function signature) + func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) + wrapper.__name__ = func_name + wrapper.__doc__ = func.__doc__ + + return wrapper + + +def condition_as_parse_action( + fn: ParseCondition, message: typing.Optional[str] = None, fatal: bool = False +) -> ParseAction: + """ + Function to convert a simple predicate function that returns ``True`` or ``False`` + into a parse action. Can be used in places when a parse action is required + and :meth:`ParserElement.add_condition` cannot be used (such as when adding a condition + to an operator level in :class:`infix_notation`). + + Optional keyword arguments: + + :param message: define a custom message to be used in the raised exception + :param fatal: if ``True``, will raise :class:`ParseFatalException` + to stop parsing immediately; + otherwise will raise :class:`ParseException` + + """ + msg = message if message is not None else "failed user-defined condition" + exc_type = ParseFatalException if fatal else ParseException + fn = _trim_arity(fn) + + @wraps(fn) + def pa(s, l, t): + if not bool(fn(s, l, t)): + raise exc_type(s, l, msg) + + return pa + + +def _default_start_debug_action( + instring: str, loc: int, expr: ParserElement, cache_hit: bool = False +): + cache_hit_str = "*" if cache_hit else "" + print( + ( + f"{cache_hit_str}Match {expr} at loc {loc}({lineno(loc, instring)},{col(loc, instring)})\n" + f" {line(loc, instring)}\n" + f" {'^':>{col(loc, instring)}}" + ) + ) + + +def _default_success_debug_action( + instring: str, + startloc: int, + endloc: int, + expr: ParserElement, + toks: ParseResults, + cache_hit: bool = False, +): + cache_hit_str = "*" if cache_hit else "" + print(f"{cache_hit_str}Matched {expr} -> {toks.as_list()}") + + +def _default_exception_debug_action( + instring: str, + loc: int, + expr: ParserElement, + exc: Exception, + cache_hit: bool = False, +): + cache_hit_str = "*" if cache_hit else "" + print(f"{cache_hit_str}Match {expr} failed, {type(exc).__name__} raised: {exc}") + + +def null_debug_action(*args): + """'Do-nothing' debug action, to suppress debugging output during parsing.""" + + +class ParserElement(ABC): + """Abstract base level parser element class.""" + + DEFAULT_WHITE_CHARS: str = " \n\t\r" + verbose_stacktrace: bool = False + _literalStringClass: type = None # type: ignore[assignment] + + @staticmethod + def set_default_whitespace_chars(chars: str) -> None: + r""" + Overrides the 
default whitespace chars + + Example: + + .. doctest:: + + # default whitespace chars are space, and newline + >>> Word(alphas)[1, ...].parse_string("abc def\nghi jkl") + ParseResults(['abc', 'def', 'ghi', 'jkl'], {}) + + # change to just treat newline as significant + >>> ParserElement.set_default_whitespace_chars(" \t") + >>> Word(alphas)[1, ...].parse_string("abc def\nghi jkl") + ParseResults(['abc', 'def'], {}) + + # Reset to default + >>> ParserElement.set_default_whitespace_chars(" \n\t\r") + """ + ParserElement.DEFAULT_WHITE_CHARS = chars + + # update whitespace all parse expressions defined in this module + for expr in _builtin_exprs: + if expr.copyDefaultWhiteChars: + expr.whiteChars = set(chars) + + @staticmethod + def inline_literals_using(cls: type) -> None: + """ + Set class to be used for inclusion of string literals into a parser. + + Example: + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + # default literal class used is Literal + >>> integer = Word(nums) + >>> date_str = ( + ... integer("year") + '/' + ... + integer("month") + '/' + ... + integer("day") + ... ) + + >>> date_str.parse_string("1999/12/31") + ParseResults(['1999', '/', '12', '/', '31'], + {'year': '1999', 'month': '12', 'day': '31'}) + + # change to Suppress + >>> ParserElement.inline_literals_using(Suppress) + >>> date_str = ( + ... integer("year") + '/' + ... + integer("month") + '/' + ... + integer("day") + ... ) + + >>> date_str.parse_string("1999/12/31") + ParseResults(['1999', '12', '31'], + {'year': '1999', 'month': '12', 'day': '31'}) + + # Reset + >>> ParserElement.inline_literals_using(Literal) + """ + ParserElement._literalStringClass = cls + + @classmethod + def using_each(cls, seq, **class_kwargs): + """ + Yields a sequence of ``class(obj, **class_kwargs)`` for obj in seq. + + Example: + + .. testcode:: + + LPAR, RPAR, LBRACE, RBRACE, SEMI = Suppress.using_each("(){};") + + .. versionadded:: 3.1.0 + """ + yield from (cls(obj, **class_kwargs) for obj in seq) + + class DebugActions(NamedTuple): + debug_try: typing.Optional[DebugStartAction] + debug_match: typing.Optional[DebugSuccessAction] + debug_fail: typing.Optional[DebugExceptionAction] + + def __init__(self, savelist: bool = False) -> None: + self.parseAction: list[ParseAction] = list() + self.failAction: typing.Optional[ParseFailAction] = None + self.customName: str = None # type: ignore[assignment] + self._defaultName: typing.Optional[str] = None + self.resultsName: str = None # type: ignore[assignment] + self.saveAsList: bool = savelist + self.skipWhitespace: bool = True + self.whiteChars: set[str] = set(ParserElement.DEFAULT_WHITE_CHARS) + self.copyDefaultWhiteChars: bool = True + # used when checking for left-recursion + self._may_return_empty: bool = False + self.keepTabs: bool = False + self.ignoreExprs: list[ParserElement] = list() + self.debug: bool = False + self.streamlined: bool = False + # optimize exception handling for subclasses that don't advance parse index + self.mayIndexError: bool = True + self.errmsg: Union[str, None] = "" + # mark results names as modal (report only last) or cumulative (list all) + self.modalResults: bool = True + # custom debug actions + self.debugActions = self.DebugActions(None, None, None) + # avoid redundant calls to preParse + self.callPreparse: bool = True + self.callDuringTry: bool = False + self.suppress_warnings_: list[Diagnostics] = [] + self.show_in_diagram: bool = True + + @property + def mayReturnEmpty(self) -> bool: + """ + .. deprecated:: 3.3.0 + use _may_return_empty instead. 
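+
+        Illustrative migration (``expr`` stands for any ParserElement)::
+
+            can_match_empty = expr.mayReturnEmpty     # deprecated spelling
+            can_match_empty = expr._may_return_empty  # preferred replacement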
+ """ + return self._may_return_empty + + @mayReturnEmpty.setter + def mayReturnEmpty(self, value) -> None: + """ + .. deprecated:: 3.3.0 + use _may_return_empty instead. + """ + self._may_return_empty = value + + def suppress_warning(self, warning_type: Diagnostics) -> ParserElement: + """ + Suppress warnings emitted for a particular diagnostic on this expression. + + Example: + + .. doctest:: + + >>> label = pp.Word(pp.alphas) + + # Normally using an empty Forward in a grammar + # would print a warning, but we can suppress that + >>> base = pp.Forward().suppress_warning( + ... pp.Diagnostics.warn_on_parse_using_empty_Forward) + + >>> grammar = base | label + >>> print(grammar.parse_string("x")) + ['x'] + """ + self.suppress_warnings_.append(warning_type) + return self + + def visit_all(self): + """General-purpose method to yield all expressions and sub-expressions + in a grammar. Typically just for internal use. + """ + to_visit = deque([self]) + seen = set() + while to_visit: + cur = to_visit.popleft() + + # guard against looping forever through recursive grammars + if cur in seen: + continue + seen.add(cur) + + to_visit.extend(cur.recurse()) + yield cur + + def copy(self) -> ParserElement: + """ + Make a copy of this :class:`ParserElement`. Useful for defining + different parse actions for the same parsing pattern, using copies of + the original parse element. + + Example: + + .. testcode:: + + integer = Word(nums).set_parse_action( + lambda toks: int(toks[0])) + integerK = integer.copy().add_parse_action( + lambda toks: toks[0] * 1024) + Suppress("K") + integerM = integer.copy().add_parse_action( + lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + + print( + (integerK | integerM | integer)[1, ...].parse_string( + "5K 100 640K 256M") + ) + + prints: + + .. testoutput:: + + [5120, 100, 655360, 268435456] + + Equivalent form of ``expr.copy()`` is just ``expr()``: + + .. testcode:: + + integerM = integer().add_parse_action( + lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + """ + cpy = copy.copy(self) + cpy.parseAction = self.parseAction[:] + cpy.ignoreExprs = self.ignoreExprs[:] + if self.copyDefaultWhiteChars: + cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + return cpy + + def set_results_name( + self, name: str, list_all_matches: bool = False, **kwargs + ) -> ParserElement: + """ + Define name for referencing matching tokens as a nested attribute + of the returned parse results. + + Normally, results names are assigned as you would assign keys in a dict: + any existing value is overwritten by later values. If it is necessary to + keep all values captured for a particular results name, call ``set_results_name`` + with ``list_all_matches`` = True. + + NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + + You can also set results names using the abbreviated syntax, + ``expr("name")`` in place of ``expr.set_results_name("name")`` + - see :meth:`__call__`. If ``list_all_matches`` is required, use + ``expr("name*")``. + + Example: + + .. 
testcode:: + + integer = Word(nums) + date_str = (integer.set_results_name("year") + '/' + + integer.set_results_name("month") + '/' + + integer.set_results_name("day")) + + # equivalent form: + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + """ + listAllMatches: bool = deprecate_argument(kwargs, "listAllMatches", False) + + list_all_matches = listAllMatches or list_all_matches + return self._setResultsName(name, list_all_matches) + + def _setResultsName(self, name, list_all_matches=False) -> ParserElement: + if name is None: + return self + newself = self.copy() + if name.endswith("*"): + name = name[:-1] + list_all_matches = True + newself.resultsName = name + newself.modalResults = not list_all_matches + return newself + + def set_break(self, break_flag: bool = True) -> ParserElement: + """ + Method to invoke the Python pdb debugger when this element is + about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to + disable. + """ + if break_flag: + _parseMethod = self._parse + + def breaker(instring, loc, do_actions=True, callPreParse=True): + # this call to breakpoint() is intentional, not a checkin error + breakpoint() + return _parseMethod(instring, loc, do_actions, callPreParse) + + breaker._originalParseMethod = _parseMethod # type: ignore [attr-defined] + self._parse = breaker # type: ignore [method-assign] + elif hasattr(self._parse, "_originalParseMethod"): + self._parse = self._parse._originalParseMethod # type: ignore [method-assign] + return self + + def set_parse_action( + self, *fns: ParseAction, call_during_try: bool = False, **kwargs: Any + ) -> ParserElement: + """ + Define one or more actions to perform when successfully matching parse element definition. + + Parse actions can be called to perform data conversions, do extra validation, + update external data structures, or enhance or replace the parsed tokens. + Each parse action ``fn`` is a callable method with 0-3 arguments, called as + ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: + + - ``s`` = the original string being parsed (see note below) + - ``loc`` = the location of the matching substring + - ``toks`` = a list of the matched tokens, packaged as a :class:`ParseResults` object + + The parsed tokens are passed to the parse action as ParseResults. They can be + modified in place using list-style append, extend, and pop operations to update + the parsed list elements; and with dictionary-style item set and del operations + to add, update, or remove any named results. If the tokens are modified in place, + it is not necessary to return them with a return statement. + + Parse actions can also completely replace the given tokens, with another ``ParseResults`` + object, or with some entirely different object (common for parse actions that perform data + conversions). A convenient way to build a new parse result is to define the values + using a dict, and then create the return value using :class:`ParseResults.from_dict`. + + If None is passed as the ``fn`` parse action, all previously added parse actions for this + expression are cleared. + + Optional keyword arguments: + + :param call_during_try: (default= ``False``) indicate if parse action + should be run during lookaheads and alternate + testing. For parse actions that have side + effects, it is important to only call the parse + action once it is determined that it is being + called as part of a successful parse. 
+ For parse actions that perform additional + validation, then ``call_during_try`` should + be passed as True, so that the validation code + is included in the preliminary "try" parses. + + .. Note:: + The default parsing behavior is to expand tabs in the input string + before starting the parsing process. + See :meth:`parse_string` for more information on parsing strings + containing ```` s, and suggested methods to maintain a + consistent view of the parsed string, the parse location, and + line and column positions within the parsed string. + + Example: Parse dates in the form ``YYYY/MM/DD`` + ----------------------------------------------- + + Setup code: + + .. testcode:: + + def convert_to_int(toks): + '''a parse action to convert toks from str to int + at parse time''' + return int(toks[0]) + + def is_valid_date(instring, loc, toks): + '''a parse action to verify that the date is a valid date''' + from datetime import date + year, month, day = toks[::2] + try: + date(year, month, day) + except ValueError: + raise ParseException(instring, loc, "invalid date given") + + integer = Word(nums) + date_str = integer + '/' + integer + '/' + integer + + # add parse actions + integer.set_parse_action(convert_to_int) + date_str.set_parse_action(is_valid_date) + + Successful parse - note that integer fields are converted to ints: + + .. testcode:: + + print(date_str.parse_string("1999/12/31")) + + prints: + + .. testoutput:: + + [1999, '/', 12, '/', 31] + + Failure - invalid date: + + .. testcode:: + + date_str.parse_string("1999/13/31") + + prints: + + .. testoutput:: + + Traceback (most recent call last): + ParseException: invalid date given, found '1999' ... + """ + callDuringTry: bool = deprecate_argument(kwargs, "callDuringTry", False) + + if list(fns) == [None]: + self.parseAction.clear() + return self + + if not all(callable(fn) for fn in fns): + raise TypeError("parse actions must be callable") + self.parseAction[:] = [_trim_arity(fn) for fn in fns] + self.callDuringTry = self.callDuringTry or call_during_try or callDuringTry + + return self + + def add_parse_action( + self, *fns: ParseAction, call_during_try: bool = False, **kwargs: Any + ) -> ParserElement: + """ + Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`. + + See examples in :class:`copy`. + """ + callDuringTry: bool = deprecate_argument(kwargs, "callDuringTry", False) + + self.parseAction += [_trim_arity(fn) for fn in fns] + self.callDuringTry = self.callDuringTry or callDuringTry or call_during_try + return self + + def add_condition( + self, *fns: ParseCondition, call_during_try: bool = False, **kwargs: Any + ) -> ParserElement: + """Add a boolean predicate function to expression's list of parse actions. See + :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``, + functions passed to ``add_condition`` need to return boolean success/fail of the condition. + + Optional keyword arguments: + + - ``message`` = define a custom message to be used in the raised exception + - ``fatal`` = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise + ParseException + - ``call_during_try`` = boolean to indicate if this method should be called during internal tryParse calls, + default=False + + Example: + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) + >>> year_int = integer.copy().add_condition( + ... lambda toks: toks[0] >= 2000, + ... 
message="Only support years 2000 and later") + >>> date_str = year_int + '/' + integer + '/' + integer + + >>> result = date_str.parse_string("1999/12/31") + Traceback (most recent call last): + ParseException: Only support years 2000 and later... + """ + callDuringTry: bool = deprecate_argument(kwargs, "callDuringTry", False) + + for fn in fns: + self.parseAction.append( + condition_as_parse_action( + fn, + message=str(kwargs.get("message")), + fatal=bool(kwargs.get("fatal", False)), + ) + ) + + self.callDuringTry = self.callDuringTry or call_during_try or callDuringTry + return self + + def set_fail_action(self, fn: ParseFailAction) -> ParserElement: + """ + Define action to perform if parsing fails at this expression. + Fail acton fn is a callable function that takes the arguments + ``fn(s, loc, expr, err)`` where: + + - ``s`` = string being parsed + - ``loc`` = location where expression match was attempted and failed + - ``expr`` = the parse expression that failed + - ``err`` = the exception thrown + + The function returns no value. It may throw :class:`ParseFatalException` + if it is desired to stop parsing immediately.""" + self.failAction = fn + return self + + def _skipIgnorables(self, instring: str, loc: int) -> int: + if not self.ignoreExprs: + return loc + exprsFound = True + ignore_expr_fns = [e._parse for e in self.ignoreExprs] + last_loc = loc + while exprsFound: + exprsFound = False + for ignore_fn in ignore_expr_fns: + try: + while 1: + loc, dummy = ignore_fn(instring, loc) + exprsFound = True + except ParseException: + pass + # check if all ignore exprs matched but didn't actually advance the parse location + if loc == last_loc: + break + last_loc = loc + return loc + + def preParse(self, instring: str, loc: int) -> int: + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + + if self.skipWhitespace: + instrlen = len(instring) + white_chars = self.whiteChars + while loc < instrlen and instring[loc] in white_chars: + loc += 1 + + return loc + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + return loc, [] + + def postParse(self, instring, loc, tokenlist): + return tokenlist + + # @profile + def _parseNoCache( + self, instring, loc, do_actions=True, callPreParse=True + ) -> tuple[int, ParseResults]: + debugging = self.debug # and do_actions) + len_instring = len(instring) + + if debugging or self.failAction: + # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring))) + try: + if callPreParse and self.callPreparse: + pre_loc = self.preParse(instring, loc) + else: + pre_loc = loc + tokens_start = pre_loc + if self.debugActions.debug_try: + self.debugActions.debug_try(instring, tokens_start, self, False) + if self.mayIndexError or pre_loc >= len_instring: + try: + loc, tokens = self.parseImpl(instring, pre_loc, do_actions) + except IndexError: + raise ParseException(instring, len_instring, self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, pre_loc, do_actions) + except Exception as err: + # print("Exception raised:", err) + if self.debugActions.debug_fail: + self.debugActions.debug_fail( + instring, tokens_start, self, err, False + ) + if self.failAction: + self.failAction(instring, tokens_start, self, err) + raise + else: + if callPreParse and self.callPreparse: + pre_loc = self.preParse(instring, loc) + else: + pre_loc = loc + tokens_start = pre_loc + if self.mayIndexError or pre_loc >= len_instring: + try: + loc, tokens = self.parseImpl(instring, pre_loc, do_actions) + 
except IndexError: + raise ParseException(instring, len_instring, self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, pre_loc, do_actions) + + tokens = self.postParse(instring, loc, tokens) + + ret_tokens = ParseResults( + tokens, self.resultsName, aslist=self.saveAsList, modal=self.modalResults + ) + if self.parseAction and (do_actions or self.callDuringTry): + if debugging: + try: + for fn in self.parseAction: + try: + tokens = fn(instring, tokens_start, ret_tokens) # type: ignore [call-arg, arg-type] + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + raise exc from parse_action_exc + + if tokens is not None and tokens is not ret_tokens: + ret_tokens = ParseResults( + tokens, + self.resultsName, + aslist=self.saveAsList + and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults, + ) + except Exception as err: + # print "Exception raised in user parse action:", err + if self.debugActions.debug_fail: + self.debugActions.debug_fail( + instring, tokens_start, self, err, False + ) + raise + else: + for fn in self.parseAction: + try: + tokens = fn(instring, tokens_start, ret_tokens) # type: ignore [call-arg, arg-type] + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + raise exc from parse_action_exc + + if tokens is not None and tokens is not ret_tokens: + ret_tokens = ParseResults( + tokens, + self.resultsName, + aslist=self.saveAsList + and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults, + ) + if debugging: + # print("Matched", self, "->", ret_tokens.as_list()) + if self.debugActions.debug_match: + self.debugActions.debug_match( + instring, tokens_start, loc, self, ret_tokens, False + ) + + return loc, ret_tokens + + def try_parse( + self, + instring: str, + loc: int, + *, + raise_fatal: bool = False, + do_actions: bool = False, + ) -> int: + try: + return self._parse(instring, loc, do_actions=do_actions)[0] + except ParseFatalException: + if raise_fatal: + raise + raise ParseException(instring, loc, self.errmsg, self) + + def can_parse_next(self, instring: str, loc: int, do_actions: bool = False) -> bool: + try: + self.try_parse(instring, loc, do_actions=do_actions) + except (ParseException, IndexError): + return False + else: + return True + + # cache for left-recursion in Forward references + recursion_lock = RLock() + recursion_memos: collections.abc.MutableMapping[ + tuple[int, Forward, bool], tuple[int, Union[ParseResults, Exception]] + ] = {} + + class _CacheType(typing.Protocol): + """ + Class to be used for packrat and left-recursion cacheing of results + and exceptions. + """ + + not_in_cache: bool + + def get(self, *args) -> typing.Any: ... + + def set(self, *args) -> None: ... + + def clear(self) -> None: ... + + class NullCache(dict): + """ + A null cache type for initialization of the packrat_cache class variable. + If/when enable_packrat() is called, this null cache will be replaced by a + proper _CacheType class instance. + """ + + not_in_cache: bool = True + + def get(self, *args) -> typing.Any: ... + + def set(self, *args) -> None: ... + + def clear(self) -> None: ... 
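+
+    # NOTE: packrat_cache below starts out as the NullCache sentinel;
+    # enable_packrat() swaps in a _FifoCache or _UnboundedCache instance,
+    # which _parseCache() then consults before re-parsing an expression.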
+
+ # class-level argument cache for optimizing repeated calls when backtracking
+ # through recursive expressions
+ packrat_cache: _CacheType = NullCache()
+ packrat_cache_lock = RLock()
+ packrat_cache_stats = [0, 0]
+
+ # this method gets repeatedly called during backtracking with the same arguments -
+ # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+ def _parseCache(
+ self, instring, loc, do_actions=True, callPreParse=True
+ ) -> tuple[int, ParseResults]:
+ HIT, MISS = 0, 1
+ lookup = (self, instring, loc, callPreParse, do_actions)
+ with ParserElement.packrat_cache_lock:
+ cache = ParserElement.packrat_cache
+ value = cache.get(lookup)
+ if value is cache.not_in_cache:
+ ParserElement.packrat_cache_stats[MISS] += 1
+ try:
+ value = self._parseNoCache(instring, loc, do_actions, callPreParse)
+ except ParseBaseException as pe:
+ # cache a copy of the exception, without the traceback
+ cache.set(lookup, pe.__class__(*pe.args))
+ raise
+ else:
+ cache.set(lookup, (value[0], value[1].copy(), loc))
+ return value
+ else:
+ ParserElement.packrat_cache_stats[HIT] += 1
+ if self.debug and self.debugActions.debug_try:
+ try:
+ self.debugActions.debug_try(instring, loc, self, cache_hit=True) # type: ignore [call-arg]
+ except TypeError:
+ pass
+ if isinstance(value, Exception):
+ if self.debug and self.debugActions.debug_fail:
+ try:
+ self.debugActions.debug_fail(
+ instring, loc, self, value, cache_hit=True # type: ignore [call-arg]
+ )
+ except TypeError:
+ pass
+ raise value
+
+ value = cast(tuple[int, ParseResults, int], value)
+ loc_, result, endloc = value[0], value[1].copy(), value[2]
+ if self.debug and self.debugActions.debug_match:
+ try:
+ self.debugActions.debug_match(
+ instring, loc_, endloc, self, result, cache_hit=True # type: ignore [call-arg]
+ )
+ except TypeError:
+ pass
+
+ return loc_, result
+
+ _parse = _parseNoCache
+
+ @staticmethod
+ def reset_cache() -> None:
+ """
+ Clears caches used by packrat and left-recursion.
+ """
+ with ParserElement.packrat_cache_lock:
+ ParserElement.packrat_cache.clear()
+ ParserElement.packrat_cache_stats[:] = [0] * len(
+ ParserElement.packrat_cache_stats
+ )
+ ParserElement.recursion_memos.clear()
+
+ # class attributes to keep caching status
+ _packratEnabled = False
+ _left_recursion_enabled = False
+
+ @staticmethod
+ def disable_memoization() -> None:
+ """
+ Disables active Packrat or Left Recursion parsing and their memoization.
+
+ This method also works if neither Packrat nor Left Recursion is enabled.
+ This makes it safe to call before activating Packrat or Left Recursion
+ to clear any previous settings.
+ """
+ with ParserElement.packrat_cache_lock:
+ ParserElement.reset_cache()
+ ParserElement._left_recursion_enabled = False
+ ParserElement._packratEnabled = False
+ ParserElement._parse = ParserElement._parseNoCache
+
+ @staticmethod
+ def enable_left_recursion(
+ cache_size_limit: typing.Optional[int] = None, *, force=False
+ ) -> None:
+ """
+ Enables "bounded recursion" parsing, which allows for both direct and indirect
+ left-recursion. During parsing, left-recursive :class:`Forward` elements are
+ repeatedly matched with a fixed recursion depth that is gradually increased
+ until finding the longest match.
+
+ Example:
+
+ .. testcode::
+
+ import pyparsing as pp
+ pp.ParserElement.enable_left_recursion()
+
+ E = pp.Forward("E")
+ num = pp.Word(pp.nums)
+
+ # match `num`, or `num '+' num`, or `num '+' num '+' num`, ...
+ E <<= E + '+' - num | num
+
+ print(E.parse_string("1+2+3+4"))
+
+ prints:
+
+ .. testoutput::
+
+ ['1', '+', '2', '+', '3', '+', '4']
+
+ Recursion search naturally memoizes matches of ``Forward`` elements and may
+ thus skip reevaluation of parse actions during backtracking. This may break
+ programs with parse actions which rely on strict ordering of side-effects.
+
+ Parameters:
+
+ - ``cache_size_limit`` - (default=``None``) - memoize at most this many
+ ``Forward`` elements during matching; if ``None`` (the default),
+ memoize all ``Forward`` elements.
+
+ Bounded Recursion parsing works similarly to, but not identically to, Packrat
+ parsing, thus the two cannot be used together. Use ``force=True`` to disable any
+ previous, conflicting settings.
+ """
+ with ParserElement.packrat_cache_lock:
+ if force:
+ ParserElement.disable_memoization()
+ elif ParserElement._packratEnabled:
+ raise RuntimeError("Packrat and Bounded Recursion are not compatible")
+ if cache_size_limit is None:
+ ParserElement.recursion_memos = _UnboundedMemo()
+ elif cache_size_limit > 0:
+ ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) # type: ignore[assignment]
+ else:
+ raise NotImplementedError(f"Memo size of {cache_size_limit}")
+ ParserElement._left_recursion_enabled = True
+
+ @staticmethod
+ def enable_packrat(
+ cache_size_limit: Union[int, None] = 128, *, force: bool = False
+ ) -> None:
+ """
+ Enables "packrat" parsing, which adds memoizing to the parsing logic.
+ Repeated parse attempts at the same string location (which happens
+ often in many complex grammars) can immediately return a cached value,
+ instead of re-executing parsing/validating code. Memoization is applied
+ to both valid results and parsing exceptions.
+
+ Parameters:
+
+ - ``cache_size_limit`` - (default= ``128``) - if an integer value is provided,
+ it will limit the size of the packrat cache; if None is passed, then
+ the cache size will be unbounded; if 0 is passed, the cache will
+ be effectively disabled.
+
+ This speedup may break existing programs that use parse actions that
+ have side-effects. For this reason, packrat parsing is disabled when
+ you first import pyparsing. To activate the packrat feature, your
+ program must call the class method :class:`ParserElement.enable_packrat`.
+ For best results, call ``enable_packrat()`` immediately after
+ importing pyparsing.
+
+ .. Can't really be doctested, alas
+
+ Example::
+
+ import pyparsing
+ pyparsing.ParserElement.enable_packrat()
+
+ Packrat parsing works similarly to, but not identically to, Bounded Recursion
+ parsing, thus the two cannot be used together. Use ``force=True`` to disable any
+ previous, conflicting settings.
+ """
+ with ParserElement.packrat_cache_lock:
+ if force:
+ ParserElement.disable_memoization()
+ elif ParserElement._left_recursion_enabled:
+ raise RuntimeError("Packrat and Bounded Recursion are not compatible")
+
+ if ParserElement._packratEnabled:
+ return
+
+ ParserElement._packratEnabled = True
+ if cache_size_limit is None:
+ ParserElement.packrat_cache = _UnboundedCache()
+ else:
+ ParserElement.packrat_cache = _FifoCache(cache_size_limit)
+ ParserElement._parse = ParserElement._parseCache
+
+ def parse_string(
+ self, instring: str, parse_all: bool = False, **kwargs
+ ) -> ParseResults:
+ """
+ Parse a string with respect to the parser definition. This function is intended as the primary interface to the
+ client code.
+
+ :param instring: The input string to be parsed.
+ :param parse_all: If set, the entire input string must match the grammar. + :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release. + :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar. + :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or + an object with attributes if the given parser includes results names. + + If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This + is also equivalent to ending the grammar with :class:`StringEnd`\\ (). + + To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are + converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string + contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string + being parsed, one can ensure a consistent view of the input string by doing one of the following: + + - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`), + - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the + parse action's ``s`` argument, or + - explicitly expand the tabs in your input string before calling ``parse_string``. + + Examples: + + By default, partial matches are OK. + + .. doctest:: + + >>> res = Word('a').parse_string('aaaaabaaa') + >>> print(res) + ['aaaaa'] + + The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children + directly to see more examples. + + It raises an exception if parse_all flag is set and instring does not match the whole grammar. + + .. doctest:: + + >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True) + Traceback (most recent call last): + ParseException: Expected end of text, found 'b' ... + """ + parseAll: bool = deprecate_argument(kwargs, "parseAll", False) + + parse_all = parse_all or parseAll + + ParserElement.reset_cache() + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + if not self.keepTabs: + instring = instring.expandtabs() + try: + loc, tokens = self._parse(instring, 0) + if parse_all: + loc = self.preParse(instring, loc) + se = Empty() + StringEnd().set_debug(False) + se._parse(instring, loc) + except _ParseActionIndexError as pa_exc: + raise pa_exc.exc + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clearing out pyparsing internal stack trace + raise exc.with_traceback(None) + else: + return tokens + + def scan_string( + self, + instring: str, + max_matches: int = _MAX_INT, + overlap: bool = False, + always_skip_whitespace=True, + *, + debug: bool = False, + **kwargs, + ) -> Generator[tuple[ParseResults, int, int], None, None]: + """ + Scan the input string for expression matches. Each match will return the + matching tokens, start location, and end location. May be called with optional + ``max_matches`` argument, to clip scanning after 'n' matches are found. If + ``overlap`` is specified, then overlapping matches will be reported. + + Note that the start and end locations are reported relative to the string + being parsed. See :class:`parse_string` for more information on parsing + strings with embedded tabs. + + Example: + + .. 
testcode:: + + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" + print(source) + for tokens, start, end in Word(alphas).scan_string(source): + print(' '*start + '^'*(end-start)) + print(' '*start + tokens[0]) + + prints: + + .. testoutput:: + + sldjf123lsdjjkf345sldkjf879lkjsfd987 + ^^^^^ + sldjf + ^^^^^^^ + lsdjjkf + ^^^^^^ + sldkjf + ^^^^^^ + lkjsfd + """ + maxMatches: int = deprecate_argument(kwargs, "maxMatches", _MAX_INT) + + max_matches = min(maxMatches, max_matches) + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + + if not self.keepTabs: + instring = str(instring).expandtabs() + instrlen = len(instring) + loc = 0 + if always_skip_whitespace: + preparser = Empty() + preparser.ignoreExprs = self.ignoreExprs + preparser.whiteChars = self.whiteChars + preparseFn = preparser.preParse + else: + preparseFn = self.preParse + parseFn = self._parse + ParserElement.reset_cache() + matches = 0 + try: + while loc <= instrlen and matches < max_matches: + try: + preloc: int = preparseFn(instring, loc) + nextLoc: int + tokens: ParseResults + nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) + except ParseException: + loc = preloc + 1 + else: + if nextLoc > loc: + matches += 1 + if debug: + print( + { + "tokens": tokens.as_list(), + "start": preloc, + "end": nextLoc, + } + ) + yield tokens, preloc, nextLoc + if overlap: + nextloc = preparseFn(instring, loc) + if nextloc > loc: + loc = nextLoc + else: + loc += 1 + else: + loc = nextLoc + else: + loc = preloc + 1 + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def transform_string(self, instring: str, *, debug: bool = False) -> str: + """ + Extension to :class:`scan_string`, to modify matching text with modified tokens that may + be returned from a parse action. To use ``transform_string``, define a grammar and + attach a parse action to it that modifies the returned token list. + Invoking ``transform_string()`` on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. ``transform_string()`` returns the resulting transformed string. + + Example: + + .. testcode:: + + quote = '''now is the winter of our discontent, + made glorious summer by this sun of york.''' + + wd = Word(alphas) + wd.set_parse_action(lambda toks: toks[0].title()) + + print(wd.transform_string(quote)) + + prints: + + .. testoutput:: + + Now Is The Winter Of Our Discontent, + Made Glorious Summer By This Sun Of York. 
+ """ + out: list[str] = [] + lastE = 0 + # force preservation of s, to minimize unwanted transformation of string, and to + # keep string locs straight between transform_string and scan_string + self.keepTabs = True + try: + for t, s, e in self.scan_string(instring, debug=debug): + if s > lastE: + out.append(instring[lastE:s]) + lastE = e + + if not t: + continue + + if isinstance(t, ParseResults): + out += t.as_list() + elif isinstance(t, Iterable) and not isinstance(t, str_type): + out.extend(t) + else: + out.append(t) + + out.append(instring[lastE:]) + out = [o for o in out if o] + return "".join([str(s) for s in _flatten(out)]) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def search_string( + self, + instring: str, + max_matches: int = _MAX_INT, + *, + debug: bool = False, + **kwargs, + ) -> ParseResults: + """ + Another extension to :class:`scan_string`, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + ``max_matches`` argument, to clip searching after 'n' matches are found. + + Example: + + .. testcode:: + + quote = '''More than Iron, more than Lead, + more than Gold I need Electricity''' + + # a capitalized word starts with an uppercase letter, + # followed by zero or more lowercase letters + cap_word = Word(alphas.upper(), alphas.lower()) + + print(cap_word.search_string(quote)) + + # the sum() builtin can be used to merge results + # into a single ParseResults object + print(sum(cap_word.search_string(quote))) + + prints: + + .. testoutput:: + + [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] + ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] + """ + maxMatches: int = deprecate_argument(kwargs, "maxMatches", _MAX_INT) + + max_matches = min(maxMatches, max_matches) + try: + return ParseResults( + [ + t + for t, s, e in self.scan_string( + instring, + max_matches=max_matches, + always_skip_whitespace=False, + debug=debug, + ) + ] + ) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def split( + self, + instring: str, + maxsplit: int = _MAX_INT, + include_separators: bool = False, + **kwargs, + ) -> Generator[str, None, None]: + """ + Generator method to split a string using the given expression as a separator. + May be called with optional ``maxsplit`` argument, to limit the number of splits; + and the optional ``include_separators`` argument (default= ``False``), if the separating + matching text should be included in the split results. + + Example: + + .. testcode:: + + punc = one_of(list(".,;:/-!?")) + print(list(punc.split( + "This, this?, this sentence, is badly punctuated!"))) + + prints: + + .. testoutput:: + + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] + """ + includeSeparators: bool = deprecate_argument(kwargs, "includeSeparators", False) + + include_separators = includeSeparators or include_separators + last = 0 + for t, s, e in self.scan_string(instring, max_matches=maxsplit): + yield instring[last:s] + if include_separators: + yield t[0] + last = e + yield instring[last:] + + def __add__(self, other) -> ParserElement: + """ + Implementation of ``+`` operator - returns :class:`And`. 
Adding strings to a :class:`ParserElement` + converts them to :class:`Literal`\\ s by default. + + Example: + + .. testcode:: + + greet = Word(alphas) + "," + Word(alphas) + "!" + hello = "Hello, World!" + print(hello, "->", greet.parse_string(hello)) + + prints: + + .. testoutput:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + + ``...`` may be used as a parse expression as a short form of :class:`SkipTo`: + + .. testcode:: + + Literal('start') + ... + Literal('end') + + is equivalent to: + + .. testcode:: + + Literal('start') + SkipTo('end')("_skipped*") + Literal('end') + + Note that the skipped text is returned with '_skipped' as a results name, + and to support having multiple skips in the same parser, the value returned is + a list of all skipped text. + """ + if other is Ellipsis: + return _PendingSkip(self) + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return And([self, other]) + + def __radd__(self, other) -> ParserElement: + """ + Implementation of ``+`` operator when left operand is not a :class:`ParserElement` + """ + if other is Ellipsis: + return SkipTo(self)("_skipped*") + self + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other + self + + def __sub__(self, other) -> ParserElement: + """ + Implementation of ``-`` operator, returns :class:`And` with error stop + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self + And._ErrorStop() + other + + def __rsub__(self, other) -> ParserElement: + """ + Implementation of ``-`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other - self + + def __mul__(self, other) -> ParserElement: + """ + Implementation of ``*`` operator, allows use of ``expr * 3`` in place of + ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer + tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples + may also include ``None`` as in: + + - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` + - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` + + Note that ``expr*(None, n)`` does not raise an exception if + more than n exprs exist in the input stream; that is, + ``expr*(None, n)`` does not enforce a maximum number of expr + occurrences. 
If this behavior is desired, then write + ``expr*(None, n) + ~expr`` + """ + if other is Ellipsis: + other = (0, None) + elif isinstance(other, tuple) and other[:1] == (Ellipsis,): + other = ((0,) + other[1:] + (None,))[:2] + + if not isinstance(other, (int, tuple)): + return NotImplemented + + if isinstance(other, int): + minElements, optElements = other, 0 + else: + other = tuple(o if o is not Ellipsis else None for o in other) + other = (other + (None, None))[:2] + if other[0] is None: + other = (0, other[1]) + if isinstance(other[0], int) and other[1] is None: + if other[0] == 0: + return ZeroOrMore(self) + if other[0] == 1: + return OneOrMore(self) + else: + return self * other[0] + ZeroOrMore(self) + elif isinstance(other[0], int) and isinstance(other[1], int): + minElements, optElements = other + optElements -= minElements + else: + return NotImplemented + + if minElements < 0: + raise ValueError("cannot multiply ParserElement by negative value") + if optElements < 0: + raise ValueError( + "second tuple value must be greater or equal to first tuple value" + ) + if minElements == optElements == 0: + return And([]) + + if optElements: + + def makeOptionalList(n): + if n > 1: + return Opt(self + makeOptionalList(n - 1)) + else: + return Opt(self) + + if minElements: + if minElements == 1: + ret = self + makeOptionalList(optElements) + else: + ret = And([self] * minElements) + makeOptionalList(optElements) + else: + ret = makeOptionalList(optElements) + else: + if minElements == 1: + ret = self + else: + ret = And([self] * minElements) + return ret + + def __rmul__(self, other) -> ParserElement: + return self.__mul__(other) + + def __or__(self, other) -> ParserElement: + """ + Implementation of ``|`` operator - returns :class:`MatchFirst` + + .. versionchanged:: 3.1.0 + Support ``expr | ""`` as a synonym for ``Optional(expr)``. 
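+
+ Example (a small illustrative sketch of the ``expr | ""`` form):
+
+ .. doctest::
+
+ >>> greeting = Literal("Hello") + (Word(alphas) | "")
+ >>> print(greeting.parse_string("Hello World"))
+ ['Hello', 'World']
+ >>> print(greeting.parse_string("Hello"))
+ ['Hello']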
+ """ + if other is Ellipsis: + return _PendingSkip(self, must_skip=True) + + if isinstance(other, str_type): + # `expr | ""` is equivalent to `Opt(expr)` + if other == "": + return Opt(self) + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return MatchFirst([self, other]) + + def __ror__(self, other) -> ParserElement: + """ + Implementation of ``|`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other | self + + def __xor__(self, other) -> ParserElement: + """ + Implementation of ``^`` operator - returns :class:`Or` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return Or([self, other]) + + def __rxor__(self, other) -> ParserElement: + """ + Implementation of ``^`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other ^ self + + def __and__(self, other) -> ParserElement: + """ + Implementation of ``&`` operator - returns :class:`Each` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return Each([self, other]) + + def __rand__(self, other) -> ParserElement: + """ + Implementation of ``&`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return other & self + + def __invert__(self) -> ParserElement: + """ + Implementation of ``~`` operator - returns :class:`NotAny` + """ + return NotAny(self) + + # disable __iter__ to override legacy use of sequential access to __getitem__ to + # iterate over a sequence + __iter__ = None + + def __getitem__(self, key): + """ + use ``[]`` indexing notation as a short form for expression repetition: + + - ``expr[n]`` is equivalent to ``expr*n`` + - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` + - ``expr[n, ...]`` or ``expr[n,]`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` + - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` + + ``None`` may be used in place of ``...``. + + Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception + if more than ``n`` ``expr``\\ s exist in the input stream. If this behavior is + desired, then write ``expr[..., n] + ~expr``. + + For repetition with a stop_on expression, use slice notation: + + - ``expr[...: end_expr]`` and ``expr[0, ...: end_expr]`` are equivalent to ``ZeroOrMore(expr, stop_on=end_expr)`` + - ``expr[1, ...: end_expr]`` is equivalent to ``OneOrMore(expr, stop_on=end_expr)`` + + .. versionchanged:: 3.1.0 + Support for slice notation. + """ + + stop_on_defined = False + stop_on = NoMatch() + if isinstance(key, slice): + key, stop_on = key.start, key.stop + if key is None: + key = ... 
+ stop_on_defined = True
+ elif isinstance(key, tuple) and isinstance(key[-1], slice):
+ key, stop_on = (key[0], key[1].start), key[1].stop
+ stop_on_defined = True
+
+ # convert single arg keys to tuples
+ if isinstance(key, str_type):
+ key = (key,)
+ try:
+ iter(key)
+ except TypeError:
+ key = (key, key)
+
+ if len(key) > 2:
+ raise TypeError(
+ f"only 1 or 2 index arguments supported ({key[:5]}{f'... [{len(key)}]' if len(key) > 5 else ''})"
+ )
+
+ # clip to 2 elements
+ ret = self * tuple(key[:2])
+ ret = typing.cast(_MultipleMatch, ret)
+
+ if stop_on_defined:
+ ret.stopOn(stop_on)
+
+ return ret
+
+ def __call__(self, name: typing.Optional[str] = None) -> ParserElement:
+ """
+ Shortcut for :class:`set_results_name`, with ``list_all_matches=False``.
+
+ If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be
+ passed as ``True``.
+
+ If ``name`` is omitted, same as calling :class:`copy`.
+
+ Example:
+
+ .. testcode::
+
+ # these are equivalent
+ userdata = (
+ Word(alphas).set_results_name("name")
+ + Word(nums + "-").set_results_name("socsecno")
+ )
+
+ userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
+ """
+ if name is not None:
+ return self._setResultsName(name)
+
+ return self.copy()
+
+ def suppress(self) -> ParserElement:
+ """
+ Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
+ cluttering up returned output.
+ """
+ return Suppress(self)
+
+ def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
+ """
+ Enables the skipping of whitespace before matching the characters in the
+ :class:`ParserElement`'s defined pattern.
+
+ :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any)
+ """
+ self.skipWhitespace = True
+ return self
+
+ def leave_whitespace(self, recursive: bool = True) -> ParserElement:
+ """
+ Disables the skipping of whitespace before matching the characters in the
+ :class:`ParserElement`'s defined pattern. This is normally only used internally by
+ the pyparsing module, but may be needed in some whitespace-sensitive grammars.
+
+ :param recursive: If true (the default), also disable whitespace skipping in child elements (if any)
+ """
+ self.skipWhitespace = False
+ return self
+
+ def set_whitespace_chars(
+ self, chars: Union[set[str], str], copy_defaults: bool = False
+ ) -> ParserElement:
+ """
+ Overrides the default whitespace chars
+ """
+ self.skipWhitespace = True
+ self.whiteChars = set(chars)
+ self.copyDefaultWhiteChars = copy_defaults
+ return self
+
+ def parse_with_tabs(self) -> ParserElement:
+ """
+ Overrides default behavior to expand ``<TAB>`` s to spaces before parsing the input string.
+ Must be called before ``parse_string`` when the input grammar contains elements that
+ match ``<TAB>`` characters.
+ """
+ self.keepTabs = True
+ return self
+
+ def ignore(self, other: ParserElement) -> ParserElement:
+ """
+ Define expression to be ignored (e.g., comments) while doing pattern
+ matching; may be called repeatedly, to define multiple comment or other
+ ignorable patterns.
+
+ Example:
+
+ .. doctest::
+
+ >>> patt = Word(alphas)[...]
+ >>> print(patt.parse_string('ablaj /* comment */ lskjd'))
+ ['ablaj']
+
+ >>> patt = Word(alphas)[...].ignore(c_style_comment)
+ >>> print(patt.parse_string('ablaj /* comment */ lskjd'))
+ ['ablaj', 'lskjd']
+ """
+ if isinstance(other, str_type):
+ other = Suppress(other)
+
+ if isinstance(other, Suppress):
+ if other not in self.ignoreExprs:
+ self.ignoreExprs.append(other)
+ else:
+ self.ignoreExprs.append(Suppress(other.copy()))
+ return self
+
+ def set_debug_actions(
+ self,
+ start_action: DebugStartAction,
+ success_action: DebugSuccessAction,
+ exception_action: DebugExceptionAction,
+ ) -> ParserElement:
+ """
+ Customize display of debugging messages while doing pattern matching:
+
+ :param start_action: method to be called when an expression is about to be parsed;
+ should have the signature::
+
+ fn(input_string: str,
+ location: int,
+ expression: ParserElement,
+ cache_hit: bool)
+
+ :param success_action: method to be called when an expression has successfully parsed;
+ should have the signature::
+
+ fn(input_string: str,
+ start_location: int,
+ end_location: int,
+ expression: ParserElement,
+ parsed_tokens: ParseResults,
+ cache_hit: bool)
+
+ :param exception_action: method to be called when expression fails to parse;
+ should have the signature::
+
+ fn(input_string: str,
+ location: int,
+ expression: ParserElement,
+ exception: Exception,
+ cache_hit: bool)
+ """
+ self.debugActions = self.DebugActions(
+ start_action or _default_start_debug_action, # type: ignore[truthy-function]
+ success_action or _default_success_debug_action, # type: ignore[truthy-function]
+ exception_action or _default_exception_debug_action, # type: ignore[truthy-function]
+ )
+ self.debug = any(self.debugActions)
+ return self
+
+ def set_debug(self, flag: bool = True, recurse: bool = False) -> ParserElement:
+ """
+ Enable display of debugging messages while doing pattern matching.
+ Set ``flag`` to ``True`` to enable, ``False`` to disable.
+ Set ``recurse`` to ``True`` to set the debug flag on this expression and all sub-expressions.
+
+ Example:
+
+ .. testcode::
+
+ wd = Word(alphas).set_name("alphaword")
+ integer = Word(nums).set_name("numword")
+ term = wd | integer
+
+ # turn on debugging for wd
+ wd.set_debug()
+
+ term[1, ...].parse_string("abc 123 xyz 890")
+
+ prints:
+
+ .. testoutput::
+ :options: +NORMALIZE_WHITESPACE
+
+ Match alphaword at loc 0(1,1)
+ abc 123 xyz 890
+ ^
+ Matched alphaword -> ['abc']
+ Match alphaword at loc 4(1,5)
+ abc 123 xyz 890
+ ^
+ Match alphaword failed, ParseException raised: Expected alphaword, ...
+ Match alphaword at loc 8(1,9)
+ abc 123 xyz 890
+ ^
+ Matched alphaword -> ['xyz']
+ Match alphaword at loc 12(1,13)
+ abc 123 xyz 890
+ ^
+ Match alphaword failed, ParseException raised: Expected alphaword, ...
+ abc 123 xyz 890
+ ^
+ Match alphaword failed, ParseException raised: Expected alphaword, found end of text ...
+
+ The output shown is that produced by the default debug actions - custom debug actions can be
+ specified using :meth:`set_debug_actions`. Prior to attempting
+ to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
+ is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
+ message is shown. Also note the use of :meth:`set_name` to assign a human-readable name to the expression,
+ which makes debugging and exception messages easier to understand - for instance, the default
+ name created for the :class:`Word` expression without calling :meth:`set_name` is ``"W:(A-Za-z)"``.
+
+ .. versionchanged:: 3.1.0
+ ``recurse`` argument added.
+ """
+ if recurse:
+ for expr in self.visit_all():
+ expr.set_debug(flag, recurse=False)
+ return self
+
+ if flag:
+ self.set_debug_actions(
+ _default_start_debug_action,
+ _default_success_debug_action,
+ _default_exception_debug_action,
+ )
+ else:
+ self.debug = False
+ return self
+
+ @property
+ def default_name(self) -> str:
+ if self._defaultName is None:
+ self._defaultName = self._generateDefaultName()
+ return self._defaultName
+
+ @abstractmethod
+ def _generateDefaultName(self) -> str:
+ """
+ Child classes must define this method, which defines how the ``default_name`` is set.
+ """
+
+ def set_name(self, name: typing.Optional[str]) -> ParserElement:
+ """
+ Define name for this expression, makes debugging and exception messages clearer. If
+ `__diag__.enable_debug_on_named_expressions` is set to True, setting a name will also
+ enable debug for this expression.
+
+ If `name` is None, clears any custom name for this expression, and clears the
+ debug flag if it was enabled via `__diag__.enable_debug_on_named_expressions`.
+
+ Example:
+
+ .. doctest::
+
+ >>> integer = Word(nums)
+ >>> integer.parse_string("ABC")
+ Traceback (most recent call last):
+ ParseException: Expected W:(0-9) (at char 0), (line:1, col:1)
+
+ >>> integer.set_name("integer")
+ integer
+ >>> integer.parse_string("ABC")
+ Traceback (most recent call last):
+ ParseException: Expected integer (at char 0), (line:1, col:1)
+
+ .. versionchanged:: 3.1.0
+ Accept ``None`` as the ``name`` argument.
+ """
+ self.customName = name # type: ignore[assignment]
+ self.errmsg = f"Expected {str(self)}"
+
+ if __diag__.enable_debug_on_named_expressions:
+ self.set_debug(name is not None)
+
+ return self
+
+ @property
+ def name(self) -> str:
+ """
+ Returns a user-defined name if available, but otherwise defaults back to the auto-generated name
+ """
+ return self.customName if self.customName is not None else self.default_name
+
+ @name.setter
+ def name(self, new_name) -> None:
+ self.set_name(new_name)
+
+ def __str__(self) -> str:
+ return self.name
+
+ def __repr__(self) -> str:
+ return str(self)
+
+ def streamline(self) -> ParserElement:
+ self.streamlined = True
+ self._defaultName = None
+ return self
+
+ def recurse(self) -> list[ParserElement]:
+ return []
+
+ def _checkRecursion(self, parseElementList):
+ subRecCheckList = parseElementList[:] + [self]
+ for e in self.recurse():
+ e._checkRecursion(subRecCheckList)
+
+ def validate(self, validateTrace=None) -> None:
+ """
+ .. deprecated:: 3.0.0
+ Do not use to check for left recursion.
+
+ Check defined expressions for valid structure, check for infinite recursive definitions.
+
+ """
+ warnings.warn(
+ "ParserElement.validate() is deprecated, and should not be used to check for left recursion",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ self._checkRecursion([])
+
+ def parse_file(
+ self,
+ file_or_filename: Union[str, Path, TextIO],
+ encoding: str = "utf-8",
+ parse_all: bool = False,
+ **kwargs,
+ ) -> ParseResults:
+ """
+ Execute the parse expression on the given file or filename.
+ If a filename is specified (instead of a file object),
+ the entire file is opened, read, and closed before parsing.
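+
+ Example (an illustrative sketch, parsing from an in-memory file object):
+
+ .. doctest::
+
+ >>> from io import StringIO
+ >>> Word(nums).parse_file(StringIO("12345"))
+ ParseResults(['12345'], {})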
+ """ + parseAll: bool = deprecate_argument(kwargs, "parseAll", False) + + parse_all = parse_all or parseAll + try: + file_or_filename = typing.cast(TextIO, file_or_filename) + file_contents = file_or_filename.read() + except AttributeError: + file_or_filename = typing.cast(str, file_or_filename) + with open(file_or_filename, "r", encoding=encoding) as f: + file_contents = f.read() + try: + return self.parse_string(file_contents, parse_all) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def __eq__(self, other): + if self is other: + return True + elif isinstance(other, str_type): + return self.matches(other, parse_all=True) + elif isinstance(other, ParserElement): + return vars(self) == vars(other) + return False + + def __hash__(self): + return id(self) + + def matches(self, test_string: str, parse_all: bool = True, **kwargs) -> bool: + """ + Method for quick testing of a parser against a test string. Good for simple + inline microtests of sub expressions while building up larger parser. + + :param test_string: to test against this expression for a match + :param parse_all: flag to pass to :meth:`parse_string` when running tests + + Example: + + .. doctest:: + + >>> expr = Word(nums) + >>> expr.matches("100") + True + """ + parseAll: bool = deprecate_argument(kwargs, "parseAll", True) + + parse_all = parse_all and parseAll + try: + self.parse_string(str(test_string), parse_all=parse_all) + return True + except ParseBaseException: + return False + + def run_tests( + self, + tests: Union[str, list[str]], + parse_all: bool = True, + comment: typing.Optional[Union[ParserElement, str]] = "#", + full_dump: bool = True, + print_results: bool = True, + failure_tests: bool = False, + post_parse: typing.Optional[ + Callable[[str, ParseResults], typing.Optional[str]] + ] = None, + file: typing.Optional[TextIO] = None, + with_line_numbers: bool = False, + *, + parseAll: bool = True, + fullDump: bool = True, + printResults: bool = True, + failureTests: bool = False, + postParse: typing.Optional[ + Callable[[str, ParseResults], typing.Optional[str]] + ] = None, + ) -> tuple[bool, list[tuple[str, Union[ParseResults, Exception]]]]: + """ + Execute the parse expression on a series of test strings, showing each + test, the parsed results or where the parse failed. Quick and easy way to + run a parse expression against a list of sample strings. 
+
+ Parameters:
+
+ - ``tests`` - a list of separate test strings, or a multiline string of test strings
+ - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
+ - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test
+ string; pass None to disable comment filtering
+ - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline;
+ if False, only dump nested list
+ - ``print_results`` - (default= ``True``) prints test output to stdout
+ - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing
+ - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as
+ `fn(test_string, parse_results)` and returns a string to be added to the test output
+ - ``file`` - (default= ``None``) optional file-like object to which test output will be written;
+ if None, will default to ``sys.stdout``
+ - ``with_line_numbers`` - (default= ``False``) show test strings with line and column numbers
+
+ Returns: a (success, results) tuple, where success indicates that all tests succeeded
+ (or failed if ``failure_tests`` is True), and the results contain a list of lines of each
+ test's output
+
+ Passing example:
+
+ .. testcode::
+
+ number_expr = pyparsing_common.number.copy()
+
+ result = number_expr.run_tests('''
+ # unsigned integer
+ 100
+ # negative integer
+ -100
+ # float with scientific notation
+ 6.02e23
+ # integer with scientific notation
+ 1e-12
+ # negative decimal number without leading digit
+ -.100
+ ''')
+ print("Success" if result[0] else "Failed!")
+
+ prints:
+
+ .. testoutput::
+ :options: +NORMALIZE_WHITESPACE
+
+
+ # unsigned integer
+ 100
+ [100]
+
+ # negative integer
+ -100
+ [-100]
+
+ # float with scientific notation
+ 6.02e23
+ [6.02e+23]
+
+ # integer with scientific notation
+ 1e-12
+ [1e-12]
+
+ # negative decimal number without leading digit
+ -.100
+ [-0.1]
+ Success
+
+ Failure-test example:
+
+ .. testcode::
+
+ result = number_expr.run_tests('''
+ # stray character
+ 100Z
+ # too many '.'
+ 3.14.159
+ ''', failure_tests=True)
+ print("Success" if result[0] else "Failed!")
+
+ prints:
+
+ .. testoutput::
+ :options: +NORMALIZE_WHITESPACE
+
+
+ # stray character
+ 100Z
+ 100Z
+ ^
+ ParseException: Expected end of text, found 'Z' ...
+
+ # too many '.'
+ 3.14.159
+ 3.14.159
+ ^
+ ParseException: Expected end of text, found '.' ...
+ FAIL: Expected end of text, found '.' ...
+ Success
+
+ Each test string must be on a single line. If you want to test a string that spans multiple
+ lines, create a test like this:
+
+ .. testcode::
+
+ expr = Word(alphanums)[1,...]
+ expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines")
+
+ .. testoutput::
+ :options: +NORMALIZE_WHITESPACE
+ :hide:
+
+
+ this is a test\\n of strings that spans \\n 3 lines
+ ['this', 'is', 'a', 'test', 'of', 'strings', 'that', 'spans', '3', 'lines']
+
+ (Note that this is a raw string literal, you must include the leading ``'r'``.)
+ """ + from .testing import pyparsing_test + + parseAll = parseAll and parse_all + fullDump = fullDump and full_dump + printResults = printResults and print_results + failureTests = failureTests or failure_tests + postParse = postParse or post_parse + if isinstance(tests, str_type): + tests = typing.cast(str, tests) + line_strip = type(tests).strip + tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()] + comment_specified = comment is not None + if comment_specified: + if isinstance(comment, str_type): + comment = typing.cast(str, comment) + comment = Literal(comment) + comment = typing.cast(ParserElement, comment) + if file is None: + file = sys.stdout + print_ = file.write + + result: Union[ParseResults, Exception] + allResults: list[tuple[str, Union[ParseResults, Exception]]] = [] + comments: list[str] = [] + success = True + NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string) + BOM = "\ufeff" + nlstr = "\n" + for t in tests: + if comment_specified and comment.matches(t, False) or comments and not t: + comments.append( + pyparsing_test.with_line_numbers(t) if with_line_numbers else t + ) + continue + if not t: + continue + out = [ + f"{nlstr}{nlstr.join(comments) if comments else ''}", + pyparsing_test.with_line_numbers(t) if with_line_numbers else t, + ] + comments.clear() + try: + # convert newline marks to actual newlines, and strip leading BOM if present + t = NL.transform_string(t.lstrip(BOM)) + result = self.parse_string(t, parse_all=parse_all) + except ParseBaseException as pe: + fatal = "(FATAL) " if isinstance(pe, ParseFatalException) else "" + out.append(pe.explain()) + out.append(f"FAIL: {fatal}{pe}") + if ParserElement.verbose_stacktrace: + out.extend(traceback.format_tb(pe.__traceback__)) + success = success and failureTests + result = pe + except Exception as exc: + tag = "FAIL-EXCEPTION" + + # see if this exception was raised in a parse action + tb = exc.__traceback__ + it = iter(traceback.walk_tb(tb)) + for f, line in it: + if (f.f_code.co_filename, line) == pa_call_line_synth: + next_f = next(it)[0] + tag += f" (raised in parse action {next_f.f_code.co_name!r})" + break + + out.append(f"{tag}: {type(exc).__name__}: {exc}") + if ParserElement.verbose_stacktrace: + out.extend(traceback.format_tb(exc.__traceback__)) + success = success and failureTests + result = exc + else: + success = success and not failureTests + if postParse is not None: + try: + pp_value = postParse(t, result) + if pp_value is not None: + if isinstance(pp_value, ParseResults): + out.append(pp_value.dump()) + else: + out.append(str(pp_value)) + else: + out.append(result.dump()) + except Exception as e: + out.append(result.dump(full=fullDump)) + out.append( + f"{postParse.__name__} failed: {type(e).__name__}: {e}" + ) + else: + out.append(result.dump(full=fullDump)) + out.append("") + + if printResults: + print_("\n".join(out)) + + allResults.append((t, result)) + + return success, allResults + + def create_diagram( + self, + output_html: Union[TextIO, Path, str], + vertical: int = 3, + show_results_names: bool = False, + show_groups: bool = False, + embed: bool = False, + show_hidden: bool = False, + **kwargs, + ) -> None: + """ + Create a railroad diagram for the parser. 
+
+ Parameters:
+
+ - ``output_html`` (str or file-like object) - output target for generated
+ diagram HTML
+ - ``vertical`` (int) - threshold for formatting multiple alternatives vertically
+ instead of horizontally (default=3)
+ - ``show_results_names`` - bool flag whether diagram should show annotations for
+ defined results names
+ - ``show_groups`` - bool flag whether groups should be highlighted with an unlabeled surrounding box
+ - ``show_hidden`` - bool flag to show diagram elements for internal elements that are usually hidden
+ - ``embed`` - bool flag whether generated HTML should omit <HTML>, <HEAD>, and <BODY> tags to embed
+ the resulting HTML in an enclosing HTML source
+ - ``head`` - str containing additional HTML to insert into the <HEAD> section of the generated code;
+ can be used to insert custom CSS styling
+ - ``body`` - str containing additional HTML to insert at the beginning of the <BODY> section of the
+ generated code
+
+ Additional diagram-formatting keyword arguments can also be included;
+ see railroad.Diagram class.
+
+ .. versionchanged:: 3.1.0
+ ``embed`` argument added.
+ """
+
+ try:
+ from .diagram import to_railroad, railroad_to_html
+ except ImportError as ie:
+ raise Exception(
+ "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams"
+ ) from ie
+
+ self.streamline()
+
+ railroad = to_railroad(
+ self,
+ vertical=vertical,
+ show_results_names=show_results_names,
+ show_groups=show_groups,
+ show_hidden=show_hidden,
+ diagram_kwargs=kwargs,
+ )
+ if not isinstance(output_html, (str, Path)):
+ # we were passed a file-like object, just write to it
+ output_html.write(railroad_to_html(railroad, embed=embed, **kwargs))
+ return
+
+ with open(output_html, "w", encoding="utf-8") as diag_file:
+ diag_file.write(railroad_to_html(railroad, embed=embed, **kwargs))
+
+ # Compatibility synonyms
+ # fmt: off
+ inlineLiteralsUsing = staticmethod(replaced_by_pep8("inlineLiteralsUsing", inline_literals_using))
+ setDefaultWhitespaceChars = staticmethod(replaced_by_pep8(
+ "setDefaultWhitespaceChars", set_default_whitespace_chars
+ ))
+ disableMemoization = staticmethod(replaced_by_pep8("disableMemoization", disable_memoization))
+ enableLeftRecursion = staticmethod(replaced_by_pep8("enableLeftRecursion", enable_left_recursion))
+ enablePackrat = staticmethod(replaced_by_pep8("enablePackrat", enable_packrat))
+ resetCache = staticmethod(replaced_by_pep8("resetCache", reset_cache))
+
+ setResultsName = replaced_by_pep8("setResultsName", set_results_name)
+ setBreak = replaced_by_pep8("setBreak", set_break)
+ setParseAction = replaced_by_pep8("setParseAction", set_parse_action)
+ addParseAction = replaced_by_pep8("addParseAction", add_parse_action)
+ addCondition = replaced_by_pep8("addCondition", add_condition)
+ setFailAction = replaced_by_pep8("setFailAction", set_fail_action)
+ tryParse = replaced_by_pep8("tryParse", try_parse)
+ parseString = replaced_by_pep8("parseString", parse_string)
+ scanString = replaced_by_pep8("scanString", scan_string)
+ transformString = replaced_by_pep8("transformString", transform_string)
+ searchString = replaced_by_pep8("searchString", search_string)
+ ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace)
+ leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace)
+ setWhitespaceChars = replaced_by_pep8("setWhitespaceChars", set_whitespace_chars)
+ parseWithTabs = replaced_by_pep8("parseWithTabs", parse_with_tabs)
+ setDebugActions = replaced_by_pep8("setDebugActions", set_debug_actions)
+ setDebug = replaced_by_pep8("setDebug", set_debug)
+ setName = replaced_by_pep8("setName", set_name)
+ parseFile = replaced_by_pep8("parseFile", parse_file)
+ runTests = replaced_by_pep8("runTests", run_tests)
+ canParseNext = replaced_by_pep8("canParseNext", can_parse_next)
+ defaultName = default_name
+ # fmt: on
+
+
+class _PendingSkip(ParserElement):
+ # internal placeholder class to hold a place where '...' is added to a parser element;
+ # once another ParserElement is added, this placeholder will be replaced with a SkipTo
+ def __init__(self, expr: ParserElement, must_skip: bool = False) -> None:
+ super().__init__()
+ self.anchor = expr
+ self.must_skip = must_skip
+
+ def _generateDefaultName(self) -> str:
+ return str(self.anchor + Empty()).replace("Empty", "...")
+
+ def __add__(self, other) -> ParserElement:
+ skipper = SkipTo(other).set_name("...")("_skipped*")
+ if self.must_skip:
+
+ def must_skip(t):
+ if not t._skipped or t._skipped.as_list() == [""]:
+ del t[0]
+ t.pop("_skipped", None)
+
+ def show_skip(t):
+ if t._skipped.as_list()[-1:] == [""]:
+ t.pop("_skipped")
+ t["_skipped"] = f"missing <{self.anchor!r}>"
+
+ return (
+ self.anchor + skipper().add_parse_action(must_skip)
+ | skipper().add_parse_action(show_skip)
+ ) + other
+
+ return self.anchor + skipper + other
+
+ def __repr__(self):
+ return self.defaultName
+
+ def parseImpl(self, *args) -> ParseImplReturnType:
+ raise Exception(
+ "use of `...` expression without following SkipTo target expression"
+ )
+
+
+class Token(ParserElement):
+ """Abstract :class:`ParserElement` subclass, for defining atomic
+ matching patterns.
+ """
+
+ def __init__(self) -> None:
+ super().__init__(savelist=False)
+
+ def _generateDefaultName(self) -> str:
+ return type(self).__name__
+
+
+class NoMatch(Token):
+ """
+ A token that will never match.
+ """
+
+ def __init__(self) -> None:
+ super().__init__()
+ self._may_return_empty = True
+ self.mayIndexError = False
+ self.errmsg = "Unmatchable token"
+
+ def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Literal(Token):
+ """
+ Token to exactly match a specified string.
+
+ Example:
+
+ .. doctest::
+
+ >>> Literal('abc').parse_string('abc')
+ ParseResults(['abc'], {})
+ >>> Literal('abc').parse_string('abcdef')
+ ParseResults(['abc'], {})
+ >>> Literal('abc').parse_string('ab')
+ Traceback (most recent call last):
+ ParseException: Expected 'abc', found 'ab' (at char 0), (line: 1, col: 1)
+
+ For case-insensitive matching, use :class:`CaselessLiteral`.
+
+ For keyword matching (force word break before and after the matched string),
+ use :class:`Keyword` or :class:`CaselessKeyword`.
+ """ + + def __new__(cls, match_string: str = "", **kwargs): + # Performance tuning: select a subclass with optimized parseImpl + if cls is Literal: + matchString: str = deprecate_argument(kwargs, "matchString", "") + + match_string = matchString or match_string + if not match_string: + return super().__new__(Empty) + if len(match_string) == 1: + return super().__new__(_SingleCharLiteral) + + # Default behavior + return super().__new__(cls) + + # Needed to make copy.copy() work correctly if we customize __new__ + def __getnewargs__(self): + return (self.match,) + + def __init__(self, match_string: str = "", **kwargs) -> None: + matchString: str = deprecate_argument(kwargs, "matchString", "") + + super().__init__() + match_string = matchString or match_string + self.match = match_string + self.matchLen = len(match_string) + self.firstMatchChar = match_string[:1] + self.errmsg = f"Expected {self.name}" + self._may_return_empty = False + self.mayIndexError = False + + def _generateDefaultName(self) -> str: + return repr(self.match) + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if instring[loc] == self.firstMatchChar and instring.startswith( + self.match, loc + ): + return loc + self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + + +class Empty(Literal): + """ + An empty token, will always match. + """ + + def __init__(self, match_string="", *, matchString="") -> None: + super().__init__("") + self._may_return_empty = True + self.mayIndexError = False + + def _generateDefaultName(self) -> str: + return "Empty" + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + return loc, [] + + +class _SingleCharLiteral(Literal): + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if instring[loc] == self.firstMatchChar: + return loc + 1, self.match + raise ParseException(instring, loc, self.errmsg, self) + + +ParserElement._literalStringClass = Literal + + +class Keyword(Token): + """ + Token to exactly match a specified string as a keyword, that is, + it must be immediately preceded and followed by whitespace or + non-keyword characters. Compare with :class:`Literal`: + + - ``Literal("if")`` will match the leading ``'if'`` in + ``'ifAndOnlyIf'``. + - ``Keyword("if")`` will not; it will only match the leading + ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` + + Accepts two optional constructor arguments in addition to the + keyword string: + + - ``ident_chars`` is a string of characters that would be valid + identifier characters, defaulting to all alphanumerics + "_" and + "$" + - ``caseless`` allows case-insensitive matching, default is ``False``. + + Example: + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> Keyword("start").parse_string("start") + ParseResults(['start'], {}) + >>> Keyword("start").parse_string("starting") + Traceback (most recent call last): + ParseException: Expected Keyword 'start', keyword was immediately + followed by keyword character, found 'ing' (at char 5), (line:1, col:6) + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> Keyword("start").parse_string("starting").debug() + Traceback (most recent call last): + ParseException: Expected Keyword "start", keyword was immediately + followed by keyword character, found 'ing' ... + + For case-insensitive matching, use :class:`CaselessKeyword`. 
+ """ + + DEFAULT_KEYWORD_CHARS = alphanums + "_$" + + def __init__( + self, + match_string: str = "", + ident_chars: typing.Optional[str] = None, + caseless: bool = False, + **kwargs, + ) -> None: + matchString = deprecate_argument(kwargs, "matchString", "") + identChars = deprecate_argument(kwargs, "identChars", None) + + super().__init__() + identChars = identChars or ident_chars + if identChars is None: + identChars = Keyword.DEFAULT_KEYWORD_CHARS + match_string = matchString or match_string + self.match = match_string + self.matchLen = len(match_string) + self.firstMatchChar = match_string[:1] + if not self.firstMatchChar: + raise ValueError("null string passed to Keyword; use Empty() instead") + self.errmsg = f"Expected {type(self).__name__} {self.name}" + self._may_return_empty = False + self.mayIndexError = False + self.caseless = caseless + if caseless: + self.caselessmatch = match_string.upper() + identChars = identChars.upper() + self.ident_chars = set(identChars) + + @property + def identChars(self) -> set[str]: + """ + .. deprecated:: 3.3.0 + use ident_chars instead. + + Property returning the characters being used as keyword characters for this expression. + """ + return self.ident_chars + + def _generateDefaultName(self) -> str: + return repr(self.match) + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + errmsg = self.errmsg or "" + errloc = loc + if self.caseless: + if instring[loc : loc + self.matchLen].upper() == self.caselessmatch: + if loc == 0 or instring[loc - 1].upper() not in self.identChars: + if ( + loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen].upper() not in self.identChars + ): + return loc + self.matchLen, self.match + + # followed by keyword char + errmsg += ", was immediately followed by keyword character" + errloc = loc + self.matchLen + else: + # preceded by keyword char + errmsg += ", keyword was immediately preceded by keyword character" + errloc = loc - 1 + # else no match just raise plain exception + + elif ( + instring[loc] == self.firstMatchChar + and self.matchLen == 1 + or instring.startswith(self.match, loc) + ): + if loc == 0 or instring[loc - 1] not in self.identChars: + if ( + loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen] not in self.identChars + ): + return loc + self.matchLen, self.match + + # followed by keyword char + errmsg += ", keyword was immediately followed by keyword character" + errloc = loc + self.matchLen + else: + # preceded by keyword char + errmsg += ", keyword was immediately preceded by keyword character" + errloc = loc - 1 + # else no match just raise plain exception + + raise ParseException(instring, errloc, errmsg, self) + + @staticmethod + def set_default_keyword_chars(chars) -> None: + """ + Overrides the default characters used by :class:`Keyword` expressions. + """ + Keyword.DEFAULT_KEYWORD_CHARS = chars + + # Compatibility synonyms + setDefaultKeywordChars = staticmethod( + replaced_by_pep8("setDefaultKeywordChars", set_default_keyword_chars) + ) + + +class CaselessLiteral(Literal): + """ + Token to match a specified string, ignoring case of letters. + Note: the matched results will always be in the case of the given + match string, NOT the case of the input text. + + Example: + + .. doctest:: + + >>> CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10") + ParseResults(['CMD', 'CMD', 'CMD'], {}) + + (Contrast with example for :class:`CaselessKeyword`.) 
+ """ + + def __init__(self, match_string: str = "", **kwargs) -> None: + matchString: str = deprecate_argument(kwargs, "matchString", "") + + match_string = matchString or match_string + super().__init__(match_string.upper()) + # Preserve the defining literal. + self.returnString = match_string + self.errmsg = f"Expected {self.name}" + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if instring[loc : loc + self.matchLen].upper() == self.match: + return loc + self.matchLen, self.returnString + raise ParseException(instring, loc, self.errmsg, self) + + +class CaselessKeyword(Keyword): + """ + Caseless version of :class:`Keyword`. + + Example: + + .. doctest:: + + >>> CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10") + ParseResults(['CMD', 'CMD'], {}) + + (Contrast with example for :class:`CaselessLiteral`.) + """ + + def __init__( + self, match_string: str = "", ident_chars: typing.Optional[str] = None, **kwargs + ) -> None: + matchString: str = deprecate_argument(kwargs, "matchString", "") + identChars: typing.Optional[str] = deprecate_argument( + kwargs, "identChars", None + ) + + identChars = identChars or ident_chars + match_string = matchString or match_string + super().__init__(match_string, identChars, caseless=True) + + +class CloseMatch(Token): + """A variation on :class:`Literal` which matches "close" matches, + that is, strings with at most 'n' mismatching characters. + :class:`CloseMatch` takes parameters: + + - ``match_string`` - string to be matched + - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters + - ``max_mismatches`` - (``default=1``) maximum number of + mismatches allowed to count as a match + + The results from a successful parse will contain the matched text + from the input string and the following named results: + + - ``mismatches`` - a list of the positions within the + match_string where mismatches were found + - ``original`` - the original match_string used to compare + against the input string + + If ``mismatches`` is an empty list, then the match was an exact + match. + + Example: + + .. 
doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> patt = CloseMatch("ATCATCGAATGGA") + >>> patt.parse_string("ATCATCGAAXGGA") + ParseResults(['ATCATCGAAXGGA'], + {'original': 'ATCATCGAATGGA', 'mismatches': [9]}) + + >>> patt.parse_string("ATCAXCGAAXGGA") + Traceback (most recent call last): + ParseException: Expected 'ATCATCGAATGGA' (with up to 1 mismatches), + found 'ATCAXCGAAXGGA' (at char 0), (line:1, col:1) + + # exact match + >>> patt.parse_string("ATCATCGAATGGA") + ParseResults(['ATCATCGAATGGA'], + {'original': 'ATCATCGAATGGA', 'mismatches': []}) + + # close match allowing up to 2 mismatches + >>> patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2) + >>> patt.parse_string("ATCAXCGAAXGGA") + ParseResults(['ATCAXCGAAXGGA'], + {'original': 'ATCATCGAATGGA', 'mismatches': [4, 9]}) + """ + + def __init__( + self, + match_string: str, + max_mismatches: typing.Optional[int] = None, + *, + caseless=False, + **kwargs, + ) -> None: + maxMismatches: int = deprecate_argument(kwargs, "maxMismatches", 1) + + maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches + super().__init__() + self.match_string = match_string + self.maxMismatches = maxMismatches + self.errmsg = f"Expected {self.match_string!r} (with up to {self.maxMismatches} mismatches)" + self.caseless = caseless + self.mayIndexError = False + self._may_return_empty = False + + def _generateDefaultName(self) -> str: + return f"{type(self).__name__}:{self.match_string!r}" + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + start = loc + instrlen = len(instring) + maxloc = start + len(self.match_string) + + if maxloc <= instrlen: + match_string = self.match_string + match_stringloc = 0 + mismatches = [] + maxMismatches = self.maxMismatches + + for match_stringloc, s_m in enumerate( + zip(instring[loc:maxloc], match_string) + ): + src, mat = s_m + if self.caseless: + src, mat = src.lower(), mat.lower() + + if src != mat: + mismatches.append(match_stringloc) + if len(mismatches) > maxMismatches: + break + else: + loc = start + match_stringloc + 1 + results = ParseResults([instring[start:loc]]) + results["original"] = match_string + results["mismatches"] = mismatches + return loc, results + + raise ParseException(instring, loc, self.errmsg, self) + + +class Word(Token): + """Token for matching words composed of allowed character sets. + + Parameters: + + - ``init_chars`` - string of all characters that should be used to + match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.; + if ``body_chars`` is also specified, then this is the string of + initial characters + - ``body_chars`` - string of characters that + can be used for matching after a matched initial character as + given in ``init_chars``; if omitted, same as the initial characters + (default=``None``) + - ``min`` - minimum number of characters to match (default=1) + - ``max`` - maximum number of characters to match (default=0) + - ``exact`` - exact number of characters to match (default=0) + - ``as_keyword`` - match as a keyword (default=``False``) + - ``exclude_chars`` - characters that might be + found in the input ``body_chars`` string but which should not be + accepted for matching ;useful to define a word of all + printables except for one or two characters, for instance + (default=``None``) + + :class:`srange` is useful for defining custom character set strings + for defining :class:`Word` expressions, using range notation from + regular expression character sets. 
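+
+    For example (an illustrative expansion), ``srange("[a-fA-F0-9]")``
+    returns the string ``"abcdefABCDEF0123456789"``, which can then be
+    passed directly to :class:`Word`.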
+ + A common mistake is to use :class:`Word` to match a specific literal + string, as in ``Word("Address")``. Remember that :class:`Word` + uses the string argument to define *sets* of matchable characters. + This expression would match "Add", "AAA", "dAred", or any other word + made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an + exact literal string, use :class:`Literal` or :class:`Keyword`. + + pyparsing includes helper strings for building Words: + + - :attr:`alphas` + - :attr:`nums` + - :attr:`alphanums` + - :attr:`hexnums` + - :attr:`alphas8bit` (alphabetic characters in ASCII range 128-255 + - accented, tilded, umlauted, etc.) + - :attr:`punc8bit` (non-alphabetic characters in ASCII range + 128-255 - currency, symbols, superscripts, diacriticals, etc.) + - :attr:`printables` (any non-whitespace character) + + ``alphas``, ``nums``, and ``printables`` are also defined in several + Unicode sets - see :class:`pyparsing_unicode`. + + Example: + + .. testcode:: + + # a word composed of digits + integer = Word(nums) + # Two equivalent alternate forms: + Word("0123456789") + Word(srange("[0-9]")) + + # a word with a leading capital, and zero or more lowercase + capitalized_word = Word(alphas.upper(), alphas.lower()) + + # hostnames are alphanumeric, with leading alpha, and '-' + hostname = Word(alphas, alphanums + '-') + + # roman numeral + # (not a strict parser, accepts invalid mix of characters) + roman = Word("IVXLCDM") + + # any string of non-whitespace characters, except for ',' + csv_value = Word(printables, exclude_chars=",") + + :raises ValueError: If ``min`` and ``max`` are both specified + and the test ``min <= max`` fails. + + .. versionchanged:: 3.1.0 + Raises :exc:`ValueError` if ``min`` > ``max``. + """ + + def __init__( + self, + init_chars: str = "", + body_chars: typing.Optional[str] = None, + min: int = 1, + max: int = 0, + exact: int = 0, + as_keyword: bool = False, + exclude_chars: typing.Optional[str] = None, + **kwargs, + ) -> None: + initChars: typing.Optional[str] = deprecate_argument(kwargs, "initChars", None) + bodyChars: typing.Optional[str] = deprecate_argument(kwargs, "bodyChars", None) + asKeyword: bool = deprecate_argument(kwargs, "asKeyword", False) + excludeChars: typing.Optional[str] = deprecate_argument( + kwargs, "excludeChars", None + ) + + initChars = initChars or init_chars + bodyChars = bodyChars or body_chars + asKeyword = asKeyword or as_keyword + excludeChars = excludeChars or exclude_chars + super().__init__() + if not initChars: + raise ValueError( + f"invalid {type(self).__name__}, initChars cannot be empty string" + ) + + initChars_set = set(initChars) + if excludeChars: + excludeChars_set = set(excludeChars) + initChars_set -= excludeChars_set + if bodyChars: + bodyChars = "".join(set(bodyChars) - excludeChars_set) + self.init_chars = initChars_set + self.initCharsOrig = "".join(sorted(initChars_set)) + + if bodyChars: + self.bodyChars = set(bodyChars) + self.bodyCharsOrig = "".join(sorted(bodyChars)) + else: + self.bodyChars = initChars_set + self.bodyCharsOrig = self.initCharsOrig + + self.maxSpecified = max > 0 + + if min < 1: + raise ValueError( + "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted" + ) + + if self.maxSpecified and min > max: + raise ValueError( + f"invalid args, if min and max both specified min must be <= max (min={min}, max={max})" + ) + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + min = max = exact + 
self.maxLen = exact + self.minLen = exact + + self.errmsg = f"Expected {self.name}" + self.mayIndexError = False + self.asKeyword = asKeyword + if self.asKeyword: + self.errmsg += " as a keyword" + + # see if we can make a regex for this Word + if " " not in (self.initChars | self.bodyChars): + if len(self.initChars) == 1: + re_leading_fragment = re.escape(self.initCharsOrig) + else: + re_leading_fragment = f"[{_collapse_string_to_ranges(self.initChars)}]" + + if self.bodyChars == self.initChars: + if max == 0 and self.minLen == 1: + repeat = "+" + elif max == 1: + repeat = "" + else: + if self.minLen != self.maxLen: + repeat = f"{{{self.minLen},{'' if self.maxLen == _MAX_INT else self.maxLen}}}" + else: + repeat = f"{{{self.minLen}}}" + self.reString = f"{re_leading_fragment}{repeat}" + else: + if max == 1: + re_body_fragment = "" + repeat = "" + else: + re_body_fragment = f"[{_collapse_string_to_ranges(self.bodyChars)}]" + if max == 0 and self.minLen == 1: + repeat = "*" + elif max == 2: + repeat = "?" if min <= 1 else "" + else: + if min != max: + repeat = f"{{{min - 1 if min > 0 else ''},{max - 1 if max > 0 else ''}}}" + else: + repeat = f"{{{min - 1 if min > 0 else ''}}}" + + self.reString = f"{re_leading_fragment}{re_body_fragment}{repeat}" + + if self.asKeyword: + self.reString = rf"\b{self.reString}\b" + + try: + self.re = re.compile(self.reString) + except re.error: + self.re = None # type: ignore[assignment] + else: + self.re_match = self.re.match + self.parseImpl = self.parseImpl_regex # type: ignore[method-assign] + + @property + def initChars(self) -> set[str]: + """ + .. deprecated:: 3.3.0 + use `init_chars` instead. + + Property returning the initial chars to be used when matching this + Word expression. If no body chars were specified, the initial characters + will also be the body characters. + """ + return set(self.init_chars) + + def copy(self) -> Word: + """ + Returns a copy of this expression. + + Generally only used internally by pyparsing. + """ + ret: Word = cast(Word, super().copy()) + if hasattr(self, "re_match"): + ret.re_match = self.re_match + ret.parseImpl = ret.parseImpl_regex # type: ignore[method-assign] + return ret + + def _generateDefaultName(self) -> str: + def charsAsStr(s): + max_repr_len = 16 + s = _collapse_string_to_ranges(s, re_escape=False) + + if len(s) > max_repr_len: + return s[: max_repr_len - 3] + "..." 
+ + return s + + if self.initChars != self.bodyChars: + base = f"W:({charsAsStr(self.initChars)}, {charsAsStr(self.bodyChars)})" + else: + base = f"W:({charsAsStr(self.initChars)})" + + # add length specification + if self.minLen > 1 or self.maxLen != _MAX_INT: + if self.minLen == self.maxLen: + if self.minLen == 1: + return base[2:] + else: + return base + f"{{{self.minLen}}}" + elif self.maxLen == _MAX_INT: + return base + f"{{{self.minLen},...}}" + else: + return base + f"{{{self.minLen},{self.maxLen}}}" + return base + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if instring[loc] not in self.initChars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + instrlen = len(instring) + body_chars: set[str] = self.bodyChars + maxloc = start + self.maxLen + maxloc = min(maxloc, instrlen) + while loc < maxloc and instring[loc] in body_chars: + loc += 1 + + throw_exception = False + if loc - start < self.minLen: + throw_exception = True + elif self.maxSpecified and loc < instrlen and instring[loc] in body_chars: + throw_exception = True + elif self.asKeyword and ( + (start > 0 and instring[start - 1] in body_chars) + or (loc < instrlen and instring[loc] in body_chars) + ): + throw_exception = True + + if throw_exception: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + def parseImpl_regex(self, instring, loc, do_actions=True) -> ParseImplReturnType: + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + return loc, result.group() + + +class Char(Word): + """A short-cut class for defining :class:`Word` ``(characters, exact=1)``, + when defining a match of any single character in a string of + characters. + """ + + def __init__( + self, + charset: str, + as_keyword: bool = False, + exclude_chars: typing.Optional[str] = None, + **kwargs, + ) -> None: + asKeyword: bool = deprecate_argument(kwargs, "asKeyword", False) + excludeChars: typing.Optional[str] = deprecate_argument( + kwargs, "excludeChars", None + ) + + asKeyword = asKeyword or as_keyword + excludeChars = excludeChars or exclude_chars + super().__init__( + charset, exact=1, as_keyword=asKeyword, exclude_chars=excludeChars + ) + + +class Regex(Token): + r"""Token for matching strings that match a given regular + expression. Defined with string specifying the regular expression in + a form recognized by the stdlib Python `re module `_. + If the given regex contains named groups (defined using ``(?P...)``), + these will be preserved as named :class:`ParseResults`. + + If instead of the Python stdlib ``re`` module you wish to use a different RE module + (such as the ``regex`` module), you can do so by building your ``Regex`` object with + a compiled RE that was compiled using ``regex``. + + The parameters ``pattern`` and ``flags`` are passed + to the ``re.compile()`` function as-is. See the Python + `re module `_ module for an + explanation of the acceptable patterns and flags. + + Example: + + .. 
testcode:: + + realnum = Regex(r"[+-]?\d+\.\d*") + # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression + roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") + + # named fields in a regex will be returned as named results + date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') + + # the Regex class will accept regular expressions compiled using the + # re module + import re + parser = pp.Regex(re.compile(r'[0-9]')) + """ + + def __init__( + self, + pattern: Any, + flags: Union[re.RegexFlag, int] = 0, + as_group_list: bool = False, + as_match: bool = False, + **kwargs, + ) -> None: + super().__init__() + asGroupList: bool = deprecate_argument(kwargs, "asGroupList", False) + asMatch: bool = deprecate_argument(kwargs, "asMatch", False) + + asGroupList = asGroupList or as_group_list + asMatch = asMatch or as_match + + if isinstance(pattern, str_type): + if not pattern: + raise ValueError("null string passed to Regex; use Empty() instead") + + self._re = None + self._may_return_empty = None # type: ignore [assignment] + self.reString = self.pattern = pattern + + elif hasattr(pattern, "pattern") and hasattr(pattern, "match"): + self._re = pattern + self._may_return_empty = None # type: ignore [assignment] + self.pattern = self.reString = pattern.pattern + + elif callable(pattern): + # defer creating this pattern until we really need it + self.pattern = pattern + self._may_return_empty = None # type: ignore [assignment] + self._re = None + + else: + raise TypeError( + "Regex may only be constructed with a string or a compiled RE object," + " or a callable that takes no arguments and returns a string or a" + " compiled RE object" + ) + + self.flags = flags + self.errmsg = f"Expected {self.name}" + self.mayIndexError = False + self.asGroupList = asGroupList + self.asMatch = asMatch + if self.asGroupList: + self.parseImpl = self.parseImplAsGroupList # type: ignore [method-assign] + if self.asMatch: + self.parseImpl = self.parseImplAsMatch # type: ignore [method-assign] + + def copy(self) -> Regex: + """ + Returns a copy of this expression. + + Generally only used internally by pyparsing. + """ + ret: Regex = cast(Regex, super().copy()) + if self.asGroupList: + ret.parseImpl = ret.parseImplAsGroupList # type: ignore [method-assign] + if self.asMatch: + ret.parseImpl = ret.parseImplAsMatch # type: ignore [method-assign] + return ret + + @cached_property + def re(self) -> re.Pattern: + """ + Property returning the compiled regular expression for this Regex. + + Generally only used internally by pyparsing. 
+ """ + if self._re: + return self._re + + if callable(self.pattern): + # replace self.pattern with the string returned by calling self.pattern() + self.pattern = cast(Callable[[], str], self.pattern)() + + # see if we got a compiled RE back instead of a str - if so, we're done + if hasattr(self.pattern, "pattern") and hasattr(self.pattern, "match"): + self._re = cast(re.Pattern[str], self.pattern) + self.pattern = self.reString = self._re.pattern + return self._re + + try: + self._re = re.compile(self.pattern, self.flags) + except re.error: + raise ValueError(f"invalid pattern ({self.pattern!r}) passed to Regex") + else: + self._may_return_empty = self.re.match("", pos=0) is not None + return self._re + + @cached_property + def re_match(self) -> Callable[[str, int], Any]: + return self.re.match + + @property + def mayReturnEmpty(self): + if self._may_return_empty is None: + # force compile of regex pattern, to set may_return_empty flag + self.re # noqa + return self._may_return_empty + + @mayReturnEmpty.setter + def mayReturnEmpty(self, value): + self._may_return_empty = value + + def _generateDefaultName(self) -> str: + unescaped = repr(self.pattern).replace("\\\\", "\\") + return f"Re:({unescaped})" + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + # explicit check for matching past the length of the string; + # this is done because the re module will not complain about + # a match with `pos > len(instring)`, it will just return "" + if loc > len(instring) and self.mayReturnEmpty: + raise ParseException(instring, loc, self.errmsg, self) + + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = ParseResults(result.group()) + d = result.groupdict() + + for k, v in d.items(): + ret[k] = v + + return loc, ret + + def parseImplAsGroupList(self, instring, loc, do_actions=True): + if loc > len(instring) and self.mayReturnEmpty: + raise ParseException(instring, loc, self.errmsg, self) + + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.groups() + return loc, ret + + def parseImplAsMatch(self, instring, loc, do_actions=True): + if loc > len(instring) and self.mayReturnEmpty: + raise ParseException(instring, loc, self.errmsg, self) + + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result + return loc, ret + + def sub(self, repl: str) -> ParserElement: + r""" + Return :class:`Regex` with an attached parse action to transform the parsed + result as if called using `re.sub(expr, repl, string) `_. + + Example: + + .. testcode:: + + make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2") + print(make_html.transform_string("h1:main title:")) + + .. testoutput:: + +

+        <h1>main title</h1>

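+
+        Note: ``sub()`` raises ``TypeError`` if this ``Regex`` was
+        constructed with ``as_group_list=True``, or if ``repl`` is a
+        callable and the ``Regex`` was constructed with ``as_match=True``.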
    + """ + if self.asGroupList: + raise TypeError("cannot use sub() with Regex(as_group_list=True)") + + if self.asMatch and callable(repl): + raise TypeError( + "cannot use sub() with a callable with Regex(as_match=True)" + ) + + if self.asMatch: + + def pa(tokens): + return tokens[0].expand(repl) + + else: + + def pa(tokens): + return self.re.sub(repl, tokens[0]) + + return self.add_parse_action(pa) + + +class QuotedString(Token): + r""" + Token for matching strings that are delimited by quoting characters. + + Defined with the following parameters: + + - ``quote_char`` - string of one or more characters defining the + quote delimiting string + - ``esc_char`` - character to re_escape quotes, typically backslash + (default= ``None``) + - ``esc_quote`` - special quote sequence to re_escape an embedded quote + string (such as SQL's ``""`` to re_escape an embedded ``"``) + (default= ``None``) + - ``multiline`` - boolean indicating whether quotes can span + multiple lines (default= ``False``) + - ``unquote_results`` - boolean indicating whether the matched text + should be unquoted (default= ``True``) + - ``end_quote_char`` - string of one or more characters defining the + end of the quote delimited string (default= ``None`` => same as + quote_char) + - ``convert_whitespace_escapes`` - convert escaped whitespace + (``'\t'``, ``'\n'``, etc.) to actual whitespace + (default= ``True``) + + .. caution:: ``convert_whitespace_escapes`` has no effect if + ``unquote_results`` is ``False``. + + Example: + + .. doctest:: + + >>> qs = QuotedString('"') + >>> print(qs.search_string('lsjdf "This is the quote" sldjf')) + [['This is the quote']] + >>> complex_qs = QuotedString('{{', end_quote_char='}}') + >>> print(complex_qs.search_string( + ... 'lsjdf {{This is the "quote"}} sldjf')) + [['This is the "quote"']] + >>> sql_qs = QuotedString('"', esc_quote='""') + >>> print(sql_qs.search_string( + ... 
'lsjdf "This is the quote with ""embedded"" quotes" sldjf')) + [['This is the quote with "embedded" quotes']] + """ + + ws_map = dict(((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r"))) + + def __init__( + self, + quote_char: str = "", + esc_char: typing.Optional[str] = None, + esc_quote: typing.Optional[str] = None, + multiline: bool = False, + unquote_results: bool = True, + end_quote_char: typing.Optional[str] = None, + convert_whitespace_escapes: bool = True, + **kwargs, + ) -> None: + super().__init__() + quoteChar: str = deprecate_argument(kwargs, "quoteChar", "") + escChar: str = deprecate_argument(kwargs, "escChar", None) + escQuote: str = deprecate_argument(kwargs, "escQuote", None) + unquoteResults: bool = deprecate_argument(kwargs, "unquoteResults", True) + endQuoteChar: typing.Optional[str] = deprecate_argument( + kwargs, "endQuoteChar", None + ) + convertWhitespaceEscapes: bool = deprecate_argument( + kwargs, "convertWhitespaceEscapes", True + ) + + esc_char = escChar or esc_char + esc_quote = escQuote or esc_quote + unquote_results = unquoteResults and unquote_results + end_quote_char = endQuoteChar or end_quote_char + convert_whitespace_escapes = ( + convertWhitespaceEscapes and convert_whitespace_escapes + ) + quote_char = quoteChar or quote_char + + # remove white space from quote chars + quote_char = quote_char.strip() + if not quote_char: + raise ValueError("quote_char cannot be the empty string") + + if end_quote_char is None: + end_quote_char = quote_char + else: + end_quote_char = end_quote_char.strip() + if not end_quote_char: + raise ValueError("end_quote_char cannot be the empty string") + + self.quote_char: str = quote_char + self.quote_char_len: int = len(quote_char) + self.first_quote_char: str = quote_char[0] + self.end_quote_char: str = end_quote_char + self.end_quote_char_len: int = len(end_quote_char) + self.esc_char: str = esc_char or "" + self.has_esc_char: bool = esc_char is not None + self.esc_quote: str = esc_quote or "" + self.unquote_results: bool = unquote_results + self.convert_whitespace_escapes: bool = convert_whitespace_escapes + self.multiline = multiline + self.re_flags = re.RegexFlag(0) + + # fmt: off + # build up re pattern for the content between the quote delimiters + inner_pattern: list[str] = [] + + if esc_quote: + inner_pattern.append(rf"(?:{re.escape(esc_quote)})") + + if esc_char: + inner_pattern.append(rf"(?:{re.escape(esc_char)}.)") + + if len(self.end_quote_char) > 1: + inner_pattern.append( + "(?:" + + "|".join( + f"(?:{re.escape(self.end_quote_char[:i])}(?!{re.escape(self.end_quote_char[i:])}))" + for i in range(len(self.end_quote_char) - 1, 0, -1) + ) + + ")" + ) + + if self.multiline: + self.re_flags |= re.MULTILINE | re.DOTALL + inner_pattern.append( + rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}" + rf"{(_escape_regex_range_chars(self.esc_char) if self.has_esc_char else '')}])" + ) + else: + inner_pattern.append( + rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}\n\r" + rf"{(_escape_regex_range_chars(self.esc_char) if self.has_esc_char else '')}])" + ) + + self.pattern = "".join( + [ + re.escape(self.quote_char), + "(?:", + '|'.join(inner_pattern), + ")*", + re.escape(self.end_quote_char), + ] + ) + + if self.unquote_results: + if self.convert_whitespace_escapes: + self.unquote_scan_re = re.compile( + rf"({'|'.join(re.escape(k) for k in self.ws_map)})" + rf"|(\\[0-7]{3}|\\0|\\x[0-9a-fA-F]{2}|\\u[0-9a-fA-F]{4})" + rf"|({re.escape(self.esc_char)}.)" + rf"|(\n|.)", + flags=self.re_flags, + ) + 
else: + self.unquote_scan_re = re.compile( + rf"({re.escape(self.esc_char)}.)" + rf"|(\n|.)", + flags=self.re_flags + ) + # fmt: on + + try: + self.re = re.compile(self.pattern, self.re_flags) + self.reString = self.pattern + self.re_match = self.re.match + except re.error: + raise ValueError(f"invalid pattern {self.pattern!r} passed to Regex") + + self.errmsg = f"Expected {self.name}" + self.mayIndexError = False + self._may_return_empty = True + + def _generateDefaultName(self) -> str: + if self.quote_char == self.end_quote_char and isinstance( + self.quote_char, str_type + ): + return f"string enclosed in {self.quote_char!r}" + + return f"quoted string, starting with {self.quote_char} ending with {self.end_quote_char}" + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + # check first character of opening quote to see if that is a match + # before doing the more complicated regex match + result = ( + instring[loc] == self.first_quote_char + and self.re_match(instring, loc) + or None + ) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + # get ending loc and matched string from regex matching result + loc = result.end() + ret = result.group() + + def convert_escaped_numerics(s: str) -> str: + if s == "0": + return "\0" + if s.isdigit() and len(s) == 3: + return chr(int(s, base=8)) + elif s.startswith(("u", "x")): + return chr(int(s[1:], base=16)) + else: + return s + + if self.unquote_results: + # strip off quotes + ret = ret[self.quote_char_len : -self.end_quote_char_len] + + if isinstance(ret, str_type): + # fmt: off + if self.convert_whitespace_escapes: + # as we iterate over matches in the input string, + # collect from whichever match group of the unquote_scan_re + # regex matches (only 1 group will match at any given time) + ret = "".join( + # match group 1 matches \t, \n, etc. + self.ws_map[match.group(1)] if match.group(1) + # match group 2 matches escaped octal, null, hex, and Unicode + # sequences + else convert_escaped_numerics(match.group(2)[1:]) if match.group(2) + # match group 3 matches escaped characters + else match.group(3)[-1] if match.group(3) + # match group 4 matches any character + else match.group(4) + for match in self.unquote_scan_re.finditer(ret) + ) + else: + ret = "".join( + # match group 1 matches escaped characters + match.group(1)[-1] if match.group(1) + # match group 2 matches any character + else match.group(2) + for match in self.unquote_scan_re.finditer(ret) + ) + # fmt: on + + # replace escaped quotes + if self.esc_quote: + ret = ret.replace(self.esc_quote, self.end_quote_char) + + return loc, ret + + +class CharsNotIn(Token): + """Token for matching words composed of characters *not* in a given + set (will include whitespace in matched characters if not listed in + the provided exclusion set - see example). Defined with string + containing all disallowed characters, and an optional minimum, + maximum, and/or exact length. The default value for ``min`` is + 1 (a minimum value < 1 is not valid); the default values for + ``max`` and ``exact`` are 0, meaning no maximum or exact + length restriction. + + Example: + + .. testcode:: + + # define a comma-separated-value as anything that is not a ',' + csv_value = CharsNotIn(',') + print( + DelimitedList(csv_value).parse_string( + "dkls,lsdkjf,s12 34,@!#,213" + ) + ) + + prints: + + .. 
testoutput:: + + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] + """ + + def __init__( + self, not_chars: str = "", min: int = 1, max: int = 0, exact: int = 0, **kwargs + ) -> None: + super().__init__() + notChars: str = deprecate_argument(kwargs, "notChars", "") + + self.skipWhitespace = False + self.notChars = not_chars or notChars + self.notCharsSet = set(self.notChars) + + if min < 1: + raise ValueError( + "cannot specify a minimum length < 1; use" + " Opt(CharsNotIn()) if zero-length char group is permitted" + ) + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.errmsg = f"Expected {self.name}" + self._may_return_empty = self.minLen == 0 + self.mayIndexError = False + + def _generateDefaultName(self) -> str: + not_chars_str = _collapse_string_to_ranges(self.notChars) + if len(not_chars_str) > 16: + return f"!W:({self.notChars[: 16 - 3]}...)" + else: + return f"!W:({self.notChars})" + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + notchars = self.notCharsSet + if instring[loc] in notchars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + maxlen = min(start + self.maxLen, len(instring)) + while loc < maxlen and instring[loc] not in notchars: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class White(Token): + """Special matching class for matching whitespace. Normally, + whitespace is ignored by pyparsing grammars. This class is included + when some whitespace structures are significant. Define with + a string containing the whitespace characters to be matched; default + is ``" \\t\\r\\n"``. Also takes optional ``min``, + ``max``, and ``exact`` arguments, as defined for the + :class:`Word` class. 
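+
+    Example (a minimal sketch in which a tab character is structurally
+    significant):
+
+    .. doctest::
+
+        >>> expr = Word(alphas) + White("\t") + Word(alphas)
+        >>> expr.parse_string("hello\tworld")
+        ParseResults(['hello', '\t', 'world'], {})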
+ """ + + whiteStrs = { + " ": "", + "\t": "", + "\n": "", + "\r": "", + "\f": "", + "\u00a0": "", + "\u1680": "", + "\u180e": "", + "\u2000": "", + "\u2001": "", + "\u2002": "", + "\u2003": "", + "\u2004": "", + "\u2005": "", + "\u2006": "", + "\u2007": "", + "\u2008": "", + "\u2009": "", + "\u200a": "", + "\u200b": "", + "\u202f": "", + "\u205f": "", + "\u3000": "", + } + + def __init__( + self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0 + ) -> None: + super().__init__() + self.matchWhite = ws + self.set_whitespace_chars( + "".join(c for c in self.whiteStrs if c not in self.matchWhite), + copy_defaults=True, + ) + # self.leave_whitespace() + self._may_return_empty = True + self.errmsg = f"Expected {self.name}" + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + def _generateDefaultName(self) -> str: + return "".join(White.whiteStrs[c] for c in self.matchWhite) + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if instring[loc] not in self.matchWhite: + raise ParseException(instring, loc, self.errmsg, self) + start = loc + loc += 1 + maxloc = start + self.maxLen + maxloc = min(maxloc, len(instring)) + while loc < maxloc and instring[loc] in self.matchWhite: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class PositionToken(Token): + def __init__(self) -> None: + super().__init__() + self._may_return_empty = True + self.mayIndexError = False + + +class GoToColumn(PositionToken): + """Token to advance to a specific column of input text; useful for + tabular report scraping. + """ + + def __init__(self, colno: int) -> None: + super().__init__() + self.col = colno + + def preParse(self, instring: str, loc: int) -> int: + if col(loc, instring) == self.col: + return loc + + instrlen = len(instring) + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + while ( + loc < instrlen + and instring[loc].isspace() + and col(loc, instring) != self.col + ): + loc += 1 + + return loc + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + thiscol = col(loc, instring) + if thiscol > self.col: + raise ParseException(instring, loc, "Text not in expected column", self) + newloc = loc + self.col - thiscol + ret = instring[loc:newloc] + return newloc, ret + + +class LineStart(PositionToken): + r"""Matches if current position is at the logical beginning of a line (after skipping whitespace) + within the parse string + + Example: + + .. testcode:: + + test = '''\ + AAA this line + AAA and this line + AAA and even this line + B AAA but definitely not this line + ''' + + for t in (LineStart() + 'AAA' + rest_of_line).search_string(test): + print(t) + + prints: + + .. 
testoutput:: + + ['AAA', ' this line'] + ['AAA', ' and this line'] + ['AAA', ' and even this line'] + + """ + + def __init__(self) -> None: + super().__init__() + self.leave_whitespace() + self.orig_whiteChars = set() | self.whiteChars + self.whiteChars.discard("\n") + self.skipper = Empty().set_whitespace_chars(self.whiteChars) + self.set_name("start of line") + + def preParse(self, instring: str, loc: int) -> int: + if loc == 0: + return loc + + ret = self.skipper.preParse(instring, loc) + + if "\n" in self.orig_whiteChars: + while instring[ret : ret + 1] == "\n": + ret = self.skipper.preParse(instring, ret + 1) + + return ret + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if col(loc, instring) == 1: + return loc, [] + raise ParseException(instring, loc, self.errmsg, self) + + +class LineEnd(PositionToken): + """Matches if current position is at the end of a line within the + parse string + """ + + def __init__(self) -> None: + super().__init__() + self.whiteChars.discard("\n") + self.set_whitespace_chars(self.whiteChars, copy_defaults=False) + self.set_name("end of line") + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if loc < len(instring): + if instring[loc] == "\n": + return loc + 1, "\n" + else: + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc + 1, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + + +class StringStart(PositionToken): + """Matches if current position is at the beginning of the parse + string + """ + + def __init__(self) -> None: + super().__init__() + self.set_name("start of text") + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + # see if entire string up to here is just whitespace and ignoreables + if loc != 0 and loc != self.preParse(instring, 0): + raise ParseException(instring, loc, self.errmsg, self) + + return loc, [] + + +class StringEnd(PositionToken): + """ + Matches if current position is at the end of the parse string + """ + + def __init__(self) -> None: + super().__init__() + self.set_name("end of text") + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if loc < len(instring): + raise ParseException(instring, loc, self.errmsg, self) + if loc == len(instring): + return loc + 1, [] + if loc > len(instring): + return loc, [] + + raise ParseException(instring, loc, self.errmsg, self) + + +class WordStart(PositionToken): + """Matches if the current position is at the beginning of a + :class:`Word`, and is not preceded by any character in a given + set of ``word_chars`` (default= ``printables``). To emulate the + ``\b`` behavior of regular expressions, use + ``WordStart(alphanums)``. ``WordStart`` will also match at + the beginning of the string being parsed, or at the beginning of + a line. 
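+
+    Example (an illustrative sketch of whole-word matching):
+
+    .. doctest::
+
+        >>> print(Literal("cat").search_string("cat concat bobcat"))
+        [['cat'], ['cat'], ['cat']]
+        >>> print((WordStart(alphanums) + "cat").search_string("cat concat bobcat"))
+        [['cat']]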
+ """ + + def __init__(self, word_chars: str = printables, **kwargs) -> None: + wordChars: str = deprecate_argument(kwargs, "wordChars", printables) + + wordChars = word_chars if wordChars == printables else wordChars + super().__init__() + self.wordChars = set(wordChars) + self.set_name("start of a word") + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if loc != 0: + if ( + instring[loc - 1] in self.wordChars + or instring[loc] not in self.wordChars + ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class WordEnd(PositionToken): + """Matches if the current position is at the end of a :class:`Word`, + and is not followed by any character in a given set of ``word_chars`` + (default= ``printables``). To emulate the ``\b`` behavior of + regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` + will also match at the end of the string being parsed, or at the end + of a line. + """ + + def __init__(self, word_chars: str = printables, **kwargs) -> None: + wordChars: str = deprecate_argument(kwargs, "wordChars", printables) + + wordChars = word_chars if wordChars == printables else wordChars + super().__init__() + self.wordChars = set(wordChars) + self.skipWhitespace = False + self.set_name("end of a word") + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + instrlen = len(instring) + if instrlen > 0 and loc < instrlen: + if ( + instring[loc] in self.wordChars + or instring[loc - 1] not in self.wordChars + ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class Tag(Token): + """ + A meta-element for inserting a named result into the parsed + tokens that may be checked later in a parse action or while + processing the parsed results. Accepts an optional tag value, + defaulting to `True`. + + Example: + + .. doctest:: + + >>> end_punc = "." | ("!" + Tag("enthusiastic")) + >>> greeting = "Hello," + Word(alphas) + end_punc + + >>> result = greeting.parse_string("Hello, World.") + >>> print(result.dump()) + ['Hello,', 'World', '.'] + + >>> result = greeting.parse_string("Hello, World!") + >>> print(result.dump()) + ['Hello,', 'World', '!'] + - enthusiastic: True + + .. versionadded:: 3.1.0 + """ + + def __init__(self, tag_name: str, value: Any = True) -> None: + super().__init__() + self._may_return_empty = True + self.mayIndexError = False + self.leave_whitespace() + self.tag_name = tag_name + self.tag_value = value + self.add_parse_action(self._add_tag) + self.show_in_diagram = False + + def _add_tag(self, tokens: ParseResults): + tokens[self.tag_name] = self.tag_value + + def _generateDefaultName(self) -> str: + return f"{type(self).__name__}:{self.tag_name}={self.tag_value!r}" + + +class ParseExpression(ParserElement): + """Abstract subclass of ParserElement, for combining and + post-processing parsed tokens. 
+ """ + + def __init__( + self, exprs: typing.Iterable[ParserElement], savelist: bool = False + ) -> None: + super().__init__(savelist) + self.exprs: list[ParserElement] + if isinstance(exprs, _generatorType): + exprs = list(exprs) + + if isinstance(exprs, str_type): + self.exprs = [self._literalStringClass(exprs)] + elif isinstance(exprs, ParserElement): + self.exprs = [exprs] + elif isinstance(exprs, Iterable): + exprs = list(exprs) + # if sequence of strings provided, wrap with Literal + if any(isinstance(expr, str_type) for expr in exprs): + exprs = ( + self._literalStringClass(e) if isinstance(e, str_type) else e + for e in exprs + ) + self.exprs = list(exprs) + else: + try: + self.exprs = list(exprs) + except TypeError: + self.exprs = [exprs] + self.callPreparse = False + + def recurse(self) -> list[ParserElement]: + return self.exprs[:] + + def append(self, other) -> ParserElement: + """ + Add an expression to the list of expressions related to this ParseExpression instance. + """ + self.exprs.append(other) + self._defaultName = None + return self + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on + all contained expressions. + """ + super().leave_whitespace(recursive) + + if recursive: + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.leave_whitespace(recursive) + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``ignore_whitespace`` defined in base class, and also invokes ``ignore_whitespace`` on + all contained expressions. + """ + super().ignore_whitespace(recursive) + if recursive: + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.ignore_whitespace(recursive) + return self + + def ignore(self, other) -> ParserElement: + """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. 
+ """ + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + super().ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + else: + super().ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + return self + + def _generateDefaultName(self) -> str: + return f"{type(self).__name__}:({self.exprs})" + + def streamline(self) -> ParserElement: + if self.streamlined: + return self + + super().streamline() + + for e in self.exprs: + e.streamline() + + # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)`` + # but only if there are no parse actions or resultsNames on the nested And's + # (likewise for :class:`Or`'s and :class:`MatchFirst`'s) + if len(self.exprs) == 2: + other = self.exprs[0] + if ( + isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug + ): + self.exprs = other.exprs[:] + [self.exprs[1]] + self._defaultName = None + self._may_return_empty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + other = self.exprs[-1] + if ( + isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug + ): + self.exprs = self.exprs[:-1] + other.exprs[:] + self._defaultName = None + self._may_return_empty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + self.errmsg = f"Expected {self}" + + return self + + def validate(self, validateTrace=None) -> None: + warnings.warn( + "ParserElement.validate() is deprecated, and should not be used to check for left recursion", + DeprecationWarning, + stacklevel=2, + ) + tmp = (validateTrace if validateTrace is not None else [])[:] + [self] + for e in self.exprs: + e.validate(tmp) + self._checkRecursion([]) + + def copy(self) -> ParserElement: + """ + Returns a copy of this expression. + + Generally only used internally by pyparsing. + """ + ret = super().copy() + ret = typing.cast(ParseExpression, ret) + ret.exprs = [e.copy() for e in self.exprs] + return ret + + def _setResultsName(self, name, list_all_matches=False) -> ParserElement: + if not ( + __diag__.warn_ungrouped_named_tokens_in_collection + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in self.suppress_warnings_ + ): + return super()._setResultsName(name, list_all_matches) + + for e in self.exprs: + if ( + isinstance(e, ParserElement) + and e.resultsName + and ( + Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ) + ): + warning = ( + "warn_ungrouped_named_tokens_in_collection:" + f" setting results name {name!r} on {type(self).__name__} expression" + f" collides with {e.resultsName!r} on contained expression" + ) + warnings.warn(warning, stacklevel=3) + break + + return super()._setResultsName(name, list_all_matches) + + # Compatibility synonyms + # fmt: off + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) + # fmt: on + + +class And(ParseExpression): + """ + Requires all given :class:`ParserElement` s to be found in the given order. + Expressions may be separated by whitespace. + May be constructed using the ``'+'`` operator. + May also be constructed using the ``'-'`` operator, which will + suppress backtracking. + + Example: + + .. testcode:: + + integer = Word(nums) + name_expr = Word(alphas)[1, ...] 
+ + expr = And([integer("id"), name_expr("name"), integer("age")]) + # more easily written as: + expr = integer("id") + name_expr("name") + integer("age") + """ + + class _ErrorStop(Empty): + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.leave_whitespace() + + def _generateDefaultName(self) -> str: + return "-" + + def __init__( + self, + exprs_arg: typing.Iterable[Union[ParserElement, str]], + savelist: bool = True, + ) -> None: + # instantiate exprs as a list, converting strs to ParserElements + exprs: list[ParserElement] = [ + self._literalStringClass(e) if isinstance(e, str) else e for e in exprs_arg + ] + + # convert any Ellipsis elements to SkipTo + if Ellipsis in exprs: + + # Ellipsis cannot be the last element + if exprs[-1] is Ellipsis: + raise Exception("cannot construct And with sequence ending in ...") + + tmp: list[ParserElement] = [] + for cur_expr, next_expr in zip(exprs, exprs[1:]): + if cur_expr is Ellipsis: + tmp.append(SkipTo(next_expr)("_skipped*")) + else: + tmp.append(cur_expr) + + exprs[:-1] = tmp + + super().__init__(exprs, savelist) + if self.exprs: + self._may_return_empty = all(e.mayReturnEmpty for e in self.exprs) + if not isinstance(self.exprs[0], White): + self.set_whitespace_chars( + self.exprs[0].whiteChars, + copy_defaults=self.exprs[0].copyDefaultWhiteChars, + ) + self.skipWhitespace = self.exprs[0].skipWhitespace + else: + self.skipWhitespace = False + else: + self._may_return_empty = True + self.callPreparse = True + + def streamline(self) -> ParserElement: + """ + Collapse `And` expressions like `And(And(And(A, B), C), D)` + to `And(A, B, C, D)`. + + .. doctest:: + + >>> expr = Word("A") + Word("B") + Word("C") + Word("D") + >>> # Using '+' operator creates nested And expression + >>> expr + {{{W:(A) W:(B)} W:(C)} W:(D)} + >>> # streamline simplifies to a single And with multiple expressions + >>> expr.streamline() + {W:(A) W:(B) W:(C) W:(D)} + + Guards against collapsing out expressions that have special features, + such as results names or parse actions. + + Resolves pending Skip commands defined using `...` terms. 
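+
+    A sketch of the pending-skip form that this resolves (``...`` between
+    two expressions effectively becomes a :class:`SkipTo`):
+
+    .. doctest::
+
+        >>> expr = Literal("start") + ... + Literal("end")
+        >>> print(expr.parse_string("start middle part end"))
+        ['start', 'middle part ', 'end']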
+ """ + # collapse any _PendingSkip's + if self.exprs and any( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + for e in self.exprs[:-1] + ): + deleted_expr_marker = NoMatch() + for i, e in enumerate(self.exprs[:-1]): + if e is deleted_expr_marker: + continue + if ( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + ): + e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] + self.exprs[i + 1] = deleted_expr_marker + self.exprs = [e for e in self.exprs if e is not deleted_expr_marker] + + super().streamline() + + # link any IndentedBlocks to the prior expression + prev: ParserElement + cur: ParserElement + for prev, cur in zip(self.exprs, self.exprs[1:]): + # traverse cur or any first embedded expr of cur looking for an IndentedBlock + # (but watch out for recursive grammar) + seen = set() + while True: + if id(cur) in seen: + break + seen.add(id(cur)) + if isinstance(cur, IndentedBlock): + prev.add_parse_action( + lambda s, l, t, cur_=cur: setattr( + cur_, "parent_anchor", col(l, s) + ) + ) + break + subs = cur.recurse() + next_first = next(iter(subs), None) + if next_first is None: + break + cur = typing.cast(ParserElement, next_first) + + self._may_return_empty = all(e.mayReturnEmpty for e in self.exprs) + return self + + def parseImpl(self, instring, loc, do_actions=True): + # pass False as callPreParse arg to _parse for first element, since we already + # pre-parsed the string as part of our And pre-parsing + loc, resultlist = self.exprs[0]._parse( + instring, loc, do_actions, callPreParse=False + ) + errorStop = False + for e in self.exprs[1:]: + # if isinstance(e, And._ErrorStop): + if type(e) is And._ErrorStop: + errorStop = True + continue + if errorStop: + try: + loc, exprtokens = e._parse(instring, loc, do_actions) + except ParseSyntaxException: + raise + except ParseBaseException as pe: + pe.__traceback__ = None + raise ParseSyntaxException._from_exception(pe) + except IndexError: + raise ParseSyntaxException( + instring, len(instring), self.errmsg, self + ) + else: + loc, exprtokens = e._parse(instring, loc, do_actions) + resultlist += exprtokens + return loc, resultlist + + def __iadd__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self.append(other) # And([self, other]) + + def _checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in self.exprs: + e._checkRecursion(subRecCheckList) + if not e.mayReturnEmpty: + break + + def _generateDefaultName(self) -> str: + inner = " ".join(str(e) for e in self.exprs) + # strip off redundant inner {}'s + while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": + inner = inner[1:-1] + return f"{{{inner}}}" + + +class Or(ParseExpression): + """Requires that at least one :class:`ParserElement` is found. If + two expressions match, the expression that matches the longest + string will be used. May be constructed using the ``'^'`` + operator. + + Example: + + .. testcode:: + + # construct Or using '^' operator + + number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums)) + print(number.search_string("123 3.1416 789")) + + prints: + + .. 
testoutput:: + + [['123'], ['3.1416'], ['789']] + """ + + def __init__( + self, exprs: typing.Iterable[ParserElement], savelist: bool = False + ) -> None: + super().__init__(exprs, savelist) + if self.exprs: + self._may_return_empty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self._may_return_empty = True + + def streamline(self) -> ParserElement: + super().streamline() + if self.exprs: + self._may_return_empty = any(e.mayReturnEmpty for e in self.exprs) + self.saveAsList = any(e.saveAsList for e in self.exprs) + self.skipWhitespace = all( + e.skipWhitespace and not isinstance(e, White) for e in self.exprs + ) + else: + self.saveAsList = False + return self + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + maxExcLoc = -1 + maxException = None + matches: list[tuple[int, ParserElement]] = [] + fatals: list[ParseFatalException] = [] + if all(e.callPreparse for e in self.exprs): + loc = self.preParse(instring, loc) + for e in self.exprs: + try: + loc2 = e.try_parse(instring, loc, raise_fatal=True) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parser_element = e + fatals.append(pfe) + maxException = None + maxExcLoc = -1 + except ParseException as err: + if not fatals: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException( + instring, len(instring), e.errmsg, self + ) + maxExcLoc = len(instring) + else: + # save match among all matches, to retry longest to shortest + matches.append((loc2, e)) + + if matches: + # re-evaluate all matches in descending order of length of match, in case attached actions + # might change whether or how much they match of the input. 
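+            # (for example, a condition attached with add_condition() may reject
+            # the longest match, letting a shorter alternative win)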
+ matches.sort(key=itemgetter(0), reverse=True) + + if not do_actions: + # no further conditions or parse actions to change the selection of + # alternative, so the first match will be the best match + best_expr = matches[0][1] + return best_expr._parse(instring, loc, do_actions) + + longest: tuple[int, typing.Optional[ParseResults]] = -1, None + for loc1, expr1 in matches: + if loc1 <= longest[0]: + # already have a longer match than this one will deliver, we are done + return longest + + try: + loc2, toks = expr1._parse(instring, loc, do_actions) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + else: + if loc2 >= loc1: + return loc2, toks + # didn't match as much as before + elif loc2 > longest[0]: + longest = loc2, toks + + if longest != (-1, None): + return longest + + if fatals: + if len(fatals) > 1: + fatals.sort(key=lambda e: -e.loc) + if fatals[0].loc == fatals[1].loc: + fatals.sort(key=lambda e: (-e.loc, -len(str(e.parser_element)))) + max_fatal = fatals[0] + raise max_fatal + + if maxException is not None: + # infer from this check that all alternatives failed at the current position + # so emit this collective error message instead of any single error message + parse_start_loc = self.preParse(instring, loc) + if maxExcLoc == parse_start_loc: + maxException.msg = self.errmsg or "" + raise maxException + + raise ParseException(instring, loc, "no defined alternatives to match", self) + + def __ixor__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self.append(other) # Or([self, other]) + + def _generateDefaultName(self) -> str: + return f"{{{' ^ '.join(str(e) for e in self.exprs)}}}" + + def _setResultsName(self, name, list_all_matches=False) -> ParserElement: + if ( + __diag__.warn_multiple_tokens_in_named_alternation + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in self.suppress_warnings_ + ): + if any( + isinstance(e, And) + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in e.suppress_warnings_ + for e in self.exprs + ): + warning = ( + "warn_multiple_tokens_in_named_alternation:" + f" setting results name {name!r} on {type(self).__name__} expression" + " will return a list of all parsed tokens in an And alternative," + " in prior versions only the first token was returned; enclose" + " contained argument in Group" + ) + warnings.warn(warning, stacklevel=3) + + return super()._setResultsName(name, list_all_matches) + + +class MatchFirst(ParseExpression): + """Requires that at least one :class:`ParserElement` is found. If + more than one expression matches, the first one listed is the one that will + match. May be constructed using the ``'|'`` operator. + + Example: Construct MatchFirst using '|' operator + + .. doctest:: + + # watch the order of expressions to match + >>> number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) + >>> print(number.search_string("123 3.1416 789")) # Fail! + [['123'], ['3'], ['1416'], ['789']] + + # put more selective expression first + >>> number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) + >>> print(number.search_string("123 3.1416 789")) # Better + [['123'], ['3.1416'], ['789']] + """ + + def __init__( + self, exprs: typing.Iterable[ParserElement], savelist: bool = False + ) -> None: + super().__init__(exprs, savelist) + if self.exprs: + self._may_return_empty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self._may_return_empty = True + + def streamline(self) -> ParserElement: + if self.streamlined: + return self + + super().streamline() + if self.exprs: + self.saveAsList = any(e.saveAsList for e in self.exprs) + self._may_return_empty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all( + e.skipWhitespace and not isinstance(e, White) for e in self.exprs + ) + else: + self.saveAsList = False + self._may_return_empty = True + return self + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + maxExcLoc = -1 + maxException = None + + for e in self.exprs: + try: + return e._parse(instring, loc, do_actions) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parser_element = e + raise + except ParseException as err: + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException( + instring, len(instring), e.errmsg, self + ) + maxExcLoc = len(instring) + + if maxException is not None: + # infer from this check that all alternatives failed at the current position + # so emit this collective error message instead of any individual error message + parse_start_loc = self.preParse(instring, loc) + if maxExcLoc == parse_start_loc: + maxException.msg = self.errmsg or "" + raise maxException + + raise ParseException(instring, loc, "no defined alternatives to match", self) + + def __ior__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self.append(other) # MatchFirst([self, other]) + + def _generateDefaultName(self) -> str: + return f"{{{' | '.join(str(e) for e in self.exprs)}}}" + + def _setResultsName(self, name, list_all_matches=False) -> ParserElement: + if ( + __diag__.warn_multiple_tokens_in_named_alternation + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in self.suppress_warnings_ + ): + if any( + isinstance(e, And) + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in e.suppress_warnings_ + for e in self.exprs + ): + warning = ( + "warn_multiple_tokens_in_named_alternation:" + f" setting results name {name!r} on {type(self).__name__} expression" + " will return a list of all parsed tokens in an And alternative," + " in prior versions only the first token was returned; enclose" + " contained argument in Group" + ) + warnings.warn(warning, stacklevel=3) + + return super()._setResultsName(name, list_all_matches) + + +class Each(ParseExpression): + """Requires all given :class:`ParserElement` s to be found, but in + any order. Expressions may be separated by whitespace. + + May be constructed using the ``'&'`` operator. + + Example: + + .. 
testcode:: + + color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") + shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") + integer = Word(nums) + shape_attr = "shape:" + shape_type("shape") + posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") + color_attr = "color:" + color("color") + size_attr = "size:" + integer("size") + + # use Each (using operator '&') to accept attributes in any order + # (shape and posn are required, color and size are optional) + shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr) + + shape_spec.run_tests(''' + shape: SQUARE color: BLACK posn: 100, 120 + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ''' + ) + + prints: + + .. testoutput:: + :options: +NORMALIZE_WHITESPACE + + + shape: SQUARE color: BLACK posn: 100, 120 + ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] + - color: 'BLACK' + - posn: ['100', ',', '120'] + - x: '100' + - y: '120' + - shape: 'SQUARE' + ... + + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', + 'posn:', ['50', ',', '80']] + - color: 'BLUE' + - posn: ['50', ',', '80'] + - x: '50' + - y: '80' + - shape: 'CIRCLE' + - size: '50' + ... + + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', + 'posn:', ['20', ',', '40']] + - color: 'GREEN' + - posn: ['20', ',', '40'] + - x: '20' + - y: '40' + - shape: 'TRIANGLE' + - size: '20' + ... + """ + + def __init__( + self, exprs: typing.Iterable[ParserElement], savelist: bool = True + ) -> None: + super().__init__(exprs, savelist) + if self.exprs: + self._may_return_empty = all(e.mayReturnEmpty for e in self.exprs) + else: + self._may_return_empty = True + self.skipWhitespace = True + self.initExprGroups = True + self.saveAsList = True + + def __iand__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + return NotImplemented + return self.append(other) # Each([self, other]) + + def streamline(self) -> ParserElement: + super().streamline() + if self.exprs: + self._may_return_empty = all(e.mayReturnEmpty for e in self.exprs) + else: + self._may_return_empty = True + return self + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if self.initExprGroups: + self.opt1map = dict( + (id(e.expr), e) for e in self.exprs if isinstance(e, Opt) + ) + opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)] + opt2 = [ + e + for e in self.exprs + if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore)) + ] + self.optionals = opt1 + opt2 + self.multioptionals = [ + e.expr.set_results_name(e.resultsName, list_all_matches=True) + for e in self.exprs + if isinstance(e, _MultipleMatch) + ] + self.multirequired = [ + e.expr.set_results_name(e.resultsName, list_all_matches=True) + for e in self.exprs + if isinstance(e, OneOrMore) + ] + self.required = [ + e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore)) + ] + self.required += self.multirequired + self.initExprGroups = False + + tmpLoc = loc + tmpReqd = self.required[:] + tmpOpt = self.optionals[:] + multis = self.multioptionals[:] + matchOrder: list[ParserElement] = [] + + keepMatching = True + failed: list[ParserElement] = [] + fatals: list[ParseFatalException] = [] + while keepMatching: + tmpExprs = tmpReqd + tmpOpt + multis + failed.clear() + fatals.clear() + for e in 
tmpExprs: + try: + tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parser_element = e + fatals.append(pfe) + failed.append(e) + except ParseException: + failed.append(e) + else: + matchOrder.append(self.opt1map.get(id(e), e)) + if e in tmpReqd: + tmpReqd.remove(e) + elif e in tmpOpt: + tmpOpt.remove(e) + if len(failed) == len(tmpExprs): + keepMatching = False + + # look for any ParseFatalExceptions + if fatals: + if len(fatals) > 1: + fatals.sort(key=lambda e: -e.loc) + if fatals[0].loc == fatals[1].loc: + fatals.sort(key=lambda e: (-e.loc, -len(str(e.parser_element)))) + max_fatal = fatals[0] + raise max_fatal + + if tmpReqd: + missing = ", ".join([str(e) for e in tmpReqd]) + raise ParseException( + instring, + loc, + f"Missing one or more required elements ({missing})", + ) + + # add any unmatched Opts, in case they have default values defined + matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt] + + total_results = ParseResults([]) + for e in matchOrder: + loc, results = e._parse(instring, loc, do_actions) + total_results += results + + return loc, total_results + + def _generateDefaultName(self) -> str: + return f"{{{' & '.join(str(e) for e in self.exprs)}}}" + + +class ParseElementEnhance(ParserElement): + """Abstract subclass of :class:`ParserElement`, for combining and + post-processing parsed tokens. + """ + + def __init__(self, expr: Union[ParserElement, str], savelist: bool = False) -> None: + super().__init__(savelist) + if isinstance(expr, str_type): + expr_str = typing.cast(str, expr) + if issubclass(self._literalStringClass, Token): + expr = self._literalStringClass(expr_str) # type: ignore[call-arg] + elif issubclass(type(self), self._literalStringClass): + expr = Literal(expr_str) + else: + expr = self._literalStringClass(Literal(expr_str)) # type: ignore[assignment, call-arg] + expr = typing.cast(ParserElement, expr) + self.expr = expr + if expr is not None: + self.mayIndexError = expr.mayIndexError + self._may_return_empty = expr.mayReturnEmpty + self.set_whitespace_chars( + expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars + ) + self.skipWhitespace = expr.skipWhitespace + self.saveAsList = expr.saveAsList + self.callPreparse = expr.callPreparse + self.ignoreExprs.extend(expr.ignoreExprs) + + def recurse(self) -> list[ParserElement]: + return [self.expr] if self.expr is not None else [] + + def parseImpl(self, instring, loc, do_actions=True): + if self.expr is None: + raise ParseException(instring, loc, "No expression defined", self) + + try: + return self.expr._parse(instring, loc, do_actions, callPreParse=False) + except ParseSyntaxException: + raise + except ParseBaseException as pbe: + pbe.pstr = pbe.pstr or instring + pbe.loc = pbe.loc or loc + pbe.parser_element = pbe.parser_element or self + if not isinstance(self, Forward) and self.customName is not None: + if self.errmsg: + pbe.msg = self.errmsg + raise + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on + the contained expression. 
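+
+        For example (an illustrative sketch), after calling
+        ``leave_whitespace`` the whitespace between ``"abc"`` and ``"def"``
+        below is no longer skipped, so the parse fails:
+
+        .. testcode::
+
+            grp = Group(Literal("abc") + Literal("def")).leave_whitespace()
+            try:
+                grp.parse_string("abc def")
+            except ParseException as pe:
+                print("failed:", pe.msg)
+
+        prints:
+
+        .. testoutput::
+
+            failed: Expected 'def'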
+ """ + super().leave_whitespace(recursive) + + if recursive: + if self.expr is not None: + self.expr = self.expr.copy() + self.expr.leave_whitespace(recursive) + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``ignore_whitespace`` defined in base class, and also invokes ``ignore_whitespace`` on + the contained expression. + """ + super().ignore_whitespace(recursive) + + if recursive: + if self.expr is not None: + self.expr = self.expr.copy() + self.expr.ignore_whitespace(recursive) + return self + + def ignore(self, other) -> ParserElement: + """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. + """ + if not isinstance(other, Suppress) or other not in self.ignoreExprs: + super().ignore(other) + if self.expr is not None: + self.expr.ignore(self.ignoreExprs[-1]) + + return self + + def streamline(self) -> ParserElement: + super().streamline() + if self.expr is not None: + self.expr.streamline() + return self + + def _checkRecursion(self, parseElementList): + if self in parseElementList: + raise RecursiveGrammarException(parseElementList + [self]) + subRecCheckList = parseElementList[:] + [self] + if self.expr is not None: + self.expr._checkRecursion(subRecCheckList) + + def validate(self, validateTrace=None) -> None: + warnings.warn( + "ParserElement.validate() is deprecated, and should not be used to check for left recursion", + DeprecationWarning, + stacklevel=2, + ) + if validateTrace is None: + validateTrace = [] + tmp = validateTrace[:] + [self] + if self.expr is not None: + self.expr.validate(tmp) + self._checkRecursion([]) + + def _generateDefaultName(self) -> str: + return f"{type(self).__name__}:({self.expr})" + + # Compatibility synonyms + # fmt: off + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) + # fmt: on + + +class IndentedBlock(ParseElementEnhance): + """ + Expression to match one or more expressions at a given indentation level. + Useful for parsing text where structure is implied by indentation (like Python source code). + + Example: + + .. testcode:: + + ''' + BNF: + statement ::= assignment_stmt | if_stmt + assignment_stmt ::= identifier '=' rvalue + rvalue ::= identifier | integer + if_stmt ::= 'if' bool_condition block + block ::= ([indent] statement)... + identifier ::= [A..Za..z] + integer ::= [0..9]... + bool_condition ::= 'TRUE' | 'FALSE' + ''' + + IF, TRUE, FALSE = Keyword.using_each("IF TRUE FALSE".split()) + + statement = Forward() + identifier = Char(alphas) + integer = Word(nums).add_parse_action(lambda t: int(t[0])) + rvalue = identifier | integer + assignment_stmt = identifier + "=" + rvalue + + if_stmt = IF + (TRUE | FALSE) + IndentedBlock(statement) + + statement <<= Group(assignment_stmt | if_stmt) + + result = if_stmt.parse_string(''' + IF TRUE + a = 1000 + b = 2000 + IF FALSE + z = 100 + ''') + print(result.dump()) + + .. 
testoutput:: + + ['IF', 'TRUE', [['a', '=', 1000], ['b', '=', 2000], ['IF', 'FALSE', [['z', '=', 100]]]]] + [0]: + IF + [1]: + TRUE + [2]: + [['a', '=', 1000], ['b', '=', 2000], ['IF', 'FALSE', [['z', '=', 100]]]] + [0]: + ['a', '=', 1000] + [1]: + ['b', '=', 2000] + [2]: + ['IF', 'FALSE', [['z', '=', 100]]] + [0]: + IF + [1]: + FALSE + [2]: + [['z', '=', 100]] + [0]: + ['z', '=', 100] + """ + + class _Indent(Empty): + def __init__(self, ref_col: int) -> None: + super().__init__() + self.errmsg = f"expected indent at column {ref_col}" + self.add_condition(lambda s, l, t: col(l, s) == ref_col) + + class _IndentGreater(Empty): + def __init__(self, ref_col: int) -> None: + super().__init__() + self.errmsg = f"expected indent at column greater than {ref_col}" + self.add_condition(lambda s, l, t: col(l, s) > ref_col) + + def __init__( + self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True + ) -> None: + super().__init__(expr, savelist=True) + # if recursive: + # raise NotImplementedError("IndentedBlock with recursive is not implemented") + self._recursive = recursive + self._grouped = grouped + self.parent_anchor = 1 + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + # advance parse position to non-whitespace by using an Empty() + # this should be the column to be used for all subsequent indented lines + anchor_loc = Empty().preParse(instring, loc) + + # see if self.expr matches at the current location - if not it will raise an exception + # and no further work is necessary + self.expr.try_parse(instring, anchor_loc, do_actions=do_actions) + + indent_col = col(anchor_loc, instring) + peer_detect_expr = self._Indent(indent_col) + + inner_expr = Empty() + peer_detect_expr + self.expr + if self._recursive: + sub_indent = self._IndentGreater(indent_col) + nested_block = IndentedBlock( + self.expr, recursive=self._recursive, grouped=self._grouped + ) + nested_block.set_debug(self.debug) + nested_block.parent_anchor = indent_col + inner_expr += Opt(sub_indent + nested_block) + + inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}") + block = OneOrMore(inner_expr) + + trailing_undent = self._Indent(self.parent_anchor) | StringEnd() + + if self._grouped: + wrapper = Group + else: + wrapper = lambda expr: expr # type: ignore[misc, assignment] + return (wrapper(block) + Optional(trailing_undent)).parseImpl( + instring, anchor_loc, do_actions + ) + + +class AtStringStart(ParseElementEnhance): + """Matches if expression matches at the beginning of the parse + string:: + + AtStringStart(Word(nums)).parse_string("123") + # prints ["123"] + + AtStringStart(Word(nums)).parse_string(" 123") + # raises ParseException + """ + + def __init__(self, expr: Union[ParserElement, str]) -> None: + super().__init__(expr) + self.callPreparse = False + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if loc != 0: + raise ParseException(instring, loc, "not found at string start") + return super().parseImpl(instring, loc, do_actions) + + +class AtLineStart(ParseElementEnhance): + r"""Matches if an expression matches at the beginning of a line within + the parse string + + Example: + + .. testcode:: + + test = '''\ + BBB this line + BBB and this line + BBB but not this one + A BBB and definitely not this one + ''' + + for t in (AtLineStart('BBB') + rest_of_line).search_string(test): + print(t) + + prints: + + .. 
testoutput:: + + ['BBB', ' this line'] + ['BBB', ' and this line'] + """ + + def __init__(self, expr: Union[ParserElement, str]) -> None: + super().__init__(expr) + self.callPreparse = False + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if col(loc, instring) != 1: + raise ParseException(instring, loc, "not found at line start") + return super().parseImpl(instring, loc, do_actions) + + +class FollowedBy(ParseElementEnhance): + """Lookahead matching of the given parse expression. + ``FollowedBy`` does *not* advance the parsing position within + the input string, it only verifies that the specified parse + expression matches at the current position. ``FollowedBy`` + always returns a null token list. If any results names are defined + in the lookahead expression, those *will* be returned for access by + name. + + Example: + + .. testcode:: + + # use FollowedBy to match a label only if it is followed by a ':' + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group( + label + Suppress(':') + + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) + ) + + attr_expr[1, ...].parse_string( + "shape: SQUARE color: BLACK posn: upper left").pprint() + + prints: + + .. testoutput:: + + [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] + """ + + def __init__(self, expr: Union[ParserElement, str]) -> None: + super().__init__(expr) + self._may_return_empty = True + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + # by using self._expr.parse and deleting the contents of the returned ParseResults list + # we keep any named results that were defined in the FollowedBy expression + _, ret = self.expr._parse(instring, loc, do_actions=do_actions) + del ret[:] + + return loc, ret + + +class PrecededBy(ParseElementEnhance): + """Lookbehind matching of the given parse expression. + ``PrecededBy`` does not advance the parsing position within the + input string, it only verifies that the specified parse expression + matches prior to the current position. ``PrecededBy`` always + returns a null token list, but if a results name is defined on the + given expression, it is returned. + + Parameters: + + - ``expr`` - expression that must match prior to the current parse + location + - ``retreat`` - (default= ``None``) - (int) maximum number of characters + to lookbehind prior to the current parse location + + If the lookbehind expression is a string, :class:`Literal`, + :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn` + with a specified exact or maximum length, then the retreat + parameter is not required. Otherwise, retreat must be specified to + give a maximum number of characters to look back from + the current parse position for a lookbehind match. + + Example: + + .. 
testcode::
+
+        # VB-style variable names with type prefixes
+        int_var = PrecededBy("#") + pyparsing_common.identifier
+        str_var = PrecededBy("$") + pyparsing_common.identifier
+    """
+
+    def __init__(self, expr: Union[ParserElement, str], retreat: int = 0) -> None:
+        super().__init__(expr)
+        self.expr = self.expr().leave_whitespace()
+        self._may_return_empty = True
+        self.mayIndexError = False
+        self.exact = False
+        if isinstance(expr, str_type):
+            expr = typing.cast(str, expr)
+            retreat = len(expr)
+            self.exact = True
+        elif isinstance(expr, (Literal, Keyword)):
+            retreat = expr.matchLen
+            self.exact = True
+        elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
+            retreat = expr.maxLen
+            self.exact = True
+        elif isinstance(expr, PositionToken):
+            retreat = 0
+            self.exact = True
+        self.retreat = retreat
+        self.errmsg = f"not preceded by {expr}"
+        self.skipWhitespace = False
+        self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
+
+    def parseImpl(self, instring, loc=0, do_actions=True) -> ParseImplReturnType:
+        if self.exact:
+            if loc < self.retreat:
+                raise ParseException(instring, loc, self.errmsg, self)
+            start = loc - self.retreat
+            _, ret = self.expr._parse(instring, start)
+            return loc, ret
+
+        # retreat specified a maximum lookbehind window, iterate
+        test_expr = self.expr + StringEnd()
+        instring_slice = instring[max(0, loc - self.retreat) : loc]
+        last_expr: ParseBaseException = ParseException(instring, loc, self.errmsg, self)
+
+        for offset in range(1, min(loc, self.retreat + 1) + 1):
+            try:
+                # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
+                _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset)
+            except ParseBaseException as pbe:
+                last_expr = pbe
+            else:
+                break
+        else:
+            raise last_expr
+
+        return loc, ret
+
+
+class Located(ParseElementEnhance):
+    """
+    Decorates a returned token with its starting and ending
+    locations in the input string.
+
+    This helper adds the following results names:
+
+    - ``locn_start`` - location where matched expression begins
+    - ``locn_end`` - location where matched expression ends
+    - ``value`` - the actual parsed results
+
+    Be careful if the input text contains ``<TAB>`` characters, you
+    may want to call :class:`ParserElement.parse_with_tabs`
+
+    Example:
+
+    .. testcode::
+
+        wd = Word(alphas)
+        for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
+            print(match)
+
+    prints:
+
+    .. testoutput::
+
+        [0, ['ljsdf'], 5]
+        [8, ['lksdjjf'], 15]
+        [18, ['lkkjj'], 23]
+    """
+
+    def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
+        start = loc
+        loc, tokens = self.expr._parse(instring, start, do_actions, callPreParse=False)
+        ret_tokens = ParseResults([start, tokens, loc])
+        ret_tokens["locn_start"] = start
+        ret_tokens["value"] = tokens
+        ret_tokens["locn_end"] = loc
+        if self.resultsName:
+            # must return as a list, so that the name will be attached to the complete group
+            return loc, [ret_tokens]
+        else:
+            return loc, ret_tokens
+
+
+class NotAny(ParseElementEnhance):
+    """
+    Lookahead to disallow matching with the given parse expression.
+    ``NotAny`` does *not* advance the parsing position within the
+    input string, it only verifies that the specified parse expression
+    does *not* match at the current position. Also, ``NotAny`` does
+    *not* skip over leading whitespace. ``NotAny`` always returns
+    a null token list. May be constructed using the ``'~'`` operator.
+
+    Example:
+
+    .. 
testcode:: + + AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) + + # take care not to mistake keywords for identifiers + ident = ~(AND | OR | NOT) + Word(alphas) + boolean_term = Opt(NOT) + ident + + # very crude boolean expression - to support parenthesis groups and + # operation hierarchy, use infix_notation + boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...] + + # integers that are followed by "." are actually floats + integer = Word(nums) + ~Char(".") + """ + + def __init__(self, expr: Union[ParserElement, str]) -> None: + super().__init__(expr) + # do NOT use self.leave_whitespace(), don't want to propagate to exprs + # self.leave_whitespace() + self.skipWhitespace = False + + self._may_return_empty = True + self.errmsg = f"Found unwanted token, {self.expr}" + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if self.expr.can_parse_next(instring, loc, do_actions=do_actions): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + def _generateDefaultName(self) -> str: + return f"~{{{self.expr}}}" + + +class _MultipleMatch(ParseElementEnhance): + def __init__( + self, + expr: Union[str, ParserElement], + stop_on: typing.Optional[Union[ParserElement, str]] = None, + **kwargs, + ) -> None: + stopOn: typing.Optional[Union[ParserElement, str]] = deprecate_argument( + kwargs, "stopOn", None + ) + + super().__init__(expr) + stopOn = stopOn or stop_on + self.saveAsList = True + ender = stopOn + if isinstance(ender, str_type): + ender = self._literalStringClass(ender) + self.stopOn(ender) + + def stop_on(self, ender) -> ParserElement: + if isinstance(ender, str_type): + ender = self._literalStringClass(ender) + self.not_ender = ~ender if ender is not None else None + return self + + stopOn = stop_on + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + self_expr_parse = self.expr._parse + self_skip_ignorables = self._skipIgnorables + check_ender = False + if self.not_ender is not None: + try_not_ender = self.not_ender.try_parse + check_ender = True + + # must be at least one (but first see if we are the stopOn sentinel; + # if so, fail) + if check_ender: + try_not_ender(instring, loc) + loc, tokens = self_expr_parse(instring, loc, do_actions) + try: + hasIgnoreExprs = not not self.ignoreExprs + while 1: + if check_ender: + try_not_ender(instring, loc) + if hasIgnoreExprs: + preloc = self_skip_ignorables(instring, loc) + else: + preloc = loc + loc, tmptokens = self_expr_parse(instring, preloc, do_actions) + tokens += tmptokens + except (ParseException, IndexError): + pass + + return loc, tokens + + def _setResultsName(self, name, list_all_matches=False) -> ParserElement: + if ( + __diag__.warn_ungrouped_named_tokens_in_collection + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in self.suppress_warnings_ + ): + for e in [self.expr] + self.expr.recurse(): + if ( + isinstance(e, ParserElement) + and e.resultsName + and ( + Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ) + ): + warning = ( + "warn_ungrouped_named_tokens_in_collection:" + f" setting results name {name!r} on {type(self).__name__} expression" + f" collides with {e.resultsName!r} on contained expression" + ) + warnings.warn(warning, stacklevel=3) + break + + return super()._setResultsName(name, list_all_matches) + + +class OneOrMore(_MultipleMatch): + """ + Repetition of one or more of the given expression. 
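+    May also be written as ``expr[1, ...]``.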
+ + Parameters: + + - ``expr`` - expression that must match one or more times + - ``stop_on`` - (default= ``None``) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example: + + .. doctest:: + + >>> data_word = Word(alphas) + >>> label = data_word + FollowedBy(':') + >>> attr_expr = Group( + ... label + Suppress(':') + ... + OneOrMore(data_word).set_parse_action(' '.join)) + + >>> text = "shape: SQUARE posn: upper left color: BLACK" + + # Fail! read 'posn' as data instead of next label + >>> attr_expr[1, ...].parse_string(text).pprint() + [['shape', 'SQUARE posn']] + + # use stop_on attribute for OneOrMore + # to avoid reading label string as part of the data + >>> attr_expr = Group( + ... label + Suppress(':') + ... + OneOrMore( + ... data_word, stop_on=label).set_parse_action(' '.join)) + >>> OneOrMore(attr_expr).parse_string(text).pprint() # Better + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] + + # could also be written as + >>> (attr_expr * (1,)).parse_string(text).pprint() + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] + """ + + def _generateDefaultName(self) -> str: + return f"{{{self.expr}}}..." + + +class ZeroOrMore(_MultipleMatch): + """ + Optional repetition of zero or more of the given expression. + + Parameters: + + - ``expr`` - expression that must match zero or more times + - ``stop_on`` - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) - (default= ``None``) + + Example: similar to :class:`OneOrMore` + """ + + def __init__( + self, + expr: Union[str, ParserElement], + stop_on: typing.Optional[Union[ParserElement, str]] = None, + **kwargs, + ) -> None: + stopOn: Union[ParserElement, str] = deprecate_argument(kwargs, "stopOn", None) + + super().__init__(expr, stop_on=stopOn or stop_on) + self._may_return_empty = True + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + try: + return super().parseImpl(instring, loc, do_actions) + except (ParseException, IndexError): + return loc, ParseResults([], name=self.resultsName) + + def _generateDefaultName(self) -> str: + return f"[{self.expr}]..." + + +class DelimitedList(ParseElementEnhance): + """Helper to define a delimited list of expressions - the delimiter + defaults to ','. By default, the list elements and delimiters can + have intervening whitespace, and comments, but this can be + overridden by passing ``combine=True`` in the constructor. If + ``combine`` is set to ``True``, the matching tokens are + returned as a single token string, with the delimiters included; + otherwise, the matching tokens are returned as a list of tokens, + with the delimiters suppressed. + + If ``allow_trailing_delim`` is set to True, then the list may end with + a delimiter. + + Example: + + .. doctest:: + + >>> DelimitedList(Word(alphas)).parse_string("aa,bb,cc") + ParseResults(['aa', 'bb', 'cc'], {}) + >>> DelimitedList(Word(hexnums), delim=':', combine=True + ... ).parse_string("AA:BB:CC:DD:EE") + ParseResults(['AA:BB:CC:DD:EE'], {}) + + .. 
versionadded:: 3.1.0
+    """
+
+    def __init__(
+        self,
+        expr: Union[str, ParserElement],
+        delim: Union[str, ParserElement] = ",",
+        combine: bool = False,
+        min: typing.Optional[int] = None,
+        max: typing.Optional[int] = None,
+        *,
+        allow_trailing_delim: bool = False,
+    ) -> None:
+        if isinstance(expr, str_type):
+            expr = ParserElement._literalStringClass(expr)
+        expr = typing.cast(ParserElement, expr)
+
+        if min is not None and min < 1:
+            raise ValueError("min must be greater than 0")
+
+        if max is not None and min is not None and max < min:
+            raise ValueError("max must be greater than, or equal to min")
+
+        self.content = expr
+        self.raw_delim = str(delim)
+        self.delim = delim
+        self.combine = combine
+        if not combine:
+            self.delim = Suppress(delim) if not isinstance(delim, Suppress) else delim
+        self.min = min or 1
+        self.max = max
+        self.allow_trailing_delim = allow_trailing_delim
+
+        delim_list_expr = self.content + (self.delim + self.content) * (
+            self.min - 1,
+            None if self.max is None else self.max - 1,
+        )
+        if self.allow_trailing_delim:
+            delim_list_expr += Opt(self.delim)
+
+        if self.combine:
+            delim_list_expr = Combine(delim_list_expr)
+
+        super().__init__(delim_list_expr, savelist=True)
+
+    def _generateDefaultName(self) -> str:
+        content_expr = self.content.streamline()
+        return f"{content_expr} [{self.raw_delim} {content_expr}]..."
+
+
+class _NullToken:
+    def __bool__(self):
+        return False
+
+    def __str__(self):
+        return ""
+
+
+class Opt(ParseElementEnhance):
+    """
+    Optional matching of the given expression.
+
+    :param expr: expression that may match zero or one time
+    :param default: (optional) - value to be returned
+        if the optional expression is not found.
+
+    Example:
+
+    .. testcode::
+
+        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
+        zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4)))
+        zip.run_tests('''
+            # traditional ZIP code
+            12345
+
+            # ZIP+4 form
+            12101-0001
+
+            # invalid ZIP
+            98765-
+            ''')
+
+    prints:
+
+    .. 
testoutput:: + :options: +NORMALIZE_WHITESPACE + + + # traditional ZIP code + 12345 + ['12345'] + + # ZIP+4 form + 12101-0001 + ['12101-0001'] + + # invalid ZIP + 98765- + 98765- + ^ + ParseException: Expected end of text, found '-' (at char 5), (line:1, col:6) + FAIL: Expected end of text, found '-' (at char 5), (line:1, col:6) + """ + + __optionalNotMatched = _NullToken() + + def __init__( + self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched + ) -> None: + super().__init__(expr, savelist=False) + self.saveAsList = self.expr.saveAsList + self.defaultValue = default + self._may_return_empty = True + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + self_expr = self.expr + try: + loc, tokens = self_expr._parse( + instring, loc, do_actions, callPreParse=False + ) + except (ParseException, IndexError): + default_value = self.defaultValue + if default_value is not self.__optionalNotMatched: + if self_expr.resultsName: + tokens = ParseResults([default_value]) + tokens[self_expr.resultsName] = default_value + else: + tokens = [default_value] # type: ignore[assignment] + else: + tokens = [] # type: ignore[assignment] + return loc, tokens + + def _generateDefaultName(self) -> str: + inner = str(self.expr) + # strip off redundant inner {}'s + while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": + inner = inner[1:-1] + return f"[{inner}]" + + +Optional = Opt + + +class SkipTo(ParseElementEnhance): + """ + Token for skipping over all undefined text until the matched + expression is found. + + :param expr: target expression marking the end of the data to be skipped + :param include: if ``True``, the target expression is also parsed + (the skipped text and target expression are returned + as a 2-element list) (default= ``False``). + + :param ignore: (default= ``None``) used to define grammars + (typically quoted strings and comments) + that might contain false matches to the target expression + + :param fail_on: (default= ``None``) define expressions that + are not allowed to be included in the skipped test; + if found before the target expression is found, + the :class:`SkipTo` is not a match + + Example: + + .. testcode:: + + report = ''' + Outstanding Issues Report - 1 Jan 2000 + + # | Severity | Description | Days Open + -----+----------+-------------------------------------------+----------- + 101 | Critical | Intermittent system crash | 6 + 94 | Cosmetic | Spelling error on Login ('log|n') | 14 + 79 | Minor | System slow when running too many reports | 47 + ''' + integer = Word(nums) + SEP = Suppress('|') + # use SkipTo to simply match everything up until the next SEP + # - ignore quoted strings, so that a '|' character inside a quoted string does not match + # - parse action will call token.strip() for each matched token, i.e., the description body + string_data = SkipTo(SEP, ignore=quoted_string) + string_data.set_parse_action(token_map(str.strip)) + ticket_expr = (integer("issue_num") + SEP + + string_data("sev") + SEP + + string_data("desc") + SEP + + integer("days_open")) + + for tkt in ticket_expr.search_string(report): + print(tkt.dump()) + + prints: + + .. 
testoutput:: + + ['101', 'Critical', 'Intermittent system crash', '6'] + - days_open: '6' + - desc: 'Intermittent system crash' + - issue_num: '101' + - sev: 'Critical' + ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] + - days_open: '14' + - desc: "Spelling error on Login ('log|n')" + - issue_num: '94' + - sev: 'Cosmetic' + ['79', 'Minor', 'System slow when running too many reports', '47'] + - days_open: '47' + - desc: 'System slow when running too many reports' + - issue_num: '79' + - sev: 'Minor' + """ + + def __init__( + self, + other: Union[ParserElement, str], + include: bool = False, + ignore: typing.Optional[Union[ParserElement, str]] = None, + fail_on: typing.Optional[Union[ParserElement, str]] = None, + **kwargs, + ) -> None: + failOn: typing.Optional[Union[ParserElement, str]] = deprecate_argument( + kwargs, "failOn", None + ) + + super().__init__(other) + failOn = failOn or fail_on + self.ignoreExpr = ignore + self._may_return_empty = True + self.mayIndexError = False + self.includeMatch = include + self.saveAsList = False + if isinstance(failOn, str_type): + self.failOn = self._literalStringClass(failOn) + else: + self.failOn = failOn + self.errmsg = f"No match found for {self.expr}" + self.ignorer = Empty().leave_whitespace() + self._update_ignorer() + + def _update_ignorer(self): + # rebuild internal ignore expr from current ignore exprs and assigned ignoreExpr + self.ignorer.ignoreExprs.clear() + for e in self.expr.ignoreExprs: + self.ignorer.ignore(e) + if self.ignoreExpr: + self.ignorer.ignore(self.ignoreExpr) + + def ignore(self, expr): + """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. + """ + super().ignore(expr) + self._update_ignorer() + + def parseImpl(self, instring, loc, do_actions=True): + startloc = loc + instrlen = len(instring) + self_expr_parse = self.expr._parse + self_failOn_canParseNext = ( + self.failOn.can_parse_next if self.failOn is not None else None + ) + ignorer_try_parse = self.ignorer.try_parse if self.ignorer.ignoreExprs else None + + tmploc = loc + while tmploc <= instrlen: + if self_failOn_canParseNext is not None: + # break if failOn expression matches + if self_failOn_canParseNext(instring, tmploc): + break + + if ignorer_try_parse is not None: + # advance past ignore expressions + prev_tmploc = tmploc + while 1: + try: + tmploc = ignorer_try_parse(instring, tmploc) + except ParseBaseException: + break + # see if all ignorers matched, but didn't actually ignore anything + if tmploc == prev_tmploc: + break + prev_tmploc = tmploc + + try: + self_expr_parse(instring, tmploc, do_actions=False, callPreParse=False) + except (ParseException, IndexError): + # no match, advance loc in string + tmploc += 1 + else: + # matched skipto expr, done + break + + else: + # ran off the end of the input string without matching skipto expr, fail + raise ParseException(instring, loc, self.errmsg, self) + + # build up return values + loc = tmploc + skiptext = instring[startloc:loc] + skipresult = ParseResults(skiptext) + + if self.includeMatch: + loc, mat = self_expr_parse(instring, loc, do_actions, callPreParse=False) + skipresult += mat + + return loc, skipresult + + +class Forward(ParseElementEnhance): + """ + Forward declaration of an expression to be defined later - + used for recursive grammars, such as algebraic infix notation. 
+ When the expression is known, it is assigned to the ``Forward`` + instance using the ``'<<'`` operator. + + .. Note:: + + Take care when assigning to ``Forward`` not to overlook + precedence of operators. + + Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that:: + + fwd_expr << a | b | c + + will actually be evaluated as:: + + (fwd_expr << a) | b | c + + thereby leaving b and c out as parseable alternatives. + It is recommended that you explicitly group the values + inserted into the :class:`Forward`:: + + fwd_expr << (a | b | c) + + Converting to use the ``'<<='`` operator instead will avoid this problem. + + See :meth:`ParseResults.pprint` for an example of a recursive + parser created using :class:`Forward`. + """ + + def __init__( + self, other: typing.Optional[Union[ParserElement, str]] = None + ) -> None: + self.caller_frame = traceback.extract_stack(limit=2)[0] + super().__init__(other, savelist=False) # type: ignore[arg-type] + self.lshift_line = None + + def __lshift__(self, other) -> Forward: + if hasattr(self, "caller_frame"): + del self.caller_frame + if isinstance(other, str_type): + other = self._literalStringClass(other) + + if not isinstance(other, ParserElement): + return NotImplemented + + self.expr = other + self.streamlined = other.streamlined + self.mayIndexError = self.expr.mayIndexError + self._may_return_empty = self.expr.mayReturnEmpty + self.set_whitespace_chars( + self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars + ) + self.skipWhitespace = self.expr.skipWhitespace + self.saveAsList = self.expr.saveAsList + self.ignoreExprs.extend(self.expr.ignoreExprs) + self.lshift_line = traceback.extract_stack(limit=2)[-2] # type: ignore[assignment] + return self + + def __ilshift__(self, other) -> Forward: + if not isinstance(other, ParserElement): + return NotImplemented + + return self << other + + def __or__(self, other) -> ParserElement: + caller_line = traceback.extract_stack(limit=2)[-2] + if ( + __diag__.warn_on_match_first_with_lshift_operator + and caller_line == self.lshift_line + and Diagnostics.warn_on_match_first_with_lshift_operator + not in self.suppress_warnings_ + ): + warnings.warn( + "warn_on_match_first_with_lshift_operator:" + " using '<<' operator with '|' is probably an error, use '<<='", + stacklevel=2, + ) + ret = super().__or__(other) + return ret + + def __del__(self): + # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<' + if ( + self.expr is None + and __diag__.warn_on_assignment_to_Forward + and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_ + ): + warnings.warn_explicit( + "warn_on_assignment_to_Forward:" + " Forward defined here but no expression attached later using '<<=' or '<<'", + UserWarning, + filename=self.caller_frame.filename, + lineno=self.caller_frame.lineno, + ) + + def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType: + if ( + self.expr is None + and __diag__.warn_on_parse_using_empty_Forward + and Diagnostics.warn_on_parse_using_empty_Forward + not in self.suppress_warnings_ + ): + # walk stack until parse_string, scan_string, search_string, or transform_string is found + parse_fns = ( + "parse_string", + "scan_string", + "search_string", + "transform_string", + ) + tb = traceback.extract_stack(limit=200) + for i, frm in enumerate(reversed(tb), start=1): + if frm.name in parse_fns: + stacklevel = i + 1 + break + else: + stacklevel = 2 + warnings.warn( + "warn_on_parse_using_empty_Forward:" + " Forward 
expression was never assigned a value, will not parse any input", + stacklevel=stacklevel, + ) + if not ParserElement._left_recursion_enabled: + return super().parseImpl(instring, loc, do_actions) + # ## Bounded Recursion algorithm ## + # Recursion only needs to be processed at ``Forward`` elements, since they are + # the only ones that can actually refer to themselves. The general idea is + # to handle recursion stepwise: We start at no recursion, then recurse once, + # recurse twice, ..., until more recursion offers no benefit (we hit the bound). + # + # The "trick" here is that each ``Forward`` gets evaluated in two contexts + # - to *match* a specific recursion level, and + # - to *search* the bounded recursion level + # and the two run concurrently. The *search* must *match* each recursion level + # to find the best possible match. This is handled by a memo table, which + # provides the previous match to the next level match attempt. + # + # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al. + # + # There is a complication since we not only *parse* but also *transform* via + # actions: We do not want to run the actions too often while expanding. Thus, + # we expand using `do_actions=False` and only run `do_actions=True` if the next + # recursion level is acceptable. + with ParserElement.recursion_lock: + memo = ParserElement.recursion_memos + try: + # we are parsing at a specific recursion expansion - use it as-is + prev_loc, prev_result = memo[loc, self, do_actions] + if isinstance(prev_result, Exception): + raise prev_result + return prev_loc, prev_result.copy() + except KeyError: + act_key = (loc, self, True) + peek_key = (loc, self, False) + # we are searching for the best recursion expansion - keep on improving + # both `do_actions` cases must be tracked separately here! + prev_loc, prev_peek = memo[peek_key] = ( + loc - 1, + ParseException( + instring, loc, "Forward recursion without base case", self + ), + ) + if do_actions: + memo[act_key] = memo[peek_key] + while True: + try: + new_loc, new_peek = super().parseImpl(instring, loc, False) + except ParseException: + # we failed before getting any match - do not hide the error + if isinstance(prev_peek, Exception): + raise + new_loc, new_peek = prev_loc, prev_peek + # the match did not get better: we are done + if new_loc <= prev_loc: + if do_actions: + # replace the match for do_actions=False as well, + # in case the action did backtrack + prev_loc, prev_result = memo[peek_key] = memo[act_key] + del memo[peek_key], memo[act_key] + return prev_loc, copy.copy(prev_result) + del memo[peek_key] + return prev_loc, copy.copy(prev_peek) + # the match did get better: see if we can improve further + if do_actions: + try: + memo[act_key] = super().parseImpl(instring, loc, True) + except ParseException as e: + memo[peek_key] = memo[act_key] = (new_loc, e) + raise + prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``leave_whitespace`` defined in base class. + """ + self.skipWhitespace = False + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``ignore_whitespace`` defined in base class. 
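+        Unlike the base class method, this does not copy or modify the
+        contained expression; only the ``Forward`` itself is affected.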
+ """ + self.skipWhitespace = True + return self + + def streamline(self) -> ParserElement: + if not self.streamlined: + self.streamlined = True + if self.expr is not None: + self.expr.streamline() + return self + + def validate(self, validateTrace=None) -> None: + warnings.warn( + "ParserElement.validate() is deprecated, and should not be used to check for left recursion", + DeprecationWarning, + stacklevel=2, + ) + if validateTrace is None: + validateTrace = [] + + if self not in validateTrace: + tmp = validateTrace[:] + [self] + if self.expr is not None: + self.expr.validate(tmp) + self._checkRecursion([]) + + def _generateDefaultName(self) -> str: + # Avoid infinite recursion by setting a temporary _defaultName + save_default_name = self._defaultName + self._defaultName = ": ..." + + # Use the string representation of main expression. + try: + if self.expr is not None: + ret_string = str(self.expr)[:1000] + else: + ret_string = "None" + except Exception: + ret_string = "..." + + self._defaultName = save_default_name + return f"{type(self).__name__}: {ret_string}" + + def copy(self) -> ParserElement: + """ + Returns a copy of this expression. + + Generally only used internally by pyparsing. + """ + if self.expr is not None: + return super().copy() + else: + ret = Forward() + ret <<= self + return ret + + def _setResultsName(self, name, list_all_matches=False) -> ParserElement: + # fmt: off + if ( + __diag__.warn_name_set_on_empty_Forward + and Diagnostics.warn_name_set_on_empty_Forward not in self.suppress_warnings_ + and self.expr is None + ): + warning = ( + "warn_name_set_on_empty_Forward:" + f" setting results name {name!r} on {type(self).__name__} expression" + " that has no contained expression" + ) + warnings.warn(warning, stacklevel=3) + # fmt: on + + return super()._setResultsName(name, list_all_matches) + + # Compatibility synonyms + # fmt: off + leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace) + ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace) + # fmt: on + + +class TokenConverter(ParseElementEnhance): + """ + Abstract subclass of :class:`ParseElementEnhance`, for converting parsed results. + """ + + def __init__(self, expr: Union[ParserElement, str], savelist=False) -> None: + super().__init__(expr) # , savelist) + self.saveAsList = False + + +class Combine(TokenConverter): + """Converter to concatenate all matching tokens to a single string. + By default, the matching patterns must also be contiguous in the + input string; this can be disabled by specifying + ``'adjacent=False'`` in the constructor. + + Example: + + .. doctest:: + + >>> real = Word(nums) + '.' + Word(nums) + >>> print(real.parse_string('3.1416')) + ['3', '.', '1416'] + + >>> # will also erroneously match the following + >>> print(real.parse_string('3. 1416')) + ['3', '.', '1416'] + + >>> real = Combine(Word(nums) + '.' + Word(nums)) + >>> print(real.parse_string('3.1416')) + ['3.1416'] + + >>> # no match when there are internal spaces + >>> print(real.parse_string('3. 1416')) + Traceback (most recent call last): + ParseException: Expected W:(0123...) 
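+
+        With ``adjacent=False``, intervening whitespace is tolerated
+        (the whitespace is dropped when the tokens are joined):
+
+        >>> loose = Combine(Word(nums) + '.' + Word(nums), adjacent=False)
+        >>> print(loose.parse_string('3. 1416'))
+        ['3.1416']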
+ """ + + def __init__( + self, + expr: ParserElement, + join_string: str = "", + adjacent: bool = True, + *, + joinString: typing.Optional[str] = None, + ) -> None: + super().__init__(expr) + joinString = joinString if joinString is not None else join_string + # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself + if adjacent: + self.leave_whitespace() + self.adjacent = adjacent + self.skipWhitespace = True + self.joinString = joinString + self.callPreparse = True + + def ignore(self, other) -> ParserElement: + """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. + """ + if self.adjacent: + ParserElement.ignore(self, other) + else: + super().ignore(other) + return self + + def postParse(self, instring, loc, tokenlist): + retToks = tokenlist.copy() + del retToks[:] + retToks += ParseResults( + ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults + ) + + if self.resultsName and retToks.haskeys(): + return [retToks] + else: + return retToks + + +class Group(TokenConverter): + """Converter to return the matched tokens as a list - useful for + returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. + + The optional ``aslist`` argument when set to True will return the + parsed tokens as a Python list instead of a pyparsing ParseResults. + + Example: + + .. doctest:: + + >>> ident = Word(alphas) + >>> num = Word(nums) + >>> term = ident | num + >>> func = ident + Opt(DelimitedList(term)) + >>> print(func.parse_string("fn a, b, 100")) + ['fn', 'a', 'b', '100'] + + >>> func = ident + Group(Opt(DelimitedList(term))) + >>> print(func.parse_string("fn a, b, 100")) + ['fn', ['a', 'b', '100']] + """ + + def __init__(self, expr: ParserElement, aslist: bool = False) -> None: + super().__init__(expr) + self.saveAsList = True + self._asPythonList = aslist + + def postParse(self, instring, loc, tokenlist): + if self._asPythonList: + return ParseResults.List( + tokenlist.as_list() + if isinstance(tokenlist, ParseResults) + else list(tokenlist) + ) + + return [tokenlist] + + +class Dict(TokenConverter): + """Converter to return a repetitive expression as a list, but also + as a dictionary. Each element can also be referenced using the first + token in the expression as its key. Useful for tabular report + scraping when the first column can be used as a item key. + + The optional ``asdict`` argument when set to True will return the + parsed tokens as a Python dict instead of a pyparsing ParseResults. + + Example: + + .. doctest:: + + >>> data_word = Word(alphas) + >>> label = data_word + FollowedBy(':') + + >>> attr_expr = ( + ... label + Suppress(':') + ... + OneOrMore(data_word, stop_on=label) + ... .set_parse_action(' '.join) + ... ) + + >>> text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + + >>> # print attributes as plain groups + >>> print(attr_expr[1, ...].parse_string(text).dump()) + ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] + + # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) + # Dict will auto-assign names. 
+ >>> result = Dict(Group(attr_expr)[1, ...]).parse_string(text) + >>> print(result.dump()) + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: 'light blue' + - posn: 'upper left' + - shape: 'SQUARE' + - texture: 'burlap' + [0]: + ['shape', 'SQUARE'] + [1]: + ['posn', 'upper left'] + [2]: + ['color', 'light blue'] + [3]: + ['texture', 'burlap'] + + # access named fields as dict entries, or output as dict + >>> print(result['shape']) + SQUARE + >>> print(result.as_dict()) + {'shape': 'SQUARE', 'posn': 'upper left', 'color': 'light blue', 'texture': 'burlap'} + + See more examples at :class:`ParseResults` of accessing fields by results name. + """ + + def __init__(self, expr: ParserElement, asdict: bool = False) -> None: + super().__init__(expr) + self.saveAsList = True + self._asPythonDict = asdict + + def postParse(self, instring, loc, tokenlist): + for i, tok in enumerate(tokenlist): + if len(tok) == 0: + continue + + ikey = tok[0] + if isinstance(ikey, int): + ikey = str(ikey).strip() + + if len(tok) == 1: + tokenlist[ikey] = _ParseResultsWithOffset("", i) + + elif len(tok) == 2 and not isinstance(tok[1], ParseResults): + tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) + + else: + try: + dictvalue = tok.copy() # ParseResults(i) + except Exception: + exc = TypeError( + "could not extract dict values from parsed results" + " - Dict expression must contain Grouped expressions" + ) + raise exc from None + + del dictvalue[0] + + if len(dictvalue) != 1 or ( + isinstance(dictvalue, ParseResults) and dictvalue.haskeys() + ): + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) + else: + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) + + if self._asPythonDict: + return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict() + + return [tokenlist] if self.resultsName else tokenlist + + +class Suppress(TokenConverter): + """Converter for ignoring the results of a parsed expression. + + Example: + + .. doctest:: + + >>> source = "a, b, c,d" + >>> wd = Word(alphas) + >>> wd_list1 = wd + (',' + wd)[...] + >>> print(wd_list1.parse_string(source)) + ['a', ',', 'b', ',', 'c', ',', 'd'] + + # often, delimiters that are useful during parsing are just in the + # way afterward - use Suppress to keep them out of the parsed output + >>> wd_list2 = wd + (Suppress(',') + wd)[...] + >>> print(wd_list2.parse_string(source)) + ['a', 'b', 'c', 'd'] + + # Skipped text (using '...') can be suppressed as well + >>> source = "lead in START relevant text END trailing text" + >>> start_marker = Keyword("START") + >>> end_marker = Keyword("END") + >>> find_body = Suppress(...) + start_marker + ... + end_marker + >>> print(find_body.parse_string(source)) + ['START', 'relevant text ', 'END'] + + (See also :class:`DelimitedList`.) 
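+
+    The same effect can also be had by calling ``suppress()`` on an
+    existing expression:
+
+    .. doctest::
+
+        >>> wd = Word(alphas)
+        >>> wd_list3 = wd + (Literal(',').suppress() + wd)[...]
+        >>> print(wd_list3.parse_string("a, b, c,d"))
+        ['a', 'b', 'c', 'd']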
+    """
+
+    def __init__(self, expr: Union[ParserElement, str], savelist: bool = False) -> None:
+        if expr is ...:
+            expr = _PendingSkip(NoMatch())
+        super().__init__(expr)
+
+    def __add__(self, other) -> ParserElement:
+        if isinstance(self.expr, _PendingSkip):
+            return Suppress(SkipTo(other)) + other
+
+        return super().__add__(other)
+
+    def __sub__(self, other) -> ParserElement:
+        if isinstance(self.expr, _PendingSkip):
+            return Suppress(SkipTo(other)) - other
+
+        return super().__sub__(other)
+
+    def postParse(self, instring, loc, tokenlist):
+        return []
+
+    def suppress(self) -> ParserElement:
+        return self
+
+
+# XXX: Example needs to be re-done for updated output
+def trace_parse_action(f: ParseAction) -> ParseAction:
+    """Decorator for debugging parse actions.
+
+    When the parse action is called, this decorator will print
+    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
+    When the parse action completes, the decorator will print
+    ``"<<"`` followed by the returned value, or any exception that the parse action raised.
+
+    Example:
+
+    .. testsetup:: stderr
+
+        import sys
+        sys.stderr = sys.stdout
+
+    .. testcleanup:: stderr
+
+        sys.stderr = sys.__stderr__
+
+    .. testcode:: stderr
+
+        wd = Word(alphas)
+
+        @trace_parse_action
+        def remove_duplicate_chars(tokens):
+            return ''.join(sorted(set(''.join(tokens))))
+
+        wds = wd[1, ...].set_parse_action(remove_duplicate_chars)
+        print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
+
+    prints:
+
+    .. testoutput:: stderr
+        :options: +NORMALIZE_WHITESPACE
+
+        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf',
+        0, ParseResults(['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+        <<leaving remove_duplicate_chars (ret: 'dfjkls')
+        ['dfjkls']
+    """
+    f = _trim_arity(f)
+
+    def z(*paArgs):
+        thisFunc = f.__name__
+        s, l, t = paArgs[-3:]
+        if len(paArgs) > 3:
+            thisFunc = f"{type(paArgs[0]).__name__}.{thisFunc}"
+        sys.stderr.write(f">>entering {thisFunc}(line: {line(l, s)!r}, {l}, {t!r})\n")
+        try:
+            ret = f(*paArgs)
+        except Exception as exc:
+            sys.stderr.write(
+                f"<<leaving {thisFunc} (exception: {type(exc).__name__}: {exc})\n"
+            )
+            raise
+        sys.stderr.write(f"<<leaving {thisFunc} (ret: {ret!r})\n")
+        return ret
+
+    z.__name__ = f.__name__
+    return z
+
+
+def srange(s: str) -> str:
+    r"""Helper to easily define string ranges for use in :class:`Word`
+    construction. Borrows syntax from regexp ``'[]'`` string range
+    definitions::
+
+        srange("[0-9]")   -> "0123456789"
+        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
+        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+
+    The input string must be enclosed in []'s, and the returned string
+    is the expanded character set joined into a single string. The
+    values enclosed in the []'s may be:
+
+    - a single character
+    - an escaped character with a leading backslash (such as ``\-``
+      or ``\]``)
+    - an escaped hex character with a leading ``'\x'``
+      (``\x21``, which is a ``'!'`` character) (``\0x##``
+      is also supported for backwards compatibility)
+    - an escaped octal character with a leading ``'\0'``
+      (``\041``, which is a ``'!'`` character)
+    - a range of any of the above, separated by a dash (``'a-z'``,
+      etc.)
+    - any combination of the above (``'aeiouy'``,
+      ``'a-zA-Z0-9_$'``, etc.)
+    """
+
+    def _expanded(p):
+        if isinstance(p, ParseResults):
+            yield from (chr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
+        else:
+            yield p
+
+    try:
+        return "".join(
+            [c for part in _reBracketExpr.parse_string(s).body for c in _expanded(part)]
+        )
+    except Exception as e:
+        return ""
+
+
+def token_map(func, *args) -> ParseAction:
+    """Helper to define a parse action by mapping a function to all
+    elements of a :class:`ParseResults` list. 
If any additional args are passed, + they are forwarded to the given function as additional arguments + after the token, as in + ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``, + which will convert the parsed data to an integer using base 16. + + Example (compare the last to example in :class:`ParserElement.transform_string`:: + + hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16)) + hex_ints.run_tests(''' + 00 11 22 aa FF 0a 0d 1a + ''') + + upperword = Word(alphas).set_parse_action(token_map(str.upper)) + upperword[1, ...].run_tests(''' + my kingdom for a horse + ''') + + wd = Word(alphas).set_parse_action(token_map(str.title)) + wd[1, ...].set_parse_action(' '.join).run_tests(''' + now is the winter of our discontent made glorious summer by this sun of york + ''') + + prints:: + + 00 11 22 aa FF 0a 0d 1a + [0, 17, 34, 170, 255, 10, 13, 26] + + my kingdom for a horse + ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] + + now is the winter of our discontent made glorious summer by this sun of york + ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] + """ + + def pa(s, l, t): + return [func(tokn, *args) for tokn in t] + + func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) + pa.__name__ = func_name + + return pa + + +def autoname_elements() -> None: + """ + Utility to simplify mass-naming of parser elements, for + generating railroad diagram with named subdiagrams. + """ + + # guard against _getframe not being implemented in the current Python + getframe_fn = getattr(sys, "_getframe", lambda _: None) + calling_frame = getframe_fn(1) + if calling_frame is None: + return + + # find all locals in the calling frame that are ParserElements + calling_frame = typing.cast(types.FrameType, calling_frame) + for name, var in calling_frame.f_locals.items(): + # if no custom name defined, set the name to the var name + if isinstance(var, ParserElement) and not var.customName: + var.set_name(name) + + +dbl_quoted_string = Combine( + Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' +).set_name("string enclosed in double quotes") + +sgl_quoted_string = Combine( + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" +).set_name("string enclosed in single quotes") + +quoted_string = Combine( + (Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').set_name( + "double quoted string" + ) + | (Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").set_name( + "single quoted string" + ) +).set_name("quoted string using single or double quotes") + +# XXX: Is there some way to make this show up in API docs? +# .. 
versionadded:: 3.1.0 +python_quoted_string = Combine( + (Regex(r'"""(?:[^"\\]|""(?!")|"(?!"")|\\.)*', flags=re.MULTILINE) + '"""').set_name( + "multiline double quoted string" + ) + ^ ( + Regex(r"'''(?:[^'\\]|''(?!')|'(?!'')|\\.)*", flags=re.MULTILINE) + "'''" + ).set_name("multiline single quoted string") + ^ (Regex(r'"(?:[^"\n\r\\]|(?:\\")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').set_name( + "double quoted string" + ) + ^ (Regex(r"'(?:[^'\n\r\\]|(?:\\')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").set_name( + "single quoted string" + ) +).set_name("Python quoted string") + +unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") + + +alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +# build list of built-in expressions, for future reference if a global default value +# gets updated +_builtin_exprs: list[ParserElement] = [ + v for v in vars().values() if isinstance(v, ParserElement) +] + +# Compatibility synonyms +# fmt: off +sglQuotedString = sgl_quoted_string +dblQuotedString = dbl_quoted_string +quotedString = quoted_string +unicodeString = unicode_string +lineStart = line_start +lineEnd = line_end +stringStart = string_start +stringEnd = string_end +nullDebugAction = replaced_by_pep8("nullDebugAction", null_debug_action) +traceParseAction = replaced_by_pep8("traceParseAction", trace_parse_action) +conditionAsParseAction = replaced_by_pep8("conditionAsParseAction", condition_as_parse_action) +tokenMap = replaced_by_pep8("tokenMap", token_map) +# fmt: on diff --git a/py311/lib/python3.11/site-packages/pyparsing/exceptions.py b/py311/lib/python3.11/site-packages/pyparsing/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..8f0ab7a0178d3fb54add4ae6f826a0081ba0bc83 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyparsing/exceptions.py @@ -0,0 +1,352 @@ +# exceptions.py +from __future__ import annotations + +import copy +import re +import sys +import typing +import warnings +from functools import cached_property + +from .unicode import pyparsing_unicode as ppu +from .util import ( + _collapse_string_to_ranges, + col, + deprecate_argument, + line, + lineno, + replaced_by_pep8, +) + + +class _ExceptionWordUnicodeSet( + ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic +): + pass + + +_extract_alphanums = _collapse_string_to_ranges(_ExceptionWordUnicodeSet.alphanums) +_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") + + +class ParseBaseException(Exception): + """base exception class for all parsing runtime exceptions""" + + loc: int + msg: str + pstr: str + parser_element: typing.Any # "ParserElement" + args: tuple[str, int, typing.Optional[str]] + + __slots__ = ( + "loc", + "msg", + "pstr", + "parser_element", + "args", + ) + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( + self, + pstr: str, + loc: int = 0, + msg: typing.Optional[str] = None, + elem=None, + ) -> None: + if msg is None: + msg, pstr = pstr, "" + + self.loc = loc + self.msg = msg + self.pstr = pstr + self.parser_element = elem + self.args = (pstr, loc, msg) + + @staticmethod + def explain_exception(exc: Exception, depth: int = 16) -> str: + """ + Method to take an exception and translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. 
+
+        Parameters:
+
+        - exc - exception raised during parsing (need not be a ParseException, in support
+          of Python exceptions that might be raised in a parse action)
+        - depth (default=16) - number of levels back in the stack trace to list expression
+          and function names; if None, the full stack trace names will be listed; if 0, only
+          the failing input line, marker, and exception string will be shown
+
+        Returns a multi-line string listing the ParserElements and/or function names in the
+        exception's stack trace.
+        """
+        import inspect
+        from .core import ParserElement
+
+        if depth is None:
+            depth = sys.getrecursionlimit()
+        ret: list[str] = []
+        if isinstance(exc, ParseBaseException):
+            ret.append(exc.line)
+            ret.append(f"{'^':>{exc.column}}")
+        ret.append(f"{type(exc).__name__}: {exc}")
+
+        if depth <= 0 or exc.__traceback__ is None:
+            return "\n".join(ret)
+
+        callers = inspect.getinnerframes(exc.__traceback__, context=depth)
+        seen: set[int] = set()
+        for ff in callers[-depth:]:
+            frm = ff[0]
+
+            f_self = frm.f_locals.get("self", None)
+            if isinstance(f_self, ParserElement):
+                if not frm.f_code.co_name.startswith(("parseImpl", "_parseNoCache")):
+                    continue
+                if id(f_self) in seen:
+                    continue
+                seen.add(id(f_self))
+
+                self_type = type(f_self)
+                ret.append(f"{self_type.__module__}.{self_type.__name__} - {f_self}")
+
+            elif f_self is not None:
+                self_type = type(f_self)
+                ret.append(f"{self_type.__module__}.{self_type.__name__}")
+
+            else:
+                code = frm.f_code
+                if code.co_name in ("wrapper", "<module>"):
+                    continue
+
+                ret.append(code.co_name)
+
+            depth -= 1
+            if not depth:
+                break
+
+        return "\n".join(ret)
+
+    @classmethod
+    def _from_exception(cls, pe) -> ParseBaseException:
+        """
+        internal factory method to simplify creating one type of ParseException
+        from another - avoids having __init__ signature conflicts among subclasses
+        """
+        return cls(pe.pstr, pe.loc, pe.msg, pe.parser_element)
+
+    @cached_property
+    def line(self) -> str:
+        """
+        Return the line of text where the exception occurred.
+        """
+        return line(self.loc, self.pstr)
+
+    @cached_property
+    def lineno(self) -> int:
+        """
+        Return the 1-based line number of text where the exception occurred.
+        """
+        return lineno(self.loc, self.pstr)
+
+    @cached_property
+    def col(self) -> int:
+        """
+        Return the 1-based column on the line of text where the exception occurred.
+        """
+        return col(self.loc, self.pstr)
+
+    @cached_property
+    def column(self) -> int:
+        """
+        Return the 1-based column on the line of text where the exception occurred.
+        """
+        return col(self.loc, self.pstr)
+
+    @cached_property
+    def found(self) -> str:
+        if not self.pstr:
+            return ""
+
+        if self.loc >= len(self.pstr):
+            return "end of text"
+
+        # pull out next word at error location
+        found_match = _exception_word_extractor.match(self.pstr, self.loc)
+        if found_match is not None:
+            found_text = found_match.group(0)
+        else:
+            found_text = self.pstr[self.loc : self.loc + 1]
+
+        return repr(found_text).replace(r"\\", "\\")
+
+    # pre-PEP8 compatibility
+    @property
+    def parserElement(self):
+        warnings.warn(
+            "parserElement is deprecated, use parser_element",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return self.parser_element
+
+    @parserElement.setter
+    def parserElement(self, elem):
+        warnings.warn(
+            "parserElement is deprecated, use parser_element",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        self.parser_element = elem
+
+    def copy(self):
+        return copy.copy(self)
+
+    def formatted_message(self) -> str:
+        """
+        Output the formatted exception message. 
+ Can be overridden to customize the message formatting or contents. + + .. versionadded:: 3.2.0 + """ + found_phrase = f", found {self.found}" if self.found else "" + return f"{self.msg}{found_phrase} (at char {self.loc}), (line:{self.lineno}, col:{self.column})" + + def __str__(self) -> str: + """ + .. versionchanged:: 3.2.0 + Now uses :meth:`formatted_message` to format message. + """ + try: + return self.formatted_message() + except Exception as ex: + return ( + f"{type(self).__name__}: {self.msg}" + f" ({type(ex).__name__}: {ex} while formatting message)" + ) + + def __repr__(self): + return str(self) + + def mark_input_line( + self, marker_string: typing.Optional[str] = None, **kwargs + ) -> str: + """ + Extracts the exception line from the input string, and marks + the location of the exception with a special symbol. + """ + markerString: str = deprecate_argument(kwargs, "markerString", ">!<") + + markerString = marker_string if marker_string is not None else markerString + line_str = self.line + line_column = self.column - 1 + if markerString: + line_str = f"{line_str[:line_column]}{markerString}{line_str[line_column:]}" + return line_str.strip() + + def explain(self, depth: int = 16) -> str: + """ + Method to translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + + Example: + + .. testcode:: + + # an expression to parse 3 integers + expr = pp.Word(pp.nums) * 3 + try: + # a failing parse - the third integer is prefixed with "A" + expr.parse_string("123 456 A789") + except pp.ParseException as pe: + print(pe.explain(depth=0)) + + prints: + + .. testoutput:: + + 123 456 A789 + ^ + ParseException: Expected W:(0-9), found 'A789' (at char 8), (line:1, col:9) + + Note: the diagnostic output will include string representations of the expressions + that failed to parse. These representations will be more helpful if you use `set_name` to + give identifiable names to your expressions. Otherwise they will use the default string + forms, which may be cryptic to read. + + Note: pyparsing's default truncation of exception tracebacks may also truncate the + stack of expressions that are displayed in the ``explain`` output. To get the full listing + of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True`` + """ + return self.explain_exception(self, depth) + + # Compatibility synonyms + # fmt: off + markInputline = replaced_by_pep8("markInputline", mark_input_line) + # fmt: on + + +class ParseException(ParseBaseException): + """ + Exception thrown when a parse expression doesn't match the input string + + Example: + + .. testcode:: + + integer = Word(nums).set_name("integer") + try: + integer.parse_string("ABC") + except ParseException as pe: + print(pe, f"column: {pe.column}") + + prints: + + .. 
testoutput:: + + Expected integer, found 'ABC' (at char 0), (line:1, col:1) column: 1 + + """ + + +class ParseFatalException(ParseBaseException): + """ + User-throwable exception thrown when inconsistent parse content + is found; stops all parsing immediately + """ + + +class ParseSyntaxException(ParseFatalException): + """ + Just like :class:`ParseFatalException`, but thrown internally + when an :class:`ErrorStop` ('-' operator) indicates + that parsing is to stop immediately because an unbacktrackable + syntax error has been found. + """ + + +class RecursiveGrammarException(Exception): + """ + .. deprecated:: 3.0.0 + Only used by the deprecated :meth:`ParserElement.validate`. + + Exception thrown by :class:`ParserElement.validate` if the + grammar could be left-recursive; parser may need to enable + left recursion using :class:`ParserElement.enable_left_recursion` + """ + + def __init__(self, parseElementList) -> None: + self.parseElementTrace = parseElementList + + def __str__(self) -> str: + return f"RecursiveGrammarException: {self.parseElementTrace}" diff --git a/py311/lib/python3.11/site-packages/pyparsing/helpers.py b/py311/lib/python3.11/site-packages/pyparsing/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..597e6e5e1b0f119c982ef31501a8837cac14945d --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyparsing/helpers.py @@ -0,0 +1,1217 @@ +# helpers.py +import html.entities +import operator +import re +import sys +import typing + +from . import __diag__ +from .core import * +from .util import ( + _bslash, + _flatten, + _escape_regex_range_chars, + make_compressed_re, + replaced_by_pep8, +) + + +def _suppression(expr: Union[ParserElement, str]) -> ParserElement: + # internal helper to avoid wrapping Suppress inside another Suppress + if isinstance(expr, Suppress): + return expr + return Suppress(expr) + + +# +# global helpers +# +def counted_array( + expr: ParserElement, int_expr: typing.Optional[ParserElement] = None, **kwargs +) -> ParserElement: + """Helper to define a counted list of expressions. + + This helper defines a pattern of the form:: + + integer expr expr expr... + + where the leading integer tells how many expr expressions follow. + The matched tokens returns the array of expr tokens as a list - the + leading count token is suppressed. + + If ``int_expr`` is specified, it should be a pyparsing expression + that produces an integer value. + + Examples: + + .. doctest:: + + >>> counted_array(Word(alphas)).parse_string('2 ab cd ef') + ParseResults(['ab', 'cd'], {}) + + - In this parser, the leading integer value is given in binary, + '10' indicating that 2 values are in the array: + + .. doctest:: + + >>> binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2)) + >>> counted_array(Word(alphas), int_expr=binary_constant + ... ).parse_string('10 ab cd ef') + ParseResults(['ab', 'cd'], {}) + + - If other fields must be parsed after the count but before the + list items, give the fields results names and they will + be preserved in the returned ParseResults: + + .. doctest:: + + >>> ppc = pyparsing.common + >>> count_with_metadata = ppc.integer + Word(alphas)("type") + >>> typed_array = counted_array(Word(alphanums), + ... 
int_expr=count_with_metadata)("items") + >>> result = typed_array.parse_string("3 bool True True False") + >>> print(result.dump()) + ['True', 'True', 'False'] + - items: ['True', 'True', 'False'] + - type: 'bool' + """ + intExpr: typing.Optional[ParserElement] = deprecate_argument( + kwargs, "intExpr", None + ) + + intExpr = intExpr or int_expr + array_expr = Forward() + + def count_field_parse_action(s, l, t): + nonlocal array_expr + n = t[0] + array_expr <<= (expr * n) if n else Empty() + # clear list contents, but keep any named results + del t[:] + + if intExpr is None: + intExpr = Word(nums).set_parse_action(lambda t: int(t[0])) + else: + intExpr = intExpr.copy() + intExpr.set_name("arrayLen") + intExpr.add_parse_action(count_field_parse_action, call_during_try=True) + return (intExpr + array_expr).set_name(f"(len) {expr}...") + + +def match_previous_literal(expr: ParserElement) -> ParserElement: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + .. testcode:: + + first = Word(nums) + second = match_previous_literal(first) + match_expr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches a previous literal, will also match the leading + ``"1:1"`` in ``"1:10"``. If this is not desired, use + :class:`match_previous_expr`. Do *not* use with packrat parsing + enabled. + """ + rep = Forward() + + def copy_token_to_repeater(s, l, t): + if not t: + rep << Empty() + return + + if len(t) == 1: + rep << t[0] + return + + # flatten t tokens + tflat = _flatten(t.as_list()) + rep << And(Literal(tt) for tt in tflat) + + expr.add_parse_action(copy_token_to_repeater, call_during_try=True) + rep.set_name("(prev) " + str(expr)) + return rep + + +def match_previous_expr(expr: ParserElement) -> ParserElement: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example: + + .. testcode:: + + first = Word(nums) + second = match_previous_expr(first) + match_expr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches by expressions, will *not* match the leading ``"1:1"`` + in ``"1:10"``; the expressions are evaluated first, and then + compared, so ``"1"`` is compared with ``"10"``. Do *not* use + with packrat parsing enabled. + """ + rep = Forward() + e2 = expr.copy() + rep <<= e2 + + def copy_token_to_repeater(s, l, t): + matchTokens = _flatten(t.as_list()) + + def must_match_these_tokens(s, l, t): + theseTokens = _flatten(t.as_list()) + if theseTokens != matchTokens: + raise ParseException( + s, l, f"Expected {matchTokens}, found{theseTokens}" + ) + + rep.set_parse_action(must_match_these_tokens, call_during_try=True) + + expr.add_parse_action(copy_token_to_repeater, call_during_try=True) + rep.set_name("(prev) " + str(expr)) + return rep + + +def one_of( + strs: Union[typing.Iterable[str], str], + caseless: bool = False, + use_regex: bool = True, + as_keyword: bool = False, + **kwargs, +) -> ParserElement: + """Helper to quickly define a set of alternative :class:`Literal` s, + and makes sure to do longest-first testing when there is a conflict, + regardless of the input order, but returns + a :class:`MatchFirst` for best performance. 
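+
+    A quick doctest-style sketch (an editorial example): even though ``<``
+    is listed before ``<=`` below, the longer literal is still tested first:
+
+    .. doctest::
+
+        >>> one_of("< > <= >=").parse_string("<=")
+        ParseResults(['<='], {})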
+ + :param strs: a string of space-delimited literals, or a collection of + string literals + :param caseless: treat all literals as caseless + :param use_regex: bool - as an optimization, will + generate a :class:`Regex` object; otherwise, will generate + a :class:`MatchFirst` object (if ``caseless=True`` or + ``as_keyword=True``, or if creating a :class:`Regex` raises an exception) + :param as_keyword: bool - enforce :class:`Keyword`-style matching on the + generated expressions + + Parameters ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 + compatibility, but will be removed in a future release. + + Example: + + .. testcode:: + + comp_oper = one_of("< = > <= >= !=") + var = Word(alphas) + number = Word(nums) + term = var | number + comparison_expr = term + comp_oper + term + print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12")) + + prints: + + .. testoutput:: + + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] + """ + useRegex: bool = deprecate_argument(kwargs, "useRegex", True) + asKeyword: bool = deprecate_argument(kwargs, "asKeyword", False) + + asKeyword = asKeyword or as_keyword + useRegex = useRegex and use_regex + + if ( + isinstance(caseless, str_type) + and __diag__.warn_on_multiple_string_args_to_oneof + ): + warnings.warn( + "warn_on_multiple_string_args_to_oneof:" + " More than one string argument passed to one_of, pass" + " choices as a list or space-delimited string", + stacklevel=2, + ) + + if caseless: + is_equal = lambda a, b: a.upper() == b.upper() + masks = lambda a, b: b.upper().startswith(a.upper()) + else: + is_equal = operator.eq + masks = lambda a, b: b.startswith(a) + + symbols: list[str] + if isinstance(strs, str_type): + strs = typing.cast(str, strs) + symbols = strs.split() + elif isinstance(strs, Iterable): + symbols = list(strs) + else: + raise TypeError("Invalid argument to one_of, expected string or iterable") + if not symbols: + return NoMatch() + + # reorder given symbols to take care to avoid masking longer choices with shorter ones + # (but only if the given symbols are not just single characters) + i = 0 + while i < len(symbols) - 1: + cur = symbols[i] + for j, other in enumerate(symbols[i + 1 :]): + if is_equal(other, cur): + del symbols[i + j + 1] + break + if len(other) > len(cur) and masks(cur, other): + del symbols[i + j + 1] + symbols.insert(i, other) + break + else: + i += 1 + + if useRegex: + re_flags: int = re.IGNORECASE if caseless else 0 + + try: + if all(len(sym) == 1 for sym in symbols): + # symbols are just single characters, create range regex pattern + patt = f"[{''.join(_escape_regex_range_chars(sym) for sym in symbols)}]" + else: + patt = "|".join(re.escape(sym) for sym in symbols) + + # wrap with \b word break markers if defining as keywords + if asKeyword: + patt = rf"\b(?:{patt})\b" + + ret = Regex(patt, flags=re_flags) + ret.set_name(" | ".join(repr(s) for s in symbols)) + + if caseless: + # add parse action to return symbols as specified, not in random + # casing as found in input string + symbol_map = {sym.lower(): sym for sym in symbols} + ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()]) + + return ret + + except re.error: + warnings.warn( + "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 + ) + + # last resort, just use MatchFirst of Token class corresponding to caseless + # and asKeyword settings + CASELESS = KEYWORD = True + parse_element_class = { + (CASELESS, KEYWORD): CaselessKeyword, + (CASELESS, not KEYWORD): CaselessLiteral, + (not 
CASELESS, KEYWORD): Keyword, + (not CASELESS, not KEYWORD): Literal, + }[(caseless, asKeyword)] + return MatchFirst(parse_element_class(sym) for sym in symbols).set_name( + " | ".join(symbols) + ) + + +def dict_of(key: ParserElement, value: ParserElement) -> Dict: + """Helper to easily and clearly define a dictionary by specifying + the respective patterns for the key and value. Takes care of + defining the :class:`Dict`, :class:`ZeroOrMore`, and + :class:`Group` tokens in the proper order. The key pattern + can include delimiting markers or punctuation, as long as they are + suppressed, thereby leaving the significant key text. The value + pattern can include named results, so that the :class:`Dict` results + can include named token fields. + + Example: + + .. doctest:: + + >>> text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + + >>> data_word = Word(alphas) + >>> label = data_word + FollowedBy(':') + >>> attr_expr = ( + ... label + ... + Suppress(':') + ... + OneOrMore(data_word, stop_on=label) + ... .set_parse_action(' '.join)) + >>> print(attr_expr[1, ...].parse_string(text).dump()) + ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] + + >>> attr_label = label + >>> attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label + ... ).set_parse_action(' '.join) + + # similar to Dict, but simpler call format + >>> result = dict_of(attr_label, attr_value).parse_string(text) + >>> print(result.dump()) + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: 'light blue' + - posn: 'upper left' + - shape: 'SQUARE' + - texture: 'burlap' + [0]: + ['shape', 'SQUARE'] + [1]: + ['posn', 'upper left'] + [2]: + ['color', 'light blue'] + [3]: + ['texture', 'burlap'] + + >>> print(result['shape']) + SQUARE + >>> print(result.shape) # object attribute access works too + SQUARE + >>> print(result.as_dict()) + {'shape': 'SQUARE', 'posn': 'upper left', 'color': 'light blue', 'texture': 'burlap'} + """ + return Dict(OneOrMore(Group(key + value))) + + +def original_text_for( + expr: ParserElement, as_string: bool = True, **kwargs +) -> ParserElement: + """Helper to return the original, untokenized text for a given + expression. Useful to restore the parsed fields of an HTML start + tag into the raw tag text itself, or to revert separate tokens with + intervening whitespace back to the original matching input text. By + default, returns a string containing the original parsed text. + + If the optional ``as_string`` argument is passed as + ``False``, then the return value is + a :class:`ParseResults` containing any results names that + were originally matched, and a single token containing the original + matched text from the input string. So if the expression passed to + :class:`original_text_for` contains expressions with defined + results names, you must set ``as_string`` to ``False`` if you + want to preserve those results name values. + + The ``asString`` pre-PEP8 argument is retained for compatibility, + but will be removed in a future release. + + Example: + + .. testcode:: + + src = "this is test bold text normal text " + for tag in ("b", "i"): + opener, closer = make_html_tags(tag) + patt = original_text_for(opener + ... + closer) + print(patt.search_string(src)[0]) + + prints: + + .. 
testoutput:: + + [' bold text '] + ['text'] + """ + asString: bool = deprecate_argument(kwargs, "asString", True) + + asString = asString and as_string + + locMarker = Empty().set_parse_action(lambda s, loc, t: loc) + endlocMarker = locMarker.copy() + endlocMarker.callPreparse = False + matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") + if asString: + extractText = lambda s, l, t: s[t._original_start : t._original_end] + else: + + def extractText(s, l, t): + t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]] + + matchExpr.set_parse_action(extractText) + matchExpr.ignoreExprs = expr.ignoreExprs + matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection) + return matchExpr + + +def ungroup(expr: ParserElement) -> ParserElement: + """Helper to undo pyparsing's default grouping of And expressions, + even if all but one are non-empty. + """ + return TokenConverter(expr).add_parse_action(lambda t: t[0]) + + +def locatedExpr(expr: ParserElement) -> ParserElement: + """ + .. deprecated:: 3.0.0 + Use the :class:`Located` class instead. Note that `Located` + returns results with one less grouping level. + + Helper to decorate a returned token with its starting and ending + locations in the input string. + + This helper adds the following results names: + + - ``locn_start`` - location where matched expression begins + - ``locn_end`` - location where matched expression ends + - ``value`` - the actual parsed results + + Be careful if the input text contains ```` characters, you + may want to call :meth:`ParserElement.parse_with_tabs` + """ + warnings.warn( + f"{'locatedExpr'!r} deprecated - use {'Located'!r}", + DeprecationWarning, + stacklevel=2, + ) + + locator = Empty().set_parse_action(lambda ss, ll, tt: ll) + return Group( + locator("locn_start") + + expr("value") + + locator.copy().leave_whitespace()("locn_end") + ) + + +# define special default value to permit None as a significant value for +# ignore_expr +_NO_IGNORE_EXPR_GIVEN = NoMatch() + + +def nested_expr( + opener: Union[str, ParserElement] = "(", + closer: Union[str, ParserElement] = ")", + content: typing.Optional[ParserElement] = None, + ignore_expr: typing.Optional[ParserElement] = _NO_IGNORE_EXPR_GIVEN, + **kwargs, +) -> ParserElement: + """Helper method for defining nested lists enclosed in opening and + closing delimiters (``"("`` and ``")"`` are the default). + + :param opener: str - opening character for a nested list + (default= ``"("``); can also be a pyparsing expression + + :param closer: str - closing character for a nested list + (default= ``")"``); can also be a pyparsing expression + + :param content: expression for items within the nested lists + + :param ignore_expr: expression for ignoring opening and closing delimiters + (default = :class:`quoted_string`) + + Parameter ``ignoreExpr`` is retained for compatibility + but will be removed in a future release. + + If an expression is not provided for the content argument, the + nested expression will capture all whitespace-delimited content + between delimiters as a list of separate values. + + Use the ``ignore_expr`` argument to define expressions that may + contain opening or closing characters that should not be treated as + opening or closing characters for nesting, such as quoted_string or + a comment expression. Specify multiple expressions using an + :class:`Or` or :class:`MatchFirst`. 
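+
+    For instance (an editorial sketch), a quoted ``"("`` does not open a new
+    nesting level when ``quoted_string`` is the ignore expression:
+
+    .. doctest::
+
+        >>> print(nested_expr(ignore_expr=quoted_string).parse_string('(a "(" b)'))
+        [['a', '"("', 'b']]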
The default is + :class:`quoted_string`, but if no expressions are to be ignored, then + pass ``None`` for this argument. + + Example: + + .. testcode:: + + data_type = one_of("void int short long char float double") + decl_data_type = Combine(data_type + Opt(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR, RPAR = map(Suppress, "()") + + code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Opt(DelimitedList(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(c_style_comment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.search_string(source_code): + print(f"{func.name} ({func.type}) args: {func.args}") + + + prints: + + .. testoutput:: + + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] + """ + ignoreExpr: ParserElement = deprecate_argument( + kwargs, "ignoreExpr", _NO_IGNORE_EXPR_GIVEN + ) + + if ignoreExpr != ignore_expr: + ignoreExpr = ignore_expr if ignoreExpr is _NO_IGNORE_EXPR_GIVEN else ignoreExpr # type: ignore [assignment] + + if ignoreExpr is _NO_IGNORE_EXPR_GIVEN: + ignoreExpr = quoted_string() + + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + + if content is None: + if isinstance(opener, str_type) and isinstance(closer, str_type): + opener = typing.cast(str, opener) + closer = typing.cast(str, closer) + if len(opener) == 1 and len(closer) == 1: + if ignoreExpr is not None: + content = Combine( + OneOrMore( + ~ignoreExpr + + CharsNotIn( + opener + closer + ParserElement.DEFAULT_WHITE_CHARS, + exact=1, + ) + ) + ) + else: + content = Combine( + Empty() + + CharsNotIn( + opener + closer + ParserElement.DEFAULT_WHITE_CHARS + ) + ) + else: + if ignoreExpr is not None: + content = Combine( + OneOrMore( + ~ignoreExpr + + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ) + else: + content = Combine( + OneOrMore( + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ) + else: + raise ValueError( + "opening and closing arguments must be strings if no content expression is given" + ) + + # for these internally-created context expressions, simulate whitespace-skipping + if ParserElement.DEFAULT_WHITE_CHARS: + content.set_parse_action( + lambda t: t[0].strip(ParserElement.DEFAULT_WHITE_CHARS) + ) + + ret = Forward() + if ignoreExpr is not None: + ret <<= Group( + _suppression(opener) + + ZeroOrMore(ignoreExpr | ret | content) + + _suppression(closer) + ) + else: + ret <<= Group( + _suppression(opener) + ZeroOrMore(ret | content) + _suppression(closer) + ) + + ret.set_name(f"nested {opener}{closer} expression") + + # don't override error message from content expressions + ret.errmsg = None + return ret + + +def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): + """Internal helper to construct opening and closing tag expressions, + given a tag name""" + if isinstance(tagStr, str_type): + resname = tagStr + tagStr = Keyword(tagStr, caseless=not xml) + else: + resname = tagStr.name + + tagAttrName = Word(alphas, alphanums + "_-:") + if xml: + tagAttrValue = 
dbl_quoted_string.copy().set_parse_action(remove_quotes)
+        openTag = (
+            suppress_LT
+            + tagStr("tag")
+            + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
+            + Opt("/", default=[False])("empty").set_parse_action(
+                lambda s, l, t: t[0] == "/"
+            )
+            + suppress_GT
+        )
+    else:
+        tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
+            printables, exclude_chars=">"
+        )
+        openTag = (
+            suppress_LT
+            + tagStr("tag")
+            + Dict(
+                ZeroOrMore(
+                    Group(
+                        tagAttrName.set_parse_action(lambda t: t[0].lower())
+                        + Opt(Suppress("=") + tagAttrValue)
+                    )
+                )
+            )
+            + Opt("/", default=[False])("empty").set_parse_action(
+                lambda s, l, t: t[0] == "/"
+            )
+            + suppress_GT
+        )
+    closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)
+
+    openTag.set_name(f"<{resname}>")
+    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
+    openTag.add_parse_action(
+        lambda t: t.__setitem__(
+            "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
+        )
+    )
+    closeTag = closeTag(
+        "end" + "".join(resname.replace(":", " ").title().split())
+    ).set_name(f"</{resname}>")
+    openTag.tag = resname
+    closeTag.tag = resname
+    openTag.tag_body = SkipTo(closeTag())
+    return openTag, closeTag
+
+
+def make_html_tags(
+    tag_str: Union[str, ParserElement],
+) -> tuple[ParserElement, ParserElement]:
+    """Helper to construct opening and closing tag expressions for HTML,
+    given a tag name. Matches tags in either upper or lower case,
+    attributes with namespaces and with quoted or unquoted values.
+
+    Example:
+
+    .. testcode::
+
+        text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
+        # make_html_tags returns pyparsing expressions for the opening and
+        # closing tags as a 2-tuple
+        a, a_end = make_html_tags("A")
+        link_expr = a + SkipTo(a_end)("link_text") + a_end
+
+        for link in link_expr.search_string(text):
+            # attributes in the <A> tag (like "href" shown here) are
+            # also accessible as named results
+            print(link.link_text, '->', link.href)
+
+    prints:
+
+    .. testoutput::
+
+        pyparsing -> https://github.com/pyparsing/pyparsing/wiki
+    """
+    return _makeTags(tag_str, False)
+
+
+def make_xml_tags(
+    tag_str: Union[str, ParserElement],
+) -> tuple[ParserElement, ParserElement]:
+    """Helper to construct opening and closing tag expressions for XML,
+    given a tag name. Matches tags only in the given upper/lower case.
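+
+    A short doctest-style sketch (an editorial example, not from the original
+    docs) contrasting the case-sensitive matching with :class:`make_html_tags`:
+
+    .. doctest::
+
+        >>> item, item_end = make_xml_tags("item")
+        >>> item.matches("<item>")
+        True
+        >>> item.matches("<ITEM>")
+        False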
+ + Example: similar to :class:`make_html_tags` + """ + return _makeTags(tag_str, True) + + +any_open_tag: ParserElement +any_close_tag: ParserElement +any_open_tag, any_close_tag = make_html_tags( + Word(alphas, alphanums + "_:").set_name("any tag") +) + +_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()} +_most_common_entities = "nbsp lt gt amp quot apos cent pound euro copy".replace( + " ", "|" +) +common_html_entity = Regex( + lambda: f"&(?P{_most_common_entities}|{make_compressed_re(_htmlEntityMap)});" +).set_name("common HTML entity") + + +def replace_html_entity(s, l, t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) + + +class OpAssoc(Enum): + """Enumeration of operator associativity + - used in constructing InfixNotationOperatorSpec for :class:`infix_notation`""" + + LEFT = 1 + RIGHT = 2 + + +InfixNotationOperatorArgType = Union[ + ParserElement, str, tuple[Union[ParserElement, str], Union[ParserElement, str]] +] +InfixNotationOperatorSpec = Union[ + tuple[ + InfixNotationOperatorArgType, + int, + OpAssoc, + typing.Optional[ParseAction], + ], + tuple[ + InfixNotationOperatorArgType, + int, + OpAssoc, + ], +] + + +def infix_notation( + base_expr: ParserElement, + op_list: list[InfixNotationOperatorSpec], + lpar: Union[str, ParserElement] = Suppress("("), + rpar: Union[str, ParserElement] = Suppress(")"), +) -> Forward: + """Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary + or binary, left- or right-associative. Parse actions can also be + attached to operator expressions. The generated parser will also + recognize the use of parentheses to override operator precedences + (see example below). + + Note: if you define a deep operator list, you may see performance + issues when using infix_notation. See + :class:`ParserElement.enable_packrat` for a mechanism to potentially + improve your parser performance. + + Parameters: + + :param base_expr: expression representing the most basic operand to + be used in the expression + :param op_list: list of tuples, one for each operator precedence level + in the expression grammar; each tuple is of the form ``(op_expr, + num_operands, right_left_assoc, (optional)parse_action)``, where: + + - ``op_expr`` is the pyparsing expression for the operator; may also + be a string, which will be converted to a Literal; if ``num_operands`` + is 3, ``op_expr`` is a tuple of two expressions, for the two + operators separating the 3 terms + - ``num_operands`` is the number of terms for this operator (must be 1, + 2, or 3) + - ``right_left_assoc`` is the indicator whether the operator is right + or left associative, using the pyparsing-defined constants + ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. + - ``parse_action`` is the parse action to be associated with + expressions matching this operator expression (the parse action + tuple member may be omitted); if the parse action is passed + a tuple or list of functions, this is equivalent to calling + ``set_parse_action(*fn)`` + (:class:`ParserElement.set_parse_action`) + + :param lpar: expression for matching left-parentheses; if passed as a + str, then will be parsed as ``Suppress(lpar)``. If lpar is passed as + an expression (such as ``Literal('(')``), then it will be kept in + the parsed results, and grouped with them. 
(default= ``Suppress('(')``) + :param rpar: expression for matching right-parentheses; if passed as a + str, then will be parsed as ``Suppress(rpar)``. If rpar is passed as + an expression (such as ``Literal(')')``), then it will be kept in + the parsed results, and grouped with them. (default= ``Suppress(')')``) + + Example: + + .. testcode:: + + # simple example of four-function arithmetic with ints and + # variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infix_notation(integer | varname, + [ + ('-', 1, OpAssoc.RIGHT), + (one_of('* /'), 2, OpAssoc.LEFT), + (one_of('+ -'), 2, OpAssoc.LEFT), + ]) + + arith_expr.run_tests(''' + 5+3*6 + (5+3)*6 + (5+x)*y + -2--11 + ''', full_dump=False) + + prints: + + .. testoutput:: + :options: +NORMALIZE_WHITESPACE + + + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + (5+x)*y + [[[5, '+', 'x'], '*', 'y']] + + -2--11 + [[['-', 2], '-', ['-', 11]]] + """ + + # captive version of FollowedBy that does not do parse actions or capture results names + class _FB(FollowedBy): + def parseImpl(self, instring, loc, doActions=True): + self.expr.try_parse(instring, loc) + return loc, [] + + _FB.__name__ = "FollowedBy>" + + ret = Forward() + ret.set_name(f"{base_expr.name}_expression") + if isinstance(lpar, str): + lpar = Suppress(lpar) + if isinstance(rpar, str): + rpar = Suppress(rpar) + + nested_expr = (lpar + ret + rpar).set_name(f"nested_{base_expr.name}_expression") + + # if lpar and rpar are not suppressed, wrap in group + if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)): + lastExpr = base_expr | Group(nested_expr) + else: + lastExpr = base_expr | nested_expr + + arity: int + rightLeftAssoc: opAssoc + pa: typing.Optional[ParseAction] + opExpr1: ParserElement + opExpr2: ParserElement + matchExpr: ParserElement + match_lookahead: ParserElement + for operDef in op_list: + opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] # type: ignore[assignment] + if isinstance(opExpr, str_type): + opExpr = ParserElement._literalStringClass(opExpr) + opExpr = typing.cast(ParserElement, opExpr) + if arity == 3: + if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2: + raise ValueError( + "if numterms=3, opExpr must be a tuple or list of two expressions" + ) + opExpr1, opExpr2 = opExpr + term_name = f"{opExpr1}{opExpr2} operations" + else: + term_name = f"{opExpr} operations" + + if not 1 <= arity <= 3: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + + if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): + raise ValueError("operator must indicate right or left associativity") + + thisExpr: ParserElement = Forward().set_name(term_name) + thisExpr = typing.cast(Forward, thisExpr) + match_lookahead = And([]) + if rightLeftAssoc is OpAssoc.LEFT: + if arity == 1: + match_lookahead = _FB(lastExpr + opExpr) + matchExpr = Group(lastExpr + opExpr[1, ...]) + elif arity == 2: + if opExpr is not None: + match_lookahead = _FB(lastExpr + opExpr + lastExpr) + matchExpr = Group(lastExpr + (opExpr + lastExpr)[1, ...]) + else: + match_lookahead = _FB(lastExpr + lastExpr) + matchExpr = Group(lastExpr[2, ...]) + elif arity == 3: + match_lookahead = _FB( + lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr + ) + matchExpr = Group( + lastExpr + (opExpr1 + lastExpr + opExpr2 + lastExpr)[1, ...] 
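+                    # editorial note: the [1, ...] repetition lets chained
+                    # ternary uses such as a ? b : c ? d : e collapse into
+                    # one flat group instead of recursing per operator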
+ ) + elif rightLeftAssoc is OpAssoc.RIGHT: + if arity == 1: + # try to avoid LR with this extra test + if not isinstance(opExpr, Opt): + opExpr = Opt(opExpr) + match_lookahead = _FB(opExpr.expr + thisExpr) + matchExpr = Group(opExpr + thisExpr) + elif arity == 2: + if opExpr is not None: + match_lookahead = _FB(lastExpr + opExpr + thisExpr) + matchExpr = Group(lastExpr + (opExpr + thisExpr)[1, ...]) + else: + match_lookahead = _FB(lastExpr + thisExpr) + matchExpr = Group(lastExpr + thisExpr[1, ...]) + elif arity == 3: + match_lookahead = _FB( + lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr + ) + matchExpr = Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + + # suppress lookahead expr from railroad diagrams + match_lookahead.show_in_diagram = False + + # TODO - determine why this statement can't be included in the following + # if pa block + matchExpr = match_lookahead + matchExpr + + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.set_parse_action(*pa) + else: + matchExpr.set_parse_action(pa) + + thisExpr <<= (matchExpr | lastExpr).set_name(term_name) + lastExpr = thisExpr + + ret <<= lastExpr + return ret + + +def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]): + """ + .. deprecated:: 3.0.0 + Use the :class:`IndentedBlock` class instead. Note that `IndentedBlock` + has a difference method signature. + + Helper method for defining space-delimited indentation blocks, + such as those used to define block statements in Python source code. + + :param blockStatementExpr: expression defining syntax of statement that + is repeated within the indented block + + :param indentStack: list created by caller to manage indentation stack + (multiple ``statementWithIndentedBlock`` expressions within a single + grammar should share a common ``indentStack``) + + :param indent: boolean indicating whether block must be indented beyond + the current level; set to ``False`` for block of left-most statements + + A valid block must contain at least one ``blockStatement``. + + (Note that indentedBlock uses internal parse actions which make it + incompatible with packrat parsing.) + + Example: + + .. testcode:: + + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group(funcDecl + func_body) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << (funcDef | assignment | identifier) + + module_body = stmt[1, ...] + + parseTree = module_body.parseString(data) + parseTree.pprint() + + prints: + + .. 
testoutput:: + + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] + """ + warnings.warn( + f"{'indentedBlock'!r} deprecated - use {'IndentedBlock'!r}", + DeprecationWarning, + stacklevel=2, + ) + + backup_stacks.append(indentStack[:]) + + def reset_stack(): + indentStack[:] = backup_stacks[-1] + + def checkPeerIndent(s, l, t): + if l >= len(s): + return + curCol = col(l, s) + if curCol != indentStack[-1]: + if curCol > indentStack[-1]: + raise ParseException(s, l, "illegal nesting") + raise ParseException(s, l, "not a peer entry") + + def checkSubIndent(s, l, t): + curCol = col(l, s) + if curCol > indentStack[-1]: + indentStack.append(curCol) + else: + raise ParseException(s, l, "not a subentry") + + def checkUnindent(s, l, t): + if l >= len(s): + return + curCol = col(l, s) + if not (indentStack and curCol in indentStack): + raise ParseException(s, l, "not an unindent") + if curCol < indentStack[-1]: + indentStack.pop() + + NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress()) + INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT") + PEER = Empty().set_parse_action(checkPeerIndent).set_name("") + UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT") + if indent: + smExpr = Group( + Opt(NL) + + INDENT + + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) + + UNDENT + ) + else: + smExpr = Group( + Opt(NL) + + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) + + Opt(UNDENT) + ) + + # add a parse action to remove backup_stack from list of backups + smExpr.add_parse_action( + lambda: backup_stacks.pop(-1) and None if backup_stacks else None + ) + smExpr.set_fail_action(lambda a, b, c, d: reset_stack()) + blockStatementExpr.ignore(_bslash + LineEnd()) + return smExpr.set_name("indented block") + + +# it's easy to get these comment structures wrong - they're very common, +# so may as well make them available +c_style_comment = Regex(r"/\*(?:[^*]|\*(?!/))*\*\/").set_name("C style comment") +"Comment of the form ``/* ... */``" + +html_comment = Regex(r"").set_name("HTML comment") +"Comment of the form ````" + +rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line") +dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment") +"Comment of the form ``// ... (to end of line)``" + +cpp_style_comment = Regex( + r"(?:/\*(?:[^*]|\*(?!/))*\*\/)|(?://(?:\\\n|[^\n])*)" +).set_name("C++ style comment") +"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`" + +java_style_comment = cpp_style_comment +"Same as :class:`cpp_style_comment`" + +python_style_comment = Regex(r"#.*").set_name("Python style comment") +"Comment of the form ``# ... 
(to end of line)``" + + +# build list of built-in expressions, for future reference if a global default value +# gets updated +_builtin_exprs: list[ParserElement] = [ + v for v in vars().values() if isinstance(v, ParserElement) +] + + +# compatibility function, superseded by DelimitedList class +def delimited_list( + expr: Union[str, ParserElement], + delim: Union[str, ParserElement] = ",", + combine: bool = False, + min: typing.Optional[int] = None, + max: typing.Optional[int] = None, + *, + allow_trailing_delim: bool = False, +) -> ParserElement: + """ + .. deprecated:: 3.1.0 + Use the :class:`DelimitedList` class instead. + """ + return DelimitedList( + expr, delim, combine, min, max, allow_trailing_delim=allow_trailing_delim + ) + + +# Compatibility synonyms +# fmt: off +opAssoc = OpAssoc +anyOpenTag = any_open_tag +anyCloseTag = any_close_tag +commonHTMLEntity = common_html_entity +cStyleComment = c_style_comment +htmlComment = html_comment +restOfLine = rest_of_line +dblSlashComment = dbl_slash_comment +cppStyleComment = cpp_style_comment +javaStyleComment = java_style_comment +pythonStyleComment = python_style_comment +delimitedList = replaced_by_pep8("delimitedList", DelimitedList) +delimited_list = replaced_by_pep8("delimited_list", DelimitedList) +countedArray = replaced_by_pep8("countedArray", counted_array) +matchPreviousLiteral = replaced_by_pep8("matchPreviousLiteral", match_previous_literal) +matchPreviousExpr = replaced_by_pep8("matchPreviousExpr", match_previous_expr) +oneOf = replaced_by_pep8("oneOf", one_of) +dictOf = replaced_by_pep8("dictOf", dict_of) +originalTextFor = replaced_by_pep8("originalTextFor", original_text_for) +nestedExpr = replaced_by_pep8("nestedExpr", nested_expr) +makeHTMLTags = replaced_by_pep8("makeHTMLTags", make_html_tags) +makeXMLTags = replaced_by_pep8("makeXMLTags", make_xml_tags) +replaceHTMLEntity = replaced_by_pep8("replaceHTMLEntity", replace_html_entity) +infixNotation = replaced_by_pep8("infixNotation", infix_notation) +# fmt: on diff --git a/py311/lib/python3.11/site-packages/pyparsing/py.typed b/py311/lib/python3.11/site-packages/pyparsing/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/pyparsing/results.py b/py311/lib/python3.11/site-packages/pyparsing/results.py new file mode 100644 index 0000000000000000000000000000000000000000..3b76986b49a0d1486d461e3ff512ed92c279860a --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyparsing/results.py @@ -0,0 +1,941 @@ +# results.py + +from __future__ import annotations + +import collections +from collections.abc import ( + MutableMapping, + Mapping, + MutableSequence, + Iterator, + Iterable, +) +import pprint +from typing import Any + +from .util import deprecate_argument + + +str_type: tuple[type, ...] = (str, bytes) +_generator_type = type((_ for _ in ())) + + +class _ParseResultsWithOffset: + tup: tuple[ParseResults, int] + __slots__ = ["tup"] + + def __init__(self, p1: ParseResults, p2: int) -> None: + self.tup: tuple[ParseResults, int] = (p1, p2) + + def __getitem__(self, i): + return self.tup[i] + + def __getstate__(self): + return self.tup + + def __setstate__(self, *args): + self.tup = args[0] + + +class ParseResults: + """Structured parse results, to provide multiple means of access to + the parsed data: + + - as a list (``len(results)``) + - by list index (``results[0], results[1]``, etc.) 
+ - by attribute (``results.`` - see :class:`ParserElement.set_results_name`) + + Example: + + .. testcode:: + + integer = Word(nums) + date_str = (integer.set_results_name("year") + '/' + + integer.set_results_name("month") + '/' + + integer.set_results_name("day")) + # equivalent form: + # date_str = (integer("year") + '/' + # + integer("month") + '/' + # + integer("day")) + + # parse_string returns a ParseResults object + result = date_str.parse_string("1999/12/31") + + def test(s, fn=repr): + print(f"{s} -> {fn(eval(s))}") + + test("list(result)") + test("result[0]") + test("result['month']") + test("result.day") + test("'month' in result") + test("'minutes' in result") + test("result.dump()", str) + + prints: + + .. testoutput:: + + list(result) -> ['1999', '/', '12', '/', '31'] + result[0] -> '1999' + result['month'] -> '12' + result.day -> '31' + 'month' in result -> True + 'minutes' in result -> False + result.dump() -> ['1999', '/', '12', '/', '31'] + - day: '31' + - month: '12' + - year: '1999' + + """ + + _null_values: tuple[Any, ...] = (None, [], ()) + + _name: str + _parent: ParseResults + _all_names: set[str] + _modal: bool + _toklist: list[Any] + _tokdict: dict[str, Any] + + __slots__ = ( + "_name", + "_parent", + "_all_names", + "_modal", + "_toklist", + "_tokdict", + ) + + class List(list): + """ + Simple wrapper class to distinguish parsed list results that should be preserved + as actual Python lists, instead of being converted to :class:`ParseResults`: + + .. testcode:: + + import pyparsing as pp + ppc = pp.common + + LBRACK, RBRACK, LPAR, RPAR = pp.Suppress.using_each("[]()") + element = pp.Forward() + item = ppc.integer + item_list = pp.DelimitedList(element) + element_list = LBRACK + item_list + RBRACK | LPAR + item_list + RPAR + element <<= item | element_list + + # add parse action to convert from ParseResults + # to actual Python collection types + @element_list.add_parse_action + def as_python_list(t): + return pp.ParseResults.List(t.as_list()) + + element.run_tests(''' + 100 + [2,3,4] + [[2, 1],3,4] + [(2, 1),3,4] + (2,3,4) + ([2, 3], 4) + ''', post_parse=lambda s, r: (r[0], type(r[0])) + ) + + prints: + + .. testoutput:: + :options: +NORMALIZE_WHITESPACE + + + 100 + (100, ) + + [2,3,4] + ([2, 3, 4], ) + + [[2, 1],3,4] + ([[2, 1], 3, 4], ) + + [(2, 1),3,4] + ([[2, 1], 3, 4], ) + + (2,3,4) + ([2, 3, 4], ) + + ([2, 3], 4) + ([[2, 3], 4], ) + + (Used internally by :class:`Group` when `aslist=True`.) 
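+
+        An editorial doctest sketch of the constructor guard implemented
+        below - anything other than an actual ``list`` is rejected:
+
+        .. doctest::
+
+            >>> ParseResults.List((1, 2))
+            Traceback (most recent call last):
+                ...
+            TypeError: List may only be constructed with a list, not tuple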
+ """ + + def __new__(cls, contained=None): + if contained is None: + contained = [] + + if not isinstance(contained, list): + raise TypeError( + f"{cls.__name__} may only be constructed with a list, not {type(contained).__name__}" + ) + + return list.__new__(cls) + + def __new__(cls, toklist=None, name=None, **kwargs): + if isinstance(toklist, ParseResults): + return toklist + self = object.__new__(cls) + self._name = None + self._parent = None + self._all_names = set() + + if toklist is None: + self._toklist = [] + elif isinstance(toklist, (list, _generator_type)): + self._toklist = ( + [toklist[:]] + if isinstance(toklist, ParseResults.List) + else list(toklist) + ) + else: + self._toklist = [toklist] + self._tokdict = dict() + return self + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( + self, + toklist=None, + name=None, + aslist=True, + modal=True, + isinstance=isinstance, + **kwargs, + ) -> None: + asList = deprecate_argument(kwargs, "asList", True, new_name="aslist") + + asList = asList and aslist + self._tokdict: dict[str, _ParseResultsWithOffset] + self._modal = modal + + if name is None or name == "": + return + + if isinstance(name, int): + name = str(name) + + if not modal: + self._all_names = {name} + + self._name = name + + if toklist in self._null_values: + return + + if isinstance(toklist, (str_type, type)): + toklist = [toklist] + + if asList: + if isinstance(toklist, ParseResults): + self[name] = _ParseResultsWithOffset(ParseResults(toklist._toklist), 0) + else: + self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0) + self[name]._name = name + return + + try: + self[name] = toklist[0] + except (KeyError, TypeError, IndexError): + if toklist is not self: + self[name] = toklist + else: + self._name = name + + def __getitem__(self, i): + if isinstance(i, (int, slice)): + return self._toklist[i] + + if i not in self._all_names: + return self._tokdict[i][-1][0] + + return ParseResults([v[0] for v in self._tokdict[i]]) + + def __setitem__(self, k, v, isinstance=isinstance): + if isinstance(v, _ParseResultsWithOffset): + self._tokdict[k] = self._tokdict.get(k, list()) + [v] + sub = v[0] + elif isinstance(k, (int, slice)): + self._toklist[k] = v + sub = v + else: + self._tokdict[k] = self._tokdict.get(k, []) + [ + _ParseResultsWithOffset(v, 0) + ] + sub = v + if isinstance(sub, ParseResults): + sub._parent = self + + def __delitem__(self, i): + if not isinstance(i, (int, slice)): + del self._tokdict[i] + return + + mylen = len(self._toklist) + del self._toklist[i] + + # convert int to slice + if isinstance(i, int): + if i < 0: + i += mylen + i = slice(i, i + 1) + # get removed indices + removed = list(range(*i.indices(mylen))) + removed.reverse() + # fixup indices in token dictionary + for occurrences in self._tokdict.values(): + for j in removed: + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset( + value, position - (position > j) + ) + + def __contains__(self, k) -> bool: + return k in self._tokdict + + def __len__(self) -> int: + return len(self._toklist) + + def __bool__(self) -> bool: + return not not (self._toklist or self._tokdict) + + def __iter__(self) -> Iterator: + return iter(self._toklist) + + def __reversed__(self) -> Iterator: + return iter(self._toklist[::-1]) + + def keys(self): + return iter(self._tokdict) + + def values(self): + return (self[k] for k in self.keys()) + + def items(self): + return ((k, self[k]) for k in 
self.keys()) + + def haskeys(self) -> bool: + """ + Since ``keys()`` returns an iterator, this method is helpful in bypassing + code that looks for the existence of any defined results names.""" + return not not self._tokdict + + def pop(self, *args, **kwargs): + """ + Removes and returns item at specified index (default= ``last``). + Supports both ``list`` and ``dict`` semantics for ``pop()``. If + passed no argument or an integer argument, it will use ``list`` + semantics and pop tokens from the list of parsed tokens. If passed + a non-integer argument (most likely a string), it will use ``dict`` + semantics and pop the corresponding value from any defined results + names. A second default return value argument is supported, just as in + ``dict.pop()``. + + Example: + + .. doctest:: + + >>> numlist = Word(nums)[...] + >>> print(numlist.parse_string("0 123 321")) + ['0', '123', '321'] + + >>> def remove_first(tokens): + ... tokens.pop(0) + ... + >>> numlist.add_parse_action(remove_first) + [W:(0-9)]... + >>> print(numlist.parse_string("0 123 321")) + ['123', '321'] + + >>> label = Word(alphas) + >>> patt = label("LABEL") + Word(nums)[1, ...] + >>> print(patt.parse_string("AAB 123 321").dump()) + ['AAB', '123', '321'] + - LABEL: 'AAB' + + >>> # Use pop() in a parse action to remove named result + >>> # (note that corresponding value is not + >>> # removed from list form of results) + >>> def remove_LABEL(tokens): + ... tokens.pop("LABEL") + ... return tokens + ... + >>> patt.add_parse_action(remove_LABEL) + {W:(A-Za-z) {W:(0-9)}...} + >>> print(patt.parse_string("AAB 123 321").dump()) + ['AAB', '123', '321'] + + """ + if not args: + args = [-1] + for k, v in kwargs.items(): + if k == "default": + args = (args[0], v) + else: + raise TypeError(f"pop() got an unexpected keyword argument {k!r}") + if isinstance(args[0], int) or len(args) == 1 or args[0] in self: + index = args[0] + ret = self[index] + del self[index] + return ret + else: + defaultvalue = args[1] + return defaultvalue + + def get(self, key, default_value=None): + """ + Returns named result matching the given key, or if there is no + such name, then returns the given ``default_value`` or ``None`` if no + ``default_value`` is specified. + + Similar to ``dict.get()``. + + Example: + + .. doctest:: + + >>> integer = Word(nums) + >>> date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + >>> result = date_str.parse_string("1999/12/31") + >>> result.get("year") + '1999' + >>> result.get("hour", "not specified") + 'not specified' + >>> result.get("hour") + + """ + if key in self: + return self[key] + else: + return default_value + + def insert(self, index, ins_string): + """ + Inserts new element at location index in the list of parsed tokens. + + Similar to ``list.insert()``. + + Example: + + .. doctest:: + + >>> numlist = Word(nums)[...] + >>> print(numlist.parse_string("0 123 321")) + ['0', '123', '321'] + + >>> # use a parse action to insert the parse location + >>> # in the front of the parsed results + >>> def insert_locn(locn, tokens): + ... tokens.insert(0, locn) + ... + >>> numlist.add_parse_action(insert_locn) + [W:(0-9)]... 
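+            >>> # editorial note: insert() also shifts the offsets of any
+            >>> # named results (see the offset fixup in the method below)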
+ >>> print(numlist.parse_string("0 123 321")) + [0, '0', '123', '321'] + + """ + self._toklist.insert(index, ins_string) + # fixup indices in token dictionary + for occurrences in self._tokdict.values(): + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset( + value, position + (position > index) + ) + + def append(self, item): + """ + Add single element to end of ``ParseResults`` list of elements. + + Example: + + .. doctest:: + + >>> numlist = Word(nums)[...] + >>> print(numlist.parse_string("0 123 321")) + ['0', '123', '321'] + + >>> # use a parse action to compute the sum of the parsed integers, + >>> # and add it to the end + >>> def append_sum(tokens): + ... tokens.append(sum(map(int, tokens))) + ... + >>> numlist.add_parse_action(append_sum) + [W:(0-9)]... + >>> print(numlist.parse_string("0 123 321")) + ['0', '123', '321', 444] + """ + self._toklist.append(item) + + def extend(self, itemseq): + """ + Add sequence of elements to end of :class:`ParseResults` list of elements. + + Example: + + .. testcode:: + + patt = Word(alphas)[1, ...] + + # use a parse action to append the reverse of the matched strings, + # to make a palindrome + def make_palindrome(tokens): + tokens.extend(reversed([t[::-1] for t in tokens])) + return ''.join(tokens) + + patt.add_parse_action(make_palindrome) + print(patt.parse_string("lskdj sdlkjf lksd")) + + prints: + + .. testoutput:: + + ['lskdjsdlkjflksddsklfjkldsjdksl'] + """ + if isinstance(itemseq, ParseResults): + self.__iadd__(itemseq) + else: + self._toklist.extend(itemseq) + + def clear(self): + """ + Clear all elements and results names. + """ + del self._toklist[:] + self._tokdict.clear() + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + if name.startswith("__"): + raise AttributeError(name) + return "" + + def __add__(self, other: ParseResults) -> ParseResults: + ret = self.copy() + ret += other + return ret + + def __iadd__(self, other: ParseResults) -> ParseResults: + if not other: + return self + + if other._tokdict: + offset = len(self._toklist) + addoffset = lambda a: offset if a < 0 else a + offset + otheritems = other._tokdict.items() + otherdictitems = [ + (k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) + for k, vlist in otheritems + for v in vlist + ] + for k, v in otherdictitems: + self[k] = v + if isinstance(v[0], ParseResults): + v[0]._parent = self + + self._toklist += other._toklist + self._all_names |= other._all_names + return self + + def __radd__(self, other) -> ParseResults: + if isinstance(other, int) and other == 0: + # useful for merging many ParseResults using sum() builtin + return self.copy() + else: + # this may raise a TypeError - so be it + return other + self + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._toklist!r}, {self.as_dict()})" + + def __str__(self) -> str: + return ( + "[" + + ", ".join( + [ + str(i) if isinstance(i, ParseResults) else repr(i) + for i in self._toklist + ] + ) + + "]" + ) + + def _asStringList(self, sep=""): + out = [] + for item in self._toklist: + if out and sep: + out.append(sep) + if isinstance(item, ParseResults): + out += item._asStringList() + else: + out.append(str(item)) + return out + + def as_list(self, *, flatten: bool = False) -> list: + """ + Returns the parse results as a nested list of matching tokens, all converted to strings. + If ``flatten`` is True, all the nesting levels in the returned list are collapsed. + + Example: + + .. doctest:: + + >>> patt = Word(alphas)[1, ...] 
+ >>> result = patt.parse_string("sldkj lsdkj sldkj") + >>> # even though the result prints in string-like form, + >>> # it is actually a pyparsing ParseResults + >>> type(result) + + >>> print(result) + ['sldkj', 'lsdkj', 'sldkj'] + + .. doctest:: + + >>> # Use as_list() to create an actual list + >>> result_list = result.as_list() + >>> type(result_list) + + >>> print(result_list) + ['sldkj', 'lsdkj', 'sldkj'] + + .. versionchanged:: 3.2.0 + New ``flatten`` argument. + """ + + def flattened(pr): + to_visit = collections.deque([*self]) + while to_visit: + to_do = to_visit.popleft() + if isinstance(to_do, ParseResults): + to_visit.extendleft(to_do[::-1]) + else: + yield to_do + + if flatten: + return [*flattened(self)] + else: + return [ + res.as_list() if isinstance(res, ParseResults) else res + for res in self._toklist + ] + + def as_dict(self) -> dict: + """ + Returns the named parse results as a nested dictionary. + + Example: + + .. doctest:: + + >>> integer = pp.Word(pp.nums) + >>> date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + >>> result = date_str.parse_string('1999/12/31') + >>> type(result) + + >>> result + ParseResults(['1999', '/', '12', '/', '31'], {'year': '1999', 'month': '12', 'day': '31'}) + + >>> result_dict = result.as_dict() + >>> type(result_dict) + + >>> result_dict + {'year': '1999', 'month': '12', 'day': '31'} + + >>> # even though a ParseResults supports dict-like access, + >>> # sometime you just need to have a dict + >>> import json + >>> print(json.dumps(result)) + Traceback (most recent call last): + TypeError: Object of type ParseResults is not JSON serializable + >>> print(json.dumps(result.as_dict())) + {"year": "1999", "month": "12", "day": "31"} + """ + + def to_item(obj): + if isinstance(obj, ParseResults): + return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj] + else: + return obj + + return dict((k, to_item(v)) for k, v in self.items()) + + def copy(self) -> ParseResults: + """ + Returns a new shallow copy of a :class:`ParseResults` object. + :class:`ParseResults` items contained within the source are + shared with the copy. Use :meth:`ParseResults.deepcopy` to + create a copy with its own separate content values. + """ + ret = ParseResults(self._toklist) + ret._tokdict = self._tokdict.copy() + ret._parent = self._parent + ret._all_names |= self._all_names + ret._name = self._name + return ret + + def deepcopy(self) -> ParseResults: + """ + Returns a new deep copy of a :class:`ParseResults` object. + + .. versionadded:: 3.1.0 + """ + ret = self.copy() + # replace values with copies if they are of known mutable types + for i, obj in enumerate(self._toklist): + if isinstance(obj, ParseResults): + ret._toklist[i] = obj.deepcopy() + elif isinstance(obj, (str, bytes)): + pass + elif isinstance(obj, MutableMapping): + ret._toklist[i] = dest = type(obj)() + for k, v in obj.items(): + dest[k] = v.deepcopy() if isinstance(v, ParseResults) else v + elif isinstance(obj, Iterable): + ret._toklist[i] = type(obj)( + v.deepcopy() if isinstance(v, ParseResults) else v for v in obj # type: ignore[call-arg] + ) + return ret + + def get_name(self) -> str | None: + r""" + Returns the results name for this token expression. + + Useful when several different expressions might match + at a particular location. + + Example: + + .. 
testcode::
+
+            integer = Word(nums)
+            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
+            house_number_expr = Suppress('#') + Word(nums, alphanums)
+            user_data = (Group(house_number_expr)("house_number")
+                         | Group(ssn_expr)("ssn")
+                         | Group(integer)("age"))
+            user_info = user_data[1, ...]
+
+            result = user_info.parse_string("22 111-22-3333 #221B")
+            for item in result:
+                print(item.get_name(), ':', item[0])
+
+        prints:
+
+        .. testoutput::
+
+            age : 22
+            ssn : 111-22-3333
+            house_number : 221B
+
+        """
+        if self._name:
+            return self._name
+        elif self._parent:
+            par: ParseResults = self._parent
+            parent_tokdict_items = par._tokdict.items()
+            return next(
+                (
+                    k
+                    for k, vlist in parent_tokdict_items
+                    for v, loc in vlist
+                    if v is self
+                ),
+                None,
+            )
+        elif (
+            len(self) == 1
+            and len(self._tokdict) == 1
+            and next(iter(self._tokdict.values()))[0][1] in (0, -1)
+        ):
+            return next(iter(self._tokdict.keys()))
+        else:
+            return None
+
+    def dump(self, indent="", full=True, include_list=True, _depth=0) -> str:
+        """
+        Diagnostic method for listing out the contents of
+        a :class:`ParseResults`. Accepts an optional ``indent`` argument so
+        that this string can be embedded in a nested display of other data.
+
+        Example:
+
+        .. testcode::
+
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+            result = date_str.parse_string('1999/12/31')
+            print(result.dump())
+
+        prints:
+
+        .. testoutput::
+
+            ['1999', '/', '12', '/', '31']
+            - day: '31'
+            - month: '12'
+            - year: '1999'
+        """
+        out = []
+        NL = "\n"
+        out.append(indent + str(self.as_list()) if include_list else "")
+
+        if not full:
+            return "".join(out)
+
+        if self.haskeys():
+            items = sorted((str(k), v) for k, v in self.items())
+            for k, v in items:
+                if out:
+                    out.append(NL)
+                out.append(f"{indent}{(' ' * _depth)}- {k}: ")
+                if not isinstance(v, ParseResults):
+                    out.append(repr(v))
+                    continue
+
+                if not v:
+                    out.append(str(v))
+                    continue
+
+                out.append(
+                    v.dump(
+                        indent=indent,
+                        full=full,
+                        include_list=include_list,
+                        _depth=_depth + 1,
+                    )
+                )
+        if not any(isinstance(vv, ParseResults) for vv in self):
+            return "".join(out)
+
+        v = self
+        incr = "  "
+        nl = "\n"
+        for i, vv in enumerate(v):
+            if isinstance(vv, ParseResults):
+                vv_dump = vv.dump(
+                    indent=indent,
+                    full=full,
+                    include_list=include_list,
+                    _depth=_depth + 1,
+                )
+                out.append(
+                    f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv_dump}"
+                )
+            else:
+                out.append(
+                    f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv}"
+                )
+
+        return "".join(out)
+
+    def pprint(self, *args, **kwargs):
+        """
+        Pretty-printer for parsed results as a list, using the
+        `pprint <https://docs.python.org/3/library/pprint.html>`_ module.
+        Accepts additional positional or keyword args as defined for
+        `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
+
+        Example:
+
+        .. testcode::
+
+            ident = Word(alphas, alphanums)
+            num = Word(nums)
+            func = Forward()
+            term = ident | num | Group('(' + func + ')')
+            func <<= ident + Group(Optional(DelimitedList(term)))
+            result = func.parse_string("fna a,b,(fnb c,d,200),100")
+            result.pprint(width=40)
+
+        prints:
+
+        .. 
testoutput:: + + ['fna', + ['a', + 'b', + ['(', 'fnb', ['c', 'd', '200'], ')'], + '100']] + """ + pprint.pprint(self.as_list(), *args, **kwargs) + + # add support for pickle protocol + def __getstate__(self): + return ( + self._toklist, + ( + self._tokdict.copy(), + None, + self._all_names, + self._name, + ), + ) + + def __setstate__(self, state): + self._toklist, (self._tokdict, par, inAccumNames, self._name) = state + self._all_names = set(inAccumNames) + self._parent = None + + def __getnewargs__(self): + return self._toklist, self._name + + def __dir__(self): + return dir(type(self)) + list(self.keys()) + + @classmethod + def from_dict(cls, other, name=None) -> ParseResults: + """ + Helper classmethod to construct a :class:`ParseResults` from a ``dict``, preserving the + name-value relations as results names. If an optional ``name`` argument is + given, a nested :class:`ParseResults` will be returned. + """ + + def is_iterable(obj): + try: + iter(obj) + except Exception: + return False + # str's are iterable, but in pyparsing, we don't want to iterate over them + else: + return not isinstance(obj, str_type) + + ret = cls([]) + for k, v in other.items(): + if isinstance(v, Mapping): + ret += cls.from_dict(v, name=k) + else: + ret += cls([v], name=k, aslist=is_iterable(v)) + if name is not None: + ret = cls([ret], name=name) + return ret + + asList = as_list + """ + .. deprecated:: 3.0.0 + use :meth:`as_list` + """ + asDict = as_dict + """ + .. deprecated:: 3.0.0 + use :meth:`as_dict` + """ + getName = get_name + """ + .. deprecated:: 3.0.0 + use :meth:`get_name` + """ + + +MutableMapping.register(ParseResults) +MutableSequence.register(ParseResults) diff --git a/py311/lib/python3.11/site-packages/pyparsing/testing.py b/py311/lib/python3.11/site-packages/pyparsing/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..22dbc7d77322382040f32dd9e648bb8a91809244 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyparsing/testing.py @@ -0,0 +1,398 @@ +# testing.py + +from contextlib import contextmanager +import re +import typing +import unittest + + +from .core import ( + ParserElement, + ParseException, + Keyword, + __diag__, + __compat__, +) +from . import core_builtin_exprs + + +class pyparsing_test: + """ + namespace class for classes useful in writing unit tests + """ + + class reset_pyparsing_context: + """ + Context manager to be used when writing unit tests that modify pyparsing config values: + - packrat parsing + - bounded recursion parsing + - default whitespace characters + - default keyword characters + - literal string auto-conversion class + - ``__diag__`` settings + + Example: + + .. testcode:: + + ppt = pyparsing.pyparsing_test + + class MyTestClass(ppt.TestParseResultsAsserts): + def test_literal(self): + with ppt.reset_pyparsing_context(): + # test that literals used to construct + # a grammar are automatically suppressed + ParserElement.inline_literals_using(Suppress) + + term = Word(alphas) | Word(nums) + group = Group('(' + term[...] 
+ ')')
+
+                    # assert that the '()' characters
+                    # are not included in the parsed tokens
+                    self.assertParseAndCheckList(
+                        group,
+                        "(abc 123 def)",
+                        ['abc', '123', 'def']
+                    )
+
+                # after exiting context manager, literals
+                # are converted to Literal expressions again
+        """
+
+        def __init__(self):
+            self._save_context = {}
+
+        def save(self):
+            self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
+            self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
+
+            self._save_context["literal_string_class"] = (
+                ParserElement._literalStringClass
+            )
+
+            self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
+
+            self._save_context["packrat_enabled"] = ParserElement._packratEnabled
+            if ParserElement._packratEnabled:
+                self._save_context["packrat_cache_size"] = (
+                    ParserElement.packrat_cache.size
+                )
+            else:
+                self._save_context["packrat_cache_size"] = None
+            self._save_context["packrat_parse"] = ParserElement._parse
+            self._save_context["recursion_enabled"] = (
+                ParserElement._left_recursion_enabled
+            )
+
+            self._save_context["__diag__"] = {
+                name: getattr(__diag__, name) for name in __diag__._all_names
+            }
+
+            self._save_context["__compat__"] = {
+                "collect_all_And_tokens": __compat__.collect_all_And_tokens
+            }
+
+            return self
+
+        def restore(self):
+            # reset pyparsing global state
+            if (
+                ParserElement.DEFAULT_WHITE_CHARS
+                != self._save_context["default_whitespace"]
+            ):
+                ParserElement.set_default_whitespace_chars(
+                    self._save_context["default_whitespace"]
+                )
+
+            ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
+
+            Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
+            ParserElement.inline_literals_using(
+                self._save_context["literal_string_class"]
+            )
+
+            for name, value in self._save_context["__diag__"].items():
+                (__diag__.enable if value else __diag__.disable)(name)
+
+            ParserElement._packratEnabled = False
+            if self._save_context["packrat_enabled"]:
+                ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
+            else:
+                ParserElement._parse = self._save_context["packrat_parse"]
+            ParserElement._left_recursion_enabled = self._save_context[
+                "recursion_enabled"
+            ]
+
+            # clear debug flags on all builtins
+            for expr in core_builtin_exprs:
+                expr.set_debug(False)
+
+            # restore the saved boolean, not the dict that holds it
+            __compat__.collect_all_And_tokens = self._save_context["__compat__"][
+                "collect_all_And_tokens"
+            ]
+
+            return self
+
+        def copy(self):
+            ret = type(self)()
+            ret._save_context.update(self._save_context)
+            return ret
+
+        def __enter__(self):
+            return self.save()
+
+        def __exit__(self, *args):
+            self.restore()
+
+    class TestParseResultsAsserts(unittest.TestCase):
+        """
+        A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
+        """
+
+        def assertParseResultsEquals(
+            self, result, expected_list=None, expected_dict=None, msg=None
+        ):
+            """
+            Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
+            and compare any defined results names with an optional ``expected_dict``.
+            """
+            if expected_list is not None:
+                self.assertEqual(expected_list, result.as_list(), msg=msg)
+            if expected_dict is not None:
+                self.assertEqual(expected_dict, result.as_dict(), msg=msg)
+
+        def assertParseAndCheckList(
+            self, expr, test_string, expected_list, msg=None, verbose=True
+        ):
+            """
+            Convenience wrapper assert to test a parser element and input string, and assert that
+            the resulting :meth:`ParseResults.as_list` is equal to the ``expected_list``.
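+
+            Example (an illustrative sketch, not from the original test suite)::
+
+                self.assertParseAndCheckList(
+                    Word(nums)[1, ...], "1 2 3", ["1", "2", "3"]
+                )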
+            """
+            result = expr.parse_string(test_string, parse_all=True)
+            if verbose:
+                print(result.dump())
+            else:
+                print(result.as_list())
+            self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
+
+        def assertParseAndCheckDict(
+            self, expr, test_string, expected_dict, msg=None, verbose=True
+        ):
+            """
+            Convenience wrapper assert to test a parser element and input string, and assert that
+            the resulting :meth:`ParseResults.as_dict` is equal to the ``expected_dict``.
+            """
+            result = expr.parse_string(test_string, parse_all=True)
+            if verbose:
+                print(result.dump())
+            else:
+                print(result.as_list())
+            self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
+
+        def assertRunTestResults(
+            self, run_tests_report, expected_parse_results=None, msg=None
+        ):
+            """
+            Unit test assertion to evaluate output of
+            :meth:`~ParserElement.run_tests`.
+
+            If a list of list-dict tuples is given as the
+            ``expected_parse_results`` argument, then these are zipped
+            with the report tuples returned by ``run_tests()``
+            and evaluated using :meth:`assertParseResultsEquals`.
+            Finally, asserts that the overall
+            :meth:`~ParserElement.run_tests` success value is ``True``.
+
+            :param run_tests_report: the return value from :meth:`ParserElement.run_tests`
+            :type run_tests_report: tuple[bool, list[tuple[str, ParseResults | Exception]]]
+            :param expected_parse_results: (optional)
+            :type expected_parse_results: list[tuple[str | list | dict | Exception, ...]]
+            """
+            run_test_success, run_test_results = run_tests_report
+
+            if expected_parse_results is None:
+                self.assertTrue(
+                    run_test_success, msg=msg if msg is not None else "failed runTests"
+                )
+                return
+
+            merged = [
+                (*rpt, expected)
+                for rpt, expected in zip(run_test_results, expected_parse_results)
+            ]
+            for test_string, result, expected in merged:
+                # expected should be a tuple containing a list and/or a dict or an exception,
+                # and optional failure message string
+                # an empty tuple will skip any result validation
+                fail_msg = next((exp for exp in expected if isinstance(exp, str)), None)
+                expected_exception = next(
+                    (
+                        exp
+                        for exp in expected
+                        if isinstance(exp, type) and issubclass(exp, Exception)
+                    ),
+                    None,
+                )
+                if expected_exception is not None:
+                    with self.assertRaises(
+                        expected_exception=expected_exception, msg=fail_msg or msg
+                    ):
+                        if isinstance(result, Exception):
+                            raise result
+                else:
+                    expected_list = next(
+                        (exp for exp in expected if isinstance(exp, list)), None
+                    )
+                    expected_dict = next(
+                        (exp for exp in expected if isinstance(exp, dict)), None
+                    )
+                    if (expected_list, expected_dict) != (None, None):
+                        self.assertParseResultsEquals(
+                            result,
+                            expected_list=expected_list,
+                            expected_dict=expected_dict,
+                            msg=fail_msg or msg,
+                        )
+                    else:
+                        # warning here maybe?
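+                        # (an empty expected tuple deliberately skips result
+                        # validation, per the docstring above, so just note it
+                        # on the console)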
+                        print(f"no validation for {test_string!r}")
+
+            # do this last, in case some specific test results can be reported instead
+            self.assertTrue(
+                run_test_success, msg=msg if msg is not None else "failed runTests"
+            )
+
+        @contextmanager
+        def assertRaisesParseException(
+            self, exc_type=ParseException, expected_msg=None, msg=None
+        ):
+            if expected_msg is not None:
+                if isinstance(expected_msg, str):
+                    expected_msg = re.escape(expected_msg)
+                with self.assertRaisesRegex(exc_type, expected_msg, msg=msg) as ctx:
+                    yield ctx
+
+            else:
+                with self.assertRaises(exc_type, msg=msg) as ctx:
+                    yield ctx
+
+    @staticmethod
+    def with_line_numbers(
+        s: str,
+        start_line: typing.Optional[int] = None,
+        end_line: typing.Optional[int] = None,
+        expand_tabs: bool = True,
+        eol_mark: str = "|",
+        mark_spaces: typing.Optional[str] = None,
+        mark_control: typing.Optional[str] = None,
+        *,
+        indent: typing.Union[str, int] = "",
+        base_1: bool = True,
+    ) -> str:
+        """
+        Helpful method for debugging a parser - prints a string with line and column numbers.
+        (Line and column numbers are 1-based by default - if debugging a parse action,
+        pass base_1=False to correspond to the loc value passed to the parse action.)
+
+        :param s: string to be printed with line and column numbers
+        :param start_line: starting line number in s to print (default=1)
+        :param end_line: ending line number in s to print (default=number of lines in s)
+        :param expand_tabs: expand tabs to spaces, to match the pyparsing default
+        :param eol_mark: string to mark the end of lines, helps visualize trailing spaces
+        :param mark_spaces: special character to display in place of spaces
+        :param mark_control: convert non-printing control characters to a placeholding
+            character; valid values:
+
+            - ``"unicode"`` - replaces control chars with Unicode symbols, such as "␍" and "␊"
+            - any single character string - replace control characters with given string
+            - ``None`` (default) - string is displayed as-is
+
+        :param indent: string to indent with line and column numbers; if an int
+            is passed, converted to ``" " * indent``
+        :param base_1: whether to label string using base 1; if False, string will be
+            labeled based at 0
+
+        :returns: input string with leading line numbers and column number headers
+
+        .. versionchanged:: 3.2.0
+           New ``indent`` and ``base_1`` arguments.
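+
+        Example (an illustrative sketch; the exact header layout depends on
+        the input, so no output is shown)::
+
+            data = chr(10).join(["abc", "defg"])
+            print(pyparsing_test.with_line_numbers(data, eol_mark="|"))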
+ """ + if expand_tabs: + s = s.expandtabs() + if isinstance(indent, int): + indent = " " * indent + indent = indent.expandtabs() + if mark_control is not None: + mark_control = typing.cast(str, mark_control) + if mark_control == "unicode": + transtable_map = { + c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433)) + } + transtable_map[127] = 0x2421 + tbl = str.maketrans(transtable_map) + eol_mark = "" + else: + ord_mark_control = ord(mark_control) + tbl = str.maketrans( + {c: ord_mark_control for c in list(range(0, 32)) + [127]} + ) + s = s.translate(tbl) + if mark_spaces is not None and mark_spaces != " ": + if mark_spaces == "unicode": + tbl = str.maketrans({9: 0x2409, 32: 0x2423}) + s = s.translate(tbl) + else: + s = s.replace(" ", mark_spaces) + if start_line is None: + start_line = 0 + if end_line is None: + end_line = len(s.splitlines()) + end_line = min(end_line, len(s.splitlines())) + start_line = min(max(0, start_line), end_line) + + if mark_control != "unicode": + s_lines = s.splitlines()[max(start_line - base_1, 0) : end_line] + else: + s_lines = [ + line + "␊" + for line in s.split("␊")[max(start_line - base_1, 0) : end_line] + ] + if not s_lines: + return "" + + lineno_width = len(str(end_line)) + max_line_len = max(len(line) for line in s_lines) + lead = indent + " " * (lineno_width + 1) + + if max_line_len >= 99: + header0 = ( + lead + + ("" if base_1 else " ") + + "".join( + f"{' ' * 99}{(i + 1) % 100}" + for i in range(max(max_line_len // 100, 1)) + ) + + "\n" + ) + else: + header0 = "" + + header1 = ( + ("" if base_1 else " ") + + lead + + "".join(f" {(i + 1) % 10}" for i in range(-(-max_line_len // 10))) + + "\n" + ) + digits = "1234567890" + header2 = ( + lead + ("" if base_1 else "0") + digits * (-(-max_line_len // 10)) + "\n" + ) + return ( + header0 + + header1 + + header2 + + "\n".join( + f"{indent}{i:{lineno_width}d}:{line}{eol_mark}" + for i, line in enumerate(s_lines, start=start_line + base_1) + ) + + "\n" + ) diff --git a/py311/lib/python3.11/site-packages/pyparsing/unicode.py b/py311/lib/python3.11/site-packages/pyparsing/unicode.py new file mode 100644 index 0000000000000000000000000000000000000000..066486c28eea020d420c2e90fdda76f69f1c9ead --- /dev/null +++ b/py311/lib/python3.11/site-packages/pyparsing/unicode.py @@ -0,0 +1,356 @@ +# unicode.py + +import sys +from itertools import filterfalse +from typing import Union + + +class _lazyclassproperty: + def __init__(self, fn): + self.fn = fn + self.__doc__ = fn.__doc__ + self.__name__ = fn.__name__ + + def __get__(self, obj, cls): + if cls is None: + cls = type(obj) + if not hasattr(cls, "_intern") or any( + cls._intern is getattr(superclass, "_intern", []) + for superclass in cls.__mro__[1:] + ): + cls._intern = {} + attrname = self.fn.__name__ + if attrname not in cls._intern: + cls._intern[attrname] = self.fn(cls) + return cls._intern[attrname] + + +UnicodeRangeList = list[Union[tuple[int, int], tuple[int]]] + + +class unicode_set: + """ + A set of Unicode characters, for language-specific strings for + ``alphas``, ``nums``, ``alphanums``, and ``printables``. + A unicode_set is defined by a list of ranges in the Unicode character + set, in a class attribute ``_ranges``. Ranges can be specified using + 2-tuples or a 1-tuple, such as:: + + _ranges = [ + (0x0020, 0x007e), + (0x00a0, 0x00ff), + (0x0100,), + ] + + Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x). 
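+
+    For example, a hypothetical editor-added set covering the Unicode
+    Currency Symbols block (an illustration, not one of the predefined
+    sets below)::
+
+        class Currency(unicode_set):
+            _ranges = [(0x20A0, 0x20BF)]
+
+        # Currency.printables then contains "₠" through "₿"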
+ + A unicode set can also be defined using multiple inheritance of other unicode sets:: + + class CJK(Chinese, Japanese, Korean): + pass + """ + + _ranges: UnicodeRangeList = [] + + @_lazyclassproperty + def _chars_for_ranges(cls) -> list[str]: + ret: list[int] = [] + for cc in cls.__mro__: # type: ignore[attr-defined] + if cc is unicode_set: + break + for rr in getattr(cc, "_ranges", ()): + ret.extend(range(rr[0], rr[-1] + 1)) + return sorted(chr(c) for c in set(ret)) + + @_lazyclassproperty + def printables(cls) -> str: + """all non-whitespace characters in this range""" + return "".join(filterfalse(str.isspace, cls._chars_for_ranges)) + + @_lazyclassproperty + def alphas(cls) -> str: + """all alphabetic characters in this range""" + return "".join(filter(str.isalpha, cls._chars_for_ranges)) + + @_lazyclassproperty + def nums(cls) -> str: + """all numeric digit characters in this range""" + return "".join(filter(str.isdigit, cls._chars_for_ranges)) + + @_lazyclassproperty + def alphanums(cls) -> str: + """all alphanumeric characters in this range""" + return cls.alphas + cls.nums + + @_lazyclassproperty + def identchars(cls) -> str: + """all characters in this range that are valid identifier characters, plus underscore '_'""" + return "".join( + sorted( + set(filter(str.isidentifier, cls._chars_for_ranges)) + | set( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº" + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" + "_" + ) + ) + ) + + @_lazyclassproperty + def identbodychars(cls) -> str: + """ + all characters in this range that are valid identifier body characters, + plus the digits 0-9, and · (Unicode MIDDLE DOT) + """ + identifier_chars = set( + c for c in cls._chars_for_ranges if ("_" + c).isidentifier() + ) + return "".join( + sorted(identifier_chars | set(cls.identchars) | set("0123456789·")) + ) + + @_lazyclassproperty + def identifier(cls): + """ + a pyparsing Word expression for an identifier using this range's definitions for + identchars and identbodychars + """ + from pyparsing import Word + + return Word(cls.identchars, cls.identbodychars) + + +class pyparsing_unicode(unicode_set): + """ + A namespace class for defining common language unicode_sets. 
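+
+    Example (an illustrative sketch)::
+
+        import pyparsing as pp
+        ppu = pp.pyparsing_unicode
+
+        greek_word = pp.Word(ppu.Greek.alphas)
+        greek_word.parse_string("αβγ")  # -> ['αβγ']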
+ """ + + # fmt: off + + # define ranges in language character sets + _ranges: UnicodeRangeList = [ + (0x0020, sys.maxunicode), + ] + + class BasicMultilingualPlane(unicode_set): + """Unicode set for the Basic Multilingual Plane""" + _ranges: UnicodeRangeList = [ + (0x0020, 0xFFFF), + ] + + class Latin1(unicode_set): + """Unicode set for Latin-1 Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x0020, 0x007E), + (0x00A0, 0x00FF), + ] + + class LatinA(unicode_set): + """Unicode set for Latin-A Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x0100, 0x017F), + ] + + class LatinB(unicode_set): + """Unicode set for Latin-B Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x0180, 0x024F), + ] + + class Greek(unicode_set): + """Unicode set for Greek Unicode Character Ranges""" + _ranges: UnicodeRangeList = [ + (0x0342, 0x0345), + (0x0370, 0x0377), + (0x037A, 0x037F), + (0x0384, 0x038A), + (0x038C,), + (0x038E, 0x03A1), + (0x03A3, 0x03E1), + (0x03F0, 0x03FF), + (0x1D26, 0x1D2A), + (0x1D5E,), + (0x1D60,), + (0x1D66, 0x1D6A), + (0x1F00, 0x1F15), + (0x1F18, 0x1F1D), + (0x1F20, 0x1F45), + (0x1F48, 0x1F4D), + (0x1F50, 0x1F57), + (0x1F59,), + (0x1F5B,), + (0x1F5D,), + (0x1F5F, 0x1F7D), + (0x1F80, 0x1FB4), + (0x1FB6, 0x1FC4), + (0x1FC6, 0x1FD3), + (0x1FD6, 0x1FDB), + (0x1FDD, 0x1FEF), + (0x1FF2, 0x1FF4), + (0x1FF6, 0x1FFE), + (0x2129,), + (0x2719, 0x271A), + (0xAB65,), + (0x10140, 0x1018D), + (0x101A0,), + (0x1D200, 0x1D245), + (0x1F7A1, 0x1F7A7), + ] + + class Cyrillic(unicode_set): + """Unicode set for Cyrillic Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x0400, 0x052F), + (0x1C80, 0x1C88), + (0x1D2B,), + (0x1D78,), + (0x2DE0, 0x2DFF), + (0xA640, 0xA672), + (0xA674, 0xA69F), + (0xFE2E, 0xFE2F), + ] + + class Chinese(unicode_set): + """Unicode set for Chinese Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x2E80, 0x2E99), + (0x2E9B, 0x2EF3), + (0x31C0, 0x31E3), + (0x3400, 0x4DB5), + (0x4E00, 0x9FEF), + (0xA700, 0xA707), + (0xF900, 0xFA6D), + (0xFA70, 0xFAD9), + (0x16FE2, 0x16FE3), + (0x1F210, 0x1F212), + (0x1F214, 0x1F23B), + (0x1F240, 0x1F248), + (0x20000, 0x2A6D6), + (0x2A700, 0x2B734), + (0x2B740, 0x2B81D), + (0x2B820, 0x2CEA1), + (0x2CEB0, 0x2EBE0), + (0x2F800, 0x2FA1D), + ] + + class Japanese(unicode_set): + """Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges""" + + class Kanji(unicode_set): + "Unicode set for Kanji Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x4E00, 0x9FBF), + (0x3000, 0x303F), + ] + + class Hiragana(unicode_set): + """Unicode set for Hiragana Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x3041, 0x3096), + (0x3099, 0x30A0), + (0x30FC,), + (0xFF70,), + (0x1B001,), + (0x1B150, 0x1B152), + (0x1F200,), + ] + + class Katakana(unicode_set): + """Unicode set for Katakana Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x3099, 0x309C), + (0x30A0, 0x30FF), + (0x31F0, 0x31FF), + (0x32D0, 0x32FE), + (0xFF65, 0xFF9F), + (0x1B000,), + (0x1B164, 0x1B167), + (0x1F201, 0x1F202), + (0x1F213,), + ] + + 漢字 = Kanji + カタカナ = Katakana + ひらがな = Hiragana + + _ranges = ( + Kanji._ranges + + Hiragana._ranges + + Katakana._ranges + ) + + class Hangul(unicode_set): + """Unicode set for Hangul (Korean) Unicode Character Range""" + _ranges: UnicodeRangeList = [ + (0x1100, 0x11FF), + (0x302E, 0x302F), + (0x3131, 0x318E), + (0x3200, 0x321C), + (0x3260, 0x327B), + (0x327E,), + (0xA960, 0xA97C), + (0xAC00, 0xD7A3), + (0xD7B0, 0xD7C6), + (0xD7CB, 
0xD7FB),
+            (0xFFA0, 0xFFBE),
+            (0xFFC2, 0xFFC7),
+            (0xFFCA, 0xFFCF),
+            (0xFFD2, 0xFFD7),
+            (0xFFDA, 0xFFDC),
+        ]
+
+    Korean = Hangul
+
+    class CJK(Chinese, Japanese, Hangul):
+        """Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"""
+
+    class Thai(unicode_set):
+        """Unicode set for Thai Unicode Character Range"""
+        _ranges: UnicodeRangeList = [
+            (0x0E01, 0x0E3A),
+            (0x0E3F, 0x0E5B)
+        ]
+
+    class Arabic(unicode_set):
+        """Unicode set for Arabic Unicode Character Range"""
+        _ranges: UnicodeRangeList = [
+            (0x0600, 0x061B),
+            (0x061E, 0x06FF),
+            (0x0700, 0x077F),
+        ]
+
+    class Hebrew(unicode_set):
+        """Unicode set for Hebrew Unicode Character Range"""
+        _ranges: UnicodeRangeList = [
+            (0x0591, 0x05C7),
+            (0x05D0, 0x05EA),
+            (0x05EF, 0x05F4),
+            (0xFB1D, 0xFB36),
+            (0xFB38, 0xFB3C),
+            (0xFB3E,),
+            (0xFB40, 0xFB41),
+            (0xFB43, 0xFB44),
+            (0xFB46, 0xFB4F),
+        ]
+
+    class Devanagari(unicode_set):
+        """Unicode set for Devanagari Unicode Character Range"""
+        _ranges: UnicodeRangeList = [
+            (0x0900, 0x097F),
+            (0xA8E0, 0xA8FF)
+        ]
+
+    BMP = BasicMultilingualPlane
+
+    # add language identifiers using language Unicode
+    العربية = Arabic
+    中文 = Chinese
+    кириллица = Cyrillic
+    Ελληνικά = Greek
+    עִברִית = Hebrew
+    日本語 = Japanese
+    한국어 = Korean
+    ไทย = Thai
+    देवनागरी = Devanagari
+
+    # fmt: on
diff --git a/py311/lib/python3.11/site-packages/pyparsing/util.py b/py311/lib/python3.11/site-packages/pyparsing/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f1e49776e36f3791de4c6c7dd01ff92541f4a49
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/pyparsing/util.py
@@ -0,0 +1,486 @@
+# util.py
+import contextlib
+import re
+from functools import lru_cache, wraps
+import inspect
+import itertools
+import types
+from typing import Callable, Union, Iterable, TypeVar, cast, Any
+import warnings
+
+_bslash = chr(92)
+C = TypeVar("C", bound=Callable)
+
+
+class __config_flags:
+    """Internal class for defining compatibility and debugging flags"""
+
+    _all_names: list[str] = []
+    _fixed_names: list[str] = []
+    _type_desc = "configuration"
+
+    @classmethod
+    def _set(cls, dname, value):
+        if dname in cls._fixed_names:
+            warnings.warn(
+                f"{cls.__name__}.{dname} {cls._type_desc} is {str(getattr(cls, dname)).upper()}"
+                f" and cannot be overridden",
+                stacklevel=3,
+            )
+            return
+        if dname in cls._all_names:
+            setattr(cls, dname, value)
+        else:
+            raise ValueError(f"no such {cls._type_desc} {dname!r}")
+
+    enable = classmethod(lambda cls, name: cls._set(name, True))
+    disable = classmethod(lambda cls, name: cls._set(name, False))
+
+
+@lru_cache(maxsize=128)
+def col(loc: int, strg: str) -> int:
+    """
+    Returns current column within a string, counting newlines as line separators.
+    The first column is number 1.
+
+    Note: the default parsing behavior is to expand tabs in the input string
+    before starting the parsing process. See
+    :meth:`ParserElement.parse_string` for more
+    information on parsing strings containing ``<TAB>`` s, and suggested
+    methods to maintain a consistent view of the parsed string, the parse
+    location, and line and column positions within the parsed string.
+    """
+    s = strg
+    return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
+
+
+@lru_cache(maxsize=128)
+def lineno(loc: int, strg: str) -> int:
+    """Returns current line number within a string, counting newlines as line separators.
+    The first line is number 1.
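+
+    Example (a minimal sketch): if ``s`` contains newlines at indices 2
+    and 5, then ``lineno(4, s)`` returns 2 (one newline precedes index 4)
+    and ``lineno(6, s)`` returns 3 (two newlines precede index 6).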
+
+    Note - the default parsing behavior is to expand tabs in the input string
+    before starting the parsing process. See :meth:`ParserElement.parse_string`
+    for more information on parsing strings containing ``<TAB>`` s, and
+    suggested methods to maintain a consistent view of the parsed string, the
+    parse location, and line and column positions within the parsed string.
+    """
+    return strg.count("\n", 0, loc) + 1
+
+
+@lru_cache(maxsize=128)
+def line(loc: int, strg: str) -> str:
+    """
+    Returns the line of text containing loc within a string, counting newlines as line separators.
+    """
+    last_cr = strg.rfind("\n", 0, loc)
+    next_cr = strg.find("\n", loc)
+    return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]
+
+
+class _UnboundedCache:
+    def __init__(self):
+        cache = {}
+        cache_get = cache.get
+        self.not_in_cache = not_in_cache = object()
+
+        def get(_, key):
+            return cache_get(key, not_in_cache)
+
+        def set_(_, key, value):
+            cache[key] = value
+
+        def clear(_):
+            cache.clear()
+
+        self.size = None
+        self.get = types.MethodType(get, self)
+        self.set = types.MethodType(set_, self)
+        self.clear = types.MethodType(clear, self)
+
+
+class _FifoCache:
+    def __init__(self, size):
+        cache = {}
+        self.size = size
+        self.not_in_cache = not_in_cache = object()
+        cache_get = cache.get
+        cache_pop = cache.pop
+
+        def get(_, key):
+            return cache_get(key, not_in_cache)
+
+        def set_(_, key, value):
+            cache[key] = value
+            while len(cache) > size:
+                # pop oldest element in cache by getting the first key
+                cache_pop(next(iter(cache)))
+
+        def clear(_):
+            cache.clear()
+
+        self.get = types.MethodType(get, self)
+        self.set = types.MethodType(set_, self)
+        self.clear = types.MethodType(clear, self)
+
+
+class LRUMemo:
+    """
+    A memoizing mapping that retains `capacity` deleted items
+
+    The memo tracks retained items by their access order; once `capacity` items
+    are retained, the least recently used item is discarded.
+    """
+
+    def __init__(self, capacity):
+        self._capacity = capacity
+        self._active = {}
+        self._memory = {}
+
+    def __getitem__(self, key):
+        try:
+            return self._active[key]
+        except KeyError:
+            self._memory[key] = self._memory.pop(key)
+            return self._memory[key]
+
+    def __setitem__(self, key, value):
+        self._memory.pop(key, None)
+        self._active[key] = value
+
+    def __delitem__(self, key):
+        try:
+            value = self._active.pop(key)
+        except KeyError:
+            pass
+        else:
+            oldest_keys = list(self._memory)[: -(self._capacity + 1)]
+            for key_to_delete in oldest_keys:
+                self._memory.pop(key_to_delete)
+            self._memory[key] = value
+
+    def clear(self):
+        self._active.clear()
+        self._memory.clear()
+
+
+class UnboundedMemo(dict):
+    """
+    A memoizing mapping that retains all deleted items
+    """
+
+    def __delitem__(self, key):
+        pass
+
+
+def _escape_regex_range_chars(s: str) -> str:
+    # escape these chars: ^-[]
+    for c in r"\^-[]":
+        s = s.replace(c, _bslash + c)
+    s = s.replace("\n", r"\n")
+    s = s.replace("\t", r"\t")
+    return str(s)
+
+
+class _GroupConsecutive:
+    """
+    Used as a callable `key` for itertools.groupby to group
+    characters that are consecutive:
+
+    .. testcode::
+
+        from itertools import groupby
+        from pyparsing.util import _GroupConsecutive
+
+        grouped = groupby("abcdejkmpqrs", key=_GroupConsecutive())
+        for index, group in grouped:
+            print(tuple([index, list(group)]))
+
+    prints:
+
+    .. 
testoutput:: + + (0, ['a', 'b', 'c', 'd', 'e']) + (1, ['j', 'k']) + (2, ['m']) + (3, ['p', 'q', 'r', 's']) + """ + + def __init__(self) -> None: + self.prev = 0 + self.counter = itertools.count() + self.value = -1 + + def __call__(self, char: str) -> int: + c_int = ord(char) + self.prev, prev = c_int, self.prev + if c_int - prev > 1: + self.value = next(self.counter) + return self.value + + +def _collapse_string_to_ranges( + s: Union[str, Iterable[str]], re_escape: bool = True +) -> str: + r""" + Take a string or list of single-character strings, and return + a string of the consecutive characters in that string collapsed + into groups, as might be used in a regular expression '[a-z]' + character set:: + + 'a' -> 'a' -> '[a]' + 'bc' -> 'bc' -> '[bc]' + 'defgh' -> 'd-h' -> '[d-h]' + 'fdgeh' -> 'd-h' -> '[d-h]' + 'jklnpqrtu' -> 'j-lnp-rtu' -> '[j-lnp-rtu]' + + Duplicates get collapsed out:: + + 'aaa' -> 'a' -> '[a]' + 'bcbccb' -> 'bc' -> '[bc]' + 'defghhgf' -> 'd-h' -> '[d-h]' + 'jklnpqrjjjtu' -> 'j-lnp-rtu' -> '[j-lnp-rtu]' + + Spaces are preserved:: + + 'ab c' -> ' a-c' -> '[ a-c]' + + Characters that are significant when defining regex ranges + get escaped:: + + 'acde[]-' -> r'\-\[\]ac-e' -> r'[\-\[\]ac-e]' + """ + + # Developer notes: + # - Do not optimize this code assuming that the given input string + # or internal lists will be short (such as in loading generators into + # lists to make it easier to find the last element); this method is also + # used to generate regex ranges for character sets in the pyparsing.unicode + # classes, and these can be _very_ long lists of strings + + def escape_re_range_char(c: str) -> str: + return "\\" + c if c in r"\^-][" else c + + def no_escape_re_range_char(c: str) -> str: + return c + + if not re_escape: + escape_re_range_char = no_escape_re_range_char + + ret = [] + + # reduce input string to remove duplicates, and put in sorted order + s_chars: list[str] = sorted(set(s)) + + if len(s_chars) > 2: + # find groups of characters that are consecutive (can be collapsed + # down to "-") + for _, chars in itertools.groupby(s_chars, key=_GroupConsecutive()): + # _ is unimportant, is just used to identify groups + # chars is an iterator of one or more consecutive characters + # that comprise the current group + first = last = next(chars) + with contextlib.suppress(ValueError): + *_, last = chars + + if first == last: + # there was only a single char in this group + ret.append(escape_re_range_char(first)) + + elif last == chr(ord(first) + 1): + # there were only 2 characters in this group + # 'a','b' -> 'ab' + ret.append(f"{escape_re_range_char(first)}{escape_re_range_char(last)}") + + else: + # there were > 2 characters in this group, make into a range + # 'c','d','e' -> 'c-e' + ret.append( + f"{escape_re_range_char(first)}-{escape_re_range_char(last)}" + ) + else: + # only 1 or 2 chars were given to form into groups + # 'a' -> ['a'] + # 'bc' -> ['b', 'c'] + # 'dg' -> ['d', 'g'] + # no need to list them with "-", just return as a list + # (after escaping) + ret = [escape_re_range_char(c) for c in s_chars] + + return "".join(ret) + + +def _flatten(ll: Iterable) -> list: + ret = [] + for i in ll: + # Developer notes: + # - do not collapse this section of code, isinstance checks are done + # in optimal order + if isinstance(i, str): + ret.append(i) + elif isinstance(i, Iterable): + ret.extend(_flatten(i)) + else: + ret.append(i) + return ret + + +def make_compressed_re( + word_list: Iterable[str], + max_level: int = 2, + *, + non_capturing_groups: bool = 
True, + _level: int = 1, +) -> str: + """ + Create a regular expression string from a list of words, collapsing by common + prefixes and optional suffixes. + + Calls itself recursively to build nested sublists for each group of suffixes + that have a shared prefix. + """ + + def get_suffixes_from_common_prefixes(namelist: list[str]): + if len(namelist) > 1: + for prefix, suffixes in itertools.groupby(namelist, key=lambda s: s[:1]): + yield prefix, sorted([s[1:] for s in suffixes], key=len, reverse=True) + else: + yield namelist[0][0], [namelist[0][1:]] + + if _level == 1: + if not word_list: + raise ValueError("no words given to make_compressed_re()") + + if "" in word_list: + raise ValueError("word list cannot contain empty string") + else: + # internal recursive call, just return empty string if no words + if not word_list: + return "" + + # dedupe the word list + word_list = list({}.fromkeys(word_list)) + + if max_level == 0: + if any(len(wd) > 1 for wd in word_list): + return "|".join( + sorted([re.escape(wd) for wd in word_list], key=len, reverse=True) + ) + else: + return f"[{''.join(_escape_regex_range_chars(wd) for wd in word_list)}]" + + ret = [] + sep = "" + ncgroup = "?:" if non_capturing_groups else "" + + for initial, suffixes in get_suffixes_from_common_prefixes(sorted(word_list)): + ret.append(sep) + sep = "|" + + initial = re.escape(initial) + + trailing = "" + if "" in suffixes: + trailing = "?" + suffixes.remove("") + + if len(suffixes) > 1: + if all(len(s) == 1 for s in suffixes): + ret.append( + f"{initial}[{''.join(_escape_regex_range_chars(s) for s in suffixes)}]{trailing}" + ) + else: + if _level < max_level: + suffix_re = make_compressed_re( + sorted(suffixes), + max_level, + non_capturing_groups=non_capturing_groups, + _level=_level + 1, + ) + ret.append(f"{initial}({ncgroup}{suffix_re}){trailing}") + else: + if all(len(s) == 1 for s in suffixes): + ret.append( + f"{initial}[{''.join(_escape_regex_range_chars(s) for s in suffixes)}]{trailing}" + ) + else: + suffixes.sort(key=len, reverse=True) + ret.append( + f"{initial}({ncgroup}{'|'.join(re.escape(s) for s in suffixes)}){trailing}" + ) + else: + if suffixes: + suffix = re.escape(suffixes[0]) + if len(suffix) > 1 and trailing: + ret.append(f"{initial}({ncgroup}{suffix}){trailing}") + else: + ret.append(f"{initial}{suffix}{trailing}") + else: + ret.append(initial) + return "".join(ret) + + +def replaced_by_pep8(compat_name: str, fn: C) -> C: + + # Unwrap staticmethod/classmethod + fn = getattr(fn, "__func__", fn) + + # (Presence of 'self' arg in signature is used by explain_exception() methods, so we take + # some extra steps to add it if present in decorated function.) + if ["self"] == list(inspect.signature(fn).parameters)[:1]: + + @wraps(fn) + def _inner(self, *args, **kwargs): + warnings.warn( + f"{compat_name!r} deprecated - use {fn.__name__!r}", + DeprecationWarning, + stacklevel=2, + ) + return fn(self, *args, **kwargs) + + else: + + @wraps(fn) + def _inner(*args, **kwargs): + warnings.warn( + f"{compat_name!r} deprecated - use {fn.__name__!r}", + DeprecationWarning, + stacklevel=2, + ) + return fn(*args, **kwargs) + + _inner.__doc__ = f""" + .. 
deprecated:: 3.0.0 + Use :class:`{fn.__name__}` instead + """ + _inner.__name__ = compat_name + _inner.__annotations__ = fn.__annotations__ + if isinstance(fn, types.FunctionType): + _inner.__kwdefaults__ = fn.__kwdefaults__ # type: ignore [attr-defined] + elif isinstance(fn, type) and hasattr(fn, "__init__"): + _inner.__kwdefaults__ = fn.__init__.__kwdefaults__ # type: ignore [misc,attr-defined] + else: + _inner.__kwdefaults__ = None # type: ignore [attr-defined] + _inner.__qualname__ = fn.__qualname__ + return cast(C, _inner) + + +def deprecate_argument( + kwargs: dict[str, Any], arg_name: str, default_value=None, *, new_name: str = "" +) -> Any: + + def to_pep8_name(s: str, _re_sub_pattern=re.compile(r"([a-z])([A-Z])")) -> str: + s = _re_sub_pattern.sub(r"\1_\2", s) + return s.lower() + + if arg_name in kwargs: + new_name = new_name or to_pep8_name(arg_name) + warnings.warn( + f"{arg_name!r} argument is deprecated, use {new_name!r}", + category=DeprecationWarning, + stacklevel=3, + ) + else: + kwargs[arg_name] = default_value + + return kwargs[arg_name] diff --git a/py311/lib/python3.11/site-packages/pytokens/__init__.py b/py311/lib/python3.11/site-packages/pytokens/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ccd99b06db0d0e3f510cb9322a61ed0f1d58c7c --- /dev/null +++ b/py311/lib/python3.11/site-packages/pytokens/__init__.py @@ -0,0 +1,1165 @@ +"""pytokens - A Fast, spec compliant Python 3.12+ tokenizer that runs on older Pythons.""" + +from __future__ import annotations + +from dataclasses import dataclass, field +import enum +import string +from typing import Iterator, NewType + + +class TokenizeError(Exception): ... + + +class IndentationError(TokenizeError): ... + + +class InconsistentUseOfTabsAndSpaces(IndentationError): ... + + +class DedentDoesNotMatchAnyOuterIndent(IndentationError): ... + + +class UnterminatedString(TokenizeError): ... + + +class UnexpectedEOF(TokenizeError): ... + + +class UnexpectedCharacterAfterBackslash(TokenizeError): ... + + +class NotAnIndent(AssertionError): ... + + +class Underflow(AssertionError): ... 
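+
+
+# Editor's illustrative sketch (not part of the published pytokens API):
+# input-level failures all derive from TokenizeError, while NotAnIndent and
+# Underflow flag internal invariant violations via AssertionError. A caller
+# can therefore wrap tokenization like this, using the TokenIterator class
+# defined later in this module:
+def _example_tokenize(source: str) -> "list[Token]":
+    """Return all tokens of *source*, re-raising input errors with context."""
+    try:
+        return list(TokenIterator(source, issue_128233_handling=True))
+    except TokenizeError as exc:
+        raise TokenizeError(f"cannot tokenize {source[:30]!r}") from exc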
+ + +class TokenType(enum.IntEnum): + whitespace = 1 + indent = 2 + dedent = 3 + newline = 4 # semantically meaningful newline + nl = 5 # non meaningful newline + comment = 6 + + _op_start = 7 # marker used to check if a token is an operator + semicolon = 8 + lparen = 9 + rparen = 10 + lbracket = 11 + rbracket = 12 + lbrace = 13 + rbrace = 14 + colon = 15 + op = 16 + _op_end = 17 # marker used to check if a token is an operator + + identifier = 18 + number = 19 + string = 20 + fstring_start = 21 + fstring_middle = 22 + fstring_end = 23 + + tstring_start = 24 + tstring_middle = 25 + tstring_end = 26 + + endmarker = 27 + + errortoken = 28 + + def __repr__(self) -> str: + return f"TokenType.{self.name}" + + def to_python_token(self) -> str: + if self.name == "identifier": + return "NAME" + + if self.is_operator(): + return "OP" + + return self.name.upper() + + def is_operator(self) -> bool: + return TokenType._op_start < self < TokenType._op_end + + +@dataclass +class Token: + type: TokenType + # Byte offsets in the file + start_index: int + end_index: int + start_line: int + # 0-indexed offset from start of line + start_col: int + end_line: int + end_col: int + + def to_byte_slice(self, source: str) -> str: + # Newline at end of file may not exist in the file + if ( + (self.type == TokenType.newline or self.type == TokenType.nl) + and self.start_index == len(source) + and self.end_index == len(source) + 1 + ): + return "" + + # Dedents at end of file also may not exist in the file + if ( + self.type == TokenType.dedent + and self.start_index == len(source) + 1 + and self.end_index == len(source) + 1 + ): + return "" + + # Endmarkers are out of bound too + if self.type == TokenType.endmarker: + return "" + + return source[self.start_index : self.end_index] + + +class FStringState: + State = NewType("State", int) + + not_fstring = State(1) + at_fstring_middle = State(2) + at_fstring_lbrace = State(3) + in_fstring_expr = State(4) + in_fstring_expr_modifier = State(5) + at_fstring_end = State(6) + + def __init__(self) -> None: + self.state = FStringState.not_fstring + self.stack: list[FStringState.State] = [] + + def enter_fstring(self) -> None: + self.stack.append(self.state) + self.state = FStringState.at_fstring_middle + + def leave_fstring(self) -> None: + assert self.state == FStringState.at_fstring_end + self.state = self.stack.pop() + + def consume_fstring_middle_for_lbrace(self) -> None: + if self.state == FStringState.in_fstring_expr_modifier: + self.stack.append(self.state) + + self.state = FStringState.at_fstring_lbrace + + def consume_fstring_middle_for_end(self) -> None: + self.state = FStringState.at_fstring_end + + def consume_lbrace(self) -> None: + self.state = FStringState.in_fstring_expr + + def consume_rbrace(self) -> None: + assert ( + self.state == FStringState.in_fstring_expr + or self.state == FStringState.in_fstring_expr_modifier + ) + + if ( + len(self.stack) > 0 + and self.stack[-1] == FStringState.in_fstring_expr_modifier + ): + self.state = self.stack.pop() + else: + self.state = FStringState.at_fstring_middle + + def consume_colon(self) -> None: + assert self.state == FStringState.in_fstring_expr + self.state = FStringState.in_fstring_expr_modifier + + +@dataclass +class TokenIterator: + source: str + issue_128233_handling: bool + + current_index: int = 0 + prev_index: int = 0 + line_number: int = 1 + prev_line_number: int = 1 + byte_offset: int = 0 + prev_byte_offset: int = 0 + all_whitespace_on_this_line: bool = True + + bracket_level: int = 0 + 
bracket_level_stack: list[int] = field(default_factory=list)
+    prev_token: Token | None = None
+
+    indent_stack: list[str] = field(default_factory=list)
+    dedent_counter: int = 0
+
+    # f-string state
+    fstring_state: FStringState = field(default_factory=FStringState)
+    fstring_prefix_quote_stack: list[tuple[str, str]] = field(default_factory=list)
+    fstring_prefix: str | None = None
+    fstring_quote: str | None = None
+
+    # CPython has a weird bug where every time a bare \r is
+    # present, the next token becomes an OP, regardless of what it is.
+    weird_op_case: bool = False
+    weird_op_case_nl: bool = False
+
+    weird_whitespace_case: bool = False
+
+    def is_in_bounds(self) -> bool:
+        return self.current_index < len(self.source)
+
+    def peek(self) -> str:
+        assert self.is_in_bounds()
+        return self.source[self.current_index]
+
+    def peek_next(self) -> str:
+        assert self.current_index + 1 < len(self.source)
+        return self.source[self.current_index + 1]
+
+    def advance(self) -> None:
+        self.current_index += 1
+        self.byte_offset += 1
+
+    def advance_by(self, count: int) -> None:
+        self.current_index += count
+        self.byte_offset += count
+
+    def next_line(self) -> None:
+        self.line_number += 1
+        self.byte_offset = 0
+        self.all_whitespace_on_this_line = True
+
+    def advance_check_newline(self) -> None:
+        if self.source[self.current_index] == "\n":
+            self.current_index += 1
+            self.next_line()
+        else:
+            self.advance()
+
+    def match(self, *options: str, ignore_case: bool = False) -> bool:
+        for option in options:
+            if self.current_index + len(option) > len(self.source):
+                continue
+            snippet = self.source[self.current_index : self.current_index + len(option)]
+            if ignore_case:
+                option = option.lower()
+                snippet = snippet.lower()
+
+            if option == snippet:
+                return True
+
+        return False
+
+    def make_token(self, tok_type: TokenType) -> Token:
+        if self.fstring_prefix is not None and "t" in self.fstring_prefix:
+            if tok_type == TokenType.fstring_start:
+                tok_type = TokenType.tstring_start
+            elif tok_type == TokenType.fstring_middle:
+                tok_type = TokenType.tstring_middle
+            elif tok_type == TokenType.fstring_end:
+                tok_type = TokenType.tstring_end
+
+        token_type = (
+            TokenType.op
+            if self.weird_op_case
+            and not tok_type.is_operator()
+            and tok_type not in (TokenType.number, TokenType.string)
+            else tok_type
+        )
+        if self.weird_op_case:
+            # And we have another weird case INSIDE the weird case.
+            # For some reason, when CPython accidentally captures a space
+            # as the next character, i.e. when the token is '\r ',
+            # it DOESN'T see it as whitespace, so in that specific case
+            # we shouldn't set all_whitespace_on_this_line.
+            # I think this is because CPython never expected to have a
+            # ' ' token in it anyway, so it doesn't classify it as
+            # whitespace. So it becomes non-whitespace.
+            # Removing this if stmt breaks test 1001 right now.
+ token_str = self.source[self.prev_index : self.current_index] + if token_str == "\r ": + self.all_whitespace_on_this_line = False + self.weird_op_case = False + + token = Token( + type=token_type, + start_index=self.prev_index, + end_index=self.current_index, + start_line=self.prev_line_number, + start_col=self.prev_byte_offset, + end_line=self.line_number, + end_col=self.byte_offset, + ) + if tok_type == TokenType.newline or tok_type == TokenType.nl: + self.next_line() + elif tok_type == TokenType.whitespace or tok_type == TokenType.comment: + pass + else: + self.all_whitespace_on_this_line = False + + self.prev_token = token + self.prev_index = self.current_index + self.prev_line_number = self.line_number + self.prev_byte_offset = self.byte_offset + self.weird_op_case = False + + return token + + def push_fstring_prefix_quote(self, prefix: str, quote: str) -> None: + if self.fstring_prefix is not None: + assert self.fstring_quote is not None + self.fstring_prefix_quote_stack.append( + (self.fstring_prefix, self.fstring_quote) + ) + + self.fstring_prefix = prefix + self.fstring_quote = quote + + def pop_fstring_quote(self) -> None: + if self.fstring_prefix is None: + assert self.fstring_quote is None + raise Underflow + + self.fstring_prefix, self.fstring_quote = ( + (None, None) + if len(self.fstring_prefix_quote_stack) == 0 + else self.fstring_prefix_quote_stack.pop() + ) + + def newline(self) -> Token: + if self.is_in_bounds() and self.source[self.current_index] == "\r": + self.advance() + self.advance() + token_type = ( + TokenType.nl + if ( + self.weird_op_case_nl + or self.bracket_level > 0 + or self.fstring_state.state == FStringState.in_fstring_expr + or self.all_whitespace_on_this_line + ) + else TokenType.newline + ) + token = self.make_token(token_type) + self.weird_op_case_nl = False + return token + + def endmarker(self) -> Token: + if self.bracket_level != 0: + raise UnexpectedEOF + + if len(self.indent_stack) > 0: + _ = self.indent_stack.pop() + return self.make_token(TokenType.dedent) + + return self.make_token(TokenType.endmarker) + + def decimal(self) -> Token: + digit_before_decimal = False + if self.source[self.current_index].isdigit(): + digit_before_decimal = True + self.advance() + + # TODO: this is too lax; 1__2 tokenizes successfully + while self.is_in_bounds() and ( + self.source[self.current_index].isdigit() + or self.source[self.current_index] == "_" + ): + self.advance() + + if self.is_in_bounds() and self.source[self.current_index] == ".": + self.advance() + + while self.is_in_bounds() and ( + self.source[self.current_index].isdigit() + or ( + self.source[self.current_index] == "_" + and self.source[self.current_index - 1].isdigit() + ) + ): + self.advance() + # Before advancing over the 'e', ensure that there has been at least 1 digit before the 'e' + if self.current_index + 1 < len(self.source) and ( + (digit_before_decimal or self.source[self.current_index - 1].isdigit()) + and ( + self.source[self.current_index] == "e" + or self.source[self.current_index] == "E" + ) + and ( + self.source[self.current_index + 1].isdigit() + or ( + self.current_index + 2 < len(self.source) + and ( + self.source[self.current_index + 1] == "+" + or self.source[self.current_index + 1] == "-" + ) + and self.source[self.current_index + 2].isdigit() + ) + ) + ): + self.advance() + self.advance() + # optional third advance not necessary as itll get advanced just below + + # TODO: this is too lax; 1__2 tokenizes successfully + while self.is_in_bounds() and ( + 
self.source[self.current_index].isdigit() + or ( + (digit_before_decimal or self.source[self.current_index - 1].isdigit()) + and self.source[self.current_index] == "_" + ) + ): + self.advance() + + # Complex numbers end in a `j`. But ensure at least 1 digit before it + if self.is_in_bounds() and ( + (digit_before_decimal or self.source[self.current_index - 1].isdigit()) + and ( + self.source[self.current_index] == "j" + or self.source[self.current_index] == "J" + ) + ): + self.advance() + # If all of this resulted in just a dot, return an operator + if ( + self.current_index - self.prev_index == 1 + and self.source[self.current_index - 1] == "." + ): + # Ellipsis check + if ( + self.current_index + 2 <= len(self.source) + and self.source[self.current_index : self.current_index + 2] == ".." + ): + self.advance() + self.advance() + + return self.make_token(TokenType.op) + + return self.make_token(TokenType.number) + + def binary(self) -> Token: + # jump over `0b` + self.advance() + self.advance() + while self.is_in_bounds() and ( + self.source[self.current_index] == "0" + or self.source[self.current_index] == "1" + or self.source[self.current_index] == "_" + ): + self.advance() + if self.is_in_bounds() and ( + self.source[self.current_index] == "e" + or self.source[self.current_index] == "E" + ): + self.advance() + if self.is_in_bounds() and self.source[self.current_index] == "-": + self.advance() + + while self.is_in_bounds() and ( + self.source[self.current_index] == "0" + or self.source[self.current_index] == "1" + or self.source[self.current_index] == "_" + ): + self.advance() + return self.make_token(TokenType.number) + + def octal(self) -> Token: + # jump over `0o` + self.advance() + self.advance() + while self.is_in_bounds() and ( + self.source[self.current_index] >= "0" + and self.source[self.current_index] <= "7" + or self.source[self.current_index] == "_" + ): + self.advance() + if self.is_in_bounds() and ( + self.source[self.current_index] == "e" + or self.source[self.current_index] == "E" + ): + self.advance() + if self.is_in_bounds() and self.source[self.current_index] == "-": + self.advance() + + while self.is_in_bounds() and ( + self.source[self.current_index] >= "0" + and self.source[self.current_index] <= "7" + or self.source[self.current_index] == "_" + ): + self.advance() + return self.make_token(TokenType.number) + + def hexadecimal(self) -> Token: + # jump over `0x` + self.advance() + self.advance() + while self.is_in_bounds() and ( + self.source[self.current_index] in string.hexdigits + or self.source[self.current_index] == "_" + ): + self.advance() + if self.is_in_bounds() and ( + self.source[self.current_index] == "e" + or self.source[self.current_index] == "E" + ): + self.advance() + if self.is_in_bounds() and self.source[self.current_index] == "-": + self.advance() + + while self.is_in_bounds() and ( + self.source[self.current_index] in string.hexdigits + or self.source[self.current_index] == "_" + ): + self.advance() + return self.make_token(TokenType.number) + + def find_opening_quote(self) -> int: + # Quotes should always be within 3 chars of the beginning of the string token + for offset in range(3): + char = self.source[self.current_index + offset] + if char == '"' or char == "'": + return self.current_index + offset + + raise AssertionError("Quote not found somehow") + + def string_prefix_and_quotes(self) -> tuple[str, str]: + quote_index = self.find_opening_quote() + prefix = self.source[self.current_index : quote_index] + quote_char = 
self.source[quote_index] + + # Check for triple quotes + quote = ( + self.source[quote_index : quote_index + 3] + if ( + quote_index + 3 <= len(self.source) + and self.source[quote_index + 1] == quote_char + and self.source[quote_index + 2] == quote_char + ) + else self.source[quote_index : quote_index + 1] + ) + return prefix, quote + + def fstring(self) -> Token: + if self.fstring_state.state in ( + FStringState.not_fstring, + FStringState.in_fstring_expr, + ): + prefix, quote = self.string_prefix_and_quotes() + + self.push_fstring_prefix_quote(prefix, quote) + for _ in range(len(prefix)): + self.advance() + for _ in range(len(quote)): + self.advance() + self.fstring_state.enter_fstring() + return self.make_token(TokenType.fstring_start) + + if self.fstring_state.state == FStringState.at_fstring_middle: + assert self.fstring_quote is not None + is_single_quote = len(self.fstring_quote) == 1 + start_index = self.current_index + while self.is_in_bounds(): + char = self.source[self.current_index] + # For single quotes, bail on newlines + if char == "\n" and is_single_quote: + raise UnterminatedString + + # Handle escapes + if char == "\\": + self.advance() + # But don't escape a `\{` or `\}` in f-strings + # but DO escape `\N{` in f-strings, that's for unicode characters + # but DON'T escape `\N{` in raw f-strings. + assert self.fstring_prefix is not None + if ( + "r" not in self.fstring_prefix.lower() + and self.current_index + 1 < len(self.source) + and self.peek() == "N" + and self.peek_next() == "{" + ): + self.advance() + self.advance() + + if self.is_in_bounds() and not ( + self.peek() == "{" or self.peek() == "}" + ): + self.advance_check_newline() + + continue + + # Find opening / closing quote + if char == "{": + if self.peek_next() == "{": + self.advance() + self.advance() + continue + else: + self.fstring_state.consume_fstring_middle_for_lbrace() + # If fstring-middle is empty, skip it by returning the next step token + if self.current_index == start_index: + return self.fstring() + + return self.make_token(TokenType.fstring_middle) + + assert self.fstring_quote is not None + if self.match(self.fstring_quote): + self.fstring_state.consume_fstring_middle_for_end() + # If fstring-middle is empty, skip it by returning the next step token + if self.current_index == start_index: + return self.fstring() + + return self.make_token(TokenType.fstring_middle) + + self.advance_check_newline() + + raise UnexpectedEOF + + if self.fstring_state.state == FStringState.at_fstring_lbrace: + self.advance() + self.bracket_level_stack.append(self.bracket_level) + self.bracket_level = 0 + self.fstring_state.consume_lbrace() + return self.make_token(TokenType.lbrace) + + if self.fstring_state.state == FStringState.at_fstring_end: + assert self.fstring_quote is not None + for _ in range(len(self.fstring_quote)): + self.advance() + token = self.make_token(TokenType.fstring_end) + self.pop_fstring_quote() + self.fstring_state.leave_fstring() + return token + + if self.fstring_state.state == FStringState.in_fstring_expr_modifier: + start_index = self.current_index + while self.is_in_bounds(): + char = self.source[self.current_index] + assert self.fstring_quote is not None + if (char == "\n" or char == "{") and len(self.fstring_quote) == 1: + if char == "{": + self.fstring_state.consume_fstring_middle_for_lbrace() + else: + # TODO: why? 
+ self.fstring_state.state = FStringState.in_fstring_expr + + # If fstring-middle is empty, skip it by returning the next step token + if self.current_index == start_index: + return self.fstring() + + return self.make_token(TokenType.fstring_middle) + elif char == "}": + self.fstring_state.state = FStringState.in_fstring_expr + return self.make_token(TokenType.fstring_middle) + + self.advance_check_newline() + + raise UnexpectedEOF + + raise AssertionError("Unhandled f-string state") + + def string(self) -> Token: + prefix, quote = self.string_prefix_and_quotes() + if prefix and self.weird_op_case: + self.advance() + return self.make_token(tok_type=TokenType.op) + + for char in prefix: + if char in ("f", "F", "t", "T"): + return self.fstring() + + for _ in range(len(prefix)): + self.advance() + for _ in range(len(quote)): + self.advance() + + is_single_quote = len(quote) == 1 + + while self.is_in_bounds(): + char = self.source[self.current_index] + # For single quotes, bail on newlines + if char == "\n" and is_single_quote: + raise UnterminatedString + + # Handle escapes + if char == "\\": + self.advance() + self.advance_check_newline() + continue + + # Find closing quote + if self.match(quote): + for _ in range(len(quote)): + self.advance() + return self.make_token(TokenType.string) + + self.advance_check_newline() + + raise UnexpectedEOF + + def indent(self) -> Token: + start_index = self.current_index + saw_whitespace = False + saw_tab_or_space = False + while self.is_in_bounds(): + char = self.source[self.current_index] + if self.is_whitespace(): + self.advance() + saw_whitespace = True + if char == " " or char == "\t": + saw_tab_or_space = True + else: + break + + if not self.is_in_bounds(): + # File ends with no whitespace after newline, don't return indent + if self.current_index == start_index: + raise NotAnIndent + # If reached the end of the file, don't return an indent + return self.make_token(TokenType.whitespace) + + # If the line is preceded by just linefeeds/CR/etc., + # treat it as whitespace. + if saw_whitespace and not saw_tab_or_space: + self.weird_whitespace_case = True + return self.make_token(TokenType.whitespace) + + # For lines that are just leading whitespace and a slash or a comment, + # don't return indents + next_char = self.peek() + if next_char == "#" or next_char == "\\" or self.is_newline(): + return self.make_token(TokenType.whitespace) + + new_indent = self.source[start_index : self.current_index] + current_indent = "" if len(self.indent_stack) == 0 else self.indent_stack[-1] + + if len(new_indent) == len(current_indent): + if len(new_indent) == 0: + raise NotAnIndent + + if new_indent != current_indent: + raise InconsistentUseOfTabsAndSpaces + return self.make_token(TokenType.whitespace) + elif len(new_indent) > len(current_indent): + if len(current_indent) > 0 and current_indent not in new_indent: + raise InconsistentUseOfTabsAndSpaces + self.indent_stack.append(new_indent) + return self.make_token(TokenType.indent) + else: + while len(self.indent_stack) > 0: + top_indent = self.indent_stack[-1] + if len(top_indent) < len(new_indent): + raise DedentDoesNotMatchAnyOuterIndent + + if len(top_indent) == len(new_indent): + break + + _ = self.indent_stack.pop() + self.dedent_counter += 1 + + # Let the dedent counter make the dedents. 
They must be length zero + return self.make_token(TokenType.whitespace) + + def is_whitespace(self) -> bool: + if self.is_newline(): + return False + + char = self.source[self.current_index] + return ( + char == " " + or char == "\r" + or char == "\t" + or char == "\x0b" + or char == "\x0c" + ) + + def is_newline(self) -> bool: + if self.source[self.current_index] == "\n": + return True + if ( + self.source[self.current_index] == "\r" + and self.current_index + 1 < len(self.source) + and self.source[self.current_index + 1] == "\n" + ): + return True + + return False + + def name(self) -> Token: + if self.weird_op_case: + self.advance() + return self.make_token(TokenType.identifier) + + # According to PEP 3131, any non-ascii character is valid in a NAME token. + # But if we see any non-identifier ASCII character we should stop. + remaining = self.source[self.current_index :] + for index, char in enumerate(remaining): + if ord(char) < 128 and not str.isalnum(char) and char != "_": + length = index + break + else: + length = len(remaining) + + self.advance_by(length) + return self.make_token(TokenType.identifier) + + def __iter__(self) -> TokenIterator: + return self + + def __next__(self) -> Token: + if self.prev_token is not None and self.prev_token.type == TokenType.endmarker: + raise StopIteration + + # EOF checks + if self.current_index == len(self.source): + if self.prev_token is None: + return self.endmarker() + + if self.prev_token.type in { + TokenType.newline, + TokenType.nl, + TokenType.dedent, + }: + return self.endmarker() + else: + return self.newline() + + if self.current_index > len(self.source): + return self.endmarker() + + # f-string check + if ( + self.fstring_state.state != FStringState.not_fstring + and self.fstring_state.state != FStringState.in_fstring_expr + ): + return self.fstring() + + current_char = self.source[self.current_index] + + # \r on its own, in certain cases it gets merged with the next char. + # It's probably a bug: https://github.com/python/cpython/issues/128233 + # 'issue_128233_handling=True' works around this bug, but if it's False + # then we produce identical tokens to CPython. + if not self.issue_128233_handling and current_char == "\r": + self.advance() + if not self.is_in_bounds(): + return self.newline() + + current_char = self.source[self.current_index] + if current_char != "\n": + self.weird_op_case = True + if ( + self.prev_token is not None + and self.prev_token.type == TokenType.comment + ): + self.weird_op_case_nl = True + + # Comment check + if current_char == "#": + if self.weird_op_case: + self.advance() + return self.make_token(TokenType.comment) + + while self.is_in_bounds() and not self.is_newline(): + if ( + not self.issue_128233_handling + and self.source[self.current_index] == "\r" + ): + break + self.advance() + return self.make_token(TokenType.comment) + + # Empty the dedent counter + if self.dedent_counter > 0: + self.dedent_counter -= 1 + return self.make_token(TokenType.dedent) + + # Newline check + if self.is_newline(): + return self.newline() + + # \ check + if current_char == "\\": + self.advance() + if not self.is_in_bounds(): + raise UnexpectedEOF + + # Consume all whitespace on this line and the next. 
+ found_whitespace = False + seen_newline = False + while self.is_in_bounds(): + if self.is_whitespace(): + self.advance() + found_whitespace = True + elif not seen_newline and (self.is_newline()): + char = self.source[self.current_index] + if char == "\r": + self.advance() + self.advance() + found_whitespace = True + seen_newline = True + # Move to next line without creating a newline token. But, + # if the previous line was all whitespace, whitespace on + # the next line is still valid indentation. Avoid consuming + if self.all_whitespace_on_this_line: + self.next_line() + break + else: + self.next_line() + # Preserve this boolean, we're on the same line semantically + self.all_whitespace_on_this_line = False + + else: + break + + if not found_whitespace: + raise UnexpectedCharacterAfterBackslash + + return self.make_token(TokenType.whitespace) + + # Indent / dedent checks + if ( + (self.byte_offset == 0 or self.weird_whitespace_case) + and self.bracket_level == 0 + and self.fstring_state.state == FStringState.not_fstring + ): + self.weird_whitespace_case = False + try: + indent_token = self.indent() + except NotAnIndent: + indent_token = None + + if indent_token is not None: + return indent_token + + if self.is_whitespace(): + while self.is_in_bounds() and self.is_whitespace(): + self.advance() + return self.make_token(TokenType.whitespace) + + if current_char in ("+", "&", "|", "^", "@", "%", "=", "!", "~"): + self.advance() + if self.peek() == "=": + self.advance() + return self.make_token(TokenType.op) + + if current_char == "<": + self.advance() + if self.peek() == ">": + # Barry as FLUFL easter egg + self.advance() + return self.make_token(TokenType.op) + + if self.peek() == "<": + self.advance() + if self.peek() == "=": + self.advance() + return self.make_token(TokenType.op) + + if current_char == ">": + self.advance() + if self.peek() == ">": + self.advance() + if self.peek() == "=": + self.advance() + return self.make_token(TokenType.op) + + if current_char == "/": + self.advance() + if self.peek() == "/": + self.advance() + if self.peek() == "=": + self.advance() + return self.make_token(TokenType.op) + + if current_char == "*": + self.advance() + if self.peek() == "*": + self.advance() + if self.peek() == "=": + self.advance() + return self.make_token(TokenType.op) + + if current_char == "-": + self.advance() + # -> operator + if self.peek() == ">": + self.advance() + return self.make_token(TokenType.op) + + # -= operator + if self.peek() == "=": + self.advance() + return self.make_token(TokenType.op) + + if current_char in (",", ";"): + self.advance() + return self.make_token(TokenType.op) + + # This guy is not used in Python3, but still exists + # for backwards compatibility i guess. 
+ if current_char == "`": + self.advance() + return self.make_token(TokenType.op) + + if current_char == "(": + self.advance() + self.bracket_level += 1 + return self.make_token(TokenType.lparen) + + if current_char == ")": + self.advance() + self.bracket_level -= 1 + if self.bracket_level < 0: + self.bracket_level = 0 + return self.make_token(TokenType.rparen) + + if current_char == "[": + self.advance() + self.bracket_level += 1 + return self.make_token(TokenType.lbracket) + + if current_char == "]": + self.advance() + self.bracket_level -= 1 + if self.bracket_level < 0: + self.bracket_level = 0 + return self.make_token(TokenType.rbracket) + + if current_char == "{": + self.advance() + self.bracket_level += 1 + return self.make_token(TokenType.lbrace) + + if current_char == "}": + self.advance() + if ( + self.bracket_level == 0 + and self.fstring_state.state == FStringState.in_fstring_expr + ): + self.fstring_state.consume_rbrace() + self.bracket_level = self.bracket_level_stack.pop() + else: + self.bracket_level -= 1 + if self.bracket_level < 0: + self.bracket_level = 0 + + return self.make_token(TokenType.rbrace) + + if current_char == ":": + self.advance() + if ( + self.bracket_level == 0 + and self.fstring_state.state == FStringState.in_fstring_expr + ): + self.fstring_state.state = FStringState.in_fstring_expr_modifier + return self.make_token(TokenType.op) + else: + if self.peek() == "=": + self.advance() + return self.make_token(TokenType.op) + + if current_char in ".0123456789": + if self.current_index + 2 <= len(self.source) and self.source[ + self.current_index : self.current_index + 2 + ] in ("0b", "0B"): + return self.binary() + elif self.current_index + 2 <= len(self.source) and self.source[ + self.current_index : self.current_index + 2 + ] in ("0o", "0O"): + return self.octal() + elif self.current_index + 2 <= len(self.source) and self.source[ + self.current_index : self.current_index + 2 + ] in ("0x", "0X"): + return self.hexadecimal() + else: + return self.decimal() + + if ( + (self.current_index + 1 <= len(self.source) and self.match('"', "'")) + or ( + self.current_index + 2 <= len(self.source) + and self.match( + 'b"', + "b'", + 'r"', + "r'", + 'f"', + "f'", + 'u"', + "u'", + "t'", + 't"', + ignore_case=True, + ) + ) + or ( + self.current_index + 3 <= len(self.source) + and self.match( + 'br"', + "br'", + 'rb"', + "rb'", + 'fr"', + "fr'", + 'rf"', + "rf'", + "tr'", + 'tr"', + "rt'", + 'rt"', + ignore_case=True, + ) + ) + ): + return self.string() + + return self.name() + + +def tokenize( + source: str, + *, + fstring_tokens: bool = True, + issue_128233_handling: bool = True, +) -> Iterator[Token]: + token_iterator = TokenIterator(source, issue_128233_handling=issue_128233_handling) + if fstring_tokens: + return iter(token_iterator) + + return merge_fstring_tokens(token_iterator) + + +def merge_fstring_tokens(token_iterator: TokenIterator) -> Iterator[Token]: + """Turn post-Python-3.12 FSTRING-* tokens back to a single STRING token.""" + for token in token_iterator: + if token.type not in (TokenType.fstring_start, TokenType.tstring_start): + yield token + continue + + start_token = token + end_token = token + + fstring_starts = 1 + fstring_ends = 0 + for token in token_iterator: + if token.type in (TokenType.fstring_start, TokenType.tstring_start): + fstring_starts += 1 + if token.type in (TokenType.fstring_end, TokenType.tstring_end): + fstring_ends += 1 + + if fstring_starts == fstring_ends: + end_token = token + break + + yield Token( + type=TokenType.string, + 
start_index=start_token.start_index, + start_line=start_token.start_line, + start_col=start_token.start_col, + end_index=end_token.end_index, + end_line=end_token.end_line, + end_col=end_token.end_col, + ) diff --git a/py311/lib/python3.11/site-packages/pytokens/__main__.py b/py311/lib/python3.11/site-packages/pytokens/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ae31dc4d281ae51e5f5ac6995f607c86d9c3f91 --- /dev/null +++ b/py311/lib/python3.11/site-packages/pytokens/__main__.py @@ -0,0 +1,7 @@ +"""Support executing the CLI by doing `python -m pytokens`.""" +from __future__ import annotations + +from pytokens.cli import cli + +if __name__ == "__main__": + raise SystemExit(cli()) diff --git a/py311/lib/python3.11/site-packages/pytokens/cli.py b/py311/lib/python3.11/site-packages/pytokens/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..752e5c701949a292d6b3427100112a106652f29b --- /dev/null +++ b/py311/lib/python3.11/site-packages/pytokens/cli.py @@ -0,0 +1,192 @@ +"""CLI interface for pytokens.""" + +from __future__ import annotations + +import argparse +import io +import os.path +import tokenize +from typing import Iterable, NamedTuple +import warnings + +import pytokens + + +class CLIArgs: + filepath: str + validate: bool + issue_128233_handling: bool + + +def cli(argv: list[str] | None = None) -> int: + """CLI interface.""" + parser = argparse.ArgumentParser() + parser.add_argument("filepath") + parser.add_argument( + "--no-128233-handling", + dest="issue_128233_handling", + action="store_false", + ) + parser.add_argument("--validate", action="store_true") + args = parser.parse_args(argv, namespace=CLIArgs()) + + if os.path.isdir(args.filepath): + files = find_all_python_files(args.filepath) + verbose = False + else: + files = [args.filepath] + verbose = True + + for filepath in sorted(files): + with open(filepath, "rb") as file: + try: + encoding, read_bytes = tokenize.detect_encoding(file.readline) + except SyntaxError: + if args.validate: + # Broken `# coding` comment, tokenizer bails, skip file + print("\033[1;33mS\033[0m", end="", flush=True) + continue + + raise + + source = b"".join(read_bytes) + file.read() + + if args.validate: + validate( + filepath, + source, + encoding, + verbose=verbose, + issue_128233_handling=args.issue_128233_handling, + ) + + else: + source_str = source.decode(encoding) + for token in pytokens.tokenize( + source_str, + issue_128233_handling=args.issue_128233_handling, + ): + token_source = source_str[token.start_index : token.end_index] + print(repr(token_source), token) + + return 0 + + +class TokenTuple(NamedTuple): + type: str + start: tuple[int, int] + end: tuple[int, int] + + +def validate( + filepath: str, + source: bytes, + encoding: str, + *, + issue_128233_handling: bool, + verbose: bool = True, +) -> None: + """Validate the source code.""" + warnings.simplefilter("ignore") + + # Ensure all line endings have newline as a valid index + if len(source) == 0 or source[-1:] != b"\n": + source = source + b"\n" + + # Same as .splitlines(keepends=True), but doesn't split on linefeeds i.e. 
\x0c + sourcelines = [line + b"\n" for line in source.split(b"\n")] + # For that last newline token that exists on an imaginary line sometimes + sourcelines.append(b"\n") + + source_file = io.BytesIO(source) + builtin_tokens = tokenize.tokenize(source_file.readline) + # drop the encoding token + next(builtin_tokens) + + try: + expected_tokens_unprocessed = [ + TokenTuple(tokenize.tok_name[token.type], token.start, token.end) + for token in builtin_tokens + ] + except tokenize.TokenError: + print("\033[1;33mS\033[0m", end="", flush=True) + return + + expected_tokens = [expected_tokens_unprocessed[0]] + for index, token in enumerate(expected_tokens_unprocessed[1:], start=1): + last_token = expected_tokens[-1] + + current_token = token + # Merge consecutive FSTRING_MIDDLE tokens. it's weird cpython has it like that. + if current_token.type == last_token.type == "FSTRING_MIDDLE": + expected_tokens.pop() + current_token = TokenTuple( + current_token.type, + last_token.start, + current_token.end, + ) + + if index + 1 < len(expected_tokens_unprocessed): + # When an FSTRING_MIDDLE ends with a `{{{` like f'x{{{1}', Python eats + # the last { char as well as its end index, so we get a `x{` token + # instead of the expected `x{{` token. This fixes that case. Pretty + # much always there should be no gap between an fstring-middle ending + # and the { op after it. + # Same deal for `}}}"` + next_token = expected_tokens_unprocessed[index + 1] + if ( + (current_token.type == "FSTRING_MIDDLE" and next_token.type == "OP") + or ( + current_token.type == "FSTRING_MIDDLE" + and next_token.type == "FSTRING_END" + ) + and next_token.start[0] == current_token.end[0] + and next_token.start[1] > current_token.end[1] + ): + expected_tokens.append( + TokenTuple( + current_token.type, + current_token.start, + next_token.start, + ) + ) + continue + + expected_tokens.append(current_token) + + source_string = source.decode(encoding) + our_tokens = ( + TokenTuple( + token.type.to_python_token(), + (token.start_line, token.start_col), + (token.end_line, token.end_col), + ) + for token in pytokens.tokenize( + source_string, issue_128233_handling=issue_128233_handling + ) + if token.type != pytokens.TokenType.whitespace + ) + + for builtin_token, our_token in zip(expected_tokens, our_tokens, strict=True): + mismatch = builtin_token != our_token + if mismatch or verbose: + print("EXPECTED", builtin_token) + print("---- GOT", our_token) + + if mismatch: + print("Filepath:", filepath) + print("\033[1;31mF\033[0m", end="", flush=True) + # raise AssertionError("Tokens do not match") + return + + print("\033[1;32m.\033[0m", end="", flush=True) + + +def find_all_python_files(directory: str) -> Iterable[str]: + """Recursively find all Python files in the given directory.""" + python_files = set() + for root, _, files in os.walk(directory, followlinks=False): + for file in files: + if file.endswith(".py"): + python_files.add(os.path.join(root, file)) + return python_files diff --git a/py311/lib/python3.11/site-packages/pytokens/py.typed b/py311/lib/python3.11/site-packages/pytokens/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..d3245e747a8bc085800a555ac3ea9f4ccdafbe4b --- /dev/null +++ b/py311/lib/python3.11/site-packages/pytokens/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. This package uses inline types. 
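The tokenizer and CLI above define a small public surface. A minimal usage sketch follows; it assumes the package re-exports `tokenize` and `TokenType` at the top level, which is how `pytokens/cli.py` consumes them (`pytokens.tokenize(...)`, `pytokens.TokenType.whitespace`):

```python
# Minimal sketch. Assumes the top-level re-exports relied on by
# pytokens/cli.py: pytokens.tokenize and pytokens.TokenType.
import pytokens

source = 'x = f"{1 + 1}"\n'

# Default: f-strings arrive as post-3.12 FSTRING_START/MIDDLE/END tokens.
for token in pytokens.tokenize(source):
    if token.type == pytokens.TokenType.whitespace:
        continue  # cli.py's validator skips whitespace tokens the same way
    print(token.type, repr(source[token.start_index : token.end_index]))

# fstring_tokens=False routes the stream through merge_fstring_tokens(),
# collapsing each FSTRING_START ... FSTRING_END run back into a single
# pre-3.12 STRING token.
for token in pytokens.tokenize(source, fstring_tokens=False):
    print(token.type, repr(source[token.start_index : token.end_index]))
```

The same token stream is what `python -m pytokens <filepath>` prints for a single file; adding `--validate` instead cross-checks it token by token against CPython's `tokenize` module, as implemented in `validate()` above.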
diff --git a/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/LICENSE b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e821b86ff1faa271a48092581e32a4e243b9fe37 --- /dev/null +++ b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012-2017 Michele Lacchia + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/METADATA b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..00765926c0dd3906f1e65015d4cd9d0b87a2b506 --- /dev/null +++ b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/METADATA @@ -0,0 +1,263 @@ +Metadata-Version: 2.1 +Name: radon +Version: 6.0.1 +Summary: Code Metrics in Python +Home-page: https://radon.readthedocs.org/ +Download-URL: https://pypi.python.org/radon/ +Author: Michele Lacchia +Author-email: michelelacchia@gmail.com +License: MIT +Project-URL: Source, https://github.com/rubik/radon +Keywords: static analysis code complexity metrics +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Quality Assurance +Classifier: Topic :: Utilities +License-File: LICENSE +Requires-Dist: mando (<0.8,>=0.6) +Requires-Dist: colorama (==0.4.1) ; python_version <= "3.4" +Requires-Dist: colorama (>=0.4.1) ; python_version > "3.4" +Provides-Extra: toml +Requires-Dist: tomli (>=2.0.1) ; extra == 'toml' + +Radon +===== + +.. image:: https://img.shields.io/codacy/grade/623b84f5f6e6708c486f371e10da3610.svg?style=for-the-badge + :alt: Codacy badge + :target: https://www.codacy.com/app/rubik/radon/dashboard + +.. image:: https://img.shields.io/travis/rubik/radon/master.svg?style=for-the-badge + :alt: Travis-CI badge + :target: https://travis-ci.org/rubik/radon + +.. image:: https://img.shields.io/coveralls/rubik/radon/master.svg?style=for-the-badge + :alt: Coveralls badge + :target: https://coveralls.io/r/rubik/radon?branch=master + +.. image:: https://img.shields.io/pypi/v/radon.svg?style=for-the-badge + :alt: PyPI latest version badge + :target: https://pypi.python.org/pypi/radon + +.. image:: https://img.shields.io/pypi/l/radon.svg?style=for-the-badge + :alt: Radon license + :target: https://pypi.python.org/pypi/radon + + +---- + +Radon is a Python tool that computes various metrics from the source code. +Radon can compute: + +* **McCabe's complexity**, i.e. cyclomatic complexity +* **raw** metrics (these include SLOC, comment lines, blank lines, &c.) +* **Halstead** metrics (all of them) +* **Maintainability Index** (the one used in Visual Studio) + +Requirements +------------ + +Radon will run from **Python 2.7** to **Python 3.8** (except Python versions +from 3.0 to 3.3) with a single code base and without the need of tools like +2to3 or six. It can also run on **PyPy** without any problems (currently PyPy +3.5 v7.3.1 is used in tests). + +Radon depends on as few packages as possible. Currently only `mando` is +strictly required (for the CLI interface). `colorama` is also listed as a +dependency but if Radon cannot import it, the output simply will not be +colored. 
+ +**Note**: +**Python 2.6** was supported until version 1.5.0. Starting from version 2.0, it +is not supported anymore. + +Installation +------------ + +With Pip: + +.. code-block:: sh + + $ pip install radon + +If you want to configure Radon from `pyproject.toml` and you run Python <3.11, +you'll need the extra `toml` dependency: + +.. code-block:: sh + + $ pip install radon[toml] + +Or download the source and run the setup file: + +.. code-block:: sh + + $ python setup.py install + +Usage +----- + +Radon can be used either from the command line or programmatically. +Documentation is at https://radon.readthedocs.org/. + +Cyclomatic Complexity Example +----------------------------- + +Quick example: + +.. code-block:: sh + + $ radon cc sympy/solvers/solvers.py -a -nc + sympy/solvers/solvers.py + F 346:0 solve - F + F 1093:0 _solve - F + F 1434:0 _solve_system - F + F 2647:0 unrad - F + F 110:0 checksol - F + F 2238:0 _tsolve - F + F 2482:0 _invert - F + F 1862:0 solve_linear_system - E + F 1781:0 minsolve_linear_system - D + F 1636:0 solve_linear - D + F 2382:0 nsolve - C + + 11 blocks (classes, functions, methods) analyzed. + Average complexity: F (61.0) + +Explanation: + +* ``cc`` is the radon command to compute Cyclomatic Complexity +* ``-a`` tells radon to calculate the average complexity at the end. Note that + the average is computed among the *shown* blocks. If you want the total + average, among all the blocks, regardless of what is being shown, you should + use ``--total-average``. +* ``-nc`` tells radon to print only results with a complexity rank of C or + worse. Other examples: ``-na`` (from A to F), or ``-nd`` (from D to F). +* The letter *in front of* the line numbers represents the type of the block + (**F** means function, **M** method and **C** class). + +Actually it's even better: it's got colors! + +.. image:: https://cloud.githubusercontent.com/assets/238549/3707477/5793aeaa-1435-11e4-98fb-00e0bd8137f5.png + :alt: A screen of Radon's cc command + + +**Note about file encoding** + +On some systems, such as Windows, the default encoding is not UTF-8. If you are +using Unicode characters in your Python file and want to analyze it with Radon, +you'll have to set the `RADONFILESENCODING` environment variable to `UTF-8`. + + +On a Continuous Integration server +---------------------------------- + +If you are looking to use `radon` on a CI server you may be better off with +`xenon `_. Although still experimental, it will +fail (that means exiting with a non-zero exit code) when various thresholds are +surpassed. `radon` is more of a reporting tool, while `xenon` is a monitoring +one. + +If you are looking for more complete solutions, read the following sections. + +Codacy +++++++++++++ + +`Codacy `_ uses Radon `by default `_ to calculate metrics from the source code. + +Code Climate +++++++++++++ + +Radon is available as a `Code Climate Engine `_. +To understand how to add Radon's checks to your Code Climate Platform, head +over to their documentation: +https://docs.codeclimate.com/v1.0/docs/radon + +coala Analyzer +++++++++++++++ + +Radon is also supported in `coala `_. To add Radon's +checks to coala, simply add the ``RadonBear`` to one of the sections in +your ``.coafile``. + +CodeFactor +++++++++++++ + +`CodeFactor `_ uses Radon `out-of-the-box `_ to calculate Cyclomatic Complexity. + +Usage with Jupyter Notebooks +---------------------------- + +Radon can be used with ``.ipynb`` files to inspect code metrics for Python cells. 
Any ``%`` macros will be ignored in the metrics. + +.. note:: + + Jupyter Notebook support requires the optional ``nbformat`` package. To install, run ``pip install nbformat``. + +To enable scanning of Jupyter notebooks, add the ``--include-ipynb`` flag. + +To enable reporting of individual cells, add the ``--ipynb-cells`` flag. + +Quick example: + +.. code-block:: sh + + $ radon raw --include-ipynb --ipynb-cells . + example.ipynb + LOC: 63 + LLOC: 37 + SLOC: 37 + Comments: 3 + Single comments: 2 + Multi: 10 + Blank: 14 + - Comment Stats + (C % L): 5% + (C % S): 8% + (C + M % L): 21% + example.ipynb:[0] + LOC: 0 + LLOC: 0 + SLOC: 0 + Comments: 0 + Single comments: 0 + Multi: 0 + Blank: 0 + - Comment Stats + (C % L): 0% + (C % S): 0% + (C + M % L): 0% + example.ipynb:[1] + LOC: 2 + LLOC: 2 + SLOC: 2 + Comments: 0 + Single comments: 0 + Multi: 0 + Blank: 0 + - Comment Stats + (C % L): 0% + (C % S): 0% + (C + M % L): 0% + + + +Links +----- + +* Documentation: https://radon.readthedocs.org +* PyPI: http://pypi.python.org/pypi/radon +* Issue Tracker: https://github.com/rubik/radon/issues diff --git a/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/RECORD b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..26bd29b6900aa31430de4c59e8445042cb57b510 --- /dev/null +++ b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/RECORD @@ -0,0 +1,37 @@ +../../../bin/radon,sha256=OR1-_Gj36IzLtTK4TizvvyOLpQapqLr5AwHt8xIgxfo,319 +radon-6.0.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +radon-6.0.1.dist-info/LICENSE,sha256=AnDgq1j4-FOF9tv_7QLGRl-KMZWv7WBRSQGUQiM3Nu0,1064 +radon-6.0.1.dist-info/METADATA,sha256=7WR7fzZ8YKLX_Qio_zyqlaCmuJO6zBZNV0YzY2R56JA,8221 +radon-6.0.1.dist-info/RECORD,, +radon-6.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +radon-6.0.1.dist-info/WHEEL,sha256=bb2Ot9scclHKMOLDEHY6B2sicWOgugjFKaJsT7vwMQo,110 +radon-6.0.1.dist-info/entry_points.txt,sha256=gwSrn4bhwLBkNYxqishzC8vhJco9c76BADxEbgpRHg4,151 +radon-6.0.1.dist-info/top_level.txt,sha256=N9R-CC-5I1siF33ieWfaUWd1dRpxc0LX6lCLoX3zSy4,6 +radon/__init__.py,sha256=SnCrXz160RXpFMLcvipEWmRCz3hc5MTzQLfbb3FmnWc,415 +radon/__main__.py,sha256=b1o3IreG_O-hF1ctAdvt5Df37Dwyj42LsfoNhISnD0s,82 +radon/cli/__init__.py,sha256=sPqGf5-xwiXiarh43p24uzqZF8xQSgyEiwY4U2U3f6I,15930 +radon/cli/colors.py,sha256=Z_GP9T2GJyyOJYDB9zmD8ohMKk1qyuRfB4mrLVeLACk,1133 +radon/cli/harvest.py,sha256=Sr_HN0GYk6lf1bXwjDuG_CV7xDXuXCreTy4Cf-OviTQ,15326 +radon/cli/tools.py,sha256=NrKI-1NE3tMZgVokswWFY1hpeblMT2o_-4JKwKw-Xu4,21355 +radon/complexity.py,sha256=SKNLwZpXkIyZsjlcSfXAU5wmOR2IhCuWZJSyIQkcpog,3829 +radon/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +radon/contrib/flake8.py,sha256=OhQYY7gnGEDE2IRusAyefQEZmCuYL0rAK53u7mysOpM,2280 +radon/metrics.py,sha256=hy3GVM3IH-kmdjU7dR9PZdn36smYRfPEt3SXqrSAnGg,4913 +radon/raw.py,sha256=T6xHhDL3CL6lHK-3vaRCI8CnalBOWmCtiVNaGgZXYUQ,7824 +radon/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +radon/tests/conftest.py,sha256=mwIU5ApM06YVWm9HZNB9TQmv3C5wNjS2QKk8Z-YYUa4,681 +radon/tests/data/__init__.py,sha256=MDLIfJQQ-SmJCboLJtz4UtcWPi2khw9CQtyDJVjXgZs,59 +radon/tests/data/no_encoding.py,sha256=tLkl02dDn-zWuXBiI5yz0s_oYM56AnYL_Kt3XdHWsMo,28 +radon/tests/data/py3unicode.py,sha256=mo02AAkIBVkhIof3DlfNyL1MMgbcBM-jMHO9W3uksh4,59 +radon/tests/run.py,sha256=HM6vqifb-5wq_PAxTFPPxbmeL_seteIa6xTiFhvnMIA,535 
+radon/tests/test_cli.py,sha256=Y8IkYh-NAPBT3rBwtUapN_zqaTJ0m68msrLrbQSEvhk,7413 +radon/tests/test_cli_colors.py,sha256=NAyqr4Q4Ihn8nZWYrCfsVfSkC3Q8vJYXuEn_VgSOne0,559 +radon/tests/test_cli_harvest.py,sha256=qKxoxUyYTLWXXckItgN7IamxoZmro1XqukucCJSXtwo,12002 +radon/tests/test_cli_tools.py,sha256=sPSbeVDceqxeUusnHN0lONy4GQB0g-xjckU9sUQDRCs,16161 +radon/tests/test_complexity_utils.py,sha256=FtBPT6abI9QDU3H5naRm9kB9190Hd74PeaLv8w5gW1M,3121 +radon/tests/test_complexity_visitor.py,sha256=ncKYWCzY5IgH7eIHNxvFLIx0PzFsIKPFbeYaun7SgDQ,14012 +radon/tests/test_halstead.py,sha256=b1NfLeHN1W5KpCpR-nKZ1MAWtilKqalTchj-A1-Va24,1637 +radon/tests/test_ipynb.py,sha256=ENChK4eImD8r0VPcVm0PJFw9w-OgzprPh3ZybWe6vuc,4554 +radon/tests/test_other_metrics.py,sha256=xb8SgqKPUqH2X0nbFj7TSZ26oHK9Rhvcm-TNSRo684M,3677 +radon/tests/test_raw.py,sha256=aoQTZv9Gfy_rB2uKxwmOcXkm1iXC1dvRzO1WPdURKUo,8286 +radon/visitors.py,sha256=MFk5sFikEAeHtHyjArca-oJ5CMFHHMtWgHDE7AAmreU,15240 diff --git a/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/WHEEL b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..9d8f872bbf2275e6d1785238e90b0321f4b6f323 --- /dev/null +++ b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/entry_points.txt b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..d47a7fbb48fd0d383aeb536f89d119169c381c26 --- /dev/null +++ b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/entry_points.txt @@ -0,0 +1,8 @@ +[console_scripts] +radon = radon:main + +[flake8.extension] +R70 = radon.contrib.flake8:Flake8Checker + +[setuptools.installation] +eggsecutable = radon:main diff --git a/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..d76231389f7cefb521cf7e6f0254e3d1647ab376 --- /dev/null +++ b/py311/lib/python3.11/site-packages/radon-6.0.1.dist-info/top_level.txt @@ -0,0 +1 @@ +radon diff --git a/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/METADATA b/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..b31773e3ea2d0e0e16e77d33fefd8b8f8ff3fc85 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/METADATA @@ -0,0 +1,133 @@ +Metadata-Version: 2.4 +Name: requests +Version: 2.32.5 +Summary: Python HTTP for Humans. 
+Home-page: https://requests.readthedocs.io +Author: Kenneth Reitz +Author-email: me@kennethreitz.org +License: Apache-2.0 +Project-URL: Documentation, https://requests.readthedocs.io +Project-URL: Source, https://github.com/psf/requests +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: Software Development :: Libraries +Requires-Python: >=3.9 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: charset_normalizer<4,>=2 +Requires-Dist: idna<4,>=2.5 +Requires-Dist: urllib3<3,>=1.21.1 +Requires-Dist: certifi>=2017.4.17 +Provides-Extra: security +Provides-Extra: socks +Requires-Dist: PySocks!=1.5.7,>=1.5.6; extra == "socks" +Provides-Extra: use-chardet-on-py3 +Requires-Dist: chardet<6,>=3.0.2; extra == "use-chardet-on-py3" +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: license +Dynamic: license-file +Dynamic: project-url +Dynamic: provides-extra +Dynamic: requires-dist +Dynamic: requires-python +Dynamic: summary + +# Requests + +**Requests** is a simple, yet elegant, HTTP library. + +```python +>>> import requests +>>> r = requests.get('https://httpbin.org/basic-auth/user/pass', auth=('user', 'pass')) +>>> r.status_code +200 +>>> r.headers['content-type'] +'application/json; charset=utf8' +>>> r.encoding +'utf-8' +>>> r.text +'{"authenticated": true, ...' +>>> r.json() +{'authenticated': True, ...} +``` + +Requests allows you to send HTTP/1.1 requests extremely easily. There’s no need to manually add query strings to your URLs, or to form-encode your `PUT` & `POST` data — but nowadays, just use the `json` method! + +Requests is one of the most downloaded Python packages today, pulling in around `30M downloads / week`— according to GitHub, Requests is currently [depended upon](https://github.com/psf/requests/network/dependents?package_id=UGFja2FnZS01NzA4OTExNg%3D%3D) by `1,000,000+` repositories. You may certainly put your trust in this code. + +[![Downloads](https://static.pepy.tech/badge/requests/month)](https://pepy.tech/project/requests) +[![Supported Versions](https://img.shields.io/pypi/pyversions/requests.svg)](https://pypi.org/project/requests) +[![Contributors](https://img.shields.io/github/contributors/psf/requests.svg)](https://github.com/psf/requests/graphs/contributors) + +## Installing Requests and Supported Versions + +Requests is available on PyPI: + +```console +$ python -m pip install requests +``` + +Requests officially supports Python 3.9+. 
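+
+Optional features are published as extras; for example, the `socks` extra
+declared in the metadata above pulls in PySocks for SOCKS proxy support:
+
+```console
+$ python -m pip install "requests[socks]"
+```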
+ +## Supported Features & Best–Practices + +Requests is ready for the demands of building robust and reliable HTTP–speaking applications, for the needs of today. + +- Keep-Alive & Connection Pooling +- International Domains and URLs +- Sessions with Cookie Persistence +- Browser-style TLS/SSL Verification +- Basic & Digest Authentication +- Familiar `dict`–like Cookies +- Automatic Content Decompression and Decoding +- Multi-part File Uploads +- SOCKS Proxy Support +- Connection Timeouts +- Streaming Downloads +- Automatic honoring of `.netrc` +- Chunked HTTP Requests + +## API Reference and User Guide available on [Read the Docs](https://requests.readthedocs.io) + +[![Read the Docs](https://raw.githubusercontent.com/psf/requests/main/ext/ss.png)](https://requests.readthedocs.io) + +## Cloning the repository + +When cloning the Requests repository, you may need to add the `-c +fetch.fsck.badTimezone=ignore` flag to avoid an error about a bad commit timestamp (see +[this issue](https://github.com/psf/requests/issues/2690) for more background): + +```shell +git clone -c fetch.fsck.badTimezone=ignore https://github.com/psf/requests.git +``` + +You can also apply this setting to your global Git config: + +```shell +git config --global fetch.fsck.badTimezone ignore +``` + +--- + +[![Kenneth Reitz](https://raw.githubusercontent.com/psf/requests/main/ext/kr.png)](https://kennethreitz.org) [![Python Software Foundation](https://raw.githubusercontent.com/psf/requests/main/ext/psf.png)](https://www.python.org/psf) diff --git a/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/RECORD b/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..5fd79a6be91e1c2c5ff19760558afe0d4d8d8fb6 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/RECORD @@ -0,0 +1,25 @@ +requests-2.32.5.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +requests-2.32.5.dist-info/METADATA,sha256=ZbWgjagfSRVRPnYJZf8Ut1GPZbe7Pv4NqzZLvMTUDLA,4945 +requests-2.32.5.dist-info/RECORD,, +requests-2.32.5.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +requests-2.32.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +requests-2.32.5.dist-info/licenses/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142 +requests-2.32.5.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9 +requests/__init__.py,sha256=4xaAERmPDIBPsa2PsjpU9r06yooK-2mZKHTZAhWRWts,5072 +requests/__version__.py,sha256=QKDceK8K_ujqwDDc3oYrR0odOBYgKVOQQ5vFap_G_cg,435 +requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495 +requests/adapters.py,sha256=8nX113gbb123aUtx2ETkAN_6IsYX-M2fRoLGluTEcRk,26285 +requests/api.py,sha256=_Zb9Oa7tzVIizTKwFrPjDEY9ejtm_OnSRERnADxGsQs,6449 +requests/auth.py,sha256=kF75tqnLctZ9Mf_hm9TZIj4cQWnN5uxRz8oWsx5wmR0,10186 +requests/certs.py,sha256=Z9Sb410Anv6jUFTyss0jFFhU6xst8ctELqfy8Ev23gw,429 +requests/compat.py,sha256=J7sIjR6XoDGp5JTVzOxkK5fSoUVUa_Pjc7iRZhAWGmI,2142 +requests/cookies.py,sha256=bNi-iqEj4NPZ00-ob-rHvzkvObzN3lEpgw3g6paS3Xw,18590 +requests/exceptions.py,sha256=jJPS1UWATs86ShVUaLorTiJb1SaGuoNEWgICJep-VkY,4260 +requests/help.py,sha256=gPX5d_H7Xd88aDABejhqGgl9B1VFRTt5BmiYvL3PzIQ,3875 +requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733 +requests/models.py,sha256=MjZdZ4k7tnw-1nz5PKShjmPmqyk0L6DciwnFngb_Vk4,35510 
+requests/packages.py,sha256=_g0gZ681UyAlKHRjH6kanbaoxx2eAb6qzcXiODyTIoc,904 +requests/sessions.py,sha256=Cl1dpEnOfwrzzPbku-emepNeN4Rt_0_58Iy2x-JGTm8,30503 +requests/status_codes.py,sha256=iJUAeA25baTdw-6PfD0eF4qhpINDJRJI-yaMqxs4LEI,4322 +requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912 +requests/utils.py,sha256=WqU86rZ3wvhC-tQjWcjtH_HEKZwWB3iWCZV6SW5DEdQ,33213 diff --git a/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/WHEEL b/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..e7fa31b6f3f78deb1022c1f7927f07d4d16da822 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..f2293605cf1b01dca72aad0a15c45b72ed5429a2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests-2.32.5.dist-info/top_level.txt @@ -0,0 +1 @@ +requests diff --git a/py311/lib/python3.11/site-packages/requests/__init__.py b/py311/lib/python3.11/site-packages/requests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..051cda1340effaa0706b46dd68ac002ceda3d45c --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/__init__.py @@ -0,0 +1,184 @@ +# __ +# /__) _ _ _ _ _/ _ +# / ( (- (/ (/ (- _) / _) +# / + +""" +Requests HTTP Library +~~~~~~~~~~~~~~~~~~~~~ + +Requests is an HTTP library, written in Python, for human beings. +Basic GET usage: + + >>> import requests + >>> r = requests.get('https://www.python.org') + >>> r.status_code + 200 + >>> b'Python is a programming language' in r.content + True + +... or POST: + + >>> payload = dict(key1='value1', key2='value2') + >>> r = requests.post('https://httpbin.org/post', data=payload) + >>> print(r.text) + { + ... + "form": { + "key1": "value1", + "key2": "value2" + }, + ... + } + +The other HTTP methods are supported - see `requests.api`. Full documentation +is at . + +:copyright: (c) 2017 by Kenneth Reitz. +:license: Apache 2.0, see LICENSE for more details. +""" + +import warnings + +import urllib3 + +from .exceptions import RequestsDependencyWarning + +try: + from charset_normalizer import __version__ as charset_normalizer_version +except ImportError: + charset_normalizer_version = None + +try: + from chardet import __version__ as chardet_version +except ImportError: + chardet_version = None + + +def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): + urllib3_version = urllib3_version.split(".") + assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git. + + # Sometimes, urllib3 only reports its version as 16.1. + if len(urllib3_version) == 2: + urllib3_version.append("0") + + # Check urllib3 for compatibility. 
+ major, minor, patch = urllib3_version # noqa: F811 + major, minor, patch = int(major), int(minor), int(patch) + # urllib3 >= 1.21.1 + assert major >= 1 + if major == 1: + assert minor >= 21 + + # Check charset_normalizer for compatibility. + if chardet_version: + major, minor, patch = chardet_version.split(".")[:3] + major, minor, patch = int(major), int(minor), int(patch) + # chardet_version >= 3.0.2, < 6.0.0 + assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0) + elif charset_normalizer_version: + major, minor, patch = charset_normalizer_version.split(".")[:3] + major, minor, patch = int(major), int(minor), int(patch) + # charset_normalizer >= 2.0.0 < 4.0.0 + assert (2, 0, 0) <= (major, minor, patch) < (4, 0, 0) + else: + warnings.warn( + "Unable to find acceptable character detection dependency " + "(chardet or charset_normalizer).", + RequestsDependencyWarning, + ) + + +def _check_cryptography(cryptography_version): + # cryptography < 1.3.4 + try: + cryptography_version = list(map(int, cryptography_version.split("."))) + except ValueError: + return + + if cryptography_version < [1, 3, 4]: + warning = "Old version of cryptography ({}) may cause slowdown.".format( + cryptography_version + ) + warnings.warn(warning, RequestsDependencyWarning) + + +# Check imported dependencies for compatibility. +try: + check_compatibility( + urllib3.__version__, chardet_version, charset_normalizer_version + ) +except (AssertionError, ValueError): + warnings.warn( + "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " + "version!".format( + urllib3.__version__, chardet_version, charset_normalizer_version + ), + RequestsDependencyWarning, + ) + +# Attempt to enable urllib3's fallback for SNI support +# if the standard library doesn't support SNI or the +# 'ssl' library isn't available. +try: + try: + import ssl + except ImportError: + ssl = None + + if not getattr(ssl, "HAS_SNI", False): + from urllib3.contrib import pyopenssl + + pyopenssl.inject_into_urllib3() + + # Check cryptography version + from cryptography import __version__ as cryptography_version + + _check_cryptography(cryptography_version) +except ImportError: + pass + +# urllib3's DependencyWarnings should be silenced. +from urllib3.exceptions import DependencyWarning + +warnings.simplefilter("ignore", DependencyWarning) + +# Set default logging handler to avoid "No handler found" warnings. +import logging +from logging import NullHandler + +from . import packages, utils +from .__version__ import ( + __author__, + __author_email__, + __build__, + __cake__, + __copyright__, + __description__, + __license__, + __title__, + __url__, + __version__, +) +from .api import delete, get, head, options, patch, post, put, request +from .exceptions import ( + ConnectionError, + ConnectTimeout, + FileModeWarning, + HTTPError, + JSONDecodeError, + ReadTimeout, + RequestException, + Timeout, + TooManyRedirects, + URLRequired, +) +from .models import PreparedRequest, Request, Response +from .sessions import Session, session +from .status_codes import codes + +logging.getLogger(__name__).addHandler(NullHandler()) + +# FileModeWarnings go off per the default. 
+warnings.simplefilter("default", FileModeWarning, append=True) diff --git a/py311/lib/python3.11/site-packages/requests/__version__.py b/py311/lib/python3.11/site-packages/requests/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..effdd98cf159371a36cb3ffaa8ad3e31ee3885f2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/__version__.py @@ -0,0 +1,14 @@ +# .-. .-. .-. . . .-. .-. .-. .-. +# |( |- |.| | | |- `-. | `-. +# ' ' `-' `-`.`-' `-' `-' ' `-' + +__title__ = "requests" +__description__ = "Python HTTP for Humans." +__url__ = "https://requests.readthedocs.io" +__version__ = "2.32.5" +__build__ = 0x023205 +__author__ = "Kenneth Reitz" +__author_email__ = "me@kennethreitz.org" +__license__ = "Apache-2.0" +__copyright__ = "Copyright Kenneth Reitz" +__cake__ = "\u2728 \U0001f370 \u2728" diff --git a/py311/lib/python3.11/site-packages/requests/_internal_utils.py b/py311/lib/python3.11/site-packages/requests/_internal_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f2cf635e2937ee9b123a1498c5c5f723a6e20084 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/_internal_utils.py @@ -0,0 +1,50 @@ +""" +requests._internal_utils +~~~~~~~~~~~~~~ + +Provides utility functions that are consumed internally by Requests +which depend on extremely few external helpers (such as compat) +""" +import re + +from .compat import builtin_str + +_VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$") +_VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$") +_VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$") +_VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$") + +_HEADER_VALIDATORS_STR = (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR) +_HEADER_VALIDATORS_BYTE = (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE) +HEADER_VALIDATORS = { + bytes: _HEADER_VALIDATORS_BYTE, + str: _HEADER_VALIDATORS_STR, +} + + +def to_native_string(string, encoding="ascii"): + """Given a string object, regardless of type, returns a representation of + that string in the native string type, encoding and decoding where + necessary. This assumes ASCII unless told otherwise. + """ + if isinstance(string, builtin_str): + out = string + else: + out = string.decode(encoding) + + return out + + +def unicode_is_ascii(u_string): + """Determine if unicode string only contains ASCII characters. + + :param str u_string: unicode string to check. Must be unicode + and not Python 2 `str`. + :rtype: bool + """ + assert isinstance(u_string, str) + try: + u_string.encode("ascii") + return True + except UnicodeEncodeError: + return False diff --git a/py311/lib/python3.11/site-packages/requests/adapters.py b/py311/lib/python3.11/site-packages/requests/adapters.py new file mode 100644 index 0000000000000000000000000000000000000000..670c92767c5420a21538688132ce1f4f45e45873 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/adapters.py @@ -0,0 +1,696 @@ +""" +requests.adapters +~~~~~~~~~~~~~~~~~ + +This module contains the transport adapters that Requests uses to define +and maintain connections. 
+""" + +import os.path +import socket # noqa: F401 +import typing +import warnings + +from urllib3.exceptions import ClosedPoolError, ConnectTimeoutError +from urllib3.exceptions import HTTPError as _HTTPError +from urllib3.exceptions import InvalidHeader as _InvalidHeader +from urllib3.exceptions import ( + LocationValueError, + MaxRetryError, + NewConnectionError, + ProtocolError, +) +from urllib3.exceptions import ProxyError as _ProxyError +from urllib3.exceptions import ReadTimeoutError, ResponseError +from urllib3.exceptions import SSLError as _SSLError +from urllib3.poolmanager import PoolManager, proxy_from_url +from urllib3.util import Timeout as TimeoutSauce +from urllib3.util import parse_url +from urllib3.util.retry import Retry + +from .auth import _basic_auth_str +from .compat import basestring, urlparse +from .cookies import extract_cookies_to_jar +from .exceptions import ( + ConnectionError, + ConnectTimeout, + InvalidHeader, + InvalidProxyURL, + InvalidSchema, + InvalidURL, + ProxyError, + ReadTimeout, + RetryError, + SSLError, +) +from .models import Response +from .structures import CaseInsensitiveDict +from .utils import ( + DEFAULT_CA_BUNDLE_PATH, + extract_zipped_paths, + get_auth_from_url, + get_encoding_from_headers, + prepend_scheme_if_needed, + select_proxy, + urldefragauth, +) + +try: + from urllib3.contrib.socks import SOCKSProxyManager +except ImportError: + + def SOCKSProxyManager(*args, **kwargs): + raise InvalidSchema("Missing dependencies for SOCKS support.") + + +if typing.TYPE_CHECKING: + from .models import PreparedRequest + + +DEFAULT_POOLBLOCK = False +DEFAULT_POOLSIZE = 10 +DEFAULT_RETRIES = 0 +DEFAULT_POOL_TIMEOUT = None + + +def _urllib3_request_context( + request: "PreparedRequest", + verify: "bool | str | None", + client_cert: "typing.Tuple[str, str] | str | None", + poolmanager: "PoolManager", +) -> "(typing.Dict[str, typing.Any], typing.Dict[str, typing.Any])": + host_params = {} + pool_kwargs = {} + parsed_request_url = urlparse(request.url) + scheme = parsed_request_url.scheme.lower() + port = parsed_request_url.port + + cert_reqs = "CERT_REQUIRED" + if verify is False: + cert_reqs = "CERT_NONE" + elif isinstance(verify, str): + if not os.path.isdir(verify): + pool_kwargs["ca_certs"] = verify + else: + pool_kwargs["ca_cert_dir"] = verify + pool_kwargs["cert_reqs"] = cert_reqs + if client_cert is not None: + if isinstance(client_cert, tuple) and len(client_cert) == 2: + pool_kwargs["cert_file"] = client_cert[0] + pool_kwargs["key_file"] = client_cert[1] + else: + # According to our docs, we allow users to specify just the client + # cert path + pool_kwargs["cert_file"] = client_cert + host_params = { + "scheme": scheme, + "host": parsed_request_url.hostname, + "port": port, + } + return host_params, pool_kwargs + + +class BaseAdapter: + """The Base Transport Adapter""" + + def __init__(self): + super().__init__() + + def send( + self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None + ): + """Sends PreparedRequest object. Returns Response object. + + :param request: The :class:`PreparedRequest ` being sent. + :param stream: (optional) Whether to stream the request content. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. 
+ :type timeout: float or tuple + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use + :param cert: (optional) Any user-provided SSL certificate to be trusted. + :param proxies: (optional) The proxies dictionary to apply to the request. + """ + raise NotImplementedError + + def close(self): + """Cleans up adapter specific items.""" + raise NotImplementedError + + +class HTTPAdapter(BaseAdapter): + """The built-in HTTP Adapter for urllib3. + + Provides a general-case interface for Requests sessions to contact HTTP and + HTTPS urls by implementing the Transport Adapter interface. This class will + usually be created by the :class:`Session ` class under the + covers. + + :param pool_connections: The number of urllib3 connection pools to cache. + :param pool_maxsize: The maximum number of connections to save in the pool. + :param max_retries: The maximum number of retries each connection + should attempt. Note, this applies only to failed DNS lookups, socket + connections and connection timeouts, never to requests where data has + made it to the server. By default, Requests does not retry failed + connections. If you need granular control over the conditions under + which we retry a request, import urllib3's ``Retry`` class and pass + that instead. + :param pool_block: Whether the connection pool should block for connections. + + Usage:: + + >>> import requests + >>> s = requests.Session() + >>> a = requests.adapters.HTTPAdapter(max_retries=3) + >>> s.mount('http://', a) + """ + + __attrs__ = [ + "max_retries", + "config", + "_pool_connections", + "_pool_maxsize", + "_pool_block", + ] + + def __init__( + self, + pool_connections=DEFAULT_POOLSIZE, + pool_maxsize=DEFAULT_POOLSIZE, + max_retries=DEFAULT_RETRIES, + pool_block=DEFAULT_POOLBLOCK, + ): + if max_retries == DEFAULT_RETRIES: + self.max_retries = Retry(0, read=False) + else: + self.max_retries = Retry.from_int(max_retries) + self.config = {} + self.proxy_manager = {} + + super().__init__() + + self._pool_connections = pool_connections + self._pool_maxsize = pool_maxsize + self._pool_block = pool_block + + self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) + + def __getstate__(self): + return {attr: getattr(self, attr, None) for attr in self.__attrs__} + + def __setstate__(self, state): + # Can't handle by adding 'proxy_manager' to self.__attrs__ because + # self.poolmanager uses a lambda function, which isn't pickleable. + self.proxy_manager = {} + self.config = {} + + for attr, value in state.items(): + setattr(self, attr, value) + + self.init_poolmanager( + self._pool_connections, self._pool_maxsize, block=self._pool_block + ) + + def init_poolmanager( + self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs + ): + """Initializes a urllib3 PoolManager. + + This method should not be called from user code, and is only + exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param connections: The number of urllib3 connection pools to cache. + :param maxsize: The maximum number of connections to save in the pool. + :param block: Block when no free connections are available. + :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. 
+ """ + # save these values for pickling + self._pool_connections = connections + self._pool_maxsize = maxsize + self._pool_block = block + + self.poolmanager = PoolManager( + num_pools=connections, + maxsize=maxsize, + block=block, + **pool_kwargs, + ) + + def proxy_manager_for(self, proxy, **proxy_kwargs): + """Return urllib3 ProxyManager for the given proxy. + + This method should not be called from user code, and is only + exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param proxy: The proxy to return a urllib3 ProxyManager for. + :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. + :returns: ProxyManager + :rtype: urllib3.ProxyManager + """ + if proxy in self.proxy_manager: + manager = self.proxy_manager[proxy] + elif proxy.lower().startswith("socks"): + username, password = get_auth_from_url(proxy) + manager = self.proxy_manager[proxy] = SOCKSProxyManager( + proxy, + username=username, + password=password, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block, + **proxy_kwargs, + ) + else: + proxy_headers = self.proxy_headers(proxy) + manager = self.proxy_manager[proxy] = proxy_from_url( + proxy, + proxy_headers=proxy_headers, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block, + **proxy_kwargs, + ) + + return manager + + def cert_verify(self, conn, url, verify, cert): + """Verify a SSL certificate. This method should not be called from user + code, and is only exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param conn: The urllib3 connection object associated with the cert. + :param url: The requested URL. + :param verify: Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use + :param cert: The SSL certificate to verify. + """ + if url.lower().startswith("https") and verify: + cert_loc = None + + # Allow self-specified cert location. + if verify is not True: + cert_loc = verify + + if not cert_loc: + cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) + + if not cert_loc or not os.path.exists(cert_loc): + raise OSError( + f"Could not find a suitable TLS CA certificate bundle, " + f"invalid path: {cert_loc}" + ) + + conn.cert_reqs = "CERT_REQUIRED" + + if not os.path.isdir(cert_loc): + conn.ca_certs = cert_loc + else: + conn.ca_cert_dir = cert_loc + else: + conn.cert_reqs = "CERT_NONE" + conn.ca_certs = None + conn.ca_cert_dir = None + + if cert: + if not isinstance(cert, basestring): + conn.cert_file = cert[0] + conn.key_file = cert[1] + else: + conn.cert_file = cert + conn.key_file = None + if conn.cert_file and not os.path.exists(conn.cert_file): + raise OSError( + f"Could not find the TLS certificate file, " + f"invalid path: {conn.cert_file}" + ) + if conn.key_file and not os.path.exists(conn.key_file): + raise OSError( + f"Could not find the TLS key file, invalid path: {conn.key_file}" + ) + + def build_response(self, req, resp): + """Builds a :class:`Response ` object from a urllib3 + response. This should not be called from user code, and is only exposed + for use when subclassing the + :class:`HTTPAdapter ` + + :param req: The :class:`PreparedRequest ` used to generate the response. + :param resp: The urllib3 response object. + :rtype: requests.Response + """ + response = Response() + + # Fallback to None if there's no status_code, for whatever reason. 
+ response.status_code = getattr(resp, "status", None) + + # Make headers case-insensitive. + response.headers = CaseInsensitiveDict(getattr(resp, "headers", {})) + + # Set encoding. + response.encoding = get_encoding_from_headers(response.headers) + response.raw = resp + response.reason = response.raw.reason + + if isinstance(req.url, bytes): + response.url = req.url.decode("utf-8") + else: + response.url = req.url + + # Add new cookies from the server. + extract_cookies_to_jar(response.cookies, req, resp) + + # Give the Response some context. + response.request = req + response.connection = self + + return response + + def build_connection_pool_key_attributes(self, request, verify, cert=None): + """Build the PoolKey attributes used by urllib3 to return a connection. + + This looks at the PreparedRequest, the user-specified verify value, + and the value of the cert parameter to determine what PoolKey values + to use to select a connection from a given urllib3 Connection Pool. + + The SSL related pool key arguments are not consistently set. As of + this writing, use the following to determine what keys may be in that + dictionary: + + * If ``verify`` is ``True``, ``"ssl_context"`` will be set and will be the + default Requests SSL Context + * If ``verify`` is ``False``, ``"ssl_context"`` will not be set but + ``"cert_reqs"`` will be set + * If ``verify`` is a string, (i.e., it is a user-specified trust bundle) + ``"ca_certs"`` will be set if the string is not a directory recognized + by :py:func:`os.path.isdir`, otherwise ``"ca_cert_dir"`` will be + set. + * If ``"cert"`` is specified, ``"cert_file"`` will always be set. If + ``"cert"`` is a tuple with a second item, ``"key_file"`` will also + be present + + To override these settings, one may subclass this class, call this + method and use the above logic to change parameters as desired. For + example, if one wishes to use a custom :py:class:`ssl.SSLContext` one + must both set ``"ssl_context"`` and based on what else they require, + alter the other keys to ensure the desired behaviour. + + :param request: + The PreparedReqest being sent over the connection. + :type request: + :class:`~requests.models.PreparedRequest` + :param verify: + Either a boolean, in which case it controls whether + we verify the server's TLS certificate, or a string, in which case it + must be a path to a CA bundle to use. + :param cert: + (optional) Any user-provided SSL certificate for client + authentication (a.k.a., mTLS). This may be a string (i.e., just + the path to a file which holds both certificate and key) or a + tuple of length 2 with the certificate file path and key file + path. + :returns: + A tuple of two dictionaries. The first is the "host parameters" + portion of the Pool Key including scheme, hostname, and port. The + second is a dictionary of SSLContext related parameters. + """ + return _urllib3_request_context(request, verify, cert, self.poolmanager) + + def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None): + """Returns a urllib3 connection for the given request and TLS settings. + This should not be called from user code, and is only exposed for use + when subclassing the :class:`HTTPAdapter `. + + :param request: + The :class:`PreparedRequest ` object to be sent + over the connection. + :param verify: + Either a boolean, in which case it controls whether we verify the + server's TLS certificate, or a string, in which case it must be a + path to a CA bundle to use. 
+ :param proxies: + (optional) The proxies dictionary to apply to the request. + :param cert: + (optional) Any user-provided SSL certificate to be used for client + authentication (a.k.a., mTLS). + :rtype: + urllib3.ConnectionPool + """ + proxy = select_proxy(request.url, proxies) + try: + host_params, pool_kwargs = self.build_connection_pool_key_attributes( + request, + verify, + cert, + ) + except ValueError as e: + raise InvalidURL(e, request=request) + if proxy: + proxy = prepend_scheme_if_needed(proxy, "http") + proxy_url = parse_url(proxy) + if not proxy_url.host: + raise InvalidProxyURL( + "Please check proxy URL. It is malformed " + "and could be missing the host." + ) + proxy_manager = self.proxy_manager_for(proxy) + conn = proxy_manager.connection_from_host( + **host_params, pool_kwargs=pool_kwargs + ) + else: + # Only scheme should be lower case + conn = self.poolmanager.connection_from_host( + **host_params, pool_kwargs=pool_kwargs + ) + + return conn + + def get_connection(self, url, proxies=None): + """DEPRECATED: Users should move to `get_connection_with_tls_context` + for all subclasses of HTTPAdapter using Requests>=2.32.2. + + Returns a urllib3 connection for the given URL. This should not be + called from user code, and is only exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param url: The URL to connect to. + :param proxies: (optional) A Requests-style dictionary of proxies used on this request. + :rtype: urllib3.ConnectionPool + """ + warnings.warn( + ( + "`get_connection` has been deprecated in favor of " + "`get_connection_with_tls_context`. Custom HTTPAdapter subclasses " + "will need to migrate for Requests>=2.32.2. Please see " + "https://github.com/psf/requests/pull/6710 for more details." + ), + DeprecationWarning, + ) + proxy = select_proxy(url, proxies) + + if proxy: + proxy = prepend_scheme_if_needed(proxy, "http") + proxy_url = parse_url(proxy) + if not proxy_url.host: + raise InvalidProxyURL( + "Please check proxy URL. It is malformed " + "and could be missing the host." + ) + proxy_manager = self.proxy_manager_for(proxy) + conn = proxy_manager.connection_from_url(url) + else: + # Only scheme should be lower case + parsed = urlparse(url) + url = parsed.geturl() + conn = self.poolmanager.connection_from_url(url) + + return conn + + def close(self): + """Disposes of any internal state. + + Currently, this closes the PoolManager and any active ProxyManager, + which closes any pooled connections. + """ + self.poolmanager.clear() + for proxy in self.proxy_manager.values(): + proxy.clear() + + def request_url(self, request, proxies): + """Obtain the url to use when making the final request. + + If the message is being sent through a HTTP proxy, the full URL has to + be used. Otherwise, we should only use the path portion of the URL. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param request: The :class:`PreparedRequest ` being sent. + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. 
+ :rtype: str + """ + proxy = select_proxy(request.url, proxies) + scheme = urlparse(request.url).scheme + + is_proxied_http_request = proxy and scheme != "https" + using_socks_proxy = False + if proxy: + proxy_scheme = urlparse(proxy).scheme.lower() + using_socks_proxy = proxy_scheme.startswith("socks") + + url = request.path_url + if url.startswith("//"): # Don't confuse urllib3 + url = f"/{url.lstrip('/')}" + + if is_proxied_http_request and not using_socks_proxy: + url = urldefragauth(request.url) + + return url + + def add_headers(self, request, **kwargs): + """Add any headers needed by the connection. As of v2.0 this does + nothing by default, but is left for overriding by users that subclass + the :class:`HTTPAdapter `. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param request: The :class:`PreparedRequest ` to add headers to. + :param kwargs: The keyword arguments from the call to send(). + """ + pass + + def proxy_headers(self, proxy): + """Returns a dictionary of the headers to add to any request sent + through a proxy. This works with urllib3 magic to ensure that they are + correctly sent to the proxy, rather than in a tunnelled request if + CONNECT is being used. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param proxy: The url of the proxy being used for this request. + :rtype: dict + """ + headers = {} + username, password = get_auth_from_url(proxy) + + if username: + headers["Proxy-Authorization"] = _basic_auth_str(username, password) + + return headers + + def send( + self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None + ): + """Sends PreparedRequest object. Returns Response object. + + :param request: The :class:`PreparedRequest ` being sent. + :param stream: (optional) Whether to stream the request content. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple or urllib3 Timeout object + :param verify: (optional) Either a boolean, in which case it controls whether + we verify the server's TLS certificate, or a string, in which case it + must be a path to a CA bundle to use + :param cert: (optional) Any user-provided SSL certificate to be trusted. + :param proxies: (optional) The proxies dictionary to apply to the request. + :rtype: requests.Response + """ + + try: + conn = self.get_connection_with_tls_context( + request, verify, proxies=proxies, cert=cert + ) + except LocationValueError as e: + raise InvalidURL(e, request=request) + + self.cert_verify(conn, request.url, verify, cert) + url = self.request_url(request, proxies) + self.add_headers( + request, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + ) + + chunked = not (request.body is None or "Content-Length" in request.headers) + + if isinstance(timeout, tuple): + try: + connect, read = timeout + timeout = TimeoutSauce(connect=connect, read=read) + except ValueError: + raise ValueError( + f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " + f"or a single float to set both timeouts to the same value." 
+ ) + elif isinstance(timeout, TimeoutSauce): + pass + else: + timeout = TimeoutSauce(connect=timeout, read=timeout) + + try: + resp = conn.urlopen( + method=request.method, + url=url, + body=request.body, + headers=request.headers, + redirect=False, + assert_same_host=False, + preload_content=False, + decode_content=False, + retries=self.max_retries, + timeout=timeout, + chunked=chunked, + ) + + except (ProtocolError, OSError) as err: + raise ConnectionError(err, request=request) + + except MaxRetryError as e: + if isinstance(e.reason, ConnectTimeoutError): + # TODO: Remove this in 3.0.0: see #2811 + if not isinstance(e.reason, NewConnectionError): + raise ConnectTimeout(e, request=request) + + if isinstance(e.reason, ResponseError): + raise RetryError(e, request=request) + + if isinstance(e.reason, _ProxyError): + raise ProxyError(e, request=request) + + if isinstance(e.reason, _SSLError): + # This branch is for urllib3 v1.22 and later. + raise SSLError(e, request=request) + + raise ConnectionError(e, request=request) + + except ClosedPoolError as e: + raise ConnectionError(e, request=request) + + except _ProxyError as e: + raise ProxyError(e) + + except (_SSLError, _HTTPError) as e: + if isinstance(e, _SSLError): + # This branch is for urllib3 versions earlier than v1.22 + raise SSLError(e, request=request) + elif isinstance(e, ReadTimeoutError): + raise ReadTimeout(e, request=request) + elif isinstance(e, _InvalidHeader): + raise InvalidHeader(e, request=request) + else: + raise + + return self.build_response(request, resp) diff --git a/py311/lib/python3.11/site-packages/requests/api.py b/py311/lib/python3.11/site-packages/requests/api.py new file mode 100644 index 0000000000000000000000000000000000000000..5960744552e7f8eea815429e7bdad38b0cc2741d --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/api.py @@ -0,0 +1,157 @@ +""" +requests.api +~~~~~~~~~~~~ + +This module implements the Requests API. + +:copyright: (c) 2012 by Kenneth Reitz. +:license: Apache2, see LICENSE for more details. +""" + +from . import sessions + + +def request(method, url, **kwargs): + """Constructs and sends a :class:`Request `. + + :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``. + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary, list of tuples or bytes to send + in the query string for the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. + :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. + :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. + ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` + or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content_type'`` is a string + defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers + to add for the file. + :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. 
+    :param timeout: (optional) How many seconds to wait for the server to send data
+        before giving up, as a float, or a :ref:`(connect timeout, read
+        timeout) <timeouts>` tuple.
+    :type timeout: float or tuple
+    :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
+    :type allow_redirects: bool
+    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
+    :param verify: (optional) Either a boolean, in which case it controls whether we verify
+        the server's TLS certificate, or a string, in which case it must be a path
+        to a CA bundle to use. Defaults to ``True``.
+    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
+    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+
+    Usage::
+
+      >>> import requests
+      >>> req = requests.request('GET', 'https://httpbin.org/get')
+      >>> req
+      <Response [200]>
+    """
+
+    # By using the 'with' statement we are sure the session is closed, thus we
+    # avoid leaving sockets open which can trigger a ResourceWarning in some
+    # cases, and look like a memory leak in others.
+    with sessions.Session() as session:
+        return session.request(method=method, url=url, **kwargs)
+
+
+def get(url, params=None, **kwargs):
+    r"""Sends a GET request.
+
+    :param url: URL for the new :class:`Request` object.
+    :param params: (optional) Dictionary, list of tuples or bytes to send
+        in the query string for the :class:`Request`.
+    :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+    """
+
+    return request("get", url, params=params, **kwargs)
+
+
+def options(url, **kwargs):
+    r"""Sends an OPTIONS request.
+
+    :param url: URL for the new :class:`Request` object.
+    :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+    """
+
+    return request("options", url, **kwargs)
+
+
+def head(url, **kwargs):
+    r"""Sends a HEAD request.
+
+    :param url: URL for the new :class:`Request` object.
+    :param \*\*kwargs: Optional arguments that ``request`` takes. If
+        `allow_redirects` is not provided, it will be set to `False` (as
+        opposed to the default :meth:`request` behavior).
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+    """
+
+    kwargs.setdefault("allow_redirects", False)
+    return request("head", url, **kwargs)
+
+
+def post(url, data=None, json=None, **kwargs):
+    r"""Sends a POST request.
+
+    :param url: URL for the new :class:`Request` object.
+    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+        object to send in the body of the :class:`Request`.
+    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
+    :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+    """
+
+    return request("post", url, data=data, json=json, **kwargs)
+
+
+def put(url, data=None, **kwargs):
+    r"""Sends a PUT request.
+
+    :param url: URL for the new :class:`Request` object.
+    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+        object to send in the body of the :class:`Request`.
+    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
+    :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("put", url, data=data, **kwargs) + + +def patch(url, data=None, **kwargs): + r"""Sends a PATCH request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("patch", url, data=data, **kwargs) + + +def delete(url, **kwargs): + r"""Sends a DELETE request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("delete", url, **kwargs) diff --git a/py311/lib/python3.11/site-packages/requests/auth.py b/py311/lib/python3.11/site-packages/requests/auth.py new file mode 100644 index 0000000000000000000000000000000000000000..4a7ce6dc1460e0de8aa0c38ea9123faa69bd5110 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/auth.py @@ -0,0 +1,314 @@ +""" +requests.auth +~~~~~~~~~~~~~ + +This module contains the authentication handlers for Requests. +""" + +import hashlib +import os +import re +import threading +import time +import warnings +from base64 import b64encode + +from ._internal_utils import to_native_string +from .compat import basestring, str, urlparse +from .cookies import extract_cookies_to_jar +from .utils import parse_dict_header + +CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded" +CONTENT_TYPE_MULTI_PART = "multipart/form-data" + + +def _basic_auth_str(username, password): + """Returns a Basic Auth string.""" + + # "I want us to put a big-ol' comment on top of it that + # says that this behaviour is dumb but we need to preserve + # it because people are relying on it." + # - Lukasa + # + # These are here solely to maintain backwards compatibility + # for things like ints. This will be removed in 3.0.0. + if not isinstance(username, basestring): + warnings.warn( + "Non-string usernames will no longer be supported in Requests " + "3.0.0. Please convert the object you've passed in ({!r}) to " + "a string or bytes object in the near future to avoid " + "problems.".format(username), + category=DeprecationWarning, + ) + username = str(username) + + if not isinstance(password, basestring): + warnings.warn( + "Non-string passwords will no longer be supported in Requests " + "3.0.0. 
Please convert the object you've passed in ({!r}) to " + "a string or bytes object in the near future to avoid " + "problems.".format(type(password)), + category=DeprecationWarning, + ) + password = str(password) + # -- End Removal -- + + if isinstance(username, str): + username = username.encode("latin1") + + if isinstance(password, str): + password = password.encode("latin1") + + authstr = "Basic " + to_native_string( + b64encode(b":".join((username, password))).strip() + ) + + return authstr + + +class AuthBase: + """Base class that all auth implementations derive from""" + + def __call__(self, r): + raise NotImplementedError("Auth hooks must be callable.") + + +class HTTPBasicAuth(AuthBase): + """Attaches HTTP Basic Authentication to the given Request object.""" + + def __init__(self, username, password): + self.username = username + self.password = password + + def __eq__(self, other): + return all( + [ + self.username == getattr(other, "username", None), + self.password == getattr(other, "password", None), + ] + ) + + def __ne__(self, other): + return not self == other + + def __call__(self, r): + r.headers["Authorization"] = _basic_auth_str(self.username, self.password) + return r + + +class HTTPProxyAuth(HTTPBasicAuth): + """Attaches HTTP Proxy Authentication to a given Request object.""" + + def __call__(self, r): + r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password) + return r + + +class HTTPDigestAuth(AuthBase): + """Attaches HTTP Digest Authentication to the given Request object.""" + + def __init__(self, username, password): + self.username = username + self.password = password + # Keep state in per-thread local storage + self._thread_local = threading.local() + + def init_per_thread_state(self): + # Ensure state is initialized just once per-thread + if not hasattr(self._thread_local, "init"): + self._thread_local.init = True + self._thread_local.last_nonce = "" + self._thread_local.nonce_count = 0 + self._thread_local.chal = {} + self._thread_local.pos = None + self._thread_local.num_401_calls = None + + def build_digest_header(self, method, url): + """ + :rtype: str + """ + + realm = self._thread_local.chal["realm"] + nonce = self._thread_local.chal["nonce"] + qop = self._thread_local.chal.get("qop") + algorithm = self._thread_local.chal.get("algorithm") + opaque = self._thread_local.chal.get("opaque") + hash_utf8 = None + + if algorithm is None: + _algorithm = "MD5" + else: + _algorithm = algorithm.upper() + # lambdas assume digest modules are imported at the top level + if _algorithm == "MD5" or _algorithm == "MD5-SESS": + + def md5_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.md5(x).hexdigest() + + hash_utf8 = md5_utf8 + elif _algorithm == "SHA": + + def sha_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.sha1(x).hexdigest() + + hash_utf8 = sha_utf8 + elif _algorithm == "SHA-256": + + def sha256_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.sha256(x).hexdigest() + + hash_utf8 = sha256_utf8 + elif _algorithm == "SHA-512": + + def sha512_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.sha512(x).hexdigest() + + hash_utf8 = sha512_utf8 + + KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731 + + if hash_utf8 is None: + return None + + # XXX not implemented yet + entdig = None + p_parsed = urlparse(url) + #: path is request-uri defined in RFC 2616 which should not be empty + path = p_parsed.path or "/" + if p_parsed.query: + path += 
f"?{p_parsed.query}" + + A1 = f"{self.username}:{realm}:{self.password}" + A2 = f"{method}:{path}" + + HA1 = hash_utf8(A1) + HA2 = hash_utf8(A2) + + if nonce == self._thread_local.last_nonce: + self._thread_local.nonce_count += 1 + else: + self._thread_local.nonce_count = 1 + ncvalue = f"{self._thread_local.nonce_count:08x}" + s = str(self._thread_local.nonce_count).encode("utf-8") + s += nonce.encode("utf-8") + s += time.ctime().encode("utf-8") + s += os.urandom(8) + + cnonce = hashlib.sha1(s).hexdigest()[:16] + if _algorithm == "MD5-SESS": + HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}") + + if not qop: + respdig = KD(HA1, f"{nonce}:{HA2}") + elif qop == "auth" or "auth" in qop.split(","): + noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}" + respdig = KD(HA1, noncebit) + else: + # XXX handle auth-int. + return None + + self._thread_local.last_nonce = nonce + + # XXX should the partial digests be encoded too? + base = ( + f'username="{self.username}", realm="{realm}", nonce="{nonce}", ' + f'uri="{path}", response="{respdig}"' + ) + if opaque: + base += f', opaque="{opaque}"' + if algorithm: + base += f', algorithm="{algorithm}"' + if entdig: + base += f', digest="{entdig}"' + if qop: + base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"' + + return f"Digest {base}" + + def handle_redirect(self, r, **kwargs): + """Reset num_401_calls counter on redirects.""" + if r.is_redirect: + self._thread_local.num_401_calls = 1 + + def handle_401(self, r, **kwargs): + """ + Takes the given response and tries digest-auth, if needed. + + :rtype: requests.Response + """ + + # If response is not 4xx, do not auth + # See https://github.com/psf/requests/issues/3772 + if not 400 <= r.status_code < 500: + self._thread_local.num_401_calls = 1 + return r + + if self._thread_local.pos is not None: + # Rewind the file position indicator of the body to where + # it was to resend the request. + r.request.body.seek(self._thread_local.pos) + s_auth = r.headers.get("www-authenticate", "") + + if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2: + self._thread_local.num_401_calls += 1 + pat = re.compile(r"digest ", flags=re.IGNORECASE) + self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1)) + + # Consume content and release the original connection + # to allow our new request to reuse the same one. + r.content + r.close() + prep = r.request.copy() + extract_cookies_to_jar(prep._cookies, r.request, r.raw) + prep.prepare_cookies(prep._cookies) + + prep.headers["Authorization"] = self.build_digest_header( + prep.method, prep.url + ) + _r = r.connection.send(prep, **kwargs) + _r.history.append(r) + _r.request = prep + + return _r + + self._thread_local.num_401_calls = 1 + return r + + def __call__(self, r): + # Initialize per-thread state, if needed + self.init_per_thread_state() + # If we have a saved nonce, skip the 401 + if self._thread_local.last_nonce: + r.headers["Authorization"] = self.build_digest_header(r.method, r.url) + try: + self._thread_local.pos = r.body.tell() + except AttributeError: + # In the case of HTTPDigestAuth being reused and the body of + # the previous request was a file-like object, pos has the + # file position of the previous body. Ensure it's set to + # None. 
+ self._thread_local.pos = None + r.register_hook("response", self.handle_401) + r.register_hook("response", self.handle_redirect) + self._thread_local.num_401_calls = 1 + + return r + + def __eq__(self, other): + return all( + [ + self.username == getattr(other, "username", None), + self.password == getattr(other, "password", None), + ] + ) + + def __ne__(self, other): + return not self == other diff --git a/py311/lib/python3.11/site-packages/requests/certs.py b/py311/lib/python3.11/site-packages/requests/certs.py new file mode 100644 index 0000000000000000000000000000000000000000..be422c3e91e43bacf60ff3302688df0b28742333 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/certs.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python + +""" +requests.certs +~~~~~~~~~~~~~~ + +This module returns the preferred default CA certificate bundle. There is +only one — the one from the certifi package. + +If you are packaging Requests, e.g., for a Linux distribution or a managed +environment, you can change the definition of where() to return a separately +packaged CA bundle. +""" +from certifi import where + +if __name__ == "__main__": + print(where()) diff --git a/py311/lib/python3.11/site-packages/requests/compat.py b/py311/lib/python3.11/site-packages/requests/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..7f9d754350c9fe28db41e328ea880b9e4b20cc8b --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/compat.py @@ -0,0 +1,106 @@ +""" +requests.compat +~~~~~~~~~~~~~~~ + +This module previously handled import compatibility issues +between Python 2 and Python 3. It remains for backwards +compatibility until the next major version. +""" + +import importlib +import sys + +# ------- +# urllib3 +# ------- +from urllib3 import __version__ as urllib3_version + +# Detect which major version of urllib3 is being used. +try: + is_urllib3_1 = int(urllib3_version.split(".")[0]) == 1 +except (TypeError, AttributeError): + # If we can't discern a version, prefer old functionality. + is_urllib3_1 = True + +# ------------------- +# Character Detection +# ------------------- + + +def _resolve_char_detection(): + """Find supported character detection libraries.""" + chardet = None + for lib in ("chardet", "charset_normalizer"): + if chardet is None: + try: + chardet = importlib.import_module(lib) + except ImportError: + pass + return chardet + + +chardet = _resolve_char_detection() + +# ------- +# Pythons +# ------- + +# Syntax sugar. +_ver = sys.version_info + +#: Python 2.x? +is_py2 = _ver[0] == 2 + +#: Python 3.x? +is_py3 = _ver[0] == 3 + +# json/simplejson module import resolution +has_simplejson = False +try: + import simplejson as json + + has_simplejson = True +except ImportError: + import json + +if has_simplejson: + from simplejson import JSONDecodeError +else: + from json import JSONDecodeError + +# Keep OrderedDict for backwards compatibility. 
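+# (Illustrative note: code written against older Requests may still import
+# these names, e.g. ``from requests.compat import OrderedDict, urljoin``;
+# the re-exports below keep such imports working.)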
+from collections import OrderedDict +from collections.abc import Callable, Mapping, MutableMapping +from http import cookiejar as cookielib +from http.cookies import Morsel +from io import StringIO + +# -------------- +# Legacy Imports +# -------------- +from urllib.parse import ( + quote, + quote_plus, + unquote, + unquote_plus, + urldefrag, + urlencode, + urljoin, + urlparse, + urlsplit, + urlunparse, +) +from urllib.request import ( + getproxies, + getproxies_environment, + parse_http_list, + proxy_bypass, + proxy_bypass_environment, +) + +builtin_str = str +str = str +bytes = bytes +basestring = (str, bytes) +numeric_types = (int, float) +integer_types = (int,) diff --git a/py311/lib/python3.11/site-packages/requests/cookies.py b/py311/lib/python3.11/site-packages/requests/cookies.py new file mode 100644 index 0000000000000000000000000000000000000000..f69d0cda9e1c893401015a09f2db2de5a5960fd2 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/cookies.py @@ -0,0 +1,561 @@ +""" +requests.cookies +~~~~~~~~~~~~~~~~ + +Compatibility code to be able to use `http.cookiejar.CookieJar` with requests. + +requests.utils imports from here, so be careful with imports. +""" + +import calendar +import copy +import time + +from ._internal_utils import to_native_string +from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse + +try: + import threading +except ImportError: + import dummy_threading as threading + + +class MockRequest: + """Wraps a `requests.Request` to mimic a `urllib2.Request`. + + The code in `http.cookiejar.CookieJar` expects this interface in order to correctly + manage cookie policies, i.e., determine whether a cookie can be set, given the + domains of the request and the cookie. + + The original request object is read-only. The client is responsible for collecting + the new headers via `get_new_headers()` and interpreting them appropriately. You + probably want `get_cookie_header`, defined below. 
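+
+    A rough sketch of the flow (variable names illustrative)::
+
+        mock = MockRequest(prepared_request)
+        jar.add_cookie_header(mock)   # cookiejar writes via add_unredirected_header
+        header = mock.get_new_headers().get("Cookie")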
+ """ + + def __init__(self, request): + self._r = request + self._new_headers = {} + self.type = urlparse(self._r.url).scheme + + def get_type(self): + return self.type + + def get_host(self): + return urlparse(self._r.url).netloc + + def get_origin_req_host(self): + return self.get_host() + + def get_full_url(self): + # Only return the response's URL if the user hadn't set the Host + # header + if not self._r.headers.get("Host"): + return self._r.url + # If they did set it, retrieve it and reconstruct the expected domain + host = to_native_string(self._r.headers["Host"], encoding="utf-8") + parsed = urlparse(self._r.url) + # Reconstruct the URL as we expect it + return urlunparse( + [ + parsed.scheme, + host, + parsed.path, + parsed.params, + parsed.query, + parsed.fragment, + ] + ) + + def is_unverifiable(self): + return True + + def has_header(self, name): + return name in self._r.headers or name in self._new_headers + + def get_header(self, name, default=None): + return self._r.headers.get(name, self._new_headers.get(name, default)) + + def add_header(self, key, val): + """cookiejar has no legitimate use for this method; add it back if you find one.""" + raise NotImplementedError( + "Cookie headers should be added with add_unredirected_header()" + ) + + def add_unredirected_header(self, name, value): + self._new_headers[name] = value + + def get_new_headers(self): + return self._new_headers + + @property + def unverifiable(self): + return self.is_unverifiable() + + @property + def origin_req_host(self): + return self.get_origin_req_host() + + @property + def host(self): + return self.get_host() + + +class MockResponse: + """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. + + ...what? Basically, expose the parsed HTTP headers from the server response + the way `http.cookiejar` expects to see them. + """ + + def __init__(self, headers): + """Make a MockResponse for `cookiejar` to read. + + :param headers: a httplib.HTTPMessage or analogous carrying the headers + """ + self._headers = headers + + def info(self): + return self._headers + + def getheaders(self, name): + self._headers.getheaders(name) + + +def extract_cookies_to_jar(jar, request, response): + """Extract the cookies from the response into a CookieJar. + + :param jar: http.cookiejar.CookieJar (not necessarily a RequestsCookieJar) + :param request: our own requests.Request object + :param response: urllib3.HTTPResponse object + """ + if not (hasattr(response, "_original_response") and response._original_response): + return + # the _original_response field is the wrapped httplib.HTTPResponse object, + req = MockRequest(request) + # pull out the HTTPMessage with the headers and put it in the mock: + res = MockResponse(response._original_response.msg) + jar.extract_cookies(res, req) + + +def get_cookie_header(jar, request): + """ + Produce an appropriate Cookie header string to be sent with `request`, or None. + + :rtype: str + """ + r = MockRequest(request) + jar.add_cookie_header(r) + return r.get_new_headers().get("Cookie") + + +def remove_cookie_by_name(cookiejar, name, domain=None, path=None): + """Unsets a cookie by name, by default over all domains and paths. + + Wraps CookieJar.clear(), is O(n). 
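+
+    Usage (sketch, using ``cookiejar_from_dict`` from this module; the cookie
+    name is illustrative)::
+
+        jar = cookiejar_from_dict({"session_id": "abc"})
+        remove_cookie_by_name(jar, "session_id")
+        assert "session_id" not in jar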
+ """ + clearables = [] + for cookie in cookiejar: + if cookie.name != name: + continue + if domain is not None and domain != cookie.domain: + continue + if path is not None and path != cookie.path: + continue + clearables.append((cookie.domain, cookie.path, cookie.name)) + + for domain, path, name in clearables: + cookiejar.clear(domain, path, name) + + +class CookieConflictError(RuntimeError): + """There are two cookies that meet the criteria specified in the cookie jar. + Use .get and .set and include domain and path args in order to be more specific. + """ + + +class RequestsCookieJar(cookielib.CookieJar, MutableMapping): + """Compatibility class; is a http.cookiejar.CookieJar, but exposes a dict + interface. + + This is the CookieJar we create by default for requests and sessions that + don't specify one, since some clients may expect response.cookies and + session.cookies to support dict operations. + + Requests does not use the dict interface internally; it's just for + compatibility with external client code. All requests code should work + out of the box with externally provided instances of ``CookieJar``, e.g. + ``LWPCookieJar`` and ``FileCookieJar``. + + Unlike a regular CookieJar, this class is pickleable. + + .. warning:: dictionary operations that are normally O(1) may be O(n). + """ + + def get(self, name, default=None, domain=None, path=None): + """Dict-like get() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + + .. warning:: operation is O(n), not O(1). + """ + try: + return self._find_no_duplicates(name, domain, path) + except KeyError: + return default + + def set(self, name, value, **kwargs): + """Dict-like set() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + """ + # support client code that unsets cookies by assignment of a None value: + if value is None: + remove_cookie_by_name( + self, name, domain=kwargs.get("domain"), path=kwargs.get("path") + ) + return + + if isinstance(value, Morsel): + c = morsel_to_cookie(value) + else: + c = create_cookie(name, value, **kwargs) + self.set_cookie(c) + return c + + def iterkeys(self): + """Dict-like iterkeys() that returns an iterator of names of cookies + from the jar. + + .. seealso:: itervalues() and iteritems(). + """ + for cookie in iter(self): + yield cookie.name + + def keys(self): + """Dict-like keys() that returns a list of names of cookies from the + jar. + + .. seealso:: values() and items(). + """ + return list(self.iterkeys()) + + def itervalues(self): + """Dict-like itervalues() that returns an iterator of values of cookies + from the jar. + + .. seealso:: iterkeys() and iteritems(). + """ + for cookie in iter(self): + yield cookie.value + + def values(self): + """Dict-like values() that returns a list of values of cookies from the + jar. + + .. seealso:: keys() and items(). + """ + return list(self.itervalues()) + + def iteritems(self): + """Dict-like iteritems() that returns an iterator of name-value tuples + from the jar. + + .. seealso:: iterkeys() and itervalues(). + """ + for cookie in iter(self): + yield cookie.name, cookie.value + + def items(self): + """Dict-like items() that returns a list of name-value tuples from the + jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a + vanilla python dict of key value pairs. + + .. seealso:: keys() and values(). 
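+
+        Usage::
+
+            >>> jar = RequestsCookieJar()
+            >>> _ = jar.set("tasty_cookie", "yum")
+            >>> jar.items()
+            [('tasty_cookie', 'yum')]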
+ """ + return list(self.iteritems()) + + def list_domains(self): + """Utility method to list all the domains in the jar.""" + domains = [] + for cookie in iter(self): + if cookie.domain not in domains: + domains.append(cookie.domain) + return domains + + def list_paths(self): + """Utility method to list all the paths in the jar.""" + paths = [] + for cookie in iter(self): + if cookie.path not in paths: + paths.append(cookie.path) + return paths + + def multiple_domains(self): + """Returns True if there are multiple domains in the jar. + Returns False otherwise. + + :rtype: bool + """ + domains = [] + for cookie in iter(self): + if cookie.domain is not None and cookie.domain in domains: + return True + domains.append(cookie.domain) + return False # there is only one domain in jar + + def get_dict(self, domain=None, path=None): + """Takes as an argument an optional domain and path and returns a plain + old Python dict of name-value pairs of cookies that meet the + requirements. + + :rtype: dict + """ + dictionary = {} + for cookie in iter(self): + if (domain is None or cookie.domain == domain) and ( + path is None or cookie.path == path + ): + dictionary[cookie.name] = cookie.value + return dictionary + + def __contains__(self, name): + try: + return super().__contains__(name) + except CookieConflictError: + return True + + def __getitem__(self, name): + """Dict-like __getitem__() for compatibility with client code. Throws + exception if there are more than one cookie with name. In that case, + use the more explicit get() method instead. + + .. warning:: operation is O(n), not O(1). + """ + return self._find_no_duplicates(name) + + def __setitem__(self, name, value): + """Dict-like __setitem__ for compatibility with client code. Throws + exception if there is already a cookie of that name in the jar. In that + case, use the more explicit set() method instead. + """ + self.set(name, value) + + def __delitem__(self, name): + """Deletes a cookie given a name. Wraps ``http.cookiejar.CookieJar``'s + ``remove_cookie_by_name()``. + """ + remove_cookie_by_name(self, name) + + def set_cookie(self, cookie, *args, **kwargs): + if ( + hasattr(cookie.value, "startswith") + and cookie.value.startswith('"') + and cookie.value.endswith('"') + ): + cookie.value = cookie.value.replace('\\"', "") + return super().set_cookie(cookie, *args, **kwargs) + + def update(self, other): + """Updates this jar with cookies from another CookieJar or dict-like""" + if isinstance(other, cookielib.CookieJar): + for cookie in other: + self.set_cookie(copy.copy(cookie)) + else: + super().update(other) + + def _find(self, name, domain=None, path=None): + """Requests uses this method internally to get cookie values. + + If there are conflicting cookies, _find arbitrarily chooses one. + See _find_no_duplicates if you want an exception thrown if there are + conflicting cookies. + + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :return: cookie.value + """ + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + return cookie.value + + raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") + + def _find_no_duplicates(self, name, domain=None, path=None): + """Both ``__get_item__`` and ``get`` call this function: it's never + used elsewhere in Requests. 
+ + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :raises KeyError: if cookie is not found + :raises CookieConflictError: if there are multiple cookies + that match name and optionally domain and path + :return: cookie.value + """ + toReturn = None + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + if toReturn is not None: + # if there are multiple cookies that meet passed in criteria + raise CookieConflictError( + f"There are multiple cookies with name, {name!r}" + ) + # we will eventually return this as long as no cookie conflict + toReturn = cookie.value + + if toReturn: + return toReturn + raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") + + def __getstate__(self): + """Unlike a normal CookieJar, this class is pickleable.""" + state = self.__dict__.copy() + # remove the unpickleable RLock object + state.pop("_cookies_lock") + return state + + def __setstate__(self, state): + """Unlike a normal CookieJar, this class is pickleable.""" + self.__dict__.update(state) + if "_cookies_lock" not in self.__dict__: + self._cookies_lock = threading.RLock() + + def copy(self): + """Return a copy of this RequestsCookieJar.""" + new_cj = RequestsCookieJar() + new_cj.set_policy(self.get_policy()) + new_cj.update(self) + return new_cj + + def get_policy(self): + """Return the CookiePolicy instance used.""" + return self._policy + + +def _copy_cookie_jar(jar): + if jar is None: + return None + + if hasattr(jar, "copy"): + # We're dealing with an instance of RequestsCookieJar + return jar.copy() + # We're dealing with a generic CookieJar instance + new_jar = copy.copy(jar) + new_jar.clear() + for cookie in jar: + new_jar.set_cookie(copy.copy(cookie)) + return new_jar + + +def create_cookie(name, value, **kwargs): + """Make a cookie from underspecified parameters. + + By default, the pair of `name` and `value` will be set for the domain '' + and sent on every request (this is sometimes called a "supercookie"). 
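+
+    Usage (illustrative)::
+
+        >>> c = create_cookie("k", "v", domain=".example.com")
+        >>> (c.name, c.value, c.domain, c.path)
+        ('k', 'v', '.example.com', '/')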
+ """ + result = { + "version": 0, + "name": name, + "value": value, + "port": None, + "domain": "", + "path": "/", + "secure": False, + "expires": None, + "discard": True, + "comment": None, + "comment_url": None, + "rest": {"HttpOnly": None}, + "rfc2109": False, + } + + badargs = set(kwargs) - set(result) + if badargs: + raise TypeError( + f"create_cookie() got unexpected keyword arguments: {list(badargs)}" + ) + + result.update(kwargs) + result["port_specified"] = bool(result["port"]) + result["domain_specified"] = bool(result["domain"]) + result["domain_initial_dot"] = result["domain"].startswith(".") + result["path_specified"] = bool(result["path"]) + + return cookielib.Cookie(**result) + + +def morsel_to_cookie(morsel): + """Convert a Morsel object into a Cookie containing the one k/v pair.""" + + expires = None + if morsel["max-age"]: + try: + expires = int(time.time() + int(morsel["max-age"])) + except ValueError: + raise TypeError(f"max-age: {morsel['max-age']} must be integer") + elif morsel["expires"]: + time_template = "%a, %d-%b-%Y %H:%M:%S GMT" + expires = calendar.timegm(time.strptime(morsel["expires"], time_template)) + return create_cookie( + comment=morsel["comment"], + comment_url=bool(morsel["comment"]), + discard=False, + domain=morsel["domain"], + expires=expires, + name=morsel.key, + path=morsel["path"], + port=None, + rest={"HttpOnly": morsel["httponly"]}, + rfc2109=False, + secure=bool(morsel["secure"]), + value=morsel.value, + version=morsel["version"] or 0, + ) + + +def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): + """Returns a CookieJar from a key/value dictionary. + + :param cookie_dict: Dict of key/values to insert into CookieJar. + :param cookiejar: (optional) A cookiejar to add the cookies to. + :param overwrite: (optional) If False, will not replace cookies + already in the jar with new ones. + :rtype: CookieJar + """ + if cookiejar is None: + cookiejar = RequestsCookieJar() + + if cookie_dict is not None: + names_from_jar = [cookie.name for cookie in cookiejar] + for name in cookie_dict: + if overwrite or (name not in names_from_jar): + cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) + + return cookiejar + + +def merge_cookies(cookiejar, cookies): + """Add cookies to cookiejar and returns a merged CookieJar. + + :param cookiejar: CookieJar object to add the cookies to. + :param cookies: Dictionary or CookieJar object to be added. + :rtype: CookieJar + """ + if not isinstance(cookiejar, cookielib.CookieJar): + raise ValueError("You can only merge into CookieJar") + + if isinstance(cookies, dict): + cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False) + elif isinstance(cookies, cookielib.CookieJar): + try: + cookiejar.update(cookies) + except AttributeError: + for cookie_in_jar in cookies: + cookiejar.set_cookie(cookie_in_jar) + + return cookiejar diff --git a/py311/lib/python3.11/site-packages/requests/exceptions.py b/py311/lib/python3.11/site-packages/requests/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..83986b489849131efeb7f286b328961205256fd8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/exceptions.py @@ -0,0 +1,151 @@ +""" +requests.exceptions +~~~~~~~~~~~~~~~~~~~ + +This module contains the set of Requests' exceptions. 
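+
+A typical pattern (illustrative) is to catch the broad base class, since the
+more specific errors below all derive from it::
+
+    import requests
+
+    try:
+        requests.get("https://example.invalid", timeout=1)
+    except requests.exceptions.RequestException:
+        ...  # timeouts, connection failures, HTTP errors, etc.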
+""" +from urllib3.exceptions import HTTPError as BaseHTTPError + +from .compat import JSONDecodeError as CompatJSONDecodeError + + +class RequestException(IOError): + """There was an ambiguous exception that occurred while handling your + request. + """ + + def __init__(self, *args, **kwargs): + """Initialize RequestException with `request` and `response` objects.""" + response = kwargs.pop("response", None) + self.response = response + self.request = kwargs.pop("request", None) + if response is not None and not self.request and hasattr(response, "request"): + self.request = self.response.request + super().__init__(*args, **kwargs) + + +class InvalidJSONError(RequestException): + """A JSON error occurred.""" + + +class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError): + """Couldn't decode the text into json""" + + def __init__(self, *args, **kwargs): + """ + Construct the JSONDecodeError instance first with all + args. Then use it's args to construct the IOError so that + the json specific args aren't used as IOError specific args + and the error message from JSONDecodeError is preserved. + """ + CompatJSONDecodeError.__init__(self, *args) + InvalidJSONError.__init__(self, *self.args, **kwargs) + + def __reduce__(self): + """ + The __reduce__ method called when pickling the object must + be the one from the JSONDecodeError (be it json/simplejson) + as it expects all the arguments for instantiation, not just + one like the IOError, and the MRO would by default call the + __reduce__ method from the IOError due to the inheritance order. + """ + return CompatJSONDecodeError.__reduce__(self) + + +class HTTPError(RequestException): + """An HTTP error occurred.""" + + +class ConnectionError(RequestException): + """A Connection error occurred.""" + + +class ProxyError(ConnectionError): + """A proxy error occurred.""" + + +class SSLError(ConnectionError): + """An SSL error occurred.""" + + +class Timeout(RequestException): + """The request timed out. + + Catching this error will catch both + :exc:`~requests.exceptions.ConnectTimeout` and + :exc:`~requests.exceptions.ReadTimeout` errors. + """ + + +class ConnectTimeout(ConnectionError, Timeout): + """The request timed out while trying to connect to the remote server. + + Requests that produced this error are safe to retry. + """ + + +class ReadTimeout(Timeout): + """The server did not send any data in the allotted amount of time.""" + + +class URLRequired(RequestException): + """A valid URL is required to make a request.""" + + +class TooManyRedirects(RequestException): + """Too many redirects.""" + + +class MissingSchema(RequestException, ValueError): + """The URL scheme (e.g. 
http or https) is missing.""" + + +class InvalidSchema(RequestException, ValueError): + """The URL scheme provided is either invalid or unsupported.""" + + +class InvalidURL(RequestException, ValueError): + """The URL provided was somehow invalid.""" + + +class InvalidHeader(RequestException, ValueError): + """The header value provided was somehow invalid.""" + + +class InvalidProxyURL(InvalidURL): + """The proxy URL provided is invalid.""" + + +class ChunkedEncodingError(RequestException): + """The server declared chunked encoding but sent an invalid chunk.""" + + +class ContentDecodingError(RequestException, BaseHTTPError): + """Failed to decode response content.""" + + +class StreamConsumedError(RequestException, TypeError): + """The content for this response was already consumed.""" + + +class RetryError(RequestException): + """Custom retries logic failed""" + + +class UnrewindableBodyError(RequestException): + """Requests encountered an error when trying to rewind a body.""" + + +# Warnings + + +class RequestsWarning(Warning): + """Base warning for Requests.""" + + +class FileModeWarning(RequestsWarning, DeprecationWarning): + """A file was opened in text mode, but Requests determined its binary length.""" + + +class RequestsDependencyWarning(RequestsWarning): + """An imported dependency doesn't match the expected version range.""" diff --git a/py311/lib/python3.11/site-packages/requests/help.py b/py311/lib/python3.11/site-packages/requests/help.py new file mode 100644 index 0000000000000000000000000000000000000000..8fbcd6560a8fe2c8a07e3bd1441a81e0db9cb689 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/help.py @@ -0,0 +1,134 @@ +"""Module containing bug report helper(s).""" + +import json +import platform +import ssl +import sys + +import idna +import urllib3 + +from . import __version__ as requests_version + +try: + import charset_normalizer +except ImportError: + charset_normalizer = None + +try: + import chardet +except ImportError: + chardet = None + +try: + from urllib3.contrib import pyopenssl +except ImportError: + pyopenssl = None + OpenSSL = None + cryptography = None +else: + import cryptography + import OpenSSL + + +def _implementation(): + """Return a dict with the Python implementation and version. + + Provide both the name and the version of the Python implementation + currently running. For example, on CPython 3.10.3 it will return + {'name': 'CPython', 'version': '3.10.3'}. + + This function works best on CPython and PyPy: in particular, it probably + doesn't work for Jython or IronPython. Future investigation should be done + to work out the correct shape of the code for those platforms. 
+ """ + implementation = platform.python_implementation() + + if implementation == "CPython": + implementation_version = platform.python_version() + elif implementation == "PyPy": + implementation_version = "{}.{}.{}".format( + sys.pypy_version_info.major, + sys.pypy_version_info.minor, + sys.pypy_version_info.micro, + ) + if sys.pypy_version_info.releaselevel != "final": + implementation_version = "".join( + [implementation_version, sys.pypy_version_info.releaselevel] + ) + elif implementation == "Jython": + implementation_version = platform.python_version() # Complete Guess + elif implementation == "IronPython": + implementation_version = platform.python_version() # Complete Guess + else: + implementation_version = "Unknown" + + return {"name": implementation, "version": implementation_version} + + +def info(): + """Generate information for a bug report.""" + try: + platform_info = { + "system": platform.system(), + "release": platform.release(), + } + except OSError: + platform_info = { + "system": "Unknown", + "release": "Unknown", + } + + implementation_info = _implementation() + urllib3_info = {"version": urllib3.__version__} + charset_normalizer_info = {"version": None} + chardet_info = {"version": None} + if charset_normalizer: + charset_normalizer_info = {"version": charset_normalizer.__version__} + if chardet: + chardet_info = {"version": chardet.__version__} + + pyopenssl_info = { + "version": None, + "openssl_version": "", + } + if OpenSSL: + pyopenssl_info = { + "version": OpenSSL.__version__, + "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}", + } + cryptography_info = { + "version": getattr(cryptography, "__version__", ""), + } + idna_info = { + "version": getattr(idna, "__version__", ""), + } + + system_ssl = ssl.OPENSSL_VERSION_NUMBER + system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""} + + return { + "platform": platform_info, + "implementation": implementation_info, + "system_ssl": system_ssl_info, + "using_pyopenssl": pyopenssl is not None, + "using_charset_normalizer": chardet is None, + "pyOpenSSL": pyopenssl_info, + "urllib3": urllib3_info, + "chardet": chardet_info, + "charset_normalizer": charset_normalizer_info, + "cryptography": cryptography_info, + "idna": idna_info, + "requests": { + "version": requests_version, + }, + } + + +def main(): + """Pretty-print the bug information as JSON.""" + print(json.dumps(info(), sort_keys=True, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/py311/lib/python3.11/site-packages/requests/hooks.py b/py311/lib/python3.11/site-packages/requests/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..d181ba2ec2e55d274897315887b78fbdca757da8 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/hooks.py @@ -0,0 +1,33 @@ +""" +requests.hooks +~~~~~~~~~~~~~~ + +This module provides the capabilities for the Requests hooks system. + +Available hooks: + +``response``: + The response generated from a Request. 
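+
+Usage (illustrative)::
+
+    import requests
+
+    def log_url(response, *args, **kwargs):
+        print(response.url)
+
+    requests.get("https://example.com", hooks={"response": log_url})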
+""" +HOOKS = ["response"] + + +def default_hooks(): + return {event: [] for event in HOOKS} + + +# TODO: response is the only one + + +def dispatch_hook(key, hooks, hook_data, **kwargs): + """Dispatches a hook dictionary on a given piece of data.""" + hooks = hooks or {} + hooks = hooks.get(key) + if hooks: + if hasattr(hooks, "__call__"): + hooks = [hooks] + for hook in hooks: + _hook_data = hook(hook_data, **kwargs) + if _hook_data is not None: + hook_data = _hook_data + return hook_data diff --git a/py311/lib/python3.11/site-packages/requests/models.py b/py311/lib/python3.11/site-packages/requests/models.py new file mode 100644 index 0000000000000000000000000000000000000000..c4b25fa0790da44683dab185c89ac08b69fb7419 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/models.py @@ -0,0 +1,1039 @@ +""" +requests.models +~~~~~~~~~~~~~~~ + +This module contains the primary objects that power Requests. +""" + +import datetime + +# Import encoding now, to avoid implicit import later. +# Implicit import within threads may cause LookupError when standard library is in a ZIP, +# such as in Embedded Python. See https://github.com/psf/requests/issues/3578. +import encodings.idna # noqa: F401 +from io import UnsupportedOperation + +from urllib3.exceptions import ( + DecodeError, + LocationParseError, + ProtocolError, + ReadTimeoutError, + SSLError, +) +from urllib3.fields import RequestField +from urllib3.filepost import encode_multipart_formdata +from urllib3.util import parse_url + +from ._internal_utils import to_native_string, unicode_is_ascii +from .auth import HTTPBasicAuth +from .compat import ( + Callable, + JSONDecodeError, + Mapping, + basestring, + builtin_str, + chardet, + cookielib, +) +from .compat import json as complexjson +from .compat import urlencode, urlsplit, urlunparse +from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header +from .exceptions import ( + ChunkedEncodingError, + ConnectionError, + ContentDecodingError, + HTTPError, + InvalidJSONError, + InvalidURL, +) +from .exceptions import JSONDecodeError as RequestsJSONDecodeError +from .exceptions import MissingSchema +from .exceptions import SSLError as RequestsSSLError +from .exceptions import StreamConsumedError +from .hooks import default_hooks +from .status_codes import codes +from .structures import CaseInsensitiveDict +from .utils import ( + check_header_validity, + get_auth_from_url, + guess_filename, + guess_json_utf, + iter_slices, + parse_header_links, + requote_uri, + stream_decode_response_unicode, + super_len, + to_key_val_list, +) + +#: The set of HTTP status codes that indicate an automatically +#: processable redirect. +REDIRECT_STATI = ( + codes.moved, # 301 + codes.found, # 302 + codes.other, # 303 + codes.temporary_redirect, # 307 + codes.permanent_redirect, # 308 +) + +DEFAULT_REDIRECT_LIMIT = 30 +CONTENT_CHUNK_SIZE = 10 * 1024 +ITER_CHUNK_SIZE = 512 + + +class RequestEncodingMixin: + @property + def path_url(self): + """Build the path URL to use.""" + + url = [] + + p = urlsplit(self.url) + + path = p.path + if not path: + path = "/" + + url.append(path) + + query = p.query + if query: + url.append("?") + url.append(query) + + return "".join(url) + + @staticmethod + def _encode_params(data): + """Encode parameters in a piece of data. + + Will successfully encode parameters when passed as a dict or a list of + 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary + if parameters are supplied as a dict. 
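+
+        Usage::
+
+            >>> RequestEncodingMixin._encode_params([("k", "1"), ("k", "2")])
+            'k=1&k=2'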
+        """
+
+        if isinstance(data, (str, bytes)):
+            return data
+        elif hasattr(data, "read"):
+            return data
+        elif hasattr(data, "__iter__"):
+            result = []
+            for k, vs in to_key_val_list(data):
+                if isinstance(vs, basestring) or not hasattr(vs, "__iter__"):
+                    vs = [vs]
+                for v in vs:
+                    if v is not None:
+                        result.append(
+                            (
+                                k.encode("utf-8") if isinstance(k, str) else k,
+                                v.encode("utf-8") if isinstance(v, str) else v,
+                            )
+                        )
+            return urlencode(result, doseq=True)
+        else:
+            return data
+
+    @staticmethod
+    def _encode_files(files, data):
+        """Build the body for a multipart/form-data request.
+
+        Will successfully encode files when passed as a dict or a list of
+        tuples. Order is retained if data is a list of tuples but arbitrary
+        if parameters are supplied as a dict.
+        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
+        or 4-tuples (filename, fileobj, content_type, custom_headers).
+        """
+        if not files:
+            raise ValueError("Files must be provided.")
+        elif isinstance(data, basestring):
+            raise ValueError("Data must not be a string.")
+
+        new_fields = []
+        fields = to_key_val_list(data or {})
+        files = to_key_val_list(files or {})
+
+        for field, val in fields:
+            if isinstance(val, basestring) or not hasattr(val, "__iter__"):
+                val = [val]
+            for v in val:
+                if v is not None:
+                    # Don't call str() on bytestrings: in Py3 it all goes wrong.
+                    if not isinstance(v, bytes):
+                        v = str(v)
+
+                    new_fields.append(
+                        (
+                            field.decode("utf-8")
+                            if isinstance(field, bytes)
+                            else field,
+                            v.encode("utf-8") if isinstance(v, str) else v,
+                        )
+                    )
+
+        for k, v in files:
+            # support for explicit filename
+            ft = None
+            fh = None
+            if isinstance(v, (tuple, list)):
+                if len(v) == 2:
+                    fn, fp = v
+                elif len(v) == 3:
+                    fn, fp, ft = v
+                else:
+                    fn, fp, ft, fh = v
+            else:
+                fn = guess_filename(v) or k
+                fp = v
+
+            if isinstance(fp, (str, bytes, bytearray)):
+                fdata = fp
+            elif hasattr(fp, "read"):
+                fdata = fp.read()
+            elif fp is None:
+                continue
+            else:
+                fdata = fp
+
+            rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
+            rf.make_multipart(content_type=ft)
+            new_fields.append(rf)
+
+        body, content_type = encode_multipart_formdata(new_fields)
+
+        return body, content_type
+
+
+class RequestHooksMixin:
+    def register_hook(self, event, hook):
+        """Properly register a hook."""
+
+        if event not in self.hooks:
+            raise ValueError(f'Unsupported event specified, with event name "{event}"')
+
+        if isinstance(hook, Callable):
+            self.hooks[event].append(hook)
+        elif hasattr(hook, "__iter__"):
+            self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
+
+    def deregister_hook(self, event, hook):
+        """Deregister a previously registered hook.
+        Returns True if the hook existed, False if not.
+        """
+
+        try:
+            self.hooks[event].remove(hook)
+            return True
+        except ValueError:
+            return False
+
+
+class Request(RequestHooksMixin):
+    """A user-created :class:`Request <Request>` object.
+
+    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
+
+    :param method: HTTP method to use.
+    :param url: URL to send.
+    :param headers: dictionary of headers to send.
+    :param files: dictionary of {filename: fileobject} files to multipart upload.
+    :param data: the body to attach to the request. If a dictionary or
+        list of tuples ``[(key, value)]`` is provided, form-encoding will
+        take place.
+    :param json: json for the body to attach to the request (if files or data is not specified).
+    :param params: URL parameters to append to the URL. If a dictionary or
+        list of tuples ``[(key, value)]`` is provided, form-encoding will
+        take place.
+    :param auth: Auth handler or (user, pass) tuple.
+    :param cookies: dictionary or CookieJar of cookies to attach to this request.
+    :param hooks: dictionary of callback hooks, for internal usage.
+
+    Usage::
+
+      >>> import requests
+      >>> req = requests.Request('GET', 'https://httpbin.org/get')
+      >>> req.prepare()
+      <PreparedRequest [GET]>
+    """
+
+    def __init__(
+        self,
+        method=None,
+        url=None,
+        headers=None,
+        files=None,
+        data=None,
+        params=None,
+        auth=None,
+        cookies=None,
+        hooks=None,
+        json=None,
+    ):
+        # Default empty dicts for dict params.
+        data = [] if data is None else data
+        files = [] if files is None else files
+        headers = {} if headers is None else headers
+        params = {} if params is None else params
+        hooks = {} if hooks is None else hooks
+
+        self.hooks = default_hooks()
+        for k, v in list(hooks.items()):
+            self.register_hook(event=k, hook=v)
+
+        self.method = method
+        self.url = url
+        self.headers = headers
+        self.files = files
+        self.data = data
+        self.json = json
+        self.params = params
+        self.auth = auth
+        self.cookies = cookies
+
+    def __repr__(self):
+        return f"<Request [{self.method}]>"
+
+    def prepare(self):
+        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
+        p = PreparedRequest()
+        p.prepare(
+            method=self.method,
+            url=self.url,
+            headers=self.headers,
+            files=self.files,
+            data=self.data,
+            json=self.json,
+            params=self.params,
+            auth=self.auth,
+            cookies=self.cookies,
+            hooks=self.hooks,
+        )
+        return p
+
+
+class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
+    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
+    containing the exact bytes that will be sent to the server.
+
+    Instances are generated from a :class:`Request <Request>` object, and
+    should not be instantiated manually; doing so may produce undesirable
+    effects.
+
+    Usage::
+
+      >>> import requests
+      >>> req = requests.Request('GET', 'https://httpbin.org/get')
+      >>> r = req.prepare()
+      >>> r
+      <PreparedRequest [GET]>
+
+      >>> s = requests.Session()
+      >>> s.send(r)
+      <Response [200]>
+    """
+
+    def __init__(self):
+        #: HTTP verb to send to the server.
+        self.method = None
+        #: HTTP URL to send the request to.
+        self.url = None
+        #: dictionary of HTTP headers.
+        self.headers = None
+        # The `CookieJar` used to create the Cookie header will be stored here
+        # after prepare_cookies is called
+        self._cookies = None
+        #: request body to send to the server.
+        self.body = None
+        #: dictionary of callback hooks, for internal usage.
+        self.hooks = default_hooks()
+        #: integer denoting starting position of a readable file-like body.
+        self._body_position = None
+
+    def prepare(
+        self,
+        method=None,
+        url=None,
+        headers=None,
+        files=None,
+        data=None,
+        params=None,
+        auth=None,
+        cookies=None,
+        hooks=None,
+        json=None,
+    ):
+        """Prepares the entire request with the given parameters."""
+
+        self.prepare_method(method)
+        self.prepare_url(url, params)
+        self.prepare_headers(headers)
+        self.prepare_cookies(cookies)
+        self.prepare_body(data, files, json)
+        self.prepare_auth(auth, url)
+
+        # Note that prepare_auth must be last to enable authentication schemes
+        # such as OAuth to work on a fully prepared request.
+
+        # This MUST go after prepare_auth. Authenticators could add a hook
+        self.prepare_hooks(hooks)
+
+    def __repr__(self):
+        return f"<PreparedRequest [{self.method}]>"
+
+    def copy(self):
+        p = PreparedRequest()
+        p.method = self.method
+        p.url = self.url
+        p.headers = self.headers.copy() if self.headers is not None else None
+        p._cookies = _copy_cookie_jar(self._cookies)
+        p.body = self.body
+        p.hooks = self.hooks
+        p._body_position = self._body_position
+        return p
+
+    def prepare_method(self, method):
+        """Prepares the given HTTP method."""
+        self.method = method
+        if self.method is not None:
+            self.method = to_native_string(self.method.upper())
+
+    @staticmethod
+    def _get_idna_encoded_host(host):
+        import idna
+
+        try:
+            host = idna.encode(host, uts46=True).decode("utf-8")
+        except idna.IDNAError:
+            raise UnicodeError
+        return host
+
+    def prepare_url(self, url, params):
+        """Prepares the given HTTP URL."""
+        #: Accept objects that have string representations.
+        #: We're unable to blindly call unicode/str functions
+        #: as this will include the bytestring indicator (b'')
+        #: on python 3.x.
+        #: https://github.com/psf/requests/pull/2238
+        if isinstance(url, bytes):
+            url = url.decode("utf8")
+        else:
+            url = str(url)
+
+        # Remove leading whitespaces from url
+        url = url.lstrip()
+
+        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
+        # `data` etc to work around exceptions from `url_parse`, which
+        # handles RFC 3986 only.
+        if ":" in url and not url.lower().startswith("http"):
+            self.url = url
+            return
+
+        # Support for unicode domain names and paths.
+        try:
+            scheme, auth, host, port, path, query, fragment = parse_url(url)
+        except LocationParseError as e:
+            raise InvalidURL(*e.args)
+
+        if not scheme:
+            raise MissingSchema(
+                f"Invalid URL {url!r}: No scheme supplied. "
+                f"Perhaps you meant https://{url}?"
+            )
+
+        if not host:
+            raise InvalidURL(f"Invalid URL {url!r}: No host supplied")
+
+        # In general, we want to try IDNA encoding the hostname if the string contains
+        # non-ASCII characters. This allows users to automatically get the correct IDNA
+        # behaviour. For strings containing only ASCII characters, we need to also verify
+        # it doesn't start with a wildcard (*), before allowing the unencoded hostname.
+        if not unicode_is_ascii(host):
+            try:
+                host = self._get_idna_encoded_host(host)
+            except UnicodeError:
+                raise InvalidURL("URL has an invalid label.")
+        elif host.startswith(("*", ".")):
+            raise InvalidURL("URL has an invalid label.")
+
+        # Carefully reconstruct the network location
+        netloc = auth or ""
+        if netloc:
+            netloc += "@"
+        netloc += host
+        if port:
+            netloc += f":{port}"
+
+        # Bare domains aren't valid URLs.
+        if not path:
+            path = "/"
+
+        if isinstance(params, (str, bytes)):
+            params = to_native_string(params)
+
+        enc_params = self._encode_params(params)
+        if enc_params:
+            if query:
+                query = f"{query}&{enc_params}"
+            else:
+                query = enc_params
+
+        url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
+        self.url = url
+
+    def prepare_headers(self, headers):
+        """Prepares the given HTTP headers."""
+
+        self.headers = CaseInsensitiveDict()
+        if headers:
+            for header in headers.items():
+                # Raise exception on invalid header value.
+                check_header_validity(header)
+                name, value = header
+                self.headers[to_native_string(name)] = value
+
+    def prepare_body(self, data, files, json=None):
+        """Prepares the given HTTP body data."""
+
+        # Check if file, fo, generator, iterator.
+        # If not, run through normal process.
+
+        # Nottin' on you.
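+        # Broadly: a ``json`` argument is serialized to UTF-8 bytes; data that
+        # is iterable but not a str/list/tuple/Mapping is treated as a stream
+        # (sent chunked when its length cannot be determined); otherwise files
+        # and/or data are form-encoded below.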
+ body = None + content_type = None + + if not data and json is not None: + # urllib3 requires a bytes-like body. Python 2's json.dumps + # provides this natively, but Python 3 gives a Unicode string. + content_type = "application/json" + + try: + body = complexjson.dumps(json, allow_nan=False) + except ValueError as ve: + raise InvalidJSONError(ve, request=self) + + if not isinstance(body, bytes): + body = body.encode("utf-8") + + is_stream = all( + [ + hasattr(data, "__iter__"), + not isinstance(data, (basestring, list, tuple, Mapping)), + ] + ) + + if is_stream: + try: + length = super_len(data) + except (TypeError, AttributeError, UnsupportedOperation): + length = None + + body = data + + if getattr(body, "tell", None) is not None: + # Record the current file position before reading. + # This will allow us to rewind a file in the event + # of a redirect. + try: + self._body_position = body.tell() + except OSError: + # This differentiates from None, allowing us to catch + # a failed `tell()` later when trying to rewind the body + self._body_position = object() + + if files: + raise NotImplementedError( + "Streamed bodies and files are mutually exclusive." + ) + + if length: + self.headers["Content-Length"] = builtin_str(length) + else: + self.headers["Transfer-Encoding"] = "chunked" + else: + # Multi-part file uploads. + if files: + (body, content_type) = self._encode_files(files, data) + else: + if data: + body = self._encode_params(data) + if isinstance(data, basestring) or hasattr(data, "read"): + content_type = None + else: + content_type = "application/x-www-form-urlencoded" + + self.prepare_content_length(body) + + # Add content-type if it wasn't explicitly provided. + if content_type and ("content-type" not in self.headers): + self.headers["Content-Type"] = content_type + + self.body = body + + def prepare_content_length(self, body): + """Prepare Content-Length header based on request method and body""" + if body is not None: + length = super_len(body) + if length: + # If length exists, set it. Otherwise, we fallback + # to Transfer-Encoding: chunked. + self.headers["Content-Length"] = builtin_str(length) + elif ( + self.method not in ("GET", "HEAD") + and self.headers.get("Content-Length") is None + ): + # Set Content-Length to 0 for methods that can have a body + # but don't provide one. (i.e. not GET or HEAD) + self.headers["Content-Length"] = "0" + + def prepare_auth(self, auth, url=""): + """Prepares the given HTTP auth data.""" + + # If no Auth is explicitly provided, extract it from the URL first. + if auth is None: + url_auth = get_auth_from_url(self.url) + auth = url_auth if any(url_auth) else None + + if auth: + if isinstance(auth, tuple) and len(auth) == 2: + # special-case basic HTTP auth + auth = HTTPBasicAuth(*auth) + + # Allow auth to make its changes. + r = auth(self) + + # Update self to reflect the auth changes. + self.__dict__.update(r.__dict__) + + # Recompute Content-Length + self.prepare_content_length(self.body) + + def prepare_cookies(self, cookies): + """Prepares the given HTTP cookie data. + + This function eventually generates a ``Cookie`` header from the + given cookies using cookielib. Due to cookielib's design, the header + will not be regenerated if it already exists, meaning this function + can only be called once for the life of the + :class:`PreparedRequest ` object. Any subsequent calls + to ``prepare_cookies`` will have no actual effect, unless the "Cookie" + header is removed beforehand. 
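+
+        A sketch of the resulting header (values are illustrative)::
+
+            >>> p = PreparedRequest()
+            >>> p.prepare(method='GET', url='https://httpbin.org/get',
+            ...           cookies={'k': 'v'})
+            >>> p.headers['Cookie']
+            'k=v'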
+        """
+        if isinstance(cookies, cookielib.CookieJar):
+            self._cookies = cookies
+        else:
+            self._cookies = cookiejar_from_dict(cookies)
+
+        cookie_header = get_cookie_header(self._cookies, self)
+        if cookie_header is not None:
+            self.headers["Cookie"] = cookie_header
+
+    def prepare_hooks(self, hooks):
+        """Prepares the given hooks."""
+        # hooks can be passed as None to the prepare method and to this
+        # method. To prevent iterating over None, simply use an empty list
+        # if hooks is False-y
+        hooks = hooks or []
+        for event in hooks:
+            self.register_hook(event, hooks[event])
+
+
+class Response:
+    """The :class:`Response <Response>` object, which contains a
+    server's response to an HTTP request.
+    """
+
+    __attrs__ = [
+        "_content",
+        "status_code",
+        "headers",
+        "url",
+        "history",
+        "encoding",
+        "reason",
+        "cookies",
+        "elapsed",
+        "request",
+    ]
+
+    def __init__(self):
+        self._content = False
+        self._content_consumed = False
+        self._next = None
+
+        #: Integer Code of responded HTTP Status, e.g. 404 or 200.
+        self.status_code = None
+
+        #: Case-insensitive Dictionary of Response Headers.
+        #: For example, ``headers['content-encoding']`` will return the
+        #: value of a ``'Content-Encoding'`` response header.
+        self.headers = CaseInsensitiveDict()
+
+        #: File-like object representation of response (for advanced usage).
+        #: Use of ``raw`` requires that ``stream=True`` be set on the request.
+        #: This requirement does not apply for use internally to Requests.
+        self.raw = None
+
+        #: Final URL location of Response.
+        self.url = None
+
+        #: Encoding to decode with when accessing r.text.
+        self.encoding = None
+
+        #: A list of :class:`Response <Response>` objects from
+        #: the history of the Request. Any redirect responses will end
+        #: up here. The list is sorted from the oldest to the most recent request.
+        self.history = []
+
+        #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
+        self.reason = None
+
+        #: A CookieJar of Cookies the server sent back.
+        self.cookies = cookiejar_from_dict({})
+
+        #: The amount of time elapsed between sending the request
+        #: and the arrival of the response (as a timedelta).
+        #: This property specifically measures the time taken between sending
+        #: the first byte of the request and finishing parsing the headers. It
+        #: is therefore unaffected by consuming the response content or the
+        #: value of the ``stream`` keyword argument.
+        self.elapsed = datetime.timedelta(0)
+
+        #: The :class:`PreparedRequest <PreparedRequest>` object to which this
+        #: is a response.
+        self.request = None
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+    def __getstate__(self):
+        # Consume everything; accessing the content attribute makes
+        # sure the content has been fully read.
+        if not self._content_consumed:
+            self.content
+
+        return {attr: getattr(self, attr, None) for attr in self.__attrs__}
+
+    def __setstate__(self, state):
+        for name, value in state.items():
+            setattr(self, name, value)
+
+        # pickled objects do not have .raw
+        setattr(self, "_content_consumed", True)
+        setattr(self, "raw", None)
+
+    def __repr__(self):
+        return f"<Response [{self.status_code}]>"
+
+    def __bool__(self):
+        """Returns True if :attr:`status_code` is less than 400.
+
+        This attribute checks if the status code of the response is between
+        400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+        is **not** a check to see if the response code is ``200 OK``.
+        """
+        return self.ok
+
+    def __nonzero__(self):
+        """Returns True if :attr:`status_code` is less than 400.
+
+        This attribute checks if the status code of the response is between
+        400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+        is **not** a check to see if the response code is ``200 OK``.
+        """
+        return self.ok
+
+    def __iter__(self):
+        """Allows you to use a response as an iterator."""
+        return self.iter_content(128)
+
+    @property
+    def ok(self):
+        """Returns True if :attr:`status_code` is less than 400, False if not.
+
+        This attribute checks if the status code of the response is between
+        400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+        is **not** a check to see if the response code is ``200 OK``.
+        """
+        try:
+            self.raise_for_status()
+        except HTTPError:
+            return False
+        return True
+
+    @property
+    def is_redirect(self):
+        """True if this Response is a well-formed HTTP redirect that could have
+        been processed automatically (by :meth:`Session.resolve_redirects`).
+        """
+        return "location" in self.headers and self.status_code in REDIRECT_STATI
+
+    @property
+    def is_permanent_redirect(self):
+        """True if this Response is one of the permanent versions of redirect."""
+        return "location" in self.headers and self.status_code in (
+            codes.moved_permanently,
+            codes.permanent_redirect,
+        )
+
+    @property
+    def next(self):
+        """Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
+        return self._next
+
+    @property
+    def apparent_encoding(self):
+        """The apparent encoding, provided by the charset_normalizer or chardet libraries."""
+        if chardet is not None:
+            return chardet.detect(self.content)["encoding"]
+        else:
+            # If no character detection library is available, we'll fall back
+            # to a standard Python utf-8 str.
+            return "utf-8"
+
+    def iter_content(self, chunk_size=1, decode_unicode=False):
+        """Iterates over the response data. When stream=True is set on the
+        request, this avoids reading the content at once into memory for
+        large responses. The chunk size is the number of bytes it should
+        read into memory. This is not necessarily the length of each item
+        returned as decoding can take place.
+
+        chunk_size must be of type int or None. A value of None will
+        function differently depending on the value of `stream`.
+        stream=True will read data as it arrives in whatever size the
+        chunks are received. If stream=False, data is returned as
+        a single chunk.
+
+        If decode_unicode is True, content will be decoded using the best
+        available encoding based on the response.
+        """
+
+        def generate():
+            # Special case for urllib3.
+            if hasattr(self.raw, "stream"):
+                try:
+                    yield from self.raw.stream(chunk_size, decode_content=True)
+                except ProtocolError as e:
+                    raise ChunkedEncodingError(e)
+                except DecodeError as e:
+                    raise ContentDecodingError(e)
+                except ReadTimeoutError as e:
+                    raise ConnectionError(e)
+                except SSLError as e:
+                    raise RequestsSSLError(e)
+            else:
+                # Standard file-like object.
+                while True:
+                    chunk = self.raw.read(chunk_size)
+                    if not chunk:
+                        break
+                    yield chunk
+
+            self._content_consumed = True
+
+        if self._content_consumed and isinstance(self._content, bool):
+            raise StreamConsumedError()
+        elif chunk_size is not None and not isinstance(chunk_size, int):
+            raise TypeError(
+                f"chunk_size must be an int, it is instead a {type(chunk_size)}."
+ ) + # simulate reading small chunks of the content + reused_chunks = iter_slices(self._content, chunk_size) + + stream_chunks = generate() + + chunks = reused_chunks if self._content_consumed else stream_chunks + + if decode_unicode: + chunks = stream_decode_response_unicode(chunks, self) + + return chunks + + def iter_lines( + self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None + ): + """Iterates over the response data, one line at a time. When + stream=True is set on the request, this avoids reading the + content at once into memory for large responses. + + .. note:: This method is not reentrant safe. + """ + + pending = None + + for chunk in self.iter_content( + chunk_size=chunk_size, decode_unicode=decode_unicode + ): + if pending is not None: + chunk = pending + chunk + + if delimiter: + lines = chunk.split(delimiter) + else: + lines = chunk.splitlines() + + if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: + pending = lines.pop() + else: + pending = None + + yield from lines + + if pending is not None: + yield pending + + @property + def content(self): + """Content of the response, in bytes.""" + + if self._content is False: + # Read the contents. + if self._content_consumed: + raise RuntimeError("The content for this response was already consumed") + + if self.status_code == 0 or self.raw is None: + self._content = None + else: + self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b"" + + self._content_consumed = True + # don't need to release the connection; that's been handled by urllib3 + # since we exhausted the data. + return self._content + + @property + def text(self): + """Content of the response, in unicode. + + If Response.encoding is None, encoding will be guessed using + ``charset_normalizer`` or ``chardet``. + + The encoding of the response content is determined based solely on HTTP + headers, following RFC 2616 to the letter. If you can take advantage of + non-HTTP knowledge to make a better guess at the encoding, you should + set ``r.encoding`` appropriately before accessing this property. + """ + + # Try charset from content-type + content = None + encoding = self.encoding + + if not self.content: + return "" + + # Fallback to auto-detected encoding. + if self.encoding is None: + encoding = self.apparent_encoding + + # Decode unicode from given encoding. + try: + content = str(self.content, encoding, errors="replace") + except (LookupError, TypeError): + # A LookupError is raised if the encoding was not found which could + # indicate a misspelling or similar mistake. + # + # A TypeError can be raised if encoding is None + # + # So we try blindly encoding. + content = str(self.content, errors="replace") + + return content + + def json(self, **kwargs): + r"""Decodes the JSON response body (if any) as a Python object. + + This may return a dictionary, list, etc. depending on what is in the response. + + :param \*\*kwargs: Optional arguments that ``json.loads`` takes. + :raises requests.exceptions.JSONDecodeError: If the response body does not + contain valid json. + """ + + if not self.encoding and self.content and len(self.content) > 3: + # No encoding set. JSON RFC 4627 section 3 states we should expect + # UTF-8, -16 or -32. Detect which one to use; If the detection or + # decoding fails, fall back to `self.text` (using charset_normalizer to make + # a best guess). 
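+            # (guess_json_utf distinguishes the UTF flavours by inspecting
+            # byte order marks and the placement of zero bytes within the
+            # first four bytes of the payload.)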
+ encoding = guess_json_utf(self.content) + if encoding is not None: + try: + return complexjson.loads(self.content.decode(encoding), **kwargs) + except UnicodeDecodeError: + # Wrong UTF codec detected; usually because it's not UTF-8 + # but some other 8-bit codec. This is an RFC violation, + # and the server didn't bother to tell us what codec *was* + # used. + pass + except JSONDecodeError as e: + raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) + + try: + return complexjson.loads(self.text, **kwargs) + except JSONDecodeError as e: + # Catch JSON-related errors and raise as requests.JSONDecodeError + # This aliases json.JSONDecodeError and simplejson.JSONDecodeError + raise RequestsJSONDecodeError(e.msg, e.doc, e.pos) + + @property + def links(self): + """Returns the parsed header links of the response, if any.""" + + header = self.headers.get("link") + + resolved_links = {} + + if header: + links = parse_header_links(header) + + for link in links: + key = link.get("rel") or link.get("url") + resolved_links[key] = link + + return resolved_links + + def raise_for_status(self): + """Raises :class:`HTTPError`, if one occurred.""" + + http_error_msg = "" + if isinstance(self.reason, bytes): + # We attempt to decode utf-8 first because some servers + # choose to localize their reason strings. If the string + # isn't utf-8, we fall back to iso-8859-1 for all other + # encodings. (See PR #3538) + try: + reason = self.reason.decode("utf-8") + except UnicodeDecodeError: + reason = self.reason.decode("iso-8859-1") + else: + reason = self.reason + + if 400 <= self.status_code < 500: + http_error_msg = ( + f"{self.status_code} Client Error: {reason} for url: {self.url}" + ) + + elif 500 <= self.status_code < 600: + http_error_msg = ( + f"{self.status_code} Server Error: {reason} for url: {self.url}" + ) + + if http_error_msg: + raise HTTPError(http_error_msg, response=self) + + def close(self): + """Releases the connection back to the pool. Once this method has been + called the underlying ``raw`` object must not be accessed again. + + *Note: Should not normally need to be called explicitly.* + """ + if not self._content_consumed: + self.raw.close() + + release_conn = getattr(self.raw, "release_conn", None) + if release_conn is not None: + release_conn() diff --git a/py311/lib/python3.11/site-packages/requests/packages.py b/py311/lib/python3.11/site-packages/requests/packages.py new file mode 100644 index 0000000000000000000000000000000000000000..5ab3d8e250de8475cb22553f564e5444e02c7460 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/packages.py @@ -0,0 +1,23 @@ +import sys + +from .compat import chardet + +# This code exists for backwards compatibility reasons. +# I don't like it either. Just look the other way. 
:) + +for package in ("urllib3", "idna"): + locals()[package] = __import__(package) + # This traversal is apparently necessary such that the identities are + # preserved (requests.packages.urllib3.* is urllib3.*) + for mod in list(sys.modules): + if mod == package or mod.startswith(f"{package}."): + sys.modules[f"requests.packages.{mod}"] = sys.modules[mod] + +if chardet is not None: + target = chardet.__name__ + for mod in list(sys.modules): + if mod == target or mod.startswith(f"{target}."): + imported_mod = sys.modules[mod] + sys.modules[f"requests.packages.{mod}"] = imported_mod + mod = mod.replace(target, "chardet") + sys.modules[f"requests.packages.{mod}"] = imported_mod diff --git a/py311/lib/python3.11/site-packages/requests/sessions.py b/py311/lib/python3.11/site-packages/requests/sessions.py new file mode 100644 index 0000000000000000000000000000000000000000..731550de88aceb59747460faf519c2277e300e87 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/sessions.py @@ -0,0 +1,831 @@ +""" +requests.sessions +~~~~~~~~~~~~~~~~~ + +This module provides a Session object to manage and persist settings across +requests (cookies, auth, proxies). +""" +import os +import sys +import time +from collections import OrderedDict +from datetime import timedelta + +from ._internal_utils import to_native_string +from .adapters import HTTPAdapter +from .auth import _basic_auth_str +from .compat import Mapping, cookielib, urljoin, urlparse +from .cookies import ( + RequestsCookieJar, + cookiejar_from_dict, + extract_cookies_to_jar, + merge_cookies, +) +from .exceptions import ( + ChunkedEncodingError, + ContentDecodingError, + InvalidSchema, + TooManyRedirects, +) +from .hooks import default_hooks, dispatch_hook + +# formerly defined here, reexposed here for backward compatibility +from .models import ( # noqa: F401 + DEFAULT_REDIRECT_LIMIT, + REDIRECT_STATI, + PreparedRequest, + Request, +) +from .status_codes import codes +from .structures import CaseInsensitiveDict +from .utils import ( # noqa: F401 + DEFAULT_PORTS, + default_headers, + get_auth_from_url, + get_environ_proxies, + get_netrc_auth, + requote_uri, + resolve_proxies, + rewind_body, + should_bypass_proxies, + to_key_val_list, +) + +# Preferred clock, based on which one is more accurate on a given system. +if sys.platform == "win32": + preferred_clock = time.perf_counter +else: + preferred_clock = time.time + + +def merge_setting(request_setting, session_setting, dict_class=OrderedDict): + """Determines appropriate setting for a given request, taking into account + the explicit setting on that request, and the setting in the session. If a + setting is a dictionary, they will be merged together using `dict_class` + """ + + if session_setting is None: + return request_setting + + if request_setting is None: + return session_setting + + # Bypass if not a dictionary (e.g. verify) + if not ( + isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) + ): + return request_setting + + merged_setting = dict_class(to_key_val_list(session_setting)) + merged_setting.update(to_key_val_list(request_setting)) + + # Remove keys that are set to None. Extract keys first to avoid altering + # the dictionary during iteration. + none_keys = [k for (k, v) in merged_setting.items() if v is None] + for key in none_keys: + del merged_setting[key] + + return merged_setting + + +def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): + """Properly merges both requests and session hooks. 
+ + This is necessary because when request_hooks == {'response': []}, the + merge breaks Session hooks entirely. + """ + if session_hooks is None or session_hooks.get("response") == []: + return request_hooks + + if request_hooks is None or request_hooks.get("response") == []: + return session_hooks + + return merge_setting(request_hooks, session_hooks, dict_class) + + +class SessionRedirectMixin: + def get_redirect_target(self, resp): + """Receives a Response. Returns a redirect URI or ``None``""" + # Due to the nature of how requests processes redirects this method will + # be called at least once upon the original response and at least twice + # on each subsequent redirect response (if any). + # If a custom mixin is used to handle this logic, it may be advantageous + # to cache the redirect location onto the response object as a private + # attribute. + if resp.is_redirect: + location = resp.headers["location"] + # Currently the underlying http module on py3 decode headers + # in latin1, but empirical evidence suggests that latin1 is very + # rarely used with non-ASCII characters in HTTP headers. + # It is more likely to get UTF8 header rather than latin1. + # This causes incorrect handling of UTF8 encoded location headers. + # To solve this, we re-encode the location in latin1. + location = location.encode("latin1") + return to_native_string(location, "utf8") + return None + + def should_strip_auth(self, old_url, new_url): + """Decide whether Authorization header should be removed when redirecting""" + old_parsed = urlparse(old_url) + new_parsed = urlparse(new_url) + if old_parsed.hostname != new_parsed.hostname: + return True + # Special case: allow http -> https redirect when using the standard + # ports. This isn't specified by RFC 7235, but is kept to avoid + # breaking backwards compatibility with older versions of requests + # that allowed any redirects on the same host. + if ( + old_parsed.scheme == "http" + and old_parsed.port in (80, None) + and new_parsed.scheme == "https" + and new_parsed.port in (443, None) + ): + return False + + # Handle default port usage corresponding to scheme. + changed_port = old_parsed.port != new_parsed.port + changed_scheme = old_parsed.scheme != new_parsed.scheme + default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) + if ( + not changed_scheme + and old_parsed.port in default_port + and new_parsed.port in default_port + ): + return False + + # Standard case: root URI must match + return changed_port or changed_scheme + + def resolve_redirects( + self, + resp, + req, + stream=False, + timeout=None, + verify=True, + cert=None, + proxies=None, + yield_requests=False, + **adapter_kwargs, + ): + """Receives a Response. Returns a generator of Responses or Requests.""" + + hist = [] # keep track of history + + url = self.get_redirect_target(resp) + previous_fragment = urlparse(req.url).fragment + while url: + prepared_request = req.copy() + + # Update history and keep track of redirects. + # resp.history must ignore the original request in this loop + hist.append(resp) + resp.history = hist[1:] + + try: + resp.content # Consume socket so it can be released + except (ChunkedEncodingError, ContentDecodingError, RuntimeError): + resp.raw.read(decode_content=False) + + if len(resp.history) >= self.max_redirects: + raise TooManyRedirects( + f"Exceeded {self.max_redirects} redirects.", response=resp + ) + + # Release the connection back into the pool. 
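+            # (urllib3 pools connections per host; closing the intermediate
+            # response returns its connection for reuse by the follow-up
+            # request.)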
+ resp.close() + + # Handle redirection without scheme (see: RFC 1808 Section 4) + if url.startswith("//"): + parsed_rurl = urlparse(resp.url) + url = ":".join([to_native_string(parsed_rurl.scheme), url]) + + # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) + parsed = urlparse(url) + if parsed.fragment == "" and previous_fragment: + parsed = parsed._replace(fragment=previous_fragment) + elif parsed.fragment: + previous_fragment = parsed.fragment + url = parsed.geturl() + + # Facilitate relative 'location' headers, as allowed by RFC 7231. + # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') + # Compliant with RFC3986, we percent encode the url. + if not parsed.netloc: + url = urljoin(resp.url, requote_uri(url)) + else: + url = requote_uri(url) + + prepared_request.url = to_native_string(url) + + self.rebuild_method(prepared_request, resp) + + # https://github.com/psf/requests/issues/1084 + if resp.status_code not in ( + codes.temporary_redirect, + codes.permanent_redirect, + ): + # https://github.com/psf/requests/issues/3490 + purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding") + for header in purged_headers: + prepared_request.headers.pop(header, None) + prepared_request.body = None + + headers = prepared_request.headers + headers.pop("Cookie", None) + + # Extract any cookies sent on the response to the cookiejar + # in the new request. Because we've mutated our copied prepared + # request, use the old one that we haven't yet touched. + extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) + merge_cookies(prepared_request._cookies, self.cookies) + prepared_request.prepare_cookies(prepared_request._cookies) + + # Rebuild auth and proxy information. + proxies = self.rebuild_proxies(prepared_request, proxies) + self.rebuild_auth(prepared_request, resp) + + # A failed tell() sets `_body_position` to `object()`. This non-None + # value ensures `rewindable` will be True, allowing us to raise an + # UnrewindableBodyError, instead of hanging the connection. + rewindable = prepared_request._body_position is not None and ( + "Content-Length" in headers or "Transfer-Encoding" in headers + ) + + # Attempt to rewind consumed file-like object. + if rewindable: + rewind_body(prepared_request) + + # Override the original request. + req = prepared_request + + if yield_requests: + yield req + else: + resp = self.send( + req, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + allow_redirects=False, + **adapter_kwargs, + ) + + extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) + + # extract redirect url, if any, for the next loop + url = self.get_redirect_target(resp) + yield resp + + def rebuild_auth(self, prepared_request, response): + """When being redirected we may want to strip authentication from the + request to avoid leaking credentials. This method intelligently removes + and reapplies authentication where possible to avoid credential loss. + """ + headers = prepared_request.headers + url = prepared_request.url + + if "Authorization" in headers and self.should_strip_auth( + response.request.url, url + ): + # If we get redirected to a new host, we should strip out any + # authentication headers. + del headers["Authorization"] + + # .netrc might have more auth for us on our new host. 
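+        # (Only consulted when trust_env is set, mirroring the netrc lookup
+        # Session.prepare_request performs for the original request.)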
+        new_auth = get_netrc_auth(url) if self.trust_env else None
+        if new_auth is not None:
+            prepared_request.prepare_auth(new_auth)
+
+    def rebuild_proxies(self, prepared_request, proxies):
+        """This method re-evaluates the proxy configuration by considering the
+        environment variables. If we are redirected to a URL covered by
+        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
+        proxy keys for this URL (in case they were stripped by a previous
+        redirect).
+
+        This method also replaces the Proxy-Authorization header where
+        necessary.
+
+        :rtype: dict
+        """
+        headers = prepared_request.headers
+        scheme = urlparse(prepared_request.url).scheme
+        new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env)
+
+        if "Proxy-Authorization" in headers:
+            del headers["Proxy-Authorization"]
+
+        try:
+            username, password = get_auth_from_url(new_proxies[scheme])
+        except KeyError:
+            username, password = None, None
+
+        # urllib3 handles proxy authorization for us in the standard adapter.
+        # Avoid appending this to TLS tunneled requests where it may be leaked.
+        if not scheme.startswith("https") and username and password:
+            headers["Proxy-Authorization"] = _basic_auth_str(username, password)
+
+        return new_proxies
+
+    def rebuild_method(self, prepared_request, response):
+        """When being redirected we may want to change the method of the request
+        based on certain specs or browser behavior.
+        """
+        method = prepared_request.method
+
+        # https://tools.ietf.org/html/rfc7231#section-6.4.4
+        if response.status_code == codes.see_other and method != "HEAD":
+            method = "GET"
+
+        # Do what the browsers do, despite standards...
+        # First, turn 302s into GETs.
+        if response.status_code == codes.found and method != "HEAD":
+            method = "GET"
+
+        # Second, if a POST is responded to with a 301, turn it into a GET.
+        # This bizarre behaviour is explained in Issue 1704.
+        if response.status_code == codes.moved and method == "POST":
+            method = "GET"
+
+        prepared_request.method = method
+
+
+class Session(SessionRedirectMixin):
+    """A Requests session.
+
+    Provides cookie persistence, connection-pooling, and configuration.
+
+    Basic Usage::
+
+      >>> import requests
+      >>> s = requests.Session()
+      >>> s.get('https://httpbin.org/get')
+      <Response [200]>
+
+    Or as a context manager::
+
+      >>> with requests.Session() as s:
+      ...     s.get('https://httpbin.org/get')
+      <Response [200]>
+    """
+
+    __attrs__ = [
+        "headers",
+        "cookies",
+        "auth",
+        "proxies",
+        "hooks",
+        "params",
+        "verify",
+        "cert",
+        "adapters",
+        "stream",
+        "trust_env",
+        "max_redirects",
+    ]
+
+    def __init__(self):
+        #: A case-insensitive dictionary of headers to be sent on each
+        #: :class:`Request <Request>` sent from this
+        #: :class:`Session <Session>`.
+        self.headers = default_headers()
+
+        #: Default Authentication tuple or object to attach to
+        #: :class:`Request <Request>`.
+        self.auth = None
+
+        #: Dictionary mapping protocol or protocol and host to the URL of the proxy
+        #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
+        #: be used on each :class:`Request <Request>`.
+        self.proxies = {}
+
+        #: Event-handling hooks.
+        self.hooks = default_hooks()
+
+        #: Dictionary of querystring data to attach to each
+        #: :class:`Request <Request>`. The dictionary values may be lists for
+        #: representing multivalued query parameters.
+        self.params = {}
+
+        #: Stream response content default.
+        self.stream = False
+
+        #: SSL Verification default.
+        #: Defaults to `True`, requiring requests to verify the TLS certificate at the
+        #: remote end.
+ #: If verify is set to `False`, requests will accept any TLS certificate + #: presented by the server, and will ignore hostname mismatches and/or + #: expired certificates, which will make your application vulnerable to + #: man-in-the-middle (MitM) attacks. + #: Only set this to `False` for testing. + self.verify = True + + #: SSL client certificate default, if String, path to ssl client + #: cert file (.pem). If Tuple, ('cert', 'key') pair. + self.cert = None + + #: Maximum number of redirects allowed. If the request exceeds this + #: limit, a :class:`TooManyRedirects` exception is raised. + #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is + #: 30. + self.max_redirects = DEFAULT_REDIRECT_LIMIT + + #: Trust environment settings for proxy configuration, default + #: authentication and similar. + self.trust_env = True + + #: A CookieJar containing all currently outstanding cookies set on this + #: session. By default it is a + #: :class:`RequestsCookieJar `, but + #: may be any other ``cookielib.CookieJar`` compatible object. + self.cookies = cookiejar_from_dict({}) + + # Default connection adapters. + self.adapters = OrderedDict() + self.mount("https://", HTTPAdapter()) + self.mount("http://", HTTPAdapter()) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def prepare_request(self, request): + """Constructs a :class:`PreparedRequest ` for + transmission and returns it. The :class:`PreparedRequest` has settings + merged from the :class:`Request ` instance and those of the + :class:`Session`. + + :param request: :class:`Request` instance to prepare with this + session's settings. + :rtype: requests.PreparedRequest + """ + cookies = request.cookies or {} + + # Bootstrap CookieJar. + if not isinstance(cookies, cookielib.CookieJar): + cookies = cookiejar_from_dict(cookies) + + # Merge with session cookies + merged_cookies = merge_cookies( + merge_cookies(RequestsCookieJar(), self.cookies), cookies + ) + + # Set environment's basic authentication if not explicitly set. + auth = request.auth + if self.trust_env and not auth and not self.auth: + auth = get_netrc_auth(request.url) + + p = PreparedRequest() + p.prepare( + method=request.method.upper(), + url=request.url, + files=request.files, + data=request.data, + json=request.json, + headers=merge_setting( + request.headers, self.headers, dict_class=CaseInsensitiveDict + ), + params=merge_setting(request.params, self.params), + auth=merge_setting(auth, self.auth), + cookies=merged_cookies, + hooks=merge_hooks(request.hooks, self.hooks), + ) + return p + + def request( + self, + method, + url, + params=None, + data=None, + headers=None, + cookies=None, + files=None, + auth=None, + timeout=None, + allow_redirects=True, + proxies=None, + hooks=None, + stream=None, + verify=None, + cert=None, + json=None, + ): + """Constructs a :class:`Request `, prepares it and sends it. + Returns :class:`Response ` object. + + :param method: method for the new :class:`Request` object. + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary or bytes to be sent in the query + string for the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json to send in the body of the + :class:`Request`. + :param headers: (optional) Dictionary of HTTP Headers to send with the + :class:`Request`. 
+ :param cookies: (optional) Dict or CookieJar object to send with the + :class:`Request`. + :param files: (optional) Dictionary of ``'filename': file-like-objects`` + for multipart encoding upload. + :param auth: (optional) Auth tuple or callable to enable + Basic/Digest/Custom HTTP Auth. + :param timeout: (optional) How many seconds to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple + :param allow_redirects: (optional) Set to True by default. + :type allow_redirects: bool + :param proxies: (optional) Dictionary mapping protocol or protocol and + hostname to the URL of the proxy. + :param hooks: (optional) Dictionary mapping hook name to one event or + list of events, event must be callable. + :param stream: (optional) whether to immediately download the response + content. Defaults to ``False``. + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use. Defaults to ``True``. When set to + ``False``, requests will accept any TLS certificate presented by + the server, and will ignore hostname mismatches and/or expired + certificates, which will make your application vulnerable to + man-in-the-middle (MitM) attacks. Setting verify to ``False`` + may be useful during local development or testing. + :param cert: (optional) if String, path to ssl client cert file (.pem). + If Tuple, ('cert', 'key') pair. + :rtype: requests.Response + """ + # Create the Request. + req = Request( + method=method.upper(), + url=url, + headers=headers, + files=files, + data=data or {}, + json=json, + params=params or {}, + auth=auth, + cookies=cookies, + hooks=hooks, + ) + prep = self.prepare_request(req) + + proxies = proxies or {} + + settings = self.merge_environment_settings( + prep.url, proxies, stream, verify, cert + ) + + # Send the request. + send_kwargs = { + "timeout": timeout, + "allow_redirects": allow_redirects, + } + send_kwargs.update(settings) + resp = self.send(prep, **send_kwargs) + + return resp + + def get(self, url, **kwargs): + r"""Sends a GET request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", True) + return self.request("GET", url, **kwargs) + + def options(self, url, **kwargs): + r"""Sends a OPTIONS request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", True) + return self.request("OPTIONS", url, **kwargs) + + def head(self, url, **kwargs): + r"""Sends a HEAD request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", False) + return self.request("HEAD", url, **kwargs) + + def post(self, url, data=None, json=None, **kwargs): + r"""Sends a POST request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. 
+ :param json: (optional) json to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("POST", url, data=data, json=json, **kwargs) + + def put(self, url, data=None, **kwargs): + r"""Sends a PUT request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("PUT", url, data=data, **kwargs) + + def patch(self, url, data=None, **kwargs): + r"""Sends a PATCH request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("PATCH", url, data=data, **kwargs) + + def delete(self, url, **kwargs): + r"""Sends a DELETE request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("DELETE", url, **kwargs) + + def send(self, request, **kwargs): + """Send a given PreparedRequest. + + :rtype: requests.Response + """ + # Set defaults that the hooks can utilize to ensure they always have + # the correct parameters to reproduce the previous request. + kwargs.setdefault("stream", self.stream) + kwargs.setdefault("verify", self.verify) + kwargs.setdefault("cert", self.cert) + if "proxies" not in kwargs: + kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env) + + # It's possible that users might accidentally send a Request object. + # Guard against that specific failure case. + if isinstance(request, Request): + raise ValueError("You can only send PreparedRequests.") + + # Set up variables needed for resolve_redirects and dispatching of hooks + allow_redirects = kwargs.pop("allow_redirects", True) + stream = kwargs.get("stream") + hooks = request.hooks + + # Get the appropriate adapter to use + adapter = self.get_adapter(url=request.url) + + # Start time (approximately) of the request + start = preferred_clock() + + # Send the request + r = adapter.send(request, **kwargs) + + # Total elapsed time of the request (approximately) + elapsed = preferred_clock() - start + r.elapsed = timedelta(seconds=elapsed) + + # Response manipulation hooks + r = dispatch_hook("response", hooks, r, **kwargs) + + # Persist cookies + if r.history: + # If the hooks create history then we want those cookies too + for resp in r.history: + extract_cookies_to_jar(self.cookies, resp.request, resp.raw) + + extract_cookies_to_jar(self.cookies, request, r.raw) + + # Resolve redirects if allowed. + if allow_redirects: + # Redirect resolving generator. + gen = self.resolve_redirects(r, request, **kwargs) + history = [resp for resp in gen] + else: + history = [] + + # Shuffle things around if there's history. + if history: + # Insert the first (original) request at the start + history.insert(0, r) + # Get the last request made + r = history.pop() + r.history = history + + # If redirects aren't being followed, store the response on the Request for Response.next(). 
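+        # (With yield_requests=True, resolve_redirects yields the prepared
+        # follow-up request instead of sending it, so Response.next can
+        # expose it without any extra network traffic.)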
+ if not allow_redirects: + try: + r._next = next( + self.resolve_redirects(r, request, yield_requests=True, **kwargs) + ) + except StopIteration: + pass + + if not stream: + r.content + + return r + + def merge_environment_settings(self, url, proxies, stream, verify, cert): + """ + Check the environment and merge it with some settings. + + :rtype: dict + """ + # Gather clues from the surrounding environment. + if self.trust_env: + # Set environment's proxies. + no_proxy = proxies.get("no_proxy") if proxies is not None else None + env_proxies = get_environ_proxies(url, no_proxy=no_proxy) + for k, v in env_proxies.items(): + proxies.setdefault(k, v) + + # Look for requests environment configuration + # and be compatible with cURL. + if verify is True or verify is None: + verify = ( + os.environ.get("REQUESTS_CA_BUNDLE") + or os.environ.get("CURL_CA_BUNDLE") + or verify + ) + + # Merge all the kwargs. + proxies = merge_setting(proxies, self.proxies) + stream = merge_setting(stream, self.stream) + verify = merge_setting(verify, self.verify) + cert = merge_setting(cert, self.cert) + + return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert} + + def get_adapter(self, url): + """ + Returns the appropriate connection adapter for the given URL. + + :rtype: requests.adapters.BaseAdapter + """ + for prefix, adapter in self.adapters.items(): + if url.lower().startswith(prefix.lower()): + return adapter + + # Nothing matches :-/ + raise InvalidSchema(f"No connection adapters were found for {url!r}") + + def close(self): + """Closes all adapters and as such the session""" + for v in self.adapters.values(): + v.close() + + def mount(self, prefix, adapter): + """Registers a connection adapter to a prefix. + + Adapters are sorted in descending order by prefix length. + """ + self.adapters[prefix] = adapter + keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] + + for key in keys_to_move: + self.adapters[key] = self.adapters.pop(key) + + def __getstate__(self): + state = {attr: getattr(self, attr, None) for attr in self.__attrs__} + return state + + def __setstate__(self, state): + for attr, value in state.items(): + setattr(self, attr, value) + + +def session(): + """ + Returns a :class:`Session` for context-management. + + .. deprecated:: 1.0.0 + + This method has been deprecated since version 1.0.0 and is only kept for + backwards compatibility. New code should use :class:`~requests.sessions.Session` + to create a session. This may be removed at a future date. + + :rtype: Session + """ + return Session() diff --git a/py311/lib/python3.11/site-packages/requests/status_codes.py b/py311/lib/python3.11/site-packages/requests/status_codes.py new file mode 100644 index 0000000000000000000000000000000000000000..c7945a2f06897ed980cc575df2f48d9e6c1a9f7e --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/status_codes.py @@ -0,0 +1,128 @@ +r""" +The ``codes`` object defines a mapping from common names for HTTP statuses +to their numerical codes, accessible either as attributes or as dictionary +items. + +Example:: + + >>> import requests + >>> requests.codes['temporary_redirect'] + 307 + >>> requests.codes.teapot + 418 + >>> requests.codes['\o/'] + 200 + +Some codes have multiple names, and both upper- and lower-case versions of +the names are allowed. For example, ``codes.ok``, ``codes.OK``, and +``codes.okay`` all correspond to the HTTP status code 200. +""" + +from .structures import LookupDict + +_codes = { + # Informational. 
+ 100: ("continue",), + 101: ("switching_protocols",), + 102: ("processing", "early-hints"), + 103: ("checkpoint",), + 122: ("uri_too_long", "request_uri_too_long"), + 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"), + 201: ("created",), + 202: ("accepted",), + 203: ("non_authoritative_info", "non_authoritative_information"), + 204: ("no_content",), + 205: ("reset_content", "reset"), + 206: ("partial_content", "partial"), + 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"), + 208: ("already_reported",), + 226: ("im_used",), + # Redirection. + 300: ("multiple_choices",), + 301: ("moved_permanently", "moved", "\\o-"), + 302: ("found",), + 303: ("see_other", "other"), + 304: ("not_modified",), + 305: ("use_proxy",), + 306: ("switch_proxy",), + 307: ("temporary_redirect", "temporary_moved", "temporary"), + 308: ( + "permanent_redirect", + "resume_incomplete", + "resume", + ), # "resume" and "resume_incomplete" to be removed in 3.0 + # Client Error. + 400: ("bad_request", "bad"), + 401: ("unauthorized",), + 402: ("payment_required", "payment"), + 403: ("forbidden",), + 404: ("not_found", "-o-"), + 405: ("method_not_allowed", "not_allowed"), + 406: ("not_acceptable",), + 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"), + 408: ("request_timeout", "timeout"), + 409: ("conflict",), + 410: ("gone",), + 411: ("length_required",), + 412: ("precondition_failed", "precondition"), + 413: ("request_entity_too_large", "content_too_large"), + 414: ("request_uri_too_large", "uri_too_long"), + 415: ("unsupported_media_type", "unsupported_media", "media_type"), + 416: ( + "requested_range_not_satisfiable", + "requested_range", + "range_not_satisfiable", + ), + 417: ("expectation_failed",), + 418: ("im_a_teapot", "teapot", "i_am_a_teapot"), + 421: ("misdirected_request",), + 422: ("unprocessable_entity", "unprocessable", "unprocessable_content"), + 423: ("locked",), + 424: ("failed_dependency", "dependency"), + 425: ("unordered_collection", "unordered", "too_early"), + 426: ("upgrade_required", "upgrade"), + 428: ("precondition_required", "precondition"), + 429: ("too_many_requests", "too_many"), + 431: ("header_fields_too_large", "fields_too_large"), + 444: ("no_response", "none"), + 449: ("retry_with", "retry"), + 450: ("blocked_by_windows_parental_controls", "parental_controls"), + 451: ("unavailable_for_legal_reasons", "legal_reasons"), + 499: ("client_closed_request",), + # Server Error. 
+ 500: ("internal_server_error", "server_error", "/o\\", "✗"), + 501: ("not_implemented",), + 502: ("bad_gateway",), + 503: ("service_unavailable", "unavailable"), + 504: ("gateway_timeout",), + 505: ("http_version_not_supported", "http_version"), + 506: ("variant_also_negotiates",), + 507: ("insufficient_storage",), + 509: ("bandwidth_limit_exceeded", "bandwidth"), + 510: ("not_extended",), + 511: ("network_authentication_required", "network_auth", "network_authentication"), +} + +codes = LookupDict(name="status_codes") + + +def _init(): + for code, titles in _codes.items(): + for title in titles: + setattr(codes, title, code) + if not title.startswith(("\\", "/")): + setattr(codes, title.upper(), code) + + def doc(code): + names = ", ".join(f"``{n}``" for n in _codes[code]) + return "* %d: %s" % (code, names) + + global __doc__ + __doc__ = ( + __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes)) + if __doc__ is not None + else None + ) + + +_init() diff --git a/py311/lib/python3.11/site-packages/requests/structures.py b/py311/lib/python3.11/site-packages/requests/structures.py new file mode 100644 index 0000000000000000000000000000000000000000..188e13e4829591facb23ae0e2eda84b9807cb818 --- /dev/null +++ b/py311/lib/python3.11/site-packages/requests/structures.py @@ -0,0 +1,99 @@ +""" +requests.structures +~~~~~~~~~~~~~~~~~~~ + +Data structures that power Requests. +""" + +from collections import OrderedDict + +from .compat import Mapping, MutableMapping + + +class CaseInsensitiveDict(MutableMapping): + """A case-insensitive ``dict``-like object. + + Implements all methods and operations of + ``MutableMapping`` as well as dict's ``copy``. Also + provides ``lower_items``. + + All keys are expected to be strings. The structure remembers the + case of the last key to be set, and ``iter(instance)``, + ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` + will contain case-sensitive keys. However, querying and contains + testing is case insensitive:: + + cid = CaseInsensitiveDict() + cid['Accept'] = 'application/json' + cid['aCCEPT'] == 'application/json' # True + list(cid) == ['Accept'] # True + + For example, ``headers['content-encoding']`` will return the + value of a ``'Content-Encoding'`` response header, regardless + of how the header name was originally stored. + + If the constructor, ``.update``, or equality comparison + operations are given keys that have equal ``.lower()``s, the + behavior is undefined. + """ + + def __init__(self, data=None, **kwargs): + self._store = OrderedDict() + if data is None: + data = {} + self.update(data, **kwargs) + + def __setitem__(self, key, value): + # Use the lowercased key for lookups, but store the actual + # key alongside the value. 
+        self._store[key.lower()] = (key, value)
+
+    def __getitem__(self, key):
+        return self._store[key.lower()][1]
+
+    def __delitem__(self, key):
+        del self._store[key.lower()]
+
+    def __iter__(self):
+        return (casedkey for casedkey, mappedvalue in self._store.values())
+
+    def __len__(self):
+        return len(self._store)
+
+    def lower_items(self):
+        """Like iteritems(), but with all lowercase keys."""
+        return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
+
+    def __eq__(self, other):
+        if isinstance(other, Mapping):
+            other = CaseInsensitiveDict(other)
+        else:
+            return NotImplemented
+        # Compare insensitively
+        return dict(self.lower_items()) == dict(other.lower_items())
+
+    # Copy is required
+    def copy(self):
+        return CaseInsensitiveDict(self._store.values())
+
+    def __repr__(self):
+        return str(dict(self.items()))
+
+
+class LookupDict(dict):
+    """Dictionary lookup object."""
+
+    def __init__(self, name=None):
+        self.name = name
+        super().__init__()
+
+    def __repr__(self):
+        return f"<lookup '{self.name}'>"
+
+    def __getitem__(self, key):
+        # We allow fall-through here, so values default to None
+
+        return self.__dict__.get(key, None)
+
+    def get(self, key, default=None):
+        return self.__dict__.get(key, default)
diff --git a/py311/lib/python3.11/site-packages/requests/utils.py b/py311/lib/python3.11/site-packages/requests/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ab55852cc2188c53f08462ec4319c97fa49f04b
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/requests/utils.py
@@ -0,0 +1,1086 @@
+"""
+requests.utils
+~~~~~~~~~~~~~~
+
+This module provides utility functions that are used within Requests
+that are also useful for external consumption.
+"""
+
+import codecs
+import contextlib
+import io
+import os
+import re
+import socket
+import struct
+import sys
+import tempfile
+import warnings
+import zipfile
+from collections import OrderedDict
+
+from urllib3.util import make_headers, parse_url
+
+from . import certs
+from .__version__ import __version__
+
+# to_native_string is unused here, but imported here for backwards compatibility
+from ._internal_utils import (  # noqa: F401
+    _HEADER_VALIDATORS_BYTE,
+    _HEADER_VALIDATORS_STR,
+    HEADER_VALIDATORS,
+    to_native_string,
+)
+from .compat import (
+    Mapping,
+    basestring,
+    bytes,
+    getproxies,
+    getproxies_environment,
+    integer_types,
+    is_urllib3_1,
+)
+from .compat import parse_http_list as _parse_list_header
+from .compat import (
+    proxy_bypass,
+    proxy_bypass_environment,
+    quote,
+    str,
+    unquote,
+    urlparse,
+    urlunparse,
+)
+from .cookies import cookiejar_from_dict
+from .exceptions import (
+    FileModeWarning,
+    InvalidHeader,
+    InvalidURL,
+    UnrewindableBodyError,
+)
+from .structures import CaseInsensitiveDict
+
+NETRC_FILES = (".netrc", "_netrc")
+
+DEFAULT_CA_BUNDLE_PATH = certs.where()
+
+DEFAULT_PORTS = {"http": 80, "https": 443}
+
+# Ensure that ', ' is used to preserve previous delimiter behavior.
+DEFAULT_ACCEPT_ENCODING = ", ".join(
+    re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
+)
+
+
+if sys.platform == "win32":
+    # provide a proxy_bypass version on Windows without DNS lookups
+
+    def proxy_bypass_registry(host):
+        try:
+            import winreg
+        except ImportError:
+            return False
+
+        try:
+            internetSettings = winreg.OpenKey(
+                winreg.HKEY_CURRENT_USER,
+                r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
+            )
+            # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
+            proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
+            # ProxyOverride is almost always a string
+            proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
+        except (OSError, ValueError):
+            return False
+        if not proxyEnable or not proxyOverride:
+            return False
+
+        # make a check value list from the registry entry: replace the
+        # '<local>' string by the localhost entry and the corresponding
+        # canonical entry.
+        proxyOverride = proxyOverride.split(";")
+        # filter out empty strings to avoid re.match return true in the following code.
+        proxyOverride = filter(None, proxyOverride)
+        # now check if we match one of the registry values.
+        for test in proxyOverride:
+            if test == "<local>":
+                if "." not in host:
+                    return True
+            test = test.replace(".", r"\.")  # mask dots
+            test = test.replace("*", r".*")  # change glob sequence
+            test = test.replace("?", r".")  # change glob char
+            if re.match(test, host, re.I):
+                return True
+        return False
+
+    def proxy_bypass(host):  # noqa
+        """Return True, if the host should be bypassed.
+
+        Checks proxy settings gathered from the environment, if specified,
+        or the registry.
+        """
+        if getproxies_environment():
+            return proxy_bypass_environment(host)
+        else:
+            return proxy_bypass_registry(host)
+
+
+def dict_to_sequence(d):
+    """Returns an internal sequence dictionary update."""
+
+    if hasattr(d, "items"):
+        d = d.items()
+
+    return d
+
+
+def super_len(o):
+    total_length = None
+    current_position = 0
+
+    if not is_urllib3_1 and isinstance(o, str):
+        # urllib3 2.x+ treats all strings as utf-8 instead
+        # of latin-1 (iso-8859-1) like http.client.
+        o = o.encode("utf-8")
+
+    if hasattr(o, "__len__"):
+        total_length = len(o)
+
+    elif hasattr(o, "len"):
+        total_length = o.len
+
+    elif hasattr(o, "fileno"):
+        try:
+            fileno = o.fileno()
+        except (io.UnsupportedOperation, AttributeError):
+            # AttributeError is a surprising exception, seeing as how we've just checked
+            # that `hasattr(o, 'fileno')`. It happens for objects obtained via
+            # `Tarfile.extractfile()`, per issue 5229.
+            pass
+        else:
+            total_length = os.fstat(fileno).st_size
+
+            # Having used fstat to determine the file length, we need to
+            # confirm that this file was opened up in binary mode.
+            if "b" not in o.mode:
+                warnings.warn(
+                    (
+                        "Requests has determined the content-length for this "
+                        "request using the binary size of the file: however, the "
+                        "file has been opened in text mode (i.e. without the 'b' "
+                        "flag in the mode). This may lead to an incorrect "
+                        "content-length. In Requests 3.0, support will be removed "
+                        "for files in text mode."
+                    ),
+                    FileModeWarning,
+                )
+
+    if hasattr(o, "tell"):
+        try:
+            current_position = o.tell()
+        except OSError:
+            # This can happen in some weird situations, such as when the file
+            # is actually a special file descriptor like stdin. In this
+            # instance, we don't know what the length is, so set it to zero and
+            # let requests chunk it instead.
+ if total_length is not None: + current_position = total_length + else: + if hasattr(o, "seek") and total_length is None: + # StringIO and BytesIO have seek but no usable fileno + try: + # seek to end of file + o.seek(0, 2) + total_length = o.tell() + + # seek back to current position to support + # partially read file-like objects + o.seek(current_position or 0) + except OSError: + total_length = 0 + + if total_length is None: + total_length = 0 + + return max(0, total_length - current_position) + + +def get_netrc_auth(url, raise_errors=False): + """Returns the Requests tuple auth for a given url from netrc.""" + + netrc_file = os.environ.get("NETRC") + if netrc_file is not None: + netrc_locations = (netrc_file,) + else: + netrc_locations = (f"~/{f}" for f in NETRC_FILES) + + try: + from netrc import NetrcParseError, netrc + + netrc_path = None + + for f in netrc_locations: + loc = os.path.expanduser(f) + if os.path.exists(loc): + netrc_path = loc + break + + # Abort early if there isn't one. + if netrc_path is None: + return + + ri = urlparse(url) + host = ri.hostname + + try: + _netrc = netrc(netrc_path).authenticators(host) + if _netrc: + # Return with login / password + login_i = 0 if _netrc[0] else 1 + return (_netrc[login_i], _netrc[2]) + except (NetrcParseError, OSError): + # If there was a parsing error or a permissions issue reading the file, + # we'll just skip netrc auth unless explicitly asked to raise errors. + if raise_errors: + raise + + # App Engine hackiness. + except (ImportError, AttributeError): + pass + + +def guess_filename(obj): + """Tries to guess the filename of the given object.""" + name = getattr(obj, "name", None) + if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">": + return os.path.basename(name) + + +def extract_zipped_paths(path): + """Replace nonexistent paths that look like they refer to a member of a zip + archive with the location of an extracted copy of the target, or else + just return the provided path unchanged. 
+ """ + if os.path.exists(path): + # this is already a valid path, no need to do anything further + return path + + # find the first valid part of the provided path and treat that as a zip archive + # assume the rest of the path is the name of a member in the archive + archive, member = os.path.split(path) + while archive and not os.path.exists(archive): + archive, prefix = os.path.split(archive) + if not prefix: + # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split), + # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users + break + member = "/".join([prefix, member]) + + if not zipfile.is_zipfile(archive): + return path + + zip_file = zipfile.ZipFile(archive) + if member not in zip_file.namelist(): + return path + + # we have a valid zip archive and a valid member of that archive + tmp = tempfile.gettempdir() + extracted_path = os.path.join(tmp, member.split("/")[-1]) + if not os.path.exists(extracted_path): + # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition + with atomic_open(extracted_path) as file_handler: + file_handler.write(zip_file.read(member)) + return extracted_path + + +@contextlib.contextmanager +def atomic_open(filename): + """Write a file to the disk in an atomic fashion""" + tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename)) + try: + with os.fdopen(tmp_descriptor, "wb") as tmp_handler: + yield tmp_handler + os.replace(tmp_name, filename) + except BaseException: + os.remove(tmp_name) + raise + + +def from_key_val_list(value): + """Take an object and test to see if it can be represented as a + dictionary. Unless it can not be represented as such, return an + OrderedDict, e.g., + + :: + + >>> from_key_val_list([('key', 'val')]) + OrderedDict([('key', 'val')]) + >>> from_key_val_list('string') + Traceback (most recent call last): + ... + ValueError: cannot encode objects that are not 2-tuples + >>> from_key_val_list({'key': 'val'}) + OrderedDict([('key', 'val')]) + + :rtype: OrderedDict + """ + if value is None: + return None + + if isinstance(value, (str, bytes, bool, int)): + raise ValueError("cannot encode objects that are not 2-tuples") + + return OrderedDict(value) + + +def to_key_val_list(value): + """Take an object and test to see if it can be represented as a + dictionary. If it can be, return a list of tuples, e.g., + + :: + + >>> to_key_val_list([('key', 'val')]) + [('key', 'val')] + >>> to_key_val_list({'key': 'val'}) + [('key', 'val')] + >>> to_key_val_list('string') + Traceback (most recent call last): + ... + ValueError: cannot encode objects that are not 2-tuples + + :rtype: list + """ + if value is None: + return None + + if isinstance(value, (str, bytes, bool, int)): + raise ValueError("cannot encode objects that are not 2-tuples") + + if isinstance(value, Mapping): + value = value.items() + + return list(value) + + +# From mitsuhiko/werkzeug (used with permission). +def parse_list_header(value): + """Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Quotes are removed automatically after parsing. + + It basically works like :func:`parse_set_header` just that items + may appear multiple times and case sensitivity is preserved. 
+ + The return value is a standard :class:`list`: + + >>> parse_list_header('token, "quoted value"') + ['token', 'quoted value'] + + To create a header from the :class:`list` again, use the + :func:`dump_header` function. + + :param value: a string with a list header. + :return: :class:`list` + :rtype: list + """ + result = [] + for item in _parse_list_header(value): + if item[:1] == item[-1:] == '"': + item = unquote_header_value(item[1:-1]) + result.append(item) + return result + + +# From mitsuhiko/werkzeug (used with permission). +def parse_dict_header(value): + """Parse lists of key, value pairs as described by RFC 2068 Section 2 and + convert them into a python dict: + + >>> d = parse_dict_header('foo="is a fish", bar="as well"') + >>> type(d) is dict + True + >>> sorted(d.items()) + [('bar', 'as well'), ('foo', 'is a fish')] + + If there is no value for a key it will be `None`: + + >>> parse_dict_header('key_without_value') + {'key_without_value': None} + + To create a header from the :class:`dict` again, use the + :func:`dump_header` function. + + :param value: a string with a dict header. + :return: :class:`dict` + :rtype: dict + """ + result = {} + for item in _parse_list_header(value): + if "=" not in item: + result[item] = None + continue + name, value = item.split("=", 1) + if value[:1] == value[-1:] == '"': + value = unquote_header_value(value[1:-1]) + result[name] = value + return result + + +# From mitsuhiko/werkzeug (used with permission). +def unquote_header_value(value, is_filename=False): + r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). + This does not use the real unquoting but what browsers are actually + using for quoting. + + :param value: the header value to unquote. + :rtype: str + """ + if value and value[0] == value[-1] == '"': + # this is not the real unquoting, but fixing this so that the + # RFC is met will result in bugs with internet explorer and + # probably some other browsers as well. IE for example is + # uploading files with "C:\foo\bar.txt" as filename + value = value[1:-1] + + # if this is a filename and the starting characters look like + # a UNC path, then just return the value without quotes. Using the + # replace sequence below on a UNC path has the effect of turning + # the leading double slash into a single slash and then + # _fix_ie_filename() doesn't work correctly. See #458. + if not is_filename or value[:2] != "\\\\": + return value.replace("\\\\", "\\").replace('\\"', '"') + return value + + +def dict_from_cookiejar(cj): + """Returns a key/value dictionary from a CookieJar. + + :param cj: CookieJar object to extract cookies from. + :rtype: dict + """ + + cookie_dict = {cookie.name: cookie.value for cookie in cj} + return cookie_dict + + +def add_dict_to_cookiejar(cj, cookie_dict): + """Returns a CookieJar from a key/value dictionary. + + :param cj: CookieJar to insert cookies into. + :param cookie_dict: Dict of key/values to insert into CookieJar. + :rtype: CookieJar + """ + + return cookiejar_from_dict(cookie_dict, cj) + + +def get_encodings_from_content(content): + """Returns encodings from given content string. + + :param content: bytestring to extract encodings from. + """ + warnings.warn( + ( + "In requests 3.0, get_encodings_from_content will be removed. For " + "more information, please see the discussion on issue #2266. 
(This"
+            " warning should only appear once.)"
+        ),
+        DeprecationWarning,
+    )
+
+    charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
+    pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
+    xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
+
+    return (
+        charset_re.findall(content)
+        + pragma_re.findall(content)
+        + xml_re.findall(content)
+    )
+
+
+def _parse_content_type_header(header):
+    """Returns content type and parameters from given header
+
+    :param header: string
+    :return: tuple containing content type and dictionary of
+        parameters
+    """
+
+    tokens = header.split(";")
+    content_type, params = tokens[0].strip(), tokens[1:]
+    params_dict = {}
+    items_to_strip = "\"' "
+
+    for param in params:
+        param = param.strip()
+        if param:
+            key, value = param, True
+            index_of_equals = param.find("=")
+            if index_of_equals != -1:
+                key = param[:index_of_equals].strip(items_to_strip)
+                value = param[index_of_equals + 1 :].strip(items_to_strip)
+            params_dict[key.lower()] = value
+    return content_type, params_dict
+
+
+def get_encoding_from_headers(headers):
+    """Returns encodings from given HTTP Header Dict.
+
+    :param headers: dictionary to extract encoding from.
+    :rtype: str
+    """
+
+    content_type = headers.get("content-type")
+
+    if not content_type:
+        return None
+
+    content_type, params = _parse_content_type_header(content_type)
+
+    if "charset" in params:
+        return params["charset"].strip("'\"")
+
+    if "text" in content_type:
+        return "ISO-8859-1"
+
+    if "application/json" in content_type:
+        # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
+        return "utf-8"
+
+
+def stream_decode_response_unicode(iterator, r):
+    """Stream decodes an iterator."""
+
+    if r.encoding is None:
+        yield from iterator
+        return
+
+    decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace")
+    for chunk in iterator:
+        rv = decoder.decode(chunk)
+        if rv:
+            yield rv
+    rv = decoder.decode(b"", final=True)
+    if rv:
+        yield rv
+
+
+def iter_slices(string, slice_length):
+    """Iterate over slices of a string."""
+    pos = 0
+    if slice_length is None or slice_length <= 0:
+        slice_length = len(string)
+    while pos < len(string):
+        yield string[pos : pos + slice_length]
+        pos += slice_length
+
+
+def get_unicode_from_response(r):
+    """Returns the requested content back in unicode.
+
+    :param r: Response object to get unicode content from.
+
+    Tried:
+
+    1. charset from content-type
+    2. fall back and replace all unicode characters
+
+    :rtype: str
+    """
+    warnings.warn(
+        (
+            "In requests 3.0, get_unicode_from_response will be removed. For "
+            "more information, please see the discussion on issue #2266. (This"
+            " warning should only appear once.)"
+        ),
+        DeprecationWarning,
+    )
+
+    tried_encodings = []
+
+    # Try charset from content-type
+    encoding = get_encoding_from_headers(r.headers)
+
+    if encoding:
+        try:
+            return str(r.content, encoding)
+        except UnicodeError:
+            tried_encodings.append(encoding)
+
+    # Fall back:
+    try:
+        return str(r.content, encoding, errors="replace")
+    except TypeError:
+        return r.content
+
+
+# The unreserved URI characters (RFC 3986)
+UNRESERVED_SET = frozenset(
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" "0123456789-._~"
+)
+
+
+def unquote_unreserved(uri):
+    """Un-escape any percent-escape sequences in a URI that are unreserved
+    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
+ + :rtype: str + """ + parts = uri.split("%") + for i in range(1, len(parts)): + h = parts[i][0:2] + if len(h) == 2 and h.isalnum(): + try: + c = chr(int(h, 16)) + except ValueError: + raise InvalidURL(f"Invalid percent-escape sequence: '{h}'") + + if c in UNRESERVED_SET: + parts[i] = c + parts[i][2:] + else: + parts[i] = f"%{parts[i]}" + else: + parts[i] = f"%{parts[i]}" + return "".join(parts) + + +def requote_uri(uri): + """Re-quote the given URI. + + This function passes the given URI through an unquote/quote cycle to + ensure that it is fully and consistently quoted. + + :rtype: str + """ + safe_with_percent = "!#$%&'()*+,/:;=?@[]~" + safe_without_percent = "!#$&'()*+,/:;=?@[]~" + try: + # Unquote only the unreserved characters + # Then quote only illegal characters (do not quote reserved, + # unreserved, or '%') + return quote(unquote_unreserved(uri), safe=safe_with_percent) + except InvalidURL: + # We couldn't unquote the given URI, so let's try quoting it, but + # there may be unquoted '%'s in the URI. We need to make sure they're + # properly quoted so they do not cause issues elsewhere. + return quote(uri, safe=safe_without_percent) + + +def address_in_network(ip, net): + """This function allows you to check if an IP belongs to a network subnet + + Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 + returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 + + :rtype: bool + """ + ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0] + netaddr, bits = net.split("/") + netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0] + network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask + return (ipaddr & netmask) == (network & netmask) + + +def dotted_netmask(mask): + """Converts mask from /xx format to xxx.xxx.xxx.xxx + + Example: if mask is 24 function returns 255.255.255.0 + + :rtype: str + """ + bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1 + return socket.inet_ntoa(struct.pack(">I", bits)) + + +def is_ipv4_address(string_ip): + """ + :rtype: bool + """ + try: + socket.inet_aton(string_ip) + except OSError: + return False + return True + + +def is_valid_cidr(string_network): + """ + Very simple check of the cidr format in no_proxy variable. + + :rtype: bool + """ + if string_network.count("/") == 1: + try: + mask = int(string_network.split("/")[1]) + except ValueError: + return False + + if mask < 1 or mask > 32: + return False + + try: + socket.inet_aton(string_network.split("/")[0]) + except OSError: + return False + else: + return False + return True + + +@contextlib.contextmanager +def set_environ(env_name, value): + """Set the environment variable 'env_name' to 'value' + + Save previous value, yield, and then restore the previous value stored in + the environment variable 'env_name'. + + If 'value' is None, do nothing""" + value_changed = value is not None + if value_changed: + old_value = os.environ.get(env_name) + os.environ[env_name] = value + try: + yield + finally: + if value_changed: + if old_value is None: + del os.environ[env_name] + else: + os.environ[env_name] = old_value + + +def should_bypass_proxies(url, no_proxy): + """ + Returns whether we should bypass proxies or not. + + :rtype: bool + """ + + # Prioritize lowercase environment variables over uppercase + # to keep a consistent behaviour with other http projects (curl, wget). + def get_proxy(key): + return os.environ.get(key) or os.environ.get(key.upper()) + + # First check whether no_proxy is defined. 
If it is, check that the URL + # we're getting isn't in the no_proxy list. + no_proxy_arg = no_proxy + if no_proxy is None: + no_proxy = get_proxy("no_proxy") + parsed = urlparse(url) + + if parsed.hostname is None: + # URLs don't always have hostnames, e.g. file:/// urls. + return True + + if no_proxy: + # We need to check whether we match here. We need to see if we match + # the end of the hostname, both with and without the port. + no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host) + + if is_ipv4_address(parsed.hostname): + for proxy_ip in no_proxy: + if is_valid_cidr(proxy_ip): + if address_in_network(parsed.hostname, proxy_ip): + return True + elif parsed.hostname == proxy_ip: + # If no_proxy ip was defined in plain IP notation instead of cidr notation & + # matches the IP of the index + return True + else: + host_with_port = parsed.hostname + if parsed.port: + host_with_port += f":{parsed.port}" + + for host in no_proxy: + if parsed.hostname.endswith(host) or host_with_port.endswith(host): + # The URL does match something in no_proxy, so we don't want + # to apply the proxies on this URL. + return True + + with set_environ("no_proxy", no_proxy_arg): + # parsed.hostname can be `None` in cases such as a file URI. + try: + bypass = proxy_bypass(parsed.hostname) + except (TypeError, socket.gaierror): + bypass = False + + if bypass: + return True + + return False + + +def get_environ_proxies(url, no_proxy=None): + """ + Return a dict of environment proxies. + + :rtype: dict + """ + if should_bypass_proxies(url, no_proxy=no_proxy): + return {} + else: + return getproxies() + + +def select_proxy(url, proxies): + """Select a proxy for the url, if applicable. + + :param url: The url being for the request + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs + """ + proxies = proxies or {} + urlparts = urlparse(url) + if urlparts.hostname is None: + return proxies.get(urlparts.scheme, proxies.get("all")) + + proxy_keys = [ + urlparts.scheme + "://" + urlparts.hostname, + urlparts.scheme, + "all://" + urlparts.hostname, + "all", + ] + proxy = None + for proxy_key in proxy_keys: + if proxy_key in proxies: + proxy = proxies[proxy_key] + break + + return proxy + + +def resolve_proxies(request, proxies, trust_env=True): + """This method takes proxy information from a request and configuration + input to resolve a mapping of target proxies. This will consider settings + such as NO_PROXY to strip proxy configurations. + + :param request: Request or PreparedRequest + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs + :param trust_env: Boolean declaring whether to trust environment configs + + :rtype: dict + """ + proxies = proxies if proxies is not None else {} + url = request.url + scheme = urlparse(url).scheme + no_proxy = proxies.get("no_proxy") + new_proxies = proxies.copy() + + if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy): + environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) + + proxy = environ_proxies.get(scheme, environ_proxies.get("all")) + + if proxy: + new_proxies.setdefault(scheme, proxy) + return new_proxies + + +def default_user_agent(name="python-requests"): + """ + Return a string representing the default user agent. 
+
+    :rtype: str
+    """
+    return f"{name}/{__version__}"
+
+
+def default_headers():
+    """
+    :rtype: requests.structures.CaseInsensitiveDict
+    """
+    return CaseInsensitiveDict(
+        {
+            "User-Agent": default_user_agent(),
+            "Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
+            "Accept": "*/*",
+            "Connection": "keep-alive",
+        }
+    )
+
+
+def parse_header_links(value):
+    """Return a list of parsed link headers.
+
+    i.e. Link: <http://.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
+
+    :rtype: list
+    """
+
+    links = []
+
+    replace_chars = " '\""
+
+    value = value.strip(replace_chars)
+    if not value:
+        return links
+
+    for val in re.split(", *<", value):
+        try:
+            url, params = val.split(";", 1)
+        except ValueError:
+            url, params = val, ""
+
+        link = {"url": url.strip("<> '\"")}
+
+        for param in params.split(";"):
+            try:
+                key, value = param.split("=")
+            except ValueError:
+                break
+
+            link[key.strip(replace_chars)] = value.strip(replace_chars)
+
+        links.append(link)
+
+    return links
+
+
+# Null bytes; no need to recreate these on each call to guess_json_utf
+_null = "\x00".encode("ascii")  # encoding to ASCII for Python 3
+_null2 = _null * 2
+_null3 = _null * 3
+
+
+def guess_json_utf(data):
+    """
+    :rtype: str
+    """
+    # JSON always starts with two ASCII characters, so detection is as
+    # easy as counting the nulls and from their location and count
+    # determine the encoding. Also detect a BOM, if present.
+    sample = data[:4]
+    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
+        return "utf-32"  # BOM included
+    if sample[:3] == codecs.BOM_UTF8:
+        return "utf-8-sig"  # BOM included, MS style (discouraged)
+    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
+        return "utf-16"  # BOM included
+    nullcount = sample.count(_null)
+    if nullcount == 0:
+        return "utf-8"
+    if nullcount == 2:
+        if sample[::2] == _null2:  # 1st and 3rd are null
+            return "utf-16-be"
+        if sample[1::2] == _null2:  # 2nd and 4th are null
+            return "utf-16-le"
+        # Did not detect 2 valid UTF-16 ascii-range characters
+    if nullcount == 3:
+        if sample[:3] == _null3:
+            return "utf-32-be"
+        if sample[1:] == _null3:
+            return "utf-32-le"
+        # Did not detect a valid UTF-32 ascii-range character
+    return None
+
+
+def prepend_scheme_if_needed(url, new_scheme):
+    """Given a URL that may or may not have a scheme, prepend the given scheme.
+    Does not replace a present scheme with the one provided as an argument.
+
+    :rtype: str
+    """
+    parsed = parse_url(url)
+    scheme, auth, host, port, path, query, fragment = parsed
+
+    # A defect in urlparse determines that there isn't a netloc present in some
+    # urls. We previously assumed parsing was overly cautious, and swapped the
+    # netloc and path. Due to a lack of tests on the original defect, this is
+    # maintained with parse_url for backwards compatibility.
+    netloc = parsed.netloc
+    if not netloc:
+        netloc, path = path, netloc
+
+    if auth:
+        # parse_url doesn't provide the netloc with auth
+        # so we'll add it ourselves.
+        netloc = "@".join([auth, netloc])
+    if scheme is None:
+        scheme = new_scheme
+    if path is None:
+        path = ""
+
+    return urlunparse((scheme, netloc, path, "", query, fragment))
+
+
+def get_auth_from_url(url):
+    """Given a url with authentication components, extract them into a tuple of
+    username,password.
+ + :rtype: (str,str) + """ + parsed = urlparse(url) + + try: + auth = (unquote(parsed.username), unquote(parsed.password)) + except (AttributeError, TypeError): + auth = ("", "") + + return auth + + +def check_header_validity(header): + """Verifies that header parts don't contain leading whitespace + reserved characters, or return characters. + + :param header: tuple, in the format (name, value). + """ + name, value = header + _validate_header_part(header, name, 0) + _validate_header_part(header, value, 1) + + +def _validate_header_part(header, header_part, header_validator_index): + if isinstance(header_part, str): + validator = _HEADER_VALIDATORS_STR[header_validator_index] + elif isinstance(header_part, bytes): + validator = _HEADER_VALIDATORS_BYTE[header_validator_index] + else: + raise InvalidHeader( + f"Header part ({header_part!r}) from {header} " + f"must be of type str or bytes, not {type(header_part)}" + ) + + if not validator.match(header_part): + header_kind = "name" if header_validator_index == 0 else "value" + raise InvalidHeader( + f"Invalid leading whitespace, reserved character(s), or return " + f"character(s) in header {header_kind}: {header_part!r}" + ) + + +def urldefragauth(url): + """ + Given a url remove the fragment and the authentication part. + + :rtype: str + """ + scheme, netloc, path, params, query, fragment = urlparse(url) + + # see func:`prepend_scheme_if_needed` + if not netloc: + netloc, path = path, netloc + + netloc = netloc.rsplit("@", 1)[-1] + + return urlunparse((scheme, netloc, path, params, query, "")) + + +def rewind_body(prepared_request): + """Move file pointer back to its recorded starting position + so it can be read again on redirect. + """ + body_seek = getattr(prepared_request.body, "seek", None) + if body_seek is not None and isinstance( + prepared_request._body_position, integer_types + ): + try: + body_seek(prepared_request._body_position) + except OSError: + raise UnrewindableBodyError( + "An error occurred when rewinding request body for redirect." + ) + else: + raise UnrewindableBodyError("Unable to rewind request body for redirect.") diff --git a/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/LICENSE b/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4415505566f261c802b671426be529a31f914137 --- /dev/null +++ b/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2020 Will McGugan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/METADATA b/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..0b042b0a37699eec97d3e63cddeadc8fc9e6763f --- /dev/null +++ b/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/METADATA @@ -0,0 +1,473 @@ +Metadata-Version: 2.3 +Name: rich +Version: 14.2.0 +Summary: Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal +License: MIT +Author: Will McGugan +Author-email: willmcgugan@gmail.com +Requires-Python: >=3.8.0 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Framework :: IPython +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: MacOS +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Typing :: Typed +Provides-Extra: jupyter +Requires-Dist: ipywidgets (>=7.5.1,<9) ; extra == "jupyter" +Requires-Dist: markdown-it-py (>=2.2.0) +Requires-Dist: pygments (>=2.13.0,<3.0.0) +Project-URL: Documentation, https://rich.readthedocs.io/en/latest/ +Project-URL: Homepage, https://github.com/Textualize/rich +Description-Content-Type: text/markdown + +[![Supported Python Versions](https://img.shields.io/pypi/pyversions/rich)](https://pypi.org/project/rich/) [![PyPI version](https://badge.fury.io/py/rich.svg)](https://badge.fury.io/py/rich) + +[![Downloads](https://pepy.tech/badge/rich/month)](https://pepy.tech/project/rich) +[![codecov](https://img.shields.io/codecov/c/github/Textualize/rich?label=codecov&logo=codecov)](https://codecov.io/gh/Textualize/rich) +[![Rich blog](https://img.shields.io/badge/blog-rich%20news-yellowgreen)](https://www.willmcgugan.com/tag/rich/) +[![Twitter Follow](https://img.shields.io/twitter/follow/willmcgugan.svg?style=social)](https://twitter.com/willmcgugan) + +![Logo](https://github.com/textualize/rich/raw/master/imgs/logo.svg) + +[English readme](https://github.com/textualize/rich/blob/master/README.md) + • [简体中文 readme](https://github.com/textualize/rich/blob/master/README.cn.md) + • [正體中文 readme](https://github.com/textualize/rich/blob/master/README.zh-tw.md) + • [Lengua española readme](https://github.com/textualize/rich/blob/master/README.es.md) + • [Deutsche readme](https://github.com/textualize/rich/blob/master/README.de.md) + • [Läs på svenska](https://github.com/textualize/rich/blob/master/README.sv.md) + • [日本語 readme](https://github.com/textualize/rich/blob/master/README.ja.md) + • [한국어 
readme](https://github.com/textualize/rich/blob/master/README.kr.md) + • [Français readme](https://github.com/textualize/rich/blob/master/README.fr.md) + • [Schwizerdütsch readme](https://github.com/textualize/rich/blob/master/README.de-ch.md) + • [हिन्दी readme](https://github.com/textualize/rich/blob/master/README.hi.md) + • [Português brasileiro readme](https://github.com/textualize/rich/blob/master/README.pt-br.md) + • [Italian readme](https://github.com/textualize/rich/blob/master/README.it.md) + • [Русский readme](https://github.com/textualize/rich/blob/master/README.ru.md) + • [Indonesian readme](https://github.com/textualize/rich/blob/master/README.id.md) + • [فارسی readme](https://github.com/textualize/rich/blob/master/README.fa.md) + • [Türkçe readme](https://github.com/textualize/rich/blob/master/README.tr.md) + • [Polskie readme](https://github.com/textualize/rich/blob/master/README.pl.md) + + +Rich is a Python library for _rich_ text and beautiful formatting in the terminal. + +The [Rich API](https://rich.readthedocs.io/en/latest/) makes it easy to add color and style to terminal output. Rich can also render pretty tables, progress bars, markdown, syntax highlighted source code, tracebacks, and more — out of the box. + +![Features](https://github.com/textualize/rich/raw/master/imgs/features.png) + +For a video introduction to Rich see [calmcode.io](https://calmcode.io/rich/introduction.html) by [@fishnets88](https://twitter.com/fishnets88). + +See what [people are saying about Rich](https://www.willmcgugan.com/blog/pages/post/rich-tweets/). + +## Compatibility + +Rich works with Linux, macOS and Windows. True color / emoji works with new Windows Terminal, classic terminal is limited to 16 colors. Rich requires Python 3.8 or later. + +Rich works with [Jupyter notebooks](https://jupyter.org/) with no additional configuration required. + +## Installing + +Install with `pip` or your favorite PyPI package manager. + +```sh +python -m pip install rich +``` + +Run the following to test Rich output on your terminal: + +```sh +python -m rich +``` + +## Rich Print + +To effortlessly add rich output to your application, you can import the [rich print](https://rich.readthedocs.io/en/latest/introduction.html#quick-start) method, which has the same signature as the builtin Python function. Try this: + +```python +from rich import print + +print("Hello, [bold magenta]World[/bold magenta]!", ":vampire:", locals()) +``` + +![Hello World](https://github.com/textualize/rich/raw/master/imgs/print.png) + +## Rich REPL + +Rich can be installed in the Python REPL, so that any data structures will be pretty printed and highlighted. + +```python +>>> from rich import pretty +>>> pretty.install() +``` + +![REPL](https://github.com/textualize/rich/raw/master/imgs/repl.png) + +## Using the Console + +For more control over rich terminal content, import and construct a [Console](https://rich.readthedocs.io/en/latest/reference/console.html#rich.console.Console) object. + +```python +from rich.console import Console + +console = Console() +``` + +The Console object has a `print` method which has an intentionally similar interface to the builtin `print` function. Here's an example of use: + +```python +console.print("Hello", "World!") +``` + +As you might expect, this will print `"Hello World!"` to the terminal. Note that unlike the builtin `print` function, Rich will word-wrap your text to fit within the terminal width. + +There are a few ways of adding color and style to your output. 
You can set a style for the entire output by adding a `style` keyword argument. Here's an example:
+
+```python
+console.print("Hello", "World!", style="bold red")
+```
+
+The output will be something like the following:
+
+![Hello World](https://github.com/textualize/rich/raw/master/imgs/hello_world.png)
+
+That's fine for styling a line of text at a time. For more finely grained styling, Rich renders a special markup which is similar in syntax to [bbcode](https://en.wikipedia.org/wiki/BBCode). Here's an example:
+
+```python
+console.print("Where there is a [bold cyan]Will[/bold cyan] there [u]is[/u] a [i]way[/i].")
+```
+
+![Console Markup](https://github.com/textualize/rich/raw/master/imgs/where_there_is_a_will.png)
+
+You can use a Console object to generate sophisticated output with minimal effort. See the [Console API](https://rich.readthedocs.io/en/latest/console.html) docs for details.
+
+## Rich Inspect
+
+Rich has an [inspect](https://rich.readthedocs.io/en/latest/reference/init.html?highlight=inspect#rich.inspect) function which can produce a report on any Python object, such as class, instance, or builtin.
+
+```python
+>>> my_list = ["foo", "bar"]
+>>> from rich import inspect
+>>> inspect(my_list, methods=True)
+```
+
+![Log](https://github.com/textualize/rich/raw/master/imgs/inspect.png)
+
+See the [inspect docs](https://rich.readthedocs.io/en/latest/reference/init.html#rich.inspect) for details.
+
+# Rich Library
+
+Rich contains a number of builtin _renderables_ you can use to create elegant output in your CLI and help you debug your code.
+
+The following sections describe each of these in more detail:
+
+## Log
+
+The Console object has a `log()` method which has a similar interface to `print()`, but also renders a column for the current time and the file and line which made the call. By default Rich will do syntax highlighting for Python structures and for repr strings. If you log a collection (i.e. a dict or a list) Rich will pretty print it so that it fits in the available space. Here's an example of some of these features.
+
+```python
+from rich.console import Console
+console = Console()
+
+test_data = [
+    {"jsonrpc": "2.0", "method": "sum", "params": [None, 1, 2, 4, False, True], "id": "1",},
+    {"jsonrpc": "2.0", "method": "notify_hello", "params": [7]},
+    {"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": "2"},
+]
+
+def test_log():
+    enabled = False
+    context = {
+        "foo": "bar",
+    }
+    movies = ["Deadpool", "Rise of the Skywalker"]
+    console.log("Hello from", console, "!")
+    console.log(test_data, log_locals=True)
+
+
+test_log()
+```
+
+The above produces the following output:
+
+![Log](https://github.com/textualize/rich/raw/master/imgs/log.png)
+
+Note the `log_locals` argument, which outputs a table containing the local variables where the log method was called.
+
+The log method could be used for logging to the terminal for long running applications such as servers, but is also a very nice debugging aid.
+
+
+## Logging Handler
+
+You can also use the builtin [Handler class](https://rich.readthedocs.io/en/latest/logging.html) to format and colorize output from Python's logging module. Here's an example of the output:
+
+![Logging](https://github.com/textualize/rich/raw/master/imgs/logging.png)
+
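+The README shows only a screenshot here; as a minimal sketch of how `RichHandler` is typically wired into the standard `logging` module (following the handler docs linked above):
+
+```python
+import logging
+
+from rich.logging import RichHandler
+
+# Route standard logging records through Rich for colorized,
+# nicely formatted output.
+logging.basicConfig(
+    level="NOTSET",
+    format="%(message)s",
+    datefmt="[%X]",
+    handlers=[RichHandler()],
+)
+
+log = logging.getLogger("rich")
+log.info("Hello, World!")
+```
+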
+
+## Emoji
+
+To insert an emoji into console output, place the name between two colons. Here's an example:
+
+```python
+>>> console.print(":smiley: :vampire: :pile_of_poo: :thumbs_up: :raccoon:")
+😃 🧛 💩 👍 🦝
+```
+
+Please use this feature wisely.
+
+
+## Tables
+
+Rich can render flexible [tables](https://rich.readthedocs.io/en/latest/tables.html) with unicode box characters. There is a large variety of formatting options for borders, styles, cell alignment etc.
+
+![table movie](https://github.com/textualize/rich/raw/master/imgs/table_movie.gif)
+
+The animation above was generated with [table_movie.py](https://github.com/textualize/rich/blob/master/examples/table_movie.py) in the examples directory.
+
+Here's a simpler table example:
+
+```python
+from rich.console import Console
+from rich.table import Table
+
+console = Console()
+
+table = Table(show_header=True, header_style="bold magenta")
+table.add_column("Date", style="dim", width=12)
+table.add_column("Title")
+table.add_column("Production Budget", justify="right")
+table.add_column("Box Office", justify="right")
+table.add_row(
+    "Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$275,000,000", "$375,126,118"
+)
+table.add_row(
+    "May 25, 2018",
+    "[red]Solo[/red]: A Star Wars Story",
+    "$275,000,000",
+    "$393,151,347",
+)
+table.add_row(
+    "Dec 15, 2017",
+    "Star Wars Ep. VIII: The Last Jedi",
+    "$262,000,000",
+    "[bold]$1,332,539,889[/bold]",
+)
+
+console.print(table)
+```
+
+This produces the following output:
+
+![table](https://github.com/textualize/rich/raw/master/imgs/table.png)
+
+Note that console markup is rendered in the same way as `print()` and `log()`. In fact, anything that is renderable by Rich may be included in the headers / rows (even other tables).
+
+The `Table` class is smart enough to resize columns to fit the available width of the terminal, wrapping text as required. Here's the same example, with the terminal made smaller than the table above:
+
+![table2](https://github.com/textualize/rich/raw/master/imgs/table2.png)
+
+
+## Progress Bars
+
+Rich can render multiple flicker-free [progress](https://rich.readthedocs.io/en/latest/progress.html) bars to track long-running tasks.
+
+For basic usage, wrap any sequence in the `track` function and iterate over the result. Here's an example:
+
+```python
+from rich.progress import track
+
+for step in track(range(100)):
+    do_step(step)
+```
+
+It's not much harder to add multiple progress bars. Here's an example taken from the docs:
+
+![progress](https://github.com/textualize/rich/raw/master/imgs/progress.gif)
+
+The columns may be configured to show any details you want. Built-in columns include percentage complete, file size, file speed, and time remaining. Here's another example showing a download in progress:
+
+![progress](https://github.com/textualize/rich/raw/master/imgs/downloader.gif)
+
+To try this out yourself, see [examples/downloader.py](https://github.com/textualize/rich/blob/master/examples/downloader.py) which can download multiple URLs simultaneously while displaying progress.
+
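+For finer control than `track` offers, the `Progress` class can drive several tasks from one display. The snippet below is a rough sketch based on the progress docs linked above; the task names and advance rates are purely illustrative:
+
+```python
+import time
+
+from rich.progress import Progress
+
+# Two tasks rendered in a single flicker-free display.
+with Progress() as progress:
+    downloading = progress.add_task("[green]Downloading...", total=100)
+    processing = progress.add_task("[cyan]Processing...", total=100)
+
+    # progress.finished is True once every task reaches its total.
+    while not progress.finished:
+        progress.update(downloading, advance=1.5)
+        progress.update(processing, advance=0.7)
+        time.sleep(0.02)
+```
+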
+
+## Status
+
+For situations where it is hard to calculate progress, you can use the [status](https://rich.readthedocs.io/en/latest/reference/console.html#rich.console.Console.status) method which will display a 'spinner' animation and message. The animation won't prevent you from using the console as normal. Here's an example:
+
+```python
+from time import sleep
+from rich.console import Console
+
+console = Console()
+tasks = [f"task {n}" for n in range(1, 11)]
+
+with console.status("[bold green]Working on tasks...") as status:
+    while tasks:
+        task = tasks.pop(0)
+        sleep(1)
+        console.log(f"{task} complete")
+```
+
+This generates the following output in the terminal.
+
+![status](https://github.com/textualize/rich/raw/master/imgs/status.gif)
+
+The spinner animations were borrowed from [cli-spinners](https://www.npmjs.com/package/cli-spinners). You can select a spinner by specifying the `spinner` parameter. Run the following command to see the available values:
+
+```
+python -m rich.spinner
+```
+
+The above command generates the following output in the terminal:
+
+![spinners](https://github.com/textualize/rich/raw/master/imgs/spinners.gif)
+
+
+## Tree
+
+Rich can render a [tree](https://rich.readthedocs.io/en/latest/tree.html) with guide lines. A tree is ideal for displaying a file structure, or any other hierarchical data.
+
+The labels of the tree can be simple text or anything else Rich can render. Run the following for a demonstration:
+
+```
+python -m rich.tree
+```
+
+This generates the following output:
+
+![markdown](https://github.com/textualize/rich/raw/master/imgs/tree.png)
+
+See the [tree.py](https://github.com/textualize/rich/blob/master/examples/tree.py) example for a script that displays a tree view of any directory, similar to the linux `tree` command.
+
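+To build a tree in code, construct a `Tree` and call `add()` for each branch; `add()` returns the new branch, so trees can be nested. A minimal sketch (the file names are illustrative):
+
+```python
+from rich import print
+from rich.tree import Tree
+
+# Labels can be plain strings or any Rich renderable.
+tree = Tree("project")
+src = tree.add("src")
+src.add("main.py")
+src.add("utils.py")
+tree.add("README.md")
+
+print(tree)
+```
+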
+
+## Columns
+
+Rich can render content in neat [columns](https://rich.readthedocs.io/en/latest/columns.html) with equal or optimal width. Here's a very basic clone of the (MacOS / Linux) `ls` command which displays a directory listing in columns:
+
+```python
+import os
+import sys
+
+from rich import print
+from rich.columns import Columns
+
+directory = os.listdir(sys.argv[1])
+print(Columns(directory))
+```
+
+The following screenshot is the output from the [columns example](https://github.com/textualize/rich/blob/master/examples/columns.py) which displays data pulled from an API in columns:
+
+![columns](https://github.com/textualize/rich/raw/master/imgs/columns.png)
+
+
+## Markdown
+
+Rich can render [markdown](https://rich.readthedocs.io/en/latest/markdown.html) and does a reasonable job of translating the formatting to the terminal.
+
+To render markdown import the `Markdown` class and construct it with a string containing markdown code. Then print it to the console. Here's an example:
+
+```python
+from rich.console import Console
+from rich.markdown import Markdown
+
+console = Console()
+with open("README.md") as readme:
+    markdown = Markdown(readme.read())
+console.print(markdown)
+```
+
+This will produce output something like the following:
+
+![markdown](https://github.com/textualize/rich/raw/master/imgs/markdown.png)
+
+
+## Syntax Highlighting
+
+Rich uses the [pygments](https://pygments.org/) library to implement [syntax highlighting](https://rich.readthedocs.io/en/latest/syntax.html). Usage is similar to rendering markdown; construct a `Syntax` object and print it to the console. Here's an example:
+
+```python
+from rich.console import Console
+from rich.syntax import Syntax
+
+my_code = '''
+def iter_first_last(values: Iterable[T]) -> Iterable[Tuple[bool, bool, T]]:
+    """Iterate and generate a tuple with a flag for first and last value."""
+    iter_values = iter(values)
+    try:
+        previous_value = next(iter_values)
+    except StopIteration:
+        return
+    first = True
+    for value in iter_values:
+        yield first, False, previous_value
+        first = False
+        previous_value = value
+    yield first, True, previous_value
+'''
+syntax = Syntax(my_code, "python", theme="monokai", line_numbers=True)
+console = Console()
+console.print(syntax)
+```
+
+This will produce the following output:
+
+![syntax](https://github.com/textualize/rich/raw/master/imgs/syntax.png)
+
+
+## Tracebacks
+
+Rich can render [beautiful tracebacks](https://rich.readthedocs.io/en/latest/traceback.html) which are easier to read and show more code than standard Python tracebacks. You can set Rich as the default traceback handler so all uncaught exceptions will be rendered by Rich.
+
+Here's what it looks like on OSX (similar on Linux):
+
+![traceback](https://github.com/textualize/rich/raw/master/imgs/traceback.png)
+
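+Installing the handler is a one-liner; the sketch below follows the traceback docs linked above (`show_locals` is optional):
+
+```python
+from rich.traceback import install
+
+# Replace the default traceback handler; uncaught exceptions are
+# now rendered by Rich, with a table of locals for each frame.
+install(show_locals=True)
+
+1 / 0  # any uncaught exception will now be pretty-printed
+```
+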
    + +All Rich renderables make use of the [Console Protocol](https://rich.readthedocs.io/en/latest/protocol.html), which you can also use to implement your own Rich content. + +# Rich CLI + + +See also [Rich CLI](https://github.com/textualize/rich-cli) for a command line application powered by Rich. Syntax highlight code, render markdown, display CSVs in tables, and more, directly from the command prompt. + + +![Rich CLI](https://raw.githubusercontent.com/Textualize/rich-cli/main/imgs/rich-cli-splash.jpg) + +# Textual + +See also Rich's sister project, [Textual](https://github.com/Textualize/textual), which you can use to build sophisticated User Interfaces in the terminal. + +![textual-splash](https://github.com/user-attachments/assets/4caeb77e-48c0-4cf7-b14d-c53ded855ffd) + diff --git a/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/RECORD b/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..c7744108da9a119ee98262aece0223784692d27d --- /dev/null +++ b/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/RECORD @@ -0,0 +1,85 @@ +rich-14.2.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +rich-14.2.0.dist-info/LICENSE,sha256=3u18F6QxgVgZCj6iOcyHmlpQJxzruYrnAl9I--WNyhU,1056 +rich-14.2.0.dist-info/METADATA,sha256=Ii_jWsSNSmxZxJpZykcbufcRG8-I8ziXGQusB0_Vw18,18257 +rich-14.2.0.dist-info/RECORD,, +rich-14.2.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +rich-14.2.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88 +rich/__init__.py,sha256=lh2WcoIOJp5M5_lbAsSUMGv8oiJeumROazHH_AYMS8I,6066 +rich/__main__.py,sha256=YoXaPBcb-LeQMDj9jhZejCSY0DK4gP57uOlngbPxf4k,7752 +rich/_cell_widths.py,sha256=fbmeyetEdHjzE_Vx2l1uK7tnPOhMs2X1lJfO3vsKDpA,10209 +rich/_emoji_codes.py,sha256=hu1VL9nbVdppJrVoijVshRlcRRe_v3dju3Mmd2sKZdY,140235 +rich/_emoji_replace.py,sha256=n-kcetsEUx2ZUmhQrfeMNc-teeGhpuSQ5F8VPBsyvDo,1064 +rich/_export_format.py,sha256=RI08pSrm5tBSzPMvnbTqbD9WIalaOoN5d4M1RTmLq1Y,2128 +rich/_extension.py,sha256=G66PkbH_QdTJh6jD-J228O76CmAnr2hLQv72CgPPuzE,241 +rich/_fileno.py,sha256=HWZxP5C2ajMbHryvAQZseflVfQoGzsKOHzKGsLD8ynQ,799 +rich/_inspect.py,sha256=ROT0PLC2GMWialWZkqJIjmYq7INRijQQkoSokWTaAiI,9656 +rich/_log_render.py,sha256=xBKCxqiO4FZk8eG56f8crFdrmJxFrJsQE3V3F-fFekc,3213 +rich/_loop.py,sha256=hV_6CLdoPm0va22Wpw4zKqM0RYsz3TZxXj0PoS-9eDQ,1236 +rich/_null_file.py,sha256=ADGKp1yt-k70FMKV6tnqCqecB-rSJzp-WQsD7LPL-kg,1394 +rich/_palettes.py,sha256=cdev1JQKZ0JvlguV9ipHgznTdnvlIzUFDBb0It2PzjI,7063 +rich/_pick.py,sha256=evDt8QN4lF5CiwrUIXlOJCntitBCOsI3ZLPEIAVRLJU,423 +rich/_ratio.py,sha256=IOtl78sQCYZsmHyxhe45krkb68u9xVz7zFsXVJD-b2Y,5325 +rich/_spinners.py,sha256=U2r1_g_1zSjsjiUdAESc2iAMc3i4ri_S8PYP6kQ5z1I,19919 +rich/_stack.py,sha256=-C8OK7rxn3sIUdVwxZBBpeHhIzX0eI-VM3MemYfaXm0,351 +rich/_timer.py,sha256=zelxbT6oPFZnNrwWPpc1ktUeAT-Vc4fuFcRZLQGLtMI,417 +rich/_win32_console.py,sha256=o2QN_IRx10biGP3Ap1neaqX8FBGlUKSmWM6Kw4OSg-U,22719 +rich/_windows.py,sha256=is3WpbHMj8WaTHYB11hc6lP2t4hlvt4TViTlHSmjsi0,1901 +rich/_windows_renderer.py,sha256=d799xOnxLbCCCzGu9-U7YLmIQkxtxQIBFQQ6iu4veSc,2759 +rich/_wrap.py,sha256=FlSsom5EX0LVkA3KWy34yHnCfLtqX-ZIepXKh-70rpc,3404 +rich/abc.py,sha256=dALMOGfKVNeAbvqq66IpTQxQUerxD7AE4FKwqd0eQKk,878 +rich/align.py,sha256=ADa5ty1Eh_Yf68Iay3FgKyjUXgjrc4TyqBDww9FeAAs,10288 +rich/ansi.py,sha256=Avs1LHbSdcyOvDOdpELZUoULcBiYewY76eNBp6uFBhs,6921 +rich/bar.py,sha256=ldbVHOzKJOnflVNuv1xS7g6dLX2E3wMnXkdPbpzJTcs,3263 
+rich/box.py,sha256=SSolg8_pzHzY9QvJQo-qp0tbPsnj8O_2W4hmi1l-Zo0,10650 +rich/cells.py,sha256=KrQkj5-LghCCpJLSNQIyAZjndc4bnEqOEmi5YuZ9UCY,5130 +rich/color.py,sha256=3HSULVDj7qQkXUdFWv78JOiSZzfy5y1nkcYhna296V0,18211 +rich/color_triplet.py,sha256=3lhQkdJbvWPoLDO-AnYImAWmJvV5dlgYNCVZ97ORaN4,1054 +rich/columns.py,sha256=HUX0KcMm9dsKNi11fTbiM_h2iDtl8ySCaVcxlalEzq8,7131 +rich/console.py,sha256=rgyfKfmSnJHiGxVnv-wyGGIHPoJFgbOoiYPeyJXUclU,100789 +rich/constrain.py,sha256=1VIPuC8AgtKWrcncQrjBdYqA3JVWysu6jZo1rrh7c7Q,1288 +rich/containers.py,sha256=c_56TxcedGYqDepHBMTuZdUIijitAQgnox-Qde0Z1qo,5502 +rich/control.py,sha256=HnsraFTzBaUQDzKJWXsfPv-PPmgGypSgSv7oANackqs,6475 +rich/default_styles.py,sha256=j9eZgSn7bqnymxYzYp8h-0OGTRy2ZOj-PfY9toqp0Rw,8221 +rich/diagnose.py,sha256=1RWnQoppPXjC_49AB4vtV048DK3ksQSq671C83Y6f-g,977 +rich/emoji.py,sha256=_bTf1Y3JqiMk6Nfn4V_YOhq1wAPAHNODhGLJj95R3uI,2343 +rich/errors.py,sha256=5pP3Kc5d4QJ_c0KFsxrfyhjiPVe7J1zOqSFbFAzcV-Y,642 +rich/file_proxy.py,sha256=Tl9THMDZ-Pk5Wm8sI1gGg_U5DhusmxD-FZ0fUbcU0W0,1683 +rich/filesize.py,sha256=_iz9lIpRgvW7MNSeCZnLg-HwzbP4GETg543WqD8SFs0,2484 +rich/highlighter.py,sha256=G_sn-8DKjM1sEjLG_oc4ovkWmiUpWvj8bXi0yed2LnY,9586 +rich/json.py,sha256=omC2WHTgURxEosna1ftoSJCne2EX7MDuQtCdswS3qsk,5019 +rich/jupyter.py,sha256=G9pOJmR4ESIFYSd4MKGqmHqCtstx0oRWpyeTgv54-Xc,3228 +rich/layout.py,sha256=WR8PCSroYnteIT3zawxQ3k3ad1sQO5wGG1SZOoeBuBM,13944 +rich/live.py,sha256=tF3ukAAJZ_N2ZbGclqZ-iwLoIoZ8f0HHUz79jAyJqj8,15180 +rich/live_render.py,sha256=It_39YdzrBm8o3LL0kaGorPFg-BfZWAcrBjLjFokbx4,3521 +rich/logging.py,sha256=UL6TZNlaptYKHNhQ45LREy-29Pl-tQsBh7q3HSnWIAA,12456 +rich/markdown.py,sha256=R6X_1TMxUy3j3p0fkbmP3AYj8vt9Q72jr4Rz6tdtSU8,25846 +rich/markup.py,sha256=btpr271BLhiCR1jNglRnv2BpIzVcNefYwSMeW9teDbc,8427 +rich/measure.py,sha256=HmrIJX8sWRTHbgh8MxEay_83VkqNW_70s8aKP5ZcYI8,5305 +rich/padding.py,sha256=h8XnIivLrNtlxI3vQPKHXh4hAwjOJqZx0slM0z3g1_M,4896 +rich/pager.py,sha256=SO_ETBFKbg3n_AgOzXm41Sv36YxXAyI3_R-KOY2_uSc,828 +rich/palette.py,sha256=Ar6ZUrYHiFt6-Rr2k-k9F8V7hxgJYHNdqjk2vVXsLgc,3288 +rich/panel.py,sha256=9sQl00hPIqH5G2gALQo4NepFwpP0k9wT-s_gOms5pIc,11157 +rich/pretty.py,sha256=eQs437AksYaCB2qO_d-z6e0DF_t5F1KfXfa1Hi-Ya0E,36355 +rich/progress.py,sha256=CUc2lkU-X59mVdGfjMCBkZeiGPL3uxdONjhNJF2T7wY,60408 +rich/progress_bar.py,sha256=mZTPpJUwcfcdgQCTTz3kyY-fc79ddLwtx6Ghhxfo064,8162 +rich/prompt.py,sha256=k0CUIW-3I55jGk8U3O1WiEhdF6yXa2EiWeRqRhuJXWA,12435 +rich/protocol.py,sha256=Wt-2HZd67OYiopUkCTOz7lM38vyo5r3HEQZ9TOPDl5Q,1367 +rich/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166 +rich/repr.py,sha256=HIsurPLZK9Gray75l3_vQx7S27AzTpAj4ChXSfe1Fes,4419 +rich/rule.py,sha256=umO21Wjw0FcYAeTB3UumNLCsDWhejzxnjlf2VwiXiDI,4590 +rich/scope.py,sha256=lf6Qet_e4JOY34lwhYSAG-NBXYKBcYu6t_igv_JoGog,2831 +rich/screen.py,sha256=rL_j2wX-4SeuIOI2oOlc418QP9EAvD59GInUmEAE6jQ,1579 +rich/segment.py,sha256=7gOdwSPrzu0a2gRmxBDtu3u2S8iG5s9l7wlB58dKMy0,24707 +rich/spinner.py,sha256=onIhpKlljRHppTZasxO8kXgtYyCHUkpSgKglRJ3o51g,4214 +rich/status.py,sha256=kkPph3YeAZBo-X-4wPp8gTqZyU466NLwZBA4PZTTewo,4424 +rich/style.py,sha256=W9Ccy8Py8lNICtlfcp-ryzMTuQaGxAU3av7-g5fHu0s,26990 +rich/styled.py,sha256=wljVsVTXbABMMZvkzkO43ZEk_-irzEtvUiQ-sNnikQ8,1234 +rich/syntax.py,sha256=5ZBNxjIj3C1FC92vLwBVN-C5YAdKjPHfH6SqCzFaOYE,36263 +rich/table.py,sha256=52hmoLoHpeJEomznWvW8Ce2m1w62HuQDSGmaG6fYyqI,40025 +rich/terminal_theme.py,sha256=1j5-ufJfnvlAo5Qsi_ACZiXDmwMXzqgmFByObT9-yJY,3370 
+rich/text.py,sha256=v-vCOG8gS_D5QDhOhU19478-yEJGAXKVi8iYCCk7O_M,47540 +rich/theme.py,sha256=oNyhXhGagtDlbDye3tVu3esWOWk0vNkuxFw-_unlaK0,3771 +rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102 +rich/traceback.py,sha256=MtNMwDaDOH35HRbeB_Kx2ReMjfPfRC8IfRUZPMuKFPE,35789 +rich/tree.py,sha256=QoOwg424FkdwGfR8K0tZ6Q7qtzWNAUP_m4sFaYuG6nw,9391 diff --git a/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/WHEEL b/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..9ed4d8facdffc976b12b8144fcda70b971ffea8f --- /dev/null +++ b/py311/lib/python3.11/site-packages/rich-14.2.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: poetry-core 2.1.3 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/py311/lib/python3.11/site-packages/sklearn/_config.py b/py311/lib/python3.11/site-packages/sklearn/_config.py new file mode 100644 index 0000000000000000000000000000000000000000..217386c81c80e97a99d2fb60ec4ac29c9fab9a61 --- /dev/null +++ b/py311/lib/python3.11/site-packages/sklearn/_config.py @@ -0,0 +1,407 @@ +"""Global configuration state and functions for management""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import os +import threading +from contextlib import contextmanager as contextmanager + +_global_config = { + "assume_finite": bool(os.environ.get("SKLEARN_ASSUME_FINITE", False)), + "working_memory": int(os.environ.get("SKLEARN_WORKING_MEMORY", 1024)), + "print_changed_only": True, + "display": "diagram", + "pairwise_dist_chunk_size": int( + os.environ.get("SKLEARN_PAIRWISE_DIST_CHUNK_SIZE", 256) + ), + "enable_cython_pairwise_dist": True, + "array_api_dispatch": False, + "transform_output": "default", + "enable_metadata_routing": False, + "skip_parameter_validation": False, +} +_threadlocal = threading.local() + + +def _get_threadlocal_config(): + """Get a threadlocal **mutable** configuration. If the configuration + does not exist, copy the default global configuration.""" + if not hasattr(_threadlocal, "global_config"): + _threadlocal.global_config = _global_config.copy() + return _threadlocal.global_config + + +def get_config(): + """Retrieve the current scikit-learn configuration. + + This reflects the effective global configurations as established by default upon + library import, or modified via :func:`set_config` or :func:`config_context`. + + Returns + ------- + config : dict + Keys are parameter names that can be passed to :func:`set_config`. + + See Also + -------- + config_context : Context manager for global scikit-learn configuration. + set_config : Set global scikit-learn configuration. + + Examples + -------- + >>> import sklearn + >>> config = sklearn.get_config() + >>> config.keys() + dict_keys([...]) + """ + # Return a copy of the threadlocal configuration so that users will + # not be able to modify the configuration with the returned dict. 
+ return _get_threadlocal_config().copy() + + +def set_config( + assume_finite=None, + working_memory=None, + print_changed_only=None, + display=None, + pairwise_dist_chunk_size=None, + enable_cython_pairwise_dist=None, + array_api_dispatch=None, + transform_output=None, + enable_metadata_routing=None, + skip_parameter_validation=None, +): + """Set global scikit-learn configuration. + + These settings control the behaviour of scikit-learn functions during a library + usage session. Global configuration defaults (as described in the parameter list + below) take effect when scikit-learn is imported. + + This function can be used to modify the global scikit-learn configuration at + runtime. Passing `None` as an argument (the default) leaves the corresponding + setting unchanged. This allows users to selectively update the global configuration + values without affecting the others. + + .. versionadded:: 0.19 + + Parameters + ---------- + assume_finite : bool, default=None + If True, validation for finiteness will be skipped, + saving time, but leading to potential crashes. If + False, validation for finiteness will be performed, + avoiding error. Global default: False. + + .. versionadded:: 0.19 + + working_memory : int, default=None + If set, scikit-learn will attempt to limit the size of temporary arrays + to this number of MiB (per job when parallelised), often saving both + computation time and memory on expensive operations that can be + performed in chunks. Global default: 1024. + + .. versionadded:: 0.20 + + print_changed_only : bool, default=None + If True, only the parameters that were set to non-default + values will be printed when printing an estimator. For example, + ``print(SVC())`` while True will only print 'SVC()' while the default + behaviour would be to print 'SVC(C=1.0, cache_size=200, ...)' with + all the non-changed parameters. Global default: True. + + .. versionadded:: 0.21 + .. versionchanged:: 0.23 + Global default configuration changed from False to True. + + display : {'text', 'diagram'}, default=None + If 'diagram', estimators will be displayed as a diagram in a Jupyter + lab or notebook context. If 'text', estimators will be displayed as + text. Global default: 'diagram'. + + .. versionadded:: 0.23 + + pairwise_dist_chunk_size : int, default=None + The number of row vectors per chunk for the accelerated pairwise- + distances reduction backend. Global default: 256 (suitable for most of + modern laptops' caches and architectures). + + Intended for easier benchmarking and testing of scikit-learn internals. + End users are not expected to benefit from customizing this configuration + setting. + + .. versionadded:: 1.1 + + enable_cython_pairwise_dist : bool, default=None + Use the accelerated pairwise-distances reduction backend when + possible. Global default: True. + + Intended for easier benchmarking and testing of scikit-learn internals. + End users are not expected to benefit from customizing this configuration + setting. + + .. versionadded:: 1.1 + + array_api_dispatch : bool, default=None + Use Array API dispatching when inputs follow the Array API standard. + Global default: False. + + See the :ref:`User Guide ` for more details. + + .. versionadded:: 1.2 + + transform_output : str, default=None + Configure output of `transform` and `fit_transform`. + + See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` + for an example on how to use the API. 
+ + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + Global default: "default". + + .. versionadded:: 1.2 + .. versionadded:: 1.4 + `"polars"` option was added. + + enable_metadata_routing : bool, default=None + Enable metadata routing. By default this feature is disabled. + + Refer to :ref:`metadata routing user guide ` for more + details. + + - `True`: Metadata routing is enabled + - `False`: Metadata routing is disabled, use the old syntax. + - `None`: Configuration is unchanged + + Global default: False. + + .. versionadded:: 1.3 + + skip_parameter_validation : bool, default=None + If `True`, disable the validation of the hyper-parameters' types and values in + the fit method of estimators and for arguments passed to public helper + functions. It can save time in some situations but can lead to low level + crashes and exceptions with confusing error messages. + Global default: False. + + Note that for data parameters, such as `X` and `y`, only type validation is + skipped but validation with `check_array` will continue to run. + + .. versionadded:: 1.3 + + See Also + -------- + config_context : Context manager for global scikit-learn configuration. + get_config : Retrieve current values of the global configuration. + + Examples + -------- + >>> from sklearn import set_config + >>> set_config(display='diagram') # doctest: +SKIP + """ + local_config = _get_threadlocal_config() + + if assume_finite is not None: + local_config["assume_finite"] = assume_finite + if working_memory is not None: + local_config["working_memory"] = working_memory + if print_changed_only is not None: + local_config["print_changed_only"] = print_changed_only + if display is not None: + local_config["display"] = display + if pairwise_dist_chunk_size is not None: + local_config["pairwise_dist_chunk_size"] = pairwise_dist_chunk_size + if enable_cython_pairwise_dist is not None: + local_config["enable_cython_pairwise_dist"] = enable_cython_pairwise_dist + if array_api_dispatch is not None: + from sklearn.utils._array_api import _check_array_api_dispatch + + _check_array_api_dispatch(array_api_dispatch) + local_config["array_api_dispatch"] = array_api_dispatch + if transform_output is not None: + local_config["transform_output"] = transform_output + if enable_metadata_routing is not None: + local_config["enable_metadata_routing"] = enable_metadata_routing + if skip_parameter_validation is not None: + local_config["skip_parameter_validation"] = skip_parameter_validation + + +@contextmanager +def config_context( + *, + assume_finite=None, + working_memory=None, + print_changed_only=None, + display=None, + pairwise_dist_chunk_size=None, + enable_cython_pairwise_dist=None, + array_api_dispatch=None, + transform_output=None, + enable_metadata_routing=None, + skip_parameter_validation=None, +): + """Context manager to temporarily change the global scikit-learn configuration. + + This context manager can be used to apply scikit-learn configuration changes within + the scope of the with statement. Once the context exits, the global configuration is + restored again. + + The default global configurations (which take effect when scikit-learn is imported) + are defined below in the parameter list. + + Parameters + ---------- + assume_finite : bool, default=None + If True, validation for finiteness will be skipped, + saving time, but leading to potential crashes. 
If + False, validation for finiteness will be performed, + avoiding error. If None, the existing configuration won't change. + Global default: False. + + working_memory : int, default=None + If set, scikit-learn will attempt to limit the size of temporary arrays + to this number of MiB (per job when parallelised), often saving both + computation time and memory on expensive operations that can be + performed in chunks. If None, the existing configuration won't change. + Global default: 1024. + + print_changed_only : bool, default=None + If True, only the parameters that were set to non-default + values will be printed when printing an estimator. For example, + ``print(SVC())`` while True will only print 'SVC()', but would print + 'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters + when False. If None, the existing configuration won't change. + Global default: True. + + .. versionchanged:: 0.23 + Global default configuration changed from False to True. + + display : {'text', 'diagram'}, default=None + If 'diagram', estimators will be displayed as a diagram in a Jupyter + lab or notebook context. If 'text', estimators will be displayed as + text. If None, the existing configuration won't change. + Global default: 'diagram'. + + .. versionadded:: 0.23 + + pairwise_dist_chunk_size : int, default=None + The number of row vectors per chunk for the accelerated pairwise- + distances reduction backend. Global default: 256 (suitable for most of + modern laptops' caches and architectures). + + Intended for easier benchmarking and testing of scikit-learn internals. + End users are not expected to benefit from customizing this configuration + setting. + + .. versionadded:: 1.1 + + enable_cython_pairwise_dist : bool, default=None + Use the accelerated pairwise-distances reduction backend when + possible. Global default: True. + + Intended for easier benchmarking and testing of scikit-learn internals. + End users are not expected to benefit from customizing this configuration + setting. + + .. versionadded:: 1.1 + + array_api_dispatch : bool, default=None + Use Array API dispatching when inputs follow the Array API standard. + Global default: False. + + See the :ref:`User Guide ` for more details. + + .. versionadded:: 1.2 + + transform_output : str, default=None + Configure output of `transform` and `fit_transform`. + + See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` + for an example on how to use the API. + + - `"default"`: Default output format of a transformer + - `"pandas"`: DataFrame output + - `"polars"`: Polars output + - `None`: Transform configuration is unchanged + + Global default: "default". + + .. versionadded:: 1.2 + .. versionadded:: 1.4 + `"polars"` option was added. + + enable_metadata_routing : bool, default=None + Enable metadata routing. By default this feature is disabled. + + Refer to :ref:`metadata routing user guide ` for more + details. + + - `True`: Metadata routing is enabled + - `False`: Metadata routing is disabled, use the old syntax. + - `None`: Configuration is unchanged + + Global default: False. + + .. versionadded:: 1.3 + + skip_parameter_validation : bool, default=None + If `True`, disable the validation of the hyper-parameters' types and values in + the fit method of estimators and for arguments passed to public helper + functions. It can save time in some situations but can lead to low level + crashes and exceptions with confusing error messages. + Global default: False. 
+ + Note that for data parameters, such as `X` and `y`, only type validation is + skipped but validation with `check_array` will continue to run. + + .. versionadded:: 1.3 + + Yields + ------ + None. + + See Also + -------- + set_config : Set global scikit-learn configuration. + get_config : Retrieve current values of the global configuration. + + Notes + ----- + All settings, not just those presently modified, will be returned to + their previous values when the context manager is exited. + + Examples + -------- + >>> import sklearn + >>> from sklearn.utils.validation import assert_all_finite + >>> with sklearn.config_context(assume_finite=True): + ... assert_all_finite([float('nan')]) + >>> with sklearn.config_context(assume_finite=True): + ... with sklearn.config_context(assume_finite=False): + ... assert_all_finite([float('nan')]) + Traceback (most recent call last): + ... + ValueError: Input contains NaN... + """ + old_config = get_config() + set_config( + assume_finite=assume_finite, + working_memory=working_memory, + print_changed_only=print_changed_only, + display=display, + pairwise_dist_chunk_size=pairwise_dist_chunk_size, + enable_cython_pairwise_dist=enable_cython_pairwise_dist, + array_api_dispatch=array_api_dispatch, + transform_output=transform_output, + enable_metadata_routing=enable_metadata_routing, + skip_parameter_validation=skip_parameter_validation, + ) + + try: + yield + finally: + set_config(**old_config) diff --git a/py311/lib/python3.11/site-packages/sklearn/_distributor_init.py b/py311/lib/python3.11/site-packages/sklearn/_distributor_init.py new file mode 100644 index 0000000000000000000000000000000000000000..d66d5d36955c1ca5debf5821e2a2f265ef1f98ed --- /dev/null +++ b/py311/lib/python3.11/site-packages/sklearn/_distributor_init.py @@ -0,0 +1,13 @@ +"""Distributor init file + +Distributors: you can add custom code here to support particular distributions +of scikit-learn. + +For example, this is a good place to put any checks for hardware requirements. + +The scikit-learn standard source distribution will not put code in this file, +so you can safely replace this file with your own version. +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause diff --git a/py311/lib/python3.11/site-packages/sklearn/_min_dependencies.py b/py311/lib/python3.11/site-packages/sklearn/_min_dependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..82475f039e32b01970db73ccee733a7225f0975d --- /dev/null +++ b/py311/lib/python3.11/site-packages/sklearn/_min_dependencies.py @@ -0,0 +1,74 @@ +"""All minimum dependencies for scikit-learn.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import argparse +from collections import defaultdict + +# scipy and cython should by in sync with pyproject.toml +NUMPY_MIN_VERSION = "1.24.1" +SCIPY_MIN_VERSION = "1.10.0" +JOBLIB_MIN_VERSION = "1.3.0" +THREADPOOLCTL_MIN_VERSION = "3.2.0" +PYTEST_MIN_VERSION = "7.1.2" +CYTHON_MIN_VERSION = "3.1.2" + + +# 'build' and 'install' is included to have structured metadata for CI. 
+# It will NOT be included in setup's extras_require +# The values are (version_spec, comma separated tags) +dependent_packages = { + "numpy": (NUMPY_MIN_VERSION, "build, install"), + "scipy": (SCIPY_MIN_VERSION, "build, install"), + "joblib": (JOBLIB_MIN_VERSION, "install"), + "threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"), + "cython": (CYTHON_MIN_VERSION, "build"), + "meson-python": ("0.17.1", "build"), + "matplotlib": ("3.6.1", "benchmark, docs, examples, tests"), + "scikit-image": ("0.22.0", "docs, examples"), + "pandas": ("1.5.0", "benchmark, docs, examples, tests"), + "seaborn": ("0.13.0", "docs, examples"), + "memory_profiler": ("0.57.0", "benchmark, docs"), + "pytest": (PYTEST_MIN_VERSION, "tests"), + "pytest-cov": ("2.9.0", "tests"), + "ruff": ("0.11.7", "tests"), + "mypy": ("1.15", "tests"), + "pyamg": ("5.0.0", "tests"), + "polars": ("0.20.30", "docs, tests"), + "pyarrow": ("12.0.0", "tests"), + "sphinx": ("7.3.7", "docs"), + "sphinx-copybutton": ("0.5.2", "docs"), + "sphinx-gallery": ("0.17.1", "docs"), + "numpydoc": ("1.2.0", "docs, tests"), + "Pillow": ("10.1.0", "docs"), + "pooch": ("1.8.0", "docs, examples, tests"), + "sphinx-prompt": ("1.4.0", "docs"), + "sphinxext-opengraph": ("0.9.1", "docs"), + "plotly": ("5.18.0", "docs, examples"), + "sphinxcontrib-sass": ("0.3.4", "docs"), + "sphinx-remove-toctrees": ("1.0.0.post1", "docs"), + "sphinx-design": ("0.6.0", "docs"), + "pydata-sphinx-theme": ("0.15.3", "docs"), + "towncrier": ("24.8.0", "docs"), + # XXX: Pin conda-lock to the latest released version (needs manual update + # from time to time) + "conda-lock": ("3.0.1", "maintenance"), +} + + +# create inverse mapping for setuptools +tag_to_packages: dict = defaultdict(list) +for package, (min_version, extras) in dependent_packages.items(): + for extra in extras.split(", "): + tag_to_packages[extra].append("{}>={}".format(package, min_version)) + + +# Used by CI to get the min dependencies +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Get min dependencies for a package") + + parser.add_argument("package", choices=dependent_packages) + args = parser.parse_args() + min_version = dependent_packages[args.package][0] + print(min_version) diff --git a/py311/lib/python3.11/site-packages/sklearn/discriminant_analysis.py b/py311/lib/python3.11/site-packages/sklearn/discriminant_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..e6396462cef5d34e5aae6aac5c477d0544eab2b7 --- /dev/null +++ b/py311/lib/python3.11/site-packages/sklearn/discriminant_analysis.py @@ -0,0 +1,1186 @@ +"""Linear and quadratic discriminant analysis.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from numbers import Integral, Real + +import numpy as np +import scipy.linalg +from scipy import linalg + +from sklearn.base import ( + BaseEstimator, + ClassifierMixin, + ClassNamePrefixFeaturesOutMixin, + TransformerMixin, + _fit_context, +) +from sklearn.covariance import empirical_covariance, ledoit_wolf, shrunk_covariance +from sklearn.linear_model._base import LinearClassifierMixin +from sklearn.preprocessing import StandardScaler +from sklearn.utils._array_api import _expit, device, get_namespace, size +from sklearn.utils._param_validation import HasMethods, Interval, StrOptions +from sklearn.utils.extmath import softmax +from sklearn.utils.multiclass import check_classification_targets, unique_labels +from sklearn.utils.validation import check_is_fitted, validate_data + +__all__ = 
["LinearDiscriminantAnalysis", "QuadraticDiscriminantAnalysis"] + + +def _cov(X, shrinkage=None, covariance_estimator=None): + """Estimate covariance matrix (using optional covariance_estimator). + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + shrinkage : {'empirical', 'auto'} or float, default=None + Shrinkage parameter, possible values: + - None or 'empirical': no shrinkage (default). + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. + + Shrinkage parameter is ignored if `covariance_estimator` + is not None. + + covariance_estimator : estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying on the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in :mod:`sklearn.covariance``. + If None the shrinkage parameter drives the estimate. + + .. versionadded:: 0.24 + + Returns + ------- + s : ndarray of shape (n_features, n_features) + Estimated covariance matrix. + """ + if covariance_estimator is None: + shrinkage = "empirical" if shrinkage is None else shrinkage + if isinstance(shrinkage, str): + if shrinkage == "auto": + sc = StandardScaler() # standardize features + X = sc.fit_transform(X) + s = ledoit_wolf(X)[0] + # rescale + s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] + elif shrinkage == "empirical": + s = empirical_covariance(X) + elif isinstance(shrinkage, Real): + s = shrunk_covariance(empirical_covariance(X), shrinkage) + else: + if shrinkage is not None and shrinkage != 0: + raise ValueError( + "covariance_estimator and shrinkage parameters " + "are not None. Only one of the two can be set." + ) + covariance_estimator.fit(X) + if not hasattr(covariance_estimator, "covariance_"): + raise ValueError( + "%s does not have a covariance_ attribute" + % covariance_estimator.__class__.__name__ + ) + s = covariance_estimator.covariance_ + return s + + +def _class_means(X, y): + """Compute class means. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + Returns + ------- + means : array-like of shape (n_classes, n_features) + Class means. + """ + xp, is_array_api_compliant = get_namespace(X) + classes, y = xp.unique_inverse(y) + means = xp.zeros((classes.shape[0], X.shape[1]), device=device(X), dtype=X.dtype) + + if is_array_api_compliant: + for i in range(classes.shape[0]): + means[i, :] = xp.mean(X[y == i], axis=0) + else: + # TODO: Explore the choice of using bincount + add.at as it seems sub optimal + # from a performance-wise + cnt = np.bincount(y) + np.add.at(means, y, X) + means /= cnt[:, None] + return means + + +def _class_cov(X, y, priors, shrinkage=None, covariance_estimator=None): + """Compute weighted within-class covariance matrix. + + The per-class covariance are weighted by the class priors. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + priors : array-like of shape (n_classes,) + Class priors. + + shrinkage : 'auto' or float, default=None + Shrinkage parameter, possible values: + - None: no shrinkage (default). + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. 
+ + Shrinkage parameter is ignored if `covariance_estimator` is not None. + + covariance_estimator : estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in sklearn.covariance. + If None, the shrinkage parameter drives the estimate. + + .. versionadded:: 0.24 + + Returns + ------- + cov : array-like of shape (n_features, n_features) + Weighted within-class covariance matrix + """ + classes = np.unique(y) + cov = np.zeros(shape=(X.shape[1], X.shape[1])) + for idx, group in enumerate(classes): + Xg = X[y == group, :] + cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage, covariance_estimator)) + return cov + + +class DiscriminantAnalysisPredictionMixin: + """Mixin class for QuadraticDiscriminantAnalysis and NearestCentroid.""" + + def decision_function(self, X): + """Apply decision function to an array of samples. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Array of samples (test vectors). + + Returns + ------- + y_scores : ndarray of shape (n_samples,) or (n_samples, n_classes) + Decision function values related to each class, per sample. + In the two-class case, the shape is `(n_samples,)`, giving the + log likelihood ratio of the positive class. + """ + y_scores = self._decision_function(X) + if len(self.classes_) == 2: + return y_scores[:, 1] - y_scores[:, 0] + return y_scores + + def predict(self, X): + """Perform classification on an array of vectors `X`. + + Returns the class label for each sample. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input vectors, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + Class label for each sample. + """ + scores = self._decision_function(X) + return self.classes_.take(scores.argmax(axis=1)) + + def predict_proba(self, X): + """Estimate class probabilities. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + Returns + ------- + y_proba : ndarray of shape (n_samples, n_classes) + Probability estimate of the sample for each class in the + model, where classes are ordered as they are in `self.classes_`. + """ + return np.exp(self.predict_log_proba(X)) + + def predict_log_proba(self, X): + """Estimate log class probabilities. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + Returns + ------- + y_log_proba : ndarray of shape (n_samples, n_classes) + Estimated log probabilities. + """ + scores = self._decision_function(X) + log_likelihood = scores - scores.max(axis=1)[:, np.newaxis] + return log_likelihood - np.log( + np.exp(log_likelihood).sum(axis=1)[:, np.newaxis] + ) + + +class LinearDiscriminantAnalysis( + ClassNamePrefixFeaturesOutMixin, + LinearClassifierMixin, + TransformerMixin, + BaseEstimator, +): + """Linear Discriminant Analysis. + + A classifier with a linear decision boundary, generated by fitting class + conditional densities to the data and using Bayes' rule. + + The model fits a Gaussian density to each class, assuming that all classes + share the same covariance matrix. 
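For reference, the shared-covariance (homoscedastic) assumption above is exactly what makes the decision boundary linear. With class means $\mu_k$, pooled covariance $\Sigma$, and priors $\pi_k$, the textbook log-posterior is, up to a class-independent constant,

```latex
\delta_k(x) = x^\top \Sigma^{-1} \mu_k
              - \tfrac{1}{2}\, \mu_k^\top \Sigma^{-1} \mu_k
              + \log \pi_k
```

which is affine in $x$; in the 'lsqr' and 'eigen' solvers shown later in this file, `coef_` and `intercept_` hold this linear term and constant term per class.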
+ + The fitted model can also be used to reduce the dimensionality of the input + by projecting it to the most discriminative directions, using the + `transform` method. + + .. versionadded:: 0.17 + + For a comparison between + :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis` + and :class:`~sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis`, see + :ref:`sphx_glr_auto_examples_classification_plot_lda_qda.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + solver : {'svd', 'lsqr', 'eigen'}, default='svd' + Solver to use, possible values: + - 'svd': Singular value decomposition (default). + Does not compute the covariance matrix, therefore this solver is + recommended for data with a large number of features. + - 'lsqr': Least squares solution. + Can be combined with shrinkage or custom covariance estimator. + - 'eigen': Eigenvalue decomposition. + Can be combined with shrinkage or custom covariance estimator. + + .. versionchanged:: 1.2 + `solver="svd"` now has experimental Array API support. See the + :ref:`Array API User Guide ` for more details. + + shrinkage : 'auto' or float, default=None + Shrinkage parameter, possible values: + - None: no shrinkage (default). + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. + + This should be left to None if `covariance_estimator` is used. + Note that shrinkage works only with 'lsqr' and 'eigen' solvers. + + For a usage example, see + :ref:`sphx_glr_auto_examples_classification_plot_lda.py`. + + priors : array-like of shape (n_classes,), default=None + The class prior probabilities. By default, the class proportions are + inferred from the training data. + + n_components : int, default=None + Number of components (<= min(n_classes - 1, n_features)) for + dimensionality reduction. If None, will be set to + min(n_classes - 1, n_features). This parameter only affects the + `transform` method. + + For a usage example, see + :ref:`sphx_glr_auto_examples_decomposition_plot_pca_vs_lda.py`. + + store_covariance : bool, default=False + If True, explicitly compute the weighted within-class covariance + matrix when solver is 'svd'. The matrix is always computed + and stored for the other solvers. + + .. versionadded:: 0.17 + + tol : float, default=1.0e-4 + Absolute threshold for a singular value of X to be considered + significant, used to estimate the rank of X. Dimensions whose + singular values are non-significant are discarded. Only used if + solver is 'svd'. + + .. versionadded:: 0.17 + + covariance_estimator : covariance estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying on the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in :mod:`sklearn.covariance`. + if None the shrinkage parameter drives the estimate. + + This should be left to None if `shrinkage` is used. + Note that `covariance_estimator` works only with 'lsqr' and 'eigen' + solvers. + + .. versionadded:: 0.24 + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) or (n_classes, n_features) + Weight vector(s). + + intercept_ : ndarray of shape (n_classes,) + Intercept term. + + covariance_ : array-like of shape (n_features, n_features) + Weighted within-class covariance matrix. It corresponds to + `sum_k prior_k * C_k` where `C_k` is the covariance matrix of the + samples in class `k`. 
The `C_k` are estimated using the (potentially + shrunk) biased estimator of covariance. If solver is 'svd', only + exists when `store_covariance` is True. + + explained_variance_ratio_ : ndarray of shape (n_components,) + Percentage of variance explained by each of the selected components. + If ``n_components`` is not set then all components are stored and the + sum of explained variances is equal to 1.0. Only available when eigen + or svd solver is used. + + means_ : array-like of shape (n_classes, n_features) + Class-wise means. + + priors_ : array-like of shape (n_classes,) + Class priors (sum to 1). + + scalings_ : array-like of shape (rank, n_classes - 1) + Scaling of the features in the space spanned by the class centroids. + Only available for 'svd' and 'eigen' solvers. + + xbar_ : array-like of shape (n_features,) + Overall mean. Only present if solver is 'svd'. + + classes_ : array-like of shape (n_classes,) + Unique class labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + QuadraticDiscriminantAnalysis : Quadratic Discriminant Analysis. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> y = np.array([1, 1, 1, 2, 2, 2]) + >>> clf = LinearDiscriminantAnalysis() + >>> clf.fit(X, y) + LinearDiscriminantAnalysis() + >>> print(clf.predict([[-0.8, -1]])) + [1] + """ + + _parameter_constraints: dict = { + "solver": [StrOptions({"svd", "lsqr", "eigen"})], + "shrinkage": [StrOptions({"auto"}), Interval(Real, 0, 1, closed="both"), None], + "n_components": [Interval(Integral, 1, None, closed="left"), None], + "priors": ["array-like", None], + "store_covariance": ["boolean"], + "tol": [Interval(Real, 0, None, closed="left")], + "covariance_estimator": [HasMethods("fit"), None], + } + + def __init__( + self, + solver="svd", + shrinkage=None, + priors=None, + n_components=None, + store_covariance=False, + tol=1e-4, + covariance_estimator=None, + ): + self.solver = solver + self.shrinkage = shrinkage + self.priors = priors + self.n_components = n_components + self.store_covariance = store_covariance # used only in svd solver + self.tol = tol # used only in svd solver + self.covariance_estimator = covariance_estimator + + def _solve_lstsq(self, X, y, shrinkage, covariance_estimator): + """Least squares solver. + + The least squares solver computes a straightforward solution of the + optimal decision rule based directly on the discriminant functions. It + can only be used for classification (with any covariance estimator), + because + estimation of eigenvectors is not performed. Therefore, dimensionality + reduction with the transform is not supported. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_classes) + Target values. + + shrinkage : 'auto', float or None + Shrinkage parameter, possible values: + - None: no shrinkage. + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. 
+ + Shrinkage parameter is ignored if `covariance_estimator` is + not None + + covariance_estimator : estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in sklearn.covariance. + if None the shrinkage parameter drives the estimate. + + .. versionadded:: 0.24 + + Notes + ----- + This solver is based on [1]_, section 2.6.2, pp. 39-41. + + References + ---------- + .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification + (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN + 0-471-05669-3. + """ + self.means_ = _class_means(X, y) + self.covariance_ = _class_cov( + X, y, self.priors_, shrinkage, covariance_estimator + ) + self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T + self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log( + self.priors_ + ) + + def _solve_eigen(self, X, y, shrinkage, covariance_estimator): + """Eigenvalue solver. + + The eigenvalue solver computes the optimal solution of the Rayleigh + coefficient (basically the ratio of between class scatter to within + class scatter). This solver supports both classification and + dimensionality reduction (with any covariance estimator). + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + shrinkage : 'auto', float or None + Shrinkage parameter, possible values: + - None: no shrinkage. + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage constant. + + Shrinkage parameter is ignored if `covariance_estimator` is + not None + + covariance_estimator : estimator, default=None + If not None, `covariance_estimator` is used to estimate + the covariance matrices instead of relying the empirical + covariance estimator (with potential shrinkage). + The object should have a fit method and a ``covariance_`` attribute + like the estimators in sklearn.covariance. + if None the shrinkage parameter drives the estimate. + + .. versionadded:: 0.24 + + Notes + ----- + This solver is based on [1]_, section 3.8.3, pp. 121-124. + + References + ---------- + .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification + (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN + 0-471-05669-3. + """ + self.means_ = _class_means(X, y) + self.covariance_ = _class_cov( + X, y, self.priors_, shrinkage, covariance_estimator + ) + + Sw = self.covariance_ # within scatter + St = _cov(X, shrinkage, covariance_estimator) # total scatter + Sb = St - Sw # between scatter + + evals, evecs = linalg.eigh(Sb, Sw) + self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][ + : self._max_components + ] + evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors + + self.scalings_ = evecs + self.coef_ = np.dot(self.means_, evecs).dot(evecs.T) + self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log( + self.priors_ + ) + + def _solve_svd(self, X, y): + """SVD solver. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. 
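The implementation that follows works in two SVD passes, mirroring its numbered comments: it first whitens the pooled within-class data (per-feature standard-deviation scaling followed by an SVD that yields a whitening transform of the within-class covariance), then runs a second SVD on the whitened, prior-weighted class centers to extract the discriminant directions. A minimal NumPy sketch of the same two-pass idea, with a hypothetical helper name and omitting the dtype, device, and fitted-attribute bookkeeping of the real solver:

```python
import numpy as np

def lda_svd_directions(X, y, tol=1e-4):
    """Illustrative two-pass SVD sketch of LDA direction finding (not the library code)."""
    classes, counts = np.unique(y, return_counts=True)
    priors = counts / len(y)
    means = np.stack([X[y == c].mean(axis=0) for c in classes])
    Xc = np.concatenate([X[y == c] - means[i] for i, c in enumerate(classes)])
    xbar = priors @ means  # overall (prior-weighted) mean

    # Pass 1: whiten the pooled within-class scatter.
    std = Xc.std(axis=0)
    std[std == 0] = 1.0  # guard against constant features
    fac = 1.0 / (len(y) - len(classes))
    _, S, Vt = np.linalg.svd(np.sqrt(fac) * (Xc / std), full_matrices=False)
    rank = int((S > tol).sum())
    scalings = (Vt[:rank] / std).T / S[:rank]  # whitening transform

    # Pass 2: SVD of the whitened, prior-weighted class centers.
    fac = 1.0 if len(classes) == 1 else 1.0 / (len(classes) - 1)
    centers = (np.sqrt(len(y) * priors * fac) * (means - xbar).T).T @ scalings
    _, S2, Vt2 = np.linalg.svd(centers, full_matrices=False)
    rank2 = int((S2 > tol * S2[0]).sum())
    return scalings @ Vt2.T[:, :rank2]  # columns span the discriminant subspace
```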
+ """ + xp, is_array_api_compliant = get_namespace(X) + + if is_array_api_compliant: + svd = xp.linalg.svd + else: + svd = scipy.linalg.svd + + n_samples, _ = X.shape + n_classes = self.classes_.shape[0] + + self.means_ = _class_means(X, y) + if self.store_covariance: + self.covariance_ = _class_cov(X, y, self.priors_) + + Xc = [] + for idx, group in enumerate(self.classes_): + Xg = X[y == group] + Xc.append(Xg - self.means_[idx, :]) + + self.xbar_ = self.priors_ @ self.means_ + + Xc = xp.concat(Xc, axis=0) + + # 1) within (univariate) scaling by with classes std-dev + std = xp.std(Xc, axis=0) + # avoid division by zero in normalization + std[std == 0] = 1.0 + fac = xp.asarray(1.0 / (n_samples - n_classes), dtype=X.dtype, device=device(X)) + + # 2) Within variance scaling + X = xp.sqrt(fac) * (Xc / std) + # SVD of centered (within)scaled data + _, S, Vt = svd(X, full_matrices=False) + + rank = xp.sum(xp.astype(S > self.tol, xp.int32)) + # Scaling of within covariance is: V' 1/S + scalings = (Vt[:rank, :] / std).T / S[:rank] + fac = 1.0 if n_classes == 1 else 1.0 / (n_classes - 1) + + # 3) Between variance scaling + # Scale weighted centers + X = ( + (xp.sqrt((n_samples * self.priors_) * fac)) * (self.means_ - self.xbar_).T + ).T @ scalings + # Centers are living in a space with n_classes-1 dim (maximum) + # Use SVD to find projection in the space spanned by the + # (n_classes) centers + _, S, Vt = svd(X, full_matrices=False) + + if self._max_components == 0: + self.explained_variance_ratio_ = xp.empty((0,), dtype=S.dtype) + else: + self.explained_variance_ratio_ = (S**2 / xp.sum(S**2))[ + : self._max_components + ] + + rank = xp.sum(xp.astype(S > self.tol * S[0], xp.int32)) + self.scalings_ = scalings @ Vt.T[:, :rank] + coef = (self.means_ - self.xbar_) @ self.scalings_ + self.intercept_ = -0.5 * xp.sum(coef**2, axis=1) + xp.log(self.priors_) + self.coef_ = coef @ self.scalings_.T + self.intercept_ -= self.xbar_ @ self.coef_.T + + @_fit_context( + # LinearDiscriminantAnalysis.covariance_estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y): + """Fit the Linear Discriminant Analysis model. + + .. versionchanged:: 0.19 + `store_covariance` and `tol` has been moved to main constructor. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + Returns + ------- + self : object + Fitted estimator. + """ + xp, _ = get_namespace(X) + + X, y = validate_data( + self, X, y, ensure_min_samples=2, dtype=[xp.float64, xp.float32] + ) + self.classes_ = unique_labels(y) + n_samples, n_features = X.shape + n_classes = self.classes_.shape[0] + + if n_samples == n_classes: + raise ValueError( + "The number of samples must be more than the number of classes." + ) + + if self.priors is None: # estimate priors from sample + _, cnts = xp.unique_counts(y) # non-negative ints + self.priors_ = xp.astype(cnts, X.dtype) / float(n_samples) + else: + self.priors_ = xp.asarray(self.priors, dtype=X.dtype) + + if xp.any(self.priors_ < 0): + raise ValueError("priors must be non-negative") + + if xp.abs(xp.sum(self.priors_) - 1.0) > 1e-5: + warnings.warn("The priors do not sum to 1. 
Renormalizing", UserWarning) + self.priors_ = self.priors_ / self.priors_.sum() + + # Maximum number of components no matter what n_components is + # specified: + max_components = min(n_classes - 1, n_features) + + if self.n_components is None: + self._max_components = max_components + else: + if self.n_components > max_components: + raise ValueError( + "n_components cannot be larger than min(n_features, n_classes - 1)." + ) + self._max_components = self.n_components + + if self.solver == "svd": + if self.shrinkage is not None: + raise NotImplementedError("shrinkage not supported with 'svd' solver.") + if self.covariance_estimator is not None: + raise ValueError( + "covariance estimator " + "is not supported " + "with svd solver. Try another solver" + ) + self._solve_svd(X, y) + elif self.solver == "lsqr": + self._solve_lstsq( + X, + y, + shrinkage=self.shrinkage, + covariance_estimator=self.covariance_estimator, + ) + elif self.solver == "eigen": + self._solve_eigen( + X, + y, + shrinkage=self.shrinkage, + covariance_estimator=self.covariance_estimator, + ) + if size(self.classes_) == 2: # treat binary case as a special case + coef_ = xp.asarray(self.coef_[1, :] - self.coef_[0, :], dtype=X.dtype) + self.coef_ = xp.reshape(coef_, (1, -1)) + intercept_ = xp.asarray( + self.intercept_[1] - self.intercept_[0], dtype=X.dtype + ) + self.intercept_ = xp.reshape(intercept_, (1,)) + self._n_features_out = self._max_components + return self + + def transform(self, X): + """Project data to maximize class separation. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + X_new : ndarray of shape (n_samples, n_components) or \ + (n_samples, min(rank, n_components)) + Transformed data. In the case of the 'svd' solver, the shape + is (n_samples, min(rank, n_components)). + """ + if self.solver == "lsqr": + raise NotImplementedError( + "transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')." + ) + check_is_fitted(self) + X = validate_data(self, X, reset=False) + + if self.solver == "svd": + X_new = (X - self.xbar_) @ self.scalings_ + elif self.solver == "eigen": + X_new = X @ self.scalings_ + + return X_new[:, : self._max_components] + + def predict_proba(self, X): + """Estimate probability. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + Estimated probabilities. + """ + check_is_fitted(self) + xp, _ = get_namespace(X) + decision = self.decision_function(X) + if size(self.classes_) == 2: + proba = _expit(decision, xp) + return xp.stack([1 - proba, proba], axis=1) + else: + return softmax(decision) + + def predict_log_proba(self, X): + """Estimate log probability. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + Returns + ------- + C : ndarray of shape (n_samples, n_classes) + Estimated log probabilities. + """ + xp, _ = get_namespace(X) + prediction = self.predict_proba(X) + + smallest_normal = xp.finfo(prediction.dtype).smallest_normal + prediction[prediction == 0.0] += smallest_normal + return xp.log(prediction) + + def decision_function(self, X): + """Apply decision function to an array of samples. + + The decision function is equal (up to a constant factor) to the + log-posterior of the model, i.e. `log p(y = k | x)`. In a binary + classification setting this instead corresponds to the difference + `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`. 
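Since the binary `decision_function` is the positive-class log likelihood ratio, the two-class `predict_proba` defined above is just its logistic sigmoid; a quick sanity check on arbitrary toy data:

```python
import numpy as np
from scipy.special import expit  # logistic sigmoid
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X = np.array([[-2.0, -1.0], [-1.0, -1.0], [-3.0, -2.0],
              [1.0, 1.0], [2.0, 1.0], [3.0, 2.0]])
y = np.array([0, 0, 0, 1, 1, 1])
clf = LinearDiscriminantAnalysis().fit(X, y)

scores = clf.decision_function(X)         # shape (n_samples,) in the binary case
proba = clf.predict_proba(X)[:, 1]        # P(y = 1 | x)
assert np.allclose(expit(scores), proba)  # sigmoid of the log-odds is the probability
```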
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Array of samples (test vectors). + + Returns + ------- + y_scores : ndarray of shape (n_samples,) or (n_samples, n_classes) + Decision function values related to each class, per sample. + In the two-class case, the shape is `(n_samples,)`, giving the + log likelihood ratio of the positive class. + """ + # Only overrides for the docstring. + return super().decision_function(X) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.array_api_support = True + return tags + + +class QuadraticDiscriminantAnalysis( + DiscriminantAnalysisPredictionMixin, ClassifierMixin, BaseEstimator +): + """Quadratic Discriminant Analysis. + + A classifier with a quadratic decision boundary, generated + by fitting class conditional densities to the data + and using Bayes' rule. + + The model fits a Gaussian density to each class. + + .. versionadded:: 0.17 + + For a comparison between + :class:`~sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis` + and :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`, see + :ref:`sphx_glr_auto_examples_classification_plot_lda_qda.py`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + solver : {'svd', 'eigen'}, default='svd' + Solver to use, possible values: + - 'svd': Singular value decomposition (default). + Does not compute the covariance matrix, therefore this solver is + recommended for data with a large number of features. + - 'eigen': Eigenvalue decomposition. + Can be combined with shrinkage or custom covariance estimator. + + shrinkage : 'auto' or float, default=None + Shrinkage parameter, possible values: + - None: no shrinkage (default). + - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. + - float between 0 and 1: fixed shrinkage parameter. + + Enabling shrinkage is expected to improve the model when some + classes have a relatively small number of training data points + compared to the number of features by mitigating overfitting during + the covariance estimation step. + + This should be left to `None` if `covariance_estimator` is used. + Note that shrinkage works only with 'eigen' solver. + + priors : array-like of shape (n_classes,), default=None + Class priors. By default, the class proportions are inferred from the + training data. + + reg_param : float, default=0.0 + Regularizes the per-class covariance estimates by transforming S2 as + ``S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features)``, + where S2 corresponds to the `scaling_` attribute of a given class. + + store_covariance : bool, default=False + If True, the class covariance matrices are explicitly computed and + stored in the `self.covariance_` attribute. + + .. versionadded:: 0.17 + + tol : float, default=1.0e-4 + Absolute threshold for the covariance matrix to be considered rank + deficient after applying some regularization (see `reg_param`) to each + `Sk` where `Sk` represents covariance matrix for k-th class. This + parameter does not affect the predictions. It controls when a warning + is raised if the covariance matrix is not full rank. + + .. versionadded:: 0.17 + + covariance_estimator : covariance estimator, default=None + If not None, `covariance_estimator` is used to estimate the covariance + matrices instead of relying on the empirical covariance estimator + (with potential shrinkage). The object should have a fit method and + a ``covariance_`` attribute like the estimators in + :mod:`sklearn.covariance`. 
If None the shrinkage parameter drives the + estimate. + + This should be left to `None` if `shrinkage` is used. + Note that `covariance_estimator` works only with the 'eigen' solver. + + Attributes + ---------- + covariance_ : list of len n_classes of ndarray \ + of shape (n_features, n_features) + For each class, gives the covariance matrix estimated using the + samples of that class. The estimations are unbiased. Only present if + `store_covariance` is True. + + means_ : array-like of shape (n_classes, n_features) + Class-wise means. + + priors_ : array-like of shape (n_classes,) + Class priors (sum to 1). + + rotations_ : list of len n_classes of ndarray of shape (n_features, n_k) + For each class k an array of shape (n_features, n_k), where + ``n_k = min(n_features, number of elements in class k)`` + It is the rotation of the Gaussian distribution, i.e. its + principal axis. It corresponds to `V`, the matrix of eigenvectors + coming from the SVD of `Xk = U S Vt` where `Xk` is the centered + matrix of samples from class k. + + scalings_ : list of len n_classes of ndarray of shape (n_k,) + For each class, contains the scaling of + the Gaussian distributions along its principal axes, i.e. the + variance in the rotated coordinate system. It corresponds to `S^2 / + (n_samples - 1)`, where `S` is the diagonal matrix of singular values + from the SVD of `Xk`, where `Xk` is the centered matrix of samples + from class k. + + classes_ : ndarray of shape (n_classes,) + Unique class labels. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + LinearDiscriminantAnalysis : Linear Discriminant Analysis. + + Examples + -------- + >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis + >>> import numpy as np + >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) + >>> y = np.array([1, 1, 1, 2, 2, 2]) + >>> clf = QuadraticDiscriminantAnalysis() + >>> clf.fit(X, y) + QuadraticDiscriminantAnalysis() + >>> print(clf.predict([[-0.8, -1]])) + [1] + """ + + _parameter_constraints: dict = { + "solver": [StrOptions({"svd", "eigen"})], + "shrinkage": [StrOptions({"auto"}), Interval(Real, 0, 1, closed="both"), None], + "priors": ["array-like", None], + "reg_param": [Interval(Real, 0, 1, closed="both")], + "store_covariance": ["boolean"], + "tol": [Interval(Real, 0, None, closed="left")], + "covariance_estimator": [HasMethods("fit"), None], + } + + def __init__( + self, + *, + solver="svd", + shrinkage=None, + priors=None, + reg_param=0.0, + store_covariance=False, + tol=1.0e-4, + covariance_estimator=None, + ): + self.solver = solver + self.shrinkage = shrinkage + self.priors = priors + self.reg_param = reg_param + self.store_covariance = store_covariance + self.tol = tol + self.covariance_estimator = covariance_estimator + + def _solve_eigen(self, X): + """Eigenvalue solver. + + The eigenvalue solver uses the eigen decomposition of the data + to compute the rotation and scaling matrices used for scoring + new samples. This solver supports use of any covariance estimator. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. 
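Both QDA solvers ultimately factor each class covariance $\Sigma_k$ into the `rotations_`/`scalings_` pair used to evaluate one and the same scoring rule: the per-class log-posterior of eq. (4.12) in The Elements of Statistical Learning, which `_decision_function` below cites,

```latex
\delta_k(x) = -\tfrac{1}{2} \log \lvert \Sigma_k \rvert
              - \tfrac{1}{2} (x - \mu_k)^\top \Sigma_k^{-1} (x - \mu_k)
              + \log \pi_k
```

with mean $\mu_k$ and prior $\pi_k$ for class $k$; the eigendecomposition here and the SVD variant differ only in how they obtain that factorization.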
+ """ + n_samples, n_features = X.shape + + cov = _cov(X, self.shrinkage, self.covariance_estimator) + scaling, rotation = linalg.eigh(cov) # scalings are eigenvalues + rotation = rotation[:, np.argsort(scaling)[::-1]] # sort eigenvectors + scaling = scaling[np.argsort(scaling)[::-1]] # sort eigenvalues + return scaling, rotation, cov + + def _solve_svd(self, X): + """SVD solver for Quadratic Discriminant Analysis. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + """ + n_samples, n_features = X.shape + + mean = X.mean(0) + Xc = X - mean + # Xc = U * S * V.T + _, S, Vt = np.linalg.svd(Xc, full_matrices=False) + scaling = (S**2) / (n_samples - 1) # scalings are squared singular values + scaling = ((1 - self.reg_param) * scaling) + self.reg_param + rotation = Vt.T + + cov = None + if self.store_covariance: + # cov = V * (S^2 / (n-1)) * V.T + cov = scaling * Vt.T @ Vt + + return scaling, rotation, cov + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit the model according to the given training data and parameters. + + .. versionchanged:: 0.19 + ``store_covariances`` has been moved to main constructor as + ``store_covariance``. + + .. versionchanged:: 0.19 + ``tol`` has been moved to main constructor. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target values (integers). + + Returns + ------- + self : object + Fitted estimator. + """ + X, y = validate_data(self, X, y) + check_classification_targets(y) + self.classes_ = np.unique(y) + n_samples, n_features = X.shape + n_classes = len(self.classes_) + if n_classes < 2: + raise ValueError( + "The number of classes has to be greater than one. Got " + f"{n_classes} class." + ) + if self.priors is None: + _, cnts = np.unique(y, return_counts=True) + self.priors_ = cnts / float(n_samples) + else: + self.priors_ = np.array(self.priors) + + if self.solver == "svd": + if self.shrinkage is not None: + # Support for `shrinkage` could be implemented as in + # https://github.com/scikit-learn/scikit-learn/issues/32590 + raise NotImplementedError("shrinkage not supported with 'svd' solver.") + if self.covariance_estimator is not None: + raise ValueError( + "covariance_estimator is not supported with solver='svd'. " + "Try solver='eigen' instead." + ) + specific_solver = self._solve_svd + elif self.solver == "eigen": + specific_solver = self._solve_eigen + + means = [] + cov = [] + scalings = [] + rotations = [] + for class_idx, class_label in enumerate(self.classes_): + X_class = X[y == class_label, :] + if len(X_class) == 1: + raise ValueError( + "y has only 1 sample in class %s, covariance is ill defined." + % str(self.classes_[class_idx]) + ) + + mean_class = X_class.mean(0) + means.append(mean_class) + + scaling_class, rotation_class, cov_class = specific_solver(X_class) + + rank = np.sum(scaling_class > self.tol) + if rank < n_features: + n_samples_class = X_class.shape[0] + if self.solver == "svd" and n_samples_class <= n_features: + raise linalg.LinAlgError( + f"The covariance matrix of class {class_label} is not full " + f"rank. When using `solver='svd'` the number of samples in " + f"each class should be more than the number of features, but " + f"class {class_label} has {n_samples_class} samples and " + f"{n_features} features. 
Try using `solver='eigen'` and " + f"setting the parameter `shrinkage` for regularization." + ) + else: + msg_param = "shrinkage" if self.solver == "eigen" else "reg_param" + raise linalg.LinAlgError( + f"The covariance matrix of class {class_label} is not full " + f"rank. Increase the value of `{msg_param}` to reduce the " + f"collinearity.", + ) + + cov.append(cov_class) + scalings.append(scaling_class) + rotations.append(rotation_class) + + if self.store_covariance: + self.covariance_ = cov + self.means_ = np.asarray(means) + self.scalings_ = scalings + self.rotations_ = rotations + return self + + def _decision_function(self, X): + # return log posterior, see eq (4.12) p. 110 of the ESL. + check_is_fitted(self) + + X = validate_data(self, X, reset=False) + norm2 = [] + for i in range(len(self.classes_)): + R = self.rotations_[i] + S = self.scalings_[i] + Xm = X - self.means_[i] + X2 = np.dot(Xm, R * (S ** (-0.5))) + norm2.append(np.sum(X2**2, axis=1)) + norm2 = np.array(norm2).T # shape = [len(X), n_classes] + u = np.asarray([np.sum(np.log(s)) for s in self.scalings_]) + return -0.5 * (norm2 + u) + np.log(self.priors_) + + def decision_function(self, X): + """Apply decision function to an array of samples. + + The decision function is equal (up to a constant factor) to the + log-posterior of the model, i.e. `log p(y = k | x)`. In a binary + classification setting this instead corresponds to the difference + `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Array of samples (test vectors). + + Returns + ------- + C : ndarray of shape (n_samples,) or (n_samples, n_classes) + Decision function values related to each class, per sample. + In the two-class case, the shape is `(n_samples,)`, giving the + log likelihood ratio of the positive class. + """ + # Only overrides for the docstring. + return super().decision_function(X) diff --git a/py311/lib/python3.11/site-packages/sklearn/dummy.py b/py311/lib/python3.11/site-packages/sklearn/dummy.py new file mode 100644 index 0000000000000000000000000000000000000000..f0823567abd9e6abc7f0c20bb818a64e726274d4 --- /dev/null +++ b/py311/lib/python3.11/site-packages/sklearn/dummy.py @@ -0,0 +1,700 @@ +"""Dummy estimators that implement simple rules of thumb.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from numbers import Integral, Real + +import numpy as np +import scipy.sparse as sp + +from sklearn.base import ( + BaseEstimator, + ClassifierMixin, + MultiOutputMixin, + RegressorMixin, + _fit_context, +) +from sklearn.utils import check_random_state +from sklearn.utils._param_validation import Interval, StrOptions +from sklearn.utils.multiclass import class_distribution +from sklearn.utils.random import _random_choice_csc +from sklearn.utils.stats import _weighted_percentile +from sklearn.utils.validation import ( + _check_sample_weight, + _num_samples, + check_array, + check_consistent_length, + check_is_fitted, + validate_data, +) + + +class DummyClassifier(MultiOutputMixin, ClassifierMixin, BaseEstimator): + """DummyClassifier makes predictions that ignore the input features. + + This classifier serves as a simple baseline to compare against other more + complex classifiers. + + The specific behavior of the baseline is selected with the `strategy` + parameter. + + All strategies make predictions that ignore the input feature values passed + as the `X` argument to `fit` and `predict`. 
The predictions, however,
+    typically depend on values observed in the `y` parameter passed to `fit`.
+
+    Note that the "stratified" and "uniform" strategies lead to
+    non-deterministic predictions that can be rendered deterministic by setting
+    the `random_state` parameter if needed. The other strategies are naturally
+    deterministic and, once fit, always return the same constant prediction
+    for any value of `X`.
+
+    Read more in the :ref:`User Guide <dummy_estimators>`.
+
+    .. versionadded:: 0.13
+
+    Parameters
+    ----------
+    strategy : {"most_frequent", "prior", "stratified", "uniform", \
+            "constant"}, default="prior"
+        Strategy to use to generate predictions.
+
+        * "most_frequent": the `predict` method always returns the most
+          frequent class label in the observed `y` argument passed to `fit`.
+          The `predict_proba` method returns the matching one-hot encoded
+          vector.
+        * "prior": the `predict` method always returns the most frequent
+          class label in the observed `y` argument passed to `fit` (like
+          "most_frequent"). ``predict_proba`` always returns the empirical
+          class distribution of `y`, also known as the empirical class prior
+          distribution.
+        * "stratified": the `predict_proba` method randomly samples one-hot
+          vectors from a multinomial distribution parametrized by the empirical
+          class prior probabilities.
+          The `predict` method returns the class label which got probability
+          one in the one-hot vector of `predict_proba`.
+          Each sampled row of both methods is therefore independent and
+          identically distributed.
+        * "uniform": generates predictions uniformly at random from the list
+          of unique classes observed in `y`, i.e. each class has equal
+          probability.
+        * "constant": always predicts a constant label that is provided by
+          the user. This is useful for metrics that evaluate a non-majority
+          class.
+
+        .. versionchanged:: 0.24
+           The default value of `strategy` changed to "prior".
+
+    random_state : int, RandomState instance or None, default=None
+        Controls the randomness used to generate the predictions when
+        ``strategy='stratified'`` or ``strategy='uniform'``.
+        Pass an int for reproducible output across multiple function calls.
+        See :term:`Glossary <random_state>`.
+
+    constant : int or str or array-like of shape (n_outputs,), default=None
+        The explicit constant as predicted by the "constant" strategy. This
+        parameter is useful only for the "constant" strategy.
+
+    Attributes
+    ----------
+    classes_ : ndarray of shape (n_classes,) or list of such arrays
+        Unique class labels observed in `y`. For multi-output classification
+        problems, this attribute is a list of arrays as each output has an
+        independent set of possible classes.
+
+    n_classes_ : int or list of int
+        Number of labels for each output.
+
+    class_prior_ : ndarray of shape (n_classes,) or list of such arrays
+        Frequency of each class observed in `y`. For multi-output
+        classification problems, this is computed independently for each
+        output.
+
+    n_features_in_ : int
+        Number of features seen during :term:`fit`.
+
+    feature_names_in_ : ndarray of shape (`n_features_in_`,)
+        Names of features seen during :term:`fit`. Defined only when `X` has
+        feature names that are all strings.
+
+    n_outputs_ : int
+        Number of outputs.
+
+    sparse_output_ : bool
+        True if the array returned from predict is to be in sparse CSC format.
+        Is automatically set to True if the input `y` is passed in sparse
+        format.
+
+    See Also
+    --------
+    DummyRegressor : Regressor that makes predictions using simple rules.
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.dummy import DummyClassifier + >>> X = np.array([-1, 1, 1, 1]) + >>> y = np.array([0, 1, 1, 1]) + >>> dummy_clf = DummyClassifier(strategy="most_frequent") + >>> dummy_clf.fit(X, y) + DummyClassifier(strategy='most_frequent') + >>> dummy_clf.predict(X) + array([1, 1, 1, 1]) + >>> dummy_clf.score(X, y) + 0.75 + """ + + _parameter_constraints: dict = { + "strategy": [ + StrOptions({"most_frequent", "prior", "stratified", "uniform", "constant"}) + ], + "random_state": ["random_state"], + "constant": [Integral, str, "array-like", None], + } + + def __init__(self, *, strategy="prior", random_state=None, constant=None): + self.strategy = strategy + self.random_state = random_state + self.constant = constant + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the baseline classifier. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + self : object + Returns the instance itself. + """ + validate_data(self, X, skip_check_array=True) + + self._strategy = self.strategy + + if self._strategy == "uniform" and sp.issparse(y): + y = y.toarray() + warnings.warn( + ( + "A local copy of the target data has been converted " + "to a numpy array. Predicting on sparse target data " + "with the uniform strategy would not save memory " + "and would be slower." + ), + UserWarning, + ) + + self.sparse_output_ = sp.issparse(y) + + if not self.sparse_output_: + y = np.asarray(y) + y = np.atleast_1d(y) + + if y.ndim == 1: + y = np.reshape(y, (-1, 1)) + + self.n_outputs_ = y.shape[1] + + check_consistent_length(X, y) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + if self._strategy == "constant": + if self.constant is None: + raise ValueError( + "Constant target value has to be specified " + "when the constant strategy is used." + ) + else: + constant = np.reshape(np.atleast_1d(self.constant), (-1, 1)) + if constant.shape[0] != self.n_outputs_: + raise ValueError( + "Constant target value should have shape (%d, 1)." + % self.n_outputs_ + ) + + (self.classes_, self.n_classes_, self.class_prior_) = class_distribution( + y, sample_weight + ) + + if self._strategy == "constant": + for k in range(self.n_outputs_): + if not any(constant[k][0] == c for c in self.classes_[k]): + # Checking in case of constant strategy if the constant + # provided by the user is in y. + err_msg = ( + "The constant target value must be present in " + "the training data. You provided constant={}. " + "Possible values are: {}.".format( + self.constant, self.classes_[k].tolist() + ) + ) + raise ValueError(err_msg) + + if self.n_outputs_ == 1: + self.n_classes_ = self.n_classes_[0] + self.classes_ = self.classes_[0] + self.class_prior_ = self.class_prior_[0] + + return self + + def predict(self, X): + """Perform classification on test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test data. + + Returns + ------- + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + Predicted target values for X. 
+ """ + check_is_fitted(self) + + # numpy random_state expects Python int and not long as size argument + # under Windows + n_samples = _num_samples(X) + rs = check_random_state(self.random_state) + + n_classes_ = self.n_classes_ + classes_ = self.classes_ + class_prior_ = self.class_prior_ + constant = self.constant + if self.n_outputs_ == 1: + # Get same type even for self.n_outputs_ == 1 + n_classes_ = [n_classes_] + classes_ = [classes_] + class_prior_ = [class_prior_] + constant = [constant] + # Compute probability only once + if self._strategy == "stratified": + proba = self.predict_proba(X) + if self.n_outputs_ == 1: + proba = [proba] + + if self.sparse_output_: + class_prob = None + if self._strategy in ("most_frequent", "prior"): + classes_ = [np.array([cp.argmax()]) for cp in class_prior_] + + elif self._strategy == "stratified": + class_prob = class_prior_ + + elif self._strategy == "uniform": + raise ValueError( + "Sparse target prediction is not " + "supported with the uniform strategy" + ) + + elif self._strategy == "constant": + classes_ = [np.array([c]) for c in constant] + + y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state) + else: + if self._strategy in ("most_frequent", "prior"): + y = np.tile( + [ + classes_[k][class_prior_[k].argmax()] + for k in range(self.n_outputs_) + ], + [n_samples, 1], + ) + + elif self._strategy == "stratified": + y = np.vstack( + [ + classes_[k][proba[k].argmax(axis=1)] + for k in range(self.n_outputs_) + ] + ).T + + elif self._strategy == "uniform": + ret = [ + classes_[k][rs.randint(n_classes_[k], size=n_samples)] + for k in range(self.n_outputs_) + ] + y = np.vstack(ret).T + + elif self._strategy == "constant": + y = np.tile(self.constant, (n_samples, 1)) + + if self.n_outputs_ == 1: + y = np.ravel(y) + + return y + + def predict_proba(self, X): + """ + Return probability estimates for the test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test data. + + Returns + ------- + P : ndarray of shape (n_samples, n_classes) or list of such arrays + Returns the probability of the sample for each class in + the model, where classes are ordered arithmetically, for each + output. 
+ """ + check_is_fitted(self) + + # numpy random_state expects Python int and not long as size argument + # under Windows + n_samples = _num_samples(X) + rs = check_random_state(self.random_state) + + n_classes_ = self.n_classes_ + classes_ = self.classes_ + class_prior_ = self.class_prior_ + constant = self.constant + if self.n_outputs_ == 1: + # Get same type even for self.n_outputs_ == 1 + n_classes_ = [n_classes_] + classes_ = [classes_] + class_prior_ = [class_prior_] + constant = [constant] + + P = [] + for k in range(self.n_outputs_): + if self._strategy == "most_frequent": + ind = class_prior_[k].argmax() + out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) + out[:, ind] = 1.0 + elif self._strategy == "prior": + out = np.ones((n_samples, 1)) * class_prior_[k] + + elif self._strategy == "stratified": + out = rs.multinomial(1, class_prior_[k], size=n_samples) + out = out.astype(np.float64) + + elif self._strategy == "uniform": + out = np.ones((n_samples, n_classes_[k]), dtype=np.float64) + out /= n_classes_[k] + + elif self._strategy == "constant": + ind = np.where(classes_[k] == constant[k]) + out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) + out[:, ind] = 1.0 + + P.append(out) + + if self.n_outputs_ == 1: + P = P[0] + + return P + + def predict_log_proba(self, X): + """ + Return log probability estimates for the test vectors X. + + Parameters + ---------- + X : {array-like, object with finite length or shape} + Training data. + + Returns + ------- + P : ndarray of shape (n_samples, n_classes) or list of such arrays + Returns the log probability of the sample for each class in + the model, where classes are ordered arithmetically for each + output. + """ + proba = self.predict_proba(X) + if self.n_outputs_ == 1: + return np.log(proba) + else: + return [np.log(p) for p in proba] + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + tags.classifier_tags.poor_score = True + tags.no_validation = True + return tags + + def score(self, X, y, sample_weight=None): + """Return the mean accuracy on the given test data and labels. + + In multi-label classification, this is the subset accuracy + which is a harsh metric since you require for each sample that + each label set be correctly predicted. + + Parameters + ---------- + X : None or array-like of shape (n_samples, n_features) + Test samples. Passing None as test samples gives the same result + as passing real test samples, since DummyClassifier + operates independently of the sampled observations. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + True labels for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + Mean accuracy of self.predict(X) w.r.t. y. + """ + if X is None: + X = np.zeros(shape=(len(y), 1)) + return super().score(X, y, sample_weight) + + +class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator): + """Regressor that makes predictions using simple rules. + + This regressor is useful as a simple baseline to compare with other + (real) regressors. Do not use it for real problems. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.13 + + Parameters + ---------- + strategy : {"mean", "median", "quantile", "constant"}, default="mean" + Strategy to use to generate predictions. 
+ + * "mean": always predicts the mean of the training set + * "median": always predicts the median of the training set + * "quantile": always predicts a specified quantile of the training set, + provided with the quantile parameter. + * "constant": always predicts a constant value that is provided by + the user. + + constant : int or float or array-like of shape (n_outputs,), default=None + The explicit constant as predicted by the "constant" strategy. This + parameter is useful only for the "constant" strategy. + + quantile : float in [0.0, 1.0], default=None + The quantile to predict using the "quantile" strategy. A quantile of + 0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the + maximum. + + Attributes + ---------- + constant_ : ndarray of shape (1, n_outputs) + Mean or median or quantile of the training targets or constant value + given by the user. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` has + feature names that are all strings. + + n_outputs_ : int + Number of outputs. + + See Also + -------- + DummyClassifier: Classifier that makes predictions using simple rules. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.dummy import DummyRegressor + >>> X = np.array([1.0, 2.0, 3.0, 4.0]) + >>> y = np.array([2.0, 3.0, 5.0, 10.0]) + >>> dummy_regr = DummyRegressor(strategy="mean") + >>> dummy_regr.fit(X, y) + DummyRegressor() + >>> dummy_regr.predict(X) + array([5., 5., 5., 5.]) + >>> dummy_regr.score(X, y) + 0.0 + """ + + _parameter_constraints: dict = { + "strategy": [StrOptions({"mean", "median", "quantile", "constant"})], + "quantile": [Interval(Real, 0.0, 1.0, closed="both"), None], + "constant": [ + Interval(Real, None, None, closed="neither"), + "array-like", + None, + ], + } + + def __init__(self, *, strategy="mean", constant=None, quantile=None): + self.strategy = strategy + self.constant = constant + self.quantile = quantile + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the baseline regressor. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + Target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + self : object + Fitted estimator. + """ + validate_data(self, X, skip_check_array=True) + + y = check_array(y, ensure_2d=False, input_name="y") + if len(y) == 0: + raise ValueError("y must not be empty.") + + if y.ndim == 1: + y = np.reshape(y, (-1, 1)) + self.n_outputs_ = y.shape[1] + + check_consistent_length(X, y, sample_weight) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + if self.strategy == "mean": + self.constant_ = np.average(y, axis=0, weights=sample_weight) + + elif self.strategy == "median": + if sample_weight is None: + self.constant_ = np.median(y, axis=0) + else: + self.constant_ = _weighted_percentile( + y, sample_weight, percentile_rank=50.0 + ) + + elif self.strategy == "quantile": + if self.quantile is None: + raise ValueError( + "When using `strategy='quantile', you have to specify the desired " + "quantile in the range [0, 1]." 
+                )
+            percentile_rank = self.quantile * 100.0
+            if sample_weight is None:
+                self.constant_ = np.percentile(y, axis=0, q=percentile_rank)
+            else:
+                self.constant_ = _weighted_percentile(
+                    y, sample_weight, percentile_rank=percentile_rank
+                )
+
+        elif self.strategy == "constant":
+            if self.constant is None:
+                raise TypeError(
+                    "Constant target value has to be specified "
+                    "when the constant strategy is used."
+                )
+
+            self.constant_ = check_array(
+                self.constant,
+                accept_sparse=["csr", "csc", "coo"],
+                ensure_2d=False,
+                ensure_min_samples=0,
+            )
+
+            if self.n_outputs_ != 1 and self.constant_.shape[0] != y.shape[1]:
+                raise ValueError(
+                    "Constant target value should have shape (%d, 1)." % y.shape[1]
+                )
+
+        self.constant_ = np.reshape(self.constant_, (1, -1))
+        return self
+
+    def predict(self, X, return_std=False):
+        """Perform prediction on test vectors X.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            Test data.
+
+        return_std : bool, default=False
+            Whether to return the standard deviation of posterior prediction.
+            All zeros in this case.
+
+            .. versionadded:: 0.20
+
+        Returns
+        -------
+        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
+            Predicted target values for X.
+
+        y_std : array-like of shape (n_samples,) or (n_samples, n_outputs)
+            Standard deviation of predictive distribution of query points.
+        """
+        check_is_fitted(self)
+        n_samples = _num_samples(X)
+
+        y = np.full(
+            (n_samples, self.n_outputs_),
+            self.constant_,
+            dtype=np.array(self.constant_).dtype,
+        )
+        y_std = np.zeros((n_samples, self.n_outputs_))
+
+        if self.n_outputs_ == 1:
+            y = np.ravel(y)
+            y_std = np.ravel(y_std)
+
+        return (y, y_std) if return_std else y
+
+    def __sklearn_tags__(self):
+        tags = super().__sklearn_tags__()
+        tags.input_tags.sparse = True
+        tags.regressor_tags.poor_score = True
+        tags.no_validation = True
+        return tags
+
+    def score(self, X, y, sample_weight=None):
+        """Return the coefficient of determination R^2 of the prediction.
+
+        The coefficient R^2 is defined as `(1 - u/v)`, where `u` is the
+        residual sum of squares `((y_true - y_pred) ** 2).sum()` and `v` is the
+        total sum of squares `((y_true - y_true.mean()) ** 2).sum()`. The best
+        possible score is 1.0 and it can be negative (because the model can be
+        arbitrarily worse). A constant model that always predicts the expected
+        value of y, disregarding the input features, would get an R^2 score of
+        0.0.
+
+        Parameters
+        ----------
+        X : None or array-like of shape (n_samples, n_features)
+            Test samples. Passing None as test samples gives the same result
+            as passing real test samples, since `DummyRegressor`
+            operates independently of the sampled observations.
+
+        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
+            True values for X.
+
+        sample_weight : array-like of shape (n_samples,), default=None
+            Sample weights.
+
+        Returns
+        -------
+        score : float
+            R^2 of `self.predict(X)` w.r.t. y.
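+
+        Examples
+        --------
+        A minimal usage sketch (illustrative, made-up data): the mean
+        predictor scores exactly 0.0 on its own training targets, and
+        `X=None` is accepted because the features are ignored:
+
+        >>> import numpy as np
+        >>> from sklearn.dummy import DummyRegressor
+        >>> y = np.array([1.0, 2.0, 3.0])
+        >>> reg = DummyRegressor().fit(np.zeros((3, 1)), y)
+        >>> reg.score(None, y)
+        0.0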
+ """ + if X is None: + X = np.zeros(shape=(len(y), 1)) + return super().score(X, y, sample_weight) diff --git a/py311/lib/python3.11/site-packages/sklearn/isotonic.py b/py311/lib/python3.11/site-packages/sklearn/isotonic.py new file mode 100644 index 0000000000000000000000000000000000000000..ee73ac2c0f545b83a2f16883c349414268c29c14 --- /dev/null +++ b/py311/lib/python3.11/site-packages/sklearn/isotonic.py @@ -0,0 +1,515 @@ +"""Isotonic regression for obtaining monotonic fit to data.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import math +import warnings +from numbers import Real + +import numpy as np +from scipy import interpolate, optimize +from scipy.stats import spearmanr + +from sklearn._isotonic import _inplace_contiguous_isotonic_regression, _make_unique +from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin, _fit_context +from sklearn.utils import check_array, check_consistent_length, metadata_routing +from sklearn.utils._param_validation import Interval, StrOptions, validate_params +from sklearn.utils.fixes import parse_version, sp_base_version +from sklearn.utils.validation import _check_sample_weight, check_is_fitted + +__all__ = ["IsotonicRegression", "check_increasing", "isotonic_regression"] + + +@validate_params( + { + "x": ["array-like"], + "y": ["array-like"], + }, + prefer_skip_nested_validation=True, +) +def check_increasing(x, y): + """Determine whether y is monotonically correlated with x. + + y is found increasing or decreasing with respect to x based on a Spearman + correlation test. + + Parameters + ---------- + x : array-like of shape (n_samples,) + Training data. + + y : array-like of shape (n_samples,) + Training target. + + Returns + ------- + increasing_bool : boolean + Whether the relationship is increasing or decreasing. + + Notes + ----- + The Spearman correlation coefficient is estimated from the data, and the + sign of the resulting estimate is used as the result. + + In the event that the 95% confidence interval based on Fisher transform + spans zero, a warning is raised. + + References + ---------- + Fisher transformation. Wikipedia. + https://en.wikipedia.org/wiki/Fisher_transformation + + Examples + -------- + >>> from sklearn.isotonic import check_increasing + >>> x, y = [1, 2, 3, 4, 5], [2, 4, 6, 8, 10] + >>> check_increasing(x, y) + np.True_ + >>> y = [10, 8, 6, 4, 2] + >>> check_increasing(x, y) + np.False_ + """ + + # Calculate Spearman rho estimate and set return accordingly. + rho, _ = spearmanr(x, y) + increasing_bool = rho >= 0 + + # Run Fisher transform to get the rho CI, but handle rho=+/-1 + if rho not in [-1.0, 1.0] and len(x) > 3: + F = 0.5 * math.log((1.0 + rho) / (1.0 - rho)) + F_se = 1 / math.sqrt(len(x) - 3) + + # Use a 95% CI, i.e., +/-1.96 S.E. + # https://en.wikipedia.org/wiki/Fisher_transformation + rho_0 = math.tanh(F - 1.96 * F_se) + rho_1 = math.tanh(F + 1.96 * F_se) + + # Warn if the CI spans zero. + if np.sign(rho_0) != np.sign(rho_1): + warnings.warn( + "Confidence interval of the Spearman " + "correlation coefficient spans zero. " + "Determination of ``increasing`` may be " + "suspect." 
+        )
+
+    return increasing_bool
+
+
+@validate_params(
+    {
+        "y": ["array-like"],
+        "sample_weight": ["array-like", None],
+        "y_min": [Interval(Real, None, None, closed="both"), None],
+        "y_max": [Interval(Real, None, None, closed="both"), None],
+        "increasing": ["boolean"],
+    },
+    prefer_skip_nested_validation=True,
+)
+def isotonic_regression(
+    y, *, sample_weight=None, y_min=None, y_max=None, increasing=True
+):
+    """Solve the isotonic regression model.
+
+    Read more in the :ref:`User Guide <isotonic>`.
+
+    Parameters
+    ----------
+    y : array-like of shape (n_samples,)
+        The data.
+
+    sample_weight : array-like of shape (n_samples,), default=None
+        Weights on each point of the regression.
+        If None, weight is set to 1 (equal weights).
+
+    y_min : float, default=None
+        Lower bound on the lowest predicted value (the minimum value may
+        still be higher). If not set, defaults to -inf.
+
+    y_max : float, default=None
+        Upper bound on the highest predicted value (the maximum may still be
+        lower). If not set, defaults to +inf.
+
+    increasing : bool, default=True
+        Whether the fitted values ``y_`` should be increasing (if set to True)
+        or decreasing (if set to False).
+
+    Returns
+    -------
+    y_ : ndarray of shape (n_samples,)
+        Isotonic fit of y.
+
+    References
+    ----------
+    "Active set algorithms for isotonic regression; A unifying framework"
+    by Michael J. Best and Nilotpal Chakravarti, section 3.
+
+    Examples
+    --------
+    >>> from sklearn.isotonic import isotonic_regression
+    >>> isotonic_regression([5, 3, 1, 2, 8, 10, 7, 9, 6, 4])
+    array([2.75, 2.75, 2.75, 2.75, 7.33,
+           7.33, 7.33, 7.33, 7.33, 7.33])
+    """
+    y = check_array(y, ensure_2d=False, input_name="y", dtype=[np.float64, np.float32])
+    if sp_base_version >= parse_version("1.12.0"):
+        res = optimize.isotonic_regression(
+            y=y, weights=sample_weight, increasing=increasing
+        )
+        y = np.asarray(res.x, dtype=y.dtype)
+    else:
+        # TODO: remove this branch when Scipy 1.12 is the minimum supported version
+        # Also remove _inplace_contiguous_isotonic_regression.
+        order = np.s_[:] if increasing else np.s_[::-1]
+        y = np.array(y[order], dtype=y.dtype)
+        sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype, copy=True)
+        sample_weight = np.ascontiguousarray(sample_weight[order])
+        _inplace_contiguous_isotonic_regression(y, sample_weight)
+        y = y[order]
+
+    if y_min is not None or y_max is not None:
+        # Older versions of np.clip don't accept None as a bound, so use np.inf
+        if y_min is None:
+            y_min = -np.inf
+        if y_max is None:
+            y_max = np.inf
+        np.clip(y, y_min, y_max, y)
+    return y
+
+
+class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator):
+    """Isotonic regression model.
+
+    Read more in the :ref:`User Guide <isotonic>`.
+
+    .. versionadded:: 0.13
+
+    Parameters
+    ----------
+    y_min : float, default=None
+        Lower bound on the lowest predicted value (the minimum value may
+        still be higher). If not set, defaults to -inf.
+
+    y_max : float, default=None
+        Upper bound on the highest predicted value (the maximum may still be
+        lower). If not set, defaults to +inf.
+
+    increasing : bool or 'auto', default=True
+        Determines whether the predictions should be constrained to increase
+        or decrease with `X`. 'auto' will decide based on the Spearman
+        correlation estimate's sign.
+
+    out_of_bounds : {'nan', 'clip', 'raise'}, default='nan'
+        Determines how `X` values outside of the training domain are handled
+        during prediction.
+
+        - 'nan', predictions will be NaN.
+ - 'clip', predictions will be set to the value corresponding to + the nearest train interval endpoint. + - 'raise', a `ValueError` is raised. + + Attributes + ---------- + X_min_ : float + Minimum value of input array `X_` for left bound. + + X_max_ : float + Maximum value of input array `X_` for right bound. + + X_thresholds_ : ndarray of shape (n_thresholds,) + Unique ascending `X` values used to interpolate + the y = f(X) monotonic function. + + .. versionadded:: 0.24 + + y_thresholds_ : ndarray of shape (n_thresholds,) + De-duplicated `y` values suitable to interpolate the y = f(X) + monotonic function. + + .. versionadded:: 0.24 + + f_ : function + The stepwise interpolating function that covers the input domain ``X``. + + increasing_ : bool + Inferred value for ``increasing``. + + See Also + -------- + sklearn.linear_model.LinearRegression : Ordinary least squares Linear + Regression. + sklearn.ensemble.HistGradientBoostingRegressor : Gradient boosting that + is a non-parametric model accepting monotonicity constraints. + isotonic_regression : Function to solve the isotonic regression model. + + Notes + ----- + Ties are broken using the secondary method from de Leeuw, 1977. + + References + ---------- + Isotonic Median Regression: A Linear Programming Approach + Nilotpal Chakravarti + Mathematics of Operations Research + Vol. 14, No. 2 (May, 1989), pp. 303-308 + + Isotone Optimization in R : Pool-Adjacent-Violators + Algorithm (PAVA) and Active Set Methods + de Leeuw, Hornik, Mair + Journal of Statistical Software 2009 + + Correctness of Kruskal's algorithms for monotone regression with ties + de Leeuw, Psychometrica, 1977 + + Examples + -------- + >>> from sklearn.datasets import make_regression + >>> from sklearn.isotonic import IsotonicRegression + >>> X, y = make_regression(n_samples=10, n_features=1, random_state=41) + >>> iso_reg = IsotonicRegression().fit(X, y) + >>> iso_reg.predict([.1, .2]) + array([1.8628, 3.7256]) + """ + + # T should have been called X + __metadata_request__predict = {"T": metadata_routing.UNUSED} + __metadata_request__transform = {"T": metadata_routing.UNUSED} + + _parameter_constraints: dict = { + "y_min": [Interval(Real, None, None, closed="both"), None], + "y_max": [Interval(Real, None, None, closed="both"), None], + "increasing": ["boolean", StrOptions({"auto"})], + "out_of_bounds": [StrOptions({"nan", "clip", "raise"})], + } + + def __init__(self, *, y_min=None, y_max=None, increasing=True, out_of_bounds="nan"): + self.y_min = y_min + self.y_max = y_max + self.increasing = increasing + self.out_of_bounds = out_of_bounds + + def _check_input_data_shape(self, X): + if not (X.ndim == 1 or (X.ndim == 2 and X.shape[1] == 1)): + msg = ( + "Isotonic regression input X should be a 1d array or " + "2d array with 1 feature" + ) + raise ValueError(msg) + + def _build_f(self, X, y): + """Build the f_ interp1d function.""" + + bounds_error = self.out_of_bounds == "raise" + if len(y) == 1: + # single y, constant prediction + self.f_ = lambda x: y.repeat(x.shape) + else: + self.f_ = interpolate.interp1d( + X, y, kind="linear", bounds_error=bounds_error + ) + + def _build_y(self, X, y, sample_weight, trim_duplicates=True): + """Build the y_ IsotonicRegression.""" + self._check_input_data_shape(X) + X = X.reshape(-1) # use 1d view + + # Determine increasing if auto-determination requested + if self.increasing == "auto": + self.increasing_ = check_increasing(X, y) + else: + self.increasing_ = self.increasing + + # If sample_weights is passed, removed zero-weight 
values and clean
+        # order
+        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
+        mask = sample_weight > 0
+        X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
+
+        order = np.lexsort((y, X))
+        X, y, sample_weight = [array[order] for array in [X, y, sample_weight]]
+        unique_X, unique_y, unique_sample_weight = _make_unique(X, y, sample_weight)
+
+        X = unique_X
+        y = isotonic_regression(
+            unique_y,
+            sample_weight=unique_sample_weight,
+            y_min=self.y_min,
+            y_max=self.y_max,
+            increasing=self.increasing_,
+        )
+
+        # Handle the left and right bounds on X
+        self.X_min_, self.X_max_ = np.min(X), np.max(X)
+
+        if trim_duplicates:
+            # Remove unnecessary points for faster prediction
+            keep_data = np.ones((len(y),), dtype=bool)
+            # Aside from the 1st and last point, remove points whose y values
+            # are equal to both the point before and the point after it.
+            keep_data[1:-1] = np.logical_or(
+                np.not_equal(y[1:-1], y[:-2]), np.not_equal(y[1:-1], y[2:])
+            )
+            return X[keep_data], y[keep_data]
+        else:
+            # The ability to turn off trim_duplicates is only used to make it
+            # easier to unit test that removing duplicates in y does not have
+            # any impact on the resulting interpolation function (besides
+            # prediction speed).
+            return X, y
+
+    @_fit_context(prefer_skip_nested_validation=True)
+    def fit(self, X, y, sample_weight=None):
+        """Fit the model using X, y as training data.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples,) or (n_samples, 1)
+            Training data.
+
+            .. versionchanged:: 0.24
+               Also accepts 2d array with 1 feature.
+
+        y : array-like of shape (n_samples,)
+            Training target.
+
+        sample_weight : array-like of shape (n_samples,), default=None
+            Weights. If set to None, all weights will be set to 1 (equal
+            weights).
+
+        Returns
+        -------
+        self : object
+            Returns an instance of self.
+
+        Notes
+        -----
+        X is stored for future use, as :meth:`transform` needs X to interpolate
+        new input data.
+        """
+        check_params = dict(accept_sparse=False, ensure_2d=False)
+        X = check_array(
+            X, input_name="X", dtype=[np.float64, np.float32], **check_params
+        )
+        y = check_array(y, input_name="y", dtype=X.dtype, **check_params)
+        check_consistent_length(X, y, sample_weight)
+
+        # Transform y by running the isotonic regression algorithm and
+        # transform X accordingly.
+        X, y = self._build_y(X, y, sample_weight)
+
+        # It is necessary to store the non-redundant part of the training set
+        # on the model to make it possible to support model persistence via
+        # the pickle module as the object built by scipy.interp1d is not
+        # picklable directly.
+        self.X_thresholds_, self.y_thresholds_ = X, y
+
+        # Build the interpolation function
+        self._build_f(X, y)
+        return self
+
+    def _transform(self, T):
+        """`_transform` is called by both the `transform` and `predict` methods.
+
+        Since `transform` is wrapped to output arrays of specific types (e.g.
+        NumPy arrays, pandas DataFrame), we cannot make `predict` call `transform`
+        directly.
+
+        The above behaviour could be changed in the future if we decide to output
+        other types of arrays when calling `predict`.
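+
+        For instance (a minimal illustrative sketch, with made-up data), the
+        out-of-domain handling performed here when ``out_of_bounds="clip"``:
+
+        >>> from sklearn.isotonic import IsotonicRegression
+        >>> iso = IsotonicRegression(out_of_bounds="clip").fit([1, 2, 3], [1, 2, 3])
+        >>> iso.predict([0, 10])
+        array([1., 3.])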
+ """ + if hasattr(self, "X_thresholds_"): + dtype = self.X_thresholds_.dtype + else: + dtype = np.float64 + + T = check_array(T, dtype=dtype, ensure_2d=False) + + self._check_input_data_shape(T) + T = T.reshape(-1) # use 1d view + + if self.out_of_bounds == "clip": + T = np.clip(T, self.X_min_, self.X_max_) + + res = self.f_(T) + + # on scipy 0.17, interp1d up-casts to float64, so we cast back + res = res.astype(T.dtype) + + return res + + def transform(self, T): + """Transform new data by linear interpolation. + + Parameters + ---------- + T : array-like of shape (n_samples,) or (n_samples, 1) + Data to transform. + + .. versionchanged:: 0.24 + Also accepts 2d array with 1 feature. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + The transformed data. + """ + return self._transform(T) + + def predict(self, T): + """Predict new data by linear interpolation. + + Parameters + ---------- + T : array-like of shape (n_samples,) or (n_samples, 1) + Data to transform. + + Returns + ------- + y_pred : ndarray of shape (n_samples,) + Transformed data. + """ + return self._transform(T) + + # We implement get_feature_names_out here instead of using + # `ClassNamePrefixFeaturesOutMixin`` because `input_features` are ignored. + # `input_features` are ignored because `IsotonicRegression` accepts 1d + # arrays and the semantics of `feature_names_in_` are not clear for 1d arrays. + def get_feature_names_out(self, input_features=None): + """Get output feature names for transformation. + + Parameters + ---------- + input_features : array-like of str or None, default=None + Ignored. + + Returns + ------- + feature_names_out : ndarray of str objects + An ndarray with one string i.e. ["isotonicregression0"]. + """ + check_is_fitted(self, "f_") + class_name = self.__class__.__name__.lower() + return np.asarray([f"{class_name}0"], dtype=object) + + def __getstate__(self): + """Pickle-protocol - return state of the estimator.""" + state = super().__getstate__() + # remove interpolation method + state.pop("f_", None) + return state + + def __setstate__(self, state): + """Pickle-protocol - set state of the estimator. + + We need to rebuild the interpolation function. + """ + super().__setstate__(state) + if hasattr(self, "X_thresholds_") and hasattr(self, "y_thresholds_"): + self._build_f(self.X_thresholds_, self.y_thresholds_) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.one_d_array = True + tags.input_tags.two_d_array = False + return tags diff --git a/py311/lib/python3.11/site-packages/sklearn/kernel_ridge.py b/py311/lib/python3.11/site-packages/sklearn/kernel_ridge.py new file mode 100644 index 0000000000000000000000000000000000000000..900143de952d08b888ab7ce39a462c6d54433f85 --- /dev/null +++ b/py311/lib/python3.11/site-packages/sklearn/kernel_ridge.py @@ -0,0 +1,244 @@ +"""Kernel ridge regression.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from numbers import Real + +import numpy as np + +from sklearn.base import BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context +from sklearn.linear_model._ridge import _solve_cholesky_kernel +from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels +from sklearn.utils._param_validation import Interval, StrOptions +from sklearn.utils.validation import ( + _check_sample_weight, + check_is_fitted, + validate_data, +) + + +class KernelRidge(MultiOutputMixin, RegressorMixin, BaseEstimator): + """Kernel ridge regression. 
+ + Kernel ridge regression (KRR) combines ridge regression (linear least + squares with l2-norm regularization) with the kernel trick. It thus + learns a linear function in the space induced by the respective kernel and + the data. For non-linear kernels, this corresponds to a non-linear + function in the original space. + + The form of the model learned by KRR is identical to support vector + regression (SVR). However, different loss functions are used: KRR uses + squared error loss while support vector regression uses epsilon-insensitive + loss, both combined with l2 regularization. In contrast to SVR, fitting a + KRR model can be done in closed-form and is typically faster for + medium-sized datasets. On the other hand, the learned model is non-sparse + and thus slower than SVR, which learns a sparse model for epsilon > 0, at + prediction-time. + + This estimator has built-in support for multi-variate regression + (i.e., when y is a 2d-array of shape [n_samples, n_targets]). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float or array-like of shape (n_targets,), default=1.0 + Regularization strength; must be a positive float. Regularization + improves the conditioning of the problem and reduces the variance of + the estimates. Larger values specify stronger regularization. + Alpha corresponds to ``1 / (2C)`` in other linear models such as + :class:`~sklearn.linear_model.LogisticRegression` or + :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are + assumed to be specific to the targets. Hence they must correspond in + number. See :ref:`ridge_regression` for formula. + + kernel : str or callable, default="linear" + Kernel mapping used internally. This parameter is directly passed to + :class:`~sklearn.metrics.pairwise.pairwise_kernels`. + If `kernel` is a string, it must be one of the metrics + in `pairwise.PAIRWISE_KERNEL_FUNCTIONS` or "precomputed". + If `kernel` is "precomputed", X is assumed to be a kernel matrix. + Alternatively, if `kernel` is a callable function, it is called on + each pair of instances (rows) and the resulting value recorded. The + callable should take two rows from X as input and return the + corresponding kernel value as a single number. This means that + callables from :mod:`sklearn.metrics.pairwise` are not allowed, as + they operate on matrices, not single samples. Use the string + identifying the kernel instead. + + gamma : float, default=None + Gamma parameter for the RBF, laplacian, polynomial, exponential chi2 + and sigmoid kernels. Interpretation of the default value is left to + the kernel; see the documentation for sklearn.metrics.pairwise. + Ignored by other kernels. + + degree : float, default=3 + Degree of the polynomial kernel. Ignored by other kernels. + + coef0 : float, default=1 + Zero coefficient for polynomial and sigmoid kernels. + Ignored by other kernels. + + kernel_params : dict, default=None + Additional parameters (keyword arguments) for kernel function passed + as callable object. + + Attributes + ---------- + dual_coef_ : ndarray of shape (n_samples,) or (n_samples, n_targets) + Representation of weight vector(s) in kernel space + + X_fit_ : {ndarray, sparse matrix} of shape (n_samples, n_features) + Training data, which is also required for prediction. If + kernel == "precomputed" this is instead the precomputed + training matrix, of shape (n_samples, n_samples). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. 
versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.gaussian_process.GaussianProcessRegressor : Gaussian + Process regressor providing automatic kernel hyperparameters + tuning and predictions uncertainty. + sklearn.linear_model.Ridge : Linear ridge regression. + sklearn.linear_model.RidgeCV : Ridge regression with built-in + cross-validation. + sklearn.svm.SVR : Support Vector Regression accepting a large variety + of kernels. + + References + ---------- + * Kevin P. Murphy + "Machine Learning: A Probabilistic Perspective", The MIT Press + chapter 14.4.3, pp. 492-493 + + Examples + -------- + >>> from sklearn.kernel_ridge import KernelRidge + >>> import numpy as np + >>> n_samples, n_features = 10, 5 + >>> rng = np.random.RandomState(0) + >>> y = rng.randn(n_samples) + >>> X = rng.randn(n_samples, n_features) + >>> krr = KernelRidge(alpha=1.0) + >>> krr.fit(X, y) + KernelRidge(alpha=1.0) + """ + + _parameter_constraints: dict = { + "alpha": [Interval(Real, 0, None, closed="left"), "array-like"], + "kernel": [ + StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}), + callable, + ], + "gamma": [Interval(Real, 0, None, closed="left"), None], + "degree": [Interval(Real, 0, None, closed="left")], + "coef0": [Interval(Real, None, None, closed="neither")], + "kernel_params": [dict, None], + } + + def __init__( + self, + alpha=1, + *, + kernel="linear", + gamma=None, + degree=3, + coef0=1, + kernel_params=None, + ): + self.alpha = alpha + self.kernel = kernel + self.gamma = gamma + self.degree = degree + self.coef0 = coef0 + self.kernel_params = kernel_params + + def _get_kernel(self, X, Y=None): + if callable(self.kernel): + params = self.kernel_params or {} + else: + params = {"gamma": self.gamma, "degree": self.degree, "coef0": self.coef0} + return pairwise_kernels(X, Y, metric=self.kernel, filter_params=True, **params) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + tags.input_tags.pairwise = self.kernel == "precomputed" + return tags + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit Kernel Ridge regression model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. If kernel == "precomputed" this is instead + a precomputed kernel matrix, of shape (n_samples, n_samples). + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + sample_weight : float or array-like of shape (n_samples,), default=None + Individual weights for each sample, ignored if None is passed. + + Returns + ------- + self : object + Returns the instance itself. 
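+
+        Examples
+        --------
+        A minimal usage sketch (illustrative random data):
+
+        >>> import numpy as np
+        >>> from sklearn.kernel_ridge import KernelRidge
+        >>> rng = np.random.RandomState(0)
+        >>> X, y = rng.randn(10, 5), rng.randn(10)
+        >>> krr = KernelRidge(kernel="rbf", gamma=0.1).fit(X, y)
+        >>> krr.dual_coef_.shape
+        (10,)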
+ """ + # Convert data + X, y = validate_data( + self, X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True + ) + if sample_weight is not None and not isinstance(sample_weight, float): + sample_weight = _check_sample_weight(sample_weight, X) + + K = self._get_kernel(X) + alpha = np.atleast_1d(self.alpha) + + ravel = False + if len(y.shape) == 1: + y = y.reshape(-1, 1) + ravel = True + + copy = self.kernel == "precomputed" + self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy) + if ravel: + self.dual_coef_ = self.dual_coef_.ravel() + + self.X_fit_ = X + + return self + + def predict(self, X): + """Predict using the kernel ridge model. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. If kernel == "precomputed" this is instead a + precomputed kernel matrix, shape = [n_samples, + n_samples_fitted], where n_samples_fitted is the number of + samples used in the fitting for this estimator. + + Returns + ------- + C : ndarray of shape (n_samples,) or (n_samples, n_targets) + Returns predicted values. + """ + check_is_fitted(self) + X = validate_data(self, X, accept_sparse=("csr", "csc"), reset=False) + K = self._get_kernel(X, self.X_fit_) + return np.dot(K, self.dual_coef_) diff --git a/py311/lib/python3.11/site-packages/sklearn/meson.build b/py311/lib/python3.11/site-packages/sklearn/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..cce803dd668b6939ab555fd3f08b4dbbbfff93bc --- /dev/null +++ b/py311/lib/python3.11/site-packages/sklearn/meson.build @@ -0,0 +1,264 @@ +fs = import('fs') + +# Platform detection +is_windows = host_machine.system() == 'windows' +is_mingw = is_windows and cc.get_id() == 'gcc' + +# Adapted from Scipy. mingw is untested and not officially supported. If you +# ever bump into issues when trying to compile for mingw, please open an issue +# in the scikit-learn issue tracker +if is_mingw + # For mingw-w64, link statically against the UCRT. + gcc_link_args = ['-lucrt', '-static'] + add_project_link_arguments(gcc_link_args, language: ['c', 'cpp']) + # Force gcc to float64 long doubles for compatibility with MSVC + # builds, for C only. + add_project_arguments('-mlong-double-64', language: 'c') +endif + +# Only check build dependencies version when not cross-compiling, as running +# Python interpreter can be tricky in cross-compilation settings. 
For more +# details, see https://docs.scipy.org/doc/scipy/building/cross_compilation.html +if not meson.is_cross_build() + if not py.version().version_compare('>=3.11') + error('scikit-learn requires Python>=3.11, got ' + py.version() + ' instead') + endif + + cython_min_version = run_command(py, ['_min_dependencies.py', 'cython'], check: true).stdout().strip() + if not cython.version().version_compare('>=' + cython_min_version) + error('scikit-learn requires Cython>=' + cython_min_version + ', got ' + cython.version() + ' instead') + endif + + numpy_version = run_command(py, + ['-c', 'import numpy; print(numpy.__version__)'], check: true).stdout().strip() + numpy_min_version = run_command(py, ['_min_dependencies.py', 'numpy'], check: true).stdout().strip() + if not numpy_version.version_compare('>=' + numpy_min_version) + error('scikit-learn requires numpy>=' + numpy_min_version + ', got ' + numpy_version + ' instead') + endif + + scipy_version = run_command(py, + ['-c', 'import scipy; print(scipy.__version__)'], check: true).stdout().strip() + scipy_min_version = run_command(py, ['_min_dependencies.py', 'scipy'], check: true).stdout().strip() + if not scipy_version.version_compare('>=' + scipy_min_version) + error('scikit-learn requires scipy>=' + scipy_min_version + ', got ' + scipy_version + ' instead') + endif + + # meson-python is required only when going through pip. Using meson directly + # should not check meson-python version. + meson_python_version_command_result = run_command(py, + ['-c', 'import importlib.metadata; print(importlib.metadata.version("meson-python"))'], check: false) + meson_python_installed = meson_python_version_command_result.returncode() == 0 + if meson_python_installed + meson_python_version = meson_python_version_command_result.stdout().strip() + meson_python_min_version = run_command(py, ['_min_dependencies.py', 'meson-python'], check: true).stdout().strip() + if not meson_python_version.version_compare('>=' + meson_python_min_version) + error('scikit-learn requires meson-python>=' + meson_python_min_version + ', got ' + meson_python_version + ' instead') + endif + endif + +endif + +# Adapted from scipy, each project seems to have its own tweaks for this. One +# day using dependency('numpy') will be a thing, see +# https://github.com/mesonbuild/meson/issues/9598. +# NumPy include directory - needed in all submodules +# Relative paths are needed when for example a virtualenv is +# placed inside the source tree; Meson rejects absolute paths to places inside +# the source tree. The try-except is needed because when things are split +# across drives on Windows, there is no relative path and an exception gets +# raised. There may be other such cases, so add a catch-all and switch to +# an absolute path. +# For cross-compilation it is often not possible to run the Python interpreter +# in order to retrieve numpy's include directory. It can be specified in the +# cross file instead: +# [properties] +# numpy-include-dir = /abspath/to/host-pythons/site-packages/numpy/core/include +# +# This uses the path as is, and avoids running the interpreter. 
+incdir_numpy = meson.get_external_property('numpy-include-dir', 'not-given') +if incdir_numpy == 'not-given' + incdir_numpy = run_command(py, + [ + '-c', + ''' +import os +import numpy as np +try: + incdir = os.path.relpath(np.get_include()) +except Exception: + incdir = np.get_include() +print(incdir) +''' + ], + check: true + ).stdout().strip() +endif + +inc_np = include_directories(incdir_numpy) +# Don't use the deprecated NumPy C API. Define this to a fixed version instead of +# NPY_API_VERSION in order not to break compilation for released SciPy versions +# when NumPy introduces a new deprecation. +numpy_no_deprecated_api = ['-DNPY_NO_DEPRECATED_API=NPY_1_22_API_VERSION'] +np_dep = declare_dependency(include_directories: inc_np, compile_args: numpy_no_deprecated_api) + +openmp_dep = dependency('OpenMP', language: 'c', required: false) + +if not openmp_dep.found() + warn_about_missing_openmp = true + # On Apple Clang avoid a misleading warning if compiler variables are set. + # See https://github.com/scikit-learn/scikit-learn/issues/28710 for more + # details. This may be removed if the OpenMP detection on Apple Clang improves, + # see https://github.com/mesonbuild/meson/issues/7435#issuecomment-2047585466. + if host_machine.system() == 'darwin' and cc.get_id() == 'clang' + compiler_env_vars_with_openmp = run_command(py, + [ + '-c', + ''' +import os + +compiler_env_vars_to_check = ["CPPFLAGS", "CFLAGS", "CXXFLAGS"] + +compiler_env_vars_with_openmp = [ + var for var in compiler_env_vars_to_check if "-fopenmp" in os.getenv(var, "")] +print(compiler_env_vars_with_openmp) +'''], check: true).stdout().strip() + warn_about_missing_openmp = compiler_env_vars_with_openmp == '[]' + endif + if warn_about_missing_openmp + warning( +''' + *********** + * WARNING * + *********** + +It seems that scikit-learn cannot be built with OpenMP. + +- Make sure you have followed the installation instructions: + + https://scikit-learn.org/dev/developers/advanced_installation.html + +- If your compiler supports OpenMP but you still see this + message, please submit a bug report at: + + https://github.com/scikit-learn/scikit-learn/issues + +- The build will continue with OpenMP-based parallelism + disabled. Note however that some estimators will run in + sequential mode instead of leveraging thread-based + parallelism. + + *** +''') + else + warning( +'''It looks like compiler environment variables were set to enable OpenMP support. +Check the output of "import sklearn; sklearn.show_versions()" after the build +to make sure that scikit-learn was actually built with OpenMP support. +''') + endif +endif + +# For now, we keep supporting SKLEARN_ENABLE_DEBUG_CYTHON_DIRECTIVES variable +# (see how it is done in sklearn/_build_utils/__init__.py when building with +# setuptools). Accessing environment variables in meson.build is discouraged, +# so once we drop setuptools this functionality should be behind a meson option +# or buildtype +boundscheck = run_command(py, + [ + '-c', + ''' +import os + +if os.environ.get("SKLEARN_ENABLE_DEBUG_CYTHON_DIRECTIVES", "0") != "0": + print(True) +else: + print(False) + ''' + ], + check: true + ).stdout().strip() + +cython_args = [] +cython_program = find_program(cython.cmd_array()[0]) + +scikit_learn_cython_args = [ + '--depfile', + '-X language_level=3', '-X boundscheck=' + boundscheck, '-X wraparound=False', + '-X initializedcheck=False', '-X nonecheck=False', '-X cdivision=True', + '-X profile=False', + # Needed for cython imports across subpackages, e.g. 
cluster pyx that + # cimports metrics pxd + '--include-dir', meson.global_build_root(), +] +cython_args += scikit_learn_cython_args + +if cython.version().version_compare('>=3.1.0') + cython_args += ['-Xfreethreading_compatible=True'] + cython_shared_src = custom_target( + install: false, + output: '_cyutility.c', + command: [ + cython_program, '-3', '--fast-fail', '-Xfreethreading_compatible=True', + '--generate-shared=' + meson.current_build_dir()/'_cyutility.c' + ], + ) + + py.extension_module('_cyutility', + cython_shared_src, + subdir: 'sklearn', + cython_args: cython_args, + install: true, + ) + + cython_args += ['--shared=sklearn._cyutility'] +endif + +cython_gen = generator(cython_program, + arguments : cython_args + ['@INPUT@', '--output-file', '@OUTPUT@'], + output : '@BASENAME@.c', + depfile: '@BASENAME@.c.dep', +) + +cython_gen_cpp = generator(cython_program, + arguments : cython_args + ['--cplus', '@INPUT@', '--output-file', '@OUTPUT@'], + output : '@BASENAME@.cpp', + depfile: '@BASENAME@.cpp.dep' +) + +extensions = ['_isotonic'] + +py.extension_module( + '_isotonic', + cython_gen.process('_isotonic.pyx'), + cython_args: cython_args, + install: true, + subdir: 'sklearn', +) + +# Need for Cython cimports across subpackages to work, i.e. avoid errors like +# relative cimport from non-package directory is not allowed +sklearn_root_cython_tree = [ + fs.copyfile('__init__.py') +] + +sklearn_dir = py.get_install_dir() / 'sklearn' + +# Subpackages are mostly in alphabetical order except to handle Cython +# dependencies across subpackages +subdir('__check_build') +subdir('_loss') +# utils needs to be early since plenty of other modules cimports utils .pxd +subdir('utils') +# metrics needs to be to be before cluster since cluster cimports metrics .pxd +subdir('metrics') +subdir('cluster') +subdir('datasets') +subdir('decomposition') +subdir('ensemble') +subdir('feature_extraction') +subdir('linear_model') +subdir('manifold') +subdir('neighbors') +subdir('preprocessing') +subdir('svm') +subdir('tree') diff --git a/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/LICENSE b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..51f3442917839f8e0f0cccb52b3c10968ad0779e --- /dev/null +++ b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/LICENSE @@ -0,0 +1,3 @@ +This software is made available under the terms of *either* of the +licenses found in LICENSE.APACHE2 or LICENSE.MIT. Contributions to are +made under the terms of *both* these licenses. diff --git a/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/LICENSE.APACHE2 b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/LICENSE.APACHE2 new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/LICENSE.APACHE2 @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/LICENSE.MIT b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/LICENSE.MIT new file mode 100644 index 0000000000000000000000000000000000000000..b8bb97185926d7daed314609753173945ed4ff1a --- /dev/null +++ b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/LICENSE.MIT @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/METADATA b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..75e0057bbad50ca4c117b9130b92f1bed2720669 --- /dev/null +++ b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/METADATA @@ -0,0 +1,104 @@ +Metadata-Version: 2.1 +Name: sniffio +Version: 1.3.1 +Summary: Sniff out which async library your code is running under +Author-email: "Nathaniel J. Smith" +License: MIT OR Apache-2.0 +Project-URL: Homepage, https://github.com/python-trio/sniffio +Project-URL: Documentation, https://sniffio.readthedocs.io/ +Project-URL: Changelog, https://sniffio.readthedocs.io/en/latest/history.html +Keywords: async,trio,asyncio +Classifier: License :: OSI Approved :: MIT License +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Framework :: Trio +Classifier: Framework :: AsyncIO +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Intended Audience :: Developers +Classifier: Development Status :: 5 - Production/Stable +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +License-File: LICENSE.APACHE2 +License-File: LICENSE.MIT + +.. image:: https://img.shields.io/badge/chat-join%20now-blue.svg + :target: https://gitter.im/python-trio/general + :alt: Join chatroom + +.. image:: https://img.shields.io/badge/docs-read%20now-blue.svg + :target: https://sniffio.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +.. image:: https://img.shields.io/pypi/v/sniffio.svg + :target: https://pypi.org/project/sniffio + :alt: Latest PyPi version + +.. 
image:: https://img.shields.io/conda/vn/conda-forge/sniffio.svg
+   :target: https://anaconda.org/conda-forge/sniffio
+   :alt: Latest conda-forge version
+
+.. image:: https://travis-ci.org/python-trio/sniffio.svg?branch=master
+   :target: https://travis-ci.org/python-trio/sniffio
+   :alt: Automated test status
+
+.. image:: https://codecov.io/gh/python-trio/sniffio/branch/master/graph/badge.svg
+   :target: https://codecov.io/gh/python-trio/sniffio
+   :alt: Test coverage
+
+=================================================================
+sniffio: Sniff out which async library your code is running under
+=================================================================
+
+You're writing a library. You've decided to be ambitious, and support
+multiple async I/O packages, like `Trio
+<https://trio.readthedocs.io>`__, and `asyncio
+<https://docs.python.org/3/library/asyncio.html>`__, and ... You've
+written a bunch of clever code to handle all the differences. But...
+how do you know *which* piece of clever code to run?
+
+This is a tiny package whose only purpose is to let you detect which
+async library your code is running under.
+
+* Documentation: https://sniffio.readthedocs.io
+
+* Bug tracker and source code: https://github.com/python-trio/sniffio
+
+* License: MIT or Apache License 2.0, your choice
+
+* Contributor guide: https://trio.readthedocs.io/en/latest/contributing.html
+
+* Code of conduct: Contributors are requested to follow our `code of
+  conduct
+  <https://trio.readthedocs.io/en/latest/code-of-conduct.html>`_
+  in all project spaces.
+
+This library is maintained by the Trio project, as a service to the
+async Python community as a whole.
+
+
+Quickstart
+----------
+
+.. code-block:: python3
+
+   from sniffio import current_async_library
+   import trio
+   import asyncio
+
+   async def print_library():
+       library = current_async_library()
+       print("This is:", library)
+
+   # Prints "This is: trio"
+   trio.run(print_library)
+
+   # Prints "This is: asyncio"
+   asyncio.run(print_library())
+
+For more details, including how to add support for new async libraries,
+`please peruse our fine manual <https://sniffio.readthedocs.io/>`__.
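A case the quickstart above does not cover: called outside of any event
loop, ``current_async_library()`` raises ``AsyncLibraryNotFoundError``,
so code that may also run synchronously typically catches it. A minimal
sketch (the ``describe_context`` helper is illustrative, not part of
sniffio's API):

.. code-block:: python3

   from sniffio import AsyncLibraryNotFoundError, current_async_library

   def describe_context() -> str:
       # Report the running async library, falling back when there is none.
       try:
           return f"running under {current_async_library()}"
       except AsyncLibraryNotFoundError:
           return "running synchronously"

   print(describe_context())  # outside an event loop: "running synchronously"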
diff --git a/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/RECORD b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..679721606361968288d68df6580f117e423d593e --- /dev/null +++ b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/RECORD @@ -0,0 +1,15 @@ +sniffio-1.3.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +sniffio-1.3.1.dist-info/LICENSE,sha256=ZSyHhIjRRWNh4Iw_hgf9e6WYkqFBA9Fczk_5PIW1zIs,185 +sniffio-1.3.1.dist-info/LICENSE.APACHE2,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +sniffio-1.3.1.dist-info/LICENSE.MIT,sha256=Pm2uVV65J4f8gtHUg1Vnf0VMf2Wus40_nnK_mj2vA0s,1046 +sniffio-1.3.1.dist-info/METADATA,sha256=CzGLVwmO3sz1heYKiJprantcQIbzqapi7_dqHTzuEtk,3875 +sniffio-1.3.1.dist-info/RECORD,, +sniffio-1.3.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sniffio-1.3.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92 +sniffio-1.3.1.dist-info/top_level.txt,sha256=v9UJXGs5CyddCVeAqXkQiWOrpp6Wtx6GeRrPt9-jjHg,8 +sniffio/__init__.py,sha256=9WJEJlXu7yluP0YtI5SQ9M9OTQfbNHkadarK1vXGDPM,335 +sniffio/_impl.py,sha256=UmUFMZpiuOrcjnuHhuYiYMxeCNWfqu9kBlaPf0xk6X8,2843 +sniffio/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sniffio/_tests/test_sniffio.py,sha256=MMJZZJjQrUi95RANNM-a_55BZquA_gv4rHU1pevcTCM,2058 +sniffio/_version.py,sha256=iVes5xwsHeRzQDexBaAhyx_taNt2ucfA7CWAo4QDt6Q,89 +sniffio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/WHEEL b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..98c0d20b7a64f4f998d7913e1d38a05dba20916c --- /dev/null +++ b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/top_level.txt b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..01c650244d0ccb6043c603b736fcf8d9e622bc71 --- /dev/null +++ b/py311/lib/python3.11/site-packages/sniffio-1.3.1.dist-info/top_level.txt @@ -0,0 +1 @@ +sniffio diff --git a/py311/lib/python3.11/site-packages/ty/__init__.py b/py311/lib/python3.11/site-packages/ty/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/py311/lib/python3.11/site-packages/ty/__main__.py b/py311/lib/python3.11/site-packages/ty/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..47c44e37a018edfda3936811e040dd09f897b621 --- /dev/null +++ b/py311/lib/python3.11/site-packages/ty/__main__.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +import os +import sys +import sysconfig + + +def find_ty_bin() -> str: + """Return the ty binary path.""" + + ty_exe = "ty" + sysconfig.get_config_var("EXE") + + scripts_path = os.path.join(sysconfig.get_path("scripts"), ty_exe) + if os.path.isfile(scripts_path): + return scripts_path + + if 
sys.version_info >= (3, 10): + user_scheme = sysconfig.get_preferred_scheme("user") + elif os.name == "nt": + user_scheme = "nt_user" + elif sys.platform == "darwin" and sys._framework: + user_scheme = "osx_framework_user" + else: + user_scheme = "posix_user" + + user_path = os.path.join(sysconfig.get_path("scripts", scheme=user_scheme), ty_exe) + if os.path.isfile(user_path): + return user_path + + # Search in `bin` adjacent to package root (as created by `pip install --target`). + pkg_root = os.path.dirname(os.path.dirname(__file__)) + target_path = os.path.join(pkg_root, "bin", ty_exe) + if os.path.isfile(target_path): + return target_path + + # Search for pip-specific build environments. + # + # Expect to find ty in /pip-build-env-/overlay/bin/ty + # Expect to find a "normal" folder at /pip-build-env-/normal + # + # See: https://github.com/pypa/pip/blob/102d8187a1f5a4cd5de7a549fd8a9af34e89a54f/src/pip/_internal/build_env.py#L87 + paths = os.environ.get("PATH", "").split(os.pathsep) + if len(paths) >= 2: + + def get_last_three_path_parts(path: str) -> list[str]: + """Return a list of up to the last three parts of a path.""" + parts = [] + + while len(parts) < 3: + head, tail = os.path.split(path) + if tail or head != path: + parts.append(tail) + path = head + else: + parts.append(path) + break + + return parts + + maybe_overlay = get_last_three_path_parts(paths[0]) + maybe_normal = get_last_three_path_parts(paths[1]) + if ( + len(maybe_normal) >= 3 + and maybe_normal[-1].startswith("pip-build-env-") + and maybe_normal[-2] == "normal" + and len(maybe_overlay) >= 3 + and maybe_overlay[-1].startswith("pip-build-env-") + and maybe_overlay[-2] == "overlay" + ): + # The overlay must contain the ty binary. + candidate = os.path.join(paths[0], ty_exe) + if os.path.isfile(candidate): + return candidate + + raise FileNotFoundError(scripts_path) + + +if __name__ == "__main__": + ty = os.fsdecode(find_ty_bin()) + if sys.platform == "win32": + import subprocess + + completed_process = subprocess.run([ty, *sys.argv[1:]]) + sys.exit(completed_process.returncode) + else: + os.execvp(ty, [ty, *sys.argv[1:]]) diff --git a/py311/lib/python3.11/site-packages/ty/py.typed b/py311/lib/python3.11/site-packages/ty/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/py311/lib/python3.11/site-packages/ty/py.typed @@ -0,0 +1 @@ + diff --git a/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/INSTALLER b/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/METADATA b/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..b09cb50e1f9a42a8eb58a4339cbdb26250368375 --- /dev/null +++ b/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/METADATA @@ -0,0 +1,72 @@ +Metadata-Version: 2.4 +Name: typing_extensions +Version: 4.15.0 +Summary: Backported and Experimental Type Hints for Python 3.9+ +Keywords: annotations,backport,checker,checking,function,hinting,hints,type,typechecking,typehinting,typehints,typing +Author-email: "Guido van Rossum, Jukka 
Lehtosalo, Łukasz Langa, Michael Lee" +Requires-Python: >=3.9 +Description-Content-Type: text/markdown +License-Expression: PSF-2.0 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Topic :: Software Development +License-File: LICENSE +Project-URL: Bug Tracker, https://github.com/python/typing_extensions/issues +Project-URL: Changes, https://github.com/python/typing_extensions/blob/main/CHANGELOG.md +Project-URL: Documentation, https://typing-extensions.readthedocs.io/ +Project-URL: Home, https://github.com/python/typing_extensions +Project-URL: Q & A, https://github.com/python/typing/discussions +Project-URL: Repository, https://github.com/python/typing_extensions + +# Typing Extensions + +[![Chat at https://gitter.im/python/typing](https://badges.gitter.im/python/typing.svg)](https://gitter.im/python/typing) + +[Documentation](https://typing-extensions.readthedocs.io/en/latest/#) – +[PyPI](https://pypi.org/project/typing-extensions/) + +## Overview + +The `typing_extensions` module serves two related purposes: + +- Enable use of new type system features on older Python versions. For example, + `typing.TypeGuard` is new in Python 3.10, but `typing_extensions` allows + users on previous Python versions to use it too. +- Enable experimentation with new type system PEPs before they are accepted and + added to the `typing` module. + +`typing_extensions` is treated specially by static type checkers such as +mypy and pyright. Objects defined in `typing_extensions` are treated the same +way as equivalent forms in `typing`. + +`typing_extensions` uses +[Semantic Versioning](https://semver.org/). The +major version will be incremented only for backwards-incompatible changes. +Therefore, it's safe to depend +on `typing_extensions` like this: `typing_extensions ~=x.y`, +where `x.y` is the first version that includes all features you need. +[This](https://packaging.python.org/en/latest/specifications/version-specifiers/#compatible-release) +is equivalent to `typing_extensions >=x.y, <(x+1)`. Do not depend on `~= x.y.z` +unless you really know what you're doing; that defeats the purpose of +semantic versioning. + +## Included items + +See [the documentation](https://typing-extensions.readthedocs.io/en/latest/#) for a +complete listing of module contents. + +## Contributing + +See [CONTRIBUTING.md](https://github.com/python/typing_extensions/blob/main/CONTRIBUTING.md) +for how to contribute to `typing_extensions`. 
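To make the compatible-release arithmetic above concrete, the specifier can be
evaluated with the third-party `packaging` library (assumed to be installed
here; it is not a dependency of `typing_extensions`). A minimal sketch:

```python
from packaging.specifiers import SpecifierSet

# Per PEP 440, "~=4.5" is shorthand for ">=4.5, <5": any newer release
# within the same major version is accepted.
compatible = SpecifierSet("~=4.5")

print("4.15.0" in compatible)  # True: newer minor release, same major version
print("5.0.0" in compatible)   # False: the next major version is excluded
```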
+
diff --git a/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/RECORD b/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..ec54c2e277f970bd4ae36e237f8fe0f4dff7f900
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/RECORD
@@ -0,0 +1,7 @@
+typing_extensions-4.15.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
+typing_extensions-4.15.0.dist-info/METADATA,sha256=wTg3j-jxiTSsmd4GBTXFPsbBOu7WXpTDJkHafuMZKnI,3259
+typing_extensions-4.15.0.dist-info/RECORD,,
+typing_extensions-4.15.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+typing_extensions-4.15.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+typing_extensions-4.15.0.dist-info/licenses/LICENSE,sha256=Oy-B_iHRgcSZxZolbI4ZaEVdZonSaaqFNzv7avQdo78,13936
+typing_extensions.py,sha256=Qz0R0XDTok0usGXrwb_oSM6n49fOaFZ6tSvqLUwvftg,160429
diff --git a/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/REQUESTED b/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/WHEEL b/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..d8b9936dad9ab2513fa6979f411560d3b6b57e37
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/typing_extensions-4.15.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: flit 3.12.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/py311/lib/python3.11/site-packages/tzdata/__init__.py b/py311/lib/python3.11/site-packages/tzdata/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1534dfb64286636dcdf93f172b4ac5bdcb44232
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/tzdata/__init__.py
@@ -0,0 +1,6 @@
+# IANA versions like 2020a are not valid PEP 440 identifiers; the recommended
+# way to translate is YYYY.n, where `n` is a 1-based index (2025c -> 2025.3).
+__version__ = "2025.3"
+
+# This exposes the original IANA version number.
+IANA_VERSION = "2025c" diff --git a/py311/lib/python3.11/site-packages/tzdata/zones b/py311/lib/python3.11/site-packages/tzdata/zones new file mode 100644 index 0000000000000000000000000000000000000000..d4c3ef55638268a2ab4d17f7db071075db94747d --- /dev/null +++ b/py311/lib/python3.11/site-packages/tzdata/zones @@ -0,0 +1,598 @@ +Africa/Abidjan +Africa/Algiers +Africa/Bissau +Africa/Cairo +Africa/Casablanca +Africa/Ceuta +Africa/El_Aaiun +Africa/Johannesburg +Africa/Juba +Africa/Khartoum +Africa/Lagos +Africa/Maputo +Africa/Monrovia +Africa/Nairobi +Africa/Ndjamena +Africa/Sao_Tome +Africa/Tripoli +Africa/Tunis +Africa/Windhoek +America/Adak +America/Anchorage +America/Araguaina +America/Argentina/Buenos_Aires +America/Argentina/Catamarca +America/Argentina/Cordoba +America/Argentina/Jujuy +America/Argentina/La_Rioja +America/Argentina/Mendoza +America/Argentina/Rio_Gallegos +America/Argentina/Salta +America/Argentina/San_Juan +America/Argentina/San_Luis +America/Argentina/Tucuman +America/Argentina/Ushuaia +America/Asuncion +America/Bahia +America/Bahia_Banderas +America/Barbados +America/Belem +America/Belize +America/Boa_Vista +America/Bogota +America/Boise +America/Cambridge_Bay +America/Campo_Grande +America/Cancun +America/Caracas +America/Cayenne +America/Chicago +America/Chihuahua +America/Ciudad_Juarez +America/Costa_Rica +America/Coyhaique +America/Cuiaba +America/Danmarkshavn +America/Dawson +America/Dawson_Creek +America/Denver +America/Detroit +America/Edmonton +America/Eirunepe +America/El_Salvador +America/Fort_Nelson +America/Fortaleza +America/Glace_Bay +America/Goose_Bay +America/Grand_Turk +America/Guatemala +America/Guayaquil +America/Guyana +America/Halifax +America/Havana +America/Hermosillo +America/Indiana/Indianapolis +America/Indiana/Knox +America/Indiana/Marengo +America/Indiana/Petersburg +America/Indiana/Tell_City +America/Indiana/Vevay +America/Indiana/Vincennes +America/Indiana/Winamac +America/Inuvik +America/Iqaluit +America/Jamaica +America/Juneau +America/Kentucky/Louisville +America/Kentucky/Monticello +America/La_Paz +America/Lima +America/Los_Angeles +America/Maceio +America/Managua +America/Manaus +America/Martinique +America/Matamoros +America/Mazatlan +America/Menominee +America/Merida +America/Metlakatla +America/Mexico_City +America/Miquelon +America/Moncton +America/Monterrey +America/Montevideo +America/New_York +America/Nome +America/Noronha +America/North_Dakota/Beulah +America/North_Dakota/Center +America/North_Dakota/New_Salem +America/Nuuk +America/Ojinaga +America/Panama +America/Paramaribo +America/Phoenix +America/Port-au-Prince +America/Porto_Velho +America/Puerto_Rico +America/Punta_Arenas +America/Rankin_Inlet +America/Recife +America/Regina +America/Resolute +America/Rio_Branco +America/Santarem +America/Santiago +America/Santo_Domingo +America/Sao_Paulo +America/Scoresbysund +America/Sitka +America/St_Johns +America/Swift_Current +America/Tegucigalpa +America/Thule +America/Tijuana +America/Toronto +America/Vancouver +America/Whitehorse +America/Winnipeg +America/Yakutat +Antarctica/Casey +Antarctica/Davis +Antarctica/Macquarie +Antarctica/Mawson +Antarctica/Palmer +Antarctica/Rothera +Antarctica/Troll +Antarctica/Vostok +Asia/Almaty +Asia/Amman +Asia/Anadyr +Asia/Aqtau +Asia/Aqtobe +Asia/Ashgabat +Asia/Atyrau +Asia/Baghdad +Asia/Baku +Asia/Bangkok +Asia/Barnaul +Asia/Beirut +Asia/Bishkek +Asia/Chita +Asia/Colombo +Asia/Damascus +Asia/Dhaka +Asia/Dili +Asia/Dubai +Asia/Dushanbe +Asia/Famagusta +Asia/Gaza +Asia/Hebron 
+Asia/Ho_Chi_Minh +Asia/Hong_Kong +Asia/Hovd +Asia/Irkutsk +Asia/Jakarta +Asia/Jayapura +Asia/Jerusalem +Asia/Kabul +Asia/Kamchatka +Asia/Karachi +Asia/Kathmandu +Asia/Khandyga +Asia/Kolkata +Asia/Krasnoyarsk +Asia/Kuching +Asia/Macau +Asia/Magadan +Asia/Makassar +Asia/Manila +Asia/Nicosia +Asia/Novokuznetsk +Asia/Novosibirsk +Asia/Omsk +Asia/Oral +Asia/Pontianak +Asia/Pyongyang +Asia/Qatar +Asia/Qostanay +Asia/Qyzylorda +Asia/Riyadh +Asia/Sakhalin +Asia/Samarkand +Asia/Seoul +Asia/Shanghai +Asia/Singapore +Asia/Srednekolymsk +Asia/Taipei +Asia/Tashkent +Asia/Tbilisi +Asia/Tehran +Asia/Thimphu +Asia/Tokyo +Asia/Tomsk +Asia/Ulaanbaatar +Asia/Urumqi +Asia/Ust-Nera +Asia/Vladivostok +Asia/Yakutsk +Asia/Yangon +Asia/Yekaterinburg +Asia/Yerevan +Atlantic/Azores +Atlantic/Bermuda +Atlantic/Canary +Atlantic/Cape_Verde +Atlantic/Faroe +Atlantic/Madeira +Atlantic/South_Georgia +Atlantic/Stanley +Australia/Adelaide +Australia/Brisbane +Australia/Broken_Hill +Australia/Darwin +Australia/Eucla +Australia/Hobart +Australia/Lindeman +Australia/Lord_Howe +Australia/Melbourne +Australia/Perth +Australia/Sydney +Etc/GMT +Etc/GMT+1 +Etc/GMT+10 +Etc/GMT+11 +Etc/GMT+12 +Etc/GMT+2 +Etc/GMT+3 +Etc/GMT+4 +Etc/GMT+5 +Etc/GMT+6 +Etc/GMT+7 +Etc/GMT+8 +Etc/GMT+9 +Etc/GMT-1 +Etc/GMT-10 +Etc/GMT-11 +Etc/GMT-12 +Etc/GMT-13 +Etc/GMT-14 +Etc/GMT-2 +Etc/GMT-3 +Etc/GMT-4 +Etc/GMT-5 +Etc/GMT-6 +Etc/GMT-7 +Etc/GMT-8 +Etc/GMT-9 +Etc/UTC +Europe/Andorra +Europe/Astrakhan +Europe/Athens +Europe/Belgrade +Europe/Berlin +Europe/Brussels +Europe/Bucharest +Europe/Budapest +Europe/Chisinau +Europe/Dublin +Europe/Gibraltar +Europe/Helsinki +Europe/Istanbul +Europe/Kaliningrad +Europe/Kirov +Europe/Kyiv +Europe/Lisbon +Europe/London +Europe/Madrid +Europe/Malta +Europe/Minsk +Europe/Moscow +Europe/Paris +Europe/Prague +Europe/Riga +Europe/Rome +Europe/Samara +Europe/Saratov +Europe/Simferopol +Europe/Sofia +Europe/Tallinn +Europe/Tirane +Europe/Ulyanovsk +Europe/Vienna +Europe/Vilnius +Europe/Volgograd +Europe/Warsaw +Europe/Zurich +Factory +Indian/Chagos +Indian/Maldives +Indian/Mauritius +Pacific/Apia +Pacific/Auckland +Pacific/Bougainville +Pacific/Chatham +Pacific/Easter +Pacific/Efate +Pacific/Fakaofo +Pacific/Fiji +Pacific/Galapagos +Pacific/Gambier +Pacific/Guadalcanal +Pacific/Guam +Pacific/Honolulu +Pacific/Kanton +Pacific/Kiritimati +Pacific/Kosrae +Pacific/Kwajalein +Pacific/Marquesas +Pacific/Nauru +Pacific/Niue +Pacific/Norfolk +Pacific/Noumea +Pacific/Pago_Pago +Pacific/Palau +Pacific/Pitcairn +Pacific/Port_Moresby +Pacific/Rarotonga +Pacific/Tahiti +Pacific/Tarawa +Pacific/Tongatapu +GMT +Australia/ACT +Australia/LHI +Australia/NSW +Australia/North +Australia/Queensland +Australia/South +Australia/Tasmania +Australia/Victoria +Australia/West +Australia/Yancowinna +Brazil/Acre +Brazil/DeNoronha +Brazil/East +Brazil/West +CET +CST6CDT +Canada/Atlantic +Canada/Central +Canada/Eastern +Canada/Mountain +Canada/Newfoundland +Canada/Pacific +Canada/Saskatchewan +Canada/Yukon +Chile/Continental +Chile/EasterIsland +Cuba +EET +EST +EST5EDT +Egypt +Eire +Etc/GMT+0 +Etc/GMT-0 +Etc/GMT0 +Etc/Greenwich +Etc/UCT +Etc/Universal +Etc/Zulu +GB +GB-Eire +GMT+0 +GMT-0 +GMT0 +Greenwich +Hongkong +Iceland +Iran +Israel +Jamaica +Japan +Kwajalein +Libya +MET +MST +MST7MDT +Mexico/BajaNorte +Mexico/BajaSur +Mexico/General +NZ +NZ-CHAT +Navajo +PRC +Poland +Portugal +ROC +ROK +Singapore +Turkey +UCT +US/Alaska +US/Aleutian +US/Arizona +US/Central +US/East-Indiana +US/Eastern +US/Hawaii +US/Indiana-Starke +US/Michigan +US/Mountain +US/Pacific 
+US/Samoa +UTC +Universal +W-SU +Zulu +America/Buenos_Aires +America/Catamarca +America/Cordoba +America/Indianapolis +America/Jujuy +America/Knox_IN +America/Louisville +America/Mendoza +America/Virgin +Pacific/Samoa +Africa/Accra +Africa/Addis_Ababa +Africa/Asmara +Africa/Bamako +Africa/Bangui +Africa/Banjul +Africa/Blantyre +Africa/Brazzaville +Africa/Bujumbura +Africa/Conakry +Africa/Dakar +Africa/Dar_es_Salaam +Africa/Djibouti +Africa/Douala +Africa/Freetown +Africa/Gaborone +Africa/Harare +Africa/Kampala +Africa/Kigali +Africa/Kinshasa +Africa/Libreville +Africa/Lome +Africa/Luanda +Africa/Lubumbashi +Africa/Lusaka +Africa/Malabo +Africa/Maseru +Africa/Mbabane +Africa/Mogadishu +Africa/Niamey +Africa/Nouakchott +Africa/Ouagadougou +Africa/Porto-Novo +America/Anguilla +America/Antigua +America/Aruba +America/Atikokan +America/Blanc-Sablon +America/Cayman +America/Creston +America/Curacao +America/Dominica +America/Grenada +America/Guadeloupe +America/Kralendijk +America/Lower_Princes +America/Marigot +America/Montserrat +America/Nassau +America/Port_of_Spain +America/St_Barthelemy +America/St_Kitts +America/St_Lucia +America/St_Thomas +America/St_Vincent +America/Tortola +Antarctica/DumontDUrville +Antarctica/McMurdo +Antarctica/Syowa +Arctic/Longyearbyen +Asia/Aden +Asia/Bahrain +Asia/Brunei +Asia/Kuala_Lumpur +Asia/Kuwait +Asia/Muscat +Asia/Phnom_Penh +Asia/Vientiane +Atlantic/Reykjavik +Atlantic/St_Helena +Europe/Amsterdam +Europe/Bratislava +Europe/Busingen +Europe/Copenhagen +Europe/Guernsey +Europe/Isle_of_Man +Europe/Jersey +Europe/Ljubljana +Europe/Luxembourg +Europe/Mariehamn +Europe/Monaco +Europe/Oslo +Europe/Podgorica +Europe/San_Marino +Europe/Sarajevo +Europe/Skopje +Europe/Stockholm +Europe/Vaduz +Europe/Vatican +Europe/Zagreb +Indian/Antananarivo +Indian/Christmas +Indian/Cocos +Indian/Comoro +Indian/Kerguelen +Indian/Mahe +Indian/Mayotte +Indian/Reunion +Pacific/Chuuk +Pacific/Funafuti +Pacific/Majuro +Pacific/Midway +Pacific/Pohnpei +Pacific/Saipan +Pacific/Wake +Pacific/Wallis +Africa/Timbuktu +America/Argentina/ComodRivadavia +America/Atka +America/Coral_Harbour +America/Ensenada +America/Fort_Wayne +America/Montreal +America/Nipigon +America/Pangnirtung +America/Porto_Acre +America/Rainy_River +America/Rosario +America/Santa_Isabel +America/Shiprock +America/Thunder_Bay +America/Yellowknife +Antarctica/South_Pole +Asia/Choibalsan +Asia/Chongqing +Asia/Harbin +Asia/Kashgar +Asia/Tel_Aviv +Atlantic/Jan_Mayen +Australia/Canberra +Australia/Currie +Europe/Belfast +Europe/Tiraspol +Europe/Uzhgorod +Europe/Zaporozhye +Pacific/Enderbury +Pacific/Johnston +Pacific/Yap +WET +Africa/Asmera +America/Godthab +Asia/Ashkhabad +Asia/Calcutta +Asia/Chungking +Asia/Dacca +Asia/Istanbul +Asia/Katmandu +Asia/Macao +Asia/Rangoon +Asia/Saigon +Asia/Thimbu +Asia/Ujung_Pandang +Asia/Ulan_Bator +Atlantic/Faeroe +Europe/Kiev +Europe/Nicosia +HST +PST8PDT +Pacific/Ponape +Pacific/Truk diff --git a/py311/lib/python3.11/site-packages/virtualenv/__init__.py b/py311/lib/python3.11/site-packages/virtualenv/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc11e7f3e299f4ce2ff611a68bbfa3992e45148d --- /dev/null +++ b/py311/lib/python3.11/site-packages/virtualenv/__init__.py @@ -0,0 +1,10 @@ +from __future__ import annotations + +from .run import cli_run, session_via_cli +from .version import __version__ + +__all__ = [ + "__version__", + "cli_run", + "session_via_cli", +] diff --git a/py311/lib/python3.11/site-packages/virtualenv/__main__.py 
b/py311/lib/python3.11/site-packages/virtualenv/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..49f59da3818f647545a29ff315634eec07d09cda --- /dev/null +++ b/py311/lib/python3.11/site-packages/virtualenv/__main__.py @@ -0,0 +1,83 @@ +from __future__ import annotations + +import errno +import logging +import os +import sys +from timeit import default_timer + +LOGGER = logging.getLogger(__name__) + + +def run(args=None, options=None, env=None): + env = os.environ if env is None else env + start = default_timer() + from virtualenv.run import cli_run # noqa: PLC0415 + from virtualenv.util.error import ProcessCallFailedError # noqa: PLC0415 + + if args is None: + args = sys.argv[1:] + try: + session = cli_run(args, options, env) + LOGGER.warning(LogSession(session, start)) + except ProcessCallFailedError as exception: + print(f"subprocess call failed for {exception.cmd} with code {exception.code}") # noqa: T201 + print(exception.out, file=sys.stdout, end="") # noqa: T201 + print(exception.err, file=sys.stderr, end="") # noqa: T201 + raise SystemExit(exception.code) # noqa: B904 + except OSError as exception: + if exception.errno == errno.EMFILE: + print( # noqa: T201 + "OSError: [Errno 24] Too many open files. You may need to increase your OS open files limit.\n" + " On macOS/Linux, try 'ulimit -n 2048'.\n" + " For Windows, this is not a common issue, but you can try to close some applications.", + file=sys.stderr, + ) + raise + + +class LogSession: + def __init__(self, session, start) -> None: + self.session = session + self.start = start + + def __str__(self) -> str: + spec = self.session.creator.interpreter.spec + elapsed = (default_timer() - self.start) * 1000 + lines = [ + f"created virtual environment {spec} in {elapsed:.0f}ms", + f" creator {self.session.creator!s}", + ] + if self.session.seeder.enabled: + lines.append(f" seeder {self.session.seeder!s}") + path = self.session.creator.purelib.iterdir() + packages = sorted("==".join(i.stem.split("-")) for i in path if i.suffix == ".dist-info") + lines.append(f" added seed packages: {', '.join(packages)}") + + if self.session.activators: + lines.append(f" activators {','.join(i.__class__.__name__ for i in self.session.activators)}") + return "\n".join(lines) + + +def run_with_catch(args=None, env=None): + from virtualenv.config.cli.parser import VirtualEnvOptions # noqa: PLC0415 + + env = os.environ if env is None else env + options = VirtualEnvOptions() + try: + run(args, options, env) + except (KeyboardInterrupt, SystemExit, Exception) as exception: # noqa: BLE001 + try: + if getattr(options, "with_traceback", False): + raise + if not (isinstance(exception, SystemExit) and exception.code == 0): + LOGGER.error("%s: %s", type(exception).__name__, exception) # noqa: TRY400 + code = exception.code if isinstance(exception, SystemExit) else 1 + sys.exit(code) + finally: + for handler in LOGGER.handlers: # force flush of log messages before the trace is printed + handler.flush() + + +if __name__ == "__main__": # pragma: no cov + run_with_catch() # pragma: no cov diff --git a/py311/lib/python3.11/site-packages/virtualenv/info.py b/py311/lib/python3.11/site-packages/virtualenv/info.py new file mode 100644 index 0000000000000000000000000000000000000000..e3542a7e2ba65758ecb2f3f00c8aa618a7d08f06 --- /dev/null +++ b/py311/lib/python3.11/site-packages/virtualenv/info.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +import logging +import os +import platform +import sys +import tempfile + +IMPLEMENTATION = 
platform.python_implementation()
+IS_PYPY = IMPLEMENTATION == "PyPy"
+IS_GRAALPY = IMPLEMENTATION == "GraalVM"
+IS_CPYTHON = IMPLEMENTATION == "CPython"
+IS_WIN = sys.platform == "win32"
+IS_MAC_ARM64 = sys.platform == "darwin" and platform.machine() == "arm64"
+ROOT = os.path.realpath(os.path.join(os.path.abspath(__file__), os.path.pardir, os.path.pardir))
+IS_ZIPAPP = os.path.isfile(ROOT)
+_CAN_SYMLINK = _FS_CASE_SENSITIVE = _CFG_DIR = _DATA_DIR = None
+LOGGER = logging.getLogger(__name__)
+
+
+def fs_is_case_sensitive():
+    global _FS_CASE_SENSITIVE  # noqa: PLW0603
+
+    if _FS_CASE_SENSITIVE is None:
+        with tempfile.NamedTemporaryFile(prefix="TmP") as tmp_file:
+            _FS_CASE_SENSITIVE = not os.path.exists(tmp_file.name.lower())
+        LOGGER.debug("filesystem is %scase-sensitive", "" if _FS_CASE_SENSITIVE else "not ")
+    return _FS_CASE_SENSITIVE
+
+
+def fs_supports_symlink():
+    global _CAN_SYMLINK  # noqa: PLW0603
+
+    if _CAN_SYMLINK is None:
+        can = False
+        if hasattr(os, "symlink"):
+            # Creating a symlink can fail for a variety of reasons, indicating that the filesystem does not support it.
+            # E.g. on Linux with a VFAT partition mounted.
+            with tempfile.NamedTemporaryFile(prefix="TmP") as tmp_file:
+                temp_dir = os.path.dirname(tmp_file.name)
+                dest = os.path.join(temp_dir, f"{tmp_file.name}-{'b'}")
+                try:
+                    os.symlink(tmp_file.name, dest)
+                    can = True
+                except (OSError, NotImplementedError):
+                    pass  # symlink is not supported
+                finally:
+                    if os.path.lexists(dest):
+                        os.remove(dest)
+        LOGGER.debug("symlink on filesystem does%s work", "" if can else " not")
+        _CAN_SYMLINK = can
+    return _CAN_SYMLINK
+
+
+def fs_path_id(path: str) -> str:
+    return path if fs_is_case_sensitive() else path.casefold()  # fold case only where the filesystem ignores it
+
+
+__all__ = (
+    "IS_CPYTHON",
+    "IS_GRAALPY",
+    "IS_MAC_ARM64",
+    "IS_PYPY",
+    "IS_WIN",
+    "IS_ZIPAPP",
+    "ROOT",
+    "fs_is_case_sensitive",
+    "fs_path_id",
+    "fs_supports_symlink",
+)
diff --git a/py311/lib/python3.11/site-packages/virtualenv/report.py b/py311/lib/python3.11/site-packages/virtualenv/report.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9682a8f63393db2dea860a8a20027764d81e178
--- /dev/null
+++ b/py311/lib/python3.11/site-packages/virtualenv/report.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+import logging
+import sys
+
+LEVELS = {
+    0: logging.CRITICAL,
+    1: logging.ERROR,
+    2: logging.WARNING,
+    3: logging.INFO,
+    4: logging.DEBUG,
+    5: logging.NOTSET,
+}
+
+MAX_LEVEL = max(LEVELS.keys())
+LOGGER = logging.getLogger()
+
+
+def setup_report(verbosity, show_pid=False):  # noqa: FBT002
+    _clean_handlers(LOGGER)
+    verbosity = min(verbosity, MAX_LEVEL)  # pragma: no cover
+    level = LEVELS[verbosity]
+    msg_format = "%(message)s"
+    if level <= logging.DEBUG:
+        locate = "module"
+        msg_format = f"%(relativeCreated)d {msg_format} [%(levelname)s %({locate})s:%(lineno)d]"
+    if show_pid:
+        msg_format = f"[%(process)d] {msg_format}"
+    formatter = logging.Formatter(msg_format)
+    stream_handler = logging.StreamHandler(stream=sys.stdout)
+    stream_handler.setLevel(level)
+    LOGGER.setLevel(logging.NOTSET)
+    stream_handler.setFormatter(formatter)
+    LOGGER.addHandler(stream_handler)
+    level_name = logging.getLevelName(level)
+    LOGGER.debug("setup logging to %s", level_name)
+    logging.getLogger("distlib").setLevel(logging.ERROR)
+    return verbosity
+
+
+def _clean_handlers(log):
+    for log_handler in list(log.handlers):  # remove handlers of libraries
+        log.removeHandler(log_handler)
+
+
+__all__ = [
+    "LEVELS",
+    "MAX_LEVEL",
+    "setup_report",
+]
diff --git
a/py311/lib/python3.11/site-packages/virtualenv/version.py b/py311/lib/python3.11/site-packages/virtualenv/version.py new file mode 100644 index 0000000000000000000000000000000000000000..391542c294b473a0b4c5693e8e41ed5e9fcba064 --- /dev/null +++ b/py311/lib/python3.11/site-packages/virtualenv/version.py @@ -0,0 +1,34 @@ +# file generated by setuptools-scm +# don't change, don't track in version control + +__all__ = [ + "__version__", + "__version_tuple__", + "version", + "version_tuple", + "__commit_id__", + "commit_id", +] + +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple + from typing import Union + + VERSION_TUPLE = Tuple[Union[int, str], ...] + COMMIT_ID = Union[str, None] +else: + VERSION_TUPLE = object + COMMIT_ID = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE +commit_id: COMMIT_ID +__commit_id__: COMMIT_ID + +__version__ = version = '20.36.1' +__version_tuple__ = version_tuple = (20, 36, 1) + +__commit_id__ = commit_id = None
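Taken together, the virtualenv modules above expose the package's programmatic
entry point: `cli_run`, re-exported from `virtualenv/__init__.py`, takes the
same arguments as the command line and returns the session that `LogSession`
summarizes. A minimal sketch of driving it from Python (the destination path
is an illustrative placeholder):

    from virtualenv import cli_run

    # Equivalent to running `python -m virtualenv /tmp/demo-env`; the returned
    # Session's creator records the paths of the new environment.
    session = cli_run(["/tmp/demo-env"])
    print(session.creator.exe)  # interpreter inside the freshly created env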