language: stringclasses (1 value)
repo: stringclasses (346 values)
path: stringlengths (6 to 201)
class_span: dict
source: stringlengths (21 to 2.38M)
target: stringlengths (1 to 96)
python
scikit-image__scikit-image
tests/skimage/graph/test_flexible.py
{ "start": 164, "end": 1560 }
class ____(mcp.MCP_Flexible):
    """Simple MCP subclass that allows the front to travel
    a certain distance from the seed point, and uses a constant
    cost factor that is independent of the cost array.
    """

    def _reset(self):
        mcp.MCP_Flexible._reset(self)
        self._distance = np.zeros((8, 8), dtype=np.float32).ravel()

    def goal_reached(self, index, cumcost):
        if self._distance[index] > 4:
            return 2
        else:
            return 0

    def travel_cost(self, index, new_index, offset_length):
        return 1.0  # fixed cost

    def examine_neighbor(self, index, new_index, offset_length):
        pass  # We do not test this

    def update_node(self, index, new_index, offset_length):
        self._distance[new_index] = self._distance[index] + 1


def test_flexible():
    # Create MCP and do a traceback
    mcp = FlexibleMCP(a)
    costs, traceback = mcp.find_costs([(0, 0)])

    # Check that inner part is correct. This basically
    # tests whether travel_cost works.
    assert_array_equal(
        costs[:4, :4],
        [[1, 2, 3, 4], [2, 2, 3, 4], [3, 3, 3, 4], [4, 4, 4, 4]]
    )

    # Test that the algorithm stopped at the right distance.
    # Note that some of the costs are filled in but not yet frozen,
    # so we take a bit of margin
    assert np.all(costs[-2:, :] == np.inf)
    assert np.all(costs[:, -2:] == np.inf)
FlexibleMCP
python
falconry__falcon
tests/test_recipes.py
{ "start": 2106, "end": 3297 }
class ____:
    class QuoteResource:
        def on_get(self, req, resp):
            resp.media = {
                'author': 'Grace Hopper',
                'quote': (
                    "I've always been more interested in the future than in the past."
                ),
            }

    class NegotiationMiddleware:
        def process_request(self, req, resp):
            resp.content_type = req.accept

    def test_optional_indent(self, util):
        recipe = util.load_module('examples/recipes/pretty_json_main.py')

        app = falcon.App(middleware=[self.NegotiationMiddleware()])
        app.add_route('/quote', self.QuoteResource())
        app.resp_options.media_handlers.update(
            {falcon.MEDIA_JSON: recipe.CustomJSONHandler()}
        )

        result = falcon.testing.simulate_get(
            app, '/quote', headers={'Accept': 'application/json; indent=4'}
        )
        assert result.status_code == 200
        assert result.text == (
            '{\n'
            '    "author": "Grace Hopper",\n'
            '    "quote": "I\'ve always been more interested in the future '
            'than in the past."\n'
            '}'
        )
TestPrettyJSON
python
matplotlib__matplotlib
lib/matplotlib/backends/backend_qt.py
{ "start": 45019, "end": 45430 }
class ____(backend_tools.RubberbandBase):
    def draw_rubberband(self, x0, y0, x1, y1):
        NavigationToolbar2QT.draw_rubberband(
            self._make_classic_style_pseudo_toolbar(), None, x0, y0, x1, y1)

    def remove_rubberband(self):
        NavigationToolbar2QT.remove_rubberband(
            self._make_classic_style_pseudo_toolbar())


@backend_tools._register_tool_class(FigureCanvasQT)
RubberbandQt
python
kamyu104__LeetCode-Solutions
Python/number-of-pairs-satisfying-inequality.py
{ "start": 1774, "end": 2930 }
class ____(object):
    def numberOfPairs(self, nums1, nums2, diff):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :type diff: int
        :rtype: int
        """
        def merge_sort(nums, left, right, result):
            if left == right:
                return
            mid = left+(right-left)//2
            merge_sort(nums, left, mid, result)
            merge_sort(nums, mid+1, right, result)
            r = mid+1
            for l in xrange(left, mid+1):
                while r < right+1 and nums[l]-nums[r] > diff:
                    r += 1
                result[0] += right-r+1
            tmp = []
            l, r = left, mid+1
            while l < mid+1 or r < right+1:
                if r >= right+1 or (l < mid+1 and nums[l] <= nums[r]):
                    tmp.append(nums[l])
                    l += 1
                else:
                    tmp.append(nums[r])
                    r += 1
            nums[left:right+1] = tmp

        nums = [x-y for x, y in itertools.izip(nums1, nums2)]
        result = [0]
        merge_sort(nums, 0, len(nums)-1, result)
        return result[0]
Solution3
python
bokeh__bokeh
src/bokeh/embed/bundle.py
{ "start": 7454, "end": 7781 }
class ____: """ Opaque type for representing URLs. """ url: str def __truediv__(self, path: str) -> URL: url = self.url if self.url.endswith("/") else f"{self.url}/" return URL(urljoin(url, path.replace(os.sep, "/"))) def __str__(self) -> str: return self.url @dataclass(frozen=True)
URL
python
gevent__gevent
src/greentest/3.13/test_httplib.py
{ "start": 64767, "end": 67820 }
class ____(TestCase, ExtraAssertions): def test_all(self): # Documented objects defined in the module should be in __all__ expected = {"responses"} # Allowlist documented dict() object # HTTPMessage, parse_headers(), and the HTTP status code constants are # intentionally omitted for simplicity denylist = {"HTTPMessage", "parse_headers"} for name in dir(client): if name.startswith("_") or name in denylist: continue module_object = getattr(client, name) if getattr(module_object, "__module__", None) == "http.client": expected.add(name) self.assertCountEqual(client.__all__, expected) def test_responses(self): self.assertEqual(client.responses[client.NOT_FOUND], "Not Found") def test_client_constants(self): # Make sure we don't break backward compatibility with 3.4 expected = [ 'CONTINUE', 'SWITCHING_PROTOCOLS', 'PROCESSING', 'OK', 'CREATED', 'ACCEPTED', 'NON_AUTHORITATIVE_INFORMATION', 'NO_CONTENT', 'RESET_CONTENT', 'PARTIAL_CONTENT', 'MULTI_STATUS', 'IM_USED', 'MULTIPLE_CHOICES', 'MOVED_PERMANENTLY', 'FOUND', 'SEE_OTHER', 'NOT_MODIFIED', 'USE_PROXY', 'TEMPORARY_REDIRECT', 'BAD_REQUEST', 'UNAUTHORIZED', 'PAYMENT_REQUIRED', 'FORBIDDEN', 'NOT_FOUND', 'METHOD_NOT_ALLOWED', 'NOT_ACCEPTABLE', 'PROXY_AUTHENTICATION_REQUIRED', 'REQUEST_TIMEOUT', 'CONFLICT', 'GONE', 'LENGTH_REQUIRED', 'PRECONDITION_FAILED', 'CONTENT_TOO_LARGE', 'REQUEST_ENTITY_TOO_LARGE', 'URI_TOO_LONG', 'REQUEST_URI_TOO_LONG', 'UNSUPPORTED_MEDIA_TYPE', 'RANGE_NOT_SATISFIABLE', 'REQUESTED_RANGE_NOT_SATISFIABLE', 'EXPECTATION_FAILED', 'IM_A_TEAPOT', 'MISDIRECTED_REQUEST', 'UNPROCESSABLE_CONTENT', 'UNPROCESSABLE_ENTITY', 'LOCKED', 'FAILED_DEPENDENCY', 'UPGRADE_REQUIRED', 'PRECONDITION_REQUIRED', 'TOO_MANY_REQUESTS', 'REQUEST_HEADER_FIELDS_TOO_LARGE', 'UNAVAILABLE_FOR_LEGAL_REASONS', 'INTERNAL_SERVER_ERROR', 'NOT_IMPLEMENTED', 'BAD_GATEWAY', 'SERVICE_UNAVAILABLE', 'GATEWAY_TIMEOUT', 'HTTP_VERSION_NOT_SUPPORTED', 'INSUFFICIENT_STORAGE', 'NOT_EXTENDED', 'NETWORK_AUTHENTICATION_REQUIRED', 'EARLY_HINTS', 'TOO_EARLY' ] for const in expected: with self.subTest(constant=const): self.assertHasAttr(client, const)
OfflineTest
python
numba__numba
numba/core/types/abstract.py
{ "start": 965, "end": 2694 }
class ____(ABCMeta):
    """
    A metaclass that will intern instances after they are created.
    This is done by first creating a new instance (including calling
    __init__, which sets up the required attributes for equality
    and hashing), then looking it up in the _typecache registry.
    """

    def __init__(cls, name, bases, orig_vars):
        # __init__ is hooked to mark whether a Type class being defined is a
        # Numba internal type (one which is defined somewhere under the `numba`
        # module) or an external type (one which is defined elsewhere, for
        # example a user defined type).
        super(_TypeMetaclass, cls).__init__(name, bases, orig_vars)
        root = (cls.__module__.split('.'))[0]
        cls._is_internal = root == "numba"

    def _intern(cls, inst):
        # Try to intern the created instance
        wr = weakref.ref(inst, _on_type_disposal)
        orig = _typecache.get(wr)
        orig = orig and orig()
        if orig is not None:
            return orig
        else:
            inst._code = _autoincr()
            _typecache[wr] = wr
            return inst

    def __call__(cls, *args, **kwargs):
        """
        Instantiate *cls* (a Type subclass, presumably) and intern it.
        If an interned instance already exists, it is returned, otherwise
        the new instance is returned.
        """
        inst = type.__call__(cls, *args, **kwargs)
        return cls._intern(inst)


def _type_reconstructor(reconstructor, reconstructor_args, state):
    """
    Rebuild function for unpickling types.
    """
    obj = reconstructor(*reconstructor_args)
    if state:
        obj.__dict__.update(state)
    return type(obj)._intern(obj)
_TypeMetaclass
python
realpython__materials
python-313/typing/generic_queue.py
{ "start": 32, "end": 776 }
class ____[T]:
    def __init__(self) -> None:
        self.elements: deque[T] = deque()

    def push(self, element: T) -> None:
        self.elements.append(element)

    def pop(self) -> T:
        return self.elements.popleft()

# %% Python 3.13
#
# class Queue[T=str]:
#     def __init__(self) -> None:
#         self.elements: deque[T] = deque()
#
#     def push(self, element: T) -> None:
#         self.elements.append(element)
#
#     def pop(self) -> T:
#         return self.elements.popleft()

# %% Use the queue
#
string_queue = Queue()
integer_queue = Queue[int]()

string_queue.push("three")
string_queue.push("thirteen")
print(string_queue.elements)

integer_queue.push(3)
integer_queue.push(13)
print(integer_queue.elements)
Queue
python
walkccc__LeetCode
solutions/792. Number of Matching Subsequences/792-2.py
{ "start": 0, "end": 729 }
class ____:
    def numMatchingSubseq(self, s: str, words: list[str]) -> int:
        ans = 0
        # [(i, j)] := words[i] and the letter words[i][j] is waiting for
        bucket = [[] for _ in range(26)]

        # For each word, it's waiting for word[0].
        for i, word in enumerate(words):
            bucket[ord(word[0]) - ord('a')].append((i, 0))

        for c in s:
            # Let prevBucket = bucket[c] and clear bucket[c].
            index = ord(c) - ord('a')
            prevBucket = bucket[index]
            bucket[index] = []
            for i, j in prevBucket:
                j += 1
                if j == len(words[i]):  # All the letters in words[i] are matched.
                    ans += 1
                else:
                    bucket[ord(words[i][j]) - ord('a')].append((i, j))

        return ans
Solution
python
ansible__ansible
lib/ansible/module_utils/facts/system/dns.py
{ "start": 841, "end": 2664 }
class ____(BaseFactCollector):
    name = 'dns'
    _fact_ids = set()  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        dns_facts = {}

        # TODO: flatten
        dns_facts['dns'] = {}

        for line in get_file_content('/etc/resolv.conf', '').splitlines():
            if line.startswith('#') or line.startswith(';') or line.strip() == '':
                continue
            tokens = line.split()
            if len(tokens) == 0:
                continue
            if tokens[0] == 'nameserver':
                if 'nameservers' not in dns_facts['dns']:
                    dns_facts['dns']['nameservers'] = []
                for nameserver in tokens[1:]:
                    dns_facts['dns']['nameservers'].append(nameserver)
            elif tokens[0] == 'domain':
                if len(tokens) > 1:
                    dns_facts['dns']['domain'] = tokens[1]
            elif tokens[0] == 'search':
                dns_facts['dns']['search'] = []
                for suffix in tokens[1:]:
                    dns_facts['dns']['search'].append(suffix)
            elif tokens[0] == 'sortlist':
                dns_facts['dns']['sortlist'] = []
                for address in tokens[1:]:
                    dns_facts['dns']['sortlist'].append(address)
            elif tokens[0] == 'options':
                dns_facts['dns']['options'] = {}
                if len(tokens) > 1:
                    for option in tokens[1:]:
                        option_tokens = option.split(':', 1)
                        if len(option_tokens) == 0:
                            continue
                        val = len(option_tokens) == 2 and option_tokens[1] or True
                        dns_facts['dns']['options'][option_tokens[0]] = val

        return dns_facts
DnsFactCollector
python
walkccc__LeetCode
solutions/700. Search in a Binary Search Tree/700.py
{ "start": 0, "end": 278 }
class ____:
    def searchBST(self, root: TreeNode | None, val: int) -> TreeNode | None:
        if not root:
            return None
        if root.val == val:
            return root
        if root.val > val:
            return self.searchBST(root.left, val)
        return self.searchBST(root.right, val)
Solution
python
viewflow__viewflow
viewflow/forms/renderers.py
{ "start": 2809, "end": 2877 }
class ____(InputRenderer):
    tag = "vf-field-file"
FileInputRenderer
python
kamyu104__LeetCode-Solutions
Python/most-common-word.py
{ "start": 111, "end": 655 }
class ____(object):
    def mostCommonWord(self, paragraph, banned):
        """
        :type paragraph: str
        :type banned: List[str]
        :rtype: str
        """
        lookup = set(banned)
        counts = collections.Counter(word.strip("!?',.") for word in paragraph.lower().split())
        result = ''
        for word in counts:
            if (not result or counts[word] > counts[result]) and \
                    word not in lookup:
                result = word
        return result
Solution
python
nedbat__coveragepy
tests/test_process.py
{ "start": 49568, "end": 57622 }
class ____(CoverageTest): """Test that we can measure coverage in subprocesses.""" def make_main_and_sub(self) -> None: """Create main.py and sub.py.""" # Main will run sub.py self.make_file( "main.py", """\ import os, os.path, sys ex = os.path.basename(sys.executable) os.system(ex + " sub.py") """, ) # sub.py will write a few lines. self.make_file( "sub.py", """\ f = open("out.txt", "w", encoding="utf-8") f.write("Hello, world!\\n") f.close() """, ) def test_patch_subprocess(self) -> None: self.make_main_and_sub() self.make_file( ".coveragerc", """\ [run] patch = subprocess """, ) self.run_command("coverage run main.py") self.run_command("coverage combine") self.assert_exists(".coverage") data = coverage.CoverageData() data.read() assert line_counts(data)["main.py"] == 3 assert line_counts(data)["sub.py"] == 3 def test_subprocess_with_pth_files(self, _create_pth_file: None) -> None: # An existing data file should not be read when a subprocess gets # measured automatically. Create the data file here with bogus data in # it. self.make_main_and_sub() data = coverage.CoverageData(".mycovdata") data.add_lines({os.path.abspath("sub.py"): range(100)}) data.write() self.make_file( "coverage.ini", """\ [run] data_file = .mycovdata """, ) self.set_environ("COVERAGE_PROCESS_START", "coverage.ini") import main # pylint: disable=unused-import, import-error with open("out.txt", encoding="utf-8") as f: assert f.read() == "Hello, world!\n" # Read the data from .coverage self.assert_exists(".mycovdata") data = coverage.CoverageData(".mycovdata") data.read() assert line_counts(data)["sub.py"] == 3 def test_subprocess_with_pth_files_and_parallel(self, _create_pth_file: None) -> None: # https://github.com/coveragepy/coveragepy/issues/492 self.make_main_and_sub() self.make_file( "coverage.ini", """\ [run] parallel = true """, ) self.set_environ("COVERAGE_PROCESS_START", "coverage.ini") self.run_command("coverage run main.py") with open("out.txt", encoding="utf-8") as f: assert f.read() == "Hello, world!\n" self.run_command("coverage combine") # assert that the combined .coverage data file is correct self.assert_exists(".coverage") data = coverage.CoverageData() data.read() assert line_counts(data)["sub.py"] == 3 # assert that there are *no* extra data files left over after a combine data_files = glob.glob(os.getcwd() + "/.coverage*") msg = ( "Expected only .coverage after combine, looks like there are " + f"extra data files that were not cleaned up: {data_files!r}" ) assert len(data_files) == 1, msg def test_subprocess_in_directories(self) -> None: # Bug 2025: patch=subprocess didn't find data files from subdirectory # subprocesses. 
self.make_file( "main.py", """\ import subprocess import sys print(subprocess.check_output( [sys.executable, "subproc.py"], cwd="subdir", encoding="utf-8", )) """, ) self.make_file( "subdir/subproc.py", """\ with open("readme.txt", encoding="utf-8") as f: print(f.read(), end="") """, ) self.make_file( ".coveragerc", """\ [run] patch = subprocess data_file = .covdata """, ) self.make_file("subdir/readme.txt", "hello") out = self.run_command("coverage run main.py") assert out == "hello\n" self.run_command("coverage combine") data = coverage.CoverageData(".covdata") data.read() assert line_counts(data)["main.py"] == 6 assert line_counts(data)["subproc.py"] == 2 @pytest.mark.skipif( not testenv.CAN_MEASURE_BRANCHES, reason="Can't measure branches with this core" ) def test_subprocess_gets_nonfile_config(self) -> None: # https://github.com/coveragepy/coveragepy/issues/2021 self.make_file( "subfunctions.py", """\ import subprocess, sys def f1(): print("function 1") def f2(): print("function 2") functions = [f1, f2] cases = sys.argv[1:] if len(cases) > 1: for c in cases: subprocess.call([sys.executable, __file__, c]) else: functions[int(cases[0])]() """, ) self.make_file( ".coveragerc", """\ [run] patch = subprocess """, ) out = self.run_command("coverage run --branch subfunctions.py 0 1") assert out.endswith("function 1\nfunction 2\n") self.run_command("coverage combine") data = coverage.CoverageData() data.read() assert line_counts(data)["subfunctions.py"] == 11 def test_subprocess_dir_with_source(self) -> None: # https://github.com/coveragepy/coveragepy/issues/1499 self.make_file("main/d/README", "A sub-directory") self.make_file( "main/main.py", """\ import os, subprocess, sys orig = os.getcwd() os.chdir("./d") subprocess.run([sys.executable, f"{orig}/sub.py"]) os.chdir(orig) """, ) self.make_file("lib/other.py", "print('Other', flush=True)") self.make_file( "main/sub.py", """ import other print("Hello, world!", flush=True) """, ) self.make_file( "main/pyproject.toml", """\ [tool.coverage.run] patch = ["subprocess"] source = [".", "other"] disable_warnings = ["module-not-imported"] """, ) self.set_environ("PYTHONPATH", os.path.abspath("lib")) with change_dir("main"): out = self.run_command("coverage run main.py") assert out == "Other\nHello, world!\n" self.run_command("coverage combine") data = coverage.CoverageData() data.read() assert line_counts(data) == {"main.py": 5, "sub.py": 2, "other.py": 1} @pytest.fixture def _clean_pth_files() -> Iterable[None]: """A fixture to clean up any .pth files we created during the test.""" # The execv test needs to make .pth files so that subprocesses will get # measured. But there's no way for coverage to remove those files because # they need to exist when the new program starts, and there's no # information carried over to clean them automatically. # # So for these tests, we clean them as part of the test suite. pth_files: set[Path] = set() for d in site.getsitepackages(): pth_files.update(Path(d).glob("*.pth")) try: yield finally: for d in site.getsitepackages(): for pth in Path(d).glob("*.pth"): if pth not in pth_files: pth.unlink() @pytest.mark.skipif(env.WINDOWS, reason="patch=execv isn't supported on Windows") @pytest.mark.xdist_group(name="needs_pth")
ProcessStartupTest
python
astropy__astropy
astropy/config/configuration.py
{ "start": 1635, "end": 1861 }
class ____(ValueError):
    """An exception that is raised when the configuration defaults (which
    should be generated at build-time) are missing.
    """


# this is used in astropy/__init__.py
ConfigurationDefaultMissingError
python
walkccc__LeetCode
solutions/370. Range Addition/370.py
{ "start": 0, "end": 302 }
class ____:
    def getModifiedArray(
        self,
        length: int,
        updates: list[list[int]],
    ) -> list[int]:
        line = [0] * length

        for start, end, inc in updates:
            line[start] += inc
            if end + 1 < length:
                line[end + 1] -= inc

        return itertools.accumulate(line)
Solution
python
scrapy__scrapy
scrapy/exporters.py
{ "start": 11945, "end": 12287 }
class ____(BaseItemExporter):
    def __init__(self, file: BytesIO, **kwargs: Any):
        super().__init__(**kwargs)
        self.file: BytesIO = file

    def export_item(self, item: Any) -> None:
        itemdict = dict(self._get_serialized_fields(item))
        self.file.write(to_bytes(pprint.pformat(itemdict) + "\n"))
PprintItemExporter
python
PrefectHQ__prefect
src/prefect/utilities/dockerutils.py
{ "start": 13623, "end": 21608 }
class ____(Exception): """Raised when a Docker image push fails""" @silence_docker_warnings() def push_image( image_id: str, registry_url: str, name: str, tag: Optional[str] = None, stream_progress_to: Optional[TextIO] = None, ) -> str: """Pushes a local image to a Docker registry, returning the registry-qualified tag for that image This assumes that the environment's Docker daemon is already authenticated to the given registry, and currently makes no attempt to authenticate. Args: image_id (str): a Docker image ID registry_url (str): the URL of a Docker registry name (str): the name of this image tag (str): the tag to give this image (defaults to a short representation of the image's ID) stream_progress_to: an optional stream (like sys.stdout, or an io.TextIO) that will collect the build output as it is reported by Docker Returns: A registry-qualified tag, like my-registry.example.com/my-image:abcdefg """ if not tag: tag = slugify(prefect.types._datetime.now("UTC").isoformat()) _, registry, _, _, _ = urlsplit(registry_url) repository = f"{registry}/{name}" with docker_client() as client: image: "Image" = client.images.get(image_id) image.tag(repository, tag=tag) # type: ignore # typing stub is not complete events = cast( Iterator[dict[str, Any]], client.api.push(repository, tag=tag, stream=True, decode=True), # type: ignore # typing stub is not complete ) try: for event in events: if "status" in event: if not stream_progress_to: continue stream_progress_to.write(event["status"]) if "progress" in event: stream_progress_to.write(" " + event["progress"]) stream_progress_to.write("\n") stream_progress_to.flush() elif "error" in event: raise PushError(event["error"]) finally: client.api.remove_image(f"{repository}:{tag}", noprune=True) # type: ignore # typing stub is not complete return f"{repository}:{tag}" def to_run_command(command: list[str]) -> str: """ Convert a process-style list of command arguments to a single Dockerfile RUN instruction. """ if not command: return "" run_command = f"RUN {command[0]}" if len(command) > 1: run_command += " " + " ".join([repr(arg) for arg in command[1:]]) # TODO: Consider performing text-wrapping to improve readability of the generated # Dockerfile # return textwrap.wrap( # run_command, # subsequent_indent=" " * 4, # break_on_hyphens=False, # break_long_words=False # ) return run_command def parse_image_tag(name: str) -> tuple[str, Optional[str]]: """ Parse Docker Image String - If a tag or digest exists, this function parses and returns the image registry and tag/digest, separately as a tuple. - Example 1: 'prefecthq/prefect:latest' -> ('prefecthq/prefect', 'latest') - Example 2: 'hostname.io:5050/folder/subfolder:latest' -> ('hostname.io:5050/folder/subfolder', 'latest') - Example 3: 'prefecthq/prefect@sha256:abc123' -> ('prefecthq/prefect', 'sha256:abc123') - Supports parsing Docker Image strings that follow Docker Image Specification v1.1.0 - Image building tools typically enforce this standard Args: name (str): Name of Docker Image Return: tuple: image registry, image tag/digest """ tag = None name_parts = name.split("/") # First handles the simplest image names (DockerHub-based, index-free, potentially with a tag or digest) # - Example: simplename:latest or simplename@sha256:abc123 if len(name_parts) == 1: if "@" in name_parts[0]: image_name, tag = name_parts[0].split("@") elif ":" in name_parts[0]: image_name, tag = name_parts[0].split(":") else: image_name = name_parts[0] else: # 1. 
Separates index (hostname.io or prefecthq) from path:tag (folder/subfolder:latest or prefect:latest) # 2. Separates path and tag/digest (if exists) # 3. Reunites index and path (without tag/digest) as image name index_name = name_parts[0] image_path = "/".join(name_parts[1:]) if "@" in image_path: image_path, tag = image_path.split("@") elif ":" in image_path: image_path, tag = image_path.split(":") image_name = f"{index_name}/{image_path}" return image_name, tag def split_repository_path(repository_path: str) -> tuple[Optional[str], str]: """ Splits a Docker repository path into its namespace and repository components. Args: repository_path: The Docker repository path to split. Returns: Tuple[Optional[str], str]: A tuple containing the namespace and repository components. - namespace (Optional[str]): The Docker namespace, combining the registry and organization. None if not present. - repository (Optionals[str]): The repository name. """ parts = repository_path.split("/", 2) # Check if the path includes a registry and organization or just organization/repository if len(parts) == 3 or (len(parts) == 2 and ("." in parts[0] or ":" in parts[0])): # Namespace includes registry and organization namespace = "/".join(parts[:-1]) repository = parts[-1] elif len(parts) == 2: # Only organization/repository provided, so namespace is just the first part namespace = parts[0] repository = parts[1] else: # No namespace provided namespace = None repository = parts[0] return namespace, repository def format_outlier_version_name(version: str) -> str: """ Formats outlier docker version names to pass `packaging.version.parse` validation - Current cases are simple, but creates stub for more complicated formatting if eventually needed. - Example outlier versions that throw a parsing exception: - "20.10.0-ce" (variant of community edition label) - "20.10.0-ee" (variant of enterprise edition label) Args: version (str): raw docker version value Returns: str: value that can pass `packaging.version.parse` validation """ return version.replace("-ce", "").replace("-ee", "") @contextmanager def generate_default_dockerfile(context: Optional[Path] = None): """ Generates a default Dockerfile used for deploying flows. The Dockerfile is written to a temporary file and yielded. The temporary file is removed after the context manager exits. Args: - context: The context to use for the Dockerfile. Defaults to the current working directory. """ if not context: context = Path.cwd() lines: list[str] = [] base_image = get_prefect_image_name() lines.append(f"FROM {base_image}") dir_name = context.name if (context / "requirements.txt").exists(): lines.append(f"COPY requirements.txt /opt/prefect/{dir_name}/requirements.txt") lines.append(f"RUN uv pip install -r /opt/prefect/{dir_name}/requirements.txt") lines.append(f"COPY . /opt/prefect/{dir_name}/") lines.append(f"WORKDIR /opt/prefect/{dir_name}/") temp_dockerfile = context / "Dockerfile" if Path(temp_dockerfile).exists(): raise RuntimeError( "Failed to generate Dockerfile. Dockerfile already exists in the" " current directory." ) with Path(temp_dockerfile).open("w") as f: f.writelines(line + "\n" for line in lines) try: yield temp_dockerfile finally: temp_dockerfile.unlink()
PushError
python
automl__auto-sklearn
test/test_evaluation/test_abstract_evaluator.py
{ "start": 627, "end": 11538 }
class ____(unittest.TestCase): _multiprocess_can_split_ = True def setUp(self): """ Creates a backend mock """ self.ev_path = os.path.join(this_directory, ".tmp_evaluations") if not os.path.exists(self.ev_path): os.mkdir(self.ev_path) dummy_model_files = [os.path.join(self.ev_path, str(n)) for n in range(100)] dummy_pred_files = [os.path.join(self.ev_path, str(n)) for n in range(100, 200)] backend_mock = unittest.mock.Mock() backend_mock.get_model_dir.return_value = self.ev_path backend_mock.get_model_path.side_effect = dummy_model_files backend_mock.get_prediction_output_path.side_effect = dummy_pred_files D = get_multiclass_classification_datamanager() backend_mock.load_datamanager.return_value = D backend_mock.temporary_directory = tempfile.gettempdir() self.backend_mock = backend_mock self.port = logging.handlers.DEFAULT_TCP_LOGGING_PORT self.working_directory = os.path.join(this_directory, ".tmp_%s" % self.id()) def tearDown(self): if os.path.exists(self.ev_path): try: os.rmdir(self.ev_path) except: # noqa E722 pass def test_finish_up_model_predicts_NaN(self): """Tests by handing in predictions which contain NaNs""" rs = np.random.RandomState(1) queue_mock = unittest.mock.Mock() ae = AbstractEvaluator( backend=self.backend_mock, port=self.port, output_y_hat_optimization=False, queue=queue_mock, metrics=[accuracy], additional_components=dict(), ) ae.Y_optimization = rs.rand(33, 3) predictions_ensemble = rs.rand(33, 3) predictions_test = rs.rand(25, 3) # NaNs in prediction ensemble predictions_ensemble[5, 2] = np.NaN _, loss, _, additional_run_info = ae.finish_up( loss=0.1, train_loss=0.1, opt_pred=predictions_ensemble, test_pred=predictions_test, additional_run_info=None, final_call=True, file_output=True, status=StatusType.SUCCESS, ) self.assertEqual(loss, 1.0) self.assertEqual( additional_run_info, {"error": "Model predictions for optimization set contains NaNs."}, ) predictions_ensemble = rs.rand(33, 3) predictions_test[5, 2] = np.NaN _, loss, _, additional_run_info = ae.finish_up( loss=0.1, train_loss=0.1, opt_pred=predictions_ensemble, test_pred=predictions_test, additional_run_info=None, final_call=True, file_output=True, status=StatusType.SUCCESS, ) self.assertEqual(loss, 1.0) self.assertEqual( additional_run_info, {"error": "Model predictions for test set contains NaNs."}, ) self.assertEqual(self.backend_mock.save_predictions_as_npy.call_count, 0) def test_disable_file_output(self): queue_mock = unittest.mock.Mock() rs = np.random.RandomState(1) ae = AbstractEvaluator( backend=self.backend_mock, queue=queue_mock, disable_file_output=True, metrics=[accuracy], port=self.port, additional_components=dict(), ) predictions_ensemble = rs.rand(33, 3) predictions_test = rs.rand(25, 3) loss_, additional_run_info_ = ae.file_output( predictions_ensemble, predictions_test, ) self.assertIsNone(loss_) self.assertEqual(additional_run_info_, {}) # This function is never called as there is a return before self.assertEqual(self.backend_mock.save_numrun_to_dir.call_count, 0) for call_count, disable in enumerate(["model", "cv_model"], start=1): ae = AbstractEvaluator( backend=self.backend_mock, output_y_hat_optimization=False, queue=queue_mock, disable_file_output=[disable], metrics=[accuracy], port=self.port, additional_components=dict(), ) ae.Y_optimization = predictions_ensemble ae.model = unittest.mock.Mock() ae.models = [unittest.mock.Mock()] loss_, additional_run_info_ = ae.file_output( predictions_ensemble, predictions_test, ) self.assertIsNone(loss_) self.assertEqual(additional_run_info_, 
{}) self.assertEqual( self.backend_mock.save_numrun_to_dir.call_count, call_count ) if disable == "model": self.assertIsNone( self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]["model"] ) self.assertIsNotNone( self.backend_mock.save_numrun_to_dir.call_args_list[-1][1][ "cv_model" ] ) else: self.assertIsNotNone( self.backend_mock.save_numrun_to_dir.call_args_list[-1][1]["model"] ) self.assertIsNone( self.backend_mock.save_numrun_to_dir.call_args_list[-1][1][ "cv_model" ] ) self.assertIsNotNone( self.backend_mock.save_numrun_to_dir.call_args_list[-1][1][ "ensemble_predictions" ] ) self.assertIsNotNone( self.backend_mock.save_numrun_to_dir.call_args_list[-1][1][ "test_predictions" ] ) ae = AbstractEvaluator( backend=self.backend_mock, output_y_hat_optimization=False, queue=queue_mock, metrics=[accuracy], disable_file_output=["y_optimization"], port=self.port, additional_components=dict(), ) ae.Y_optimization = predictions_ensemble ae.model = "model" ae.models = [unittest.mock.Mock()] loss_, additional_run_info_ = ae.file_output( predictions_ensemble, predictions_test, ) self.assertIsNone(loss_) self.assertEqual(additional_run_info_, {}) self.assertIsNone( self.backend_mock.save_numrun_to_dir.call_args_list[-1][1][ "ensemble_predictions" ] ) self.assertIsNotNone( self.backend_mock.save_numrun_to_dir.call_args_list[-1][1][ "test_predictions" ] ) def test_file_output(self): shutil.rmtree(self.working_directory, ignore_errors=True) os.mkdir(self.working_directory) queue_mock = unittest.mock.Mock() context = BackendContext( temporary_directory=os.path.join(self.working_directory, "tmp"), output_directory=os.path.join(self.working_directory, "tmp_output"), delete_tmp_folder_after_terminate=True, delete_output_folder_after_terminate=True, prefix="auto-sklearn", ) with unittest.mock.patch.object( Backend, "load_datamanager" ) as load_datamanager_mock: load_datamanager_mock.return_value = ( get_multiclass_classification_datamanager() ) backend = Backend(context, prefix="auto-sklearn") ae = AbstractEvaluator( backend=backend, output_y_hat_optimization=False, queue=queue_mock, metrics=[accuracy], port=self.port, additional_components=dict(), ) ae.model = sklearn.dummy.DummyClassifier() rs = np.random.RandomState(1) ae.Y_optimization = rs.rand(33, 3) predictions_ensemble = rs.rand(33, 3) predictions_test = rs.rand(25, 3) ae.file_output( Y_optimization_pred=predictions_ensemble, Y_test_pred=predictions_test, ) self.assertTrue( os.path.exists( os.path.join( self.working_directory, "tmp", ".auto-sklearn", "runs", "1_0_None", ) ) ) shutil.rmtree(self.working_directory, ignore_errors=True) def test_add_additional_components(self): shutil.rmtree(self.working_directory, ignore_errors=True) os.mkdir(self.working_directory) queue_mock = unittest.mock.Mock() context = BackendContext( temporary_directory=os.path.join(self.working_directory, "tmp"), output_directory=os.path.join(self.working_directory, "tmp_output"), delete_tmp_folder_after_terminate=True, delete_output_folder_after_terminate=True, prefix="auto-sklearn", ) with unittest.mock.patch.object( Backend, "load_datamanager" ) as load_datamanager_mock: load_datamanager_mock.return_value = ( get_multiclass_classification_datamanager() ) backend = Backend(context, prefix="auto-sklearn") with unittest.mock.patch.object( _addons["classification"], "add_component" ) as _: # If the components in the argument `additional_components` are an # empty dict there is no call to `add_component`, # if there's something in it, `add_component is called (2nd case) for 
fixture, case in ((0, dict()), (1, dict(abc="def"))): thirdparty_components_patch = unittest.mock.Mock() thirdparty_components_patch.components = case additional_components = dict( classification=thirdparty_components_patch ) AbstractEvaluator( backend=backend, output_y_hat_optimization=False, queue=queue_mock, metrics=[accuracy], port=self.port, additional_components=additional_components, ) self.assertEqual( _addons["classification"].add_component.call_count, fixture )
AbstractEvaluatorTest
python
mlflow__mlflow
mlflow/entities/trace_info.py
{ "start": 717, "end": 9744 }
class ____(_MlflowObject): """Metadata about a trace, such as its ID, location, timestamp, etc. Args: trace_id: The primary identifier for the trace. trace_location: The location where the trace is stored, represented as a :py:class:`~mlflow.entities.TraceLocation` object. MLflow currently support MLflow Experiment or Databricks Inference Table as a trace location. request_time: Start time of the trace, in milliseconds. state: State of the trace, represented as a :py:class:`~mlflow.entities.TraceState` enum. Can be one of [`OK`, `ERROR`, `IN_PROGRESS`, `STATE_UNSPECIFIED`]. request_preview: Request to the model/agent, equivalent to the input of the root, span but JSON-encoded and can be truncated. response_preview: Response from the model/agent, equivalent to the output of the root span but JSON-encoded and can be truncated. client_request_id: Client supplied request ID associated with the trace. This could be used to identify the trace/request from an external system that produced the trace, e.g., a session ID in a web application. execution_duration: Duration of the trace, in milliseconds. trace_metadata: Key-value pairs associated with the trace. They are designed for immutable values like run ID associated with the trace. tags: Tags associated with the trace. They are designed for mutable values, that can be updated after the trace is created via MLflow UI or API. assessments: List of assessments associated with the trace. """ trace_id: str trace_location: TraceLocation request_time: int state: TraceState request_preview: str | None = None response_preview: str | None = None client_request_id: str | None = None execution_duration: int | None = None trace_metadata: dict[str, str] = field(default_factory=dict) tags: dict[str, str] = field(default_factory=dict) assessments: list[Assessment] = field(default_factory=list) def to_dict(self) -> dict[str, Any]: """Convert the TraceInfoV3 object to a dictionary.""" res = MessageToDict(self.to_proto(), preserving_proto_field_name=True) if self.execution_duration is not None: res.pop("execution_duration", None) res["execution_duration_ms"] = self.execution_duration # override trace_id to be the same as trace_info.trace_id since it's parsed # when converting to proto if it's v4 res["trace_id"] = self.trace_id return res @classmethod def from_dict(cls, d: dict[str, Any]) -> "TraceInfo": """Create a TraceInfoV3 object from a dictionary.""" if "request_id" in d: from mlflow.entities.trace_info_v2 import TraceInfoV2 return TraceInfoV2.from_dict(d).to_v3() d = d.copy() if assessments := d.get("assessments"): d["assessments"] = [Assessment.from_dictionary(a) for a in assessments] if trace_location := d.get("trace_location"): d["trace_location"] = TraceLocation.from_dict(trace_location) if state := d.get("state"): d["state"] = TraceState(state) if request_time := d.get("request_time"): timestamp = Timestamp() timestamp.FromJsonString(request_time) d["request_time"] = timestamp.ToMilliseconds() if (execution_duration := d.pop("execution_duration_ms", None)) is not None: d["execution_duration"] = execution_duration return cls(**d) def to_proto(self) -> ProtoTraceInfoV3 | ProtoTraceInfoV4: from mlflow.entities.trace_info_v2 import _truncate_request_metadata, _truncate_tags if self._is_v4(): from mlflow.utils.databricks_tracing_utils import trace_info_to_v4_proto return trace_info_to_v4_proto(self) request_time = Timestamp() request_time.FromMilliseconds(self.request_time) execution_duration = None if self.execution_duration is not None: execution_duration = 
Duration() execution_duration.FromMilliseconds(self.execution_duration) return ProtoTraceInfoV3( trace_id=self.trace_id, client_request_id=self.client_request_id, trace_location=self.trace_location.to_proto(), request_preview=self.request_preview, response_preview=self.response_preview, request_time=request_time, execution_duration=execution_duration, state=self.state.to_proto(), trace_metadata=_truncate_request_metadata(self.trace_metadata), tags=_truncate_tags(self.tags), assessments=[a.to_proto() for a in self.assessments], ) @classmethod def from_proto(cls, proto) -> "TraceInfo": if "request_id" in proto.DESCRIPTOR.fields_by_name: from mlflow.entities.trace_info_v2 import TraceInfoV2 return TraceInfoV2.from_proto(proto).to_v3() # import inside the function to avoid introducing top-level dependency on # mlflow.tracing.utils in entities module from mlflow.tracing.utils import construct_trace_id_v4 trace_location = TraceLocation.from_proto(proto.trace_location) if trace_location.uc_schema: trace_id = construct_trace_id_v4( location=f"{trace_location.uc_schema.catalog_name}.{trace_location.uc_schema.schema_name}", trace_id=proto.trace_id, ) else: trace_id = proto.trace_id return cls( trace_id=trace_id, client_request_id=( proto.client_request_id if proto.HasField("client_request_id") else None ), trace_location=trace_location, request_preview=proto.request_preview if proto.HasField("request_preview") else None, response_preview=proto.response_preview if proto.HasField("response_preview") else None, request_time=proto.request_time.ToMilliseconds(), execution_duration=( proto.execution_duration.ToMilliseconds() if proto.HasField("execution_duration") else None ), state=TraceState.from_proto(proto.state), trace_metadata=dict(proto.trace_metadata), tags=dict(proto.tags), assessments=[Assessment.from_proto(a) for a in proto.assessments], ) # Aliases for backward compatibility with V2 format @property def request_id(self) -> str: """Deprecated. Use `trace_id` instead.""" return self.trace_id @property def experiment_id(self) -> str | None: """ An MLflow experiment ID associated with the trace, if the trace is stored in MLflow tracking server. Otherwise, None. """ return ( self.trace_location.mlflow_experiment and self.trace_location.mlflow_experiment.experiment_id ) @experiment_id.setter def experiment_id(self, value: str | None) -> None: self.trace_location.mlflow_experiment.experiment_id = value @property def request_metadata(self) -> dict[str, str]: """Deprecated. Use `trace_metadata` instead.""" return self.trace_metadata @property def timestamp_ms(self) -> int: return self.request_time @timestamp_ms.setter def timestamp_ms(self, value: int) -> None: self.request_time = value @property def execution_time_ms(self) -> int | None: return self.execution_duration @execution_time_ms.setter def execution_time_ms(self, value: int | None) -> None: self.execution_duration = value @property def status(self) -> TraceStatus: """Deprecated. Use `state` instead.""" return TraceStatus.from_state(self.state) @status.setter def status(self, value: TraceStatus) -> None: self.state = value.to_state() @property def token_usage(self) -> dict[str, int] | None: """ Returns the aggregated token usage for the trace. Returns: A dictionary containing the aggregated LLM token usage for the trace. - "input_tokens": The total number of input tokens. - "output_tokens": The total number of output tokens. - "total_tokens": Sum of input and output tokens. .. 
note:: The token usage tracking is not supported for all LLM providers. Refer to the MLflow Tracing documentation for which providers support token usage tracking. """ if usage_json := self.trace_metadata.get(TraceMetadataKey.TOKEN_USAGE): return json.loads(usage_json) return None def _is_v4(self) -> bool: return self.trace_location.uc_schema is not None
TraceInfo
python
psf__black
src/blib2to3/pgen2/parse.py
{ "start": 4141, "end": 15480 }
class ____: """Parser engine. The proper usage sequence is: p = Parser(grammar, [converter]) # create instance p.setup([start]) # prepare for parsing <for each input token>: if p.addtoken(...): # parse a token; may raise ParseError break root = p.rootnode # root of abstract syntax tree A Parser instance may be reused by calling setup() repeatedly. A Parser instance contains state pertaining to the current token sequence, and should not be used concurrently by different threads to parse separate token sequences. See driver.py for how to get input tokens by tokenizing a file or string. Parsing is complete when addtoken() returns True; the root of the abstract syntax tree can then be retrieved from the rootnode instance variable. When a syntax error occurs, addtoken() raises the ParseError exception. There is no error recovery; the parser cannot be used after a syntax error was reported (but it can be reinitialized by calling setup()). """ def __init__(self, grammar: Grammar, convert: Convert | None = None) -> None: """Constructor. The grammar argument is a grammar.Grammar instance; see the grammar module for more information. The parser is not ready yet for parsing; you must call the setup() method to get it started. The optional convert argument is a function mapping concrete syntax tree nodes to abstract syntax tree nodes. If not given, no conversion is done and the syntax tree produced is the concrete syntax tree. If given, it must be a function of two arguments, the first being the grammar (a grammar.Grammar instance), and the second being the concrete syntax tree node to be converted. The syntax tree is converted from the bottom up. **post-note: the convert argument is ignored since for Black's usage, convert will always be blib2to3.pytree.convert. Allowing this to be dynamic hurts mypyc's ability to use early binding. These docs are left for historical and informational value. A concrete syntax tree node is a (type, value, context, nodes) tuple, where type is the node type (a token or symbol number), value is None for symbols and a string for tokens, context is None or an opaque value used for error reporting (typically a (lineno, offset) pair), and nodes is a list of children for symbols, and None for tokens. An abstract syntax tree node may be anything; this is entirely up to the converter function. """ self.grammar = grammar # See note in docstring above. TL;DR this is ignored. self.convert = convert or lam_sub self.is_backtracking = False self.last_token: int | None = None def setup(self, proxy: "TokenProxy", start: int | None = None) -> None: """Prepare for parsing. This *must* be called before starting to parse. The optional argument is an alternative start symbol; it defaults to the grammar's start symbol. You can use a Parser instance to parse any number of programs; each time you call setup() the parser is reset to an initial state determined by the (implicit or explicit) start symbol. """ if start is None: start = self.grammar.start # Each stack entry is a tuple: (dfa, state, node). # A node is a tuple: (type, value, context, children), # where children is a list of nodes or None, and context may be None. 
newnode: RawNode = (start, None, None, []) stackentry = (self.grammar.dfas[start], 0, newnode) self.stack: list[tuple[DFAS, int, RawNode]] = [stackentry] self.rootnode: NL | None = None self.used_names: set[str] = set() self.proxy = proxy self.last_token = None def addtoken(self, type: int, value: str, context: Context) -> bool: """Add a token; return True iff this is the end of the program.""" # Map from token to label ilabels = self.classify(type, value, context) assert len(ilabels) >= 1 # If we have only one state to advance, we'll directly # take it as is. if len(ilabels) == 1: [ilabel] = ilabels return self._addtoken(ilabel, type, value, context) # If there are multiple states which we can advance (only # happen under soft-keywords), then we will try all of them # in parallel and as soon as one state can reach further than # the rest, we'll choose that one. This is a pretty hacky # and hopefully temporary algorithm. # # For a more detailed explanation, check out this post: # https://tree.science/what-the-backtracking.html with self.proxy.release() as proxy: counter, force = 0, False recorder = Recorder(self, ilabels, context) recorder.add_token(type, value, raw=True) next_token_value = value while recorder.determine_route(next_token_value) is None: if not proxy.can_advance(counter): force = True break next_token_type, next_token_value, *_ = proxy.eat(counter) if next_token_type in (tokenize.COMMENT, tokenize.NL): counter += 1 continue if next_token_type == tokenize.OP: next_token_type = grammar.opmap[next_token_value] recorder.add_token(next_token_type, next_token_value) counter += 1 ilabel = cast(int, recorder.determine_route(next_token_value, force=force)) assert ilabel is not None return self._addtoken(ilabel, type, value, context) def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool: # Loop until the token is shifted; may raise exceptions while True: dfa, state, node = self.stack[-1] states, first = dfa arcs = states[state] # Look for a state with this label for i, newstate in arcs: t = self.grammar.labels[i][0] if t >= 256: # See if it's a symbol and if we're in its first set itsdfa = self.grammar.dfas[t] itsstates, itsfirst = itsdfa if ilabel in itsfirst: # Push a symbol self.push(t, itsdfa, newstate, context) break # To continue the outer while loop elif ilabel == i: # Look it up in the list of labels # Shift a token; we're done with it self.shift(type, value, newstate, context) # Pop while we are in an accept-only state state = newstate while states[state] == [(0, state)]: self.pop() if not self.stack: # Done parsing! return True dfa, state, node = self.stack[-1] states, first = dfa # Done with this token self.last_token = type return False else: if (0, state) in arcs: # An accepting state, pop it and try something else self.pop() if not self.stack: # Done parsing, but another token is input raise ParseError("too much input", type, value, context) else: # No success finding a transition raise ParseError("bad input", type, value, context) def classify(self, type: int, value: str, context: Context) -> list[int]: """Turn a token into a label. 
(Internal) Depending on whether the value is a soft-keyword or not, this function may return multiple labels to choose from.""" if type == token.NAME: # Keep a listing of all used names self.used_names.add(value) # Check for reserved words if value in self.grammar.keywords: return [self.grammar.keywords[value]] elif value in self.grammar.soft_keywords: assert type in self.grammar.tokens # Current soft keywords (match, case, type) can only appear at the # beginning of a statement. So as a shortcut, don't try to treat them # like keywords in any other context. # ('_' is also a soft keyword in the real grammar, but for our grammar # it's just an expression, so we don't need to treat it specially.) if self.last_token not in ( None, token.INDENT, token.DEDENT, token.NEWLINE, token.SEMI, token.COLON, ): return [self.grammar.tokens[type]] return [ self.grammar.tokens[type], self.grammar.soft_keywords[value], ] ilabel = self.grammar.tokens.get(type) if ilabel is None: raise ParseError("bad token", type, value, context) return [ilabel] def shift(self, type: int, value: str, newstate: int, context: Context) -> None: """Shift a token. (Internal)""" if self.is_backtracking: dfa, state, _ = self.stack[-1] self.stack[-1] = (dfa, newstate, DUMMY_NODE) else: dfa, state, node = self.stack[-1] rawnode: RawNode = (type, value, context, None) newnode = convert(self.grammar, rawnode) assert node[-1] is not None node[-1].append(newnode) self.stack[-1] = (dfa, newstate, node) def push(self, type: int, newdfa: DFAS, newstate: int, context: Context) -> None: """Push a nonterminal. (Internal)""" if self.is_backtracking: dfa, state, _ = self.stack[-1] self.stack[-1] = (dfa, newstate, DUMMY_NODE) self.stack.append((newdfa, 0, DUMMY_NODE)) else: dfa, state, node = self.stack[-1] newnode: RawNode = (type, None, context, []) self.stack[-1] = (dfa, newstate, node) self.stack.append((newdfa, 0, newnode)) def pop(self) -> None: """Pop a nonterminal. (Internal)""" if self.is_backtracking: self.stack.pop() else: popdfa, popstate, popnode = self.stack.pop() newnode = convert(self.grammar, popnode) if self.stack: dfa, state, node = self.stack[-1] assert node[-1] is not None node[-1].append(newnode) else: self.rootnode = newnode self.rootnode.used_names = self.used_names
Parser
python
tensorflow__tensorflow
tensorflow/python/autograph/tests/basic_ifexp_test.py
{ "start": 1284, "end": 1608 }
class ____(reference_test_base.TestCase):
    def test_basic(self):
        for x in [-1, 1, 5, tf.constant(-1), tf.constant(1), tf.constant(5)]:
            self.assertFunctionMatchesEager(consecutive_conds, x)
            self.assertFunctionMatchesEager(cond_with_multiple_values, x)


if __name__ == '__main__':
    tf.test.main()
ReferenceTest
python
getsentry__sentry
src/sentry/unmerge.py
{ "start": 4487, "end": 7376 }
class ____(abc.ABC): """ Parsed arguments of the Sentry unmerge task. Since events of the source issue are processed in batches, one can think of each batch as belonging to a state in a statemachine. That statemachine has only two states: Processing the first page (`InitialUnmergeArgs`), processing second, third, ... page (`SuccessiveUnmergeArgs`). On the first page postgres hashes are migrated, activity models are created, eventstream and pagination state is initialized, and so the successive tasks need to carry significantly more state with them. """ project_id: int source_id: int replacement: UnmergeReplacement actor_id: int | None batch_size: int @staticmethod def parse_arguments( project_id: int, source_id: int, destination_id: int | None, fingerprints: Sequence[str], actor_id: int | None, last_event: Mapping[str, Any] | None = None, batch_size: int = 500, source_fields_reset: bool = False, eventstream_state: EventstreamState | None = None, replacement: UnmergeReplacement | None = None, locked_primary_hashes: Collection[str] | None = None, destinations: Destinations | None = None, ) -> "UnmergeArgs": if destinations is None: if destination_id is not None: destinations = {_DEFAULT_UNMERGE_KEY: (destination_id, eventstream_state)} else: destinations = {} if last_event is None: assert eventstream_state is None assert not source_fields_reset return InitialUnmergeArgs( project_id=project_id, source_id=source_id, replacement=UnmergeReplacement.parse_arguments(fingerprints, replacement), actor_id=actor_id, batch_size=batch_size, destinations=destinations, ) else: assert locked_primary_hashes is not None or fingerprints is not None return SuccessiveUnmergeArgs( project_id=project_id, source_id=source_id, replacement=UnmergeReplacement.parse_arguments(fingerprints, replacement), actor_id=actor_id, batch_size=batch_size, last_event=last_event, destinations=destinations, locked_primary_hashes=locked_primary_hashes or fingerprints or [], source_fields_reset=source_fields_reset, ) def dump_arguments(self) -> Mapping[str, Any]: rv = dataclasses.asdict(self) rv["fingerprints"] = None rv["destination_id"] = None rv["replacement"]["type"] = _REPLACEMENT_TYPE_LABELS[type(self.replacement)] return rv @dataclass(frozen=True)
UnmergeArgsBase
python
huggingface__transformers
src/transformers/models/ovis2/modeling_ovis2.py
{ "start": 14751, "end": 15722 }
class ____(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers.
    Each layer is a [`Ovis2VisionEncoderLayer`].

    Args:
        config: Ovis2VisionConfig
    """

    def __init__(self, config: Ovis2VisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([Ovis2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    # Ignore copy
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            hidden_states = encoder_layer(hidden_states, attention_mask, **kwargs)

        return BaseModelOutput(last_hidden_state=hidden_states)
Ovis2VisionEncoder
python
encode__django-rest-framework
tests/test_serializer_lists.py
{ "start": 13850, "end": 22680 }
class ____: """ When not submitting key for list fields or multiple choice, partial serialization should result in an empty state (key not there), not an empty list. Regression test for Github issue #2761. """ def test_partial_listfield(self): class ListSerializer(serializers.Serializer): listdata = serializers.ListField() serializer = ListSerializer(data=MultiValueDict(), partial=True) result = serializer.to_internal_value(data={}) assert "listdata" not in result assert serializer.is_valid() assert serializer.validated_data == {} assert serializer.errors == {} def test_partial_multiplechoice(self): class MultipleChoiceSerializer(serializers.Serializer): multiplechoice = serializers.MultipleChoiceField(choices=[1, 2, 3]) serializer = MultipleChoiceSerializer(data=MultiValueDict(), partial=True) result = serializer.to_internal_value(data={}) assert "multiplechoice" not in result assert serializer.is_valid() assert serializer.validated_data == {} assert serializer.errors == {} def test_allow_empty_true(self): class ListSerializer(serializers.Serializer): update_field = serializers.IntegerField() store_field = serializers.IntegerField() instance = [ {'update_field': 11, 'store_field': 12}, {'update_field': 21, 'store_field': 22}, ] serializer = ListSerializer(instance, data=[], partial=True, many=True) assert serializer.is_valid() assert serializer.validated_data == [] assert serializer.errors == [] def test_update_allow_empty_true(self): class ListSerializer(serializers.Serializer): update_field = serializers.IntegerField() store_field = serializers.IntegerField() instance = [ {'update_field': 11, 'store_field': 12}, {'update_field': 21, 'store_field': 22}, ] input_data = [{'update_field': 31}, {'update_field': 41}] updated_data_list = [ {'update_field': 31, 'store_field': 12}, {'update_field': 41, 'store_field': 22}, ] serializer = ListSerializer( instance, data=input_data, partial=True, many=True) assert serializer.is_valid() for index, data in enumerate(serializer.validated_data): for key, value in data.items(): assert value == updated_data_list[index][key] assert serializer.errors == [] def test_allow_empty_false(self): class ListSerializer(serializers.Serializer): update_field = serializers.IntegerField() store_field = serializers.IntegerField() instance = [ {'update_field': 11, 'store_field': 12}, {'update_field': 21, 'store_field': 22}, ] serializer = ListSerializer( instance, data=[], allow_empty=False, partial=True, many=True) assert not serializer.is_valid() assert serializer.validated_data == [] assert len(serializer.errors) == 1 assert serializer.errors['non_field_errors'][0] == 'This list may not be empty.' 
def test_update_allow_empty_false(self): class ListSerializer(serializers.Serializer): update_field = serializers.IntegerField() store_field = serializers.IntegerField() instance = [ {'update_field': 11, 'store_field': 12}, {'update_field': 21, 'store_field': 22}, ] input_data = [{'update_field': 31}, {'update_field': 41}] updated_data_list = [ {'update_field': 31, 'store_field': 12}, {'update_field': 41, 'store_field': 22}, ] serializer = ListSerializer( instance, data=input_data, allow_empty=False, partial=True, many=True) assert serializer.is_valid() for index, data in enumerate(serializer.validated_data): for key, value in data.items(): assert value == updated_data_list[index][key] assert serializer.errors == [] def test_as_field_allow_empty_true(self): class ListSerializer(serializers.Serializer): update_field = serializers.IntegerField() store_field = serializers.IntegerField() class Serializer(serializers.Serializer): extra_field = serializers.IntegerField() list_field = ListSerializer(many=True) instance = { 'extra_field': 1, 'list_field': [ {'update_field': 11, 'store_field': 12}, {'update_field': 21, 'store_field': 22}, ] } serializer = Serializer(instance, data={}, partial=True) assert serializer.is_valid() assert serializer.validated_data == {} assert serializer.errors == {} def test_update_as_field_allow_empty_true(self): class ListSerializer(serializers.Serializer): update_field = serializers.IntegerField() store_field = serializers.IntegerField() class Serializer(serializers.Serializer): extra_field = serializers.IntegerField() list_field = ListSerializer(many=True) instance = { 'extra_field': 1, 'list_field': [ {'update_field': 11, 'store_field': 12}, {'update_field': 21, 'store_field': 22}, ] } input_data_1 = {'extra_field': 2} input_data_2 = { 'list_field': [ {'update_field': 31}, {'update_field': 41}, ] } # data_1 serializer = Serializer(instance, data=input_data_1, partial=True) assert serializer.is_valid() assert len(serializer.validated_data) == 1 assert serializer.validated_data['extra_field'] == 2 assert serializer.errors == {} # data_2 serializer = Serializer(instance, data=input_data_2, partial=True) assert serializer.is_valid() updated_data_list = [ {'update_field': 31, 'store_field': 12}, {'update_field': 41, 'store_field': 22}, ] for index, data in enumerate(serializer.validated_data['list_field']): for key, value in data.items(): assert value == updated_data_list[index][key] assert serializer.errors == {} def test_as_field_allow_empty_false(self): class ListSerializer(serializers.Serializer): update_field = serializers.IntegerField() store_field = serializers.IntegerField() class Serializer(serializers.Serializer): extra_field = serializers.IntegerField() list_field = ListSerializer(many=True, allow_empty=False) instance = { 'extra_field': 1, 'list_field': [ {'update_field': 11, 'store_field': 12}, {'update_field': 21, 'store_field': 22}, ] } serializer = Serializer(instance, data={}, partial=True) assert serializer.is_valid() assert serializer.validated_data == {} assert serializer.errors == {} def test_update_as_field_allow_empty_false(self): class ListSerializer(serializers.Serializer): update_field = serializers.IntegerField() store_field = serializers.IntegerField() class Serializer(serializers.Serializer): extra_field = serializers.IntegerField() list_field = ListSerializer(many=True, allow_empty=False) instance = { 'extra_field': 1, 'list_field': [ {'update_field': 11, 'store_field': 12}, {'update_field': 21, 'store_field': 22}, ] } input_data_1 = 
{'extra_field': 2} input_data_2 = { 'list_field': [ {'update_field': 31}, {'update_field': 41}, ] } updated_data_list = [ {'update_field': 31, 'store_field': 12}, {'update_field': 41, 'store_field': 22}, ] # data_1 serializer = Serializer(instance, data=input_data_1, partial=True) assert serializer.is_valid() assert serializer.errors == {} # data_2 serializer = Serializer(instance, data=input_data_2, partial=True) assert serializer.is_valid() for index, data in enumerate(serializer.validated_data['list_field']): for key, value in data.items(): assert value == updated_data_list[index][key] assert serializer.errors == {}
TestSerializerPartialUsage
python
networkx__networkx
networkx/algorithms/isomorphism/vf2userfunc.py
{ "start": 7268, "end": 7371 }
class ____(DiGraphMatcher): """VF2 isomorphism checker for directed multigraphs."""
MultiDiGraphMatcher
python
pydantic__pydantic
pydantic/v1/types.py
{ "start": 34622, "end": 35455 }
class ____(date, metaclass=ConstrainedNumberMeta): gt: OptionalDate = None ge: OptionalDate = None lt: OptionalDate = None le: OptionalDate = None @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none(field_schema, exclusiveMinimum=cls.gt, exclusiveMaximum=cls.lt, minimum=cls.ge, maximum=cls.le) @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield parse_date yield number_size_validator def condate( *, gt: date = None, ge: date = None, lt: date = None, le: date = None, ) -> Type[date]: # use kwargs then define conf in a dict to aid with IDE type hinting namespace = dict(gt=gt, ge=ge, lt=lt, le=le) return type('ConstrainedDateValue', (ConstrainedDate,), namespace)
ConstrainedDate
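As a quick, hedged illustration of how the `condate` factory above might be used (this is only a sketch: the `Booking` model, its `day` field, and the 2024 bounds are invented for the example, and it assumes pydantic v1's API is importable via the `pydantic.v1` namespace):

from datetime import date

from pydantic.v1 import BaseModel, ValidationError, condate


class Booking(BaseModel):
    # In this sketch only dates within 2024 pass validation.
    day: condate(ge=date(2024, 1, 1), le=date(2024, 12, 31))


print(Booking(day="2024-06-15").day)  # 2024-06-15

try:
    Booking(day="2023-12-31")
except ValidationError as exc:
    # number_size_validator rejects values below the `ge` bound.
    print(exc.errors()[0]["loc"])  # ('day',)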
python
kamyu104__LeetCode-Solutions
Python/sort-integers-by-the-number-of-1-bits.py
{ "start": 33, "end": 443 }
class ____(object): def sortByBits(self, arr): """ :type arr: List[int] :rtype: List[int] """ def popcount(n): # Time: O(logn) ~= O(1) if n is a 32-bit number result = 0 while n: n &= n - 1 result += 1 return result arr.sort(key=lambda x: (popcount(x), x)) return arr
Solution
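A small illustrative check of the sorting key above; the input is the first example from LeetCode 1356, and `Solution` refers to the class in the snippet:

print(Solution().sortByBits([0, 1, 2, 3, 4, 5, 6, 7, 8]))
# [0, 1, 2, 4, 8, 3, 5, 6, 7] -- grouped by rising popcount, ties broken by value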
python
huggingface__transformers
src/transformers/models/parakeet/configuration_parakeet.py
{ "start": 812, "end": 6987 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`ParakeetEncoder`]. It is used to instantiate a `ParakeetEncoder` model according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1024): Dimension of the layers and the hidden states. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 4096): Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the encoder and pooler. attention_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in the attention layers. convolution_bias (`bool`, *optional*, defaults to `True`): Whether to use bias in convolutions of the conformer's convolution module. conv_kernel_size (`int`, *optional*, defaults to 9): The kernel size of the convolution layers in the Conformer block. subsampling_factor (`int`, *optional*, defaults to 8): The factor by which the input sequence is subsampled. subsampling_conv_channels (`int`, *optional*, defaults to 256): The number of channels in the subsampling convolution layers. num_mel_bins (`int`, *optional*, defaults to 80): Number of mel features. subsampling_conv_kernel_size (`int`, *optional*, defaults to 3): The kernel size of the subsampling convolution layers. subsampling_conv_stride (`int`, *optional*, defaults to 2): The stride of the subsampling convolution layers. dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for all fully connected layers in the embeddings, encoder, and pooler. dropout_positions (`float`, *optional*, defaults to 0.0): The dropout ratio for the positions in the input sequence. layerdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the layers in the encoder. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention layers. max_position_embeddings (`int`, *optional*, defaults to 5000): The maximum sequence length that this model might ever be used with. scale_input (`bool`, *optional*, defaults to `True`): Whether to scale the input embeddings. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. Example: ```python >>> from transformers import ParakeetEncoderModel, ParakeetEncoderConfig >>> # Initializing a `ParakeetEncoder` configuration >>> configuration = ParakeetEncoderConfig() >>> # Initializing a model from the configuration >>> model = ParakeetEncoderModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` This configuration class is based on the ParakeetEncoder architecture from NVIDIA NeMo. You can find more details and pre-trained models at [nvidia/parakeet-ctc-1.1b](https://huggingface.co/nvidia/parakeet-ctc-1.1b). 
""" model_type = "parakeet_encoder" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, hidden_size=1024, num_hidden_layers=24, num_attention_heads=8, intermediate_size=4096, hidden_act="silu", attention_bias=True, convolution_bias=True, conv_kernel_size=9, subsampling_factor=8, subsampling_conv_channels=256, num_mel_bins=80, subsampling_conv_kernel_size=3, subsampling_conv_stride=2, dropout=0.1, dropout_positions=0.0, layerdrop=0.1, activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=5000, scale_input=True, initializer_range=0.02, **kwargs, ): super().__init__( **kwargs, ) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_attention_heads # LlamaAttention compatibility self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.attention_bias = attention_bias self.convolution_bias = convolution_bias if (conv_kernel_size - 1) % 2 != 0: raise ValueError(f"conv_kernel_size must be odd, got {conv_kernel_size}") self.conv_kernel_size = conv_kernel_size self.subsampling_conv_kernel_size = subsampling_conv_kernel_size self.subsampling_conv_stride = subsampling_conv_stride self.subsampling_factor = subsampling_factor self.subsampling_conv_channels = subsampling_conv_channels self.num_mel_bins = num_mel_bins self.dropout = dropout self.dropout_positions = dropout_positions self.layerdrop = layerdrop self.activation_dropout = activation_dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.scale_input = scale_input self.initializer_range = initializer_range
ParakeetEncoderConfig
python
getsentry__sentry
src/sentry/api/serializers/types.py
{ "start": 165, "end": 348 }
class ____(TypedDict, total=False): avatarType: str avatarUuid: str | None avatarUrl: str | None # Response type for OrganizationReleaseDetailsEndpoint
SerializedAvatarFields
python
getsentry__sentry
src/sentry/integrations/vercel/client.py
{ "start": 348, "end": 3943 }
class ____(ApiClient): base_url = "https://api.vercel.com" integration_name = "vercel" pagination_limit = 100 # Current User API (Read) # https://vercel.com/docs/integrations/reference#using-the-vercel-api/scopes/user GET_USER_URL = "/v2/user" # Teams API Scope (Read) # https://vercel.com/docs/rest-api#endpoints/teams/get-a-team GET_TEAM_URL = "/v2/teams/%s" # Projects API Scope (Read) # https://vercel.com/docs/rest-api#endpoints/projects/find-a-project-by-id-or-name GET_PROJECT_URL = "/v9/projects/%s" # https://vercel.com/docs/rest-api#endpoints/projects/retrieve-a-list-of-projects GET_PROJECTS_URL = "/v9/projects/" # Project Environment Variables API Scope (Read/Write) # https://vercel.com/docs/rest-api#endpoints/projects/retrieve-the-environment-variables-of-a-project-by-id-or-name GET_ENV_VAR_URL = "/v9/projects/%s/env" # https://vercel.com/docs/rest-api#endpoints/projects/create-one-or-more-environment-variables CREATE_ENV_VAR_URL = "/v9/projects/%s/env" # https://vercel.com/docs/rest-api#endpoints/projects/edit-an-environment-variable UPDATE_ENV_VAR_URL = "/v9/projects/%s/env/%s" # Integration Configuration API Scope (Read/Write) # https://vercel.com/docs/rest-api#endpoints/integrations/delete-an-integration-configuration UNINSTALL = "/v1/integrations/configuration/%s" def __init__(self, access_token, team_id=None): super().__init__() self.access_token = access_token self.team_id = team_id def request(self, method, path, data=None, params=None, allow_text=False): if self.team_id: # always need to use the team_id as a param for requests params = params or {} params["teamId"] = self.team_id headers = {"Authorization": f"Bearer {self.access_token}"} try: return self._request( method, path, headers=headers, data=data, params=params, allow_text=allow_text ) except ApiError as e: if not e.code == 402: raise def get_team(self): assert self.team_id return self.get(self.GET_TEAM_URL % self.team_id) def get_user(self): return self.get(self.GET_USER_URL)["user"] def get_from_pagination(self, url, data_key): # Vercel Pagination Guide: https://vercel.com/docs/rest-api#introduction/api-basics/pagination params: _ParamsDict = {"limit": self.pagination_limit} results = [] next_timestamp: str | None = "" while next_timestamp is not None: response = self.get(url, params=params) results += response[data_key] next_timestamp = response["pagination"]["next"] params["until"] = next_timestamp return results def get_projects(self): return self.get_from_pagination(self.GET_PROJECTS_URL, "projects") def get_project(self, vercel_project_id): return self.get(self.GET_PROJECT_URL % vercel_project_id) def get_env_vars(self, vercel_project_id): return self.get(self.GET_ENV_VAR_URL % vercel_project_id) def create_env_variable(self, vercel_project_id, data): return self.post(self.CREATE_ENV_VAR_URL % vercel_project_id, data=data) def update_env_variable(self, vercel_project_id, env_var_id, data): return self.patch(self.UPDATE_ENV_VAR_URL % (vercel_project_id, env_var_id), data=data) def uninstall(self, configuration_id): return self.delete(self.UNINSTALL % configuration_id)
VercelClient
python
keras-team__keras
keras/src/layers/pooling/global_average_pooling_test.py
{ "start": 2815, "end": 6108 }
class ____(testing.TestCase): @parameterized.parameters( ("channels_last", False), ("channels_last", True), ("channels_first", False), ("channels_first", True), ) def test_global_average_pooling1d(self, data_format, keepdims): def np_gap1d(x, data_format, keepdims, mask=None): steps_axis = 1 if data_format == "channels_last" else 2 if mask is not None: mask = np.expand_dims( mask, 2 if data_format == "channels_last" else 1 ) x *= mask res = np.sum(x, axis=steps_axis) / np.sum(mask, axis=steps_axis) else: res = np.mean(x, axis=steps_axis) if keepdims: res = np.expand_dims(res, axis=steps_axis) return res inputs = np.arange(24, dtype="float32").reshape((2, 3, 4)) layer = layers.GlobalAveragePooling1D( data_format=data_format, keepdims=keepdims, ) outputs = layer(inputs) expected = np_gap1d(inputs, data_format, keepdims) self.assertAllClose(outputs, expected) if data_format == "channels_last": mask = np.array([[1, 1, 0], [0, 1, 0]], dtype="int32") else: mask = np.array([[1, 1, 0, 0], [0, 1, 0, 1]], dtype="int32") outputs = layer(inputs, mask) expected = np_gap1d(inputs, data_format, keepdims, mask) self.assertAllClose(outputs, expected) @parameterized.parameters( ("channels_last", False), ("channels_last", True), ("channels_first", False), ("channels_first", True), ) def test_global_average_pooling2d(self, data_format, keepdims): def np_gap2d(x, data_format, keepdims): steps_axis = [1, 2] if data_format == "channels_last" else [2, 3] res = np.apply_over_axes(np.mean, x, steps_axis) if not keepdims: res = res.squeeze() return res inputs = np.arange(96, dtype="float32").reshape((2, 3, 4, 4)) layer = layers.GlobalAveragePooling2D( data_format=data_format, keepdims=keepdims, ) outputs = layer(inputs) expected = np_gap2d(inputs, data_format, keepdims) self.assertAllClose(outputs, expected) @parameterized.parameters( ("channels_last", False), ("channels_last", True), ("channels_first", False), ("channels_first", True), ) def test_global_average_pooling3d(self, data_format, keepdims): def np_gap3d(x, data_format, keepdims): steps_axis = ( [1, 2, 3] if data_format == "channels_last" else [2, 3, 4] ) res = np.apply_over_axes(np.mean, x, steps_axis) if not keepdims: res = res.squeeze() return res inputs = np.arange(360, dtype="float32").reshape((2, 3, 3, 5, 4)) layer = layers.GlobalAveragePooling3D( data_format=data_format, keepdims=keepdims, ) outputs = layer(inputs) expected = np_gap3d(inputs, data_format, keepdims) self.assertAllClose(outputs, expected)
GlobalAveragePoolingCorrectnessTest
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/classes5.py
{ "start": 6536, "end": 6623 }
class ____: def __eq__(self, other: object) -> bool: return True
ParentClass5
python
lepture__authlib
authlib/oauth2/rfc7591/endpoint.py
{ "start": 527, "end": 7183 }
class ____: """The client registration endpoint is an OAuth 2.0 endpoint designed to allow a client to be registered with the authorization server. """ ENDPOINT_NAME = "client_registration" #: Rewrite this value with a list to support ``software_statement`` #: e.g. ``software_statement_alg_values_supported = ['RS256']`` software_statement_alg_values_supported = None def __init__(self, server=None, claims_classes=None): self.server = server self.claims_classes = claims_classes or [ClientMetadataClaims] def __call__(self, request): return self.create_registration_response(request) def create_registration_response(self, request): token = self.authenticate_token(request) if not token: raise AccessDeniedError() request.credential = token client_metadata = self.extract_client_metadata(request) client_info = self.generate_client_info(request) body = {} body.update(client_metadata) body.update(client_info) client = self.save_client(client_info, client_metadata, request) registration_info = self.generate_client_registration_info(client, request) if registration_info: body.update(registration_info) return 201, body, default_json_headers def extract_client_metadata(self, request): if not request.payload.data: raise InvalidRequestError() json_data = request.payload.data.copy() software_statement = json_data.pop("software_statement", None) if software_statement and self.software_statement_alg_values_supported: data = self.extract_software_statement(software_statement, request) json_data.update(data) client_metadata = {} server_metadata = self.get_server_metadata() for claims_class in self.claims_classes: options = ( claims_class.get_claims_options(server_metadata) if hasattr(claims_class, "get_claims_options") and server_metadata else {} ) claims = claims_class(json_data, {}, options, server_metadata) try: claims.validate() except JoseError as error: raise InvalidClientMetadataError(error.description) from error client_metadata.update(**claims.get_registered_claims()) return client_metadata def extract_software_statement(self, software_statement, request): key = self.resolve_public_key(request) if not key: raise UnapprovedSoftwareStatementError() try: jwt = JsonWebToken(self.software_statement_alg_values_supported) claims = jwt.decode(software_statement, key) # there is no need to validate claims return claims except JoseError as exc: raise InvalidSoftwareStatementError() from exc def generate_client_info(self, request): # https://tools.ietf.org/html/rfc7591#section-3.2.1 try: client_id = self.generate_client_id(request) except TypeError: # pragma: no cover client_id = self.generate_client_id() deprecate( "generate_client_id takes a 'request' parameter. " "It will become mandatory in coming releases", version="1.8", ) try: client_secret = self.generate_client_secret(request) except TypeError: # pragma: no cover client_secret = self.generate_client_secret() deprecate( "generate_client_secret takes a 'request' parameter. " "It will become mandatory in coming releases", version="1.8", ) client_id_issued_at = int(time.time()) client_secret_expires_at = 0 return dict( client_id=client_id, client_secret=client_secret, client_id_issued_at=client_id_issued_at, client_secret_expires_at=client_secret_expires_at, ) def generate_client_registration_info(self, client, request): """Generate ```registration_client_uri`` and ``registration_access_token`` for RFC7592. This method returns ``None`` by default. Developers MAY rewrite this method to return registration information. 
""" return None def create_endpoint_request(self, request): return self.server.create_json_request(request) def generate_client_id(self, request): """Generate ``client_id`` value. Developers MAY rewrite this method to use their own way to generate ``client_id``. """ return generate_token(42) def generate_client_secret(self, request): """Generate ``client_secret`` value. Developers MAY rewrite this method to use their own way to generate ``client_secret``. """ return binascii.hexlify(os.urandom(24)).decode("ascii") def get_server_metadata(self): """Return server metadata which includes supported grant types, response types and etc. """ raise NotImplementedError() def authenticate_token(self, request): """Authenticate current credential who is requesting to register a client. Developers MUST implement this method in subclass:: def authenticate_token(self, request): auth = request.headers.get("Authorization") return get_token_by_auth(auth) :return: token instance """ raise NotImplementedError() def resolve_public_key(self, request): """Resolve a public key for decoding ``software_statement``. If ``enable_software_statement=True``, developers MUST implement this method in subclass:: def resolve_public_key(self, request): return get_public_key_from_user(request.credential) :return: JWK or Key string """ raise NotImplementedError() def save_client(self, client_info, client_metadata, request): """Save client into database. Developers MUST implement this method in subclass:: def save_client(self, client_info, client_metadata, request): client = OAuthClient( client_id=client_info['client_id'], client_secret=client_info['client_secret'], ... ) client.save() return client """ raise NotImplementedError()
ClientRegistrationEndpoint
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 924449, "end": 925315 }
class ____(sgqlc.types.Type): """Autogenerated return type of RemoveEnterpriseAdmin""" __schema__ = github_schema __field_names__ = ("admin", "client_mutation_id", "enterprise", "message", "viewer") admin = sgqlc.types.Field("User", graphql_name="admin") """The user who was removed as an administrator.""" client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation.""" enterprise = sgqlc.types.Field("Enterprise", graphql_name="enterprise") """The updated enterprise.""" message = sgqlc.types.Field(String, graphql_name="message") """A message confirming the result of removing an administrator.""" viewer = sgqlc.types.Field("User", graphql_name="viewer") """The viewer performing the mutation."""
RemoveEnterpriseAdminPayload
python
pytorch__pytorch
test/torch_np/numpy_tests/linalg/test_linalg.py
{ "start": 12207, "end": 12538 }
class ____(LinalgTestCase): @slow def test_generalized_nonsq_cases(self): self.check_cases(require={"generalized", "nonsquare"}, exclude={"size-0"}) @slow def test_generalized_empty_nonsq_cases(self): self.check_cases(require={"generalized", "nonsquare", "size-0"})
LinalgGeneralizedNonsquareTestCase
python
django__django
tests/serializers/models/data.py
{ "start": 849, "end": 920 }
class ____(models.Model): data = models.DateField(null=True)
DateData
python
kamyu104__LeetCode-Solutions
Python/construct-the-minimum-bitwise-array-ii.py
{ "start": 48, "end": 254 }
class ____(object): def minBitwiseArray(self, nums): """ :type nums: List[int] :rtype: List[int] """ return [x-(((x+1)&~x)>>1) if x&1 else -1 for x in nums]
Solution
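To make the bit trick above concrete, here is a short illustrative check (the sample values are arbitrary): for odd x, `(x + 1) & ~x` isolates the lowest zero bit of x, and subtracting half of it clears the top bit of the trailing run of ones, which yields the smallest y with `y | (y + 1) == x`.

for x in [1, 3, 5, 7, 11]:
    y = x - (((x + 1) & ~x) >> 1)
    assert y | (y + 1) == x
    print(x, "->", y)  # e.g. 7 -> 3 (0b111 -> 0b011), 5 -> 4 (0b101 -> 0b100)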
python
huggingface__transformers
tests/models/switch_transformers/test_modeling_switch_transformers.py
{ "start": 40285, "end": 44847 }
class ____(unittest.TestCase): @require_torch_accelerator @require_torch_bf16 def test_small_logits(self): r""" Logits testing to check implementation consistency between `t5x` implementation and `transformers` implementation of Switch-C transformers. We only check the logits of the first batch. """ model = SwitchTransformersModel.from_pretrained("google/switch-base-8", dtype=torch.bfloat16).to(torch_device) input_ids = torch.ones((32, 64), dtype=torch.long).to(torch_device) decoder_input_ids = torch.ones((32, 64), dtype=torch.long).to(torch_device) # fmt: off expectations = Expectations( { (None, None): [ -0.204102, -0.193359, 0.523438, -0.296875, 0.108887, 0.0211182, 0.605469, -0.100586, -0.0551758, 0.296875, 0.0090332, 0.174805, 0.139648, -0.170898, -0.0981445, 0.0245361, 0.0373535, 0.050293, -0.212891, 0.129883, 0.390625, -0.203125, -0.122559, -0.180664, 0.0437012, -0.349609, -0.0250244, -0.104004, -0.15918, -0.133789 ], ("cuda", 8): [ -0.2051, -0.1914, 0.5352, -0.2988, 0.1108, 0.0200, 0.6094, -0.1025, -0.0549, 0.2988, -0.0018, 0.1758, 0.1348, -0.1689, -0.1035, 0.0266, 0.0383, 0.0493, -0.2119, 0.1328, 0.3906, -0.2041, -0.1240, -0.1836, 0.0454, -0.3477, -0.0256, -0.1050, -0.1572, -0.1338 ], } ) EXPECTED_MEAN_LOGITS = torch.tensor(expectations.get_expectation()).to(torch_device, dtype=torch.bfloat16) # fmt: on hf_logits = model(input_ids, decoder_input_ids=decoder_input_ids).last_hidden_state hf_logits = hf_logits[0, 0, :30] torch.testing.assert_close(hf_logits, EXPECTED_MEAN_LOGITS, rtol=6e-3, atol=9e-3) @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def test_small_generate(self): # Generate test using the smalled switch-C model. model = SwitchTransformersForConditionalGeneration.from_pretrained( "google/switch-base-8", dtype=torch.bfloat16 ).eval() tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small", use_fast=False, legacy=False) model = model.to(torch_device) input_ids = tokenizer( "The human walks into a bar and orders a <extra_id_0>", return_tensors="pt" ).input_ids.to(torch_device) sequences = model.generate(input_ids) output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0] self.assertEqual(output_str, "drink.") input_ids = tokenizer( "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", return_tensors="pt", ).input_ids.to(torch_device) sequences = model.generate(input_ids) output_str = tokenizer.batch_decode(sequences, skip_special_tokens=False)[0] EXPECTED_OUTPUT = "<pad><extra_id_0> man<extra_id_1> beer<extra_id_2> a<extra_id_3> whiskey<extra_id_4>.</s>" self.assertEqual(output_str, EXPECTED_OUTPUT) @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def test_small_batch_generate(self): BATCH_SIZE = 4 model = SwitchTransformersForConditionalGeneration.from_pretrained( "google/switch-base-8", dtype=torch.bfloat16 ).eval() tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small", use_fast=False, legacy=False) inputs = [ "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>." 
] * BATCH_SIZE encoded_input = tokenizer.batch_encode_plus(inputs, return_tensors="pt") sequences = model.generate(**encoded_input) batch_output = tokenizer.batch_decode(sequences, skip_special_tokens=False) for i in range(0, BATCH_SIZE, 2): self.assertEqual(batch_output[i], batch_output[i + 1]) @require_torch
SwitchTransformerModelIntegrationTests
python
huggingface__transformers
src/transformers/models/efficientnet/modeling_efficientnet.py
{ "start": 3283, "end": 3932 }
class ____(nn.Conv2d): def __init__( self, in_channels, depth_multiplier=1, kernel_size=3, stride=1, padding=0, dilation=1, bias=True, padding_mode="zeros", ): out_channels = in_channels * depth_multiplier super().__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels, bias=bias, padding_mode=padding_mode, )
EfficientNetDepthwiseConv2d
python
xlwings__xlwings
xlwings/constants.py
{ "start": 49973, "end": 50164 }
class ____: xlValidAlertInformation = 3 # from enum XlDVAlertStyle xlValidAlertStop = 1 # from enum XlDVAlertStyle xlValidAlertWarning = 2 # from enum XlDVAlertStyle
DVAlertStyle
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/asset_checks/asset_check_evaluation.py
{ "start": 449, "end": 1115 }
class ____( NamedTuple( "_AssetCheckEvaluationPlanned", [ ("asset_key", AssetKey), ("check_name", str), ], ) ): """Metadata for the event when an asset check is launched.""" def __new__(cls, asset_key: AssetKey, check_name: str): return super().__new__( cls, asset_key=check.inst_param(asset_key, "asset_key", AssetKey), check_name=check.str_param(check_name, "check_name"), ) @property def asset_check_key(self) -> AssetCheckKey: return AssetCheckKey(self.asset_key, self.check_name) @whitelist_for_serdes
AssetCheckEvaluationPlanned
python
encode__django-rest-framework
tests/test_generics.py
{ "start": 19546, "end": 23725 }
class ____(TestCase): def test_create_api_view_post(self): class MockCreateApiView(generics.CreateAPIView): def create(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockCreateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.post('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_destroy_api_view_delete(self): class MockDestroyApiView(generics.DestroyAPIView): def destroy(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockDestroyApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_update_api_view_partial_update(self): class MockUpdateApiView(generics.UpdateAPIView): def partial_update(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_get(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_put(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def update(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.put('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_update_api_view_patch(self): class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView): def partial_update(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveUpdateApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.patch('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_destroy_api_view_get(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def retrieve(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveDestroyUApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.get('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data def test_retrieve_destroy_api_view_delete(self): class MockRetrieveDestroyUApiView(generics.RetrieveDestroyAPIView): def destroy(self, request, *args, **kwargs): self.called = True self.call_args = (request, args, kwargs) view = MockRetrieveDestroyUApiView() data = ('test request', ('test arg',), {'test_kwarg': 'test'}) view.delete('test request', 'test arg', test_kwarg='test') assert view.called is True assert view.call_args == data
ApiViewsTests
python
ray-project__ray
python/ray/train/_internal/state/schema.py
{ "start": 3236, "end": 4615 }
class ____(BaseModel): """Metadata for a Ray Train run and information about its workers.""" name: str = Field(description="The name of the Train run.") id: str = Field(description="The unique identifier for each Train run.") job_id: str = Field(description="The Ray Job ID.") controller_actor_id: str = Field(description="Actor Id of the Train controller.") workers: List[TrainWorkerInfo] = Field( description="A List of Train workers sorted by global ranks." ) datasets: List[TrainDatasetInfo] = Field( description="A List of dataset info for this Train run." ) run_status: RunStatusEnum = Field( description="The current status of the train run. It can be one of the " "following: RUNNING, FINISHED, ERRORED, or ABORTED." ) status_detail: str = Field( description="Detailed information about the current run status, " "such as error messages." ) start_time_ms: int = Field( description="The UNIX timestamp of the start time of this Train run." ) end_time_ms: Optional[int] = Field( description="The UNIX timestamp of the end time of this Train run. " "If null, the Train run has not ended yet." ) resources: List[Dict[str, float]] = Field( description="The resources allocated to the worker." ) @DeveloperAPI
TrainRunInfo
python
getsentry__sentry
tests/sentry/api/endpoints/test_project_performance_general_settings.py
{ "start": 213, "end": 2423 }
class ____(APITestCase): endpoint = "sentry-api-0-project-performance-general-settings" def setUp(self) -> None: super().setUp() self.login_as(user=self.user, superuser=True) self.project = self.create_project() self.url = reverse( self.endpoint, kwargs={ "organization_id_or_slug": self.project.organization.slug, "project_id_or_slug": self.project.slug, }, ) def test_get_project_general_settings_defaults(self) -> None: with self.feature(PERFORMANCE_SETTINGS_FEATURES): response = self.client.get(self.url, format="json") assert response.status_code == 200, response.content assert response.data["enable_images"] is False def test_get_returns_error_without_feature_enabled(self) -> None: with self.feature({}): response = self.client.get(self.url, format="json") assert response.status_code == 404 def test_updates_to_new_value(self) -> None: with self.feature(PERFORMANCE_SETTINGS_FEATURES): response = self.client.post( self.url, data={ "enable_images": True, }, ) response = self.client.get(self.url, format="json") assert response.data["enable_images"] is True response = self.client.post( self.url, data={ "enable_images": False, }, ) response = self.client.get(self.url, format="json") assert response.data["enable_images"] is False def test_update_project_setting_check_validation(self) -> None: with self.feature(PERFORMANCE_SETTINGS_FEATURES): response = self.client.post( self.url, data={ "enable_images": -1, }, ) assert response.status_code == 400, response.content assert response.data == { "enable_images": [ErrorDetail(string="Must be a valid boolean.", code="invalid")] }
ProjectPerformanceGeneralSettingsTest
python
getsentry__sentry
tests/sentry/api/bases/test_organization.py
{ "start": 1732, "end": 1822 }
class ____: @property def is_active(self) -> bool: return True
MockSuperUser
python
tornadoweb__tornado
tornado/template.py
{ "start": 18871, "end": 19267 }
class ____: def each_child(self) -> Iterable["_Node"]: return () def generate(self, writer: "_CodeWriter") -> None: raise NotImplementedError() def find_named_blocks( self, loader: Optional[BaseLoader], named_blocks: Dict[str, "_NamedBlock"] ) -> None: for child in self.each_child(): child.find_named_blocks(loader, named_blocks)
_Node
python
python-excel__xlwt
xlwt/BIFFRecords.py
{ "start": 21581, "end": 25062 }
class ____(BiffRecord): """ WARNING The font with index 4 is omitted in all BIFF versions. This means the first four fonts have zero-based indexes, and the fifth font and all following fonts are referenced with one-based indexes. Offset Size Contents 0 2 Height of the font (in twips = 1/20 of a point) 2 2 Option flags: Bit Mask Contents 0 0001H 1 = Characters are bold (redundant, see below) 1 0002H 1 = Characters are italic 2 0004H 1 = Characters are underlined (redundant, see below) 3 0008H 1 = Characters are struck out 0010H 1 = Outline 0020H 1 = Shadow 4 2 Colour index 6 2 Font weight (100-1000). Standard values are 0190H (400) for normal text and 02BCH (700) for bold text. 8 2 Escapement type: 0000H = None 0001H = Superscript 0002H = Subscript 10 1 Underline type: 00H = None 01H = Single 21H = Single accounting 02H = Double 22H = Double accounting 11 1 Font family: 00H = None (unknown or don't care) 01H = Roman (variable width, serifed) 02H = Swiss (variable width, sans-serifed) 03H = Modern (fixed width, serifed or sans-serifed) 04H = Script (cursive) 05H = Decorative (specialised, i.e. Old English, Fraktur) 12 1 Character set: 00H = 0 = ANSI Latin 01H = 1 = System default 02H = 2 = Symbol 4DH = 77 = Apple Roman 80H = 128 = ANSI Japanese Shift-JIS 81H = 129 = ANSI Korean (Hangul) 82H = 130 = ANSI Korean (Johab) 86H = 134 = ANSI Chinese Simplified GBK 88H = 136 = ANSI Chinese Traditional BIG5 A1H = 161 = ANSI Greek A2H = 162 = ANSI Turkish A3H = 163 = ANSI Vietnamese B1H = 177 = ANSI Hebrew B2H = 178 = ANSI Arabic BAH = 186 = ANSI Baltic CCH = 204 = ANSI Cyrillic DEH = 222 = ANSI Thai EEH = 238 = ANSI Latin II (Central European) FFH = 255 = OEM Latin I 13 1 Not used 14 var. Font name: BIFF5/BIFF7: Byte string, 8-bit string length BIFF8: Unicode string, 8-bit string length The boldness and underline flags are still set in the options field, but not used on reading the font. Font weight and underline type are specified in separate fields instead. """ _REC_ID = 0x0031 def __init__(self, height, options, colour_index, weight, escapement, underline, family, charset, name): uname = upack1(name) uname_len = len(uname) self._rec_data = pack('<5H4B%ds' % uname_len, height, options, colour_index, weight, escapement, underline, family, charset, 0x00, uname)
FontRecord
python
ray-project__ray
python/ray/serve/_private/request_router/replica_wrapper.py
{ "start": 3496, "end": 6898 }
class ____: """Contains info on a running replica. Also defines the interface for a request router to talk to a replica. """ def __init__(self, replica_info: RunningReplicaInfo): self._replica_info = replica_info self._multiplexed_model_ids = set(replica_info.multiplexed_model_ids) # Fetch and cache the actor handle once per RunningReplica instance. # This avoids the borrower-of-borrower pattern while minimizing GCS lookups. actor_handle = replica_info.get_actor_handle() if replica_info.is_cross_language: self._actor_handle = JavaActorHandleProxy(actor_handle) else: self._actor_handle = actor_handle @property def replica_id(self) -> ReplicaID: """ID of this replica.""" return self._replica_info.replica_id @property def actor_id(self) -> ray.ActorID: """Actor ID of this replica.""" return self._actor_handle._actor_id @property def node_id(self) -> str: """Node ID of the node this replica is running on.""" return self._replica_info.node_id @property def availability_zone(self) -> Optional[str]: """Availability zone of the node this replica is running on.""" return self._replica_info.availability_zone @property def multiplexed_model_ids(self) -> Set[str]: """Set of model IDs on this replica.""" return self._multiplexed_model_ids @property def routing_stats(self) -> Dict[str, Any]: """Dictionary of routing stats.""" return self._replica_info.routing_stats @property def max_ongoing_requests(self) -> int: """Max concurrent requests that can be sent to this replica.""" return self._replica_info.max_ongoing_requests @property def is_cross_language(self) -> bool: """Whether this replica is cross-language (Java).""" return self._replica_info.is_cross_language def _get_replica_wrapper(self, pr: PendingRequest) -> ReplicaWrapper: return ActorReplicaWrapper(self._actor_handle) def push_proxy_handle(self, handle: ActorHandle): """When on proxy, push proxy's self handle to replica""" self._actor_handle.push_proxy_handle.remote(handle) async def get_queue_len(self, *, deadline_s: float) -> int: """Returns current queue len for the replica. `deadline_s` is passed to verify backoff for testing. """ # NOTE(edoakes): the `get_num_ongoing_requests` method name is shared by # the Python and Java replica implementations. If you change it, you need to # change both (or introduce a branch here). obj_ref = self._actor_handle.get_num_ongoing_requests.remote() try: return await obj_ref except asyncio.CancelledError: ray.cancel(obj_ref) raise def try_send_request( self, pr: PendingRequest, with_rejection: bool ) -> ReplicaResult: """Try to send the request to this replica. It may be rejected.""" wrapper = self._get_replica_wrapper(pr) if self._replica_info.is_cross_language: assert not with_rejection, "Request rejection not supported for Java." return wrapper.send_request_java(pr) return wrapper.send_request_python(pr, with_rejection=with_rejection)
RunningReplica
python
apache__airflow
providers/vertica/src/airflow/providers/vertica/hooks/vertica.py
{ "start": 2236, "end": 7110 }
class ____(DbApiHook): """ Interact with Vertica. This hook use a customized version of default fetch_all_handler named vertica_fetch_all_handler. """ conn_name_attr = "vertica_conn_id" default_conn_name = "vertica_default" conn_type = "vertica" hook_name = "Vertica" supports_autocommit = True def get_conn(self) -> connect: """Return vertica connection object.""" conn = self.get_connection(self.get_conn_id()) conn_config: dict[str, Any] = { "user": conn.login, "password": conn.password or "", "database": conn.schema, "host": conn.host or "localhost", } if not conn.port: conn_config["port"] = 5433 else: conn_config["port"] = int(conn.port) bool_options = [ "connection_load_balance", "binary_transfer", "disable_copy_local", "request_complex_types", "use_prepared_statements", ] std_options = [ "session_label", "backup_server_node", "kerberos_host_name", "kerberos_service_name", "unicode_error", "workload", "ssl", ] conn_extra = conn.extra_dejson for bo in bool_options: if bo in conn_extra: conn_config[bo] = str(conn_extra[bo]).lower() in ["true", "on"] for so in std_options: if so in conn_extra: conn_config[so] = conn_extra[so] if "connection_timeout" in conn_extra: conn_config["connection_timeout"] = float(conn_extra["connection_timeout"]) if "log_level" in conn_extra: import logging log_lvl = conn_extra["log_level"] conn_config["log_path"] = None if isinstance(log_lvl, str): log_lvl = log_lvl.lower() if log_lvl == "critical": conn_config["log_level"] = logging.CRITICAL elif log_lvl == "error": conn_config["log_level"] = logging.ERROR elif log_lvl == "warning": conn_config["log_level"] = logging.WARNING elif log_lvl == "info": conn_config["log_level"] = logging.INFO elif log_lvl == "debug": conn_config["log_level"] = logging.DEBUG elif log_lvl == "notset": conn_config["log_level"] = logging.NOTSET else: conn_config["log_level"] = int(conn_extra["log_level"]) conn = connect(**conn_config) return conn @property def sqlalchemy_url(self) -> URL: """Return a SQLAlchemy URL object with properly formatted query parameters.""" conn = self.get_connection(self.get_conn_id()) extra = conn.extra_dejson or {} # Normalize query dictionary query = { k: ([str(x) for x in v] if isinstance(v, (list, tuple)) else str(v)) for k, v in extra.items() if v is not None } return URL.create( drivername="vertica-python", username=conn.login, password=conn.password or "", host=conn.host or "localhost", port=conn.port or 5433, database=conn.schema, query=query, ) def get_uri(self) -> str: """Return a URI string with password visible.""" return self.sqlalchemy_url.render_as_string(hide_password=False) @overload def run( self, sql: str | Iterable[str], autocommit: bool = ..., parameters: Iterable | Mapping[str, Any] | None = ..., handler: None = ..., split_statements: bool = ..., return_last: bool = ..., ) -> None: ... @overload def run( self, sql: str | Iterable[str], autocommit: bool = ..., parameters: Iterable | Mapping[str, Any] | None = ..., handler: Callable[[Any], Any] = ..., split_statements: bool = ..., return_last: bool = ..., ) -> Any | list[Any]: ... def run( self, sql: str | Iterable[str], autocommit: bool = False, parameters: Iterable | Mapping | None = None, handler: Callable[[Any], Any] | None = None, split_statements: bool = False, return_last: bool = True, ) -> Any | list[Any] | None: """ Overwrite the common sql run. Will automatically replace fetch_all_handler by vertica_fetch_all_handler. 
""" if handler == fetch_all_handler: handler = vertica_fetch_all_handler return DbApiHook.run(self, sql, autocommit, parameters, handler, split_statements, return_last)
VerticaHook
python
numba__numba
numba/tests/test_overlap.py
{ "start": 1383, "end": 3839 }
class ____(TestCase): def check_overlap(self, pyfunc, min_ndim, have_k_argument=False): N = 4 def vary_layouts(orig): yield orig.copy(order='C') yield orig.copy(order='F') a = orig[::-1].copy()[::-1] assert not a.flags.c_contiguous and not a.flags.f_contiguous yield a def check(pyfunc, cfunc, pydest, cdest, kwargs): pyfunc(pydest, pydest, **kwargs) cfunc(cdest, cdest, **kwargs) self.assertPreciseEqual(pydest, cdest) cfunc = jit(nopython=True)(pyfunc) # Check for up to 3d arrays for ndim in range(min_ndim, 4): shape = (N,) * ndim orig = np.arange(0, N**ndim).reshape(shape) # Note we cannot copy a 'A' layout array exactly (bitwise), # so instead we call vary_layouts() twice for pydest, cdest in zip(vary_layouts(orig), vary_layouts(orig)): if have_k_argument: for k in range(1, N): check(pyfunc, cfunc, pydest, cdest, dict(k=k)) else: check(pyfunc, cfunc, pydest, cdest, {}) def check_overlap_with_k(self, pyfunc, min_ndim): self.check_overlap(pyfunc, min_ndim=min_ndim, have_k_argument=True) def test_overlap1(self): self.check_overlap_with_k(array_overlap1, min_ndim=1) def test_overlap2(self): self.check_overlap_with_k(array_overlap2, min_ndim=1) def test_overlap3(self): self.check_overlap_with_k(array_overlap3, min_ndim=2) def test_overlap4(self): self.check_overlap_with_k(array_overlap4, min_ndim=2) def test_overlap5(self): self.check_overlap_with_k(array_overlap5, min_ndim=1) def test_overlap6(self): self.check_overlap_with_k(array_overlap6, min_ndim=1) def test_overlap11(self): self.check_overlap(array_overlap11, min_ndim=1) def test_overlap12(self): self.check_overlap(array_overlap12, min_ndim=1) def test_overlap13(self): self.check_overlap(array_overlap13, min_ndim=2) def test_overlap14(self): self.check_overlap(array_overlap14, min_ndim=2) def test_overlap15(self): self.check_overlap(array_overlap15, min_ndim=1) def test_overlap16(self): self.check_overlap(array_overlap16, min_ndim=1) if __name__ == '__main__': unittest.main()
TestArrayOverlap
python
aimacode__aima-python
planning.py
{ "start": 37238, "end": 40452 }
class ____: """ Class for formulation GraphPlan algorithm Constructs a graph of state and action space Returns solution for the planning problem """ def __init__(self, planning_problem): self.graph = Graph(planning_problem) self.no_goods = [] self.solution = [] def check_leveloff(self): """Checks if the graph has levelled off""" check = (set(self.graph.levels[-1].current_state) == set(self.graph.levels[-2].current_state)) if check: return True def extract_solution(self, goals, index): """Extracts the solution""" level = self.graph.levels[index] if not self.graph.non_mutex_goals(goals, index): self.no_goods.append((level, goals)) return level = self.graph.levels[index - 1] # Create all combinations of actions that satisfy the goal actions = [] for goal in goals: actions.append(level.next_state_links[goal]) all_actions = list(itertools.product(*actions)) # Filter out non-mutex actions non_mutex_actions = [] for action_tuple in all_actions: action_pairs = itertools.combinations(list(set(action_tuple)), 2) non_mutex_actions.append(list(set(action_tuple))) for pair in action_pairs: if set(pair) in level.mutex: non_mutex_actions.pop(-1) break # Recursion for action_list in non_mutex_actions: if [action_list, index] not in self.solution: self.solution.append([action_list, index]) new_goals = [] for act in set(action_list): if act in level.current_action_links: new_goals = new_goals + level.current_action_links[act] if abs(index) + 1 == len(self.graph.levels): return elif (level, new_goals) in self.no_goods: return else: self.extract_solution(new_goals, index - 1) # Level-Order multiple solutions solution = [] for item in self.solution: if item[1] == -1: solution.append([]) solution[-1].append(item[0]) else: solution[-1].append(item[0]) for num, item in enumerate(solution): item.reverse() solution[num] = item return solution def goal_test(self, kb): return all(kb.ask(q) is not False for q in self.graph.planning_problem.goals) def execute(self): """Executes the GraphPlan algorithm for the given problem""" while True: self.graph.expand_graph() if (self.goal_test(self.graph.levels[-1].kb) and self.graph.non_mutex_goals( self.graph.planning_problem.goals, -1)): solution = self.extract_solution(self.graph.planning_problem.goals, -1) if solution: return solution if len(self.graph.levels) >= 2 and self.check_leveloff(): return None
GraphPlan
python
django__django
tests/syndication_tests/feeds.py
{ "start": 6659, "end": 7326 }
class ____(feedgenerator.Atom1Feed): """ Test of a custom feed generator class. """ def root_attributes(self): attrs = super().root_attributes() attrs["django"] = "rocks" return attrs def add_root_elements(self, handler): super().add_root_elements(handler) handler.addQuickElement("spam", "eggs") def item_attributes(self, item): attrs = super().item_attributes(item) attrs["bacon"] = "yum" return attrs def add_item_elements(self, handler, item): super().add_item_elements(handler, item) handler.addQuickElement("ministry", "silly walks")
MyCustomAtom1Feed
python
pypa__setuptools
setuptools/_vendor/zipp/__init__.py
{ "start": 1832, "end": 3690 }
class ____: """ ZipFile mix-in to ensure names are sanitized. """ def namelist(self): return list(map(self._sanitize, super().namelist())) @staticmethod def _sanitize(name): r""" Ensure a relative path with posix separators and no dot names. Modeled after https://github.com/python/cpython/blob/bcc1be39cb1d04ad9fc0bd1b9193d3972835a57c/Lib/zipfile/__init__.py#L1799-L1813 but provides consistent cross-platform behavior. >>> san = SanitizedNames._sanitize >>> san('/foo/bar') 'foo/bar' >>> san('//foo.txt') 'foo.txt' >>> san('foo/.././bar.txt') 'foo/bar.txt' >>> san('foo../.bar.txt') 'foo../.bar.txt' >>> san('\\foo\\bar.txt') 'foo/bar.txt' >>> san('D:\\foo.txt') 'D/foo.txt' >>> san('\\\\server\\share\\file.txt') 'server/share/file.txt' >>> san('\\\\?\\GLOBALROOT\\Volume3') '?/GLOBALROOT/Volume3' >>> san('\\\\.\\PhysicalDrive1\\root') 'PhysicalDrive1/root' Retain any trailing slash. >>> san('abc/') 'abc/' Raises a ValueError if the result is empty. >>> san('../..') Traceback (most recent call last): ... ValueError: Empty filename """ def allowed(part): return part and part not in {'..', '.'} # Remove the drive letter. # Don't use ntpath.splitdrive, because that also strips UNC paths bare = re.sub('^([A-Z]):', r'\1', name, flags=re.IGNORECASE) clean = bare.replace('\\', '/') parts = clean.split('/') joined = '/'.join(filter(allowed, parts)) if not joined: raise ValueError("Empty filename") return joined + '/' * name.endswith('/')
SanitizedNames
python
pandas-dev__pandas
pandas/tests/extension/test_common.py
{ "start": 233, "end": 285 }
class ____(dtypes.ExtensionDtype): pass
DummyDtype
python
getsentry__sentry
tests/sentry/workflow_engine/processors/test_data_sources.py
{ "start": 298, "end": 5292 }
class ____(BaseWorkflowTest): def setUp(self) -> None: # check that test_base registers the data_source_type_registry assert isinstance(data_source_type_registry.get("test"), mock.Mock) self.query = self.create_snuba_query() self.query_two = self.create_snuba_query() self.detector_one = self.create_detector(name="test_detector1") self.detector_two = self.create_detector(name="test_detector2", type="metric_issue") self.detector_one.workflow_condition_group = self.create_data_condition_group( logic_type="any" ) self.create_data_condition( condition_group=self.detector_one.workflow_condition_group, type="eq", comparison="bar", condition_result=True, ) self.detector_one.save() source_id_1 = "12345-test-source-1" self.ds1 = self.create_data_source(source_id=source_id_1, type="test") self.ds1.detectors.set([self.detector_one]) source_id_2 = "56789-test-source-2" self.ds2 = self.create_data_source(source_id=source_id_2, type="test") self.ds2.detectors.set([self.detector_one, self.detector_two]) self.packet = DataPacket[dict[str, str]]( source_id_1, {"source_id": source_id_1, "foo": "bar"} ) self.two_detector_packet = DataPacket[dict[str, str]]( source_id_2, {"source_id": source_id_2, "foo": "baz"} ) def test_single_data_packet(self) -> None: assert process_data_source(self.packet, "test") == (self.packet, [self.detector_one]) def test_disabled_detector(self) -> None: self.detector_one.enabled = False self.detector_one.save() assert process_data_source(self.two_detector_packet, "test") == ( self.two_detector_packet, [self.detector_two], ) def test_multiple_detectors(self) -> None: self.detector_three = self.create_detector(name="test_detector3") self.detector_four = self.create_detector(name="test_detector4") self.ds2.detectors.add(self.detector_three) self.ds2.detectors.add(self.detector_four) assert process_data_source(self.two_detector_packet, "test") == ( self.two_detector_packet, [self.detector_one, self.detector_two, self.detector_three, self.detector_four], ) def test_no_results(self) -> None: self.ds2.detectors.clear() assert process_data_source(self.two_detector_packet, "test") == ( self.two_detector_packet, [], ) def test_different_data_packet_type__no_results(self) -> None: assert process_data_source(self.packet, "test2") == (self.packet, []) def test_metrics_are_sent_for_data_sources(self) -> None: with mock.patch("sentry.utils.metrics.incr") as mock_incr: process_data_source(self.packet, "test") mock_incr.assert_any_call( "workflow_engine.process_data_sources", tags={"query_type": "test"} ) def test_metrics_are_sent_for_no_detectors(self) -> None: with mock.patch("sentry.utils.metrics.incr") as mock_incr: process_data_source(self.packet, "test3") mock_incr.assert_any_call( "workflow_engine.process_data_sources.no_detectors", tags={"query_type": "test3"}, ) def test_metrics_for_many_detectors(self) -> None: self.detector_three = self.create_detector(name="test_detector3") self.ds1.detectors.add(self.detector_three) with mock.patch("sentry.utils.metrics.incr") as mock_incr: process_data_source(self.packet, "test") mock_incr.assert_any_call( "workflow_engine.process_data_sources.detectors", 2, tags={"query_type": "test"}, ) def test_sql_cascades(self) -> None: with self.assertNumQueries(2): """ There should be 2 total SQL queries for `bulk_fetch_enabled_detectors`: - Get the detector and data condition group associated with it - Get all the data conditions for the group """ _, detectors = process_data_source(self.two_detector_packet, "test") # If the detector is not prefetched this will 
increase the query count assert all(detector.enabled for detector in detectors) for detector in detectors: if detector.workflow_condition_group: # Trigger a SQL query if not prefetched, and fail the assertion assert detector.workflow_condition_group.id is not None for condition in detector.workflow_condition_group.conditions.all(): # Trigger a SQL query if not prefetched, and fail the assertion assert condition.id is not None
TestProcessDataSources
python
pypa__warehouse
warehouse/manage/forms.py
{ "start": 8951, "end": 11218 }
class ____(wtforms.Form): __params__ = ["description", "token_scope"] def __init__( self, *args, user_id, macaroon_service, project_names, selected_project=None, **kwargs, ): super().__init__(*args, **kwargs) self.user_id = user_id self.macaroon_service = macaroon_service self.project_names = project_names if selected_project is not None: self.token_scope.data = self.scope_prefix + selected_project description = wtforms.StringField( validators=[ wtforms.validators.InputRequired(message="Specify a token name"), wtforms.validators.Length( max=100, message="Description must be 100 characters or less" ), ] ) token_scope = wtforms.StringField( validators=[wtforms.validators.InputRequired(message="Specify the token scope")] ) scope_prefix = "scope:project:" def validate_description(self, field): description = field.data if ( self.macaroon_service.get_macaroon_by_description(self.user_id, description) is not None ): raise wtforms.validators.ValidationError("API token name already in use") def validate_token_scope(self, field): scope = field.data try: _, scope_kind = scope.split(":", 1) except ValueError: raise wtforms.ValidationError(f"Unknown token scope: {scope}") if scope_kind == "unspecified": raise wtforms.ValidationError("Specify the token scope") if scope_kind == "user": self.validated_scope = scope_kind return try: scope_kind, scope_value = scope_kind.split(":", 1) except ValueError: raise wtforms.ValidationError(f"Unknown token scope: {scope}") if scope_kind != "project": raise wtforms.ValidationError(f"Unknown token scope: {scope}") if scope_value not in self.project_names: raise wtforms.ValidationError( f"Unknown or invalid project name: {scope_value}" ) self.validated_scope = {"projects": [scope_value]}
CreateMacaroonForm
python
gevent__gevent
src/greentest/3.10/test_asyncore.py
{ "start": 26437, "end": 26608 }
class ____(TestAPI_UseUnixSockets, unittest.TestCase): use_poll = False @unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
TestAPI_UseUnixSocketsSelect
python
walkccc__LeetCode
solutions/986. Interval List Intersections/986.py
{ "start": 0, "end": 578 }
class ____: def intervalIntersection(self, firstList: list[list[int]], secondList: list[list[int]]) -> list[list[int]]: ans = [] i = 0 j = 0 while i < len(firstList) and j < len(secondList): # lo := the start of the intersection # hi := the end of the intersection lo = max(firstList[i][0], secondList[j][0]) hi = min(firstList[i][1], secondList[j][1]) if lo <= hi: ans.append([lo, hi]) if firstList[i][1] < secondList[j][1]: i += 1 else: j += 1 return ans
Solution
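A brief illustrative run of the two-pointer sweep above, using the sample interval lists from LeetCode 986's first example:

print(Solution().intervalIntersection(
    [[0, 2], [5, 10], [13, 23], [24, 25]],
    [[1, 5], [8, 12], [15, 24], [25, 26]],
))
# [[1, 2], [5, 5], [8, 10], [15, 23], [24, 24], [25, 25]]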
python
keras-team__keras
keras/src/initializers/random_initializers.py
{ "start": 21113, "end": 23631 }
class ____(RandomInitializer): """Initializer that generates an orthogonal matrix. If the shape of the tensor to initialize is two-dimensional, it is initialized with an orthogonal matrix obtained from the QR decomposition of a matrix of random numbers drawn from a normal distribution. If the matrix has fewer rows than columns then the output will have orthogonal rows. Otherwise, the output will have orthogonal columns. If the shape of the tensor to initialize is more than two-dimensional, a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])` is initialized, where `n` is the length of the shape vector. The matrix is subsequently reshaped to give a tensor of the desired shape. Examples: >>> # Standalone usage: >>> initializer = keras.initializers.Orthogonal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = keras.initializers.Orthogonal() >>> layer = keras.layers.Dense(3, kernel_initializer=initializer) Args: gain: Multiplicative factor to apply to the orthogonal matrix. seed: A Python integer. Used to make the behavior of the initializer deterministic. Reference: - [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C) """ def __init__(self, gain=1.0, seed=None): self.gain = gain super().__init__(seed=seed) def __call__(self, shape, dtype=None): if len(shape) < 2: raise ValueError( "The tensor to initialize must be " "at least two-dimensional. Received: " f"shape={shape} of rank {len(shape)}." ) # Flatten the input shape with the last dimension remaining # its original shape so it works for conv2d num_rows = 1 for dim in shape[:-1]: num_rows *= dim num_cols = shape[-1] flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows)) # Generate a random matrix a = random.normal(flat_shape, seed=self.seed, dtype=dtype) # Compute the qr factorization q, r = ops.qr(a) # Make Q uniform d = ops.diag(r) q *= ops.sign(d) if num_rows < num_cols: q = ops.transpose(q) return self.gain * ops.reshape(q, shape) def get_config(self): base_config = super().get_config() config = {"gain": self.gain} return {**base_config, **config}
Orthogonal
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_type_lookup.py
{ "start": 8928, "end": 9179 }
class ____(Sequence[str]): """Represent a sequence of text lines. It turns out that resolving a class which inherits from a parametrised generic type is... tricky. See https://github.com/HypothesisWorks/hypothesis/issues/2951 """
Lines
python
python-poetry__poetry
src/poetry/repositories/single_page_repository.py
{ "start": 329, "end": 727 }
class ____(LegacyRepository): def _get_page(self, name: NormalizedName) -> HTMLPage: """ Single page repositories only have one page irrespective of endpoint. """ response = self._get_response("") if not response: raise PackageNotFoundError(f"Package [{name}] not found.") return HTMLPage(response.url, response.text)
SinglePageRepository
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/overloadOverlap1.py
{ "start": 8156, "end": 9224 }
class ____(Protocol): def __call__(self, *args: Any, **kwargs: Any) -> Any: ... @overload def func30(func: CBProto30) -> None: ... @overload # This should generate an error because this overload will never be used. def func30(func: Callable[..., Any]) -> None: ... def func30(func: Any) -> None: ... @overload # This should generate an error because of a partial overlap. def func31(*args: Any, a: int = ...) -> int: ... @overload def func31(*args: Any, a: str = ...) -> str: ... def func31(*args: Any, a: int | str = 0) -> int | str: ... type NestedList[V] = V | list[V | NestedList[V]] @overload def func32[V](n: list[NestedList[V]]) -> list[V]: ... @overload def func32(n: list[NestedList[Any]]) -> list[Any]: ... def func32(n: Any) -> Any: ... @overload def func33(a: int, /, *args: str) -> None: ... @overload def func33(*args: str) -> None: ... def func33(*args: int | str) -> None: ... @overload def func34(fn: Callable[[], Any]) -> None: ... @overload def func34[**P](fn: Callable[P, Any]) -> None: ... def func34(fn: ...) -> ...: ...
CBProto30
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/extra/redis.py
{ "start": 701, "end": 5547 }
class ____(ExampleDatabase): """Store Hypothesis examples as sets in the given :class:`~redis.Redis` datastore. This is particularly useful for shared databases, as per the recipe for a :class:`~hypothesis.database.MultiplexedDatabase`. .. note:: If a test has not been run for ``expire_after``, those examples will be allowed to expire. The default time-to-live persists examples between weekly runs. """ def __init__( self, redis: Redis, *, expire_after: timedelta = timedelta(days=8), key_prefix: bytes = b"hypothesis-example:", listener_channel: str = "hypothesis-changes", ): super().__init__() check_type(Redis, redis, "redis") check_type(timedelta, expire_after, "expire_after") check_type(bytes, key_prefix, "key_prefix") check_type(str, listener_channel, "listener_channel") self.redis = redis self._expire_after = expire_after self._prefix = key_prefix self.listener_channel = listener_channel self._pubsub: Any = None def __repr__(self) -> str: return ( f"RedisExampleDatabase({self.redis!r}, expire_after={self._expire_after!r})" ) def __eq__(self, other: object) -> bool: return ( isinstance(other, RedisExampleDatabase) and self.redis == other.redis and self._prefix == other._prefix and self.listener_channel == other.listener_channel ) @contextmanager def _pipeline( self, *reset_expire_keys, execute_and_publish=True, event_type=None, to_publish=None, ): # Context manager to batch updates and expiry reset, reducing TCP roundtrips pipe = self.redis.pipeline() yield pipe for key in reset_expire_keys: pipe.expire(self._prefix + key, self._expire_after) if execute_and_publish: changed = pipe.execute() # pipe.execute returns the rows modified for each operation, which includes # the operations performed during the yield, followed by the n operations # from pipe.exire. Look at just the operations from during the yield. changed = changed[: -len(reset_expire_keys)] if any(count > 0 for count in changed): assert to_publish is not None assert event_type is not None self._publish((event_type, to_publish)) def _publish(self, event): event = (event[0], tuple(self._encode(v) for v in event[1])) self.redis.publish(self.listener_channel, json.dumps(event)) def _encode(self, value: bytes) -> str: return base64.b64encode(value).decode("ascii") def _decode(self, value: str) -> bytes: return base64.b64decode(value) def fetch(self, key: bytes) -> Iterable[bytes]: with self._pipeline(key, execute_and_publish=False) as pipe: pipe.smembers(self._prefix + key) yield from pipe.execute()[0] def save(self, key: bytes, value: bytes) -> None: with self._pipeline(key, event_type="save", to_publish=(key, value)) as pipe: pipe.sadd(self._prefix + key, value) def delete(self, key: bytes, value: bytes) -> None: with self._pipeline(key, event_type="delete", to_publish=(key, value)) as pipe: pipe.srem(self._prefix + key, value) def move(self, src: bytes, dest: bytes, value: bytes) -> None: if src == dest: self.save(dest, value) return with self._pipeline(src, dest, execute_and_publish=False) as pipe: pipe.srem(self._prefix + src, value) pipe.sadd(self._prefix + dest, value) changed = pipe.execute() if changed[0] > 0: # did the value set of the first key change? self._publish(("delete", (src, value))) if changed[1] > 0: # did the value set of the second key change? self._publish(("save", (dest, value))) def _handle_message(self, message: dict) -> None: # other message types include "subscribe" and "unsubscribe". these are # sent to the client, but not to the pubsub channel. 
assert message["type"] == "message" data = json.loads(message["data"]) event_type = data[0] self._broadcast_change( (event_type, tuple(self._decode(v) for v in data[1])) # type: ignore ) def _start_listening(self) -> None: self._pubsub = self.redis.pubsub() self._pubsub.subscribe(**{self.listener_channel: self._handle_message}) def _stop_listening(self) -> None: self._pubsub.unsubscribe() self._pubsub.close() self._pubsub = None
RedisExampleDatabase
python
gevent__gevent
src/greentest/3.11/test_ssl.py
{ "start": 45751, "end": 76291 }
class ____(unittest.TestCase): def test_constructor(self): for protocol in PROTOCOLS: if has_tls_protocol(protocol): with warnings_helper.check_warnings(): ctx = ssl.SSLContext(protocol) self.assertEqual(ctx.protocol, protocol) with warnings_helper.check_warnings(): ctx = ssl.SSLContext() self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS) self.assertRaises(ValueError, ssl.SSLContext, -1) self.assertRaises(ValueError, ssl.SSLContext, 42) def test_ciphers(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.set_ciphers("ALL") ctx.set_ciphers("DEFAULT") with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"): ctx.set_ciphers("^$:,;?*'dorothyx") @unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1, "Test applies only to Python default ciphers") def test_python_ciphers(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ciphers = ctx.get_ciphers() for suite in ciphers: name = suite['name'] self.assertNotIn("PSK", name) self.assertNotIn("SRP", name) self.assertNotIn("MD5", name) self.assertNotIn("RC4", name) self.assertNotIn("3DES", name) def test_get_ciphers(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.set_ciphers('AESGCM') names = set(d['name'] for d in ctx.get_ciphers()) expected = { 'AES128-GCM-SHA256', 'ECDHE-ECDSA-AES128-GCM-SHA256', 'ECDHE-RSA-AES128-GCM-SHA256', 'DHE-RSA-AES128-GCM-SHA256', 'AES256-GCM-SHA384', 'ECDHE-ECDSA-AES256-GCM-SHA384', 'ECDHE-RSA-AES256-GCM-SHA384', 'DHE-RSA-AES256-GCM-SHA384', } intersection = names.intersection(expected) self.assertGreaterEqual( len(intersection), 2, f"\ngot: {sorted(names)}\nexpected: {sorted(expected)}" ) def test_options(self): # Test default SSLContext options ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) # OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3) # SSLContext also enables these by default default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE | OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE | OP_ENABLE_MIDDLEBOX_COMPAT) self.assertEqual(default, ctx.options) # disallow TLSv1 with warnings_helper.check_warnings(): ctx.options |= ssl.OP_NO_TLSv1 self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options) # allow TLSv1 with warnings_helper.check_warnings(): ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1) self.assertEqual(default, ctx.options) # clear all options ctx.options = 0 # Ubuntu has OP_NO_SSLv3 forced on by default self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3) # invalid options with self.assertRaises(OverflowError): ctx.options = -1 with self.assertRaises(OverflowError): ctx.options = 2 ** 100 with self.assertRaises(TypeError): ctx.options = "abc" def test_verify_mode_protocol(self): with warnings_helper.check_warnings(): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS) # Default value self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) ctx.verify_mode = ssl.CERT_OPTIONAL self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL) ctx.verify_mode = ssl.CERT_REQUIRED self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) ctx.verify_mode = ssl.CERT_NONE self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) with self.assertRaises(TypeError): ctx.verify_mode = None with self.assertRaises(ValueError): ctx.verify_mode = 42 ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self.assertFalse(ctx.check_hostname) ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertTrue(ctx.check_hostname) def test_hostname_checks_common_name(self): ctx = 
ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertTrue(ctx.hostname_checks_common_name) if ssl.HAS_NEVER_CHECK_COMMON_NAME: ctx.hostname_checks_common_name = True self.assertTrue(ctx.hostname_checks_common_name) ctx.hostname_checks_common_name = False self.assertFalse(ctx.hostname_checks_common_name) ctx.hostname_checks_common_name = True self.assertTrue(ctx.hostname_checks_common_name) else: with self.assertRaises(AttributeError): ctx.hostname_checks_common_name = True @ignore_deprecation def test_min_max_version(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) # OpenSSL default is MINIMUM_SUPPORTED, however some vendors like # Fedora override the setting to TLS 1.0. minimum_range = { # stock OpenSSL ssl.TLSVersion.MINIMUM_SUPPORTED, # Fedora 29 uses TLS 1.0 by default ssl.TLSVersion.TLSv1, # RHEL 8 uses TLS 1.2 by default ssl.TLSVersion.TLSv1_2 } maximum_range = { # stock OpenSSL ssl.TLSVersion.MAXIMUM_SUPPORTED, # Fedora 32 uses TLS 1.3 by default ssl.TLSVersion.TLSv1_3 } self.assertIn( ctx.minimum_version, minimum_range ) self.assertIn( ctx.maximum_version, maximum_range ) ctx.minimum_version = ssl.TLSVersion.TLSv1_1 ctx.maximum_version = ssl.TLSVersion.TLSv1_2 self.assertEqual( ctx.minimum_version, ssl.TLSVersion.TLSv1_1 ) self.assertEqual( ctx.maximum_version, ssl.TLSVersion.TLSv1_2 ) ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED ctx.maximum_version = ssl.TLSVersion.TLSv1 self.assertEqual( ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED ) self.assertEqual( ctx.maximum_version, ssl.TLSVersion.TLSv1 ) ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED self.assertEqual( ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED ) ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED self.assertIn( ctx.maximum_version, {ssl.TLSVersion.TLSv1, ssl.TLSVersion.TLSv1_1, ssl.TLSVersion.SSLv3} ) ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED self.assertIn( ctx.minimum_version, {ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3} ) with self.assertRaises(ValueError): ctx.minimum_version = 42 if has_tls_protocol(ssl.PROTOCOL_TLSv1_1): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1) self.assertIn( ctx.minimum_version, minimum_range ) self.assertEqual( ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED ) with self.assertRaises(ValueError): ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED with self.assertRaises(ValueError): ctx.maximum_version = ssl.TLSVersion.TLSv1 @unittest.skipUnless( hasattr(ssl.SSLContext, 'security_level'), "requires OpenSSL >= 1.1.0" ) def test_security_level(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) # The default security callback allows for levels between 0-5 # with OpenSSL defaulting to 1, however some vendors override the # default value (e.g. 
Debian defaults to 2) security_level_range = { 0, 1, # OpenSSL default 2, # Debian 3, 4, 5, } self.assertIn(ctx.security_level, security_level_range) def test_verify_flags(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) # default value tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0) self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf) ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF) ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN) ctx.verify_flags = ssl.VERIFY_DEFAULT self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT) ctx.verify_flags = ssl.VERIFY_ALLOW_PROXY_CERTS self.assertEqual(ctx.verify_flags, ssl.VERIFY_ALLOW_PROXY_CERTS) # supports any value ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT) with self.assertRaises(TypeError): ctx.verify_flags = None def test_load_cert_chain(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) # Combined key and cert in a single file ctx.load_cert_chain(CERTFILE, keyfile=None) ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE) self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE) with self.assertRaises(OSError) as cm: ctx.load_cert_chain(NONEXISTINGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(BADCERT) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(EMPTYCERT) # Separate key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) ctx.load_cert_chain(ONLYCERT, ONLYKEY) ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY) ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(ONLYCERT) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(ONLYKEY) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT) # Mismatching key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"): ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY) # Password protected key and cert ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD) ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode()) ctx.load_cert_chain(CERTFILE_PROTECTED, password=bytearray(KEY_PASSWORD.encode())) ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD) ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode()) ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, bytearray(KEY_PASSWORD.encode())) with self.assertRaisesRegex(TypeError, "should be a string"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=True) with self.assertRaises(ssl.SSLError): ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass") with self.assertRaisesRegex(ValueError, "cannot be longer"): # openssl has a fixed limit on the password buffer. # PEM_BUFSIZE is generally set to 1kb. # Return a string larger than this. 
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400) # Password callback def getpass_unicode(): return KEY_PASSWORD def getpass_bytes(): return KEY_PASSWORD.encode() def getpass_bytearray(): return bytearray(KEY_PASSWORD.encode()) def getpass_badpass(): return "badpass" def getpass_huge(): return b'a' * (1024 * 1024) def getpass_bad_type(): return 9 def getpass_exception(): raise Exception('getpass error') class GetPassCallable: def __call__(self): return KEY_PASSWORD def getpass(self): return KEY_PASSWORD ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode) ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes) ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray) ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable()) ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable().getpass) with self.assertRaises(ssl.SSLError): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass) with self.assertRaisesRegex(ValueError, "cannot be longer"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge) with self.assertRaisesRegex(TypeError, "must return a string"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type) with self.assertRaisesRegex(Exception, "getpass error"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception) # Make sure the password function isn't called if it isn't needed ctx.load_cert_chain(CERTFILE, password=getpass_exception) def test_load_verify_locations(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) ctx.load_verify_locations(CERTFILE) ctx.load_verify_locations(cafile=CERTFILE, capath=None) ctx.load_verify_locations(BYTES_CERTFILE) ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None) self.assertRaises(TypeError, ctx.load_verify_locations) self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None) with self.assertRaises(OSError) as cm: ctx.load_verify_locations(NONEXISTINGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaisesRegex(ssl.SSLError, "PEM lib"): ctx.load_verify_locations(BADCERT) ctx.load_verify_locations(CERTFILE, CAPATH) ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH) # Issue #10989: crash if the second argument type is invalid self.assertRaises(TypeError, ctx.load_verify_locations, None, True) def test_load_verify_cadata(self): # test cadata with open(CAFILE_CACERT) as f: cacert_pem = f.read() cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem) with open(CAFILE_NEURONIO) as f: neuronio_pem = f.read() neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem) # test PEM ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0) ctx.load_verify_locations(cadata=cacert_pem) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1) ctx.load_verify_locations(cadata=neuronio_pem) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # cert already in hash table ctx.load_verify_locations(cadata=neuronio_pem) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # combined ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) combined = "\n".join((cacert_pem, neuronio_pem)) ctx.load_verify_locations(cadata=combined) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # with junk around the certs ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) combined = ["head", cacert_pem, "other", neuronio_pem, "again", neuronio_pem, "tail"] ctx.load_verify_locations(cadata="\n".join(combined)) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # test DER ctx = 
ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.load_verify_locations(cadata=cacert_der) ctx.load_verify_locations(cadata=neuronio_der) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # cert already in hash table ctx.load_verify_locations(cadata=cacert_der) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # combined ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) combined = b"".join((cacert_der, neuronio_der)) ctx.load_verify_locations(cadata=combined) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # error cases ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object) with self.assertRaisesRegex( ssl.SSLError, "no start line: cadata does not contain a certificate" ): ctx.load_verify_locations(cadata="broken") with self.assertRaisesRegex( ssl.SSLError, "not enough data: cadata does not contain a certificate" ): ctx.load_verify_locations(cadata=b"broken") @unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows") def test_load_dh_params(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) ctx.load_dh_params(DHFILE) if os.name != 'nt': ctx.load_dh_params(BYTES_DHFILE) self.assertRaises(TypeError, ctx.load_dh_params) self.assertRaises(TypeError, ctx.load_dh_params, None) with self.assertRaises(FileNotFoundError) as cm: ctx.load_dh_params(NONEXISTINGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaises(ssl.SSLError) as cm: ctx.load_dh_params(CERTFILE) def test_session_stats(self): for proto in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}: ctx = ssl.SSLContext(proto) self.assertEqual(ctx.session_stats(), { 'number': 0, 'connect': 0, 'connect_good': 0, 'connect_renegotiate': 0, 'accept': 0, 'accept_good': 0, 'accept_renegotiate': 0, 'hits': 0, 'misses': 0, 'timeouts': 0, 'cache_full': 0, }) def test_set_default_verify_paths(self): # There's not much we can do to test that it acts as expected, # so just check it doesn't crash or raise an exception. ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.set_default_verify_paths() @unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build") def test_set_ecdh_curve(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) ctx.set_ecdh_curve("prime256v1") ctx.set_ecdh_curve(b"prime256v1") self.assertRaises(TypeError, ctx.set_ecdh_curve) self.assertRaises(TypeError, ctx.set_ecdh_curve, None) self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo") self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo") def test_sni_callback(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) # set_servername_callback expects a callable, or None self.assertRaises(TypeError, ctx.set_servername_callback) self.assertRaises(TypeError, ctx.set_servername_callback, 4) self.assertRaises(TypeError, ctx.set_servername_callback, "") self.assertRaises(TypeError, ctx.set_servername_callback, ctx) def dummycallback(sock, servername, ctx): pass ctx.set_servername_callback(None) ctx.set_servername_callback(dummycallback) def test_sni_callback_refcycle(self): # Reference cycles through the servername callback are detected # and cleared. 
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) def dummycallback(sock, servername, ctx, cycle=ctx): pass ctx.set_servername_callback(dummycallback) wr = weakref.ref(ctx) del ctx, dummycallback gc.collect() self.assertIs(wr(), None) def test_cert_store_stats(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 0}) ctx.load_cert_chain(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 0}) ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 1}) ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 1, 'crl': 0, 'x509': 2}) def test_get_ca_certs(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.get_ca_certs(), []) # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.get_ca_certs(), []) # but CAFILE_CACERT is a CA cert ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.get_ca_certs(), [{'issuer': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), (('commonName', 'CA Cert Signing Authority'),), (('emailAddress', 'support@cacert.org'),)), 'notAfter': 'Mar 29 12:29:49 2033 GMT', 'notBefore': 'Mar 30 12:29:49 2003 GMT', 'serialNumber': '00', 'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',), 'subject': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), (('commonName', 'CA Cert Signing Authority'),), (('emailAddress', 'support@cacert.org'),)), 'version': 3}]) with open(CAFILE_CACERT) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) self.assertEqual(ctx.get_ca_certs(True), [der]) def test_load_default_certs(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.load_default_certs() ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.load_default_certs(ssl.Purpose.SERVER_AUTH) ctx.load_default_certs() ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH) ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertRaises(TypeError, ctx.load_default_certs, None) self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH') @unittest.skipIf(sys.platform == "win32", "not-Windows specific") def test_load_default_certs_env(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) with os_helper.EnvironmentVarGuard() as env: env["SSL_CERT_DIR"] = CAPATH env["SSL_CERT_FILE"] = CERTFILE ctx.load_default_certs() self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0}) @unittest.skipUnless(sys.platform == "win32", "Windows specific") @unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs") def test_load_default_certs_env_windows(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.load_default_certs() stats = ctx.cert_store_stats() ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) with os_helper.EnvironmentVarGuard() as env: env["SSL_CERT_DIR"] = CAPATH env["SSL_CERT_FILE"] = CERTFILE ctx.load_default_certs() stats["x509"] += 1 self.assertEqual(ctx.cert_store_stats(), stats) def _assert_context_options(self, ctx): self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2) if OP_NO_COMPRESSION != 0: self.assertEqual(ctx.options & OP_NO_COMPRESSION, OP_NO_COMPRESSION) if OP_SINGLE_DH_USE != 0: self.assertEqual(ctx.options & OP_SINGLE_DH_USE, OP_SINGLE_DH_USE) if OP_SINGLE_ECDH_USE != 0: 
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE, OP_SINGLE_ECDH_USE) if OP_CIPHER_SERVER_PREFERENCE != 0: self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE, OP_CIPHER_SERVER_PREFERENCE) def test_create_default_context(self): ctx = ssl.create_default_context() self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertTrue(ctx.check_hostname) self._assert_context_options(ctx) with open(SIGNING_CA) as f: cadata = f.read() ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH, cadata=cadata) self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self._assert_context_options(ctx) ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self._assert_context_options(ctx) def test__create_stdlib_context(self): ctx = ssl._create_stdlib_context() self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self.assertFalse(ctx.check_hostname) self._assert_context_options(ctx) if has_tls_protocol(ssl.PROTOCOL_TLSv1): with warnings_helper.check_warnings(): ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1) self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self._assert_context_options(ctx) with warnings_helper.check_warnings(): ctx = ssl._create_stdlib_context( ssl.PROTOCOL_TLSv1_2, cert_reqs=ssl.CERT_REQUIRED, check_hostname=True ) self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1_2) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertTrue(ctx.check_hostname) self._assert_context_options(ctx) ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH) self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) self._assert_context_options(ctx) def test_check_hostname(self): with warnings_helper.check_warnings(): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS) self.assertFalse(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) # Auto set CERT_REQUIRED ctx.check_hostname = True self.assertTrue(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) ctx.check_hostname = False ctx.verify_mode = ssl.CERT_REQUIRED self.assertFalse(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) # Changing verify_mode does not affect check_hostname ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE ctx.check_hostname = False self.assertFalse(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) # Auto set ctx.check_hostname = True self.assertTrue(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) ctx.check_hostname = False ctx.verify_mode = ssl.CERT_OPTIONAL ctx.check_hostname = False self.assertFalse(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL) # keep CERT_OPTIONAL ctx.check_hostname = True self.assertTrue(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL) # Cannot set CERT_NONE with check_hostname enabled with self.assertRaises(ValueError): ctx.verify_mode = ssl.CERT_NONE ctx.check_hostname = False self.assertFalse(ctx.check_hostname) ctx.verify_mode = ssl.CERT_NONE self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) def test_context_client_server(self): # PROTOCOL_TLS_CLIENT has sane defaults ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertTrue(ctx.check_hostname) 
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) # PROTOCOL_TLS_SERVER has different but also sane defaults ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) self.assertFalse(ctx.check_hostname) self.assertEqual(ctx.verify_mode, ssl.CERT_NONE) def test_context_custom_class(self): class MySSLSocket(ssl.SSLSocket): pass class MySSLObject(ssl.SSLObject): pass ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) ctx.sslsocket_class = MySSLSocket ctx.sslobject_class = MySSLObject with ctx.wrap_socket(socket.socket(), server_side=True) as sock: self.assertIsInstance(sock, MySSLSocket) obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(), server_side=True) self.assertIsInstance(obj, MySSLObject) def test_num_tickest(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) self.assertEqual(ctx.num_tickets, 2) ctx.num_tickets = 1 self.assertEqual(ctx.num_tickets, 1) ctx.num_tickets = 0 self.assertEqual(ctx.num_tickets, 0) with self.assertRaises(ValueError): ctx.num_tickets = -1 with self.assertRaises(TypeError): ctx.num_tickets = None ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) self.assertEqual(ctx.num_tickets, 2) with self.assertRaises(ValueError): ctx.num_tickets = 1
ContextTests
python
huggingface__transformers
src/transformers/convert_slow_tokenizer.py
{ "start": 27023, "end": 27971 }
class ____(SpmConverter): def vocab(self, proto): vocab = [ ("<s>NOTUSED", 0.0), ("<pad>", 0.0), ("</s>NOTUSED", 0.0), ("<unk>", 0.0), ("<unk>NOTUSED", -100), ] # We down-grade the original SentencePiece by -100 to avoid using it and use our added token instead vocab += [(piece.piece, piece.score) for piece in proto.pieces[1:]] vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): # See vocab unk position return 3 def post_processor(self): return processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], )
CamembertConverter
python
walkccc__LeetCode
solutions/2944. Minimum Number of Coins for Fruits/2944-3.py
{ "start": 0, "end": 519 }
class ____: def minimumCoins(self, prices: list[int]) -> int: n = len(prices) ans = math.inf # Stores (dp[i], i), where dp[i] := the minimum number of coins to acquire # fruits[i:] (0-indexed) in ascending order. minQ = collections.deque([(0, n)]) for i in range(n - 1, -1, -1): while minQ and minQ[0][1] > (i + 1) * 2: minQ.popleft() ans = prices[i] + minQ[0][0] while minQ and minQ[-1][0] >= ans: minQ.pop() minQ.append((ans, i)) return ans
Solution
python
run-llama__llama_index
llama-index-integrations/vector_stores/llama-index-vector-stores-couchbase/llama_index/vector_stores/couchbase/base.py
{ "start": 17467, "end": 24922 }
class ____(CouchbaseVectorStoreBase): """ Couchbase Vector Store using Search Vector Indexes (FTS-based). This implementation uses Couchbase's Search Vector Indexes, which combine Full-Text Search (FTS) with vector search capabilities. Ideal for hybrid searches combining vector similarity, full-text search, and geospatial queries. Supports datasets up to tens of millions of documents. Requires Couchbase Server 7.6 or later. To use, you should have the ``couchbase`` python package installed. For more information, see: https://docs.couchbase.com/server/current/vector-index/use-vector-indexes.html """ _index_name: str = PrivateAttr() _scoped_index: bool = PrivateAttr() def __init__( self, cluster: Any, bucket_name: str, scope_name: str, collection_name: str, index_name: str, text_key: Optional[str] = "text", embedding_key: Optional[str] = "embedding", metadata_key: Optional[str] = "metadata", scoped_index: bool = True, query_options: Optional[QueryOptions] = None, ) -> None: """ Initializes a connection to a Couchbase Vector Store using FTS. Args: cluster (Cluster): Couchbase cluster object with active connection. bucket_name (str): Name of bucket to store documents in. scope_name (str): Name of scope in the bucket to store documents in. collection_name (str): Name of collection in the scope to store documents in. index_name (str): Name of the Search index. text_key (Optional[str], optional): The field for the document text. Defaults to "text". embedding_key (Optional[str], optional): The field for the document embedding. Defaults to "embedding". metadata_key (Optional[str], optional): The field for the document metadata. Defaults to "metadata". scoped_index (Optional[bool]): specify whether the index is a scoped index. Set to True by default. query_options (Optional[QueryOptions]): Query options for SQL++ queries. Defaults to None. Returns: None """ super().__init__( cluster=cluster, bucket_name=bucket_name, scope_name=scope_name, collection_name=collection_name, text_key=text_key, embedding_key=embedding_key, metadata_key=metadata_key, query_options=query_options, ) if not index_name: raise ValueError("index_name must be provided.") self._index_name = index_name self._scoped_index = scoped_index # Check if the index exists. Throws ValueError if it doesn't try: self._check_index_exists() except Exception as e: raise def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult: """ Executes a query in the vector store and returns the result. Args: query (VectorStoreQuery): The query object containing the search parameters. **kwargs (Any): Additional keyword arguments. cb_search_options (Dict): Search options to pass to Couchbase Search Returns: VectorStoreQueryResult: The result of the query containing the top-k nodes, similarities, and ids. 
""" fields = query.output_fields if not fields: fields = ["*"] # Document text field needs to be returned from the search if self._text_key not in fields and fields != ["*"]: fields.append(self._text_key) logger.debug("Output Fields: ", fields) k = query.similarity_top_k # Get the search options search_options = kwargs.get("cb_search_options", {}) if search_options and query.filters: raise ValueError("Cannot use both filters and cb_search_options") elif query.filters: couchbase_options = _to_couchbase_filter(query.filters) logger.debug(f"Filters transformed to Couchbase: {couchbase_options}") search_options = couchbase_options logger.debug(f"Filters: {search_options}") # Create Search Request search_req = search.SearchRequest.create( VectorSearch.from_vector_query( VectorQuery( self._embedding_key, query.query_embedding, k, ) ) ) try: logger.debug("Querying Couchbase") if self._scoped_index: search_iter = self._scope.search( self._index_name, search_req, SearchOptions(limit=k, fields=fields, raw=search_options), ) else: search_iter = self._cluster.search( self._index_name, search_req, SearchOptions(limit=k, fields=fields, raw=search_options), ) except Exception as e: logger.debug(f"Search failed with error {e}") raise ValueError(f"Search failed with error: {e}") top_k_nodes = [] top_k_scores = [] top_k_ids = [] # Parse the results for result in search_iter.rows(): text = result.fields.pop(self._text_key, "") score = result.score # Format the metadata into a dictionary metadata_dict = self._format_metadata(result.fields) id = result.id try: node = metadata_dict_to_node(metadata_dict, text) except Exception: # Deprecated legacy logic for backwards compatibility node = TextNode( text=text, id_=id, score=score, metadata=metadata_dict, ) top_k_nodes.append(node) top_k_scores.append(score) top_k_ids.append(id) return VectorStoreQueryResult( nodes=top_k_nodes, similarities=top_k_scores, ids=top_k_ids ) def _check_index_exists(self) -> bool: """ Check if the Search index exists in the linked Couchbase cluster Returns: bool: True if the index exists, False otherwise. Raises a ValueError if the index does not exist. """ if self._scoped_index: all_indexes = [ index.name for index in self._scope.search_indexes().get_all_indexes() ] if self._index_name not in all_indexes: raise ValueError( f"Index {self._index_name} does not exist. " " Please create the index before searching." ) else: all_indexes = [ index.name for index in self._cluster.search_indexes().get_all_indexes() ] if self._index_name not in all_indexes: raise ValueError( f"Index {self._index_name} does not exist. " " Please create the index before searching." ) return True
CouchbaseSearchVectorStore
python
spack__spack
lib/spack/spack/vendor/ruamel/yaml/scalarstring.py
{ "start": 2624, "end": 4286 }
class ____(ScalarString): __slots__ = () style = '' def __new__(cls, value, anchor=None): # type: (Text, Any) -> Any return ScalarString.__new__(cls, value, anchor=anchor) def preserve_literal(s): # type: (Text) -> Text return LiteralScalarString(s.replace('\r\n', '\n').replace('\r', '\n')) def walk_tree(base, map=None): # type: (Any, Any) -> None """ the routine here walks over a simple yaml tree (recursing in dict values and list items) and converts strings that have multiple lines to literal scalars You can also provide an explicit (ordered) mapping for multiple transforms (first of which is executed): map = spack.vendor.ruamel.yaml.compat.ordereddict map['\n'] = preserve_literal map[':'] = SingleQuotedScalarString walk_tree(data, map=map) """ from collections.abc import MutableMapping, MutableSequence if map is None: map = {'\n': preserve_literal} if isinstance(base, MutableMapping): for k in base: v = base[k] # type: Text if isinstance(v, str): for ch in map: if ch in v: base[k] = map[ch](v) break else: walk_tree(v, map=map) elif isinstance(base, MutableSequence): for idx, elem in enumerate(base): if isinstance(elem, str): for ch in map: if ch in elem: base[idx] = map[ch](elem) break else: walk_tree(elem, map=map)
PlainScalarString
python
huggingface__transformers
src/transformers/models/sam2/modeling_sam2.py
{ "start": 15178, "end": 18858 }
class ____(nn.Module): def __init__( self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, activation: str = "relu", sigmoid_output: bool = False, ): super().__init__() self.num_layers = num_layers self.activation = ACT2FN[activation] self.proj_in = nn.Linear(input_dim, hidden_dim) self.proj_out = nn.Linear(hidden_dim, output_dim) self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)]) self.sigmoid_output = sigmoid_output def forward(self, hidden_states): hidden_states = self.proj_in(hidden_states) hidden_states = self.activation(hidden_states) for layer in self.layers: hidden_states = self.activation(layer(hidden_states)) hidden_states = self.proj_out(hidden_states) if self.sigmoid_output: hidden_states = F.sigmoid(hidden_states) return hidden_states def window_partition(hidden_state, window_size): """ Partition into non-overlapping windows with padding if needed. Args: hidden_state (`torch.Tensor`): Input tokens with [batch_size, height, width, num_channels]. window_size (`int`): Window size. Returns: `tuple(torch.FloatTensor)` comprising various elements: - windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels]. - (padded_height, padded_width): padded height and width before partition """ batch_size, height, width, num_channels = hidden_state.shape pad_height = (window_size - height % window_size) % window_size pad_width = (window_size - width % window_size) % window_size # Noop in case pad_width == 0 and pad_height == 0. hidden_state = nn.functional.pad(hidden_state, (0, 0, 0, pad_width, 0, pad_height)) padded_height, padded_width = height + pad_height, width + pad_width hidden_state = hidden_state.view( batch_size, padded_height // window_size, window_size, padded_width // window_size, window_size, num_channels ) windows = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return windows, (padded_height, padded_width) def window_unpartition(windows, window_size, pad_height_width, height_width): """ Window unpartition into original sequences and removing padding. Args: windows (`torch.Tensor`): Input tokens with [batch_size * num_windows, window_size, window_size, num_channels]. window_size (`int`): Window size. pad_height_width (`tuple[int]`): Padded height and width (padded_height, padded_width). height_width (`tuple[int]`): Original height and width before padding. Returns: hidden_state: unpartitioned sequences with [batch_size, height, width, num_channels]. """ padded_height, padded_width = pad_height_width height, width = height_width batch_size = windows.shape[0] // (padded_height * padded_width // window_size // window_size) hidden_state = windows.view( batch_size, padded_height // window_size, padded_width // window_size, window_size, window_size, -1 ) hidden_state = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous() hidden_state = hidden_state.view(batch_size, padded_height, padded_width, -1) # We always have height <= padded_height and width <= padded_width hidden_state = hidden_state[:, :height, :width, :].contiguous() return hidden_state
Sam2FeedForward
python
getsentry__sentry
tests/sentry/lang/javascript/test_sourcemaps.py
{ "start": 1785, "end": 3220 }
class ____(TestCase): def test_simple(self) -> None: smap_view = SourceMapView.from_json_bytes(sourcemap) result = smap_view.lookup(0, 56) assert result == SourceMapTokenMatch( dst_line=0, dst_col=50, src="foo/file2.js", src_line=0, src_col=9, src_id=1, name="multiply", ) # Start of minified file (exact match first line/col tuple) result = smap_view.lookup(0, 0) assert result == SourceMapTokenMatch( dst_line=0, dst_col=0, src="foo/file1.js", src_line=0, src_col=0, src_id=0, name=None ) # Last character in mapping result = smap_view.lookup(0, 36) assert result == SourceMapTokenMatch( dst_line=0, dst_col=30, src="foo/file1.js", src_line=2, src_col=1, src_id=0, name=None ) # First character in mapping (exact match line/col tuple) result = smap_view.lookup(0, 37) assert result == SourceMapTokenMatch( dst_line=0, dst_col=37, src="foo/file1.js", src_line=2, src_col=8, src_id=0, name="a" ) # End of minified file (character *beyond* last line/col tuple) result = smap_view.lookup(0, 192) assert result == SourceMapTokenMatch( dst_line=0, dst_col=191, src="foo/file2.js", src_line=9, src_col=25, src_id=1, name="e" )
FindSourceTest
python
numpy__numpy
numpy/f2py/tests/test_assumed_shape.py
{ "start": 63, "end": 847 }
class ____(util.F2PyTest): sources = [ util.getpath("tests", "src", "assumed_shape", "foo_free.f90"), util.getpath("tests", "src", "assumed_shape", "foo_use.f90"), util.getpath("tests", "src", "assumed_shape", "precision.f90"), util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"), util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"), ] @pytest.mark.slow def test_all(self): r = self.module.fsum([1, 2]) assert r == 3 r = self.module.sum([1, 2]) assert r == 3 r = self.module.sum_with_use([1, 2]) assert r == 3 r = self.module.mod.sum([1, 2]) assert r == 3 r = self.module.mod.fsum([1, 2]) assert r == 3
TestAssumedShapeSumExample
python
streamlit__streamlit
lib/tests/streamlit/web/server/media_file_handler_test.py
{ "start": 1126, "end": 5299 }
class ____(tornado.testing.AsyncHTTPTestCase): def setUp(self) -> None: super().setUp() # Create a new MediaFileManager and assign its storage to # MediaFileHandler. storage = MemoryMediaFileStorage(MOCK_ENDPOINT) self.media_file_manager = MediaFileManager(storage) MediaFileHandler.initialize_storage(storage) def get_app(self) -> tornado.web.Application: return tornado.web.Application( [(f"{MOCK_ENDPOINT}/(.*)", MediaFileHandler, {"path": ""})] ) @mock.patch( "streamlit.runtime.media_file_manager._get_session_id", MagicMock(return_value="mock_session_id"), ) def test_media_file(self) -> None: """Requests for media files in MediaFileManager should succeed.""" # Add a media file and read it back url = self.media_file_manager.add(b"mock_data", "video/mp4", "mock_coords") rsp = self.fetch(url, method="GET") assert rsp.code == 200 assert rsp.body == b"mock_data" assert rsp.headers["Content-Type"] == "video/mp4" assert rsp.headers["Content-Length"] == str(len(b"mock_data")) assert rsp.headers["Access-Control-Allow-Origin"] == "*" @mock.patch( "streamlit.runtime.media_file_manager._get_session_id", MagicMock(return_value="mock_session_id"), ) @mock.patch( "streamlit.web.server.media_file_handler.allow_all_cross_origin_requests", mock.MagicMock(return_value=False), ) @patch_config_options({"server.corsAllowedOrigins": ["http://example.com"]}) def test_media_file_allowed_origin(self) -> None: """Requests for media files in MediaFileManager should succeed with allowlisted origin.""" # Add a media file and read it back url = self.media_file_manager.add(b"mock_data", "video/mp4", "mock_coords") rsp = self.fetch(url, method="GET", headers={"Origin": "http://example.com"}) assert rsp.code == 200 assert rsp.body == b"mock_data" assert rsp.headers["Content-Type"] == "video/mp4" assert rsp.headers["Content-Length"] == str(len(b"mock_data")) assert rsp.headers["Access-Control-Allow-Origin"] == "http://example.com" @parameterized.expand( [ ("MockVideo.mp4", "video/mp4", 'attachment; filename="MockVideo.mp4"'), ( b"\xe6\xbc\xa2\xe5\xad\x97.mp3".decode(), "video/mp4", "attachment; filename*=utf-8''%E6%BC%A2%E5%AD%97.mp3", ), (None, "text/plain", 'attachment; filename="streamlit_download.txt"'), (None, "video/mp4", 'attachment; filename="streamlit_download.mp4"'), ( None, "application/octet-stream", 'attachment; filename="streamlit_download.bin"', ), ] ) @mock.patch( "streamlit.runtime.media_file_manager._get_session_id", MagicMock(return_value="mock_session_id"), ) def test_downloadable_file( self, file_name, mimetype, content_disposition_header ) -> None: """Downloadable files get an additional 'Content-Disposition' header that includes their user-specified filename or generic filename if filename is not specified. """ # Add a downloadable file with an optional filename url = self.media_file_manager.add( b"mock_data", mimetype, "mock_coords", file_name=file_name, is_for_static_download=True, ) rsp = self.fetch(url, method="GET") assert rsp.code == 200 assert rsp.body == b"mock_data" assert rsp.headers["Content-Type"] == mimetype assert rsp.headers["Content-Length"] == str(len(b"mock_data")) assert rsp.headers["Content-Disposition"] == content_disposition_header def test_invalid_file(self) -> None: """Requests for invalid files fail with 404.""" url = f"{MOCK_ENDPOINT}/invalid_media_file.mp4" rsp = self.fetch(url, method="GET") assert rsp.code == 404
MediaFileHandlerTest
python
sympy__sympy
sympy/functions/combinatorial/numbers.py
{ "start": 42496, "end": 47263 }
class ____(DefinedFunction): r""" Catalan numbers The `n^{th}` catalan number is given by: .. math :: C_n = \frac{1}{n+1} \binom{2n}{n} * ``catalan(n)`` gives the `n^{th}` Catalan number, `C_n` Examples ======== >>> from sympy import (Symbol, binomial, gamma, hyper, ... catalan, diff, combsimp, Rational, I) >>> [catalan(i) for i in range(1,10)] [1, 2, 5, 14, 42, 132, 429, 1430, 4862] >>> n = Symbol("n", integer=True) >>> catalan(n) catalan(n) Catalan numbers can be transformed into several other, identical expressions involving other mathematical functions >>> catalan(n).rewrite(binomial) binomial(2*n, n)/(n + 1) >>> catalan(n).rewrite(gamma) 4**n*gamma(n + 1/2)/(sqrt(pi)*gamma(n + 2)) >>> catalan(n).rewrite(hyper) hyper((-n, 1 - n), (2,), 1) For some non-integer values of n we can get closed form expressions by rewriting in terms of gamma functions: >>> catalan(Rational(1, 2)).rewrite(gamma) 8/(3*pi) We can differentiate the Catalan numbers C(n) interpreted as a continuous real function in n: >>> diff(catalan(n), n) (polygamma(0, n + 1/2) - polygamma(0, n + 2) + log(4))*catalan(n) As a more advanced example consider the following ratio between consecutive numbers: >>> combsimp((catalan(n + 1)/catalan(n)).rewrite(binomial)) 2*(2*n + 1)/(n + 2) The Catalan numbers can be generalized to complex numbers: >>> catalan(I).rewrite(gamma) 4**I*gamma(1/2 + I)/(sqrt(pi)*gamma(2 + I)) and evaluated with arbitrary precision: >>> catalan(I).evalf(20) 0.39764993382373624267 - 0.020884341620842555705*I See Also ======== andre, bell, bernoulli, euler, fibonacci, harmonic, lucas, genocchi, partition, tribonacci, sympy.functions.combinatorial.factorials.binomial References ========== .. [1] https://en.wikipedia.org/wiki/Catalan_number .. [2] https://mathworld.wolfram.com/CatalanNumber.html .. [3] https://functions.wolfram.com/GammaBetaErf/CatalanNumber/ .. 
[4] http://geometer.org/mathcircles/catalan.pdf """ @classmethod def eval(cls, n): from sympy.functions.special.gamma_functions import gamma if (n.is_Integer and n.is_nonnegative) or \ (n.is_noninteger and n.is_negative): return 4**n*gamma(n + S.Half)/(gamma(S.Half)*gamma(n + 2)) if (n.is_integer and n.is_negative): if (n + 1).is_negative: return S.Zero if (n + 1).is_zero: return Rational(-1, 2) def fdiff(self, argindex=1): from sympy.functions.elementary.exponential import log from sympy.functions.special.gamma_functions import polygamma n = self.args[0] return catalan(n)*(polygamma(0, n + S.Half) - polygamma(0, n + 2) + log(4)) def _eval_rewrite_as_binomial(self, n, **kwargs): return binomial(2*n, n)/(n + 1) def _eval_rewrite_as_factorial(self, n, **kwargs): return factorial(2*n) / (factorial(n+1) * factorial(n)) def _eval_rewrite_as_gamma(self, n, piecewise=True, **kwargs): from sympy.functions.special.gamma_functions import gamma # The gamma function allows to generalize Catalan numbers to complex n return 4**n*gamma(n + S.Half)/(gamma(S.Half)*gamma(n + 2)) def _eval_rewrite_as_hyper(self, n, **kwargs): from sympy.functions.special.hyper import hyper return hyper([1 - n, -n], [2], 1) def _eval_rewrite_as_Product(self, n, **kwargs): from sympy.concrete.products import Product if not (n.is_integer and n.is_nonnegative): return self k = Dummy('k', integer=True, positive=True) return Product((n + k) / k, (k, 2, n)) def _eval_is_integer(self): if self.args[0].is_integer and self.args[0].is_nonnegative: return True def _eval_is_positive(self): if self.args[0].is_nonnegative: return True def _eval_is_composite(self): if self.args[0].is_integer and (self.args[0] - 3).is_positive: return True def _eval_evalf(self, prec): from sympy.functions.special.gamma_functions import gamma if self.args[0].is_number: return self.rewrite(gamma)._eval_evalf(prec) #----------------------------------------------------------------------------# # # # Genocchi numbers # # # #----------------------------------------------------------------------------#
catalan
python
kamyu104__LeetCode-Solutions
Python/integer-replacement.py
{ "start": 32, "end": 509 }
class ____(object): def integerReplacement(self, n): """ :type n: int :rtype: int """ result = 0 while n != 1: b = n & 3 if n == 3: n -= 1 elif b == 3: n += 1 elif b == 1: n -= 1 else: n /= 2 result += 1 return result # Time: O(logn) # Space: O(logn) # Recursive solution.
Solution
python
pydantic__pydantic
pydantic-core/tests/serializers/test_union.py
{ "start": 1899, "end": 3363 }
class ____: def __init__(self, c, d): self.c = c self.d = d @pytest.fixture(scope='module') def model_serializer() -> SchemaSerializer: return SchemaSerializer( core_schema.union_schema( [ core_schema.model_schema( ModelA, core_schema.model_fields_schema( { 'a': core_schema.model_field(core_schema.bytes_schema()), 'b': core_schema.model_field( core_schema.float_schema( serialization=core_schema.format_ser_schema('0.1f', when_used='unless-none') ) ), } ), ), core_schema.model_schema( ModelB, core_schema.model_fields_schema( { 'c': core_schema.model_field(core_schema.bytes_schema()), 'd': core_schema.model_field( core_schema.float_schema( serialization=core_schema.format_ser_schema('0.2f', when_used='unless-none') ) ), } ), ), ], ) )
ModelB
python
python__mypy
mypy/types.py
{ "start": 46188, "end": 47901 }
class ____(ProperType): """This type has no members. This type is the bottom type. With strict Optional checking, it is the only common subtype between all other types, which allows `meet` to be well defined. Without strict Optional checking, NoneType fills this role. In general, for any type T: join(UninhabitedType, T) = T meet(UninhabitedType, T) = UninhabitedType is_subtype(UninhabitedType, T) = True """ __slots__ = ("ambiguous",) ambiguous: bool # Is this a result of inference for a variable without constraints? def __init__(self, line: int = -1, column: int = -1) -> None: super().__init__(line, column) self.ambiguous = False def can_be_true_default(self) -> bool: return False def can_be_false_default(self) -> bool: return False def accept(self, visitor: TypeVisitor[T]) -> T: return visitor.visit_uninhabited_type(self) def __hash__(self) -> int: return hash((UninhabitedType, self.ambiguous)) def __eq__(self, other: object) -> bool: return isinstance(other, UninhabitedType) and other.ambiguous == self.ambiguous def serialize(self) -> JsonDict: return {".class": "UninhabitedType"} @classmethod def deserialize(cls, data: JsonDict) -> UninhabitedType: assert data[".class"] == "UninhabitedType" return UninhabitedType() def write(self, data: WriteBuffer) -> None: write_tag(data, UNINHABITED_TYPE) write_tag(data, END_TAG) @classmethod def read(cls, data: ReadBuffer) -> UninhabitedType: assert read_tag(data) == END_TAG return UninhabitedType()
UninhabitedType
python
gevent__gevent
src/gevent/tests/test__pool.py
{ "start": 4194, "end": 4307 }
class ____(object): def write(self, *_args): raise RuntimeError('Writing to the file failed')
FakeFile
python
sqlalchemy__sqlalchemy
test/orm/test_instrumentation.py
{ "start": 15400, "end": 19442 }
class ____(fixtures.MappedTest): """Seems basic, but not directly covered elsewhere!""" def test_compileonattr(self): t = Table( "t", MetaData(), Column("id", Integer, primary_key=True), Column("x", Integer), ) class A: pass self.mapper_registry.map_imperatively(A, t) a = A() assert a.id is None def test_compileonattr_rel(self): m = MetaData() t1 = Table( "t1", m, Column("id", Integer, primary_key=True), Column("x", Integer), ) t2 = Table( "t2", m, Column("id", Integer, primary_key=True), Column("t1_id", Integer, ForeignKey("t1.id")), ) class A: pass class B: pass self.mapper_registry.map_imperatively( A, t1, properties=dict(bs=relationship(B)) ) self.mapper_registry.map_imperatively(B, t2) a = A() assert not a.bs def test_uninstrument(self): class A: pass manager = instrumentation.register_class(A) attributes._register_attribute( A, "x", comparator=object(), parententity=object(), uselist=False, useobject=False, ) assert instrumentation.manager_of_class(A) is manager instrumentation.unregister_class(A) assert instrumentation.opt_manager_of_class(A) is None assert not hasattr(A, "x") with expect_raises_message( sa.orm.exc.UnmappedClassError, r"Can't locate an instrumentation manager for class .*A", ): instrumentation.manager_of_class(A) assert A.__init__ == object.__init__ def test_compileonattr_rel_backref_a(self): m = MetaData() t1 = Table( "t1", m, Column("id", Integer, primary_key=True), Column("x", Integer), ) t2 = Table( "t2", m, Column("id", Integer, primary_key=True), Column("t1_id", Integer, ForeignKey("t1.id")), ) class Base: def __init__(self, *args, **kwargs): pass for base in object, Base: class A(base): pass class B(base): pass self.mapper_registry.map_imperatively( A, t1, properties=dict(bs=relationship(B, backref="a")) ) self.mapper_registry.map_imperatively(B, t2) b = B() assert b.a is None a = A() b.a = a session = fixture_session() session.add(b) assert a in session, "base is %s" % base clear_mappers() def test_compileonattr_rel_backref_b(self): m = MetaData() t1 = Table( "t1", m, Column("id", Integer, primary_key=True), Column("x", Integer), ) t2 = Table( "t2", m, Column("id", Integer, primary_key=True), Column("t1_id", Integer, ForeignKey("t1.id")), ) class Base: def __init__(self): pass class Base_AKW: def __init__(self, *args, **kwargs): pass for base in object, Base, Base_AKW: class A(base): pass class B(base): pass self.mapper_registry.map_imperatively(A, t1) self.mapper_registry.map_imperatively( B, t2, properties=dict(a=relationship(A, backref="bs")) ) a = A() b = B() b.a = a session = fixture_session() session.add(a) assert b in session, "base: %s" % base clear_mappers()
MiscTest
python
numpy__numpy
numpy/_core/tests/test__exceptions.py
{ "start": 2209, "end": 2922 }
class ____: def test_attr(self, args): """Validate attribute types.""" exc = AxisError(*args) if len(args) == 1: assert exc.axis is None assert exc.ndim is None else: axis, ndim, *_ = args assert exc.axis == axis assert exc.ndim == ndim def test_pickling(self, args): """Test that `AxisError` can be pickled.""" exc = AxisError(*args) exc2 = pickle.loads(pickle.dumps(exc)) assert type(exc) is type(exc2) for name in ("axis", "ndim", "args"): attr1 = getattr(exc, name) attr2 = getattr(exc2, name) assert attr1 == attr2, name
TestAxisError
python
pennersr__django-allauth
allauth/socialaccount/providers/oauth2/views.py
{ "start": 1157, "end": 3238 }
class ____: expires_in_key = "expires_in" client_class = OAuth2Client supports_state = True redirect_uri_protocol: Optional[str] = None access_token_method = "POST" # nosec login_cancelled_error = "access_denied" scope_delimiter = " " basic_auth = False headers: Optional[Dict[str, str]] = None def __init__(self, request): self.request = request self.did_fetch_access_token = False def get_provider(self): return get_adapter(self.request).get_provider( self.request, provider=self.provider_id ) def complete_login(self, request, app, token: SocialToken, **kwargs): """ Returns a SocialLogin instance """ raise NotImplementedError def get_callback_url(self, request, app): callback_url = reverse(self.provider_id + "_callback") protocol = self.redirect_uri_protocol return build_absolute_uri(request, callback_url, protocol) def parse_token(self, data): token = SocialToken(token=data["access_token"]) token.token_secret = data.get("refresh_token", "") expires_in = data.get(self.expires_in_key, None) if expires_in: token.expires_at = timezone.now() + timedelta(seconds=int(expires_in)) return token def get_access_token_data(self, request, app, client, pkce_code_verifier=None): code = get_request_param(self.request, "code") data = client.get_access_token(code, pkce_code_verifier=pkce_code_verifier) self.did_fetch_access_token = True return data def get_client(self, request, app): callback_url = self.get_callback_url(request, app) client = self.client_class( self.request, app.client_id, app.secret, self.access_token_method, self.access_token_url, callback_url, scope_delimiter=self.scope_delimiter, headers=self.headers, basic_auth=self.basic_auth, ) return client
OAuth2Adapter
python
geekcomputers__Python
venv/Lib/site-packages/pip/_internal/utils/logging.py
{ "start": 4099, "end": 6441 }
class ____(RichHandler): KEYWORDS: ClassVar[Optional[List[str]]] = [] def __init__(self, stream: Optional[TextIO], no_color: bool) -> None: super().__init__( console=Console(file=stream, no_color=no_color, soft_wrap=True), show_time=False, show_level=False, show_path=False, highlighter=NullHighlighter(), ) # Our custom override on Rich's logger, to make things work as we need them to. def emit(self, record: logging.LogRecord) -> None: style: Optional[Style] = None # If we are given a diagnostic error to present, present it with indentation. if getattr(record, "rich", False): assert isinstance(record.args, tuple) (rich_renderable,) = record.args assert isinstance( rich_renderable, (ConsoleRenderable, RichCast, str) ), f"{rich_renderable} is not rich-console-renderable" renderable: RenderableType = IndentedRenderable( rich_renderable, indent=get_indentation() ) else: message = self.format(record) renderable = self.render_message(record, message) if record.levelno is not None: if record.levelno >= logging.ERROR: style = Style(color="red") elif record.levelno >= logging.WARNING: style = Style(color="yellow") try: self.console.print(renderable, overflow="ignore", crop=False, style=style) except Exception: self.handleError(record) def handleError(self, record: logging.LogRecord) -> None: """Called when logging is unable to log some output.""" exc_class, exc = sys.exc_info()[:2] # If a broken pipe occurred while calling write() or flush() on the # stdout stream in logging's Handler.emit(), then raise our special # exception so we can handle it in main() instead of logging the # broken pipe error and continuing. if ( exc_class and exc and self.console.file is sys.stdout and _is_broken_pipe_error(exc_class, exc) ): raise BrokenStdoutLoggingError() return super().handleError(record)
RichPipStreamHandler
python
walkccc__LeetCode
solutions/2193. Minimum Number of Moves to Make Palindrome/2193.py
{ "start": 0, "end": 413 }
class ____: def minMovesToMakePalindrome(self, s: str) -> int: ans = 0 chars = list(s) while len(chars) > 1: # Greedily match the last digit. i = chars.index(chars[-1]) if i == len(chars) - 1: # s[i] is the middle letter. ans += i // 2 else: chars.pop(i) ans += i # Swap the matched letter to the left. chars.pop() return ans
Solution
python
pandas-dev__pandas
pandas/tests/plotting/test_converter.py
{ "start": 1391, "end": 4141 }
class ____: @pytest.mark.single_cpu def test_dont_register_by_default(self): # Run in subprocess to ensure a clean state code = ( "import matplotlib.units; " "import pandas as pd; " "units = dict(matplotlib.units.registry); " "assert pd.Timestamp not in units" ) call = [sys.executable, "-c", code] assert subprocess.check_call(call) == 0 def test_registering_no_warning(self): s = Series(range(12), index=date_range("2017", periods=12)) _, ax = plt.subplots() # Set to the "warn" state, in case this isn't the first test run register_matplotlib_converters() ax.plot(s.index, s.values) def test_pandas_plots_register(self): s = Series(range(12), index=date_range("2017", periods=12)) # Set to the "warn" state, in case this isn't the first test run with tm.assert_produces_warning(None) as w: s.plot() assert len(w) == 0 def test_matplotlib_formatters(self): # Can't make any assertion about the start state. # We we check that toggling converters off removes it, and toggling it # on restores it. with cf.option_context("plotting.matplotlib.register_converters", True): with cf.option_context("plotting.matplotlib.register_converters", False): assert Timestamp not in units.registry assert Timestamp in units.registry def test_option_no_warning(self): s = Series(range(12), index=date_range("2017", periods=12)) _, ax = plt.subplots() # Test without registering first, no warning with cf.option_context("plotting.matplotlib.register_converters", False): ax.plot(s.index, s.values) # Now test with registering register_matplotlib_converters() with cf.option_context("plotting.matplotlib.register_converters", False): ax.plot(s.index, s.values) def test_registry_resets(self): # make a copy, to reset to original = dict(units.registry) try: # get to a known state units.registry.clear() date_converter = dates.DateConverter() units.registry[datetime] = date_converter units.registry[date] = date_converter register_matplotlib_converters() assert units.registry[date] is not date_converter deregister_matplotlib_converters() assert units.registry[date] is date_converter finally: # restore original stater units.registry.clear() for k, v in original.items(): units.registry[k] = v
TestRegistration
python
mkdocs__mkdocs
mkdocs/tests/config/config_options_legacy_tests.py
{ "start": 39007, "end": 42473 }
class ____(TestCase): def test_subconfig_wrong_type(self): # Test that an error is raised if subconfig does not receive a dict class Schema: option = c.SubConfig() for val in "not_a_dict", ("not_a_dict",), ["not_a_dict"]: with self.subTest(val): with self.expect_error( option=re.compile( r"The configuration is invalid. Expected a key-value mapping " r"\(dict\) but received: .+" ) ): self.get_config(Schema, {'option': val}) def test_subconfig_ignored(self): """Default behaviour of subconfig: validation is ignored.""" # Nominal class Schema1: option = c.SubConfig(('cc', c.Choice(('foo', 'bar')))) conf = self.get_config(Schema1, {'option': {'cc': 'foo'}}) self.assertEqual(conf, {'option': {'cc': 'foo'}}) # Invalid option: No error class Schema2: option = c.SubConfig(('cc', c.Choice(('foo', 'bar')))) conf = self.get_config(Schema2, {'option': {'cc': True}}) self.assertEqual(conf, {'option': {'cc': True}}) # Missing option: Will be considered optional with default None class Schema3: option = c.SubConfig(('cc', c.Choice(('foo', 'bar')))) conf = self.get_config(Schema3, {'option': {}}) self.assertEqual(conf, {'option': {'cc': None}}) # Unknown option: No warning class Schema4: option = c.SubConfig(('cc', c.Choice(('foo', 'bar')))) conf = self.get_config(Schema4, {'option': {'unknown_key_is_ok': 0}}) self.assertEqual(conf, {'option': {'cc': None, 'unknown_key_is_ok': 0}}) def test_subconfig_unknown_option(self): class Schema: option = c.SubConfig(validate=True) conf = self.get_config( Schema, {'option': {'unknown': 0}}, warnings=dict(option="Sub-option 'unknown': Unrecognised configuration name: unknown"), ) self.assertEqual(conf['option'], {"unknown": 0}) def test_subconfig_invalid_option(self): class Schema: option = c.SubConfig( ('cc', c.Choice(('foo', 'bar'))), validate=True, ) with self.expect_error( option="Sub-option 'cc': Expected one of: ('foo', 'bar') but received: True" ): self.get_config(Schema, {'option': {'cc': True}}) def test_subconfig_normal(self): class Schema: option = c.SubConfig( ('cc', c.Choice(('foo', 'bar'))), ) conf = self.get_config(Schema, {'option': {'cc': 'foo'}}) self.assertEqual(conf['option'], {'cc': 'foo'}) def test_config_file_path_pass_through(self): """Necessary to ensure FilesystemObject validates the correct path.""" passed_config_path = None class SubType(c.BaseConfigOption): def pre_validation(self, config, key_name): nonlocal passed_config_path passed_config_path = config.config_file_path class Schema: sub = c.SubConfig(('opt', SubType())) config_path = "foo/mkdocs.yaml" self.get_config(Schema, {"sub": {"opt": "bar"}}, config_file_path=config_path) self.assertEqual(passed_config_path, config_path)
SubConfigTest
python
python-attrs__attrs
tests/test_setattr.py
{ "start": 267, "end": 341 }
class ____: x = attr.ib(on_setattr=lambda *args: None)
WithOnSetAttrHook
python
doocs__leetcode
solution/0300-0399/0319.Bulb Switcher/Solution.py
{ "start": 0, "end": 85 }
class ____: def bulbSwitch(self, n: int) -> int: return int(sqrt(n))
Solution
python
PrefectHQ__prefect
src/prefect/server/services/pause_expirations.py
{ "start": 812, "end": 3320 }
class ____(LoopService): """ Fails flow runs that have been paused and never resumed """ @classmethod def service_settings(cls) -> ServicesBaseSetting: return get_current_settings().server.services.pause_expirations def __init__(self, loop_seconds: Optional[float] = None, **kwargs: Any): super().__init__( loop_seconds=loop_seconds or PREFECT_API_SERVICES_PAUSE_EXPIRATIONS_LOOP_SECONDS.value(), **kwargs, ) # query for this many runs to mark failed at once self.batch_size = 200 @db_injector async def run_once(self, db: PrefectDBInterface) -> None: """ Mark flow runs as failed by: - Querying for flow runs in a Paused state that have timed out - For any runs past the "expiration" threshold, setting the flow run state to a new `Failed` state """ while True: async with db.session_context(begin_transaction=True) as session: query = ( sa.select(db.FlowRun) .where( db.FlowRun.state_type == states.StateType.PAUSED, ) .limit(self.batch_size) ) result = await session.execute(query) runs = result.scalars().all() # mark each run as failed for run in runs: await self._mark_flow_run_as_failed(session=session, flow_run=run) # if no runs were found, exit the loop if len(runs) < self.batch_size: break self.logger.info("Finished monitoring for late runs.") async def _mark_flow_run_as_failed( self, session: AsyncSession, flow_run: FlowRun ) -> None: """ Mark a flow run as failed. Pass-through method for overrides. """ if ( flow_run.state is not None and flow_run.state.state_details.pause_timeout is not None and flow_run.state.state_details.pause_timeout < now("UTC") ): await models.flow_runs.set_flow_run_state( session=session, flow_run_id=flow_run.id, state=states.Failed(message="The flow was paused and never resumed."), force=True, ) if __name__ == "__main__": asyncio.run(FailExpiredPauses(handle_signals=True).start())
FailExpiredPauses
python
huggingface__transformers
tests/models/mt5/test_modeling_mt5.py
{ "start": 1516, "end": 20376 }
class ____: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size).clamp(2) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_pipeline_config(self): return MT5Config( vocab_size=166, # t5 forces 100 extra tokens d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def get_config(self): return MT5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5Model(config=config) model.to(torch_device) model.eval() # make sure that lm_labels are correctly padded from the right lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) # add casaul pad token mask 
triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): # first item self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: # items before diagonal self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) # pad items after diagonal if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: # all items after square self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_layers) def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5ForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_with_sequence_classification_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device) model = MT5ForSequenceClassification(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=input_ids, labels=labels, ) # self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids 
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5Model(config=config).get_decoder() model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() 
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_generate_with_past_key_values( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5ForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_model_fp16_forward( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = MT5Model(config=config).to(torch_device).half().eval() output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [MT5Model, MT5ForConditionalGeneration]: torch.manual_seed(0) model = model_class(config=config).to(torch_device).eval() # load state dict copies weights but does not tie them model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def check_resize_embeddings_t5_v1_1( self, config, ): prev_vocab_size = config.vocab_size config.tie_word_embeddings = False model = MT5ForConditionalGeneration(config=config).to(torch_device).eval() model.resize_token_embeddings(prev_vocab_size - 10) self.parent.assertEqual(model.get_input_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.get_output_embeddings().weight.shape[0], 
prev_vocab_size - 10) self.parent.assertEqual(model.config.vocab_size, prev_vocab_size - 10) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return config, inputs_dict @require_torch # Copied from tests.models.t5.test_modeling_t5.T5ModelTest with T5->MT5, google-t5/t5-small->google/mt5-small
MT5ModelTester
python
great-expectations__great_expectations
contrib/experimental/great_expectations_experimental/expectations/expect_column_distribution_to_match_benfords_law.py
{ "start": 6181, "end": 11932 }
class ____(ColumnAggregateExpectation): """Expect column distribution to match Benford's Law. Tests whether data matches Benford's Law Fraud Detection Algorithm. Uses a Chi-Square Goodness of Fit test with an 80@ p-value. """ # These examples will be shown in the public gallery, and also executed as unit tests for your Expectation examples = [ { "data": { "benford_distribution": [1, 1, 1, 1, 2, 2, 3, 4, 5, 6, 9], "non_benford_distribution": [1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9], }, "tests": [ { "title": "matching_distribution", "include_in_gallery": True, "exact_match_out": False, "in": {"column": "benford_distribution"}, "out": {"success": True, "observed_value": True}, }, { "title": "non_matching_distribution", "include_in_gallery": True, "exact_match_out": False, "in": {"column": "non_benford_distribution"}, "out": {"success": False, "observed_value": False}, }, ], }, ] # This dictionary contains metadata for display in the public gallery library_metadata = { "maturity": "experimental", # "experimental", "beta", or "production" "tags": ["experimental"], # Tags for this Expectation in the gallery "contributors": [ # Github handles for all contributors to this Expectation. "@shekark642", "@vinodkri1", # Don't forget to add your github handle here! ], } # Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\ metric_dependencies = ("column.custom.DistributionMatchesBenfordsLaw",) # success_keys = ("min_value", "strict_min", "max_value", "strict_max") success_keys = tuple() # Default values # default_kwarg_values = { # "min_value": None, # "max_value": None, # "strict_min": None, # "strict_max": None, # "result_format": "BASIC", # # "catch_exceptions": False, # } # @classmethod # @renderer(renderer_type="renderer.prescriptive") # @render_suite_parameter_string # def _prescriptive_renderer( # cls, # configuration=None, # result=None, # runtime_configuration=None, # **kwargs, # ): # runtime_configuration = runtime_configuration or {} # include_column_name = False if runtime_configuration.get("include_column_name") is False else True # styling = runtime_configuration.get("styling") # params = substitute_none_for_missing( # configuration.kwargs, # [ # "column", # "min_value", # "max_value", # "row_condition", # "condition_parser", # "strict_min", # "strict_max", # ], # ) # # if (params["min_value"] is None) and (params["max_value"] is None): # template_str = "median may have any numerical value." # else: # at_least_str, at_most_str = handle_strict_min_max(params) # if params["min_value"] is not None and params["max_value"] is not None: # template_str = f"median must be {at_least_str} $min_value and {at_most_str} $max_value." # elif params["min_value"] is None: # template_str = f"median must be {at_most_str} $max_value." # elif params["max_value"] is None: # template_str = f"median must be {at_least_str} $min_value." 
# # if include_column_name: # template_str = "$column " + template_str # # if params["row_condition"] is not None: # ( # conditional_template_str, # conditional_params, # ) = parse_row_condition_string_pandas_engine(params["row_condition"]) # template_str = conditional_template_str + ", then " + template_str # params.update(conditional_params) # # return [ # RenderedStringTemplateContent( # **{ # "content_block_type": "string_template", # "string_template": { # "template": template_str, # "params": params, # "styling": styling, # }, # } # ) # ] def _validate( self, metrics: Dict, runtime_configuration: dict = None, execution_engine: ExecutionEngine = None, ): # return self._validate_metric_value_between( # metric_name="column.custom.DistributionMatchesBenfordsLaw", # configuration=configuration, # metrics=metrics, # runtime_configuration=runtime_configuration, # execution_engine=execution_engine, # ) # return {"success": metrics.get("column.custom.DistributionMatchesBenfordsLaw"), "observed_value": metrics.get("column.custom.DistributionMatchesBenfordsLaw")} return { "success": metrics.get("column.custom.DistributionMatchesBenfordsLaw"), "result": { "observed_value": metrics.get("column.custom.DistributionMatchesBenfordsLaw") }, } if __name__ == "__main__": ExpectColumnDistributionToMatchBenfordsLaw().print_diagnostic_checklist()
ExpectColumnDistributionToMatchBenfordsLaw
python
huggingface__transformers
src/transformers/models/vit/modeling_vit.py
{ "start": 19269, "end": 24143 }
class ____(ViTPreTrainedModel): def __init__(self, config: ViTConfig): super().__init__(config) self.vit = ViTModel(config, add_pooling_layer=False, use_mask_token=True) self.decoder = nn.Sequential( nn.Conv2d( in_channels=config.hidden_size, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1, ), nn.PixelShuffle(config.encoder_stride), ) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ) -> MaskedImageModelingOutput: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Examples: ```python >>> from transformers import AutoImageProcessor, ViTForMaskedImageModeling >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") >>> model = ViTForMaskedImageModeling.from_pretrained("google/vit-base-patch16-224-in21k") >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2 >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values >>> # create random boolean mask of shape (batch_size, num_patches) >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool() >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos) >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction >>> list(reconstructed_pixel_values.shape) [1, 3, 224, 224] ```""" if bool_masked_pos is not None and (self.config.patch_size != self.config.encoder_stride): raise ValueError( "When `bool_masked_pos` is provided, `patch_size` must be equal to `encoder_stride` to ensure that " "the reconstructed image has the same dimensions as the input. " f"Got `patch_size` = {self.config.patch_size} and `encoder_stride` = {self.config.encoder_stride}." 
) outputs: BaseModelOutputWithPooling = self.vit( pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs, ) sequence_output = outputs.last_hidden_state # Reshape to (batch_size, num_channels, height, width) sequence_output = sequence_output[:, 1:] batch_size, sequence_length, num_channels = sequence_output.shape height = width = math.floor(sequence_length**0.5) sequence_output = sequence_output.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) # Reconstruct pixel values reconstructed_pixel_values = self.decoder(sequence_output) masked_im_loss = None if bool_masked_pos is not None: size = self.config.image_size // self.config.patch_size bool_masked_pos = bool_masked_pos.reshape(-1, size, size) mask = ( bool_masked_pos.repeat_interleave(self.config.patch_size, 1) .repeat_interleave(self.config.patch_size, 2) .unsqueeze(1) .contiguous() ) reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none") masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels return MaskedImageModelingOutput( loss=masked_im_loss, reconstruction=reconstructed_pixel_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. <Tip> Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained position embeddings to the higher resolution. </Tip> """ )
ViTForMaskedImageModeling
python
plotly__plotly.py
plotly/graph_objs/scattermapbox/legendgrouptitle/_font.py
{ "start": 233, "end": 9957 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "scattermapbox.legendgrouptitle" _path_str = "scattermapbox.legendgrouptitle.font" _valid_props = { "color", "family", "lineposition", "shadow", "size", "style", "textcase", "variant", "weight", } @property def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. The 'family' property is a string and must be specified as: - A non-empty string Returns ------- str """ return self["family"] @family.setter def family(self, val): self["family"] = val @property def lineposition(self): """ Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. The 'lineposition' property is a flaglist and may be specified as a string containing: - Any combination of ['under', 'over', 'through'] joined with '+' characters (e.g. 'under+over') OR exactly one of ['none'] (e.g. 'none') Returns ------- Any """ return self["lineposition"] @lineposition.setter def lineposition(self, val): self["lineposition"] = val @property def shadow(self): """ Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow for additional options. The 'shadow' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["shadow"] @shadow.setter def shadow(self, val): self["shadow"] = val @property def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] Returns ------- int|float """ return self["size"] @size.setter def size(self, val): self["size"] = val @property def style(self): """ Sets whether a font should be styled with a normal or italic face from its family. The 'style' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'italic'] Returns ------- Any """ return self["style"] @style.setter def style(self, val): self["style"] = val @property def textcase(self): """ Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. The 'textcase' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'word caps', 'upper', 'lower'] Returns ------- Any """ return self["textcase"] @textcase.setter def textcase(self, val): self["textcase"] = val @property def variant(self): """ Sets the variant of the font. 
The 'variant' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'small-caps', 'all-small-caps', 'all-petite-caps', 'petite-caps', 'unicase'] Returns ------- Any """ return self["variant"] @variant.setter def variant(self, val): self["variant"] = val @property def weight(self): """ Sets the weight (or boldness) of the font. The 'weight' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [1, 1000] OR exactly one of ['normal', 'bold'] (e.g. 'bold') Returns ------- int """ return self["weight"] @weight.setter def weight(self, val): self["weight"] = val @property def _prop_descriptions(self): return """\ color family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. size style Sets whether a font should be styled with a normal or italic face from its family. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. variant Sets the variant of the font. weight Sets the weight (or boldness) of the font. """ def __init__( self, arg=None, color=None, family=None, lineposition=None, shadow=None, size=None, style=None, textcase=None, variant=None, weight=None, **kwargs, ): """ Construct a new Font object Sets this legend group's title font. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.scattermapbox. legendgrouptitle.Font` color family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. size style Sets whether a font should be styled with a normal or italic face from its family. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. variant Sets the variant of the font. weight Sets the weight (or boldness) of the font. 
Returns ------- Font """ super().__init__("font") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.scattermapbox.legendgrouptitle.Font constructor must be a dict or an instance of :class:`plotly.graph_objs.scattermapbox.legendgrouptitle.Font`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("family", arg, family) self._set_property("lineposition", arg, lineposition) self._set_property("shadow", arg, shadow) self._set_property("size", arg, size) self._set_property("style", arg, style) self._set_property("textcase", arg, textcase) self._set_property("variant", arg, variant) self._set_property("weight", arg, weight) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Font
python
getsentry__sentry
src/sentry/seer/endpoints/organization_seer_explorer_update.py
{ "start": 688, "end": 838 }
class ____(OrganizationPermission): scope_map = { "POST": ["org:read"], } @region_silo_endpoint
OrganizationSeerExplorerUpdatePermission
python
apache__airflow
providers/mysql/src/airflow/providers/mysql/transfers/s3_to_mysql.py
{ "start": 1159, "end": 3938 }
class ____(BaseOperator): """ Loads a file from S3 into a MySQL table. :param s3_source_key: The path to the file (S3 key) that will be loaded into MySQL. :param mysql_table: The MySQL table into where the data will be sent. :param mysql_duplicate_key_handling: Specify what should happen to duplicate data. You can choose either `IGNORE` or `REPLACE`. .. seealso:: https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-duplicate-key-handling :param mysql_extra_options: MySQL options to specify exactly how to load the data. :param aws_conn_id: The S3 connection that contains the credentials to the S3 Bucket. :param mysql_conn_id: Reference to :ref:`mysql connection id <howto/connection:mysql>`. :param mysql_local_infile: flag to enable local_infile option on the MySQLHook. This loads MySQL directly using the LOAD DATA LOCAL INFILE command. Defaults to False. """ template_fields: Sequence[str] = ( "s3_source_key", "mysql_table", ) template_ext: Sequence[str] = () ui_color = "#f4a460" def __init__( self, *, s3_source_key: str, mysql_table: str, mysql_duplicate_key_handling: str = "IGNORE", mysql_extra_options: str | None = None, aws_conn_id: str | None = "aws_default", mysql_conn_id: str = "mysql_default", mysql_local_infile: bool = False, **kwargs, ) -> None: super().__init__(**kwargs) self.s3_source_key = s3_source_key self.mysql_table = mysql_table self.mysql_duplicate_key_handling = mysql_duplicate_key_handling self.mysql_extra_options = mysql_extra_options or "" self.aws_conn_id = aws_conn_id self.mysql_conn_id = mysql_conn_id self.mysql_local_infile = mysql_local_infile def execute(self, context: Context) -> None: """ Execute the transfer operation from S3 to MySQL. :param context: The context that is being provided when executing. """ self.log.info("Loading %s to MySql table %s...", self.s3_source_key, self.mysql_table) s3_hook = S3Hook(aws_conn_id=self.aws_conn_id) file = s3_hook.download_file(key=self.s3_source_key) try: mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id, local_infile=self.mysql_local_infile) mysql.bulk_load_custom( table=self.mysql_table, tmp_file=file, duplicate_key_handling=self.mysql_duplicate_key_handling, extra_options=self.mysql_extra_options, ) finally: # Remove file downloaded from s3 to be idempotent. os.remove(file)
S3ToMySqlOperator
python
plotly__plotly.py
plotly/graph_objs/_deprecations.py
{ "start": 14409, "end": 15227 }
class ____(dict): """ plotly.graph_objs.Stream is deprecated. Please replace it with one of the following more specific types - plotly.graph_objs.scatter.Stream - plotly.graph_objs.area.Stream """ def __init__(self, *args, **kwargs): """ plotly.graph_objs.Stream is deprecated. Please replace it with one of the following more specific types - plotly.graph_objs.scatter.Stream - plotly.graph_objs.area.Stream """ warnings.warn( """plotly.graph_objs.Stream is deprecated. Please replace it with one of the following more specific types - plotly.graph_objs.scatter.Stream - plotly.graph_objs.area.Stream """, DeprecationWarning, ) super().__init__(*args, **kwargs)
Stream
python
dagster-io__dagster
python_modules/dagster/dagster_tests/components_tests/integration_tests/integration_test_defs/definitions/other_local_component_sample/__init__.py
{ "start": 54, "end": 129 }
class ____(BaseModel): a_string: str an_int: int
MyNewComponentSchema
python
readthedocs__readthedocs.org
readthedocs/proxito/tests/test_old_redirects.py
{ "start": 5416, "end": 33792 }
class ____(MockStorageMixin, BaseDocServing): def test_forced_redirect(self): fixture.get( Redirect, project=self.project, redirect_type=EXACT_REDIRECT, from_url="/en/latest/install.html", to_url="/en/latest/tutorial/install.html", force=True, ) r = self.client.get( "/en/latest/install.html", headers={"host": "project.dev.readthedocs.io"} ) self.assertEqual(r.status_code, 302) self.assertEqual( r["Location"], "http://project.dev.readthedocs.io/en/latest/tutorial/install.html", ) def test_disabled_redirect(self): redirect = fixture.get( Redirect, project=self.project, redirect_type=EXACT_REDIRECT, from_url="/en/latest/install.html", to_url="/en/latest/tutorial/install.html", enabled=True, ) url = "/en/latest/install.html" r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"}) self.assertEqual(r.status_code, 302) self.assertEqual( r["Location"], "http://project.dev.readthedocs.io/en/latest/tutorial/install.html", ) redirect.enabled = False redirect.save() r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"}) self.assertEqual(r.status_code, 404) def test_redirect_order(self): redirect_a = fixture.get( Redirect, project=self.project, redirect_type=EXACT_REDIRECT, from_url="/en/latest/*", to_url="/en/latest/tutorial/:splat", enabled=True, ) redirect_b = fixture.get( Redirect, project=self.project, redirect_type=EXACT_REDIRECT, from_url="/en/latest/install.html", to_url="/en/latest/installation.html", enabled=True, ) redirect_a.refresh_from_db() self.assertEqual(redirect_b.position, 0) self.assertEqual(redirect_a.position, 1) url = "/en/latest/install.html" r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"}) self.assertEqual(r.status_code, 302) self.assertEqual( r["Location"], "http://project.dev.readthedocs.io/en/latest/installation.html", ) redirect_a.position = 0 redirect_a.save() redirect_b.refresh_from_db() self.assertEqual(redirect_a.position, 0) self.assertEqual(redirect_b.position, 1) url = "/en/latest/install.html" r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"}) self.assertEqual(r.status_code, 302) self.assertEqual( r["Location"], "http://project.dev.readthedocs.io/en/latest/tutorial/install.html", ) def test_redirect_ignored_on_external_domain(self): fixture.get( Redirect, project=self.project, redirect_type=EXACT_REDIRECT, from_url="/*", to_url="/en/latest/:splat", ) url = "/install.html" r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"}) self.assertEqual(r.status_code, 302) self.assertEqual( r["Location"], "http://project.dev.readthedocs.io/en/latest/install.html" ) fixture.get( Version, project=self.project, active=True, built=True, slug="22", type=EXTERNAL, ) r = self.client.get(url, headers={"host": "project--22.readthedocs.build"}) self.assertEqual(r.status_code, 404) def test_infinite_redirect(self): host = "project.dev.readthedocs.io" fixture.get( Redirect, project=self.project, redirect_type=EXACT_REDIRECT, from_url="/en/latest/install.html", to_url="/en/latest/install.html", ) r = self.client.get("/en/latest/install.html", headers={"host": host}) self.assertEqual(r.status_code, 404) r = self.client.get("/en/latest/install.html?foo=bar", headers={"host": host}) self.assertEqual(r.status_code, 404) def test_infinite_redirect_on_404_view(self): """ Explicitly test using the ``proxito_404_handler`` view. This mimics the actual request that happens when a page is not found. 
""" host = "project.dev.readthedocs.io" fixture.get( Redirect, project=self.project, redirect_type=EXACT_REDIRECT, from_url="/en/latest/install.html", to_url="/en/latest/install.html", ) r = self.client.get( reverse( "proxito_404_handler", kwargs={"proxito_path": "/en/latest/install.html"}, ), headers={"host": host}, ) assert r.status_code == 404 r = self.client.get( reverse( "proxito_404_handler", kwargs={"proxito_path": "/en/latest/install.html"}, query={"foo": "bar"}, ), headers={"host": host}, ) assert r.status_code == 404 def test_infinite_redirect_changing_protocol(self): host = "project.dev.readthedocs.io" fixture.get( Redirect, project=self.project, redirect_type=EXACT_REDIRECT, from_url="/en/latest/install.html", to_url=f"https://{host}/en/latest/install.html", ) r = self.client.get("/en/latest/install.html", headers={"host": host}) self.assertEqual(r.status_code, 404) r = self.client.get("/en/latest/install.html?foo=bar", headers={"host": host}) self.assertEqual(r.status_code, 404) def test_exact_redirect_avoid_infinite_redirect(self): """ Avoid infinite redirects. If the URL hit is the same that the URL returned for redirection, we return a 404. These examples comes from this issue: * http://github.com/readthedocs/readthedocs.org/issues/4673 """ fixture.get( Redirect, project=self.project, redirect_type=EXACT_REDIRECT, from_url="/*", to_url="/en/latest/:splat", ) r = self.client.get( "/redirect.html", headers={"host": "project.dev.readthedocs.io"} ) self.assertEqual(r.status_code, 302) self.assertEqual( r["Location"], "http://project.dev.readthedocs.io/en/latest/redirect.html", ) r = self.client.get( "/redirect/", headers={"host": "project.dev.readthedocs.io"} ) self.assertEqual(r.status_code, 302) self.assertEqual( r["Location"], "http://project.dev.readthedocs.io/en/latest/redirect/", ) r = self.client.get( "/en/latest/redirect/", headers={"host": "project.dev.readthedocs.io"} ) self.assertEqual(r.status_code, 404) fixture.get( Redirect, project=self.project, redirect_type=EXACT_REDIRECT, from_url="/en/latest/*", to_url="/en/latest/subdir/:splat", ) r = self.client.get( "/en/latest/redirect.html", headers={"host": "project.dev.readthedocs.io"} ) self.assertEqual(r.status_code, 302) self.assertEqual( r["Location"], "http://project.dev.readthedocs.io/en/latest/subdir/redirect.html", ) r = self.client.get( "/en/latest/subdir/redirect.html", headers={"host": "project.dev.readthedocs.io"}, ) self.assertEqual(r.status_code, 404) def test_page_redirect_avoid_infinite_redirect(self): fixture.get( Redirect, project=self.project, redirect_type=PAGE_REDIRECT, from_url="/*", to_url="/subdir/:splat", ) r = self.client.get( "/en/latest/redirect.html", headers={"host": "project.dev.readthedocs.io"} ) self.assertEqual(r.status_code, 302) self.assertEqual( r["Location"], "http://project.dev.readthedocs.io/en/latest/subdir/redirect.html", ) r = self.client.get( "/en/latest/subdir/redirect.html", headers={"host": "project.dev.readthedocs.io"}, ) self.assertEqual(r.status_code, 404) fixture.get( Redirect, project=self.project, redirect_type=PAGE_REDIRECT, from_url="/dir/*", to_url="/dir/subdir/:splat", ) r = self.client.get( "/en/latest/dir/redirect.html", headers={"host": "project.dev.readthedocs.io"}, ) self.assertEqual(r.status_code, 302) self.assertEqual( r["Location"], "http://project.dev.readthedocs.io/en/latest/dir/subdir/redirect.html", ) r = self.client.get( "/en/latest/dir/subdir/redirect.html", headers={"host": "project.dev.readthedocs.io"}, ) self.assertEqual(r.status_code, 404) def 
test_exact_redirect_to_parent_path(self):
        self.project.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
        self.project.save()

        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=EXACT_REDIRECT,
            from_url="/en/latest/*",
            to_url="/:splat",
        )
        r = self.client.get(
            "/en/latest/dir/redirect.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/dir/redirect.html",
        )

        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=EXACT_REDIRECT,
            from_url="/one/two/*",
            to_url="/one/:splat",
        )
        r = self.client.get(
            "/one/two/three/redirect.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/one/three/redirect.html",
        )

    def test_page_redirect_to_parent_path(self):
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=PAGE_REDIRECT,
            from_url="/guides/*",
            to_url="/:splat",
        )
        r = self.client.get(
            "/en/latest/guides/redirect.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/redirect.html",
        )

        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=PAGE_REDIRECT,
            from_url="/one/two/*",
            to_url="/one/:splat",
        )
        r = self.client.get(
            "/en/latest/one/two/three/redirect.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/one/three/redirect.html",
        )

    def test_redirect_root(self):
        Redirect.objects.create(
            project=self.project,
            redirect_type=EXACT_REDIRECT,
            from_url="/woot/*",
            to_url="/en/latest/:splat",
        )
        r = self.client.get(
            "/woot/faq.html", headers={"host": "project.dev.readthedocs.io"}
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/faq.html",
        )

        # Prefix redirects should match the whole path.
        r = self.client.get(
            "/en/latest/woot/faq.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 404)

    def test_redirect_page(self):
        Redirect.objects.create(
            project=self.project,
            redirect_type=PAGE_REDIRECT,
            from_url="/install.html",
            to_url="/tutorial/install.html",
        )
        r = self.client.get(
            "/install.html", headers={"host": "project.dev.readthedocs.io"}
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/tutorial/install.html",
        )

    def test_redirect_with_query_params_from_url(self):
        self._storage_exists(
            [
                "/media/html/project/latest/tutorial/install.html",
            ]
        )
        Redirect.objects.create(
            project=self.project,
            redirect_type=PAGE_REDIRECT,
            from_url="/install.html",
            to_url="/tutorial/install.html",
        )
        r = self.client.get(
            "/install.html?foo=bar", headers={"host": "project.dev.readthedocs.io"}
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/tutorial/install.html?foo=bar",
        )

    def test_redirect_with_query_params_to_url(self):
        self._storage_exists(
            [
                "/media/html/project/latest/tutorial/install.html",
            ]
        )
        Redirect.objects.create(
            project=self.project,
            redirect_type=PAGE_REDIRECT,
            from_url="/install.html",
            to_url="/tutorial/install.html?query=one",
        )
        r = self.client.get(
            "/install.html", headers={"host": "project.dev.readthedocs.io"}
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/tutorial/install.html?query=one",
        )

        r = self.client.get(
            "/install.html?query=two", headers={"host": "project.dev.readthedocs.io"}
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/tutorial/install.html?query=two&query=one",
        )

    def test_redirect_exact(self):
        self._storage_exists(
            [
                "/media/html/project/latest/guides/install.html",
            ]
        )
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=EXACT_REDIRECT,
            from_url="/en/latest/install.html",
            to_url="/en/latest/tutorial/install.html",
        )
        r = self.client.get(
            "/en/latest/install.html", headers={"host": "project.dev.readthedocs.io"}
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/tutorial/install.html",
        )

    def test_redirect_exact_looks_like_version(self):
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=EXACT_REDIRECT,
            from_url="/en/versions.json",
            to_url="/en/latest/versions.json",
        )
        r = self.client.get(
            "/en/versions.json", headers={"host": "project.dev.readthedocs.io"}
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/versions.json",
        )

    def test_redirect_exact_with_wildcard(self):
        """
        Exact redirects can have a ``*`` at the end of ``from_url``.

        Use case: we want to deprecate version ``2.0`` and replace it by
        ``3.0``. We write an exact redirect from ``/en/2.0/*`` to
        ``/en/3.0/:splat``.
        """
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=EXACT_REDIRECT,
            from_url="/en/latest/*",
            to_url="/en/version/:splat",  # change version
        )
        self.assertEqual(self.project.redirects.count(), 1)
        r = self.client.get(
            "/en/latest/guides/install.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/version/guides/install.html",
        )

        # NOTE: I had to modify this test to add the Redirect in
        # ``self.translation`` instead of the root project. I think it makes
        # sense, but just wanted to mention it so we don't forget to talk
        # about backward compatibility
        fixture.get(
            Redirect,
            project=self.translation,
            redirect_type=EXACT_REDIRECT,
            from_url="/es/version/*",
            to_url="/en/master/:splat",  # change language and version
        )
        r = self.client.get(
            "/es/version/guides/install.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/master/guides/install.html",
        )

        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=EXACT_REDIRECT,
            from_url="/en/latest/tutorials/*",
            to_url="/en/latest/tutorial.html",
        )
        r = self.client.get(
            "/en/latest/tutorials/install.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"], "http://project.dev.readthedocs.io/en/latest/tutorial.html"
        )

    def test_page_redirect_with_wildcard(self):
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=PAGE_REDIRECT,
            from_url="/*",
            to_url="/guides/:splat",
        )
        r = self.client.get(
            "/en/latest/install.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/guides/install.html",
        )

        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=PAGE_REDIRECT,
            from_url="/guides/*",
            to_url="/guides/redirects/:splat",
        )
        r = self.client.get(
            "/en/latest/guides/install.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/guides/redirects/install.html",
        )

        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=PAGE_REDIRECT,
            from_url="/tutorials/*",
            to_url="/tutorial.html",
        )
        r = self.client.get(
            "/en/latest/tutorials/install.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"], "http://project.dev.readthedocs.io/en/latest/tutorial.html"
        )

    def test_redirect_inactive_version(self):
        """
        Inactive Version (``active=False``) should redirect properly.

        The function that serves the page should return 404 when serving a page
        of an inactive version and the redirect system should work.
        """
        fixture.get(
            Version,
            slug="oldversion",
            project=self.project,
            active=False,
        )
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=EXACT_REDIRECT,
            from_url="/en/oldversion/",
            to_url="/en/newversion/",
        )
        r = self.client.get(
            "/en/oldversion/", headers={"host": "project.dev.readthedocs.io"}
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/newversion/",
        )

    def test_redirect_keeps_version_number(self):
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=PAGE_REDIRECT,
            from_url="/how_to_install.html",
            to_url="/install.html",
        )
        r = self.client.get(
            "/en/0.8.2/how_to_install.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/0.8.2/install.html",
        )

    def test_redirect_keeps_language(self):
        self.project.language = "de"
        self.project.save()
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=PAGE_REDIRECT,
            from_url="/how_to_install.html",
            to_url="/install.html",
        )
        r = self.client.get(
            "/de/0.8.2/how_to_install.html",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/de/0.8.2/install.html",
        )

    def test_redirect_recognizes_custom_cname(self):
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=PAGE_REDIRECT,
            from_url="/install.html",
            to_url="/tutorial/install.html",
        )
        fixture.get(
            Feature,
            feature_id=Feature.RESOLVE_PROJECT_FROM_HEADER,
            projects=[self.project],
        )
        r = self.client.get(
            "/install.html",
            headers={"host": "docs.project.io", "x-rtd-slug": "project"},
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://docs.project.io/en/latest/tutorial/install.html",
        )

    def test_redirect_html(self):
        self._storage_exists(
            [
                "/media/html/project/latest/faq.html",
            ]
        )
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=CLEAN_URL_TO_HTML_REDIRECT,
        )
        r = self.client.get(
            "/en/latest/faq/", headers={"host": "project.dev.readthedocs.io"}
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/faq.html",
        )

    def test_redirect_html_root_index(self):
        # The redirection code should never be executed when hitting
        # ``/en/latest/`` because we serve that path for both ``html`` and
        # ``htmldir``. In both cases, the project will have a
        # ``/html/project/latest/index.html`` in the storage and should never
        # hit our redirection code because we MUST check for that file before
        # calling our redirection code. In other words, this should never
        # jump into the ``ServeError404`` handler.
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=CLEAN_URL_TO_HTML_REDIRECT,
        )

        with override_settings(PYTHON_MEDIA=False):
            # File exists in storage media
            r = self.client.get(
                "/en/latest/", headers={"host": "project.dev.readthedocs.io"}
            )
            self.assertEqual(r.status_code, 200)
            self.assertEqual(
                r["X-Accel-Redirect"],
                "/proxito/media/html/project/latest/index.html",
            )

        with override_settings(PYTHON_MEDIA=True):
            # File does not exist in storage media
            r = self.client.get(
                "/en/latest/", headers={"host": "project.dev.readthedocs.io"}
            )
            self.assertEqual(r.status_code, 404)

    def test_redirect_html_index(self):
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=CLEAN_URL_TO_HTML_REDIRECT,
        )
        r = self.client.get(
            "/en/latest/faq/index.html", headers={"host": "project.dev.readthedocs.io"}
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/faq.html",
        )

    def test_redirect_htmldir(self):
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=HTML_TO_CLEAN_URL_REDIRECT,
        )
        r = self.client.get(
            "/en/latest/faq.html", headers={"host": "project.dev.readthedocs.io"}
        )
        self.assertEqual(r.status_code, 302)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/faq/",
        )

    def test_redirect_root_with_301_status(self):
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=EXACT_REDIRECT,
            from_url="/woot/*",
            to_url="/en/latest/:splat",
            http_status=301,
        )
        r = self.client.get(
            "/woot/faq.html", headers={"host": "project.dev.readthedocs.io"}
        )
        self.assertEqual(r.status_code, 301)
        self.assertEqual(
            r["Location"],
            "http://project.dev.readthedocs.io/en/latest/faq.html",
        )

    def test_not_found_page_without_trailing_slash(self):
        # https://github.com/readthedocs/readthedocs.org/issues/4673
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=EXACT_REDIRECT,
            from_url="/*",
            to_url="/en/latest/:splat",
        )

        # Avoid infinite redirect
        r = self.client.get(
            "/en/latest/section/file-not-found",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(r.status_code, 404)

    def test_page_redirect_with_and_without_trailing_slash(self):
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=PAGE_REDIRECT,
            from_url="/install",
            to_url="/tutorial/install/",
        )

        for url in ["/en/latest/install", "/en/latest/install/"]:
            resp = self.client.get(url, headers={"host": "project.dev.readthedocs.io"})
            self.assertEqual(resp.status_code, 302)
            self.assertEqual(
                resp["Location"],
                "http://project.dev.readthedocs.io/en/latest/tutorial/install/",
            )

    def test_exact_redirect_with_and_without_trailing_slash(self):
        fixture.get(
            Redirect,
            project=self.project,
            redirect_type=EXACT_REDIRECT,
            from_url="/en/latest/install",
            to_url="/en/latest/tutorial/install/",
        )

        for url in ["/en/latest/install", "/en/latest/install/"]:
            resp = self.client.get(url, headers={"host": "project.dev.readthedocs.io"})
            self.assertEqual(resp.status_code, 302)
            self.assertEqual(
                resp["Location"],
                "http://project.dev.readthedocs.io/en/latest/tutorial/install/",
            )


@override_settings(PUBLIC_DOMAIN="dev.readthedocs.io")
UserRedirectTests
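The record above exercises wildcard redirects: a ``from_url`` ending in ``*`` matches any suffix of the requested path, and ``:splat`` in ``to_url`` is replaced by that suffix. The sketch below is only an illustrative approximation of that substitution, not the Read the Docs implementation; the helper name ``resolve_redirect`` is invented for the example.

# Illustrative sketch of the "*"/":splat" behaviour seen in the tests above.
def resolve_redirect(from_url: str, to_url: str, path: str) -> str | None:
    """Return the redirected path, or None when ``path`` does not match."""
    if from_url.endswith("*"):
        prefix = from_url[:-1]
        if path.startswith(prefix):
            # The suffix captured by ``*`` replaces ``:splat`` in ``to_url``.
            return to_url.replace(":splat", path[len(prefix):])
        return None
    return to_url if path == from_url else None


# Mirrors test_redirect_exact_with_wildcard from the record above.
assert resolve_redirect(
    "/en/latest/*", "/en/version/:splat", "/en/latest/guides/install.html"
) == "/en/version/guides/install.html"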
python
marshmallow-code__marshmallow
tests/mypy_test_cases/test_schema.py
{ "start": 163, "end": 748 }
class ____(Schema):
    foo = String()
    bar = Integer()

    class Meta(Schema.Meta):
        fields = ("foo", "bar")
        additional = ("baz", "qux")
        include = {
            "foo2": String(),
        }
        exclude = ("bar", "baz")
        many = True
        dateformat = "%Y-%m-%d"
        datetimeformat = "%Y-%m-%dT%H:%M:%S"
        timeformat = "%H:%M:%S"
        render_module = json
        ordered = False
        index_errors = True
        load_only = ("foo", "bar")
        dump_only = ("baz", "qux")
        unknown = EXCLUDE
        register = False
MySchema
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/ddl.py
{ "start": 29196, "end": 29347 }
class ____(BaseDDLElement):
    element: Constraint

    def __init__(self, element: Constraint) -> None:
        self.element = element
CreateConstraint
python
altair-viz__altair
altair/vegalite/v6/schema/_config.py
{ "start": 99890, "end": 101129 }
class ____(TypedDict, total=False):
    """
    :class:`altair.GeoJsonFeature` ``TypedDict`` wrapper.

    Parameters
    ----------
    geometry
        The feature's geometry
    properties
        Properties associated with this feature.
    type
        Specifies the type of GeoJSON object.
    bbox
        Bounding box of the coordinate range of the object's Geometries, Features, or
        Feature Collections. The value of the bbox member is an array of length 2*n where
        n is the number of dimensions represented in the contained geometries, with all
        axes of the most southwesterly point followed by all axes of the more
        northeasterly point. The axes order of a bbox follows the axes order of
        geometries.

        https://tools.ietf.org/html/rfc7946#section-5
    id
        A value that uniquely identifies this feature in a
        https://tools.ietf.org/html/rfc7946#section-3.2.
    """

    geometry: (
        PointKwds
        | PolygonKwds
        | LineStringKwds
        | MultiPointKwds
        | MultiPolygonKwds
        | MultiLineStringKwds
        | GeometryCollectionKwds
    )
    properties: None
    type: Literal["Feature"]
    bbox: Sequence[float]
    id: str | float
GeoJsonFeatureKwds
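A minimal usage sketch for the ``TypedDict`` wrapper in this record. The import path simply mirrors the record's ``path`` field and may differ between altair versions, the coordinate values are made up, and the Point-shaped ``geometry`` dict assumes ``PointKwds`` follows the usual GeoJSON ``type``/``coordinates`` layout.

# Illustrative only: a dict shaped like the GeoJsonFeatureKwds wrapper above.
from altair.vegalite.v6.schema._config import GeoJsonFeatureKwds

feature: GeoJsonFeatureKwds = {
    "type": "Feature",
    # Assumed PointKwds shape: a GeoJSON Point with lon/lat coordinates.
    "geometry": {"type": "Point", "coordinates": [125.6, 10.1]},
    "properties": None,
    "bbox": [125.6, 10.1, 125.6, 10.1],
    "id": "feature-0",
}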