Dataset columns:
  language:   stringclasses (1 value)
  repo:       stringclasses (346 values)
  path:       stringlengths (6 to 201)
  class_span: dict
  source:     stringlengths (21 to 2.38M)
  target:     stringlengths (1 to 96)
language: python
repo: microsoft__pyright
path: packages/pyright-internal/src/tests/samples/typeNarrowingFalsy1.py
class_span: { "start": 284, "end": 341 }
source:
class ____: def __bool__(self) -> Literal[False]: ...
target: C
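Each row pairs a class body whose name has been masked with `____` against the original class name in `target`; `class_span` appears to give the character offsets of the class within the file at `path`. Below is a minimal sketch of working with one record, assuming it is exposed as a plain Python dict keyed by the schema columns above (the loading API is not shown in this listing); the values are copied from the first row.

```python
# Assumption: a record is a plain dict keyed by the schema columns above.
# Values are taken verbatim from the first row of this listing.
record = {
    "language": "python",
    "repo": "microsoft__pyright",
    "path": "packages/pyright-internal/src/tests/samples/typeNarrowingFalsy1.py",
    "class_span": {"start": 284, "end": 341},  # presumably character offsets of the class in the file
    "source": "class ____: def __bool__(self) -> Literal[False]: ...",
    "target": "C",
}

# The task implied by the columns: predict the masked class name from the body,
# then compare the prediction with `target` and optionally rebuild the original text.
prediction = "C"  # stand-in for a model's output
restored = record["source"].replace("____", record["target"], 1)

assert prediction == record["target"]
assert restored.startswith(f"class {record['target']}")
```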
language: python
repo: getsentry__sentry
path: src/sentry/workflow_engine/processors/workflow.py
class_span: { "start": 1596, "end": 21568 }
source:
class ____(StrEnum): ACTION_FILTER = "action_filter" WORKFLOW_TRIGGER = "workflow_trigger" def delete_workflow(workflow: Workflow) -> bool: with transaction.atomic(router.db_for_write(Workflow)): action_filters = DataConditionGroup.objects.filter( workflowdataconditiongroup__workflow=workflow ) actions = Action.objects.filter( dataconditiongroupaction__condition_group__in=action_filters ) # Delete the actions associated with a workflow, this is not a cascade delete # because we want to create a UI to maintain notification actions separately if actions: actions.delete() if action_filters: action_filters.delete() if workflow.when_condition_group: workflow.when_condition_group.delete() workflow.delete() return True @scopedstats.timer() def enqueue_workflows( client: DelayedWorkflowClient, items_by_workflow: dict[Workflow, DelayedWorkflowItem], ) -> None: items_by_project_id = DefaultDict[int, list[DelayedWorkflowItem]](list) for queue_item in items_by_workflow.values(): if not queue_item.delayed_if_group_ids and not queue_item.passing_if_group_ids: # Skip because there are no IF groups we could possibly fire actions for if # the WHEN/IF delayed conditions are met continue project_id = queue_item.event.project_id items_by_project_id[project_id].append(queue_item) items = 0 project_to_workflow: dict[int, list[int]] = {} if not items_by_project_id: sentry_sdk.set_tag("delayed_workflow_items", items) return for project_id, queue_items in items_by_project_id.items(): client.for_project(project_id).push_to_hash( batch_key=None, data={queue_item.buffer_key(): queue_item.buffer_value() for queue_item in queue_items}, ) items += len(queue_items) project_to_workflow[project_id] = sorted({item.workflow.id for item in queue_items}) sentry_sdk.set_tag("delayed_workflow_items", items) client.add_project_ids(list(items_by_project_id.keys())) logger.debug( "workflow_engine.workflows.enqueued", extra={ "project_to_workflow": project_to_workflow, }, ) @scopedstats.timer() def _get_data_conditions_for_group_by_dcg(dcg_ids: Sequence[int]) -> dict[int, list[DataCondition]]: """ Given a list of DataConditionGroup IDs, return a dict mapping them to their DataConditions. Fetching them individually as needed is typically simple; this is for cases where the performance benefit is worth passing around a dict. """ if not dcg_ids: return {} # `batch` wants param tuples and associates return results by index. return dict( zip(dcg_ids, get_data_conditions_for_group.batch([(dcg_id,) for dcg_id in dcg_ids])) ) @sentry_sdk.trace @scopedstats.timer() def evaluate_workflow_triggers( workflows: set[Workflow], event_data: WorkflowEventData, event_start_time: datetime, ) -> tuple[set[Workflow], dict[Workflow, DelayedWorkflowItem]]: """ Returns a tuple of (triggered_workflows, queue_items_by_workflow) - triggered_workflows: set of workflows that were triggered - queue_items_by_workflow: mapping of workflow to the delayed workflow item, used in the next step (evaluate action filters) to enqueue workflows with slow conditions within that function """ triggered_workflows: set[Workflow] = set() queue_items_by_workflow: dict[Workflow, DelayedWorkflowItem] = {} dcg_ids = [ workflow.when_condition_group_id for workflow in workflows if workflow.when_condition_group_id ] # Retrieve these as a batch to avoid a query/cache-lookup per DCG. 
data_conditions_by_dcg_id = _get_data_conditions_for_group_by_dcg(dcg_ids) project = event_data.event.project # expected to be already cached dual_processing_logs_enabled = features.has( "organizations:workflow-engine-metric-alert-dual-processing-logs", project.organization, ) for workflow in workflows: when_data_conditions = None if dcg_id := workflow.when_condition_group_id: when_data_conditions = data_conditions_by_dcg_id.get(dcg_id) evaluation, remaining_conditions = workflow.evaluate_trigger_conditions( event_data, when_data_conditions ) if remaining_conditions: if isinstance(event_data.event, GroupEvent): queue_items_by_workflow[workflow] = DelayedWorkflowItem( workflow=workflow, event=event_data.event, delayed_when_group_id=workflow.when_condition_group_id, delayed_if_group_ids=[], passing_if_group_ids=[], timestamp=event_start_time, ) else: """ Tracking when we try to enqueue a slow condition for an activity. Currently, we are assuming those cases are evaluating as True since an activity update is meant to respond to a previous event. """ metrics_incr("process_workflows.enqueue_workflow.activity") logger.debug( "workflow_engine.process_workflows.enqueue_workflow.activity", extra={ "event_id": event_data.event.id, "workflow_id": workflow.id, }, ) else: if evaluation.triggered: triggered_workflows.add(workflow) if dual_processing_logs_enabled: try: detector = WorkflowEventContext.get().detector detector_id = detector.id if detector else None logger.info( "workflow_engine.process_workflows.workflow_triggered", extra={ "workflow_id": workflow.id, "detector_id": detector_id, "organization_id": project.organization.id, "project_id": project.id, "group_type": event_data.group.type, }, ) except DetectorWorkflow.DoesNotExist: continue metrics_incr( "process_workflows.triggered_workflows", len(triggered_workflows), ) # TODO - Remove `environment` access once it's in the shared logger. environment = WorkflowEventContext.get().environment if environment is None: try: environment = get_environment_by_event(event_data) except Environment.DoesNotExist: return set(), {} event_id = ( event_data.event.event_id if isinstance(event_data.event, GroupEvent) else event_data.event.id ) logger.debug( "workflow_engine.process_workflows.triggered_workflows", extra={ "group_id": event_data.group.id, "event_id": event_id, "event_data": asdict(event_data), "event_environment_id": environment.id if environment else None, "triggered_workflows": [workflow.id for workflow in triggered_workflows], "queue_workflows": sorted(wf.id for wf in queue_items_by_workflow.keys()), }, ) return triggered_workflows, queue_items_by_workflow @sentry_sdk.trace @scopedstats.timer() def evaluate_workflows_action_filters( workflows: set[Workflow], event_data: WorkflowEventData, queue_items_by_workflow: dict[Workflow, DelayedWorkflowItem], event_start_time: datetime, ) -> tuple[set[DataConditionGroup], dict[Workflow, DelayedWorkflowItem]]: """ Evaluate the action filters for the given workflows. Returns a set of DataConditionGroups that were evaluated to True. Enqueues workflows with slow conditions to be evaluated in a batched task. 
""" # Collect all workflows, including those with pending slow condition results (queue_items_by_workflow) # to evaluate all fast conditions all_workflows = workflows.union(set(queue_items_by_workflow.keys())) action_conditions_to_workflow = { wdcg.condition_group: wdcg.workflow for wdcg in WorkflowDataConditionGroup.objects.select_related( "workflow", "condition_group" ).filter(workflow__in=all_workflows) } filtered_action_groups: set[DataConditionGroup] = set() # Retrieve these as a batch to avoid a query/cache-lookup per DCG. data_conditions_by_dcg_id = _get_data_conditions_for_group_by_dcg( [dcg.id for dcg in action_conditions_to_workflow.keys()] ) env_by_id: dict[int, Environment] = { env.id: env for env in Environment.objects.get_many_from_cache( { wf.environment_id for wf in action_conditions_to_workflow.values() if wf.environment_id } ) } for action_condition_group, workflow in action_conditions_to_workflow.items(): env = env_by_id.get(workflow.environment_id) if workflow.environment_id else None workflow_event_data = replace(event_data, workflow_env=env) group_evaluation, slow_conditions = process_data_condition_group( action_condition_group, workflow_event_data, data_conditions_by_dcg_id.get(action_condition_group.id), ) if slow_conditions: # If there are remaining conditions for the action filter to evaluate, # then return the list of conditions to enqueue. if isinstance(event_data.event, GroupEvent): if delayed_workflow_item := queue_items_by_workflow.get(workflow): delayed_workflow_item.delayed_if_group_ids.append(action_condition_group.id) else: queue_items_by_workflow[workflow] = DelayedWorkflowItem( workflow=workflow, delayed_when_group_id=None, delayed_if_group_ids=[action_condition_group.id], event=event_data.event, passing_if_group_ids=[], timestamp=event_start_time, ) else: # We should not include activity updates in delayed conditions, # this is because the actions should always be triggered if this condition is met. 
# The original snuba queries would have to be over threshold to create this event metrics_incr("process_workflows.enqueue_workflow.activity") logger.debug( "workflow_engine.process_workflows.enqueue_workflow.activity", extra={ "event_id": event_data.event.id, "action_condition_id": action_condition_group.id, "workflow_id": workflow.id, }, ) else: if group_evaluation.logic_result.triggered: if delayed_workflow_item := queue_items_by_workflow.get(workflow): if delayed_workflow_item.delayed_when_group_id: # If there are already delayed when conditions, # we need to evaluate them before firing the action group delayed_workflow_item.passing_if_group_ids.append(action_condition_group.id) else: filtered_action_groups.add(action_condition_group) event_id = ( event_data.event.event_id if isinstance(event_data.event, GroupEvent) else event_data.event.id ) logger.debug( "workflow_engine.evaluate_workflows_action_filters", extra={ "group_id": event_data.group.id, "event_id": event_id, "workflow_ids": [workflow.id for workflow in action_conditions_to_workflow.values()], "action_conditions": [ action_condition_group.id for action_condition_group in action_conditions_to_workflow.keys() ], "filtered_action_groups": [action_group.id for action_group in filtered_action_groups], "queue_workflows": sorted(wf.id for wf in queue_items_by_workflow.keys()), }, ) return filtered_action_groups, queue_items_by_workflow def get_environment_by_event(event_data: WorkflowEventData) -> Environment | None: if isinstance(event_data.event, GroupEvent): try: environment = event_data.event.get_environment() except Environment.DoesNotExist: metrics_incr("process_workflows.error") logger.exception( "Missing environment for event", extra={"event_id": event_data.event.event_id} ) raise Environment.DoesNotExist("Environment does not exist for the event") return environment elif isinstance(event_data.event, Activity): return None raise TypeError(f"Cannot access the environment from, {type(event_data.event)}.") @scopedstats.timer() def _get_associated_workflows( detectors: Collection[Detector], environment: Environment | None, event_data: WorkflowEventData ) -> set[Workflow]: """ This is a wrapper method to get the workflows associated with a detector and environment. Used in process_workflows to wrap the query + logging into a single method """ environment_filter = ( (Q(environment_id=None) | Q(environment_id=environment.id)) if environment else Q(environment_id=None) ) workflows = set( Workflow.objects.filter( environment_filter, detectorworkflow__detector_id__in=[detector.id for detector in detectors], enabled=True, ) .select_related("environment") .distinct() ) if workflows: metrics_incr( "process_workflows", len(workflows), ) event_id = ( event_data.event.event_id if isinstance(event_data.event, GroupEvent) else event_data.event.id ) logger.debug( "workflow_engine.process_workflows", extra={ "payload": event_data, "group_id": event_data.group.id, "event_id": event_id, "event_data": asdict(event_data), "event_environment_id": environment.id if environment else None, "workflows": [workflow.id for workflow in workflows], "detector_types": [detector.type for detector in detectors], }, ) return workflows @log_context.root() def process_workflows( batch_client: DelayedWorkflowClient, event_data: WorkflowEventData, event_start_time: datetime, detector: Detector | None = None, ) -> WorkflowEvaluation: """ This method will get the detector based on the event, and then gather the associated workflows. 
Next, it will evaluate the "when" (or trigger) conditions for each workflow, if the conditions are met, the workflow will be added to a unique list of triggered workflows. Finally, each of the triggered workflows will have their actions evaluated and executed. """ from sentry.notifications.notification_action.utils import should_fire_workflow_actions from sentry.workflow_engine.processors.action import ( filter_recently_fired_workflow_actions, fire_actions, ) workflow_evaluation_data = WorkflowEvaluationData(event=event_data.event) try: event_detectors = get_detectors_for_event(event_data, detector) if not event_detectors: raise Detector.DoesNotExist("No Detectors associated with the issue were found") log_context.add_extras(detector_id=event_detectors.preferred_detector.id) organization = event_data.event.project.organization # set the detector / org information asap, this is used in `get_environment_by_event` as well. WorkflowEventContext.set( WorkflowEventContextData( detector=event_detectors.preferred_detector, organization=organization, ) ) except Detector.DoesNotExist: return WorkflowEvaluation( tainted=True, msg="No Detectors associated with the issue were found", data=workflow_evaluation_data, ) workflow_evaluation_data.associated_detector = event_detectors.preferred_detector try: environment = get_environment_by_event(event_data) # Set the full context now that we've gotten everything. WorkflowEventContext.set( WorkflowEventContextData( detector=event_detectors.preferred_detector, environment=environment, organization=organization, ) ) except Environment.DoesNotExist: return WorkflowEvaluation( tainted=True, msg="Environment for event not found", data=workflow_evaluation_data, ) if features.has("organizations:workflow-engine-process-workflows-logs", organization): log_context.set_verbose(True) workflows = _get_associated_workflows(event_detectors.detectors, environment, event_data) workflow_evaluation_data.workflows = workflows if not workflows: return WorkflowEvaluation( tainted=True, msg="No workflows are associated with the detector in the event", data=workflow_evaluation_data, ) triggered_workflows, queue_items_by_workflow_id = evaluate_workflow_triggers( workflows, event_data, event_start_time ) workflow_evaluation_data.triggered_workflows = triggered_workflows if not triggered_workflows and not queue_items_by_workflow_id: # TODO - re-think tainted once the actions are removed from process_workflows. 
return WorkflowEvaluation( tainted=True, msg="No items were triggered or queued for slow evaluation", data=workflow_evaluation_data, ) # TODO - we should probably return here and have the rest from here be # `process_actions`, this will take a list of "triggered_workflows" actions_to_trigger, queue_items_by_workflow_id = evaluate_workflows_action_filters( triggered_workflows, event_data, queue_items_by_workflow_id, event_start_time ) enqueue_workflows(batch_client, queue_items_by_workflow_id) actions = filter_recently_fired_workflow_actions(actions_to_trigger, event_data) sentry_sdk.set_tag("workflow_engine.triggered_actions", len(actions)) workflow_evaluation_data.action_groups = actions_to_trigger workflow_evaluation_data.triggered_actions = set(actions) workflow_evaluation_data.delayed_conditions = queue_items_by_workflow_id if not actions: return WorkflowEvaluation( tainted=True, msg="No actions to evaluate; filtered or not triggered", data=workflow_evaluation_data, ) should_trigger_actions = should_fire_workflow_actions(organization, event_data.group.type) create_workflow_fire_histories( actions, event_data, should_trigger_actions, is_delayed=False, start_timestamp=event_start_time, ) fire_actions(actions, event_data) return WorkflowEvaluation(tainted=False, data=workflow_evaluation_data)
target: WorkflowDataConditionGroupType
language: python
repo: jmcnamara__XlsxWriter
path: xlsxwriter/test/comparison/test_default_format11.py
class_span: { "start": 315, "end": 2242 }
source:
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("default_format11.xlsx") def test_create_file(self): """Test the creation of a file with user defined default format""" workbook = Workbook( self.got_filename, { "default_format_properties": {"font_name": "Arial", "font_size": 8}, "default_row_height": 15, "default_column_width": 56, }, ) worksheet = workbook.add_worksheet() worksheet.insert_image("E9", self.image_dir + "red.png", {"x_offset": 32}) # Set user column width and row height to test positioning calculation. worksheet.set_column_pixels(4, 4, 96) worksheet.set_row_pixels(8, 32) # Set column to text column width less than 1 character. worksheet.set_column_pixels(6, 6, 10) workbook.close() self.assertExcelEqual() def test_create_file_with_character_units(self): """Test the creation of a file with user defined default format""" # Same as workbook = Workbook( self.got_filename, { "default_format_properties": {"font_name": "Arial", "font_size": 8}, "default_row_height": 15, "default_column_width": 56, }, ) worksheet = workbook.add_worksheet() worksheet.insert_image("E9", self.image_dir + "red.png", {"x_offset": 32}) # Set user column width and row height to test positioning calculation. worksheet.set_column(4, 4, 15.17) worksheet.set_row(8, 24.0) # Set column to text column width less than 1 character. worksheet.set_column(6, 6, 0.91) workbook.close() self.assertExcelEqual()
target: TestCompareXLSXFiles
language: python
repo: docker__docker-py
path: tests/unit/api_test.py
class_span: { "start": 19391, "end": 20390 }
source:
class ____(unittest.TestCase): def setUp(self): self.patcher = mock.patch.object( APIClient, 'send', return_value=fake_resp("GET", f"{fake_api.prefix}/version") ) self.mock_send = self.patcher.start() def tearDown(self): self.patcher.stop() def test_default_user_agent(self): client = APIClient(version=DEFAULT_DOCKER_API_VERSION) client.version() assert self.mock_send.call_count == 1 headers = self.mock_send.call_args[0][0].headers expected = f'docker-sdk-python/{docker.__version__}' assert headers['User-Agent'] == expected def test_custom_user_agent(self): client = APIClient( user_agent='foo/bar', version=DEFAULT_DOCKER_API_VERSION) client.version() assert self.mock_send.call_count == 1 headers = self.mock_send.call_args[0][0].headers assert headers['User-Agent'] == 'foo/bar'
target: UserAgentTest
language: python
repo: getsentry__sentry
path: tests/sentry/api/test_utils.py
class_span: { "start": 8765, "end": 9946 }
source:
class ____(unittest.TestCase): def test_no_clamp_if_range_under_max(self) -> None: start = datetime.datetime(2024, 1, 1) end = datetime.datetime(2024, 1, 2) max_timedelta = datetime.timedelta(days=7) assert clamp_date_range((start, end), max_timedelta) == (start, end) def test_no_clamp_for_negative_range(self) -> None: start = datetime.datetime(2024, 1, 1) end = datetime.datetime(2023, 1, 2) max_timedelta = datetime.timedelta(hours=1) assert clamp_date_range((start, end), max_timedelta) == (start, end) def test_clamps_even_to_zero(self) -> None: start = datetime.datetime(2024, 1, 1) end = datetime.datetime(2024, 1, 2) max_timedelta = datetime.timedelta(0) assert clamp_date_range((start, end), max_timedelta) == (end, end) def test_clamps_to_end(self) -> None: start = datetime.datetime(2024, 1, 1) end = datetime.datetime(2024, 1, 14) max_timedelta = datetime.timedelta(days=1) assert clamp_date_range((start, end), max_timedelta) == ( datetime.datetime(2024, 1, 13), end, )
target: ClampDateRangeTest
language: python
repo: apache__airflow
path: providers/amazon/tests/unit/amazon/aws/operators/test_glue.py
class_span: { "start": 22453, "end": 27521 }
source:
class ____: RUN_ID = "1234567890" DATA_SOURCE = {"GlueTable": {"DatabaseName": "TestDB", "TableName": "TestTable"}} ROLE = "role_arn" RULE_SET_NAMES = ["TestRuleSet"] @pytest.fixture def mock_conn(self) -> Generator[BaseAwsConnection, None, None]: with mock.patch.object(GlueDataQualityHook, "conn") as _conn: _conn.start_data_quality_ruleset_evaluation_run.return_value = {"RunId": self.RUN_ID} yield _conn @pytest.fixture def glue_data_quality_hook(self) -> Generator[GlueDataQualityHook, None, None]: with mock_aws(): hook = GlueDataQualityHook(aws_conn_id="aws_default") yield hook def setup_method(self): self.operator = GlueDataQualityRuleSetEvaluationRunOperator( task_id="stat_evaluation_run", datasource=self.DATA_SOURCE, role=self.ROLE, rule_set_names=self.RULE_SET_NAMES, show_results=False, ) self.operator.defer = mock.MagicMock() def test_init(self): assert self.operator.datasource == self.DATA_SOURCE assert self.operator.role == self.ROLE assert self.operator.rule_set_names == self.RULE_SET_NAMES @mock.patch.object(GlueDataQualityHook, "conn") def test_start_data_quality_ruleset_evaluation_run(self, glue_data_quality_mock_conn): glue_data_quality_mock_conn.get_data_quality_ruleset.return_value = {"Name": "TestRuleSet"} self.op = GlueDataQualityRuleSetEvaluationRunOperator( task_id="stat_evaluation_run", datasource=self.DATA_SOURCE, role=self.ROLE, number_of_workers=10, timeout=1000, rule_set_names=self.RULE_SET_NAMES, rule_set_evaluation_run_kwargs={"AdditionalRunOptions": {"CloudWatchMetricsEnabled": True}}, ) self.op.wait_for_completion = False self.op.execute({}) glue_data_quality_mock_conn.start_data_quality_ruleset_evaluation_run.assert_called_once_with( DataSource=self.DATA_SOURCE, Role=self.ROLE, NumberOfWorkers=10, Timeout=1000, RulesetNames=self.RULE_SET_NAMES, AdditionalRunOptions={"CloudWatchMetricsEnabled": True}, ) def test_validate_inputs(self, mock_conn): mock_conn.get_data_quality_ruleset.return_value = {"Name": "TestRuleSet"} assert self.operator.validate_inputs() is None def test_validate_inputs_error(self, mock_conn): class RuleSetNotFoundException(Exception): pass mock_conn.exceptions.EntityNotFoundException = RuleSetNotFoundException mock_conn.get_data_quality_ruleset.side_effect = RuleSetNotFoundException() self.operator = GlueDataQualityRuleSetEvaluationRunOperator( task_id="stat_evaluation_run", datasource=self.DATA_SOURCE, role=self.ROLE, rule_set_names=["dummy"], ) with pytest.raises(AirflowException, match="Following RulesetNames are not found \\['dummy'\\]"): self.operator.validate_inputs() @pytest.mark.parametrize( ("wait_for_completion", "deferrable"), [ pytest.param(False, False, id="no_wait"), pytest.param(True, False, id="wait"), pytest.param(False, True, id="defer"), ], ) @mock.patch.object(GlueDataQualityHook, "get_waiter") def test_start_data_quality_ruleset_evaluation_run_wait_combinations( self, _, wait_for_completion, deferrable, mock_conn, glue_data_quality_hook ): mock_conn.get_data_quality_ruleset.return_value = {"Name": "TestRuleSet"} self.operator.wait_for_completion = wait_for_completion self.operator.deferrable = deferrable response = self.operator.execute({}) assert response == self.RUN_ID assert glue_data_quality_hook.get_waiter.call_count == wait_for_completion assert self.operator.defer.call_count == deferrable def test_template_fields(self): validate_template_fields(self.operator) def test_overwritten_conn_passed_to_hook(self): OVERWRITTEN_CONN = "new-conn-id" op = GlueDataQualityRuleSetEvaluationRunOperator( 
task_id="test_overwritten_conn_passed_to_hook", datasource=self.DATA_SOURCE, role=self.ROLE, rule_set_names=self.RULE_SET_NAMES, show_results=False, aws_conn_id=OVERWRITTEN_CONN, ) assert op.hook.aws_conn_id == OVERWRITTEN_CONN def test_default_conn_passed_to_hook(self): DEFAULT_CONN = "aws_default" op = GlueDataQualityRuleSetEvaluationRunOperator( task_id="test_default_conn_passed_to_hook", datasource=self.DATA_SOURCE, role=self.ROLE, rule_set_names=self.RULE_SET_NAMES, show_results=False, ) assert op.hook.aws_conn_id == DEFAULT_CONN
target: TestGlueDataQualityRuleSetEvaluationRunOperator
language: python
repo: scipy__scipy
path: scipy/special/tests/test_orthogonal.py
class_span: { "start": 7843, "end": 8732 }
source:
class ____: def test_sh_chebyu(self): # U*_n(x) = U_n(2x-1) psub = np.poly1d([2,-1]) Us0 = orth.sh_chebyu(0) Us1 = orth.sh_chebyu(1) Us2 = orth.sh_chebyu(2) Us3 = orth.sh_chebyu(3) Us4 = orth.sh_chebyu(4) Us5 = orth.sh_chebyu(5) use0 = orth.chebyu(0)(psub) use1 = orth.chebyu(1)(psub) use2 = orth.chebyu(2)(psub) use3 = orth.chebyu(3)(psub) use4 = orth.chebyu(4)(psub) use5 = orth.chebyu(5)(psub) assert_allclose(Us0.c, use0.c, atol=1.5e-13, rtol=0) assert_allclose(Us1.c, use1.c, atol=1.5e-13, rtol=0) assert_allclose(Us2.c, use2.c, atol=1.5e-13, rtol=0) assert_allclose(Us3.c, use3.c, atol=1.5e-13, rtol=0) assert_allclose(Us4.c, use4.c, atol=1.5e-12, rtol=0) assert_allclose(Us5.c, use5.c, atol=1.5e-11, rtol=0)
target: TestShChebyu
language: python
repo: getsentry__sentry
path: src/sentry/api/helpers/group_index/validators/in_commit.py
class_span: { "start": 323, "end": 1665 }
source:
class ____(serializers.Serializer[InCommitResult]): commit = serializers.CharField(required=True, help_text="The SHA of the resolving commit.") repository = serializers.CharField( required=True, help_text="The name of the repository (as it appears in Sentry)." ) def validate_repository(self, value: str) -> Repository: project = self.context["project"] try: return Repository.objects.get(organization_id=project.organization_id, name=value) except Repository.DoesNotExist: raise serializers.ValidationError("Unable to find the given repository.") def validate(self, attrs: dict[str, Any]) -> Commit: attrs = super().validate(attrs) repository = attrs.get("repository") commit = attrs.get("commit") if not repository: raise serializers.ValidationError( {"repository": ["Unable to find the given repository."]} ) if not commit: raise serializers.ValidationError({"commit": ["Unable to find the given commit."]}) try: commit = Commit.objects.get(repository_id=repository.id, key=commit) except Commit.DoesNotExist: raise serializers.ValidationError({"commit": ["Unable to find the given commit."]}) return commit
target: InCommitValidator
language: python
repo: joke2k__faker
path: tests/providers/test_automotive.py
class_span: { "start": 7223, "end": 7384 }
source:
class ____(_SimpleAutomotiveTestMixin): license_plate_pattern: Pattern = re.compile( r"^\d{2,3}[가나다라마거너더러머버서어저고노도로모보소오조구누두루무부수우주]\d{4}$" )
target: TestKoKr
language: python
repo: jupyterlab__jupyterlab
path: jupyterlab/labextensions.py
class_span: { "start": 18331, "end": 20039 }
source:
class ____(JupyterApp): """Base jupyter labextension command entry point""" name = "jupyter labextension" version = VERSION description = "Work with JupyterLab extensions" examples = _EXAMPLES subcommands = { "install": (InstallLabExtensionApp, "Install labextension(s)"), "update": (UpdateLabExtensionApp, "Update labextension(s)"), "uninstall": (UninstallLabExtensionApp, "Uninstall labextension(s)"), "list": (ListLabExtensionsApp, "List labextensions"), "link": (LinkLabExtensionApp, "Link labextension(s)"), "unlink": (UnlinkLabExtensionApp, "Unlink labextension(s)"), "enable": (EnableLabExtensionsApp, "Enable labextension(s)"), "disable": (DisableLabExtensionsApp, "Disable labextension(s)"), "lock": (LockLabExtensionsApp, "Lock labextension(s)"), "unlock": (UnlockLabExtensionsApp, "Unlock labextension(s)"), "check": (CheckLabExtensionsApp, "Check labextension(s)"), "develop": (DevelopLabExtensionApp, "(developer) Develop labextension(s)"), "build": (BuildLabExtensionApp, "(developer) Build labextension"), "watch": (WatchLabExtensionApp, "(developer) Watch labextension"), } def start(self): """Perform the App's functions as configured""" super().start() # The above should have called a subcommand and raised NoStart; if we # get here, it didn't, so we should self.log.info a message. subcmds = ", ".join(sorted(self.subcommands)) self.exit(f"Please supply at least one subcommand: {subcmds}") main = LabExtensionApp.launch_instance if __name__ == "__main__": sys.exit(main())
target: LabExtensionApp
language: python
repo: facebook__pyre-check
path: tools/generate_taint_models/tests/get_models_filtered_by_callable_test.py
class_span: { "start": 409, "end": 792 }
source:
class ____(Model): def __init__(self, index: int) -> None: self.index = index def __eq__(self, other: "TestModel") -> int: return self.index == other.index # pyre-fixme[7]: Expected `int` but got implicit return value of `None`. def __hash__(self) -> int: pass def __str__(self) -> str: return f"TestModel({self.index})"
target: TestModel
language: python
repo: great-expectations__great_expectations
path: contrib/experimental/great_expectations_experimental/expectations/expect_column_skew_to_be_between.py
class_span: { "start": 5372, "end": 14107 }
source:
class ____(ColumnAggregateExpectation): """Expect column skew to be between. Currently tests against Gamma and Beta distributions.""" # These examples will be shown in the public gallery, and also executed as unit tests for your Expectation examples = [ { "data": { "a": [ 5.27071512, 7.05981507, 8.46671693, 10.20629973, 6.15519149, 7.11709362, 5.31915535, 6.56441299, 5.69143401, 5.0389317, 6.48222587, 5.62433534, 5.46219467, 5.74686441, 6.05413964, 7.09435276, 6.43876861, 6.05301145, 6.12727457, 6.80603351, ], # sampled from Gamma(1, 5) "b": [ 81.11265955, 76.7836479, 85.25019592, 93.93285666, 83.63587009, 81.88712944, 80.37321975, 86.786491, 80.05277435, 70.36302516, 79.4907302, 84.1288281, 87.79298488, 78.02771047, 80.63975023, 88.59461893, 84.05632481, 84.54128192, 78.74152549, 83.60684806, ], # sampled from Beta(50, 10) "c": [ 95.74648827, 80.4031074, 85.41863916, 93.98001949, 97.84607818, 89.01205412, 89.55045229, 97.32734707, 93.94199505, 88.19992377, 98.3336087, 97.66984436, 97.39464709, 95.55637873, 96.10980996, 90.18004343, 96.2019293, 89.19519753, 94.01807868, 93.23978285, ], # sampled from Beta(20, 2) }, "suppress_test_for": ["sqlite", "mssql"], "tests": [ { "title": "positive_test_positive_skew", "exact_match_out": False, "include_in_gallery": True, "tolerance": 0.1, "in": {"column": "a", "min_value": 0.25, "max_value": 10}, "out": {"success": True, "observed_value": 1.6974323016687487}, }, { "title": "negative_test_no_skew", "exact_match_out": False, "include_in_gallery": True, "tolerance": 0.1, "in": {"column": "b", "min_value": 0.25, "max_value": 10}, "out": {"success": False, "observed_value": -0.07638895580386174}, }, { "title": "positive_test_negative_skew", "exact_match_out": False, "include_in_gallery": True, "tolerance": 0.1, "in": {"column": "c", "min_value": -10, "max_value": -0.5}, "out": {"success": True, "observed_value": -0.9979514313860596}, }, { "title": "negative_test_abs_skew", "exact_match_out": False, "include_in_gallery": True, "tolerance": 0.1, "in": { "column": "c", "abs": True, "min_value": 0, "max_value": 0.5, }, "out": {"success": False, "observed_value": 0.9979514313860596}, }, { "title": "positive_test_abs_skew", "exact_match_out": False, "include_in_gallery": True, "tolerance": 0.1, "in": { "column": "c", "abs": True, "min_value": 0.5, "max_value": 10, }, "out": {"success": True, "observed_value": 0.9979514313860596}, }, ], } ] # This dictionary contains metadata for display in the public gallery library_metadata = { "maturity": "experimental", # "experimental", "beta", or "production" "tags": [ # Tags for this Expectation in the gallery # "experimental" ], "contributors": [ # Github handles for all contributors to this Expectation. 
"@lodeous", "@rexboyce", "@bragleg", "@mkopec87", ], } # Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\ metric_dependencies = ("column.custom.skew",) success_keys = ("min_value", "strict_min", "max_value", "strict_max", "abs") # Default values default_kwarg_values = { "min_value": None, "max_value": None, "strict_min": None, "strict_max": None, "abs": False, "result_format": "BASIC", "catch_exceptions": False, } # @classmethod # @renderer(renderer_type="renderer.prescriptive") # @render_suite_parameter_string # def _prescriptive_renderer( # cls, # configuration=None, # result=None, # runtime_configuration=None, # **kwargs, # ): # runtime_configuration = runtime_configuration or {} # include_column_name = False if runtime_configuration.get("include_column_name") is False else True # styling = runtime_configuration.get("styling") # params = substitute_none_for_missing( # configuration.kwargs, # [ # "column", # "min_value", # "max_value", # "row_condition", # "condition_parser", # "strict_min", # "strict_max", # ], # ) # # if (params["min_value"] is None) and (params["max_value"] is None): # template_str = "median may have any numerical value." # else: # at_least_str, at_most_str = handle_strict_min_max(params) # if params["min_value"] is not None and params["max_value"] is not None: # template_str = f"median must be {at_least_str} $min_value and {at_most_str} $max_value." # elif params["min_value"] is None: # template_str = f"median must be {at_most_str} $max_value." # elif params["max_value"] is None: # template_str = f"median must be {at_least_str} $min_value." # # if include_column_name: # template_str = "$column " + template_str # # if params["row_condition"] is not None: # ( # conditional_template_str, # conditional_params, # ) = parse_row_condition_string_pandas_engine(params["row_condition"]) # template_str = conditional_template_str + ", then " + template_str # params.update(conditional_params) # # return [ # RenderedStringTemplateContent( # **{ # "content_block_type": "string_template", # "string_template": { # "template": template_str, # "params": params, # "styling": styling, # }, # } # ) # ] def _validate( self, metrics: Dict, runtime_configuration: dict = None, execution_engine: ExecutionEngine = None, ): return self._validate_metric_value_between( metric_name="column.custom.skew", metrics=metrics, runtime_configuration=runtime_configuration, execution_engine=execution_engine, ) if __name__ == "__main__": ExpectColumnSkewToBeBetween().print_diagnostic_checklist()
target: ExpectColumnSkewToBeBetween
language: python
repo: PrefectHQ__prefect
path: src/prefect/cli/transfer/_migratable_resources/variables.py
class_span: { "start": 500, "end": 2179 }
source:
class ____(MigratableResource[Variable]): _instances: dict[uuid.UUID, Self] = {} def __init__(self, variable: Variable): self.source_variable = variable self.destination_variable: Variable | None = None @property def source_id(self) -> uuid.UUID: return self.source_variable.id @property def destination_id(self) -> uuid.UUID | None: return self.destination_variable.id if self.destination_variable else None @classmethod async def construct(cls, obj: Variable) -> Self: if obj.id in cls._instances: return cls._instances[obj.id] instance = cls(obj) cls._instances[obj.id] = instance return instance @classmethod async def get_instance(cls, id: uuid.UUID) -> "MigratableResource[Variable] | None": if id in cls._instances: return cls._instances[id] return None async def get_dependencies(self) -> "list[MigratableProtocol]": return [] async def migrate(self) -> None: async with get_client() as client: try: self.destination_variable = await client.create_variable( variable=VariableCreate( name=self.source_variable.name, value=self.source_variable.value, tags=self.source_variable.tags, ), ) except ObjectAlreadyExists: self.destination_variable = await client.read_variable_by_name( self.source_variable.name ) raise TransferSkipped("Already exists")
target: MigratableVariable
language: python
repo: realpython__materials
path: python-textual/layouts.py
class_span: { "start": 164, "end": 1050 }
source:
class ____(App): CSS_PATH = "layouts.tcss" def compose(self): with Horizontal(id="horizontal"): yield Static("Left", classes="box") with HorizontalScroll(id="horizontalscroll"): for i in range(NUM_BOXES): yield Static( f"Center.{i + 1}", classes="box yellowbox", ) with VerticalScroll(id="verticalscroll"): for i in range(NUM_BOXES): yield Static( f"Right.{i + 1}", classes="box redbox", ) yield Label( "I am a docked label.\nI don't move!", id="docked-label", ) if __name__ == "__main__": app = NestedContainersApp() app.run()
target: NestedContainersApp
language: python
repo: apache__airflow
path: airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_providers.py
class_span: { "start": 1694, "end": 3132 }
source:
class ____: @pytest.mark.parametrize( ("query_params", "expected_total_entries", "expected_package_name"), [ # Filters ({}, 2, ["apache-airflow-providers-amazon", "apache-airflow-providers-apache-cassandra"]), ({"limit": 1}, 2, ["apache-airflow-providers-amazon"]), ({"offset": 1}, 2, ["apache-airflow-providers-apache-cassandra"]), ], ) @mock.patch( "airflow.providers_manager.ProvidersManager.providers", new_callable=mock.PropertyMock, return_value=MOCK_PROVIDERS, ) def test_should_respond_200( self, mock_provider, test_client, query_params, expected_total_entries, expected_package_name ): with assert_queries_count(0): response = test_client.get("/providers", params=query_params) assert response.status_code == 200 body = response.json() assert body["total_entries"] == expected_total_entries assert [provider["package_name"] for provider in body["providers"]] == expected_package_name def test_should_response_401(self, unauthenticated_test_client): response = unauthenticated_test_client.get("/providers") assert response.status_code == 401 def test_should_response_403(self, unauthorized_test_client): response = unauthorized_test_client.get("/providers") assert response.status_code == 403
target: TestGetProviders
language: python
repo: pennersr__django-allauth
path: allauth/socialaccount/providers/gitea/views.py
class_span: { "start": 228, "end": 1242 }
source:
class ____(OAuth2Adapter): provider_id = "gitea" settings = app_settings.PROVIDERS.get(provider_id, {}) if "GITEA_URL" in settings: web_url = settings.get("GITEA_URL").rstrip("/") else: web_url = "https://gitea.com" api_url = "{0}/api/v1".format(web_url) access_token_url = "{0}/login/oauth/access_token".format(web_url) authorize_url = "{0}/login/oauth/authorize".format(web_url) profile_url = "{0}/user".format(api_url) def complete_login(self, request, app, token, **kwargs): headers = {"Authorization": "token {}".format(token.token)} resp = ( get_adapter().get_requests_session().get(self.profile_url, headers=headers) ) resp.raise_for_status() extra_data = resp.json() return self.get_provider().sociallogin_from_response(request, extra_data) oauth2_login = OAuth2LoginView.adapter_view(GiteaOAuth2Adapter) oauth2_callback = OAuth2CallbackView.adapter_view(GiteaOAuth2Adapter)
target: GiteaOAuth2Adapter
language: python
repo: microsoft__pyright
path: packages/pyright-internal/src/tests/samples/typeParams6.py
class_span: { "start": 329, "end": 367 }
source:
class ____[T3](dict[T1, T3]): ...
target: ClassA
language: python
repo: modin-project__modin
path: modin/tests/pandas/extensions/test_pd_extensions.py
class_span: { "start": 2508, "end": 7038 }
source:
class ____: def test_add_new_function(self): backend = "Pandas" expected_string_val = "Some string value" method_name = "new_method" @register_pd_accessor(method_name, backend=backend) def my_method_implementation(): return expected_string_val with config_context(Backend=backend): assert getattr(pd, method_name)() == expected_string_val with config_context(Backend="Python_Test"): with pytest.raises( AttributeError, match=re.escape( f"module 'modin.pandas' has no attribute {method_name}" ), ): getattr(pd, method_name)() def test_override_function(self): backend = "Pandas" expected_string_val = "Some string value" @register_pd_accessor("to_datetime", backend=backend) def my_method_implementation(*args, **kwargs): return expected_string_val with config_context(Backend=backend): # Since there are no query compiler inputs to to_datetime(), use # the to_datetime() implementation for Backend.get() assert pd.to_datetime(1) == expected_string_val with config_context(Backend="Python_Test"): # There are no query compiler inputs to to_datetime(), and # the current Backend.get() does not have a to_datetime() extension, # so fall back to the default to_datetime() implementation, which # should return the same result as pandas.to_datetime(). eval_general(pd, pandas, lambda lib: lib.to_datetime(1)) def test_add_new_non_method(self): backend = "Pandas" expected_val = 4 attribute_name = "four" register_pd_accessor(attribute_name, backend=backend)(expected_val) with config_context(Backend=backend): assert pd.four == expected_val with config_context(Backend="Python_Test"): assert not hasattr(pd, attribute_name) def test_to_datetime_dispatches_to_implementation_for_input(self): @register_pd_accessor("to_datetime", backend="Pandas") def pandas_to_datetime(*args, **kwargs): return "pandas_to_datetime_result" with config_context(Backend="Pandas"): pandas_backend_series = pd.Series(1) with config_context(Backend="Python_Test"): python_backend_df = pd.Series(1) assert pd.to_datetime(pandas_backend_series) == "pandas_to_datetime_result" df_equals( pd.to_datetime(python_backend_df), pandas.to_datetime(python_backend_df._to_pandas()), ) def test_concat_with_two_different_backends(self): with config_context(Backend="Pandas"): modin_on_pandas_df = pd.DataFrame({"a": [1, 2, 3]}) with config_context(Backend="Python_Test"): modin_on_python_df = pd.DataFrame({"a": [4, 5, 6]}) @register_pd_accessor("concat", backend="Pandas") def pandas_concat(*args, **kwargs): return "pandas_concat_result" @register_pd_accessor("concat", backend="Python_Test") def python_concat(*args, **kwargs): return "python_concat_result" # If the backends are different, we dispatch to the concat() override # for the backend of the first argument. 
assert ( pd.concat([modin_on_pandas_df, modin_on_python_df]) == "pandas_concat_result" ) # With inplace casting we need to reset the original dataframes modin_on_pandas_df.move_to("Pandas", inplace=True) modin_on_python_df.move_to("Python_Test", inplace=True) assert ( pd.concat([modin_on_python_df, modin_on_pandas_df]) == "python_concat_result" ) def test_index_class_override(self): class FakeIndex: def __init__(self, _values): pass def fake_method(self) -> str: return "python_fake_index" register_pd_accessor("Index", backend="Python_Test")(FakeIndex) with config_context(Backend="Pandas"): # Should return an actual native pandas index object df_equals(pd.Index([1]).to_series(), pd.Series([1], index=[1])) with config_context(Backend="Python_Test"): # Should just return a string assert pd.Index([1]).fake_method() == "python_fake_index"
target: TestRegisterForOneBackend
language: python
repo: huggingface__transformers
path: src/transformers/models/gemma3n/modeling_gemma3n.py
class_span: { "start": 64297, "end": 68384 }
source:
class ____(GradientCheckpointingLayer): def __init__(self, config: Gemma3nTextConfig, layer_idx: int): super().__init__() self.config = config self.hidden_size = config.hidden_size self.layer_idx = layer_idx self.attention_type = config.layer_types[layer_idx] self.self_attn = Gemma3nTextAttention(config, layer_idx) self.mlp = Gemma3nTextMLP(config, layer_idx=layer_idx) self.input_layernorm = Gemma3nRMSNorm(self.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = Gemma3nRMSNorm(self.hidden_size, eps=config.rms_norm_eps) self.pre_feedforward_layernorm = Gemma3nRMSNorm(self.hidden_size, eps=config.rms_norm_eps) self.post_feedforward_layernorm = Gemma3nRMSNorm(self.hidden_size, eps=config.rms_norm_eps) self.hidden_size_per_layer_input = config.hidden_size_per_layer_input self.act_fn = ACT2FN[config.hidden_activation] self.altup = Gemma3nTextAltUp(config) self.laurel = Gemma3nTextLaurelBlock(config) self.per_layer_input_gate = nn.Linear(self.hidden_size, self.hidden_size_per_layer_input, bias=False) self.per_layer_projection = nn.Linear(self.hidden_size_per_layer_input, self.hidden_size, bias=False) self.post_per_layer_input_norm = Gemma3nRMSNorm(self.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, position_embeddings: torch.Tensor = None, per_layer_input: torch.Tensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: predictions = self.altup.predict(hidden_states) active_prediction = predictions[self.config.altup_active_idx] active_prediction_normed = self.input_layernorm(active_prediction) laurel_output = self.laurel(active_prediction_normed) attn, self_attn_weights = self.self_attn( hidden_states=active_prediction_normed, attention_mask=attention_mask, position_ids=position_ids, position_embeddings=position_embeddings, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs, ) attn = self.post_attention_layernorm(attn) attn_gated = active_prediction + attn attn_laurel = (attn_gated + laurel_output) / math.sqrt(2) attn_norm = self.pre_feedforward_layernorm(attn_laurel) attn_ffw = self.mlp(attn_norm) attn_ffw_norm = self.post_feedforward_layernorm(attn_ffw) attn_ffw_laurel_gated = attn_laurel + attn_ffw_norm corrected_predictions = self.altup.correct(predictions, attn_ffw_laurel_gated) first_prediction = corrected_predictions[self.config.altup_active_idx].clone() if self.config.altup_correct_scale: first_prediction = self.altup.scale_corrected_output(first_prediction) # per_layer_input_gate adapted from jax.numpy.einsum("btd,dp->btp", ...) first_prediction = self.per_layer_input_gate(first_prediction) first_prediction = self.act_fn(first_prediction) first_prediction = torch.multiply(first_prediction, per_layer_input) # per_layer_projection adapted from jax.numpy.einsum("btp,pd->btd", ...) first_prediction = self.per_layer_projection(first_prediction) first_prediction = self.post_per_layer_input_norm(first_prediction) corrected_predictions[1:] += first_prediction outputs = (corrected_predictions,) if output_attentions: outputs += (self_attn_weights,) return outputs
target: Gemma3nTextDecoderLayer
language: python
repo: openai__openai-python
path: tests/lib/test_pydantic.py
class_span: { "start": 10465, "end": 10631 }
source:
class ____(BaseModel): name: str = Field(description="The name of the galaxy.") largest_star: Star = Field(description="The largest star in the galaxy.")
target: Galaxy
language: python
repo: sqlalchemy__sqlalchemy
path: test/orm/test_core_compilation.py
class_span: { "start": 83873, "end": 90279 }
source:
class ____( _poly_fixtures._PolymorphicAliasedJoins, RelationshipNaturalInheritedTest ): # this is the label style for the polymorphic selectable, not the # outside query label_style = LABEL_STYLE_TABLENAME_PLUS_COL straight_company_to_person_expected = ( "SELECT companies.company_id, companies.name FROM companies " "JOIN (SELECT people.person_id AS people_person_id, people.company_id " "AS people_company_id, people.name AS people_name, people.type " "AS people_type, engineers.person_id AS engineers_person_id, " "engineers.status AS engineers_status, engineers.engineer_name " "AS engineers_engineer_name, engineers.primary_language AS " "engineers_primary_language, managers.person_id AS " "managers_person_id, managers.status AS managers_status, " "managers.manager_name AS managers_manager_name FROM people " "LEFT OUTER JOIN engineers ON people.person_id = " "engineers.person_id LEFT OUTER JOIN managers ON people.person_id = " "managers.person_id) AS pjoin ON companies.company_id = " "pjoin.people_company_id" ) person_paperwork_expected = ( "SELECT companies.company_id, companies.name FROM companies JOIN " "(SELECT people.person_id AS people_person_id, people.company_id " "AS people_company_id, people.name AS people_name, people.type " "AS people_type, engineers.person_id AS engineers_person_id, " "engineers.status AS engineers_status, engineers.engineer_name " "AS engineers_engineer_name, engineers.primary_language AS " "engineers_primary_language, managers.person_id AS " "managers_person_id, managers.status AS managers_status, " "managers.manager_name AS managers_manager_name FROM people " "LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id " "LEFT OUTER JOIN managers ON people.person_id = managers.person_id) " "AS pjoin ON companies.company_id = pjoin.people_company_id " "JOIN paperwork ON pjoin.people_person_id = paperwork.person_id" ) default_pjoin = ( "(SELECT people.person_id AS people_person_id, " "people.company_id AS people_company_id, people.name AS people_name, " "people.type AS people_type, engineers.person_id AS " "engineers_person_id, engineers.status AS engineers_status, " "engineers.engineer_name AS engineers_engineer_name, " "engineers.primary_language AS engineers_primary_language, " "managers.person_id AS managers_person_id, managers.status " "AS managers_status, managers.manager_name AS managers_manager_name " "FROM people LEFT OUTER JOIN engineers ON people.person_id = " "engineers.person_id LEFT OUTER JOIN managers " "ON people.person_id = managers.person_id) AS pjoin " "ON companies.company_id = pjoin.people_company_id" ) flat_aliased_pjoin = ( "(SELECT people.person_id AS people_person_id, " "people.company_id AS people_company_id, people.name AS people_name, " "people.type AS people_type, engineers.person_id " "AS engineers_person_id, engineers.status AS engineers_status, " "engineers.engineer_name AS engineers_engineer_name, " "engineers.primary_language AS engineers_primary_language, " "managers.person_id AS managers_person_id, " "managers.status AS managers_status, managers.manager_name " "AS managers_manager_name FROM people " "LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id " "LEFT OUTER JOIN managers ON people.person_id = managers.person_id) " "AS pjoin_1 ON companies.company_id = pjoin_1.people_company_id" ) aliased_pjoin = ( "(SELECT people.person_id AS people_person_id, people.company_id " "AS people_company_id, people.name AS people_name, " "people.type AS people_type, engineers.person_id AS " "engineers_person_id, 
engineers.status AS engineers_status, " "engineers.engineer_name AS engineers_engineer_name, " "engineers.primary_language AS engineers_primary_language, " "managers.person_id AS managers_person_id, managers.status " "AS managers_status, managers.manager_name AS managers_manager_name " "FROM people LEFT OUTER JOIN engineers ON people.person_id = " "engineers.person_id LEFT OUTER JOIN managers " "ON people.person_id = managers.person_id) AS pjoin_1 " "ON companies.company_id = pjoin_1.people_company_id" ) c_to_p_whereclause = ( "SELECT companies.company_id, companies.name FROM companies " "JOIN (SELECT people.person_id AS people_person_id, " "people.company_id AS people_company_id, people.name AS people_name, " "people.type AS people_type, engineers.person_id AS " "engineers_person_id, engineers.status AS engineers_status, " "engineers.engineer_name AS engineers_engineer_name, " "engineers.primary_language AS engineers_primary_language, " "managers.person_id AS managers_person_id, managers.status " "AS managers_status, managers.manager_name AS managers_manager_name " "FROM people LEFT OUTER JOIN engineers " "ON people.person_id = engineers.person_id " "LEFT OUTER JOIN managers ON people.person_id = managers.person_id) " "AS pjoin ON companies.company_id = pjoin.people_company_id " "WHERE pjoin.people_name = :people_name_1" ) poly_columns = ( "SELECT pjoin.people_person_id FROM (SELECT people.person_id AS " "people_person_id, people.company_id AS people_company_id, " "people.name AS people_name, people.type AS people_type, " "engineers.person_id AS engineers_person_id, engineers.status " "AS engineers_status, engineers.engineer_name AS " "engineers_engineer_name, engineers.primary_language AS " "engineers_primary_language, managers.person_id AS " "managers_person_id, managers.status AS managers_status, " "managers.manager_name AS managers_manager_name FROM people " "LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id " "LEFT OUTER JOIN managers ON people.person_id = managers.person_id) " "AS pjoin" )
target: RelNaturalAliasedJoinsTest
language: python
repo: huggingface__transformers
path: tests/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py
class_span: { "start": 24523, "end": 31628 }
source:
class ____(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_low_prob(self): # with these settings num_masked_spans=0.5, which means probabilistic rounding # ensures that in 5 out of 10 method calls, num_masked_spans=0, and in # the other 5 out of 10, cases num_masked_spans=1 n_trials = 100 batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 count_dimensions_masked = 0 count_dimensions_not_masked = 0 for _ in range(n_trials): mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) num_masks = torch.sum(mask).item() if num_masks > 0: count_dimensions_masked += 1 else: count_dimensions_not_masked += 1 # as we test for at least 10 masked dimension and at least # 10 non-masked dimension, this test could fail with probability: # P(100 coin flips, at most 9 heads) = 1.66e-18 self.assertGreater(count_dimensions_masked, int(n_trials * 0.1)) self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1)) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) def test_compute_mask_indices_attn_mask_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) attention_mask[:2, sequence_length // 2 :] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask ) mask = torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0) def test_compute_mask_indices_short_audio(self): batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) # force one example to be heavily padded attention_mask[0, 5:] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2 ) # make sure that non-padded examples cannot be padded self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any()) # Ignore copy @unittest.skip(reason="Kept to make #Copied from working. 
Test a class used for pretraining, not yet supported.") def test_compute_perplexity(self): pass def test_sample_negatives(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view( sequence_length, hidden_size ) # each value in vector consists of same value features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() # sample negative indices sampled_negative_indices = _sample_negative_indices((batch_size, sequence_length), num_negatives, None) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertTrue(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) def test_sample_negatives_with_mask(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 # second half of last input tensor is padded mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) mask[-1, sequence_length // 2 :] = 0 features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view( sequence_length, hidden_size ) # each value in vector consists of same value features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() # replace masked feature vectors with -100 to test that those are not sampled features = torch.where(mask[:, :, None].expand(features.shape).bool(), features, -100) # sample negative indices sampled_negative_indices = _sample_negative_indices( (batch_size, sequence_length), num_negatives, mask.cpu().numpy() ) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue((negatives >= 0).all().item()) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertTrue(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) @require_torch @slow
Wav2Vec2BertUtilsTest
python
dask__dask
dask/dataframe/dask_expr/_str_accessor.py
{ "start": 298, "end": 3379 }
class ____(Accessor): """Accessor object for string properties of the Series values. Examples -------- >>> s.str.lower() # doctest: +SKIP """ _accessor_name = "str" _accessor_methods = ( "capitalize", "casefold", "center", "contains", "count", "decode", "encode", "endswith", "extract", "extractall", "find", "findall", "fullmatch", "get", "index", "isalnum", "isalpha", "isdecimal", "isdigit", "islower", "isnumeric", "isspace", "istitle", "isupper", "join", "len", "ljust", "lower", "lstrip", "match", "normalize", "pad", "partition", "removeprefix", "removesuffix", "repeat", "replace", "rfind", "rindex", "rjust", "rpartition", "rstrip", "slice", "slice_replace", "startswith", "strip", "swapcase", "title", "translate", "upper", "wrap", "zfill", ) _accessor_properties = () def _split(self, method, pat=None, n=-1, expand=False): from dask.dataframe.dask_expr import new_collection if expand: if n == -1: raise NotImplementedError( "To use the expand parameter you must specify the number of " "expected splits with the n= parameter. Usually n splits " "result in n+1 output columns." ) return new_collection( SplitMap( self._series, self._accessor_name, method, (), {"pat": pat, "n": n, "expand": expand}, ) ) return self._function_map(method, pat=pat, n=n, expand=expand) def split(self, pat=None, n=-1, expand=False): """Known inconsistencies: ``expand=True`` with unknown ``n`` will raise a ``NotImplementedError``.""" return self._split("split", pat=pat, n=n, expand=expand) def rsplit(self, pat=None, n=-1, expand=False): return self._split("rsplit", pat=pat, n=n, expand=expand) def cat(self, others=None, sep=None, na_rep=None): import pandas as pd from dask.dataframe.dask_expr._collection import Index, Series, new_collection if others is None: return new_collection(Cat(self._series, sep, na_rep)) valid_types = (Series, Index, pd.Series, pd.Index) if isinstance(others, valid_types): others = [others] elif not all(isinstance(a, valid_types) for a in others): raise TypeError("others must be Series/Index") return new_collection(CatBlockwise(self._series, sep, na_rep, *others)) def __getitem__(self, index): return self._function_map("__getitem__", index)
StringAccessor
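A minimal usage sketch for the string accessor above (illustrative, not part of the record; assumes pandas and dask.dataframe are available):

import pandas as pd
import dask.dataframe as dd

# Exercise a couple of the forwarded string methods shown above.
s = dd.from_pandas(pd.Series(["a-b", "c-d", "e-f"]), npartitions=2)
print(s.str.upper().compute().tolist())              # ['A-B', 'C-D', 'E-F']
print(s.str.split("-", n=1, expand=True).compute())  # expands into two columns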
python
jazzband__django-model-utils
tests/models.py
{ "start": 2009, "end": 2118 }
class ____(InheritanceManagerTestChild1): text_field = models.TextField()
InheritanceManagerTestGrandChild1
python
oauthlib__oauthlib
oauthlib/oauth2/rfc6749/errors.py
{ "start": 11495, "end": 11856 }
class ____(OAuth2Error): """ The Authorization Server requires End-User consent. This error MAY be returned when the prompt parameter value in the Authentication Request is none, but the Authentication Request cannot be completed without displaying a user interface for End-User consent. """ error = 'consent_required'
ConsentRequired
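A small illustrative sketch (not from the oauthlib sources) showing how this error carries its OIDC error code when raised and caught:

from oauthlib.oauth2.rfc6749.errors import ConsentRequired

try:
    # Hypothetical situation: prompt=none was requested but consent is still needed.
    raise ConsentRequired(description="prompt=none but End-User consent is required")
except ConsentRequired as exc:
    print(exc.error)        # consent_required
    print(exc.description)  # prompt=none but End-User consent is required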
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/dependency.py
{ "start": 18440, "end": 19544 }
class ____(NamedTuple("_NodeInput", [("node", Node), ("input_def", InputDefinition)])): def __new__(cls, node: Node, input_def: InputDefinition): return super().__new__( cls, check.inst_param(node, "node", Node), check.inst_param(input_def, "input_def", InputDefinition), ) def _inner_str(self) -> str: return struct_to_string( "NodeInput", node_name=self.node.name, input_name=self.input_def.name, ) def __str__(self): return self._inner_str() def __repr__(self): return self._inner_str() def __hash__(self): return hash((self.node.name, self.input_def.name)) def __eq__(self, other: object) -> bool: return ( isinstance(other, NodeInput) and self.node.name == other.node.name and self.input_def.name == other.input_def.name ) @property def node_name(self) -> str: return self.node.name @property def input_name(self) -> str: return self.input_def.name
NodeInput
python
apache__airflow
helm-tests/tests/helm_tests/airflow_core/test_scheduler.py
{ "start": 44368, "end": 44876 }
class ____: """Tests scheduler deployment creation.""" def test_can_be_disabled(self): """ Scheduler should be able to be disabled if the user desires. For example, a user may disable it when the scheduler is deployed on another host. """ docs = render_chart( values={"scheduler": {"enabled": False}}, show_only=["templates/scheduler/scheduler-deployment.yaml"], ) assert len(docs) == 0
TestSchedulerCreation
python
apache__airflow
airflow-core/src/airflow/utils/log/non_caching_file_handler.py
{ "start": 2211, "end": 3042 }
class ____(RotatingFileHandler): """ An extension of RotatingFileHandler that advises the kernel not to cache the written file in the page cache. While there is nothing wrong with such caching (it is reclaimed when memory is needed), it causes ever-growing memory usage while the scheduler is running, because the scheduler keeps writing new log files and the files are not rotated later on. This might confuse users who monitor the scheduler's memory usage, without them realising that the growth is harmless and expected in this case. See https://github.com/apache/airflow/issues/27065 Advising the kernel helps avoid generating the cache memory growth in the first place. """ def _open(self): return make_file_io_non_caching(super()._open())
NonCachingRotatingFileHandler
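The `make_file_io_non_caching` helper is not shown in this record; as an assumption for illustration only, the kind of kernel advice it relies on can be sketched with `posix_fadvise` on POSIX systems (the real helper may be implemented differently):

import os

# Illustrative only: write a log line, flush it to disk, then advise the kernel
# that the pages are not needed again so they can be dropped from the page cache.
# Whether make_file_io_non_caching works exactly this way is an assumption here.
with open("example.log", "ab") as f:
    f.write(b"a log line\n")
    f.flush()
    os.fsync(f.fileno())
    os.posix_fadvise(f.fileno(), 0, 0, os.POSIX_FADV_DONTNEED)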
python
pytorch__pytorch
torch/_dynamo/variables/distributed.py
{ "start": 9963, "end": 12453 }
class ____(DistributedVariable): @staticmethod def is_device_mesh(value: object) -> bool: # we can't rely on importing/accessing torch distributed, it is not always built. if not DistributedVariable.is_available(): return False from torch.distributed.device_mesh import DeviceMesh return istype(value, DeviceMesh) def as_python_constant(self) -> Any: return self.value def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker: if name == "ndim": return ConstantVariable.create(self.value.ndim) if name == "device_type": return ConstantVariable.create(self.value.device_type) if name == "mesh_dim_names": source = self.source if source: source = AttrSource(base=source, member="mesh_dim_names") return VariableTracker.build(tx, self.value.mesh_dim_names, source) return super().var_getattr(tx, name) def call_method( self, tx: "InstructionTranslator", name: str, args: list[VariableTracker], kwargs: dict[str, VariableTracker], ) -> VariableTracker: if name == "size": const_args = [x.as_python_constant() for x in args] const_kwargs = {k: v.as_python_constant() for k, v in kwargs.items()} return ConstantVariable.create(self.value.size(*const_args, **const_kwargs)) if name == "get_coordinate": return ConstantVariable.create(self.value.get_coordinate()) if name == "get_rank": return ConstantVariable.create(self.value.get_rank()) if name == "get_local_rank": const_args = [x.as_python_constant() for x in args] const_kwargs = {k: v.as_python_constant() for k, v in kwargs.items()} return ConstantVariable.create( self.value.get_local_rank(*const_args, **const_kwargs) ) if name == "get_group": const_args = [x.as_python_constant() for x in args] const_kwargs = {k: v.as_python_constant() for k, v in kwargs.items()} return ProcessGroupVariable( self.value.get_group(*const_args, **const_kwargs) ) if name == "_get_or_create_default_group": return ProcessGroupVariable(self.value._get_or_create_default_group()) return super().call_method(tx, name, args, kwargs)
DeviceMeshVariable
python
pyinstaller__pyinstaller
hatch_build.py
{ "start": 932, "end": 3131 }
class ____(BuildHookInterface): def initialize(self, version, build_data): # Inject the platform specifier into the wheel's filename. if os.environ.get("PYI_WHEEL_TAG"): build_data["tag"] = "py3-none-" + os.environ["PYI_WHEEL_TAG"] pyi_platform = os.environ.get("PYI_PLATFORM") if pyi_platform: if "Darwin" in pyi_platform: icons = ["icns"] elif "Windows" in pyi_platform: icons = ["ico"] else: icons = [] else: icons = ["ico", "icns"] build_data["artifacts"] += [ f"PyInstaller/bootloader/{pyi_platform or '*'}/*", *(f"PyInstaller/bootloader/images/*.{suffix}" for suffix in icons), ] self.run() def bootloader_exists(self): # Checks if the console, non-debug bootloader exists from PyInstaller import HOMEPATH, PLATFORM exe = 'run' pyi_platform = os.environ.get("PYI_PLATFORM", PLATFORM) if "Windows" in pyi_platform: exe = 'run.exe' exe = os.path.join(HOMEPATH, 'PyInstaller', 'bootloader', pyi_platform, exe) return os.path.isfile(exe) def compile_bootloader(self): import subprocess from PyInstaller import HOMEPATH src_dir = os.path.join(HOMEPATH, 'bootloader') additional_args = os.getenv('PYINSTALLER_BOOTLOADER_WAF_ARGS', '').strip().split() cmd = [sys.executable, './waf', 'configure', 'all'] cmd += additional_args rc = subprocess.call(cmd, cwd=src_dir) if rc: raise SystemExit('ERROR: Failed compiling the bootloader. Please compile manually and rerun') def run(self): if self.bootloader_exists() and not os.environ.get("PYINSTALLER_COMPILE_BOOTLOADER"): return print( 'No precompiled bootloader found or compile forced. Trying to compile the bootloader for you ...', file=sys.stderr ) self.compile_bootloader() if not self.bootloader_exists(): raise SystemExit("ERROR: Bootloaders have been compiled for the wrong platform")
CustomBuildHook
python
sympy__sympy
sympy/stats/crv_types.py
{ "start": 118850, "end": 120730 }
class ____(SingleContinuousDistribution): _argnames = ('alpha', 'beta') set = Interval(0, oo) @staticmethod def check(alpha, beta): _value_check(alpha > 0, "Alpha must be positive") _value_check(beta > 0, "Beta must be positive") def pdf(self, x): alpha, beta = self.alpha, self.beta return beta * (x/alpha)**(beta - 1) * exp(-(x/alpha)**beta) / alpha def Weibull(name, alpha, beta): r""" Create a continuous random variable with a Weibull distribution. Explanation =========== The density of the Weibull distribution is given by .. math:: f(x) := \begin{cases} \frac{k}{\lambda}\left(\frac{x}{\lambda}\right)^{k-1} e^{-(x/\lambda)^{k}} & x\geq0\\ 0 & x<0 \end{cases} Parameters ========== lambda : Real number, $\lambda > 0$, a scale k : Real number, $k > 0$, a shape Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Weibull, density, E, variance >>> from sympy import Symbol, simplify >>> l = Symbol("lambda", positive=True) >>> k = Symbol("k", positive=True) >>> z = Symbol("z") >>> X = Weibull("x", l, k) >>> density(X)(z) k*(z/lambda)**(k - 1)*exp(-(z/lambda)**k)/lambda >>> simplify(E(X)) lambda*gamma(1 + 1/k) >>> simplify(variance(X)) lambda**2*(-gamma(1 + 1/k)**2 + gamma(1 + 2/k)) References ========== .. [1] https://en.wikipedia.org/wiki/Weibull_distribution .. [2] https://mathworld.wolfram.com/WeibullDistribution.html """ return rv(name, WeibullDistribution, (alpha, beta)) #------------------------------------------------------------------------------- # Wigner semicircle distribution -----------------------------------------------
WeibullDistribution
python
pypa__warehouse
tests/unit/oidc/test_services.py
{ "start": 27717, "end": 38056 }
class ____: def test_interface_matches(self): assert verifyClass( interfaces.IOIDCPublisherService, services.NullOIDCPublisherService ) def test_warns_on_init(self, monkeypatch): warnings = pretend.stub(warn=pretend.call_recorder(lambda m, c: None)) monkeypatch.setattr(services, "warnings", warnings) service = services.NullOIDCPublisherService( session=pretend.stub(), publisher="example", issuer_url="https://example.com", audience="fakeaudience", cache_url="rediss://fake.example.com", metrics=pretend.stub(), ) assert service is not None assert warnings.warn.calls == [ pretend.call( "NullOIDCPublisherService is intended only for use in development, " "you should not use it in production due to the lack of actual " "JWT verification.", warehouse.utils.exceptions.InsecureOIDCPublisherWarning, ) ] def test_verify_jwt_signature_malformed_jwt(self): service = services.NullOIDCPublisherService( session=pretend.stub(), publisher="example", issuer_url="https://example.com", audience="fakeaudience", cache_url="rediss://fake.example.com", metrics=pretend.stub(), ) assert ( service.verify_jwt_signature("malformed-jwt", "https://example.com") is None ) def test_verify_jwt_signature_missing_aud(self): # { # "iss": "foo", # "iat": 1516239022, # "nbf": 1516239022, # "exp": 9999999999 # } jwt = ( "eyJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJmb28iLCJpYXQiOjE1MTYyMzkwMjIsIm5iZ" "iI6MTUxNjIzOTAyMiwiZXhwIjo5OTk5OTk5OTk5fQ.CAR9tx9_A6kxIDYWzXotuLfQ" "0wmvHDDO98rLO4F46y7QDWOalIok9yX3OzkWz-30TIBl1dleGVYbtZQzFNEJY13OLB" "gzFvxEpsAWvKJGyOLz-YDeGd2ApEZaggLvJiPZCngxFTH5fAyEcUUxQs5sCO9lGbkc" "E6lg_Di3VQhPohSuj_V7-DkcXefL3lV7m_JNOBoDWx_nDOFx4w2f8Z2NmswMrsu1vU" "NUZH7POiQBeyEsbY1at3u6gGerjyeYl8SIbeeRUWL0rtWxTgktoiKKgyPI-8F8Fpug" "jwtKZU_WFhIF4nA0les81hxnm8HFnoun2kx5cSF4Db3N8h6m8wRTUw" ) service = services.NullOIDCPublisherService( session=pretend.stub(), publisher="example", issuer_url="https://example.com", audience="fakeaudience", cache_url="rediss://fake.example.com", metrics=pretend.stub(), ) assert service.verify_jwt_signature(jwt, "https://example.com") is None def test_verify_jwt_signature_wrong_aud(self): # { # "iss": "foo", # "iat": 1516239022, # "nbf": 1516239022, # "exp": 9999999999, # "aud": "notpypi" # } jwt = ( "eyJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJmb28iLCJpYXQiOjE1MTYyMzkwMjIsIm5iZ" "iI6MTUxNjIzOTAyMiwiZXhwIjo5OTk5OTk5OTk5LCJhdWQiOiJub3RweXBpIn0.rFf" "rBXfGyRjU-tIo9dpJRkbnB2BLKK6uwjrE6g4pqwN-5BDn_UNR1Cw4t6Pw8kYOCRmVD" "aacu01L-GwHaXJmXyKsqIGie-bcp40zn1FX7dP000PQkAdhuQ-lILGhzscWNJK0J_g" "IewoFV9jNUVHJmK9UXx0hHl4eaH_3Ob22kzzIqNKuao2625qfLAdNfV44efArEubXT" "vBR-Y8HFzj7-7Zz7rHApImFYmC4E1aMDn_XEYJsXaJcwhhXJx8WB8SAhD7JZ-zotrd" "hlqkRMD9rXpv4DAMU15SEnw19tztVRf9OA4PO5Hd4uTKxPA1euBJgXa2g9QgIc1aFA" "FYKICTVgQ" ) service = services.NullOIDCPublisherService( session=pretend.stub(), publisher="example", issuer_url="https://example.com", audience="fakeaudience", cache_url="rediss://fake.example.com", metrics=pretend.stub(), ) assert service.verify_jwt_signature(jwt, "https://example.com") is None def test_verify_jwt_signature_strict_aud(self): # { # "iss": "foo", # "iat": 1516239022, # "nbf": 1516239022, # "exp": 9999999999, # "aud": ["notpypi", "pypi"] # } jwt = ( "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJmb28iLCJpYXQiOjE1M" "TYyMzkwMjIsIm5iZiI6MTUxNjIzOTAyMiwiZXhwIjo5OTk5OTk5OTk5LCJhdWQiOls" "ibm90cHlwaSIsInB5cGkiXX0.NhUFfjwUdXPT0IAVRuXeHbCq9ZDSY5JLEiDbAjrNwDM" ) service = services.NullOIDCPublisherService( session=pretend.stub(), publisher="example", issuer_url="https://example.com", audience="pypi", 
cache_url="rediss://fake.example.com", metrics=pretend.stub(), ) assert service.verify_jwt_signature(jwt, "https://example.com") is None def test_find_publisher(self, monkeypatch): claims = SignedClaims( { "iss": "foo", "iat": 1516239022, "nbf": 1516239022, "exp": 9999999999, "aud": "pypi", "jti": "6e67b1cb-2b8d-4be5-91cb-757edb2ec970", } ) service = services.NullOIDCPublisherService( session=pretend.stub(), publisher="example", issuer_url="https://example.com", audience="pypi", cache_url="rediss://fake.example.com", metrics=pretend.stub(), ) publisher = pretend.stub(verify_claims=pretend.call_recorder(lambda c, s: True)) find_publisher_by_issuer = pretend.call_recorder(lambda *a, **kw: publisher) monkeypatch.setattr( services, "find_publisher_by_issuer", find_publisher_by_issuer ) assert service.find_publisher(claims) == publisher def test_find_publisher_full_pending(self, github_oidc_service): pending_publisher = PendingGitHubPublisherFactory.create( project_name="does-not-exist", repository_name="bar", repository_owner="foo", repository_owner_id="123", workflow_filename="example.yml", environment="", ) claims = { "jti": "6e67b1cb-2b8d-4be5-91cb-757edb2ec970", "sub": "repo:foo/bar", "aud": "pypi", "ref": "fake", "sha": "fake", "repository": "foo/bar", "repository_owner": "foo", "repository_owner_id": "123", "run_id": "fake", "run_number": "fake", "run_attempt": "1", "repository_id": "fake", "actor_id": "fake", "actor": "foo", "workflow": "fake", "head_ref": "fake", "base_ref": "fake", "event_name": "fake", "ref_type": "fake", "environment": "fake", "job_workflow_ref": "foo/bar/.github/workflows/example.yml@fake", "iss": "https://token.actions.githubusercontent.com", "nbf": 1650663265, "exp": 1650664165, "iat": 1650663865, } expected_pending_publisher = github_oidc_service.find_publisher( claims, pending=True ) assert expected_pending_publisher == pending_publisher def test_find_publisher_full(self, github_oidc_service): publisher = GitHubPublisherFactory.create( repository_name="bar", repository_owner="foo", repository_owner_id="123", workflow_filename="example.yml", environment="", ) claims = { "jti": "6e67b1cb-2b8d-4be5-91cb-757edb2ec970", "sub": "repo:foo/bar", "aud": "pypi", "ref": "fake", "sha": "fake", "repository": "foo/bar", "repository_owner": "foo", "repository_owner_id": "123", "run_id": "fake", "run_number": "fake", "run_attempt": "1", "repository_id": "fake", "actor_id": "fake", "actor": "foo", "workflow": "fake", "head_ref": "fake", "base_ref": "fake", "event_name": "fake", "ref_type": "fake", "environment": "fake", "job_workflow_ref": "foo/bar/.github/workflows/example.yml@fake", "iss": "https://token.actions.githubusercontent.com", "nbf": 1650663265, "exp": 1650664165, "iat": 1650663865, } expected_publisher = github_oidc_service.find_publisher(claims, pending=False) assert expected_publisher == publisher def test_reify_publisher(self): service = services.NullOIDCPublisherService( session=pretend.stub(), publisher="example", issuer_url="https://example.com", audience="fakeaudience", cache_url="rediss://fake.example.com", metrics=pretend.stub(), ) publisher = pretend.stub() pending_publisher = pretend.stub( reify=pretend.call_recorder(lambda *a: publisher) ) project = pretend.stub( oidc_publishers=[], ) assert service.reify_pending_publisher(pending_publisher, project) == publisher assert pending_publisher.reify.calls == [pretend.call(service.db)] assert project.oidc_publishers == [publisher] def test_jwt_identifier_exists(self): service = 
services.NullOIDCPublisherService( session=pretend.stub(), publisher="example", issuer_url="https://example.com", audience="fakeaudience", cache_url="rediss://fake.example.com", metrics=pretend.stub(), ) assert service.jwt_identifier_exists(pretend.stub()) is False def test_store_jwt_identifier(self): service = services.NullOIDCPublisherService( session=pretend.stub(), publisher="example", issuer_url="https://example.com", audience="fakeaudience", cache_url="rediss://fake.example.com", metrics=pretend.stub(), ) assert service.store_jwt_identifier(pretend.stub(), pretend.stub()) is None
TestNullOIDCPublisherService
python
PyCQA__pylint
tests/functional/ext/docparams/parameter/missing_param_doc_required_Google.py
{ "start": 5853, "end": 6226 }
class ____: # [missing-param-doc, missing-type-doc] """test_constr_params_in_class_google Example of a class with missing constructor parameter documentation (Google style) Everything is completely analogous to functions. Args: y: bla missing constructor parameter documentation """ def __init__(self, x, y): pass
ClassFoo
python
wandb__wandb
wandb/automations/_generated/fragments.py
{ "start": 1879, "end": 2107 }
class ____(GQLResult): typename__: Typename[Literal["SlackIntegration"]] = "SlackIntegration" id: GQLId team_name: str = Field(alias="teamName") channel_name: str = Field(alias="channelName")
SlackIntegrationFields
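`GQLResult`, `GQLId` and `Typename` are wandb-internal helpers; the alias pattern itself can be sketched with plain pydantic (illustrative only, the names below are made up):

from pydantic import BaseModel, Field

class SlackIntegrationExample(BaseModel):
    # Mirrors the alias mapping above: camelCase GraphQL keys, snake_case attributes.
    id: str
    team_name: str = Field(alias="teamName")
    channel_name: str = Field(alias="channelName")

payload = {"id": "abc123", "teamName": "ops", "channelName": "#alerts"}
print(SlackIntegrationExample.model_validate(payload).team_name)  # ops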
python
joke2k__faker
faker/providers/automotive/th_TH/__init__.py
{ "start": 59, "end": 860 }
class ____(AutomotiveProvider): """Implement automotive provider for ``th_TH`` locale. Sources: - https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Thailand """ license_formats = ( "# ?? ####", "# ?? ###", "# ?? ##", "# ?? #", "?? ####", "?? ###", "?? ##", "?? #", "??? ###", "??? ##", "??? #", "##-####", ) thai_consonants = "กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรลวศษสหฬอฮ" def license_plate(self) -> str: """Generate a license plate.""" temp = re.sub( r"\?", lambda x: self.random_element(self.thai_consonants), self.random_element(self.license_formats), ) return self.numerify(temp)
Provider
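A minimal usage sketch (illustrative; assumes the faker package is installed):

from faker import Faker

# The th_TH automotive provider above is selected automatically for this locale.
fake = Faker("th_TH")
for _ in range(3):
    print(fake.license_plate())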
python
pytransitions__transitions
tests/test_experimental.py
{ "start": 9022, "end": 9207 }
class ____(TestExperimental): def setUp(self): self.machine_cls = HierarchicalMachine # type: Type[HierarchicalMachine] self.create_trigger_class()
TestHSMExperimental
python
getsentry__sentry
src/sentry/issue_detection/detectors/mn_plus_one_db_span_detector.py
{ "start": 731, "end": 1507 }
class ____(ABC): """Abstract base class for the MNPlusOneDBSpanDetector state machine.""" @abstractmethod def next(self, span: Span) -> tuple[MNPlusOneState, PerformanceProblem | None]: raise NotImplementedError def finish(self) -> PerformanceProblem | None: return None def _equivalent(self, a: Span, b: Span) -> bool: """db spans are equivalent if their ops and hashes match. Other spans are equivalent if their ops match.""" first_op = a.get("op") or None second_op = b.get("op") or None if not first_op or not second_op or first_op != second_op: return False if first_op.startswith("db"): return a.get("hash") == b.get("hash") return True
MNPlusOneState
python
GoogleCloudPlatform__python-docs-samples
appengine/standard/images/guestbook/main.py
{ "start": 3410, "end": 3867 }
class ____(webapp2.RequestHandler): def get(self): greeting_key = ndb.Key(urlsafe=self.request.get("img_id")) greeting = greeting_key.get() if greeting.avatar: self.response.headers["Content-Type"] = "image/png" self.response.out.write(greeting.avatar) else: self.response.out.write("No image") # [END gae_images_guestbook_image_handler] # [START gae_images_guestbook_sign_handler]
Image
python
pyca__cryptography
src/cryptography/x509/general_name.py
{ "start": 766, "end": 2125 }
class ____(GeneralName): def __init__(self, value: str) -> None: if isinstance(value, str): try: value.encode("ascii") except UnicodeEncodeError: raise ValueError( "RFC822Name values should be passed as an A-label string. " "This means unicode characters should be encoded via " "a library like idna." ) else: raise TypeError("value must be string") name, address = parseaddr(value) if name or not address: # parseaddr has found a name (e.g. Name <email>) or the entire # value is an empty string. raise ValueError("Invalid rfc822name value") self._value = value @property def value(self) -> str: return self._value @classmethod def _init_without_validation(cls, value: str) -> RFC822Name: instance = cls.__new__(cls) instance._value = value return instance def __repr__(self) -> str: return f"<RFC822Name(value={self.value!r})>" def __eq__(self, other: object) -> bool: if not isinstance(other, RFC822Name): return NotImplemented return self.value == other.value def __hash__(self) -> int: return hash(self.value)
RFC822Name
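A short usage sketch (illustrative only; not from the cryptography sources):

from cryptography.x509 import RFC822Name

name = RFC822Name("user@example.com")
print(name.value)                              # user@example.com
print(name == RFC822Name("user@example.com"))  # True

# Non-ASCII input is rejected; such values must be IDNA-encoded first.
try:
    RFC822Name("usér@example.com")
except ValueError as exc:
    print(exc)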
python
ray-project__ray
rllib/utils/exploration/per_worker_gaussian_noise.py
{ "start": 256, "end": 1779 }
class ____(GaussianNoise): """A per-worker Gaussian noise class for distributed algorithms. Sets the `scale` schedules of individual workers to a constant: 0.4 ^ (1 + [worker-index] / float([num-workers] - 1) * 7) See Ape-X paper. """ def __init__( self, action_space: Space, *, framework: Optional[str], num_workers: Optional[int], worker_index: Optional[int], **kwargs ): """ Args: action_space: The gym action space used by the environment. num_workers: The overall number of workers used. worker_index: The index of the Worker using this Exploration. framework: One of None, "tf", "torch". """ scale_schedule = None # Use a fixed, different epsilon per worker. See: Ape-X paper. if num_workers > 0: if worker_index > 0: num_workers_minus_1 = float(num_workers - 1) if num_workers > 1 else 1.0 exponent = 1 + (worker_index / num_workers_minus_1) * 7 scale_schedule = ConstantSchedule(0.4**exponent, framework=framework) # Local worker should have zero exploration so that eval # rollouts run properly. else: scale_schedule = ConstantSchedule(0.0, framework=framework) super().__init__( action_space, scale_schedule=scale_schedule, framework=framework, **kwargs )
PerWorkerGaussianNoise
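A quick numeric sketch of the constant per-worker scales that the schedule above produces (pure Python, no RLlib required):

# Reproduces the exponent formula from the constructor for 8 rollout workers.
num_workers = 8
for worker_index in range(num_workers + 1):
    if worker_index > 0:
        exponent = 1 + (worker_index / float(num_workers - 1)) * 7
        scale = 0.4 ** exponent
    else:
        scale = 0.0  # the local worker explores with zero noise
    print(worker_index, round(scale, 5))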
python
sympy__sympy
sympy/plotting/tests/test_plot.py
{ "start": 1533, "end": 1807 }
class ____(Plot): """ Used to verify if users can create their own backends. This backend is meant to raise NotImplementedError for methods `show`, `save`, `close`. """ def __new__(cls, *args, **kwargs): return object.__new__(cls)
DummyBackendNotOk
python
Unity-Technologies__ml-agents
ml-agents/mlagents/trainers/torch_entities/components/reward_providers/curiosity_reward_provider.py
{ "start": 2543, "end": 9339 }
class ____(torch.nn.Module): EPSILON = 1e-10 def __init__(self, specs: BehaviorSpec, settings: CuriositySettings) -> None: super().__init__() self._action_spec = specs.action_spec state_encoder_settings = settings.network_settings if state_encoder_settings.memory is not None: state_encoder_settings.memory = None logger.warning( "memory was specified in network_settings but is not supported by Curiosity. It is being ignored." ) self._state_encoder = NetworkBody( specs.observation_specs, state_encoder_settings ) self._action_flattener = ActionFlattener(self._action_spec) self.inverse_model_action_encoding = torch.nn.Sequential( LinearEncoder(2 * state_encoder_settings.hidden_units, 1, 256) ) if self._action_spec.continuous_size > 0: self.continuous_action_prediction = linear_layer( 256, self._action_spec.continuous_size ) if self._action_spec.discrete_size > 0: self.discrete_action_prediction = linear_layer( 256, sum(self._action_spec.discrete_branches) ) self.forward_model_next_state_prediction = torch.nn.Sequential( LinearEncoder( state_encoder_settings.hidden_units + self._action_flattener.flattened_size, 1, 256, ), linear_layer(256, state_encoder_settings.hidden_units), ) def get_current_state(self, mini_batch: AgentBuffer) -> torch.Tensor: """ Extracts the current state embedding from a mini_batch. """ n_obs = len(self._state_encoder.processors) np_obs = ObsUtil.from_buffer(mini_batch, n_obs) # Convert to tensors tensor_obs = [ModelUtils.list_to_tensor(obs) for obs in np_obs] hidden, _ = self._state_encoder.forward(tensor_obs) return hidden def get_next_state(self, mini_batch: AgentBuffer) -> torch.Tensor: """ Extracts the next state embedding from a mini_batch. """ n_obs = len(self._state_encoder.processors) np_obs = ObsUtil.from_buffer_next(mini_batch, n_obs) # Convert to tensors tensor_obs = [ModelUtils.list_to_tensor(obs) for obs in np_obs] hidden, _ = self._state_encoder.forward(tensor_obs) return hidden def predict_action(self, mini_batch: AgentBuffer) -> ActionPredictionTuple: """ In the continuous case, returns the predicted action. In the discrete case, returns the logits. """ inverse_model_input = torch.cat( (self.get_current_state(mini_batch), self.get_next_state(mini_batch)), dim=1 ) continuous_pred = None discrete_pred = None hidden = self.inverse_model_action_encoding(inverse_model_input) if self._action_spec.continuous_size > 0: continuous_pred = self.continuous_action_prediction(hidden) if self._action_spec.discrete_size > 0: raw_discrete_pred = self.discrete_action_prediction(hidden) branches = ModelUtils.break_into_branches( raw_discrete_pred, self._action_spec.discrete_branches ) branches = [torch.softmax(b, dim=1) for b in branches] discrete_pred = torch.cat(branches, dim=1) return ActionPredictionTuple(continuous_pred, discrete_pred) def predict_next_state(self, mini_batch: AgentBuffer) -> torch.Tensor: """ Uses the current state embedding and the action of the mini_batch to predict the next state embedding. """ actions = AgentAction.from_buffer(mini_batch) flattened_action = self._action_flattener.forward(actions) forward_model_input = torch.cat( (self.get_current_state(mini_batch), flattened_action), dim=1 ) return self.forward_model_next_state_prediction(forward_model_input) def compute_inverse_loss(self, mini_batch: AgentBuffer) -> torch.Tensor: """ Computes the inverse loss for a mini_batch. Corresponds to the error on the action prediction (given the current and next state). 
""" predicted_action = self.predict_action(mini_batch) actions = AgentAction.from_buffer(mini_batch) _inverse_loss = 0 if self._action_spec.continuous_size > 0: sq_difference = ( actions.continuous_tensor - predicted_action.continuous ) ** 2 sq_difference = torch.sum(sq_difference, dim=1) _inverse_loss += torch.mean( ModelUtils.dynamic_partition( sq_difference, ModelUtils.list_to_tensor( mini_batch[BufferKey.MASKS], dtype=torch.float ), 2, )[1] ) if self._action_spec.discrete_size > 0: true_action = torch.cat( ModelUtils.actions_to_onehot( actions.discrete_tensor, self._action_spec.discrete_branches ), dim=1, ) cross_entropy = torch.sum( -torch.log(predicted_action.discrete + self.EPSILON) * true_action, dim=1, ) _inverse_loss += torch.mean( ModelUtils.dynamic_partition( cross_entropy, ModelUtils.list_to_tensor( mini_batch[BufferKey.MASKS], dtype=torch.float ), # use masks not action_masks 2, )[1] ) return _inverse_loss def compute_reward(self, mini_batch: AgentBuffer) -> torch.Tensor: """ Calculates the curiosity reward for the mini_batch. Corresponds to the error between the predicted and actual next state. """ predicted_next_state = self.predict_next_state(mini_batch) target = self.get_next_state(mini_batch) sq_difference = 0.5 * (target - predicted_next_state) ** 2 sq_difference = torch.sum(sq_difference, dim=1) return sq_difference def compute_forward_loss(self, mini_batch: AgentBuffer) -> torch.Tensor: """ Computes the loss for the next state prediction """ return torch.mean( ModelUtils.dynamic_partition( self.compute_reward(mini_batch), ModelUtils.list_to_tensor( mini_batch[BufferKey.MASKS], dtype=torch.float ), 2, )[1] )
CuriosityNetwork
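A tiny numeric sketch of the forward-model error used as the curiosity signal in `compute_reward` above (illustrative; plain tensors stand in for the state embeddings):

import torch

# 0.5 * squared error between predicted and actual next-state embeddings,
# summed over the feature dimension; one intrinsic reward per sample.
target = torch.tensor([[0.2, 0.4], [1.0, 0.0]])
predicted = torch.tensor([[0.0, 0.5], [0.9, 0.1]])
reward = torch.sum(0.5 * (target - predicted) ** 2, dim=1)
print(reward)  # tensor([0.0250, 0.0100])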
python
tensorflow__tensorflow
tensorflow/python/ops/linalg/linear_operator_addition.py
{ "start": 9816, "end": 10983 }
class ____(_Adder): """Handles additions resulting in an Identity family member. The Identity (`LinearOperatorScaledIdentity`, `LinearOperatorIdentity`) family is closed under addition. This `Adder` respects that, and returns an Identity family member. """ def can_add(self, op1, op2): types = {_type(op1), _type(op2)} return not types.difference(_IDENTITY_FAMILY) def _add(self, op1, op2, operator_name, hints): # Will build a LinearOperatorScaledIdentity. if _type(op1) == _SCALED_IDENTITY: multiplier_1 = op1.multiplier else: multiplier_1 = array_ops.ones(op1.batch_shape_tensor(), dtype=op1.dtype) if _type(op2) == _SCALED_IDENTITY: multiplier_2 = op2.multiplier else: multiplier_2 = array_ops.ones(op2.batch_shape_tensor(), dtype=op2.dtype) return linear_operator_identity.LinearOperatorScaledIdentity( num_rows=op1.range_dimension_tensor(), multiplier=multiplier_1 + multiplier_2, is_non_singular=hints.is_non_singular, is_self_adjoint=hints.is_self_adjoint, is_positive_definite=hints.is_positive_definite, name=operator_name)
_AddAndReturnScaledIdentity
python
mwaskom__seaborn
seaborn/distributions.py
{ "start": 3018, "end": 86810 }
class ____(VectorPlotter): wide_structure = {"x": "@values", "hue": "@columns"} flat_structure = {"x": "@values"} def __init__( self, data=None, variables={}, ): super().__init__(data=data, variables=variables) @property def univariate(self): """Return True if only x or y are used.""" # TODO this could go down to core, but putting it here now. # We'd want to be conceptually clear that univariate only applies # to x/y and not to other semantics, which can exist. # We haven't settled on a good conceptual name for x/y. return bool({"x", "y"} - set(self.variables)) @property def data_variable(self): """Return the variable with data for univariate plots.""" # TODO This could also be in core, but it should have a better name. if not self.univariate: raise AttributeError("This is not a univariate plot") return {"x", "y"}.intersection(self.variables).pop() @property def has_xy_data(self): """Return True at least one of x or y is defined.""" # TODO see above points about where this should go return bool({"x", "y"} & set(self.variables)) def _add_legend( self, ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws, ): """Add artists that reflect semantic mappings and put then in a legend.""" # TODO note that this doesn't handle numeric mappings like the relational plots handles = [] labels = [] for level in self._hue_map.levels: color = self._hue_map(level) kws = self._artist_kws( artist_kws, fill, element, multiple, color, alpha ) # color gets added to the kws to workaround an issue with barplot's color # cycle integration but it causes problems in this context where we are # setting artist properties directly, so pop it off here if "facecolor" in kws: kws.pop("color", None) handles.append(artist(**kws)) labels.append(level) if isinstance(ax_obj, mpl.axes.Axes): ax_obj.legend(handles, labels, title=self.variables["hue"], **legend_kws) else: # i.e. a FacetGrid. TODO make this better legend_data = dict(zip(labels, handles)) ax_obj.add_legend( legend_data, title=self.variables["hue"], label_order=self.var_levels["hue"], **legend_kws ) def _artist_kws(self, kws, fill, element, multiple, color, alpha): """Handle differences between artists in filled/unfilled plots.""" kws = kws.copy() if fill: kws = normalize_kwargs(kws, mpl.collections.PolyCollection) kws.setdefault("facecolor", to_rgba(color, alpha)) if element == "bars": # Make bar() interface with property cycle correctly # https://github.com/matplotlib/matplotlib/issues/19385 kws["color"] = "none" if multiple in ["stack", "fill"] or element == "bars": kws.setdefault("edgecolor", mpl.rcParams["patch.edgecolor"]) else: kws.setdefault("edgecolor", to_rgba(color, 1)) elif element == "bars": kws["facecolor"] = "none" kws["edgecolor"] = to_rgba(color, alpha) else: kws["color"] = to_rgba(color, alpha) return kws def _quantile_to_level(self, data, quantile): """Return data levels corresponding to quantile cuts of mass.""" isoprop = np.asarray(quantile) values = np.ravel(data) sorted_values = np.sort(values)[::-1] normalized_values = np.cumsum(sorted_values) / values.sum() idx = np.searchsorted(normalized_values, 1 - isoprop) levels = np.take(sorted_values, idx, mode="clip") return levels def _cmap_from_color(self, color): """Return a sequential colormap given a color seed.""" # Like so much else here, this is broadly useful, but keeping it # in this class to signify that I haven't thought overly hard about it... 
r, g, b, _ = to_rgba(color) h, s, _ = husl.rgb_to_husl(r, g, b) xx = np.linspace(-1, 1, int(1.15 * 256))[:256] ramp = np.zeros((256, 3)) ramp[:, 0] = h ramp[:, 1] = s * np.cos(xx) ramp[:, 2] = np.linspace(35, 80, 256) colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1) return mpl.colors.ListedColormap(colors[::-1]) def _default_discrete(self): """Find default values for discrete hist estimation based on variable type.""" if self.univariate: discrete = self.var_types[self.data_variable] == "categorical" else: discrete_x = self.var_types["x"] == "categorical" discrete_y = self.var_types["y"] == "categorical" discrete = discrete_x, discrete_y return discrete def _resolve_multiple(self, curves, multiple): """Modify the density data structure to handle multiple densities.""" # Default baselines have all densities starting at 0 baselines = {k: np.zeros_like(v) for k, v in curves.items()} # TODO we should have some central clearinghouse for checking if any # "grouping" (terminnology?) semantics have been assigned if "hue" not in self.variables: return curves, baselines if multiple in ("stack", "fill"): # Setting stack or fill means that the curves share a # support grid / set of bin edges, so we can make a dataframe # Reverse the column order to plot from top to bottom curves = pd.DataFrame(curves).iloc[:, ::-1] # Find column groups that are nested within col/row variables column_groups = {} for i, keyd in enumerate(map(dict, curves.columns)): facet_key = keyd.get("col", None), keyd.get("row", None) column_groups.setdefault(facet_key, []) column_groups[facet_key].append(i) baselines = curves.copy() for col_idxs in column_groups.values(): cols = curves.columns[col_idxs] norm_constant = curves[cols].sum(axis="columns") # Take the cumulative sum to stack curves[cols] = curves[cols].cumsum(axis="columns") # Normalize by row sum to fill if multiple == "fill": curves[cols] = curves[cols].div(norm_constant, axis="index") # Define where each segment starts baselines[cols] = curves[cols].shift(1, axis=1).fillna(0) if multiple == "dodge": # Account for the unique semantic (non-faceting) levels # This will require rethiniking if we add other semantics! 
hue_levels = self.var_levels["hue"] n = len(hue_levels) f_fwd, f_inv = self._get_scale_transforms(self.data_variable) for key in curves: level = dict(key)["hue"] hist = curves[key].reset_index(name="heights") level_idx = hue_levels.index(level) a = f_fwd(hist["edges"]) b = f_fwd(hist["edges"] + hist["widths"]) w = (b - a) / n new_min = f_inv(a + level_idx * w) new_max = f_inv(a + (level_idx + 1) * w) hist["widths"] = new_max - new_min hist["edges"] = new_min curves[key] = hist.set_index(["edges", "widths"])["heights"] return curves, baselines # -------------------------------------------------------------------------------- # # Computation # -------------------------------------------------------------------------------- # def _compute_univariate_density( self, data_variable, common_norm, common_grid, estimate_kws, warn_singular=True, ): # Initialize the estimator object estimator = KDE(**estimate_kws) if set(self.variables) - {"x", "y"}: if common_grid: all_observations = self.comp_data.dropna() estimator.define_support(all_observations[data_variable]) else: common_norm = False all_data = self.plot_data.dropna() if common_norm and "weights" in all_data: whole_weight = all_data["weights"].sum() else: whole_weight = len(all_data) densities = {} for sub_vars, sub_data in self.iter_data("hue", from_comp_data=True): # Extract the data points from this sub set and remove nulls observations = sub_data[data_variable] # Extract the weights for this subset of observations if "weights" in self.variables: weights = sub_data["weights"] part_weight = weights.sum() else: weights = None part_weight = len(sub_data) # Estimate the density of observations at this level variance = np.nan_to_num(observations.var()) singular = len(observations) < 2 or math.isclose(variance, 0) try: if not singular: # Convoluted approach needed because numerical failures # can manifest in a few different ways. density, support = estimator(observations, weights=weights) except np.linalg.LinAlgError: singular = True if singular: msg = ( "Dataset has 0 variance; skipping density estimate. " "Pass `warn_singular=False` to disable this warning." ) if warn_singular: warnings.warn(msg, UserWarning, stacklevel=4) continue # Invert the scaling of the support points _, f_inv = self._get_scale_transforms(self.data_variable) support = f_inv(support) # Apply a scaling factor so that the integral over all subsets is 1 if common_norm: density *= part_weight / whole_weight # Store the density for this level key = tuple(sub_vars.items()) densities[key] = pd.Series(density, index=support) return densities # -------------------------------------------------------------------------------- # # Plotting # -------------------------------------------------------------------------------- # def plot_univariate_histogram( self, multiple, element, fill, common_norm, common_bins, shrink, kde, kde_kws, color, legend, line_kws, estimate_kws, **plot_kws, ): # -- Default keyword dicts kde_kws = {} if kde_kws is None else kde_kws.copy() line_kws = {} if line_kws is None else line_kws.copy() estimate_kws = {} if estimate_kws is None else estimate_kws.copy() # -- Input checking _check_argument("multiple", ["layer", "stack", "fill", "dodge"], multiple) _check_argument("element", ["bars", "step", "poly"], element) auto_bins_with_weights = ( "weights" in self.variables and estimate_kws["bins"] == "auto" and estimate_kws["binwidth"] is None and not estimate_kws["discrete"] ) if auto_bins_with_weights: msg = ( "`bins` cannot be 'auto' when using weights. 
" "Setting `bins=10`, but you will likely want to adjust." ) warnings.warn(msg, UserWarning) estimate_kws["bins"] = 10 # Simplify downstream code if we are not normalizing if estimate_kws["stat"] == "count": common_norm = False orient = self.data_variable # Now initialize the Histogram estimator estimator = Hist(**estimate_kws) histograms = {} # Do pre-compute housekeeping related to multiple groups all_data = self.comp_data.dropna() all_weights = all_data.get("weights", None) multiple_histograms = set(self.variables) - {"x", "y"} if multiple_histograms: if common_bins: bin_kws = estimator._define_bin_params(all_data, orient, None) else: common_norm = False if common_norm and all_weights is not None: whole_weight = all_weights.sum() else: whole_weight = len(all_data) # Estimate the smoothed kernel densities, for use later if kde: # TODO alternatively, clip at min/max bins? kde_kws.setdefault("cut", 0) kde_kws["cumulative"] = estimate_kws["cumulative"] densities = self._compute_univariate_density( self.data_variable, common_norm, common_bins, kde_kws, warn_singular=False, ) # First pass through the data to compute the histograms for sub_vars, sub_data in self.iter_data("hue", from_comp_data=True): # Prepare the relevant data key = tuple(sub_vars.items()) orient = self.data_variable if "weights" in self.variables: sub_data["weight"] = sub_data.pop("weights") part_weight = sub_data["weight"].sum() else: part_weight = len(sub_data) # Do the histogram computation if not (multiple_histograms and common_bins): bin_kws = estimator._define_bin_params(sub_data, orient, None) res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws)) heights = res[estimator.stat].to_numpy() widths = res["space"].to_numpy() edges = res[orient].to_numpy() - widths / 2 # Rescale the smoothed curve to match the histogram if kde and key in densities: density = densities[key] if estimator.cumulative: hist_norm = heights.max() else: hist_norm = (heights * widths).sum() densities[key] *= hist_norm # Convert edges back to original units for plotting ax = self._get_axes(sub_vars) _, inv = _get_transform_functions(ax, self.data_variable) widths = inv(edges + widths) - inv(edges) edges = inv(edges) # Pack the histogram data and metadata together edges = edges + (1 - shrink) / 2 * widths widths *= shrink index = pd.MultiIndex.from_arrays([ pd.Index(edges, name="edges"), pd.Index(widths, name="widths"), ]) hist = pd.Series(heights, index=index, name="heights") # Apply scaling to normalize across groups if common_norm: hist *= part_weight / whole_weight # Store the finalized histogram data for future plotting histograms[key] = hist # Modify the histogram and density data to resolve multiple groups histograms, baselines = self._resolve_multiple(histograms, multiple) if kde: densities, _ = self._resolve_multiple( densities, None if multiple == "dodge" else multiple ) # Set autoscaling-related meta sticky_stat = (0, 1) if multiple == "fill" else (0, np.inf) if multiple == "fill": # Filled plots should not have any margins bin_vals = histograms.index.to_frame() edges = bin_vals["edges"] widths = bin_vals["widths"] sticky_data = ( edges.min(), edges.max() + widths.loc[edges.idxmax()] ) else: sticky_data = [] # --- Handle default visual attributes # Note: default linewidth is determined after plotting # Default alpha should depend on other parameters if fill: # Note: will need to account for other grouping semantics if added if "hue" in self.variables and multiple == "layer": default_alpha = .5 if element == "bars" else .25 
elif kde: default_alpha = .5 else: default_alpha = .75 else: default_alpha = 1 alpha = plot_kws.pop("alpha", default_alpha) # TODO make parameter? hist_artists = [] # Go back through the dataset and draw the plots for sub_vars, _ in self.iter_data("hue", reverse=True): key = tuple(sub_vars.items()) hist = histograms[key].rename("heights").reset_index() bottom = np.asarray(baselines[key]) ax = self._get_axes(sub_vars) # Define the matplotlib attributes that depend on semantic mapping if "hue" in self.variables: sub_color = self._hue_map(sub_vars["hue"]) else: sub_color = color artist_kws = self._artist_kws( plot_kws, fill, element, multiple, sub_color, alpha ) if element == "bars": # Use matplotlib bar plotting plot_func = ax.bar if self.data_variable == "x" else ax.barh artists = plot_func( hist["edges"], hist["heights"] - bottom, hist["widths"], bottom, align="edge", **artist_kws, ) for bar in artists: if self.data_variable == "x": bar.sticky_edges.x[:] = sticky_data bar.sticky_edges.y[:] = sticky_stat else: bar.sticky_edges.x[:] = sticky_stat bar.sticky_edges.y[:] = sticky_data hist_artists.extend(artists) else: # Use either fill_between or plot to draw hull of histogram if element == "step": final = hist.iloc[-1] x = np.append(hist["edges"], final["edges"] + final["widths"]) y = np.append(hist["heights"], final["heights"]) b = np.append(bottom, bottom[-1]) if self.data_variable == "x": step = "post" drawstyle = "steps-post" else: step = "post" # fillbetweenx handles mapping internally drawstyle = "steps-pre" elif element == "poly": x = hist["edges"] + hist["widths"] / 2 y = hist["heights"] b = bottom step = None drawstyle = None if self.data_variable == "x": if fill: artist = ax.fill_between(x, b, y, step=step, **artist_kws) else: artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws) artist.sticky_edges.x[:] = sticky_data artist.sticky_edges.y[:] = sticky_stat else: if fill: artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws) else: artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws) artist.sticky_edges.x[:] = sticky_stat artist.sticky_edges.y[:] = sticky_data hist_artists.append(artist) if kde: # Add in the density curves try: density = densities[key] except KeyError: continue support = density.index if "x" in self.variables: line_args = support, density sticky_x, sticky_y = None, (0, np.inf) else: line_args = density, support sticky_x, sticky_y = (0, np.inf), None line_kws["color"] = to_rgba(sub_color, 1) line, = ax.plot( *line_args, **line_kws, ) if sticky_x is not None: line.sticky_edges.x[:] = sticky_x if sticky_y is not None: line.sticky_edges.y[:] = sticky_y if element == "bars" and "linewidth" not in plot_kws: # Now we handle linewidth, which depends on the scaling of the plot # We will base everything on the minimum bin width hist_metadata = pd.concat([ # Use .items for generality over dict or df h.index.to_frame() for _, h in histograms.items() ]).reset_index(drop=True) thin_bar_idx = hist_metadata["widths"].idxmin() binwidth = hist_metadata.loc[thin_bar_idx, "widths"] left_edge = hist_metadata.loc[thin_bar_idx, "edges"] # Set initial value default_linewidth = math.inf # Loop through subsets based only on facet variables for sub_vars, _ in self.iter_data(): ax = self._get_axes(sub_vars) # Needed in some cases to get valid transforms. # Innocuous in other cases? 
ax.autoscale_view() # Convert binwidth from data coordinates to pixels pts_x, pts_y = 72 / ax.figure.dpi * abs( ax.transData.transform([left_edge + binwidth] * 2) - ax.transData.transform([left_edge] * 2) ) if self.data_variable == "x": binwidth_points = pts_x else: binwidth_points = pts_y # The relative size of the lines depends on the appearance # This is a provisional value and may need more tweaking default_linewidth = min(.1 * binwidth_points, default_linewidth) # Set the attributes for bar in hist_artists: # Don't let the lines get too thick max_linewidth = bar.get_linewidth() if not fill: max_linewidth *= 1.5 linewidth = min(default_linewidth, max_linewidth) # If not filling, don't let lines disappear if not fill: min_linewidth = .5 linewidth = max(linewidth, min_linewidth) bar.set_linewidth(linewidth) # --- Finalize the plot ---- # Axis labels ax = self.ax if self.ax is not None else self.facets.axes.flat[0] default_x = default_y = "" if self.data_variable == "x": default_y = estimator.stat.capitalize() if self.data_variable == "y": default_x = estimator.stat.capitalize() self._add_axis_labels(ax, default_x, default_y) # Legend for semantic variables if "hue" in self.variables and legend: if fill or element == "bars": artist = partial(mpl.patches.Patch) else: artist = partial(mpl.lines.Line2D, [], []) ax_obj = self.ax if self.ax is not None else self.facets self._add_legend( ax_obj, artist, fill, element, multiple, alpha, plot_kws, {}, ) def plot_bivariate_histogram( self, common_bins, common_norm, thresh, pthresh, pmax, color, legend, cbar, cbar_ax, cbar_kws, estimate_kws, **plot_kws, ): # Default keyword dicts cbar_kws = {} if cbar_kws is None else cbar_kws.copy() # Now initialize the Histogram estimator estimator = Histogram(**estimate_kws) # Do pre-compute housekeeping related to multiple groups if set(self.variables) - {"x", "y"}: all_data = self.comp_data.dropna() if common_bins: estimator.define_bin_params( all_data["x"], all_data["y"], all_data.get("weights", None), ) else: common_norm = False # -- Determine colormap threshold and norm based on the full data full_heights = [] for _, sub_data in self.iter_data(from_comp_data=True): sub_heights, _ = estimator( sub_data["x"], sub_data["y"], sub_data.get("weights", None) ) full_heights.append(sub_heights) common_color_norm = not set(self.variables) - {"x", "y"} or common_norm if pthresh is not None and common_color_norm: thresh = self._quantile_to_level(full_heights, pthresh) plot_kws.setdefault("vmin", 0) if common_color_norm: if pmax is not None: vmax = self._quantile_to_level(full_heights, pmax) else: vmax = plot_kws.pop("vmax", max(map(np.max, full_heights))) else: vmax = None # Get a default color # (We won't follow the color cycle here, as multiple plots are unlikely) if color is None: color = "C0" # --- Loop over data (subsets) and draw the histograms for sub_vars, sub_data in self.iter_data("hue", from_comp_data=True): if sub_data.empty: continue # Do the histogram computation heights, (x_edges, y_edges) = estimator( sub_data["x"], sub_data["y"], weights=sub_data.get("weights", None), ) # Get the axes for this plot ax = self._get_axes(sub_vars) # Invert the scale for the edges _, inv_x = _get_transform_functions(ax, "x") _, inv_y = _get_transform_functions(ax, "y") x_edges = inv_x(x_edges) y_edges = inv_y(y_edges) # Apply scaling to normalize across groups if estimator.stat != "count" and common_norm: heights *= len(sub_data) / len(all_data) # Define the specific kwargs for this artist artist_kws = plot_kws.copy() if 
"hue" in self.variables: color = self._hue_map(sub_vars["hue"]) cmap = self._cmap_from_color(color) artist_kws["cmap"] = cmap else: cmap = artist_kws.pop("cmap", None) if isinstance(cmap, str): cmap = color_palette(cmap, as_cmap=True) elif cmap is None: cmap = self._cmap_from_color(color) artist_kws["cmap"] = cmap # Set the upper norm on the colormap if not common_color_norm and pmax is not None: vmax = self._quantile_to_level(heights, pmax) if vmax is not None: artist_kws["vmax"] = vmax # Make cells at or below the threshold transparent if not common_color_norm and pthresh: thresh = self._quantile_to_level(heights, pthresh) if thresh is not None: heights = np.ma.masked_less_equal(heights, thresh) # pcolormesh is going to turn the grid off, but we want to keep it # I'm not sure if there's a better way to get the grid state x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()]) y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()]) mesh = ax.pcolormesh( x_edges, y_edges, heights.T, **artist_kws, ) # pcolormesh sets sticky edges, but we only want them if not thresholding if thresh is not None: mesh.sticky_edges.x[:] = [] mesh.sticky_edges.y[:] = [] # Add an optional colorbar # Note, we want to improve this. When hue is used, it will stack # multiple colorbars with redundant ticks in an ugly way. # But it's going to take some work to have multiple colorbars that # share ticks nicely. if cbar: ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws) # Reset the grid state if x_grid: ax.grid(True, axis="x") if y_grid: ax.grid(True, axis="y") # --- Finalize the plot ax = self.ax if self.ax is not None else self.facets.axes.flat[0] self._add_axis_labels(ax) if "hue" in self.variables and legend: # TODO if possible, I would like to move the contour # intensity information into the legend too and label the # iso proportions rather than the raw density values artist_kws = {} artist = partial(mpl.patches.Patch) ax_obj = self.ax if self.ax is not None else self.facets self._add_legend( ax_obj, artist, True, False, "layer", 1, artist_kws, {}, ) def plot_univariate_density( self, multiple, common_norm, common_grid, warn_singular, fill, color, legend, estimate_kws, **plot_kws, ): # Handle conditional defaults if fill is None: fill = multiple in ("stack", "fill") # Preprocess the matplotlib keyword dictionaries if fill: artist = mpl.collections.PolyCollection else: artist = mpl.lines.Line2D plot_kws = normalize_kwargs(plot_kws, artist) # Input checking _check_argument("multiple", ["layer", "stack", "fill"], multiple) # Always share the evaluation grid when stacking subsets = bool(set(self.variables) - {"x", "y"}) if subsets and multiple in ("stack", "fill"): common_grid = True # Do the computation densities = self._compute_univariate_density( self.data_variable, common_norm, common_grid, estimate_kws, warn_singular, ) # Adjust densities based on the `multiple` rule densities, baselines = self._resolve_multiple(densities, multiple) # Control the interaction with autoscaling by defining sticky_edges # i.e. we don't want autoscale margins below the density curve sticky_density = (0, 1) if multiple == "fill" else (0, np.inf) if multiple == "fill": # Filled plots should not have any margins sticky_support = densities.index.min(), densities.index.max() else: sticky_support = [] if fill: if multiple == "layer": default_alpha = .25 else: default_alpha = .75 else: default_alpha = 1 alpha = plot_kws.pop("alpha", default_alpha) # TODO make parameter? 
# Now iterate through the subsets and draw the densities # We go backwards so stacked densities read from top-to-bottom for sub_vars, _ in self.iter_data("hue", reverse=True): # Extract the support grid and density curve for this level key = tuple(sub_vars.items()) try: density = densities[key] except KeyError: continue support = density.index fill_from = baselines[key] ax = self._get_axes(sub_vars) if "hue" in self.variables: sub_color = self._hue_map(sub_vars["hue"]) else: sub_color = color artist_kws = self._artist_kws( plot_kws, fill, False, multiple, sub_color, alpha ) # Either plot a curve with observation values on the x axis if "x" in self.variables: if fill: artist = ax.fill_between(support, fill_from, density, **artist_kws) else: artist, = ax.plot(support, density, **artist_kws) artist.sticky_edges.x[:] = sticky_support artist.sticky_edges.y[:] = sticky_density # Or plot a curve with observation values on the y axis else: if fill: artist = ax.fill_betweenx(support, fill_from, density, **artist_kws) else: artist, = ax.plot(density, support, **artist_kws) artist.sticky_edges.x[:] = sticky_density artist.sticky_edges.y[:] = sticky_support # --- Finalize the plot ---- ax = self.ax if self.ax is not None else self.facets.axes.flat[0] default_x = default_y = "" if self.data_variable == "x": default_y = "Density" if self.data_variable == "y": default_x = "Density" self._add_axis_labels(ax, default_x, default_y) if "hue" in self.variables and legend: if fill: artist = partial(mpl.patches.Patch) else: artist = partial(mpl.lines.Line2D, [], []) ax_obj = self.ax if self.ax is not None else self.facets self._add_legend( ax_obj, artist, fill, False, multiple, alpha, plot_kws, {}, ) def plot_bivariate_density( self, common_norm, fill, levels, thresh, color, legend, cbar, warn_singular, cbar_ax, cbar_kws, estimate_kws, **contour_kws, ): contour_kws = contour_kws.copy() estimator = KDE(**estimate_kws) if not set(self.variables) - {"x", "y"}: common_norm = False all_data = self.plot_data.dropna() # Loop through the subsets and estimate the KDEs densities, supports = {}, {} for sub_vars, sub_data in self.iter_data("hue", from_comp_data=True): # Extract the data points from this sub set observations = sub_data[["x", "y"]] min_variance = observations.var().fillna(0).min() observations = observations["x"], observations["y"] # Extract the weights for this subset of observations if "weights" in self.variables: weights = sub_data["weights"] else: weights = None # Estimate the density of observations at this level singular = math.isclose(min_variance, 0) try: if not singular: density, support = estimator(*observations, weights=weights) except np.linalg.LinAlgError: # Testing for 0 variance doesn't catch all cases where scipy raises, # but we can also get a ValueError, so we need this convoluted approach singular = True if singular: msg = ( "KDE cannot be estimated (0 variance or perfect covariance). " "Pass `warn_singular=False` to disable this warning." 
) if warn_singular: warnings.warn(msg, UserWarning, stacklevel=3) continue # Transform the support grid back to the original scale ax = self._get_axes(sub_vars) _, inv_x = _get_transform_functions(ax, "x") _, inv_y = _get_transform_functions(ax, "y") support = inv_x(support[0]), inv_y(support[1]) # Apply a scaling factor so that the integral over all subsets is 1 if common_norm: density *= len(sub_data) / len(all_data) key = tuple(sub_vars.items()) densities[key] = density supports[key] = support # Define a grid of iso-proportion levels if thresh is None: thresh = 0 if isinstance(levels, Number): levels = np.linspace(thresh, 1, levels) else: if min(levels) < 0 or max(levels) > 1: raise ValueError("levels must be in [0, 1]") # Transform from iso-proportions to iso-densities if common_norm: common_levels = self._quantile_to_level( list(densities.values()), levels, ) draw_levels = {k: common_levels for k in densities} else: draw_levels = { k: self._quantile_to_level(d, levels) for k, d in densities.items() } # Define the coloring of the contours if "hue" in self.variables: for param in ["cmap", "colors"]: if param in contour_kws: msg = f"{param} parameter ignored when using hue mapping." warnings.warn(msg, UserWarning) contour_kws.pop(param) else: # Work out a default coloring of the contours coloring_given = set(contour_kws) & {"cmap", "colors"} if fill and not coloring_given: cmap = self._cmap_from_color(color) contour_kws["cmap"] = cmap if not fill and not coloring_given: contour_kws["colors"] = [color] # Use our internal colormap lookup cmap = contour_kws.pop("cmap", None) if isinstance(cmap, str): cmap = color_palette(cmap, as_cmap=True) if cmap is not None: contour_kws["cmap"] = cmap # Loop through the subsets again and plot the data for sub_vars, _ in self.iter_data("hue"): if "hue" in sub_vars: color = self._hue_map(sub_vars["hue"]) if fill: contour_kws["cmap"] = self._cmap_from_color(color) else: contour_kws["colors"] = [color] ax = self._get_axes(sub_vars) # Choose the function to plot with # TODO could add a pcolormesh based option as well # Which would look something like element="raster" if fill: contour_func = ax.contourf else: contour_func = ax.contour key = tuple(sub_vars.items()) if key not in densities: continue density = densities[key] xx, yy = supports[key] # Pop the label kwarg which is unused by contour_func (but warns) contour_kws.pop("label", None) cset = contour_func( xx, yy, density, levels=draw_levels[key], **contour_kws, ) # Add a color bar representing the contour heights # Note: this shows iso densities, not iso proportions # See more notes in histplot about how this could be improved if cbar: cbar_kws = {} if cbar_kws is None else cbar_kws ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws) # --- Finalize the plot ax = self.ax if self.ax is not None else self.facets.axes.flat[0] self._add_axis_labels(ax) if "hue" in self.variables and legend: # TODO if possible, I would like to move the contour # intensity information into the legend too and label the # iso proportions rather than the raw density values artist_kws = {} if fill: artist = partial(mpl.patches.Patch) else: artist = partial(mpl.lines.Line2D, [], []) ax_obj = self.ax if self.ax is not None else self.facets self._add_legend( ax_obj, artist, fill, False, "layer", 1, artist_kws, {}, ) def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws): estimator = ECDF(**estimate_kws) # Set the draw style to step the right way for the data variable drawstyles = dict(x="steps-post", y="steps-pre") 
plot_kws["drawstyle"] = drawstyles[self.data_variable] # Loop through the subsets, transform and plot the data for sub_vars, sub_data in self.iter_data( "hue", reverse=True, from_comp_data=True, ): # Compute the ECDF if sub_data.empty: continue observations = sub_data[self.data_variable] weights = sub_data.get("weights", None) stat, vals = estimator(observations, weights=weights) # Assign attributes based on semantic mapping artist_kws = plot_kws.copy() if "hue" in self.variables: artist_kws["color"] = self._hue_map(sub_vars["hue"]) # Return the data variable to the linear domain ax = self._get_axes(sub_vars) _, inv = _get_transform_functions(ax, self.data_variable) vals = inv(vals) # Manually set the minimum value on a "log" scale if isinstance(inv.__self__, mpl.scale.LogTransform): vals[0] = -np.inf # Work out the orientation of the plot if self.data_variable == "x": plot_args = vals, stat stat_variable = "y" else: plot_args = stat, vals stat_variable = "x" if estimator.stat == "count": top_edge = len(observations) else: top_edge = 1 # Draw the line for this subset artist, = ax.plot(*plot_args, **artist_kws) sticky_edges = getattr(artist.sticky_edges, stat_variable) sticky_edges[:] = 0, top_edge # --- Finalize the plot ---- ax = self.ax if self.ax is not None else self.facets.axes.flat[0] stat = estimator.stat.capitalize() default_x = default_y = "" if self.data_variable == "x": default_y = stat if self.data_variable == "y": default_x = stat self._add_axis_labels(ax, default_x, default_y) if "hue" in self.variables and legend: artist = partial(mpl.lines.Line2D, [], []) alpha = plot_kws.get("alpha", 1) ax_obj = self.ax if self.ax is not None else self.facets self._add_legend( ax_obj, artist, False, False, None, alpha, plot_kws, {}, ) def plot_rug(self, height, expand_margins, legend, **kws): for sub_vars, sub_data, in self.iter_data(from_comp_data=True): ax = self._get_axes(sub_vars) kws.setdefault("linewidth", 1) if expand_margins: xmarg, ymarg = ax.margins() if "x" in self.variables: ymarg += height * 2 if "y" in self.variables: xmarg += height * 2 ax.margins(x=xmarg, y=ymarg) if "hue" in self.variables: kws.pop("c", None) kws.pop("color", None) if "x" in self.variables: self._plot_single_rug(sub_data, "x", height, ax, kws) if "y" in self.variables: self._plot_single_rug(sub_data, "y", height, ax, kws) # --- Finalize the plot self._add_axis_labels(ax) if "hue" in self.variables and legend: # TODO ideally i'd like the legend artist to look like a rug legend_artist = partial(mpl.lines.Line2D, [], []) self._add_legend( ax, legend_artist, False, False, None, 1, {}, {}, ) def _plot_single_rug(self, sub_data, var, height, ax, kws): """Draw a rugplot along one axis of the plot.""" vector = sub_data[var] n = len(vector) # Return data to linear domain _, inv = _get_transform_functions(ax, var) vector = inv(vector) # We'll always add a single collection with varying colors if "hue" in self.variables: colors = self._hue_map(sub_data["hue"]) else: colors = None # Build the array of values for the LineCollection if var == "x": trans = tx.blended_transform_factory(ax.transData, ax.transAxes) xy_pairs = np.column_stack([ np.repeat(vector, 2), np.tile([0, height], n) ]) if var == "y": trans = tx.blended_transform_factory(ax.transAxes, ax.transData) xy_pairs = np.column_stack([ np.tile([0, height], n), np.repeat(vector, 2) ]) # Draw the lines on the plot line_segs = xy_pairs.reshape([n, 2, 2]) ax.add_collection(LineCollection( line_segs, transform=trans, colors=colors, **kws )) 
ax.autoscale_view(scalex=var == "x", scaley=var == "y") # ==================================================================================== # # External API # ==================================================================================== # def histplot( data=None, *, # Vector variables x=None, y=None, hue=None, weights=None, # Histogram computation parameters stat="count", bins="auto", binwidth=None, binrange=None, discrete=None, cumulative=False, common_bins=True, common_norm=True, # Histogram appearance parameters multiple="layer", element="bars", fill=True, shrink=1, # Histogram smoothing with a kernel density estimate kde=False, kde_kws=None, line_kws=None, # Bivariate histogram parameters thresh=0, pthresh=None, pmax=None, cbar=False, cbar_ax=None, cbar_kws=None, # Hue mapping parameters palette=None, hue_order=None, hue_norm=None, color=None, # Axes information log_scale=None, legend=True, ax=None, # Other appearance keywords **kwargs, ): p = _DistributionPlotter( data=data, variables=dict(x=x, y=y, hue=hue, weights=weights), ) p.map_hue(palette=palette, order=hue_order, norm=hue_norm) if ax is None: ax = plt.gca() p._attach(ax, log_scale=log_scale) if p.univariate: # Note, bivariate plots won't cycle if fill: method = ax.bar if element == "bars" else ax.fill_between else: method = ax.plot color = _default_color(method, hue, color, kwargs) if not p.has_xy_data: return ax # Default to discrete bins for categorical variables if discrete is None: discrete = p._default_discrete() estimate_kws = dict( stat=stat, bins=bins, binwidth=binwidth, binrange=binrange, discrete=discrete, cumulative=cumulative, ) if p.univariate: p.plot_univariate_histogram( multiple=multiple, element=element, fill=fill, shrink=shrink, common_norm=common_norm, common_bins=common_bins, kde=kde, kde_kws=kde_kws, color=color, legend=legend, estimate_kws=estimate_kws, line_kws=line_kws, **kwargs, ) else: p.plot_bivariate_histogram( common_bins=common_bins, common_norm=common_norm, thresh=thresh, pthresh=pthresh, pmax=pmax, color=color, legend=legend, cbar=cbar, cbar_ax=cbar_ax, cbar_kws=cbar_kws, estimate_kws=estimate_kws, **kwargs, ) return ax histplot.__doc__ = """\ Plot univariate or bivariate histograms to show distributions of datasets. A histogram is a classic visualization tool that represents the distribution of one or more variables by counting the number of observations that fall within discrete bins. This function can normalize the statistic computed within each bin to estimate frequency, density or probability mass, and it can add a smooth curve obtained using a kernel density estimate, similar to :func:`kdeplot`. More information is provided in the :ref:`user guide <tutorial_hist>`. Parameters ---------- {params.core.data} {params.core.xy} {params.core.hue} weights : vector or key in ``data`` If provided, weight the contribution of the corresponding data points towards the count in each bin by these factors. {params.hist.stat} {params.hist.bins} {params.hist.binwidth} {params.hist.binrange} discrete : bool If True, default to ``binwidth=1`` and draw the bars so that they are centered on their corresponding data points. This avoids "gaps" that may otherwise appear when using discrete (integer) data. cumulative : bool If True, plot the cumulative counts as bins increase. common_bins : bool If True, use the same bins when semantic variables produce multiple plots. If using a reference rule to determine the bins, it will be computed with the full dataset. 
common_norm : bool If True and using a normalized statistic, the normalization will apply over the full dataset. Otherwise, normalize each histogram independently. multiple : {{"layer", "dodge", "stack", "fill"}} Approach to resolving multiple elements when semantic mapping creates subsets. Only relevant with univariate data. element : {{"bars", "step", "poly"}} Visual representation of the histogram statistic. Only relevant with univariate data. fill : bool If True, fill in the space under the histogram. Only relevant with univariate data. shrink : number Scale the width of each bar relative to the binwidth by this factor. Only relevant with univariate data. kde : bool If True, compute a kernel density estimate to smooth the distribution and show on the plot as (one or more) line(s). Only relevant with univariate data. kde_kws : dict Parameters that control the KDE computation, as in :func:`kdeplot`. line_kws : dict Parameters that control the KDE visualization, passed to :meth:`matplotlib.axes.Axes.plot`. thresh : number or None Cells with a statistic less than or equal to this value will be transparent. Only relevant with bivariate data. pthresh : number or None Like ``thresh``, but a value in [0, 1] such that cells with aggregate counts (or other statistics, when used) up to this proportion of the total will be transparent. pmax : number or None A value in [0, 1] that sets that saturation point for the colormap at a value such that cells below constitute this proportion of the total count (or other statistic, when used). {params.dist.cbar} {params.dist.cbar_ax} {params.dist.cbar_kws} {params.core.palette} {params.core.hue_order} {params.core.hue_norm} {params.core.color} {params.dist.log_scale} {params.dist.legend} {params.core.ax} kwargs Other keyword arguments are passed to one of the following matplotlib functions: - :meth:`matplotlib.axes.Axes.bar` (univariate, element="bars") - :meth:`matplotlib.axes.Axes.fill_between` (univariate, other element, fill=True) - :meth:`matplotlib.axes.Axes.plot` (univariate, other element, fill=False) - :meth:`matplotlib.axes.Axes.pcolormesh` (bivariate) Returns ------- {returns.ax} See Also -------- {seealso.displot} {seealso.kdeplot} {seealso.rugplot} {seealso.ecdfplot} {seealso.jointplot} Notes ----- The choice of bins for computing and plotting a histogram can exert substantial influence on the insights that one is able to draw from the visualization. If the bins are too large, they may erase important features. On the other hand, bins that are too small may be dominated by random variability, obscuring the shape of the true underlying distribution. The default bin size is determined using a reference rule that depends on the sample size and variance. This works well in many cases, (i.e., with "well-behaved" data) but it fails in others. It is always a good to try different bin sizes to be sure that you are not missing something important. This function allows you to specify bins in several different ways, such as by setting the total number of bins to use, the width of each bin, or the specific locations where the bins should break. Examples -------- .. 
include:: ../docstrings/histplot.rst """.format( params=_param_docs, returns=_core_docs["returns"], seealso=_core_docs["seealso"], ) def kdeplot( data=None, *, x=None, y=None, hue=None, weights=None, palette=None, hue_order=None, hue_norm=None, color=None, fill=None, multiple="layer", common_norm=True, common_grid=False, cumulative=False, bw_method="scott", bw_adjust=1, warn_singular=True, log_scale=None, levels=10, thresh=.05, gridsize=200, cut=3, clip=None, legend=True, cbar=False, cbar_ax=None, cbar_kws=None, ax=None, **kwargs, ): # --- Start with backwards compatability for versions < 0.11.0 ---------------- # Handle (past) deprecation of `data2` if "data2" in kwargs: msg = "`data2` has been removed (replaced by `y`); please update your code." raise TypeError(msg) # Handle deprecation of `vertical` vertical = kwargs.pop("vertical", None) if vertical is not None: if vertical: action_taken = "assigning data to `y`." if x is None: data, y = y, data else: x, y = y, x else: action_taken = "assigning data to `x`." msg = textwrap.dedent(f"""\n The `vertical` parameter is deprecated; {action_taken} This will become an error in seaborn v0.14.0; please update your code. """) warnings.warn(msg, UserWarning, stacklevel=2) # Handle deprecation of `bw` bw = kwargs.pop("bw", None) if bw is not None: msg = textwrap.dedent(f"""\n The `bw` parameter is deprecated in favor of `bw_method` and `bw_adjust`. Setting `bw_method={bw}`, but please see the docs for the new parameters and update your code. This will become an error in seaborn v0.14.0. """) warnings.warn(msg, UserWarning, stacklevel=2) bw_method = bw # Handle deprecation of `kernel` if kwargs.pop("kernel", None) is not None: msg = textwrap.dedent("""\n Support for alternate kernels has been removed; using Gaussian kernel. This will become an error in seaborn v0.14.0; please update your code. """) warnings.warn(msg, UserWarning, stacklevel=2) # Handle deprecation of shade_lowest shade_lowest = kwargs.pop("shade_lowest", None) if shade_lowest is not None: if shade_lowest: thresh = 0 msg = textwrap.dedent(f"""\n `shade_lowest` has been replaced by `thresh`; setting `thresh={thresh}. This will become an error in seaborn v0.14.0; please update your code. """) warnings.warn(msg, UserWarning, stacklevel=2) # Handle "soft" deprecation of shade `shade` is not really the right # terminology here, but unlike some of the other deprecated parameters it # is probably very commonly used and much hard to remove. This is therefore # going to be a longer process where, first, `fill` will be introduced and # be used throughout the documentation. In 0.12, when kwarg-only # enforcement hits, we can remove the shade/shade_lowest out of the # function signature all together and pull them out of the kwargs. Then we # can actually fire a FutureWarning, and eventually remove. shade = kwargs.pop("shade", None) if shade is not None: fill = shade msg = textwrap.dedent(f"""\n `shade` is now deprecated in favor of `fill`; setting `fill={shade}`. This will become an error in seaborn v0.14.0; please update your code. """) warnings.warn(msg, FutureWarning, stacklevel=2) # Handle `n_levels` # This was never in the formal API but it was processed, and appeared in an # example. We can treat as an alias for `levels` now and deprecate later. 
levels = kwargs.pop("n_levels", levels) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # p = _DistributionPlotter( data=data, variables=dict(x=x, y=y, hue=hue, weights=weights), ) p.map_hue(palette=palette, order=hue_order, norm=hue_norm) if ax is None: ax = plt.gca() p._attach(ax, allowed_types=["numeric", "datetime"], log_scale=log_scale) method = ax.fill_between if fill else ax.plot color = _default_color(method, hue, color, kwargs) if not p.has_xy_data: return ax # Pack the kwargs for statistics.KDE estimate_kws = dict( bw_method=bw_method, bw_adjust=bw_adjust, gridsize=gridsize, cut=cut, clip=clip, cumulative=cumulative, ) if p.univariate: plot_kws = kwargs.copy() p.plot_univariate_density( multiple=multiple, common_norm=common_norm, common_grid=common_grid, fill=fill, color=color, legend=legend, warn_singular=warn_singular, estimate_kws=estimate_kws, **plot_kws, ) else: p.plot_bivariate_density( common_norm=common_norm, fill=fill, levels=levels, thresh=thresh, legend=legend, color=color, warn_singular=warn_singular, cbar=cbar, cbar_ax=cbar_ax, cbar_kws=cbar_kws, estimate_kws=estimate_kws, **kwargs, ) return ax kdeplot.__doc__ = """\ Plot univariate or bivariate distributions using kernel density estimation. A kernel density estimate (KDE) plot is a method for visualizing the distribution of observations in a dataset, analogous to a histogram. KDE represents the data using a continuous probability density curve in one or more dimensions. The approach is explained further in the :ref:`user guide <tutorial_kde>`. Relative to a histogram, KDE can produce a plot that is less cluttered and more interpretable, especially when drawing multiple distributions. But it has the potential to introduce distortions if the underlying distribution is bounded or not smooth. Like a histogram, the quality of the representation also depends on the selection of good smoothing parameters. Parameters ---------- {params.core.data} {params.core.xy} {params.core.hue} weights : vector or key in ``data`` If provided, weight the kernel density estimation using these values. {params.core.palette} {params.core.hue_order} {params.core.hue_norm} {params.core.color} fill : bool or None If True, fill in the area under univariate density curves or between bivariate contours. If None, the default depends on ``multiple``. {params.dist.multiple} common_norm : bool If True, scale each conditional density by the number of observations such that the total area under all densities sums to 1. Otherwise, normalize each density independently. common_grid : bool If True, use the same evaluation grid for each kernel density estimate. Only relevant with univariate data. {params.kde.cumulative} {params.kde.bw_method} {params.kde.bw_adjust} warn_singular : bool If True, issue a warning when trying to estimate the density of data with zero variance. {params.dist.log_scale} levels : int or vector Number of contour levels or values to draw contours at. A vector argument must have increasing values in [0, 1]. Levels correspond to iso-proportions of the density: e.g., 20% of the probability mass will lie below the contour drawn for 0.2. Only relevant with bivariate data. thresh : number in [0, 1] Lowest iso-proportion level at which to draw a contour line. Ignored when ``levels`` is a vector. Only relevant with bivariate data. gridsize : int Number of points on each dimension of the evaluation grid. 
{params.kde.cut} {params.kde.clip} {params.dist.legend} {params.dist.cbar} {params.dist.cbar_ax} {params.dist.cbar_kws} {params.core.ax} kwargs Other keyword arguments are passed to one of the following matplotlib functions: - :meth:`matplotlib.axes.Axes.plot` (univariate, ``fill=False``), - :meth:`matplotlib.axes.Axes.fill_between` (univariate, ``fill=True``), - :meth:`matplotlib.axes.Axes.contour` (bivariate, ``fill=False``), - :meth:`matplotlib.axes.contourf` (bivariate, ``fill=True``). Returns ------- {returns.ax} See Also -------- {seealso.displot} {seealso.histplot} {seealso.ecdfplot} {seealso.jointplot} {seealso.violinplot} Notes ----- The *bandwidth*, or standard deviation of the smoothing kernel, is an important parameter. Misspecification of the bandwidth can produce a distorted representation of the data. Much like the choice of bin width in a histogram, an over-smoothed curve can erase true features of a distribution, while an under-smoothed curve can create false features out of random variability. The rule-of-thumb that sets the default bandwidth works best when the true distribution is smooth, unimodal, and roughly bell-shaped. It is always a good idea to check the default behavior by using ``bw_adjust`` to increase or decrease the amount of smoothing. Because the smoothing algorithm uses a Gaussian kernel, the estimated density curve can extend to values that do not make sense for a particular dataset. For example, the curve may be drawn over negative values when smoothing data that are naturally positive. The ``cut`` and ``clip`` parameters can be used to control the extent of the curve, but datasets that have many observations close to a natural boundary may be better served by a different visualization method. Similar considerations apply when a dataset is naturally discrete or "spiky" (containing many repeated observations of the same value). Kernel density estimation will always produce a smooth curve, which would be misleading in these situations. The units on the density axis are a common source of confusion. While kernel density estimation produces a probability distribution, the height of the curve at each point gives a density, not a probability. A probability can be obtained only by integrating the density across a range. The curve is normalized so that the integral over all possible values is 1, meaning that the scale of the density axis depends on the data values. Examples -------- .. include:: ../docstrings/kdeplot.rst """.format( params=_param_docs, returns=_core_docs["returns"], seealso=_core_docs["seealso"], ) def ecdfplot( data=None, *, # Vector variables x=None, y=None, hue=None, weights=None, # Computation parameters stat="proportion", complementary=False, # Hue mapping parameters palette=None, hue_order=None, hue_norm=None, # Axes information log_scale=None, legend=True, ax=None, # Other appearance keywords **kwargs, ): p = _DistributionPlotter( data=data, variables=dict(x=x, y=y, hue=hue, weights=weights), ) p.map_hue(palette=palette, order=hue_order, norm=hue_norm) # We could support other semantics (size, style) here fairly easily # But it would make distplot a bit more complicated. # It's always possible to add features like that later, so I am going to defer. # It will be even easier to wait until after there is a more general/abstract # way to go from semantic specs to artist attributes. 
if ax is None: ax = plt.gca() p._attach(ax, log_scale=log_scale) color = kwargs.pop("color", kwargs.pop("c", None)) kwargs["color"] = _default_color(ax.plot, hue, color, kwargs) if not p.has_xy_data: return ax # We could add this one day, but it's of dubious value if not p.univariate: raise NotImplementedError("Bivariate ECDF plots are not implemented") estimate_kws = dict( stat=stat, complementary=complementary, ) p.plot_univariate_ecdf( estimate_kws=estimate_kws, legend=legend, **kwargs, ) return ax ecdfplot.__doc__ = """\ Plot empirical cumulative distribution functions. An ECDF represents the proportion or count of observations falling below each unique value in a dataset. Compared to a histogram or density plot, it has the advantage that each observation is visualized directly, meaning that there are no binning or smoothing parameters that need to be adjusted. It also aids direct comparisons between multiple distributions. A downside is that the relationship between the appearance of the plot and the basic properties of the distribution (such as its central tendency, variance, and the presence of any bimodality) may not be as intuitive. More information is provided in the :ref:`user guide <tutorial_ecdf>`. Parameters ---------- {params.core.data} {params.core.xy} {params.core.hue} weights : vector or key in ``data`` If provided, weight the contribution of the corresponding data points towards the cumulative distribution using these values. {params.ecdf.stat} {params.ecdf.complementary} {params.core.palette} {params.core.hue_order} {params.core.hue_norm} {params.dist.log_scale} {params.dist.legend} {params.core.ax} kwargs Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.plot`. Returns ------- {returns.ax} See Also -------- {seealso.displot} {seealso.histplot} {seealso.kdeplot} {seealso.rugplot} Examples -------- .. include:: ../docstrings/ecdfplot.rst """.format( params=_param_docs, returns=_core_docs["returns"], seealso=_core_docs["seealso"], ) def rugplot( data=None, *, x=None, y=None, hue=None, height=.025, expand_margins=True, palette=None, hue_order=None, hue_norm=None, legend=True, ax=None, **kwargs ): # A note: I think it would make sense to add multiple= to rugplot and allow # rugs for different hue variables to be shifted orthogonal to the data axis # But is this stacking, or dodging? # A note: if we want to add a style semantic to rugplot, # we could make an option that draws the rug using scatterplot # A note, it would also be nice to offer some kind of histogram/density # rugplot, since alpha blending doesn't work great in the large n regime # --- Start with backwards compatability for versions < 0.11.0 ---------------- a = kwargs.pop("a", None) axis = kwargs.pop("axis", None) if a is not None: data = a msg = textwrap.dedent("""\n The `a` parameter has been replaced; use `x`, `y`, and/or `data` instead. Please update your code; This will become an error in seaborn v0.14.0. """) warnings.warn(msg, UserWarning, stacklevel=2) if axis is not None: if axis == "x": x = data elif axis == "y": y = data data = None msg = textwrap.dedent(f"""\n The `axis` parameter has been deprecated; use the `{axis}` parameter instead. Please update your code; this will become an error in seaborn v0.14.0. """) warnings.warn(msg, UserWarning, stacklevel=2) vertical = kwargs.pop("vertical", None) if vertical is not None: if vertical: action_taken = "assigning data to `y`." if x is None: data, y = y, data else: x, y = y, x else: action_taken = "assigning data to `x`." 
msg = textwrap.dedent(f"""\n The `vertical` parameter is deprecated; {action_taken} This will become an error in seaborn v0.14.0; please update your code. """) warnings.warn(msg, UserWarning, stacklevel=2) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # p = _DistributionPlotter( data=data, variables=dict(x=x, y=y, hue=hue), ) p.map_hue(palette=palette, order=hue_order, norm=hue_norm) if ax is None: ax = plt.gca() p._attach(ax) color = kwargs.pop("color", kwargs.pop("c", None)) kwargs["color"] = _default_color(ax.plot, hue, color, kwargs) if not p.has_xy_data: return ax p.plot_rug(height, expand_margins, legend, **kwargs) return ax rugplot.__doc__ = """\ Plot marginal distributions by drawing ticks along the x and y axes. This function is intended to complement other plots by showing the location of individual observations in an unobtrusive way. Parameters ---------- {params.core.data} {params.core.xy} {params.core.hue} height : float Proportion of axes extent covered by each rug element. Can be negative. expand_margins : bool If True, increase the axes margins by the height of the rug to avoid overlap with other elements. {params.core.palette} {params.core.hue_order} {params.core.hue_norm} legend : bool If False, do not add a legend for semantic variables. {params.core.ax} kwargs Other keyword arguments are passed to :meth:`matplotlib.collections.LineCollection` Returns ------- {returns.ax} Examples -------- .. include:: ../docstrings/rugplot.rst """.format( params=_param_docs, returns=_core_docs["returns"], ) def displot( data=None, *, # Vector variables x=None, y=None, hue=None, row=None, col=None, weights=None, # Other plot parameters kind="hist", rug=False, rug_kws=None, log_scale=None, legend=True, # Hue-mapping parameters palette=None, hue_order=None, hue_norm=None, color=None, # Faceting parameters col_wrap=None, row_order=None, col_order=None, height=5, aspect=1, facet_kws=None, **kwargs, ): p = _DistributionPlotter( data=data, variables=dict(x=x, y=y, hue=hue, weights=weights, row=row, col=col), ) p.map_hue(palette=palette, order=hue_order, norm=hue_norm) _check_argument("kind", ["hist", "kde", "ecdf"], kind) # --- Initialize the FacetGrid object # Check for attempt to plot onto specific axes and warn if "ax" in kwargs: msg = ( "`displot` is a figure-level function and does not accept " "the ax= parameter. You may wish to try {}plot.".format(kind) ) warnings.warn(msg, UserWarning) kwargs.pop("ax") for var in ["row", "col"]: # Handle faceting variables that lack name information if var in p.variables and p.variables[var] is None: p.variables[var] = f"_{var}_" # Adapt the plot_data dataframe for use with FacetGrid grid_data = p.plot_data.rename(columns=p.variables) grid_data = grid_data.loc[:, ~grid_data.columns.duplicated()] col_name = p.variables.get("col") row_name = p.variables.get("row") if facet_kws is None: facet_kws = {} g = FacetGrid( data=grid_data, row=row_name, col=col_name, col_wrap=col_wrap, row_order=row_order, col_order=col_order, height=height, aspect=aspect, **facet_kws, ) # Now attach the axes object to the plotter object if kind == "kde": allowed_types = ["numeric", "datetime"] else: allowed_types = None p._attach(g, allowed_types=allowed_types, log_scale=log_scale) # Check for a specification that lacks x/y data and return early if not p.has_xy_data: return g if color is None and hue is None: color = "C0" # XXX else warn if hue is not None? 
kwargs["legend"] = legend # --- Draw the plots if kind == "hist": hist_kws = kwargs.copy() # Extract the parameters that will go directly to Histogram estimate_defaults = {} _assign_default_kwargs(estimate_defaults, Histogram.__init__, histplot) estimate_kws = {} for key, default_val in estimate_defaults.items(): estimate_kws[key] = hist_kws.pop(key, default_val) # Handle derivative defaults if estimate_kws["discrete"] is None: estimate_kws["discrete"] = p._default_discrete() hist_kws["estimate_kws"] = estimate_kws hist_kws.setdefault("color", color) if p.univariate: _assign_default_kwargs(hist_kws, p.plot_univariate_histogram, histplot) p.plot_univariate_histogram(**hist_kws) else: _assign_default_kwargs(hist_kws, p.plot_bivariate_histogram, histplot) p.plot_bivariate_histogram(**hist_kws) elif kind == "kde": kde_kws = kwargs.copy() # Extract the parameters that will go directly to KDE estimate_defaults = {} _assign_default_kwargs(estimate_defaults, KDE.__init__, kdeplot) estimate_kws = {} for key, default_val in estimate_defaults.items(): estimate_kws[key] = kde_kws.pop(key, default_val) kde_kws["estimate_kws"] = estimate_kws kde_kws["color"] = color if p.univariate: _assign_default_kwargs(kde_kws, p.plot_univariate_density, kdeplot) p.plot_univariate_density(**kde_kws) else: _assign_default_kwargs(kde_kws, p.plot_bivariate_density, kdeplot) p.plot_bivariate_density(**kde_kws) elif kind == "ecdf": ecdf_kws = kwargs.copy() # Extract the parameters that will go directly to the estimator estimate_kws = {} estimate_defaults = {} _assign_default_kwargs(estimate_defaults, ECDF.__init__, ecdfplot) for key, default_val in estimate_defaults.items(): estimate_kws[key] = ecdf_kws.pop(key, default_val) ecdf_kws["estimate_kws"] = estimate_kws ecdf_kws["color"] = color if p.univariate: _assign_default_kwargs(ecdf_kws, p.plot_univariate_ecdf, ecdfplot) p.plot_univariate_ecdf(**ecdf_kws) else: raise NotImplementedError("Bivariate ECDF plots are not implemented") # All plot kinds can include a rug if rug: # TODO with expand_margins=True, each facet expands margins... annoying! if rug_kws is None: rug_kws = {} _assign_default_kwargs(rug_kws, p.plot_rug, rugplot) rug_kws["legend"] = False if color is not None: rug_kws["color"] = color p.plot_rug(**rug_kws) # Call FacetGrid annotation methods # Note that the legend is currently set inside the plotting method g.set_axis_labels( x_var=p.variables.get("x", g.axes.flat[0].get_xlabel()), y_var=p.variables.get("y", g.axes.flat[0].get_ylabel()), ) g.set_titles() g.tight_layout() if data is not None and (x is not None or y is not None): if not isinstance(data, pd.DataFrame): data = pd.DataFrame(data) g.data = pd.merge( data, g.data[g.data.columns.difference(data.columns)], left_index=True, right_index=True, ) else: wide_cols = { k: f"_{k}_" if v is None else v for k, v in p.variables.items() } g.data = p.plot_data.rename(columns=wide_cols) return g displot.__doc__ = """\ Figure-level interface for drawing distribution plots onto a FacetGrid. This function provides access to several approaches for visualizing the univariate or bivariate distribution of data, including subsets of data defined by semantic mapping and faceting across multiple subplots. The ``kind`` parameter selects the approach to use: - :func:`histplot` (with ``kind="hist"``; the default) - :func:`kdeplot` (with ``kind="kde"``) - :func:`ecdfplot` (with ``kind="ecdf"``; univariate-only) Additionally, a :func:`rugplot` can be added to any kind of plot to show individual observations. 
Extra keyword arguments are passed to the underlying function, so you should refer to the documentation for each to understand the complete set of options for making plots with this interface. See the :doc:`distribution plots tutorial <../tutorial/distributions>` for a more in-depth discussion of the relative strengths and weaknesses of each approach. The distinction between figure-level and axes-level functions is explained further in the :doc:`user guide <../tutorial/function_overview>`. Parameters ---------- {params.core.data} {params.core.xy} {params.core.hue} {params.facets.rowcol} weights : vector or key in ``data`` Observation weights used for computing the distribution function. kind : {{"hist", "kde", "ecdf"}} Approach for visualizing the data. Selects the underlying plotting function and determines the additional set of valid parameters. rug : bool If True, show each observation with marginal ticks (as in :func:`rugplot`). rug_kws : dict Parameters to control the appearance of the rug plot. {params.dist.log_scale} {params.dist.legend} {params.core.palette} {params.core.hue_order} {params.core.hue_norm} {params.core.color} {params.facets.col_wrap} {params.facets.rowcol_order} {params.facets.height} {params.facets.aspect} {params.facets.facet_kws} kwargs Other keyword arguments are documented with the relevant axes-level function: - :func:`histplot` (with ``kind="hist"``) - :func:`kdeplot` (with ``kind="kde"``) - :func:`ecdfplot` (with ``kind="ecdf"``) Returns ------- {returns.facetgrid} See Also -------- {seealso.histplot} {seealso.kdeplot} {seealso.rugplot} {seealso.ecdfplot} {seealso.jointplot} Examples -------- See the API documentation for the axes-level functions for more details about the breadth of options available for each plot kind. .. include:: ../docstrings/displot.rst """.format( params=_param_docs, returns=_core_docs["returns"], seealso=_core_docs["seealso"], ) # =========================================================================== # # DEPRECATED FUNCTIONS LIVE BELOW HERE # =========================================================================== # def _freedman_diaconis_bins(a): """Calculate number of hist bins using Freedman-Diaconis rule.""" # From https://stats.stackexchange.com/questions/798/ a = np.asarray(a) if len(a) < 2: return 1 iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25])) h = 2 * iqr / (len(a) ** (1 / 3)) # fall back to sqrt(a) bins if iqr is 0 if h == 0: return int(np.sqrt(a.size)) else: return int(np.ceil((a.max() - a.min()) / h)) def distplot(a=None, bins=None, hist=True, kde=True, rug=False, fit=None, hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None, color=None, vertical=False, norm_hist=False, axlabel=None, label=None, ax=None, x=None): """ DEPRECATED This function has been deprecated and will be removed in seaborn v0.14.0. It has been replaced by :func:`histplot` and :func:`displot`, two functions with a modern API and many more capabilities. For a guide to updating, please see this notebook: https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751 """ if kde and not hist: axes_level_suggestion = ( "`kdeplot` (an axes-level function for kernel density plots)" ) else: axes_level_suggestion = ( "`histplot` (an axes-level function for histograms)" ) msg = textwrap.dedent(f""" `distplot` is a deprecated function and will be removed in seaborn v0.14.0. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or {axes_level_suggestion}. 
For a guide to updating your code to use the new functions, please see https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751 """) warnings.warn(msg, UserWarning, stacklevel=2) if ax is None: ax = plt.gca() # Intelligently label the support axis label_ax = bool(axlabel) if axlabel is None and hasattr(a, "name"): axlabel = a.name if axlabel is not None: label_ax = True # Support new-style API if x is not None: a = x # Make a a 1-d float array a = np.asarray(a, float) if a.ndim > 1: a = a.squeeze() # Drop null values from array a = remove_na(a) # Decide if the hist is normed norm_hist = norm_hist or kde or (fit is not None) # Handle dictionary defaults hist_kws = {} if hist_kws is None else hist_kws.copy() kde_kws = {} if kde_kws is None else kde_kws.copy() rug_kws = {} if rug_kws is None else rug_kws.copy() fit_kws = {} if fit_kws is None else fit_kws.copy() # Get the color from the current color cycle if color is None: if vertical: line, = ax.plot(0, a.mean()) else: line, = ax.plot(a.mean(), 0) color = line.get_color() line.remove() # Plug the label into the right kwarg dictionary if label is not None: if hist: hist_kws["label"] = label elif kde: kde_kws["label"] = label elif rug: rug_kws["label"] = label elif fit: fit_kws["label"] = label if hist: if bins is None: bins = min(_freedman_diaconis_bins(a), 50) hist_kws.setdefault("alpha", 0.4) hist_kws.setdefault("density", norm_hist) orientation = "horizontal" if vertical else "vertical" hist_color = hist_kws.pop("color", color) ax.hist(a, bins, orientation=orientation, color=hist_color, **hist_kws) if hist_color != color: hist_kws["color"] = hist_color axis = "y" if vertical else "x" if kde: kde_color = kde_kws.pop("color", color) kdeplot(**{axis: a}, ax=ax, color=kde_color, **kde_kws) if kde_color != color: kde_kws["color"] = kde_color if rug: rug_color = rug_kws.pop("color", color) rugplot(**{axis: a}, ax=ax, color=rug_color, **rug_kws) if rug_color != color: rug_kws["color"] = rug_color if fit is not None: def pdf(x): return fit.pdf(x, *params) fit_color = fit_kws.pop("color", "#282828") gridsize = fit_kws.pop("gridsize", 200) cut = fit_kws.pop("cut", 3) clip = fit_kws.pop("clip", (-np.inf, np.inf)) bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1) x = _kde_support(a, bw, gridsize, cut, clip) params = fit.fit(a) y = pdf(x) if vertical: x, y = y, x ax.plot(x, y, color=fit_color, **fit_kws) if fit_color != "#282828": fit_kws["color"] = fit_color if label_ax: if vertical: ax.set_ylabel(axlabel) else: ax.set_xlabel(axlabel) return ax
_DistributionPlotter
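The record above is seaborn's distribution module (____ resolves to _DistributionPlotter); its docstrings describe the axes-level histplot/kdeplot/ecdfplot/rugplot functions and the figure-level displot. A minimal usage sketch, assuming seaborn (0.11+) and matplotlib are installed; the bundled "tips" dataset is used purely for illustration and is not part of the record:

# Minimal sketch; assumes seaborn >= 0.11 and matplotlib are available.
import seaborn as sns
import matplotlib.pyplot as plt

tips = sns.load_dataset("tips")  # example dataset shipped with seaborn

# Axes-level entry point: draws a histogram plus a KDE curve onto one Axes.
sns.histplot(data=tips, x="total_bill", hue="time", kde=True)

# Figure-level entry point: displot builds a FacetGrid and dispatches to kdeplot.
sns.displot(data=tips, x="total_bill", col="time", kind="kde", fill=True)

plt.show()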
python
getsentry__sentry
src/sentry/web/frontend/base.py
{ "start": 27997, "end": 28693 }
class ____(AbstractOrganizationView): """ A view which has direct ORM access to organization objects. Only endpoints that exist in the region silo should use this class. """ def _get_organization(self) -> Organization | None: if not self.active_organization: return None try: return Organization.objects.get(id=self.active_organization.organization.id) except Organization.DoesNotExist: return None except SiloLimit.AvailabilityError as e: raise SiloLimit.AvailabilityError( f"{type(self).__name__} should extend ControlSiloOrganizationView?" ) from e
OrganizationView
python
eth-brownie__brownie
brownie/exceptions.py
{ "start": 5418, "end": 5491 }
class ____(AttributeError): pass # project/ @final
NamespaceCollision
python
ansible__ansible
lib/ansible/_internal/_wrapt.py
{ "start": 1990, "end": 3448 }
class ____(object): # We use properties to override the values of __module__ and # __doc__. If we add these in ObjectProxy, the derived class # __dict__ will still be setup to have string variants of these # attributes and the rules of descriptors means that they appear to # take precedence over the properties in the base class. To avoid # that, we copy the properties into the derived class type itself # via a meta class. In that way the properties will always take # precedence. @property def __module__(self): return self.__wrapped__.__module__ @__module__.setter def __module__(self, value): self.__wrapped__.__module__ = value @property def __doc__(self): return self.__wrapped__.__doc__ @__doc__.setter def __doc__(self, value): self.__wrapped__.__doc__ = value # We similar use a property for __dict__. We need __dict__ to be # explicit to ensure that vars() works as expected. @property def __dict__(self): return self.__wrapped__.__dict__ # Need to also propagate the special __weakref__ attribute for case # where decorating classes which will define this. If do not define # it and use a function like inspect.getmembers() on a decorator # class it will fail. This can't be in the derived classes. @property def __weakref__(self): return self.__wrapped__.__weakref__
_ObjectProxyMethods
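The wrapt snippet above forwards __module__, __doc__ and __dict__ through explicit properties because those names are already satisfied by the proxy class itself, so a fallback __getattr__ never sees them. A simplified sketch (not wrapt's actual implementation; names invented) showing the lookup behaviour it works around:

# Naive proxy with only __getattr__ forwarding; class and function names are
# invented for this demo.
class NaiveProxy:
    def __init__(self, wrapped):
        object.__setattr__(self, "_wrapped", wrapped)

    def __getattr__(self, name):
        # Reached only when normal attribute lookup on the proxy fails.
        return getattr(object.__getattribute__(self, "_wrapped"), name)


def target():
    """Docstring on the wrapped function."""


proxy = NaiveProxy(target)
print(proxy.__name__)    # 'target': not defined on NaiveProxy, so it is forwarded
print(proxy.__doc__)     # None: found on the NaiveProxy class, never forwarded
print(proxy.__module__)  # the proxy class's own module string (wrong once the
                         # wrapped object comes from a different module)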
python
walkccc__LeetCode
solutions/1428. Leftmost Column with at Least a One/1428.py
{ "start": 237, "end": 587 }
class ____: def leftMostColumnWithOne(self, binaryMatrix: 'BinaryMatrix') -> int: m, n = binaryMatrix.dimensions() ans = -1 l = 0 r = n - 1 while l <= r: mid = (l + r) // 2 if any(binaryMatrix.get(i, mid) for i in range(m)): ans = mid r = mid - 1 else: l = mid + 1 return ans
Solution
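The binary-search solution above targets LeetCode's interactive BinaryMatrix interface (dimensions() and get(row, col)). A hypothetical local stand-in, with invented names, makes it runnable outside the judge:

# FakeBinaryMatrix is an invented stand-in for the judge-provided API.
class FakeBinaryMatrix:
    def __init__(self, grid):
        self.grid = grid

    def dimensions(self) -> list[int]:
        return [len(self.grid), len(self.grid[0])]

    def get(self, row: int, col: int) -> int:
        return self.grid[row][col]


# Rows are non-decreasing; the leftmost column containing a 1 is column 1.
mat = FakeBinaryMatrix([
    [0, 0, 1, 1],
    [0, 1, 1, 1],
    [0, 0, 0, 0],
])
print(Solution().leftMostColumnWithOne(mat))  # 1  (____ above stands for Solution)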
python
pytorch__pytorch
torch/ao/quantization/observer.py
{ "start": 61996, "end": 63096 }
class ____(Enum): """How floating point number is mapped to integer number symmetric mapping means floating point range is symmetrically mapped to integer range let's say we have floating point range (-3.5, 10.2) and integer range (-8, 7) (int4) we'll use (-10.2, 10.2) as the range for floating point and map that to (-8, 7) e.g. scale = (10.2 - (-10.2)) / (7 - (-8)) SYMMETRIC_NO_CLIPPING_ERR is a variant of symmetric mapping, where the scale is the max of smin and smax, where smin = min_val_neg / quant_min, and smax = max_val_pos / quant_max. By calculating smin and smax individually, there can be less round error on negative values, and no out-of-range of all floating point values. asymmetric mapping means we just directly map the floating point range to integer range, for the above example, we will map (-3.5, 10.2) to (-8, 7) and calculate quantization parameter based on this mapping e.g. scale = (10.2 - (-3.5)) / (7 - (-8)) """ SYMMETRIC = auto() SYMMETRIC_NO_CLIPPING_ERR = auto() ASYMMETRIC = auto()
MappingType
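The MappingType docstring above already works one numeric example; spelling it out as arithmetic (variable names here are illustrative only, not part of the observer API):

# Float range (-3.5, 10.2) quantized to the int4 range (-8, 7), per the docstring.
min_val, max_val = -3.5, 10.2
quant_min, quant_max = -8, 7

# SYMMETRIC: widen the float range to (-10.2, 10.2) so zero maps to zero.
symmetric_scale = (max_val - (-max_val)) / (quant_max - quant_min)
print(symmetric_scale)   # 20.4 / 15 = 1.36

# ASYMMETRIC: map (-3.5, 10.2) onto (-8, 7) directly.
asymmetric_scale = (max_val - min_val) / (quant_max - quant_min)
print(asymmetric_scale)  # 13.7 / 15 = 0.9133...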
python
walkccc__LeetCode
solutions/3019. Number of Changing Keys/3019.py
{ "start": 0, "end": 148 }
class ____: def countKeyChanges(self, s: str) -> int: return sum(a.lower() != b.lower() for a, b in itertools.pairwise(s))
Solution
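The one-liner above relies on itertools being imported at module scope elsewhere in the solutions repo and on Python 3.10+ for itertools.pairwise. A small check, assuming the class is pasted into one file with ____ replaced by Solution (input chosen for illustration, not part of the record):

import itertools  # must be added explicitly when running the snippet on its own

# In "aAbBcC" only the a->b and b->c transitions change keys; case shifts do not.
print(Solution().countKeyChanges("aAbBcC"))  # 2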
python
ApeWorX__ape
src/ape/types/coverage.py
{ "start": 15160, "end": 34637 }
class ____(BaseModel): """ Coverage report schema inspired from coverage.py. """ source_folders: list[Path] """ All source folders to use. This is needed for codecov. """ timestamp: int """ The timestamp the report was generated, in milliseconds. """ projects: list[CoverageProject] = [] """ Each project with individual coverage tracked. """ @field_validator("timestamp", mode="before") @classmethod def validate_timestamp(cls, value): # Default to current UTC timestamp (ms). return value or get_current_timestamp_ms() @property def sources(self) -> list[str]: """ Every source ID in the report. """ return [s.source_id for p in self.projects for s in p.sources] @property def statements(self) -> list[CoverageStatement]: """ All valid coverage lines from every function in every contract in every source from every project in this report. """ return list(itertools.chain.from_iterable(p.statements for p in self.projects)) @property def lines_covered(self) -> NonNegativeInt: """ All lines with a hit count greater than zero from every function in every contract in every source in every project in this report. """ return sum(p.lines_covered for p in self.projects) @property def lines_valid(self) -> NonNegativeInt: """ The number of lines valid for coverage. """ return len(self.statements) @property def miss_count(self) -> NonNegativeInt: """ The number of lines missed. """ return self.lines_valid - self.lines_covered @property def line_rate(self) -> float: """ The number of lines hit divided by number of lines. """ return self.lines_covered / self.lines_valid if self.lines_valid > 0 else 0 @property def total_functions(self) -> NonNegativeInt: """ The total number of functions in this source. """ return sum(x.total_functions for x in self.projects) @property def function_hits(self) -> NonNegativeInt: """ The number of functions with a hit counter greater than zero. """ return sum(x.function_hits for x in self.projects) @property def function_rate(self) -> float: """ The rate of functions hit versus total functions. """ return self.function_hits / self.total_functions if self.total_functions > 0 else 0 def get_xml(self) -> str: """ The coverage XML report as a string. The XML coverage data schema is meant to be compatible with codecov.io. Thus, some of coverage is modified slightly, and some of the naming conventions (based on 90s Java) won't be super relevant to smart-contract projects. """ # See _DTD_URL to learn more about the schema. xml_out = self._get_xml() return xml_out.toprettyxml(indent=" ") def _get_xml(self): # NOTE: Some of this implementation is borrowed from coverage.py. impl = getDOMImplementation() if not impl: # Only for mypy. raise ValueError("Failed to get XML DOM.") xml_out = impl.createDocument(None, "coverage", None) if not (xcoverage := xml_out.documentElement): return xml_out # Unable to use too exotic of a version. xversion = ape_version.split(".dev")[0].strip() xcoverage.setAttribute("version", xversion) # Add top-level statement stats. xcoverage.setAttribute("timestamp", f"{self.timestamp}") xcoverage.setAttribute("lines-valid", f"{self.lines_valid}") xcoverage.setAttribute("lines-covered", f"{self.lines_covered}") xcoverage.setAttribute("line-rate", f"{round(self.line_rate, 4)}") # NOTE: Branch fields are required in the schema. # TODO: Replace with actual branch coverage when exists. xcoverage.setAttribute("branches-covered", "0") xcoverage.setAttribute("branches-valid", "0") xcoverage.setAttribute("branch-rate", "0") # I don't know what this, but it is also required. 
xcoverage.setAttribute("complexity", "0") # Add comments. xcoverage.appendChild( xml_out.createComment(f" Generated by Ape Framework: {_APE_DOCS_URL}") ) xcoverage.appendChild(xml_out.createComment(f" Based on {_DTD_URL} ")) # In the XML schema, sources refer to root directories containing source code. # In our case, that would typically be the "contracts" folder of the project. # NOTE: This is critical and necessary for codecov to map sources correctly. xsources = xml_out.createElement("sources") for source_path in self.source_folders: xsource = xml_out.createElement("source") xtxt = xml_out.createTextNode(source_path.as_posix()) xsource.appendChild(xtxt) xsources.appendChild(xsource) xcoverage.appendChild(xsources) # projects = packages. xpackages = xml_out.createElement("packages") for project in self.projects: xpackage = xml_out.createElement("package") # NOTE: For now, always use "." as the package name. # TODO: Experiment with using `self.project.name` as package name. # If it is "__local__", definitely use "." instead here. xpackage.setAttribute("name", ".") # Add package-level stats. xpackage.setAttribute("line-rate", f"{round(project.line_rate, 4)}") xpackage.setAttribute("branch-rate", "0") # TODO xpackage.setAttribute("complexity", "0") # The `classes` field refers to `contracts` in our case. xclasses = xml_out.createElement("classes") for src in project.sources: for contract in src.contracts: xclass = xml_out.createElement("class") xclass.setAttribute("name", src.source_id) xclass.setAttribute("line-rate", f"{round(contract.line_rate, 4)}") xclass.setAttribute("branch-rate", "0") # TODO xclass.setAttribute("complexity", "0") # NOTE: I am not sure what this does or why it is needed. # Also, I am not sure why we don't map statements to the methods. # because we totally could do that. Nonetheless, we have to follow # the schema. xml_out.createElement("methods") # Use name unless the same function found twice, then use full name. fn_map: dict[str, FunctionCoverage] = {} fn_singles_used = [] # For the XML report, we split all statements to be only 1 line long. # Each class (contract) can only identify the statement (line number) once. lines_to_add: dict[int, int] = {} xlines = xml_out.createElement("lines") for function in contract.functions: fn_singles_used.append(function.name) if ( function.name in fn_map and function.full_name != fn_map[function.name].full_name ): # Another method with the same name already in map. # Use full name for both. existing_fn = fn_map[function.name] fn_map[existing_fn.full_name] = existing_fn del fn_map[function.name] fn_map[function.full_name] = function elif function.name in fn_singles_used: # Because this name has already been found once, # we can assume we are using full names for these. fn_map[function.full_name] = function else: # Is first time coming across this name. fn_map[function.name] = function for fn_name, function in fn_map.items(): if not function.statements or not any( s.location for s in function.statements ): # Functions without source-locatable statements are not # permitted in the XML report. This mean auto-getter hits # won't be included as well as any builtin lines. Use other # reportsF to find that level of information. continue for statement in function.statements: if not statement.location: # Statements without line numbers are excluded from this report. # That level of granularity is present in other reports however. # The XML report is strict so it can merge with others. 
continue for lineno in range(statement.location[0], statement.location[2] + 1): if lineno in lines_to_add: lines_to_add[lineno] += statement.hit_count else: lines_to_add[lineno] = statement.hit_count # NOTE: Line numbers must be sorted in the XML! sorted_nos = sorted(list(lines_to_add.keys())) for no in sorted_nos: hits = lines_to_add[no] xline = xml_out.createElement("line") xline.setAttribute("number", f"{no}") xline.setAttribute("hits", f"{hits}") xlines.appendChild(xline) xclass.appendChild(xlines) xclass.appendChild(xlines) xclasses.appendChild(xclass) xpackage.appendChild(xclasses) xpackages.appendChild(xpackage) xcoverage.appendChild(xpackages) return xml_out def write_xml(self, path: Path): if not (xml := self.get_xml()): return elif path.is_dir(): path = path / "coverage.xml" path.unlink(missing_ok=True) path.write_text(xml, encoding="utf8") def write_html(self, path: Path, verbose: bool = False): if not (html := self.get_html(verbose=verbose)): return elif path.is_dir(): # Use as base path if given. html_path = path / "htmlcov" html_path.mkdir(exist_ok=True) elif not path.exists() and path.parent.is_dir(): # Write to path if given a new one. html_path = path else: raise ValueError("Invalid path argument to `write_html()`.") # Create new index.html. index = html_path / "index.html" index.unlink(missing_ok=True) index.write_text(html, encoding="utf8") favicon = html_path / "favicon.ico" if not favicon.is_file(): # Use favicon that already ships with Ape's docs. root = Path(__file__).parent docs_folder = root / "docs" while "ape" in root.as_posix() and not docs_folder.is_dir(): root = root.parent docs_folder = root / "docs" docs_favicon = docs_folder / "favicon.ico" if docs_folder.is_dir() and docs_favicon.is_file(): favicon.write_bytes(docs_favicon.read_bytes()) else: # Try downloading from the internet. This may happen if running # ape in an isolated file system or a temporary directory, # such as CI/CD tests for Ape. try: url = "https://github.com/ApeWorX/ape/blob/main/docs/favicon.ico" response = requests.get(url) response.raise_for_status() # Check for any errors during the request favicon.write_bytes(response.content) except Exception as err: # Don't let this stop us from generating the report. logger.debug(f"Failed finding favicon for coverage HTML. {err}") css = html_path / "styles.css" css.unlink(missing_ok=True) css.write_text(_CSS, encoding="utf8") def get_html(self, verbose: bool = False) -> str: """ The coverage HTML report as a string. 
""" html = self._get_html(verbose=verbose) html_str = tostring(html, encoding="utf8", method="html").decode() return _HTMLPrettfier().prettify(html_str) def _get_html(self, verbose: bool = False) -> Any: html = Element("html") head = SubElement(html, "head") meta = SubElement(head, "meta") meta.set("http-equiv", "Content-Type") meta.set("content", "text/html; charset=utf-8") title = SubElement(head, "title") title.text = "Contract Coverage Report" favicon = SubElement(head, "link") favicon.set("rel", "icon") favicon.set("sizes", "32x32") favicon.set("href", "favicon.ico") css = SubElement(head, "link") css.set("rel", "stylesheet") css.set("href", "styles.css") SubElement(html, "body") self._html_header_sub_element(html) self._html_main_sub_element(html, verbose=verbose) return html def _html_header_sub_element(self, html: Any) -> Any: header = SubElement(html, "header") div = SubElement(header, "div") h1 = SubElement(div, "h1") h1.text = "Coverage report" if len(self.projects) == 1: # If only one project, include information here instead of below. h1.text += f" for {self.projects[0].name}" paragraph = SubElement(div, "p") datetime_obj = datetime.fromtimestamp(self.timestamp / 1000) datetime_string = datetime_obj.strftime("%Y-%m-%d %H:%M:%S") paragraph.text = f"Generated by Ape Framework v{ape_version}, {datetime_string}" def _html_main_sub_element(self, html: Any, verbose: bool = False): main = SubElement(html, "main") show_project_header = len(self.projects) > 1 if verbose: self._html_main_verbose_sub_element(main, show_project_header=show_project_header) else: self._html_main_non_verbose_sub_element(main, show_project_header=show_project_header) def _html_main_non_verbose_sub_element(self, main: Any, show_project_header: bool = True): columns = ("Source", "Statements", "Missing", "Statement Coverage", "Function Coverage") for project in self.projects: if show_project_header: # If only 1 project, it is shown at the top. title = SubElement(main, "h2") title.text = project.name table = SubElement(main, "table", {}) thread = SubElement(table, "thread") thread_tr = SubElement(thread, "tr") for idx, column in enumerate(columns): th = SubElement(thread_tr, "th", {}, **{"class": f"column{idx + 1}"}) th.text = column tbody = SubElement(table, "tbody") for src in project.sources: tbody_tr = SubElement(tbody, "tr") source_td = SubElement(tbody_tr, "td", {}, **{"class": "column1"}) source_td.text = src.source_id self._set_common_td(tbody_tr, src) fn_cov_td = SubElement(tbody_tr, "td", {}, **{"class": "column5"}) fn_cov_td.text = f"{round(src.function_rate * 100, 2)}%" def _html_main_verbose_sub_element(self, main: Any, show_project_header: bool = True): columns = ( "Source", "Statements", "Missing", "Statement Coverage", ) for project in self.projects: if show_project_header: # If only 1 project, it is shown at the top. 
title = SubElement(main, "h2") title.text = str(project.name) src_type = "h3" else: src_type = "h2" for src in project.sources: table_header_h = SubElement(main, src_type, {}, **{"class": "table-center"}) stmt_cov = f"{round(src.line_rate * 100, 2)}%" fn_cov = f"{round(src.function_rate * 100, 2)}%" left_span = SubElement(table_header_h, "span", {}, **{"class": "left-aligned"}) left_span.text = src.source_id right_span = SubElement(table_header_h, "span", {}, **{"class": "right-aligned"}) right_span.text = f"stmt={stmt_cov} function={fn_cov}" table = SubElement(main, "table") thread = SubElement(table, "thread") thread_tr = SubElement(thread, "tr") for idx, column in enumerate(columns): th = SubElement(thread_tr, "th", {}, **{"class": f"column{idx + 1}"}) th.text = column tbody = SubElement(table, "tbody") for contract in src.contracts: for function in contract.functions: tbody_tr = SubElement(tbody, "tr") function_td = SubElement(tbody_tr, "td", {}, **{"class": "column1"}) # NOTE: Use the full name if the short name is repeated. function_td.text = ( function.full_name if len([fn for fn in contract.functions if fn.name == function.name]) > 1 else function.name ) self._set_common_td(tbody_tr, function) def _set_common_td(self, tbody_tr: Any, src_or_fn: Any): stmts_td = SubElement(tbody_tr, "td", {}, **{"class": "column2"}) stmts_td.text = f"{src_or_fn.lines_valid}" missing_td = SubElement(tbody_tr, "td", {}, **{"class": "column3"}) missing_td.text = f"{src_or_fn.miss_count}" stmt_cov_td = SubElement(tbody_tr, "td", {}, **{"class": "column4"}) stmt_cov_td.text = f"{round(src_or_fn.line_rate * 100, 2)}%" def model_dump(self, *args, **kwargs) -> dict: attribs = super().model_dump(*args, **kwargs) # Add coverage stats. attribs["lines_covered"] = self.lines_covered attribs["lines_valid"] = self.lines_valid attribs["line_rate"] = self.line_rate return attribs def get_source_coverage(self, source_id: str) -> Optional[ContractSourceCoverage]: for project in self.projects: for src in project.sources: if src.source_id == source_id: return src return None
CoverageReport
python
walkccc__LeetCode
solutions/2334. Subarray With Elements Greater Than Varying Threshold/2334.py
{ "start": 0, "end": 758 }
class ____:
  # Similar to 907. Sum of Subarray Minimums
  def validSubarraySize(self, nums: list[int], threshold: int) -> int:
    n = len(nums)
    ans = 0
    # prev[i] := the index k s.t. nums[k] is the previous minimum in nums[0..n)
    prev = [-1] * n
    # next[i] := the index k s.t. nums[k] is the next minimum in nums[i + 1..n)
    next = [n] * n
    stack = []

    for i, a in enumerate(nums):
      while stack and nums[stack[-1]] > a:
        index = stack.pop()
        next[index] = i
      if stack:
        prev[i] = stack[-1]
      stack.append(i)

    for i, (num, prevIndex, nextIndex) in enumerate(zip(nums, prev, next)):
      k = (i - prevIndex) + (nextIndex - i) - 1
      if num > threshold / k:
        return k

    return -1
Solution
python
bokeh__bokeh
tests/unit/bokeh/server/test_server__server.py
{ "start": 2118, "end": 33040 }
class ____(Handler): def __init__(self) -> None: super().__init__() self.load_count = 0 self.unload_count = 0 self.session_creation_async_value = 0 self.hooks = [] self.periodic_remover = None def modify_document(self, doc): # checks that session created hook has run, and session destroyed has not. assert self.session_creation_async_value == 3 doc.title = "Modified" doc.roots[0].hooks.append("modify") self.hooks.append("modify") doc.add_next_tick_callback(self.on_next_tick) doc.add_timeout_callback(self.on_timeout, 2) periodic_cb = doc.add_periodic_callback(self.on_periodic, 3) self.periodic_remover = lambda: doc.remove_periodic_callback(periodic_cb) def on_server_loaded(self, server_context): assert len(server_context.sessions) == 0 self.load_count += 1 self.hooks.append("server_loaded") def on_server_unloaded(self, server_context): self.unload_count += 1 self.hooks.append("server_unloaded") # important to test that this can be async async def on_session_created(self, session_context): async def setup_document(doc): # session creation hook is allowed to init the document before modify_document from bokeh.document import DEFAULT_TITLE hook_list = HookListModel() assert doc.title == DEFAULT_TITLE assert len(doc.roots) == 0 hook_list.hooks.append("session_created") doc.add_root(hook_list) self.session_creation_async_value = await async_value(1) self.session_creation_async_value = await async_value(2) self.session_creation_async_value = await async_value(3) await session_context.with_locked_document(setup_document) self.hooks.append("session_created") # this has to be async too async def on_session_destroyed(self, session_context): # this should be no-op'd, because the session is already destroyed async def shutdown_document(doc): doc.roots[0].hooks.append("session_destroyed") self.session_creation_async_value = await async_value(4) self.session_creation_async_value = await async_value(5) self.session_creation_async_value = await async_value(6) await session_context.with_locked_document(shutdown_document) self.hooks.append("session_destroyed") def on_next_tick(self): self.hooks.append("next_tick") def on_timeout(self): self.hooks.append("timeout") def on_periodic(self): self.hooks.append("periodic") self.periodic_remover() #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- def test_prefix(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: assert server.prefix == "" with ManagedServerLoop(application, prefix="foo") as server: assert server.prefix == "/foo" def test_index(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: assert server.index is None with ManagedServerLoop(application, index="foo") as server: assert server.index == "foo" async def test_get_sessions(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: server_sessions = server.get_sessions('/') assert len(server_sessions) == 0 await http_get(server.io_loop, url(server)) server_sessions = server.get_sessions('/') assert len(server_sessions) == 1 await http_get(server.io_loop, url(server)) server_sessions = server.get_sessions('/') assert len(server_sessions) == 2 server_sessions = server.get_sessions() assert len(server_sessions) == 2 with pytest.raises(ValueError): server.get_sessions("/foo") with ManagedServerLoop({"/foo": application, "/bar": 
application}) as server: await http_get(server.io_loop, url(server) + "foo") server_sessions = server.get_sessions('/foo') assert len(server_sessions) == 1 server_sessions = server.get_sessions('/bar') assert len(server_sessions) == 0 server_sessions = server.get_sessions() assert len(server_sessions) == 1 await http_get(server.io_loop, url(server) + "foo") server_sessions = server.get_sessions('/foo') assert len(server_sessions) == 2 server_sessions = server.get_sessions('/bar') assert len(server_sessions) == 0 server_sessions = server.get_sessions() assert len(server_sessions) == 2 await http_get(server.io_loop, url(server) + "bar") server_sessions = server.get_sessions('/foo') assert len(server_sessions) == 2 server_sessions = server.get_sessions('/bar') assert len(server_sessions) == 1 server_sessions = server.get_sessions() assert len(server_sessions) == 3 token_in_json = re.compile("""["']token["'] *: *["']([^"]+)["']""") def extract_token_from_json(html): if not isinstance(html, str): import codecs html = codecs.decode(html, 'utf-8') match = token_in_json.search(html) return match.group(1) use_for_title_in_json = re.compile("""["']use_for_title["'] *: *(false|true)""") def extract_use_for_title_from_json(html): if not isinstance(html, str): import codecs html = codecs.decode(html, 'utf-8') match = use_for_title_in_json.search(html) return match.group(1) def autoload_url(server): return url(server) + \ "autoload.js?bokeh-autoload-element=foo" def resource_files_requested(response, requested=True): if not isinstance(response, str): import codecs response = codecs.decode(response, 'utf-8') for file in [ 'static/js/bokeh.min.js', 'static/js/bokeh-widgets.min.js']: if requested: assert file in response else: assert file not in response def test_use_xheaders(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, use_xheaders=True) as server: assert server._http.xheaders is True def test_ssl_args_plumbing(ManagedServerLoop: MSL) -> None: with mock.patch.object(ssl, 'SSLContext'): with ManagedServerLoop({}, ssl_certfile="foo") as server: assert server._http.ssl_options.load_cert_chain.call_args[0] == () assert server._http.ssl_options.load_cert_chain.call_args[1] == dict(certfile='foo', keyfile=None, password=None) with mock.patch.object(ssl, 'SSLContext'): with ManagedServerLoop({}, ssl_certfile="foo", ssl_keyfile="baz") as server: assert server._http.ssl_options.load_cert_chain.call_args[0] == () assert server._http.ssl_options.load_cert_chain.call_args[1] == dict(certfile='foo', keyfile="baz", password=None) with mock.patch.object(ssl, 'SSLContext'): with ManagedServerLoop({}, ssl_certfile="foo", ssl_keyfile="baz", ssl_password="bar") as server: assert server._http.ssl_options.load_cert_chain.call_args[0] == () assert server._http.ssl_options.load_cert_chain.call_args[1] == dict(certfile='foo', keyfile="baz", password="bar") def test_base_server() -> None: app = BokehTornado(Application()) httpserver = HTTPServer(app) httpserver.start() loop = IOLoop() loop.make_current() server = BaseServer(loop, app, httpserver) server.start() assert server.io_loop == loop assert server._tornado.io_loop == loop httpserver.stop() server.stop() server.io_loop.close() async def test_server_applications_callable_arg(ManagedServerLoop: MSL) -> None: def modify_doc(doc): doc.title = "Hello, world!" 
with ManagedServerLoop(modify_doc, port=0) as server: await http_get(server.io_loop, url(server)) session = server.get_sessions('/')[0] assert session.document.title == "Hello, world!" with ManagedServerLoop({"/foo": modify_doc}, port=0) as server: await http_get(server.io_loop, url(server) + "foo") session = server.get_sessions('/foo')[0] assert session.document.title == "Hello, world!" async def test__token_arguments(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: response = await http_get(server.io_loop, url(server) + "?foo=10") html = response.body token = extract_token_from_json(html) payload = get_token_payload(token) assert 'arguments' in payload assert payload['arguments'] == {'foo': [b'10']} async def test__include_headers(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, include_headers=['Custom']) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) response = await http_get(server.io_loop, url(server), headers={'Custom': 'Test'}) html = response.body token = extract_token_from_json(html) payload = get_token_payload(token) assert 'headers' in payload assert payload['headers'] == {'Custom': 'Test'} async def test__exclude_headers(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, exclude_headers=['Connection', 'Host']) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) response = await http_get(server.io_loop, url(server)) html = response.body token = extract_token_from_json(html) payload = get_token_payload(token) assert 'headers' in payload assert payload["headers"].get("Accept-Encoding") == "gzip" async def test__include_cookies(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, include_cookies=['custom']) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) response = await http_get(server.io_loop, url(server), headers={'Cookie': 'custom = test ; custom2 = test2'}) html = response.body token = extract_token_from_json(html) payload = get_token_payload(token) assert 'cookies' in payload assert payload['cookies'] == {'custom': 'test'} async def test__exclude_cookies(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, exclude_cookies=['custom']) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) response = await http_get(server.io_loop, url(server), headers={'Cookie': 'custom = test ; custom2 = test2'}) html = response.body token = extract_token_from_json(html) payload = get_token_payload(token) assert 'cookies' in payload assert payload['cookies'] == {'custom2': 'test2'} #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- @pytest.mark.skipif(sys.platform == "win32", reason="Lifecycle hooks order different on Windows (TODO open issue)") def test__lifecycle_hooks(ManagedServerLoop: MSL) -> None: application = Application() handler = HookTestHandler() application.add(handler) with ManagedServerLoop(application, check_unused_sessions_milliseconds=30) as server: client_session = pull_session(session_id=ID("test__lifecycle_hooks"), url=url(server), io_loop=server.io_loop) 
client_doc = client_session.document assert len(client_doc.roots) == 1 server_session = server.get_session('/', client_session.id) server_doc = server_session.document assert len(server_doc.roots) == 1 # save for later, since doc.roots will be emptied after the session is closed client_hook_list = list(client_doc.roots[0].hooks) server_hook_list = list(server_doc.roots[0].hooks) client_session.close() # expire the session quickly rather than after the usual timeout server_session.request_expiration() server.io_loop.call_later(0.1, lambda: server.io_loop.stop()) server.io_loop.start() assert handler.hooks == [ "server_loaded", "session_created", "modify", "next_tick", "timeout", "periodic", "session_destroyed", "server_unloaded", ] assert handler.load_count == 1 assert handler.unload_count == 1 # 3 instead of 6, because locked callbacks on destroyed sessions become no-ops assert handler.session_creation_async_value == 3 assert client_doc.title == "Modified" assert server_doc.title == "Modified" # only the handler sees "session_destroyed" since the session is shut down at that point. assert client_hook_list == ["session_created", "modify"] assert server_hook_list == ["session_created", "modify"] async def test__request_in_session_context(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: response = await http_get(server.io_loop, url(server) + "?foo=10") html = response.body token = extract_token_from_json(html) sessionid = get_session_id(token) server_session = server.get_session('/', sessionid) server_doc = server_session.document session_context = server_doc.session_context # do we have a request assert session_context.request is not None async def test__request_in_session_context_has_arguments(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: response = await http_get(server.io_loop, url(server) + "?foo=10") html = response.body token = extract_token_from_json(html) sessionid = get_session_id(token) server_session = server.get_session('/', sessionid) server_doc = server_session.document session_context = server_doc.session_context # test if we can get the argument from the request assert session_context.request.arguments['foo'] == [b'10'] async def test__no_request_arguments_in_session_context(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: response = await http_get(server.io_loop, url(server)) html = response.body token = extract_token_from_json(html) sessionid = get_session_id(token) server_session = server.get_session('/', sessionid) server_doc = server_session.document session_context = server_doc.session_context # if we do not pass any arguments to the url, the request arguments # should be empty assert len(session_context.request.arguments) == 0 @pytest.mark.parametrize("querystring,requested", [ ("", True), ("&resources=default", True), ("&resources=whatever", True), ("&resources=none", False), ]) async def test__resource_files_requested(querystring, requested, ManagedServerLoop: MSL) -> None: """ Checks if the loading of resource files is requested by the autoload.js response based on the value of the "resources" parameter. 
""" application = Application() with ManagedServerLoop(application) as server: response = await http_get(server.io_loop, autoload_url(server) + querystring) resource_files_requested(response.body, requested=requested) async def test__autocreate_session_autoload(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) response = await http_get(server.io_loop, autoload_url(server)) js = response.body token = extract_token_from_json(js) sessionid = get_session_id(token) sessions = server.get_sessions('/') assert 1 == len(sessions) assert sessionid == sessions[0].id async def test__no_set_title_autoload(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) response = await http_get(server.io_loop, autoload_url(server)) js = response.body use_for_title = extract_use_for_title_from_json(js) assert use_for_title == "false" async def test__autocreate_session_doc(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) response = await http_get(server.io_loop, url(server)) html = response.body token = extract_token_from_json(html) sessionid = get_session_id(token) sessions = server.get_sessions('/') assert 1 == len(sessions) assert sessionid == sessions[0].id async def test__no_autocreate_session_websocket(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) token = generate_jwt_token("") await websocket_open(server.io_loop, ws_url(server), subprotocols=["bokeh", token]) sessions = server.get_sessions('/') assert 0 == len(sessions) async def test__use_provided_session_autoload(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) expected = 'foo' response = await http_get(server.io_loop, autoload_url(server) + "&bokeh-session-id=" + expected) js = response.body token = extract_token_from_json(js) sessionid = get_session_id(token) assert expected == sessionid sessions = server.get_sessions('/') assert 1 == len(sessions) assert expected == sessions[0].id async def test__use_provided_session_header_autoload(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) expected = 'foo' response = await http_get(server.io_loop, autoload_url(server), headers={'Bokeh-Session-Id': expected}) js = response.body token = extract_token_from_json(js) sessionid = get_session_id(token) assert expected == sessionid sessions = server.get_sessions('/') assert 1 == len(sessions) assert expected == sessions[0].id async def test__use_provided_session_autoload_token(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) expected = 'foo' expected_token = generate_jwt_token(expected) response = await http_get(server.io_loop, autoload_url(server) + "&bokeh-token=" + expected_token) js = response.body token = extract_token_from_json(js) assert expected_token == token sessionid = get_session_id(token) assert expected == sessionid sessions = 
server.get_sessions('/') assert 1 == len(sessions) assert expected == sessions[0].id async def test__use_provided_session_doc(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) expected = 'foo' response = await http_get(server.io_loop, url(server) + "?bokeh-session-id=" + expected) html = response.body token = extract_token_from_json(html) sessionid = get_session_id(token) assert expected == sessionid sessions = server.get_sessions('/') assert 1 == len(sessions) assert expected == sessions[0].id async def test__use_provided_session_websocket(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) expected = 'foo' token = generate_jwt_token(expected) await websocket_open(server.io_loop, ws_url(server), subprotocols=["bokeh", token]) sessions = server.get_sessions('/') assert 1 == len(sessions) assert expected == sessions[0].id async def test__autocreate_signed_session_autoload(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, sign_sessions=True, secret_key='foo') as server: sessions = server.get_sessions('/') assert 0 == len(sessions) response = await http_get(server.io_loop, autoload_url(server)) js = response.body token = extract_token_from_json(js) sessionid = get_session_id(token) sessions = server.get_sessions('/') assert 1 == len(sessions) assert sessionid == sessions[0].id assert check_token_signature(token, signed=True, secret_key='foo') async def test__autocreate_signed_session_doc(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, sign_sessions=True, secret_key='foo') as server: sessions = server.get_sessions('/') assert 0 == len(sessions) response = await http_get(server.io_loop, url(server)) html = response.body token = extract_token_from_json(html) sessionid = get_session_id(token) sessions = server.get_sessions('/') assert 1 == len(sessions) assert sessionid == sessions[0].id assert check_token_signature(token, signed=True, secret_key='foo') async def test__accept_session_websocket(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, session_token_expiration=10) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) response = await http_get(server.io_loop, url(server)) html = response.body token = extract_token_from_json(html) ws = await websocket_open(server.io_loop, ws_url(server), subprotocols=["bokeh", token]) msg = await ws.read_queue.get() assert isinstance(msg, str) assert 'ACK' in msg async def test__reject_expired_session_websocket(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, session_token_expiration=1) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) response = await http_get(server.io_loop, url(server)) html = response.body token = extract_token_from_json(html) time.sleep(1.1) ws = await websocket_open(server.io_loop, ws_url(server), subprotocols=["bokeh", token]) assert await ws.read_queue.get() is None async def test__reject_wrong_subprotocol_websocket(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) response = await http_get(server.io_loop, url(server)) html = response.body token = 
extract_token_from_json(html) sessions = server.get_sessions('/') assert 1 == len(sessions) ws = await websocket_open(server.io_loop, ws_url(server), subprotocols=["foo", token]) assert await ws.read_queue.get() is None async def test__reject_no_token_websocket(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) await http_get(server.io_loop, url(server)) sessions = server.get_sessions('/') assert 1 == len(sessions) ws = await websocket_open(server.io_loop, ws_url(server), subprotocols=["foo"]) assert await ws.read_queue.get() is None async def test__reject_unsigned_session_autoload(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server: sessions = server.get_sessions('/') assert 0 == len(sessions) expected = 'foo' with (pytest.raises(HTTPError)) as info: await http_get(server.io_loop, autoload_url(server) + "&bokeh-session-id=" + expected) assert 'Invalid token or session ID' in repr(info.value) sessions = server.get_sessions('/') assert 0 == len(sessions) async def test__reject_unsigned_token_autoload(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server: sessions = server.get_sessions('/') assert 0 == len(sessions) expected = 'foo' token = generate_jwt_token(expected) with (pytest.raises(HTTPError)) as info: await http_get(server.io_loop, autoload_url(server) + "&bokeh-token=" + token) assert 'Invalid token or session ID' in repr(info.value) sessions = server.get_sessions('/') assert 0 == len(sessions) async def test__reject_unsigned_session_doc(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server: sessions = server.get_sessions('/') assert 0 == len(sessions) expected = 'foo' with (pytest.raises(HTTPError)) as info: await http_get(server.io_loop, url(server) + "?bokeh-session-id=" + expected) assert 'Invalid token or session ID' in repr(info.value) sessions = server.get_sessions('/') assert 0 == len(sessions) async def test__reject_unsigned_session_header_doc(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server: sessions = server.get_sessions('/') assert 0 == len(sessions) expected = 'foo' with (pytest.raises(HTTPError)) as info: await http_get(server.io_loop, url(server), headers={"Bokeh-Session-Id": expected}) assert 'Invalid token or session ID' in repr(info.value) sessions = server.get_sessions('/') assert 0 == len(sessions) async def test__reject_unsigned_session_websocket(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server: sessions = server.get_sessions('/') assert 0 == len(sessions) expected = 'foo' token = generate_jwt_token(expected) await websocket_open(server.io_loop, ws_url(server), subprotocols=["bokeh", token]) sessions = server.get_sessions('/') assert 0 == len(sessions) async def test__no_generate_session_autoload(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, generate_session_ids=False) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) with (pytest.raises(HTTPError)) as info: await http_get(server.io_loop, autoload_url(server)) 
assert 'No bokeh-session-id provided' in repr(info.value) sessions = server.get_sessions('/') assert 0 == len(sessions) async def test__no_generate_session_doc(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, generate_session_ids=False) as server: sessions = server.get_sessions('/') assert 0 == len(sessions) with (pytest.raises(HTTPError)) as info: await http_get(server.io_loop, url(server)) assert 'No bokeh-session-id provided' in repr(info.value) sessions = server.get_sessions('/') assert 0 == len(sessions) @pytest.mark.skipif(sys.platform == "win32", reason="multiple processes not supported on Windows") def test__server_multiple_processes() -> None: # Can't use an ioloop in this test with mock.patch('tornado.httpserver.HTTPServer.add_sockets'): with mock.patch('tornado.process.fork_processes') as tornado_fp: application = Application() server.Server(application, num_procs=3, port=0) assert tornado_fp.mock_calls == [ mock.call(3, None) if tornado.version_info >= (6,) else mock.call(3), ] def test__existing_ioloop_with_multiple_processes_exception(ManagedServerLoop, event_loop) -> None: application = Application() loop = IOLoop.current() with pytest.raises(RuntimeError): with ManagedServerLoop(application, io_loop=loop, num_procs=3): pass async def test__actual_port_number(ManagedServerLoop: MSL) -> None: application = Application() with ManagedServerLoop(application, port=0) as server: port = server.port assert port > 0 await http_get(server.io_loop, url(server)) def test__ioloop_not_forcibly_stopped() -> None: # Issue #5494 application = Application() loop = IOLoop() loop.make_current() server = Server(application, io_loop=loop) server.start() result = [] def f(): server.unlisten() server.stop() # If server.stop() were to stop the Tornado IO loop, # g() wouldn't be called and `result` would remain empty. loop.add_timeout(timedelta(seconds=0.01), g) def g(): result.append(None) loop.stop() loop.add_callback(f) loop.start() assert result == [None] #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
HookTestHandler
python
great-expectations__great_expectations
great_expectations/data_context/types/base.py
{ "start": 46994, "end": 50799 }
class ____(enum.Enum):
    DEFAULT_CONFIG_VERSION = CURRENT_GX_CONFIG_VERSION
    UNCOMMITTED = "uncommitted"

    DEFAULT_EXPECTATIONS_STORE_NAME = "expectations_store"
    EXPECTATIONS_BASE_DIRECTORY = "expectations"
    DEFAULT_EXPECTATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME = f"{EXPECTATIONS_BASE_DIRECTORY}/"
    DEFAULT_VALIDATIONS_STORE_NAME = "validation_results_store"
    VALIDATIONS_BASE_DIRECTORY = "validations"
    DEFAULT_VALIDATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME = (
        f"{UNCOMMITTED}/{VALIDATIONS_BASE_DIRECTORY}/"
    )
    DEFAULT_VALIDATION_DEFINITION_STORE_NAME = "validation_definition_store"
    VALIDATION_DEFINITIONS_BASE_DIRECTORY = "validation_definitions"
    DEFAULT_VALIDATION_DEFINITION_STORE_BASE_DIRECTORY_RELATIVE_NAME = (
        f"{VALIDATION_DEFINITIONS_BASE_DIRECTORY}/"
    )
    DATA_DOCS_BASE_DIRECTORY = "data_docs"
    DEFAULT_DATA_DOCS_BASE_DIRECTORY_RELATIVE_NAME = f"{UNCOMMITTED}/{DATA_DOCS_BASE_DIRECTORY}"
    # Datasource
    DEFAULT_DATASOURCE_STORE_NAME = "datasource_store"
    # DataAsset
    DEFAULT_DATA_ASSET_STORE_NAME = "data_asset_store"
    # Checkpoints
    DEFAULT_CHECKPOINT_STORE_NAME = "checkpoint_store"
    CHECKPOINTS_BASE_DIRECTORY = "checkpoints"
    DEFAULT_CHECKPOINT_STORE_BASE_DIRECTORY_RELATIVE_NAME = f"{CHECKPOINTS_BASE_DIRECTORY}/"
    DEFAULT_DATA_DOCS_SITE_NAME = "local_site"
    DEFAULT_CONFIG_VARIABLES_FILEPATH = f"{UNCOMMITTED}/config_variables.yml"
    PLUGINS_BASE_DIRECTORY = "plugins"
    DEFAULT_PLUGINS_DIRECTORY = f"{PLUGINS_BASE_DIRECTORY}/"
    DEFAULT_ACTION_LIST = [
        {
            "name": "store_validation_result",
            "action": {"class_name": "StoreValidationResultAction"},
        },
        {
            "name": "update_data_docs",
            "action": {"class_name": "UpdateDataDocsAction"},
        },
    ]
    DEFAULT_EXPECTATIONS_STORE = {
        "class_name": "ExpectationsStore",
        "store_backend": {
            "class_name": "TupleFilesystemStoreBackend",
            "base_directory": DEFAULT_EXPECTATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME,
        },
    }
    DEFAULT_VALIDATIONS_STORE = {
        "class_name": "ValidationResultsStore",
        "store_backend": {
            "class_name": "TupleFilesystemStoreBackend",
            "base_directory": DEFAULT_VALIDATIONS_STORE_BASE_DIRECTORY_RELATIVE_NAME,
        },
    }
    DEFAULT_VALIDATION_DEFINITION_STORE = {
        "class_name": "ValidationDefinitionStore",
        "store_backend": {
            "class_name": "TupleFilesystemStoreBackend",
            "base_directory": DEFAULT_VALIDATION_DEFINITION_STORE_BASE_DIRECTORY_RELATIVE_NAME,
        },
    }
    DEFAULT_CHECKPOINT_STORE = {
        "class_name": "CheckpointStore",
        "store_backend": {
            "class_name": "TupleFilesystemStoreBackend",
            "suppress_store_backend_id": True,
            "base_directory": DEFAULT_CHECKPOINT_STORE_BASE_DIRECTORY_RELATIVE_NAME,
        },
    }
    DEFAULT_STORES = {
        DEFAULT_EXPECTATIONS_STORE_NAME: DEFAULT_EXPECTATIONS_STORE,
        DEFAULT_VALIDATIONS_STORE_NAME: DEFAULT_VALIDATIONS_STORE,
        DEFAULT_VALIDATION_DEFINITION_STORE_NAME: DEFAULT_VALIDATION_DEFINITION_STORE,
        DEFAULT_CHECKPOINT_STORE_NAME: DEFAULT_CHECKPOINT_STORE,
    }
    DEFAULT_DATA_DOCS_SITES = {
        DEFAULT_DATA_DOCS_SITE_NAME: {
            "class_name": "SiteBuilder",
            "show_how_to_buttons": True,
            "store_backend": {
                "class_name": "TupleFilesystemStoreBackend",
                "base_directory": f"{DEFAULT_DATA_DOCS_BASE_DIRECTORY_RELATIVE_NAME}/local_site/",
            },
            "site_index_builder": {
                "class_name": "DefaultSiteIndexBuilder",
            },
        }
    }
DataContextConfigDefaults
python
google__jax
jax/experimental/_private_mm/mm.py
{ "start": 1009, "end": 11435 }
class ____: """A generalization of jax.Array that also supports fully remote arrays.""" aval: jax.core.ShapedArray sharding: Sharding _complete: Callable[[], jax.Array | tuple] | None _result: jax.Array | tuple | None = None def __repr__(self): remote_str = ', fully-remote' if self.is_fully_remote else '' return ( f'MpmdArray({self.aval}, sharding={self.sharding}, ' f'devices={self.sharding.mesh.devices}{remote_str})' ) def block_until_ready(self): if self._complete is None: # Already awaited. assert self._result is not None return result = self._complete() if isinstance(result, jax.Array): # Recv result, store array. self._result = result else: # No-op result or send result. Drop objects kept alive, but register # completion. self._result = () # Drop the closure. self._complete = None return self @cached_property def is_fully_remote(self): return is_fully_remote_sharding(self.sharding) @property def jax_array(self): if self.is_fully_remote: raise ValueError('cannot convert fully-remote MpmdArray to jax.Array') self.block_until_ready() assert isinstance(self._result, jax.Array), ( 'expected non-fully-remote MpmdArray to hold some local data, but got: ' f'{self._result} (mesh devices: {self.sharding.mesh.devices})' ) return self._result @property def shape(self): return self.aval.shape @property def dtype(self): return self.aval.dtype JaxOrMpmdArray = jax.Array | MpmdArray def is_local_device(device) -> bool: return device.process_index == jax.process_index() def is_fully_remote_sharding(sharding: Sharding) -> bool: # TODO: Handle shardings other than NamedSharding? assert isinstance(sharding, NamedSharding) return not any(map(is_local_device, sharding.mesh.devices.flat)) def is_fully_local_sharding(sharding: Sharding) -> bool: # TODO: Handle shardings other than NamedSharding? assert isinstance(sharding, NamedSharding) return all(map(is_local_device, sharding.mesh.devices.flat)) def is_fully_remote_array(arr: JaxOrMpmdArray) -> bool: return isinstance(arr, MpmdArray) and arr.is_fully_remote def as_jax_array(arr: JaxOrMpmdArray) -> jax.Array: if isinstance(arr, MpmdArray): return arr.jax_array assert isinstance(arr, jax.Array) return arr def fix_sharding(sharding: Sharding) -> Sharding: # FIXME: During jax.device_put(..., sharding) jaxlib/XLA fills in a memory # kind if none was explicitly given. We don't always call into # jax.device_put here, but we want to mirror this behavior so that even # processes that don't call jax.device_put end up with the exact same # metadata. (The bandaid below is likely incomplete.) if sharding.memory_kind is None: sharding = sharding.with_memory_kind('device') return sharding @lru_cache def recv_buf_factory(shape, dtype, tgt_sharding): @partial(jax.jit, out_shardings=tgt_sharding) def recv_buf_init(): return jnp.zeros(shape, dtype) return recv_buf_init # TODO: Generalize mm.device_put to mix jax.device_put, send and recv as # needed. For the moment, we only allow cases that neatly fall into one of the # above three cases, i.e. the present process either issue a jax.device_put, # a NCCL send or a NCCL recv. This means that every submesh (e.g. a stage) needs # to be managed by a single process for now. 
def device_put(arr: JaxOrMpmdArray, device: Sharding) -> MpmdArray: assert isinstance(device, Sharding) tgt_sharding = fix_sharding(device) src_sharding = fix_sharding(arr.sharding) def complete_with(complete): return MpmdArray( aval=arr.aval, sharding=tgt_sharding, _complete=complete, ) if is_fully_remote_array(arr): if is_fully_remote_sharding(tgt_sharding): # FullyRemote->FullyRemote: Nothing to be done. return complete_with(lambda: ()) else: # FullyRemote->NonFullyRemote: Recv. # NOTE: We run the same jitted fun on each participating device, # rather than jax.device_put(jnp.zeros(...), tgt_sharding). The # latter produces jnp.zeros first on one local device and then P2P- # copies to the others, which anecdotally appears to be slower, but # also litters the profile, so we avoid it. recv_buf = recv_buf_factory( arr.aval.shape, arr.aval.dtype, tgt_sharding, )() return complete_with( mini_dime.send_or_recv( recv_buf, tgt_sharding, src_sharding, ) ) # arr has some locally-addressable shards. jax_array = as_jax_array(arr) if jax_array.committed: if is_fully_remote_sharding(tgt_sharding): # NonFullyRemote->FullyRemote: Send. # FIXME: Should force completion at some point. return complete_with( mini_dime.send_or_recv( jax_array, tgt_sharding, ) ) elif ( is_fully_local_sharding(src_sharding) and is_fully_local_sharding(tgt_sharding) ): # NonFullyRemote->NonFullyRemote: jax.device_put new_jax_array = jax.device_put(jax_array, tgt_sharding) return complete_with(lambda: new_jax_array) else: # NOTE: We exclude cases of NonFullyRemote -> NonFullyRemote # which would require a mix of jax.device_put, Send and Recv. raise NotImplementedError('unsupported transfer') else: # Uncommitted array. assert isinstance(jax_array.sharding, SingleDeviceSharding) if is_fully_remote_sharding(tgt_sharding): # Uncommitted->FullyRemote: Nothing to be done return complete_with(lambda: ()) else: # Uncommitted->NonFullyRemote: jax.device_put # NOTE: Uncommitted arrays arise when the user hasn't yet specified # a device or sharding, so the current (single-device) sharding is # somewhat arbitrary. # An important assumption here is that, though said device will vary # from process to process, we expect all of the processes to have # the same values. # # Now we'd like to do something like # new_jax_array = jax.device_put(jax_array, tgt_sharding) # where we'd expect jax.device_put to simply simply transfer from # the current local single device to all the other relevant local # devices. # # This unfortunately doesn't work, because jax.device_put will check # the above assumption of same-values-everywhere by introducing a # broadcast from process 0 to all others. But in an MPMD program # only a subset of processes will participate in any given # device_put, so this might lead to hangs! # # We could likely work around this by doing appropriate device_puts # with single-device shardings and subsequently using # jax.make_array_from_single_device_arrays to build a global array. 
if not is_fully_local_sharding(tgt_sharding): raise NotImplementedError('unsupported transfer') new_jax_array = jax.device_put(jax_array, tgt_sharding) return complete_with(lambda: new_jax_array) def jit(*args, **kwargs): if (out_shardings := kwargs.get('out_shardings')) is None: raise ValueError('missing out_shardings') fun = jax.jit(*args, **kwargs) @wraps(fun) def wrapped(*in_vals): first_fully_remote_input = next( ( (path, in_val) for path, in_val in tree_leaves_with_path(in_vals) if is_fully_remote_array(in_val) ), None, ) # This computation does not concern us, return fully-remote arrays. if first_fully_remote_input is not None: out_shape_dtypes = jax.eval_shape(fun, *in_vals) # Allow out_shardings to be a prefix tree try: out_shardings_flat = broadcast_prefix( out_shardings, out_shape_dtypes, is_leaf=lambda x: x is None, # FIXME: Correct? ) except ValueError: e, *_ = prefix_errors(out_shardings, out_shape_dtypes) raise e('mm.jit out_shardings') from None out_shardings_full = jax.tree.unflatten( jax.tree.structure(out_shape_dtypes), out_shardings_flat, ) # Make an MpmdArray for every out value def make_fully_remote_output(shape_dtype, sharding): if not is_fully_remote_sharding(sharding): path, in_val = first_fully_remote_input raise ValueError( 'mm.jit produces a non-fully-remote output, but ' f'was invoked on fully-remote input: {in_val} @ {path}') return MpmdArray( aval=jax.core.ShapedArray( shape_dtype.shape, shape_dtype.dtype, ), sharding=sharding, _complete=lambda: (), ) return jax.tree.map( make_fully_remote_output, out_shape_dtypes, out_shardings_full, ) # This computations concerns us, run the jax.jit-ed function. in_vals = jax.tree.map(as_jax_array, in_vals) out_vals = fun(*in_vals) return jax.tree.map( lambda jax_array: MpmdArray( jax_array.aval, jax_array.sharding, lambda: jax_array, ), out_vals, ) return wrapped
MpmdArray
python
PyCQA__pyflakes
pyflakes/test/test_undefined_names.py
{ "start": 23099, "end": 23544 }
class ____(TestCase):
    """
    Tests for some extra cases of name handling.
    """

    def test_impossibleContext(self):
        """
        A Name node with an unrecognized context results in a RuntimeError
        being raised.
        """
        tree = ast.parse("x = 10")
        # Make it into something unrecognizable.
        tree.body[0].targets[0].ctx = object()
        self.assertRaises(RuntimeError, checker.Checker, tree)
NameTests
python
getlogbook__logbook
src/logbook/handlers.py
{ "start": 62969, "end": 68640 }
class ____(Handler):
    """This handler wraps another handler and will log everything in
    memory until a certain level (`action_level`, defaults to `ERROR`)
    is exceeded.  When that happens the fingers crossed handler will
    activate forever and log all buffered records as well as records yet
    to come into another handled which was passed to the constructor.

    Alternatively it's also possible to pass a factory function to the
    constructor instead of a handler.  That factory is then called with
    the triggering log entry and the finger crossed handler to create
    a handler which is then cached.

    The idea of this handler is to enable debugging of live systems.  For
    example it might happen that code works perfectly fine 99% of the time,
    but then some exception happens.  But the error that caused the
    exception alone might not be the interesting bit, the interesting
    information were the warnings that lead to the error.

    Here a setup that enables this for a web application::

        from logbook import FileHandler
        from logbook import FingersCrossedHandler

        def issue_logging():
            def factory(record, handler):
                return FileHandler("/var/log/app/issue-%s.log" % record.time)

            return FingersCrossedHandler(factory)

        def application(environ, start_response):
            with issue_logging():
                return the_actual_wsgi_application(environ, start_response)

    Whenever an error occours, a new file in ``/var/log/app`` is created
    with all the logging calls that lead up to the error up to the point
    where the `with` block is exited.

    Please keep in mind that the :class:`~logbook.FingersCrossedHandler`
    handler is a one-time handler.  Once triggered, it will not reset.
    Because of that you will have to re-create it whenever you bind it.
    In this case the handler is created when it's bound to the thread.

    Due to how the handler is implemented, the filter, bubble and level
    flags of the wrapped handler are ignored.

    .. versionchanged:: 0.3

       The default behaviour is to buffer up records and then invoke another
       handler when a severity threshold was reached with the buffer emitting.
       This now enables this logger to be properly used with the
       :class:`~logbook.MailHandler`.  You will now only get one mail for
       each buffered record.  However once the threshold was reached you
       would still get a mail for each record which is why the `reset` flag
       was added.

       When set to `True`, the handler will instantly reset to the
       untriggered state and start buffering again::

           handler = FingersCrossedHandler(MailHandler(...),
                                           buffer_size=10, reset=True)

    .. versionadded:: 0.3
       The `reset` flag was added.
    """

    #: the reason to be used for the batch emit.  The default is
    #: ``'escalation'``.
    #:
    #: .. versionadded:: 0.3
    batch_emit_reason = "escalation"

    def __init__(
        self,
        handler,
        action_level=ERROR,
        buffer_size=0,
        pull_information=True,
        reset=False,
        filter=None,
        bubble=False,
    ):
        Handler.__init__(self, NOTSET, filter, bubble)
        self.lock = _new_fine_grained_lock()
        self._level = action_level
        if isinstance(handler, Handler):
            self._handler = handler
            self._handler_factory = None
        else:
            self._handler = None
            self._handler_factory = handler
        #: the buffered records of the handler.  Once the action is triggered
        #: (:attr:`triggered`) this list will be None.  This attribute can
        #: be helpful for the handler factory function to select a proper
        #: filename (for example time of first log record)
        self.buffered_records = deque()
        #: the maximum number of entries in the buffer.  If this is exhausted
        #: the oldest entries will be discarded to make place for new ones
        self.buffer_size = buffer_size
        self._buffer_full = False
        self._pull_information = pull_information
        self._action_triggered = False
        self._reset = reset

    def close(self):
        if self._handler is not None:
            self._handler.close()

    def enqueue(self, record):
        if self._pull_information:
            record.pull_information()
        if self._action_triggered:
            self._handler.emit(record)
        else:
            self.buffered_records.append(record)
            if self._buffer_full:
                self.buffered_records.popleft()
            elif self.buffer_size and len(self.buffered_records) >= self.buffer_size:
                self._buffer_full = True
            return record.level >= self._level
        return False

    def rollover(self, record):
        if self._handler is None:
            self._handler = self._handler_factory(record, self)
        self._handler.emit_batch(iter(self.buffered_records), "escalation")
        self.buffered_records.clear()
        self._action_triggered = not self._reset

    @property
    def triggered(self):
        """This attribute is `True` when the action was triggered.  From
        this point onwards the finger crossed handler transparently
        forwards all log records to the inner handler.  If the handler
        resets itself this will always be `False`.
        """
        return self._action_triggered

    def emit(self, record):
        self.lock.acquire()
        try:
            if self.enqueue(record):
                self.rollover(record)
        finally:
            self.lock.release()
FingersCrossedHandler
python
django__django
tests/model_inheritance/models.py
{ "start": 4355, "end": 4451 }
class ____(models.Model):
    id = models.IntegerField(primary_key=True, default=1)
CommonAncestor
python
kamyu104__LeetCode-Solutions
Python/substrings-that-begin-and-end-with-the-same-letter.py
{ "start": 50, "end": 377 }
class ____(object):
    def numberOfSubstrings(self, s):
        """
        :type s: str
        :rtype: int
        """
        result = 0
        cnt = collections.Counter()
        for c in s:
            cnt[c] += 1
            result += cnt[c]
        return result


# Time: O(n)
# Space: O(1)
import collections
Solution
python
tensorflow__tensorflow
tensorflow/python/debug/cli/command_parser_test.py
{ "start": 910, "end": 3907 }
class ____(test_util.TensorFlowTestCase):

  def testParseNoBracketsOrQuotes(self):
    command = ""
    self.assertEqual([], command_parser.parse_command(command))

    command = "a"
    self.assertEqual(["a"], command_parser.parse_command(command))

    command = "foo bar baz qux"
    self.assertEqual(["foo", "bar", "baz", "qux"],
                     command_parser.parse_command(command))

    command = "foo bar\tbaz\t qux"
    self.assertEqual(["foo", "bar", "baz", "qux"],
                     command_parser.parse_command(command))

  def testParseLeadingTrailingWhitespaces(self):
    command = " foo bar baz qux "
    self.assertEqual(["foo", "bar", "baz", "qux"],
                     command_parser.parse_command(command))

    command = "\nfoo bar baz qux\n"
    self.assertEqual(["foo", "bar", "baz", "qux"],
                     command_parser.parse_command(command))

  def testParseCommandsWithBrackets(self):
    command = "pt foo[1, 2, :]"
    self.assertEqual(["pt", "foo[1, 2, :]"],
                     command_parser.parse_command(command))

    command = "pt foo[1, 2, :] -a"
    self.assertEqual(["pt", "foo[1, 2, :]", "-a"],
                     command_parser.parse_command(command))

    command = "inject_value foo [1, 2,:] 0"
    self.assertEqual(["inject_value", "foo", "[1, 2,:]", "0"],
                     command_parser.parse_command(command))

  def testParseCommandWithTwoArgsContainingBrackets(self):
    command = "pt foo[1, :] bar[:, 2]"
    self.assertEqual(["pt", "foo[1, :]", "bar[:, 2]"],
                     command_parser.parse_command(command))

    command = "pt foo[] bar[:, 2]"
    self.assertEqual(["pt", "foo[]", "bar[:, 2]"],
                     command_parser.parse_command(command))

  def testParseCommandWithUnmatchedBracket(self):
    command = "pt foo[1, 2, :"
    self.assertNotEqual(["pt", "foo[1, 2, :]"],
                        command_parser.parse_command(command))

  def testParseCommandsWithQuotes(self):
    command = "inject_value foo \"np.zeros([100, 500])\""
    self.assertEqual(["inject_value", "foo", "np.zeros([100, 500])"],
                     command_parser.parse_command(command))
    # The pair of double quotes should have been stripped.

    command = "inject_value foo 'np.zeros([100, 500])'"
    self.assertEqual(["inject_value", "foo", "np.zeros([100, 500])"],
                     command_parser.parse_command(command))
    # The pair of single quotes should have been stripped.

    command = "\"command prefix with spaces\" arg1"
    self.assertEqual(["command prefix with spaces", "arg1"],
                     command_parser.parse_command(command))

  def testParseCommandWithTwoArgsContainingQuotes(self):
    command = "foo \"bar\" \"qux\""
    self.assertEqual(["foo", "bar", "qux"],
                     command_parser.parse_command(command))

    command = "foo \"\" \"qux\""
    self.assertEqual(["foo", "", "qux"],
                     command_parser.parse_command(command))
ParseCommandTest
python
matplotlib__matplotlib
lib/matplotlib/backends/backend_gtk3.py
{ "start": 21811, "end": 22384 }
class ____(backend_tools.ToolCopyToClipboardBase):
    def trigger(self, *args, **kwargs):
        clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
        window = self.canvas.get_window()
        x, y, width, height = window.get_geometry()
        pb = Gdk.pixbuf_get_from_window(window, x, y, width, height)
        clipboard.set_image(pb)


Toolbar = ToolbarGTK3


backend_tools._register_tool_class(
    FigureCanvasGTK3, _backend_gtk.ConfigureSubplotsGTK)
backend_tools._register_tool_class(
    FigureCanvasGTK3, _backend_gtk.RubberbandGTK)
ToolCopyToClipboardGTK3
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/event/attr.py
{ "start": 12924, "end": 16682 }
class ____(_InstanceLevelDispatch[_ET]):
    __slots__ = (
        "_exec_once_mutex",
        "_exec_once",
        "_exec_w_sync_once",
        "_is_asyncio",
    )

    _exec_once_mutex: Optional[_MutexProtocol]
    parent_listeners: Collection[_ListenerFnType]
    listeners: Collection[_ListenerFnType]
    _exec_once: bool
    _exec_w_sync_once: bool

    def __init__(self, *arg: Any, **kw: Any):
        super().__init__(*arg, **kw)
        self._is_asyncio = False

    def _set_asyncio(self) -> None:
        self._is_asyncio = True

    def _get_exec_once_mutex(self) -> _MutexProtocol:
        with util.mini_gil:
            if self._exec_once_mutex is not None:
                return self._exec_once_mutex

            if self._is_asyncio:
                mutex = AsyncAdaptedLock()
            else:
                mutex = threading.Lock()  # type: ignore[assignment]

            self._exec_once_mutex = mutex
            return mutex

    def _exec_once_impl(
        self, retry_on_exception: bool, *args: Any, **kw: Any
    ) -> None:
        with self._get_exec_once_mutex():
            if not self._exec_once:
                try:
                    self(*args, **kw)
                    exception = False
                except:
                    exception = True
                    raise
                finally:
                    if not exception or not retry_on_exception:
                        self._exec_once = True

    def exec_once(self, *args: Any, **kw: Any) -> None:
        """Execute this event, but only if it has not been executed
        already for this collection."""

        if not self._exec_once:
            self._exec_once_impl(False, *args, **kw)

    def exec_once_unless_exception(self, *args: Any, **kw: Any) -> None:
        """Execute this event, but only if it has not been executed
        already for this collection, or was called by a previous
        exec_once_unless_exception call and raised an exception.

        If exec_once was already called, then this method will never run
        the callable regardless of whether it raised or not.

        """
        if not self._exec_once:
            self._exec_once_impl(True, *args, **kw)

    def _exec_w_sync_on_first_run(self, *args: Any, **kw: Any) -> None:
        """Execute this event, and use a mutex if it has not been
        executed already for this collection, or was called
        by a previous _exec_w_sync_on_first_run call and raised an
        exception.

        If _exec_w_sync_on_first_run was already called and didn't raise
        an exception, then a mutex is not used.

        It's not guaranteed the mutex won't be used more than once in the
        case of very rare race conditions.

        .. versionadded:: 1.4.11

        """
        if not self._exec_w_sync_once:
            with self._get_exec_once_mutex():
                try:
                    self(*args, **kw)
                except:
                    raise
                else:
                    self._exec_w_sync_once = True
        else:
            self(*args, **kw)

    def __call__(self, *args: Any, **kw: Any) -> None:
        """Execute this event."""

        for fn in self.parent_listeners:
            fn(*args, **kw)
        for fn in self.listeners:
            fn(*args, **kw)

    def __contains__(self, item: Any) -> bool:
        return item in self.parent_listeners or item in self.listeners

    def __len__(self) -> int:
        return len(self.parent_listeners) + len(self.listeners)

    def __iter__(self) -> Iterator[_ListenerFnType]:
        return chain(self.parent_listeners, self.listeners)

    def __bool__(self) -> bool:
        return bool(self.listeners or self.parent_listeners)
_CompoundListener
python
scikit-image__scikit-image
benchmarks/benchmark_segmentation.py
{ "start": 2454, "end": 3696 }
class ____(SlicSegmentation):
    """Benchmark for segmentation routines in scikit-image."""

    def setup(self):
        try:
            mask = np.zeros((64, 64)) > 0
            mask[10:-10, 10:-10] = 1
            segmentation.slic(np.ones_like(mask), mask=mask, **_channel_kwarg(False))
        except TypeError:
            raise NotImplementedError("masked slic unavailable")
        self.image = np.random.random((200, 200, 100))
        self.image[:100, :100, :] += 1
        self.image[150:, 150:, :] += 0.5
        self.msk = np.zeros((200, 200, 100))
        self.msk[10:-10, 10:-10, 10:-10] = 1
        self.msk_slice = self.msk[..., 50]
        if Version(skimage.__version__) >= Version('0.17.0'):
            self.slic_kwargs = dict(start_label=1)
        else:
            self.slic_kwargs = {}

    def time_mask_slic(self):
        segmentation.slic(
            self.image,
            enforce_connectivity=False,
            mask=self.msk,
            **_channel_kwarg(False),
        )

    def time_mask_slic_multichannel(self):
        segmentation.slic(
            self.image,
            enforce_connectivity=False,
            mask=self.msk_slice,
            **_channel_kwarg(True),
        )
MaskSlicSegmentation
python
django__django
tests/forms_tests/tests/test_forms.py
{ "start": 231904, "end": 232143 }
class ____(BoundField):
    def label_tag(self, contents=None, attrs=None, label_suffix=None, tag=None):
        return super().label_tag(
            contents=contents, attrs=attrs, label_suffix="", tag=None
        )
BoundFieldWithoutColon
python
mlflow__mlflow
mlflow/telemetry/schemas.py
{ "start": 1633, "end": 1708 }
class ____:
    ingestion_url: str
    disable_events: set[str]
TelemetryConfig
python
vyperlang__vyper
vyper/codegen/function_definitions/common.py
{ "start": 746, "end": 2699 }
class ____:
    func_t: ContractFunctionT
    gas_estimate: Optional[int] = None
    frame_info: Optional[FrameInfo] = None
    func_ir: Optional["InternalFuncIR"] = None

    @property
    def visibility(self):
        return "internal" if self.func_t.is_internal else "external"

    @property
    def exit_sequence_label(self) -> str:
        return self.ir_identifier + "_cleanup"

    @cached_property
    def ir_identifier(self) -> str:
        argz = ",".join([str(argtyp) for argtyp in self.func_t.argument_types])

        name = self.func_t.name
        function_id = self.func_t._function_id
        assert function_id is not None

        # include module id in the ir identifier to disambiguate functions
        # with the same name but which come from different modules
        return f"{self.visibility} {function_id} {name}({argz})"

    def set_frame_info(self, frame_info: FrameInfo) -> None:
        # XXX: when can this happen?
        if self.frame_info is not None:
            assert frame_info == self.frame_info
        else:
            self.frame_info = frame_info

    def set_func_ir(self, func_ir: "InternalFuncIR") -> None:
        assert self.func_t.is_internal or self.func_t.is_deploy
        self.func_ir = func_ir

    @property
    # common entry point for external function with kwargs
    def external_function_base_entry_label(self) -> str:
        assert not self.func_t.is_internal, "uh oh, should be external"
        return self.ir_identifier + "_common"

    def internal_function_label(self, is_ctor_context: bool = False) -> str:
        f = self.func_t
        assert f.is_internal or f.is_constructor, "uh oh, should be internal"

        if f.is_constructor:
            # sanity check - imported init functions only callable from main init
            assert is_ctor_context

        suffix = "_deploy" if is_ctor_context else "_runtime"
        return self.ir_identifier + suffix


@dataclass
_FuncIRInfo
python
has2k1__plotnine
plotnine/themes/themeable.py
{ "start": 38550, "end": 38946 }
class ____(themeable):
    """
    Horizontal major grid lines

    Parameters
    ----------
    theme_element : element_line
    """

    def apply_ax(self, ax: Axes):
        super().apply_ax(ax)
        ax.yaxis.grid(which="major", **blend_alpha(self.properties))

    def blank_ax(self, ax: Axes):
        super().blank_ax(ax)
        ax.grid(False, which="major", axis="y")
panel_grid_major_y
python
plotly__plotly.py
plotly/graph_objs/isosurface/slices/_x.py
{ "start": 233, "end": 5303 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "isosurface.slices" _path_str = "isosurface.slices.x" _valid_props = {"fill", "locations", "locationssrc", "show"} @property def fill(self): """ Sets the fill ratio of the `slices`. The default fill value of the `slices` is 1 meaning that they are entirely shaded. On the other hand Applying a `fill` ratio less than one would allow the creation of openings parallel to the edges. The 'fill' property is a number and may be specified as: - An int or float in the interval [0, 1] Returns ------- int|float """ return self["fill"] @fill.setter def fill(self, val): self["fill"] = val @property def locations(self): """ Specifies the location(s) of slices on the axis. When not specified slices would be created for all points of the axis x except start and end. The 'locations' property is an array that may be specified as a tuple, list, numpy array, or pandas Series Returns ------- numpy.ndarray """ return self["locations"] @locations.setter def locations(self, val): self["locations"] = val @property def locationssrc(self): """ Sets the source reference on Chart Studio Cloud for `locations`. The 'locationssrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["locationssrc"] @locationssrc.setter def locationssrc(self, val): self["locationssrc"] = val @property def show(self): """ Determines whether or not slice planes about the x dimension are drawn. The 'show' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["show"] @show.setter def show(self, val): self["show"] = val @property def _prop_descriptions(self): return """\ fill Sets the fill ratio of the `slices`. The default fill value of the `slices` is 1 meaning that they are entirely shaded. On the other hand Applying a `fill` ratio less than one would allow the creation of openings parallel to the edges. locations Specifies the location(s) of slices on the axis. When not specified slices would be created for all points of the axis x except start and end. locationssrc Sets the source reference on Chart Studio Cloud for `locations`. show Determines whether or not slice planes about the x dimension are drawn. """ def __init__( self, arg=None, fill=None, locations=None, locationssrc=None, show=None, **kwargs, ): """ Construct a new X object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.isosurface.slices.X` fill Sets the fill ratio of the `slices`. The default fill value of the `slices` is 1 meaning that they are entirely shaded. On the other hand Applying a `fill` ratio less than one would allow the creation of openings parallel to the edges. locations Specifies the location(s) of slices on the axis. When not specified slices would be created for all points of the axis x except start and end. locationssrc Sets the source reference on Chart Studio Cloud for `locations`. show Determines whether or not slice planes about the x dimension are drawn. 
Returns ------- X """ super().__init__("x") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.isosurface.slices.X constructor must be a dict or an instance of :class:`plotly.graph_objs.isosurface.slices.X`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("fill", arg, fill) self._set_property("locations", arg, locations) self._set_property("locationssrc", arg, locationssrc) self._set_property("show", arg, show) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
X
python
apache__airflow
task-sdk/src/airflow/sdk/api/datamodels/_generated.py
{ "start": 4994, "end": 5511 }
class ____(BaseModel):
    """
    Schema for response with previous successful DagRun information for Task Template Context.
    """

    data_interval_start: Annotated[AwareDatetime | None, Field(title="Data Interval Start")] = None
    data_interval_end: Annotated[AwareDatetime | None, Field(title="Data Interval End")] = None
    start_date: Annotated[AwareDatetime | None, Field(title="Start Date")] = None
    end_date: Annotated[AwareDatetime | None, Field(title="End Date")] = None
PrevSuccessfulDagRunResponse
python
huggingface__transformers
src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py
{ "start": 1339, "end": 14384 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`MMGroundingDinoModel`]. It is used to instantiate a MM Grounding DINO model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MM Grounding DINO tiny architecture [openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det](https://huggingface.co/openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: backbone_config (`PreTrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, defaults to `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `BertConfig`): The config object or dictionary of the text backbone. num_queries (`int`, *optional*, defaults to 900): Number of object queries, i.e. detection slots. This is the maximal number of objects [`MMGroundingDinoModel`] can detect in a single image. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether the model is used as an encoder/decoder or not. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. 
auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. num_feature_levels (`int`, *optional*, defaults to 4): The number of input feature levels. encoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the encoder. decoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the decoder. two_stage (`bool`, *optional*, defaults to `True`): Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of Grounding DINO, which are further fed into the decoder for iterative bounding box refinement. class_cost (`float`, *optional*, defaults to 1.0): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5.0): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2.0): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. bbox_loss_coefficient (`float`, *optional*, defaults to 5.0): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2.0): Relative weight of the generalized IoU loss in the object detection loss. focal_alpha (`float`, *optional*, defaults to 0.25): Alpha parameter in the focal loss. disable_custom_kernels (`bool`, *optional*, defaults to `False`): Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom kernels are not supported by PyTorch ONNX export. max_text_len (`int`, *optional*, defaults to 256): The maximum length of the text input. text_enhancer_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the text enhancer. fusion_droppath (`float`, *optional*, defaults to 0.1): The droppath ratio for the fusion module. fusion_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the fusion module. embedding_init_target (`bool`, *optional*, defaults to `True`): Whether to initialize the target with Embedding weights. query_dim (`int`, *optional*, defaults to 4): The dimension of the query vector. positional_embedding_temperature (`float`, *optional*, defaults to 20): The temperature for Sine Positional Embedding that is used together with vision backbone. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. 
Examples: ```python >>> from transformers import MMGroundingDinoConfig, MMGroundingDinoModel >>> # Initializing a MM Grounding DINO configuration >>> configuration = MMGroundingDinoConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = MMGroundingDinoModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mm-grounding-dino" sub_configs = {"backbone_config": AutoConfig, "text_config": AutoConfig} attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, text_config=None, num_queries=900, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=True, class_cost=1.0, bbox_cost=5.0, giou_cost=2.0, bbox_loss_coefficient=5.0, giou_loss_coefficient=2.0, focal_alpha=0.25, disable_custom_kernels=False, # other parameters max_text_len=256, text_enhancer_dropout=0.0, fusion_droppath=0.1, fusion_dropout=0.0, embedding_init_target=True, query_dim=4, positional_embedding_temperature=20, init_std=0.02, layer_norm_eps=1e-5, **kwargs, ): if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.") backbone_config = CONFIG_MAPPING["swin"]( window_size=7, image_size=224, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], out_indices=[2, 3, 4], ) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.pop("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) if text_config is None: text_config = {} logger.info("text_config is None. 
Initializing the text config with default values (`BertConfig`).") self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type # deformable attributes self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points self.two_stage = two_stage # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.focal_alpha = focal_alpha self.disable_custom_kernels = disable_custom_kernels # Text backbone if isinstance(text_config, dict): text_config["model_type"] = text_config.get("model_type", "bert") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["bert"]() self.text_config = text_config self.max_text_len = max_text_len # Text Enhancer self.text_enhancer_dropout = text_enhancer_dropout # Fusion self.fusion_droppath = fusion_droppath self.fusion_dropout = fusion_dropout # Others self.embedding_init_target = embedding_init_target self.query_dim = query_dim self.positional_embedding_temperature = positional_embedding_temperature self.init_std = init_std self.layer_norm_eps = layer_norm_eps super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) self.tie_encoder_decoder = True
MMGroundingDinoConfig
python
spyder-ide__spyder
spyder/app/tests/script_outline_2.py
{ "start": 111, "end": 218 }
class ____:
    D = 1

    def three(self):
        return 3

    def four(self):
        return 4
MyOtherClass
python
PrefectHQ__prefect
src/prefect/client/schemas/sorting.py
{ "start": 1384, "end": 1687 }
class ____(AutoEnum):
    """Defines deployment sorting options."""

    CREATED_DESC = AutoEnum.auto()
    UPDATED_DESC = AutoEnum.auto()
    NAME_ASC = AutoEnum.auto()
    NAME_DESC = AutoEnum.auto()
    CONCURRENCY_LIMIT_ASC = AutoEnum.auto()
    CONCURRENCY_LIMIT_DESC = AutoEnum.auto()
DeploymentSort
python
dagster-io__dagster
python_modules/libraries/dagster-dbt/dagster_dbt/core/dbt_cli_event.py
{ "start": 8213, "end": 23827 }
class ____(ABC): """The representation of a dbt CLI event. Args: raw_event (Dict[str, Any]): The raw event dictionary. See https://docs.getdbt.com/reference/events-logging#structured-logging for more information. event_history_metadata (Dict[str, Any]): A dictionary of metadata about the current event, gathered from previous historical events. """ raw_event: dict[str, Any] event_history_metadata: InitVar[dict[str, Any]] def __post_init__(self, event_history_metadata: dict[str, Any]): self._event_history_metadata = event_history_metadata def __str__(self) -> str: return self.raw_event["info"]["msg"] @property def has_column_lineage_metadata(self) -> bool: """Whether the event has column level lineage metadata.""" return bool(self._event_history_metadata) and "parents" in self._event_history_metadata @cached_property def _unique_id(self) -> str: return self.raw_event["data"]["node_info"]["unique_id"] @cached_property def _raw_data(self) -> Mapping[str, Any]: return self.raw_event["data"] @cached_property def _raw_node_info(self) -> Mapping[str, Any]: return self.raw_event["data"]["node_info"] @property @abstractmethod def is_result_event(self) -> bool: ... def _is_model_execution_event(self, manifest: Mapping[str, Any]) -> bool: resource_props = self._get_resource_props(self._unique_id, manifest) materialized_type = ( # check event info self._raw_node_info.get("materialized") # then top-level props or resource_props.get("materialized") # then config or resource_props.get("config", {}).get("materialized") ) return ( resource_props["resource_type"] in REFABLE_NODE_TYPES and materialized_type != "ephemeral" and self._get_node_status() == NodeStatus.Success ) def _is_test_execution_event(self, manifest: Mapping[str, Any]) -> bool: resource_props = self._get_resource_props(self._unique_id, manifest) return resource_props["resource_type"] == NodeType.Test def _get_resource_props(self, unique_id: str, manifest: Mapping[str, Any]) -> dict[str, Any]: return manifest["nodes"][unique_id] def _get_execution_duration_metadata(self) -> Mapping[str, float]: raw_started_at = self._raw_node_info.get("node_started_at") raw_finished_at = self._raw_node_info.get("node_finished_at") has_started_at = raw_started_at not in _EMPTY_VALUES has_finished_at = raw_finished_at not in _EMPTY_VALUES if has_started_at and has_finished_at: started_at = dateutil.parser.isoparse(cast("str", raw_started_at)) finished_at = dateutil.parser.isoparse(cast("str", raw_finished_at)) duration = (finished_at - started_at).total_seconds() return {"Execution Duration": (finished_at - started_at).total_seconds()} else: # if model materialization is incremental microbatch, node_started_at and # node_finished_at are empty strings and require fallback to data.execution_time duration = self._raw_data.get("execution_time") return {"Execution Duration": duration} if duration else {} ############### # MODEL PARSING ############### def _get_column_schema_metadata(self, manifest: Mapping[str, Any]) -> Mapping[str, Any]: try: return default_metadata_from_dbt_resource_props(self._event_history_metadata) except Exception as e: logger.warning( "An error occurred while building column schema metadata from event history" f" `{self._event_history_metadata}` for the dbt resource" f" `{self._get_resource_props(self._unique_id, manifest)['original_file_path']}`." 
" Column schema metadata will not be included in the event.\n\n" f"Exception: {e}", exc_info=True, ) return {} def _get_default_metadata(self, manifest: Mapping[str, Any]) -> dict[str, Any]: return { **self._get_column_schema_metadata(manifest), **self._get_execution_duration_metadata(), "unique_id": self._unique_id, "invocation_id": self.raw_event["info"]["invocation_id"], } def _get_node_status(self) -> str: # if model materialization is incremental microbatch, node_status # property is "None", hence fall back to status raw_node_status = self._raw_node_info.get("node_status") return ( raw_node_status if raw_node_status and raw_node_status not in _EMPTY_VALUES else self._raw_data["status"].lower() ) def _get_lineage_metadata( self, translator: DagsterDbtTranslator, manifest: Mapping[str, Any], target_path: Optional[Path], ) -> Mapping[str, Any]: try: column_data = self._event_history_metadata.get("columns", {}) parent_column_data = { parent_key: parent_data["columns"] for parent_key, parent_data in self._event_history_metadata.get( "parents", {} ).items() } # Column lineage can only be built if initial metadata is provided. if self.has_column_lineage_metadata: return _build_column_lineage_metadata( event_history_metadata=EventHistoryMetadata( columns=column_data, parents=parent_column_data ), dbt_resource_props=self._get_resource_props(self._unique_id, manifest), manifest=manifest, dagster_dbt_translator=translator, target_path=target_path, ) except Exception as e: logger.warning( "An error occurred while building column lineage metadata for the dbt resource" f" `{self._get_resource_props(self._unique_id, manifest)['original_file_path']}`." " Lineage metadata will not be included in the event.\n\n" f"Exception: {e}", exc_info=True, ) return {} def _get_materialization_metadata( self, translator: DagsterDbtTranslator, manifest: Mapping[str, Any], target_path: Optional[Path], ) -> dict[str, Any]: return { **self._get_default_metadata(manifest), **self._get_lineage_metadata(translator, manifest, target_path), } def _to_model_events( self, manifest: Mapping[str, Any], dagster_dbt_translator: DagsterDbtTranslator, context: Optional[Union[OpExecutionContext, AssetExecutionContext]], target_path: Optional[Path], project: Optional[DbtProject], ) -> Iterator[Union[Output, AssetMaterialization]]: asset_key = dagster_dbt_translator.get_asset_spec(manifest, self._unique_id, project).key metadata = self._get_materialization_metadata(dagster_dbt_translator, manifest, target_path) if context and context.has_assets_def: yield Output( value=None, output_name=asset_key.to_python_identifier(), metadata=metadata ) else: yield AssetMaterialization(asset_key=asset_key, metadata=metadata) ############## # TEST PARSING ############## def _get_check_execution_metadata(self, manifest: Mapping[str, Any]) -> dict[str, Any]: failure_count = self._raw_data.get("num_failures") return { **self._get_default_metadata(manifest), "status": self._get_node_status(), **({} if failure_count is None else {"dagster_dbt/failed_row_count": failure_count}), } @abstractmethod def _get_check_passed(self) -> bool: ... @abstractmethod def _get_check_severity(self) -> AssetCheckSeverity: ... 
def _get_check_properties( self, key: AssetCheckKey, manifest: Mapping[str, Any] ) -> CheckProperties: return CheckProperties( passed=self._get_check_passed(), asset_key=key.asset_key, check_name=key.name, severity=self._get_check_severity(), metadata=self._get_check_execution_metadata(manifest), ) def _get_result_check_keys( self, context: Optional[Union[OpExecutionContext, AssetExecutionContext]] ) -> AbstractSet[AssetCheckKey]: """Returns the set of check keys for which we should emit AssetCheckResult events.""" if context is None or not context.has_assets_def: return set() return { *context.selected_asset_check_keys, *get_checks_on_sources_upstream_of_selected_assets( assets_def=context.assets_def, selected_asset_keys=context.selected_asset_keys, ), } def _to_observation_events_for_test( self, key: Optional[AssetCheckKey], dagster_dbt_translator: DagsterDbtTranslator, validated_manifest: Mapping[str, Any], metadata: Mapping[str, Any], ) -> Iterator[AssetObservation]: resource_props = self._get_resource_props(self._unique_id, validated_manifest) message = None # dbt's default indirect selection (eager) will select relationship tests # on unselected assets, if they're compared with a selected asset. # This doesn't match Dagster's default check selection which is to only # select checks on selected assets. When we use eager, we may receive # unexpected test results so we log those as observations as if # asset checks were disabled. if dagster_dbt_translator.settings.enable_asset_checks: # If the test did not have an asset key associated with it, it was a singular # test with multiple dependencies without a configured asset key. test_name = resource_props["name"] additional_message = ( ( f"`{test_name}` is a singular test with multiple dependencies." " Configure an asset key in the test's dbt meta to load it as an" " asset check.\n\n" ) if not key else "" ) message = ( "Logging an `AssetObservation` instead of an `AssetCheckResult`" f" for dbt test `{test_name}`.\n\n" f"{additional_message}" "This test was not included in Dagster's asset check" " selection, and was likely executed due to dbt indirect selection." 
) logger.warning(message) for upstream_unique_id in resource_props["depends_on"]["nodes"]: upstream_resource_props: dict[str, Any] = validated_manifest["nodes"].get( upstream_unique_id ) or validated_manifest["sources"].get(upstream_unique_id) upstream_asset_key = dagster_dbt_translator.get_asset_key(upstream_resource_props) yield AssetObservation( asset_key=upstream_asset_key, metadata=metadata, description=message ) def _to_test_events( self, manifest: Mapping[str, Any], translator: DagsterDbtTranslator, project: Optional[DbtProject], context: Optional[Union[OpExecutionContext, AssetExecutionContext]], ) -> Iterator[Union[AssetCheckResult, AssetCheckEvaluation, AssetObservation]]: """Converts a dbt CLI event to a set of Dagster events corresponding to a test execution.""" key = get_asset_check_key_for_test(manifest, translator, self._unique_id, project=project) has_assets_def = context is not None and context.has_assets_def if key is not None and has_assets_def and key in self._get_result_check_keys(context): # key was expected to be evaluated, use AssetCheckResult properties = self._get_check_properties(key, manifest) yield AssetCheckResult(**properties) return elif key is not None and not has_assets_def: # in an op definition, we don't have an assets def, so we use AssetCheckEvaluation properties = self._get_check_properties(key, manifest) yield AssetCheckEvaluation(**properties) return # fallback case, emit observation events if we have no key to associate with the # test, or if the test was not expected to be evaluated. metadata = self._get_check_execution_metadata(manifest) yield from self._to_observation_events_for_test( key=key, dagster_dbt_translator=translator, validated_manifest=manifest, metadata=metadata, ) @public def to_default_asset_events( self, manifest: DbtManifestParam, dagster_dbt_translator: DagsterDbtTranslator = DagsterDbtTranslator(), context: Optional[Union[OpExecutionContext, AssetExecutionContext]] = None, target_path: Optional[Path] = None, project: Optional[DbtProject] = None, ) -> Iterator[ Union[ Output, AssetMaterialization, AssetObservation, AssetCheckResult, AssetCheckEvaluation ] ]: """Convert a dbt CLI event to a set of corresponding Dagster events. Args: manifest (Union[Mapping[str, Any], str, Path]): The dbt manifest blob. dagster_dbt_translator (DagsterDbtTranslator): Optionally, a custom translator for linking dbt nodes to Dagster assets. context (Optional[Union[OpExecutionContext, AssetExecutionContext]]): The execution context. target_path (Optional[Path]): An explicit path to a target folder used to retrieve dbt artifacts while generating events. Returns: Iterator[Union[Output, AssetMaterialization, AssetObservation, AssetCheckResult, AssetCheckEvaluation]]: A set of corresponding Dagster events. In a Dagster asset definition, the following are yielded: - Output for refables (e.g. models, seeds, snapshots.) - AssetCheckResult for dbt test results that are enabled as asset checks. - AssetObservation for dbt test results that are not enabled as asset checks. In a Dagster op definition, the following are yielded: - AssetMaterialization refables (e.g. models, seeds, snapshots.) - AssetCheckEvaluation for dbt test results that are enabled as asset checks. - AssetObservation for dbt test results that are not enabled as asset checks. 
""" if not self.is_result_event: return dagster_dbt_translator = validate_translator(dagster_dbt_translator) manifest = validate_manifest(manifest) if self._is_model_execution_event(manifest): yield from self._to_model_events( manifest, dagster_dbt_translator, context, target_path, project ) if self._is_test_execution_event(manifest): yield from self._to_test_events(manifest, dagster_dbt_translator, project, context)
DbtCliEventMessage
python
tensorflow__tensorflow
tensorflow/python/ops/io_ops.py
{ "start": 15675, "end": 16770 }
class ____(ReaderBase):
  """A Reader that outputs the lines of a file delimited by newlines.

  Newlines are stripped from the output.
  See ReaderBase for supported methods.

  @compatibility(eager)
  Readers are not compatible with eager execution. Instead, please
  use `tf.data` to get data into your model.
  @end_compatibility
  """
  # TODO(josh11b): Support serializing and restoring state.

  @deprecation.deprecated(
      None,
      "Queue-based input pipelines have been replaced by `tf.data`. Use "
      "`tf.data.TextLineDataset`.")
  def __init__(self, skip_header_lines=None, name=None):
    """Create a TextLineReader.

    Args:
      skip_header_lines: An optional int. Defaults to 0. Number of lines
        to skip from the beginning of every file.
      name: A name for the operation (optional).
    """
    rr = gen_io_ops.text_line_reader_v2(skip_header_lines=skip_header_lines,
                                        name=name)
    super(TextLineReader, self).__init__(rr)


ops.NotDifferentiable("TextLineReader")


@tf_export(v1=["FixedLengthRecordReader"])
TextLineReader
python
viewflow__viewflow
tests/components/test_field_checkbox.py
{ "start": 298, "end": 949 }
class ____(LiveTestCase):
    def test_field_input(self):
        self.browser.get(f"{self.live_server_url}/application/form/")
        self.assertNoJsErrors()

        wrapper = self.browser.find_element(By.CSS_SELECTOR, ".mdc-checkbox")
        input = self.browser.find_element(By.CSS_SELECTOR, "vf-field-checkbox input")

        wrapper_classes = wrapper.get_attribute("class").split(" ")
        self.assertNotIn("mdc-checkbox--selected", wrapper_classes)

        input.click()

        wrapper_classes = wrapper.get_attribute("class").split(" ")
        self.assertIn("mdc-checkbox--selected", wrapper_classes)
        self.assertNoJsErrors()
Test
python
huggingface__transformers
src/transformers/models/metaclip_2/modeling_metaclip_2.py
{ "start": 39079, "end": 40629 }
class ____(nn.Module):
    def __init__(self, config: MetaClip2VisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = MetaClip2VisionEmbeddings(config)
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = MetaClip2Encoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        interpolate_pos_encoding: Optional[bool] = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            **kwargs,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
        )


@auto_docstring(
    custom_intro="""
    The vision model from METACLIP_2 without any head or projection on top.
    """
)
MetaClip2VisionTransformer
python
mkdocs__mkdocs
mkdocs/exceptions.py
{ "start": 283, "end": 457 }
class ____(MkDocsException, SystemExit):
    """Abort the build."""

    code = 1

    def show(self, *args, **kwargs) -> None:
        echo('\n' + self.format_message())
Abort
python
PrefectHQ__prefect
tests/server/orchestration/api/test_block_types.py
{ "start": 5974, "end": 11725 }
class ____: @pytest.fixture async def block_types_with_associated_capabilities(self, session): class CanRun(Block): _block_schema_capabilities = ["run"] def run(self): pass class CanFly(Block): _block_schema_capabilities = ["fly"] def fly(self): pass class CanSwim(Block): _block_schema_capabilities = ["swim"] def swim(self): pass class Duck(CanSwim, CanFly, Block): a: str class Bird(CanFly, Block): b: str class Cat(CanRun, Block): c: str block_type_duck = await models.block_types.create_block_type( session=session, block_type=Duck._to_block_type() ) await models.block_schemas.create_block_schema( session=session, block_schema=Duck._to_block_schema(block_type_id=block_type_duck.id), ) block_type_bird = await models.block_types.create_block_type( session=session, block_type=Bird._to_block_type() ) await models.block_schemas.create_block_schema( session=session, block_schema=Bird._to_block_schema(block_type_id=block_type_bird.id), ) block_type_cat = await models.block_types.create_block_type( session=session, block_type=Cat._to_block_type() ) await models.block_schemas.create_block_schema( session=session, block_schema=Cat._to_block_schema(block_type_id=block_type_cat.id), ) await session.commit() return block_type_duck, block_type_bird, block_type_cat async def test_read_block_types( self, client, block_type_x, block_type_y, block_type_z ): response = await client.post("/block_types/filter") assert response.status_code == status.HTTP_200_OK read_block_types = parse_obj_as(List[BlockType], response.json()) assert [block_type.id for block_type in read_block_types] == [ block_type_x.id, block_type_y.id, block_type_z.id, ] async def test_read_block_types_with_limit_and_offset( self, client, block_type_x, block_type_y, block_type_z ): response = await client.post("/block_types/filter", json=dict(limit=2)) assert response.status_code == status.HTTP_200_OK read_block_types = parse_obj_as(List[BlockType], response.json()) assert [block_type.id for block_type in read_block_types] == [ block_type_x.id, block_type_y.id, ] response = await client.post("/block_types/filter", json=dict(offset=2)) assert response.status_code == status.HTTP_200_OK read_block_types = parse_obj_as(List[BlockType], response.json()) assert [block_type.id for block_type in read_block_types] == [ block_type_z.id, ] async def test_read_block_types_filter_by_name( self, client, block_types_with_associated_capabilities ): response = await client.post( "/block_types/filter", json=dict(block_types=dict(name=dict(like_="duck"))) ) assert response.status_code == 200 read_block_types = parse_obj_as(List[BlockType], response.json()) assert len(read_block_types) == 1 assert read_block_types[0].id == block_types_with_associated_capabilities[0].id response = await client.post( "/block_types/filter", json=dict(block_types=dict(name=dict(like_="c"))) ) assert response.status_code == 200 read_block_types = parse_obj_as(List[BlockType], response.json()) assert len(read_block_types) == 2 assert [b.id for b in read_block_types] == [ block_types_with_associated_capabilities[2].id, block_types_with_associated_capabilities[0].id, ] response = await client.post( "/block_types/filter", json=dict(block_types=dict(name=dict(like_="z"))) ) assert response.status_code == 200 read_block_types = parse_obj_as(List[BlockType], response.json()) async def test_read_block_types_filter_by_associated_capability( self, client, block_types_with_associated_capabilities ): response = await client.post( "/block_types/filter", json=dict( 
block_schemas=dict(block_capabilities=dict(all_=["fly", "swim"])) ), ) assert response.status_code == 200 read_block_types = parse_obj_as(List[BlockType], response.json()) assert len(read_block_types) == 1 assert read_block_types[0].id == block_types_with_associated_capabilities[0].id response = await client.post( "/block_types/filter", json=dict(block_schemas=dict(block_capabilities=dict(all_=["fly"]))), ) assert response.status_code == 200 read_block_types = parse_obj_as(List[BlockType], response.json()) assert len(read_block_types) == 2 assert [b.id for b in read_block_types] == [ block_types_with_associated_capabilities[1].id, block_types_with_associated_capabilities[0].id, ] response = await client.post( "/block_types/filter", json=dict(block_schemas=dict(block_capabilities=dict(all_=["swim"]))), ) assert response.status_code == 200 read_block_types = parse_obj_as(List[BlockType], response.json()) assert len(read_block_types) == 1 assert read_block_types[0].id == block_types_with_associated_capabilities[0].id
TestReadBlockTypes
python
PyCQA__pylint
pylint/checkers/base/pass_checker.py
{ "start": 356, "end": 1041 }
class ____(_BasicChecker):
    """Check if the pass statement is really necessary."""

    msgs = {
        "W0107": (
            "Unnecessary pass statement",
            "unnecessary-pass",
            'Used when a "pass" statement can be removed without affecting '
            "the behaviour of the code.",
        )
    }

    @utils.only_required_for_messages("unnecessary-pass")
    def visit_pass(self, node: nodes.Pass) -> None:
        if len(node.parent.child_sequence(node)) > 1 or (
            isinstance(node.parent, (nodes.ClassDef, nodes.FunctionDef))
            and node.parent.doc_node
        ):
            self.add_message("unnecessary-pass", node=node)
PassChecker
python
gevent__gevent
src/greentest/3.14/test_thread.py
{ "start": 937, "end": 10999 }
class ____(BasicThreadTest): def newtask(self): with self.running_mutex: self.next_ident += 1 verbose_print("creating task %s" % self.next_ident) thread.start_new_thread(self.task, (self.next_ident,)) self.created += 1 self.running += 1 def task(self, ident): with self.random_mutex: delay = random.random() / 10000.0 verbose_print("task %s will run for %sus" % (ident, round(delay*1e6))) time.sleep(delay) verbose_print("task %s done" % ident) with self.running_mutex: self.running -= 1 if self.created == NUMTASKS and self.running == 0: self.done_mutex.release() def test_starting_threads(self): with threading_helper.wait_threads_exit(): # Basic test for thread creation. for i in range(NUMTASKS): self.newtask() verbose_print("waiting for tasks to complete...") self.done_mutex.acquire() verbose_print("all tasks done") def test_stack_size(self): # Various stack size tests. self.assertEqual(thread.stack_size(), 0, "initial stack size is not 0") thread.stack_size(0) self.assertEqual(thread.stack_size(), 0, "stack_size not reset to default") @unittest.skipIf(os.name not in ("nt", "posix"), 'test meant for nt and posix') def test_nt_and_posix_stack_size(self): try: thread.stack_size(4096) except ValueError: verbose_print("caught expected ValueError setting " "stack_size(4096)") except thread.error: self.skipTest("platform does not support changing thread stack " "size") fail_msg = "stack_size(%d) failed - should succeed" for tss in (262144, 0x100000, 0): thread.stack_size(tss) self.assertEqual(thread.stack_size(), tss, fail_msg % tss) verbose_print("successfully set stack_size(%d)" % tss) for tss in (262144, 0x100000): verbose_print("trying stack_size = (%d)" % tss) self.next_ident = 0 self.created = 0 with threading_helper.wait_threads_exit(): for i in range(NUMTASKS): self.newtask() verbose_print("waiting for all tasks to complete") self.done_mutex.acquire() verbose_print("all tasks done") thread.stack_size(0) def test__count(self): # Test the _count() function. orig = thread._count() mut = thread.allocate_lock() mut.acquire() started = [] def task(): started.append(None) mut.acquire() mut.release() with threading_helper.wait_threads_exit(): thread.start_new_thread(task, ()) for _ in support.sleeping_retry(support.LONG_TIMEOUT): if started: break self.assertEqual(thread._count(), orig + 1) # Allow the task to finish. mut.release() # The only reliable way to be sure that the thread ended from the # interpreter's point of view is to wait for the function object to # be destroyed. done = [] wr = weakref.ref(task, lambda _: done.append(None)) del task for _ in support.sleeping_retry(support.LONG_TIMEOUT): if done: break support.gc_collect() # For PyPy or other GCs. 
self.assertEqual(thread._count(), orig) def test_unraisable_exception(self): def task(): started.release() raise ValueError("task failed") started = thread.allocate_lock() with support.catch_unraisable_exception() as cm: with threading_helper.wait_threads_exit(): started.acquire() thread.start_new_thread(task, ()) started.acquire() self.assertEqual(str(cm.unraisable.exc_value), "task failed") self.assertIsNone(cm.unraisable.object) self.assertEqual(cm.unraisable.err_msg, f"Exception ignored in thread started by {task!r}") self.assertIsNotNone(cm.unraisable.exc_traceback) def test_join_thread(self): finished = [] def task(): time.sleep(0.05) finished.append(thread.get_ident()) with threading_helper.wait_threads_exit(): handle = thread.start_joinable_thread(task) handle.join() self.assertEqual(len(finished), 1) self.assertEqual(handle.ident, finished[0]) def test_join_thread_already_exited(self): def task(): pass with threading_helper.wait_threads_exit(): handle = thread.start_joinable_thread(task) time.sleep(0.05) handle.join() def test_join_several_times(self): def task(): pass with threading_helper.wait_threads_exit(): handle = thread.start_joinable_thread(task) handle.join() # Subsequent join() calls should succeed handle.join() def test_joinable_not_joined(self): handle_destroyed = thread.allocate_lock() handle_destroyed.acquire() def task(): handle_destroyed.acquire() with threading_helper.wait_threads_exit(): handle = thread.start_joinable_thread(task) del handle handle_destroyed.release() def test_join_from_self(self): errors = [] handles = [] start_joinable_thread_returned = thread.allocate_lock() start_joinable_thread_returned.acquire() task_tried_to_join = thread.allocate_lock() task_tried_to_join.acquire() def task(): start_joinable_thread_returned.acquire() try: handles[0].join() except Exception as e: errors.append(e) finally: task_tried_to_join.release() with threading_helper.wait_threads_exit(): handle = thread.start_joinable_thread(task) handles.append(handle) start_joinable_thread_returned.release() # Can still join after joining failed in other thread task_tried_to_join.acquire() handle.join() assert len(errors) == 1 with self.assertRaisesRegex(RuntimeError, "Cannot join current thread"): raise errors[0] def test_join_then_self_join(self): # make sure we can't deadlock in the following scenario with # threads t0 and t1 (see comment in `ThreadHandle_join()` for more # details): # # - t0 joins t1 # - t1 self joins def make_lock(): lock = thread.allocate_lock() lock.acquire() return lock error = None self_joiner_handle = None self_joiner_started = make_lock() self_joiner_barrier = make_lock() def self_joiner(): nonlocal error self_joiner_started.release() self_joiner_barrier.acquire() try: self_joiner_handle.join() except Exception as e: error = e joiner_started = make_lock() def joiner(): joiner_started.release() self_joiner_handle.join() with threading_helper.wait_threads_exit(): self_joiner_handle = thread.start_joinable_thread(self_joiner) # Wait for the self-joining thread to start self_joiner_started.acquire() # Start the thread that joins the self-joiner joiner_handle = thread.start_joinable_thread(joiner) # Wait for the joiner to start joiner_started.acquire() # Not great, but I don't think there's a deterministic way to make # sure that the self-joining thread has been joined. 
time.sleep(0.1) # Unblock the self-joiner self_joiner_barrier.release() self_joiner_handle.join() joiner_handle.join() with self.assertRaisesRegex(RuntimeError, "Cannot join current thread"): raise error def test_join_with_timeout(self): lock = thread.allocate_lock() lock.acquire() def thr(): lock.acquire() with threading_helper.wait_threads_exit(): handle = thread.start_joinable_thread(thr) handle.join(0.1) self.assertFalse(handle.is_done()) lock.release() handle.join() self.assertTrue(handle.is_done()) def test_join_unstarted(self): handle = thread._ThreadHandle() with self.assertRaisesRegex(RuntimeError, "thread not started"): handle.join() def test_set_done_unstarted(self): handle = thread._ThreadHandle() with self.assertRaisesRegex(RuntimeError, "thread not started"): handle._set_done() def test_start_duplicate_handle(self): lock = thread.allocate_lock() lock.acquire() def func(): lock.acquire() handle = thread._ThreadHandle() with threading_helper.wait_threads_exit(): thread.start_joinable_thread(func, handle=handle) with self.assertRaisesRegex(RuntimeError, "thread already started"): thread.start_joinable_thread(func, handle=handle) lock.release() handle.join() def test_start_with_none_handle(self): def func(): pass with threading_helper.wait_threads_exit(): handle = thread.start_joinable_thread(func, handle=None) handle.join()
ThreadRunningTests
python
streamlit__streamlit
lib/tests/streamlit/elements/markdown_test.py
{ "start": 12487, "end": 14068 }
class ____(DeltaGeneratorTestCase):
    """Test st.markdown text_alignment parameter."""

    @parameterized.expand(
        [
            ("left", 1),
            ("center", 2),
            ("right", 3),
            ("justify", 4),
            (None, 1),  # Default case
        ]
    )
    def test_st_markdown_text_alignment(
        self, text_alignment: str | None, expected_alignment: int
    ):
        """Test st.markdown with various text_alignment values.

        Parameters
        ----------
        text_alignment : str | None
            The text alignment value to test, or None for default behavior.
        expected_alignment : int
            The expected protobuf alignment enum value (1=LEFT, 2=CENTER, 3=RIGHT, 4=JUSTIFY).
        """
        if text_alignment is None:
            st.markdown("Test")
        else:
            st.markdown("Test", text_alignment=text_alignment)

        el = self.get_delta_from_queue().new_element
        assert el.markdown.body == "Test"
        assert el.text_alignment_config.alignment == expected_alignment

    def test_st_markdown_text_alignment_invalid(self):
        """Test st.markdown with invalid text_alignment raises error."""
        with pytest.raises(StreamlitAPIException) as exc:
            st.markdown("Test", text_alignment="invalid")

        assert 'Invalid text_alignment value: "invalid"' in str(exc.value)
        assert "left" in str(exc.value)
        assert "center" in str(exc.value)
        assert "right" in str(exc.value)
        assert "justify" in str(exc.value)
StMarkdownTextAlignmentTest
python
apache__airflow
airflow-ctl/src/airflowctl/api/datamodels/generated.py
{ "start": 26146, "end": 26546 }
class ____(BaseModel):
    """
    Task outlet reference serializer for assets.
    """

    model_config = ConfigDict(
        extra="forbid",
    )
    dag_id: Annotated[str, Field(title="Dag Id")]
    task_id: Annotated[str, Field(title="Task Id")]
    created_at: Annotated[datetime, Field(title="Created At")]
    updated_at: Annotated[datetime, Field(title="Updated At")]
TaskOutletAssetReference
python
kamyu104__LeetCode-Solutions
Python/vowels-of-all-substrings.py
{ "start": 29, "end": 277 }
class ____(object):
    def countVowels(self, word):
        """
        :type word: str
        :rtype: int
        """
        VOWELS = set("aeiou")
        return sum((i-0+1) * ((len(word)-1)-i+1) for i, c in enumerate(word) if c in VOWELS)
Solution
python
sphinx-doc__sphinx
sphinx/addnodes.py
{ "start": 14110, "end": 14238 }
class ____(nodes.Element):
    """Node for specifying tabular columns, used for LaTeX output."""


# inline nodes
tabular_col_spec
python
airbytehq__airbyte
airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_custom_report.py
{ "start": 10152, "end": 17564 }
class ____(HourlyReportsTestWithStateChangesAfterMigration): stream_name = "custom_report" report_file = "custom_report_hourly" records_number = 8 state_file = "hourly_reports_state" incremental_report_file = "custom_report_hourly_incremental" report_file_with_records_further_start_date = "custom_hourly_with_record_further_config_start_date" state_file_legacy = "hourly_reports_state_legacy" state_file_after_migration = "hourly_reports_state_after_migration" state_file_after_migration_with_cursor_further_config_start_date = ( "hourly_reports_state_after_migration_with_cursor_further_config_start_date" ) incremental_report_file_with_records_further_cursor = "custom_report_hourly_incremental_with_records_further_cursor" custom_report_aggregation = "Hourly" @property def _config(self) -> dict[str, Any]: return ( ConfigBuilder() .with_reports_start_date(self.start_date) .with_custom_reports( [ { "name": self.stream_name, "reporting_object": "AgeGenderAudienceReportRequest", "report_columns": [ "AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "AdDistribution", "AgeGroup", "Gender", "Impressions", "Clicks", "Conversions", "Spend", "Revenue", "ExtendedCost", "Assists", "Language", "AccountStatus", "CampaignStatus", "AdGroupStatus", "BaseCampaignId", "AllConversions", "AllRevenue", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue", ], "report_aggregation": self.custom_report_aggregation, } ] ) .build() ) def mock_report_apis(self): self.mock_user_query_api(response_template="user_query") self.mock_accounts_search_api( response_template="accounts_search_for_report", body=b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}', ) self.mock_generate_report_api( endpoint="Submit", response_template="generate_report", body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AgeGenderAudienceReport", "ReturnOnlyCompleteData": false, "Type": "AgeGenderAudienceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "AdDistribution", "AgeGroup", "Gender", "Impressions", "Clicks", "Conversions", "Spend", "Revenue", "ExtendedCost", "Assists", "Language", "AccountStatus", "CampaignStatus", "AdGroupStatus", "BaseCampaignId", "AllConversions", "AllRevenue", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}', ) # # for second read self.mock_generate_report_api( endpoint="Submit", response_template="generate_report", body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AgeGenderAudienceReport", "ReturnOnlyCompleteData": 
false, "Type": "AgeGenderAudienceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "AdDistribution", "AgeGroup", "Gender", "Impressions", "Clicks", "Conversions", "Spend", "Revenue", "ExtendedCost", "Assists", "Language", "AccountStatus", "CampaignStatus", "AdGroupStatus", "BaseCampaignId", "AllConversions", "AllRevenue", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}', ) # # for no config start date test self.mock_generate_report_api( endpoint="Submit", response_template="generate_report", body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AgeGenderAudienceReport", "ReturnOnlyCompleteData": false, "Type": "AgeGenderAudienceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "AdDistribution", "AgeGroup", "Gender", "Impressions", "Clicks", "Conversions", "Spend", "Revenue", "ExtendedCost", "Assists", "Language", "AccountStatus", "CampaignStatus", "AdGroupStatus", "BaseCampaignId", "AllConversions", "AllRevenue", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}', ) self.mock_generate_report_api( endpoint="Poll", response_template="generate_report_poll", body=b'{"ReportRequestId": "thisisthereport_requestid"}' )
TestCustomReportHourly
python
django__django
tests/admin_inlines/admin.py
{ "start": 9414, "end": 9530 }
class ____(admin.StackedInline):
    model = Class
    extra = 1
    filter_vertical = ["person"]
ClassStackedVertical
python
ray-project__ray
python/ray/_common/tests/test_ray_option_utils.py
{ "start": 3937, "end": 6684 }
class ____:
    def test_validate_task_options_valid(self):
        validate_task_options({"num_cpus": 2, "max_retries": 3}, in_options=False)

    def test_validate_task_options_invalid_keyword(self):
        with pytest.raises(ValueError, match="Invalid option keyword"):
            validate_task_options({"invalid_option": 1}, in_options=False)

    def test_validate_task_options_in_options_invalid(self):
        with pytest.raises(
            ValueError,
            match=re.escape("Setting 'max_calls' is not supported in '.options()'."),
        ):
            validate_task_options({"max_calls": 5}, in_options=True)

    def test_validate_actor_options_valid(self):
        validate_actor_options({"max_concurrency": 2, "name": "abc"}, in_options=False)

    def test_validate_actor_options_invalid_keyword(self):
        with pytest.raises(ValueError, match="Invalid option keyword"):
            validate_actor_options({"invalid_option": 1}, in_options=False)

    def test_validate_actor_options_in_options_invalid(self):
        with pytest.raises(
            ValueError,
            match=re.escape(
                "Setting 'concurrency_groups' is not supported in '.options()'."
            ),
        ):
            validate_actor_options({"concurrency_groups": {}}, in_options=True)

    def test_validate_actor_get_if_exists_no_name(self):
        with pytest.raises(
            ValueError, match="must be specified to use `get_if_exists`"
        ):
            validate_actor_options({"get_if_exists": True}, in_options=False)

    def test_validate_actor_object_store_memory_warning(self):
        with pytest.warns(
            DeprecationWarning,
            match="Setting 'object_store_memory' for actors is deprecated",
        ):
            validate_actor_options({"object_store_memory": 100}, in_options=False)

    def test_check_deprecate_placement_group(self):
        pg = PlacementGroup.empty()
        # No error if only one is specified
        _check_deprecate_placement_group({"placement_group": pg})
        _check_deprecate_placement_group({"scheduling_strategy": "SPREAD"})

        # Error if both are specified
        with pytest.raises(
            ValueError, match="Placement groups should be specified via"
        ):
            _check_deprecate_placement_group(
                {"placement_group": pg, "scheduling_strategy": "SPREAD"}
            )

        # Check no error with default or None placement_group
        _check_deprecate_placement_group(
            {"placement_group": "default", "scheduling_strategy": "SPREAD"}
        )
        _check_deprecate_placement_group(
            {"placement_group": None, "scheduling_strategy": "SPREAD"}
        )
TestTaskActorOptionValidation
python
pydantic__pydantic
tests/benchmarks/basemodel_eq_performance.py
{ "start": 1642, "end": 2759 }
class ____(pydantic.BaseModel, frozen=True):
    def __eq__(self, other: Any) -> bool:
        if isinstance(other, pydantic.BaseModel):
            # When comparing instances of generic types for equality, as long as all field values are equal,
            # only require their generic origin types to be equal, rather than exact type equality.
            # This prevents headaches like MyGeneric(x=1) != MyGeneric[Any](x=1).
            self_type = self.__pydantic_generic_metadata__['origin'] or self.__class__
            other_type = other.__pydantic_generic_metadata__['origin'] or other.__class__

            field_names = type(self).model_fields.keys()

            return (
                self_type == other_type
                and ({k: self.__dict__[k] for k in field_names} == {k: other.__dict__[k] for k in field_names})
                and self.__pydantic_private__ == other.__pydantic_private__
                and self.__pydantic_extra__ == other.__pydantic_extra__
            )
        else:
            return NotImplemented  # delegate to the other item in the comparison
DictComprehensionEqModel
python
pyodide__pyodide
src/py/pyodide/http/_exceptions.py
{ "start": 1556, "end": 1747 }
class ____(XHRError):
    """Network-related XMLHttpRequest error."""

    def __init__(self, message: str = "Network error occurred") -> None:
        super().__init__(message)
XHRNetworkError
python
huggingface__transformers
src/transformers/models/x_clip/modeling_x_clip.py
{ "start": 44758, "end": 46710 }
class ____(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.num_heads = config.prompt_num_attention_heads

        dim = config.projection_dim
        head_dim = dim // self.num_heads
        self.scale = head_dim**-0.5

        self.q_proj = nn.Linear(dim, dim, bias=False)
        self.k_proj = nn.Linear(dim, dim, bias=False)
        self.v_proj = nn.Linear(dim, dim, bias=False)

        self.attn_drop = nn.Dropout(config.prompt_attention_dropout)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(config.prompt_projection_dropout)

    def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
        return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(self, queries, keys, values):
        """Input shape: Batch x Time x Channel"""
        batch_size, query_seq_len, hidden_size = queries.shape
        batch_size, key_seq_len, hidden_size = keys.shape
        queries = (
            self.q_proj(queries)
            .reshape(batch_size, query_seq_len, self.num_heads, hidden_size // self.num_heads)
            .permute(0, 2, 1, 3)
        )
        keys = (
            self.k_proj(keys)
            .reshape(batch_size, key_seq_len, self.num_heads, hidden_size // self.num_heads)
            .permute(0, 2, 1, 3)
        )
        values = (
            self.v_proj(values)
            .reshape(batch_size, key_seq_len, self.num_heads, hidden_size // self.num_heads)
            .permute(0, 2, 1, 3)
        )

        attn = (queries @ keys.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ values).transpose(1, 2).reshape(batch_size, query_seq_len, hidden_size)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
XCLIPCrossAttention
python
facebook__pyre-check
pyre_extensions/__init__.py
{ "start": 4255, "end": 4299 }
class ____(Generic[_Ts], int):
    pass
Length
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/properties.py
{ "start": 6183, "end": 6773 }
class ____:
    @property
    def my_property(self) -> str:
        return ""

    @my_property.setter
    def my_property(self, value: Base) -> None:
        value.foo()
        if isinstance(value, A):
            value.foo()


def test_property_augmented_assign(p: PropertySetterTitoModel):
    # We see two calls for the same expression/location:
    # p.my_property@setter(p.my_property@getter | 0)
    p.my_property |= 0


def string_source() -> str:
    return _test_source()


def test_object_class() -> None:
    x = string_source()
    _test_sink(x.__class__)
TestTypeInferenceInSetter
python
numba__numba
numba/core/utils.py
{ "start": 9629, "end": 10151 }
class ____(MutableSet[T]):
    def __init__(self, iterable: _tp.Iterable[T] = ()):
        # Just uses a dictionary under-the-hood to maintain insertion order.
        self._data = dict.fromkeys(iterable, None)

    def __contains__(self, key):
        return key in self._data

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

    def add(self, item):
        self._data[item] = None

    def discard(self, item):
        self._data.pop(item, None)
OrderedSet
python
great-expectations__great_expectations
tests/integration/metrics/column_pair_values/test_in_set.py
{ "start": 1108, "end": 5688 }
class ____:
    @parameterize_batch_for_data_sources(
        data_source_configs=PANDAS_DATA_SOURCES + SPARK_DATA_SOURCES + SQL_DATA_SOURCES,
        data=DATA_FRAME,
    )
    def test_success(self, batch_for_datasource: Batch) -> None:
        batch = batch_for_datasource
        metric = ColumnPairValuesInSetUnexpectedCount(
            value_pairs_set=SUCCESS_VALUE_PAIR_SET,
            column_A=COL_A,
            column_B=COL_B,
        )
        result = batch.compute_metrics(metric)
        assert isinstance(result, ColumnPairValuesInSetUnexpectedCountResult)
        assert result.value == 0

    @parameterize_batch_for_data_sources(
        data_source_configs=PANDAS_DATA_SOURCES + SPARK_DATA_SOURCES + SQL_DATA_SOURCES,
        data=DATA_FRAME,
    )
    def test_failure(self, batch_for_datasource: Batch) -> None:
        batch = batch_for_datasource
        metric = ColumnPairValuesInSetUnexpectedCount(
            value_pairs_set=FAILURE_VALUE_PAIR_SET,
            column_A=COL_A,
            column_B=COL_B,
        )
        result = batch.compute_metrics(metric)
        assert isinstance(result, ColumnPairValuesInSetUnexpectedCountResult)
        assert result.value == FAILURE_RESULT_COUNT

    @pytest.mark.parametrize(
        "ignore_row_if,unexpected_count",
        [
            ("either_value_is_missing", 1),
            ("both_values_are_missing", 3),
            ("neither", 6),
        ],
    )
    @parameterize_batch_for_data_sources(
        data_source_configs=PANDAS_DATA_SOURCES,
        data=DATA_FRAME_WITH_NULLS,
    )
    def test_ignore_row_if__pandas(
        self, batch_for_datasource: Batch, ignore_row_if, unexpected_count
    ) -> None:
        batch = batch_for_datasource
        metric = ColumnPairValuesInSetUnexpectedCount(
            value_pairs_set=NO_MATCH_PAIR_SET,
            column_A=COL_A_WITH_NULLS,
            column_B=COL_B_WITH_NULLS,
            ignore_row_if=ignore_row_if,
        )
        result = batch.compute_metrics(metric)
        assert result.value == unexpected_count

    @pytest.mark.parametrize(
        "ignore_row_if,unexpected_count",
        [
            pytest.param(
                "either_value_is_missing",
                1,
                marks=pytest.mark.xfail(reason="returns 2", strict=True),
            ),
            pytest.param(
                "both_values_are_missing",
                3,
                marks=pytest.mark.xfail(reason="returns 6", strict=True),
            ),
            pytest.param("neither", 6),
        ],
    )
    @parameterize_batch_for_data_sources(
        data_source_configs=[SparkFilesystemCsvDatasourceTestConfig()],
        data=DATA_FRAME_WITH_NULLS,
    )
    def test_ignore_row_if__spark(
        self, batch_for_datasource: Batch, ignore_row_if, unexpected_count
    ) -> None:
        """This test captures unexpected behavior for Spark FileSystem data sources."""
        batch = batch_for_datasource
        metric = ColumnPairValuesInSetUnexpectedCount(
            value_pairs_set=NO_MATCH_PAIR_SET,
            column_A=COL_A_WITH_NULLS,
            column_B=COL_B_WITH_NULLS,
            ignore_row_if=ignore_row_if,
        )
        result = batch.compute_metrics(metric)
        assert result.value == unexpected_count

    @pytest.mark.parametrize(
        "ignore_row_if,unexpected_count",
        [
            pytest.param("either_value_is_missing", 1),
            pytest.param("both_values_are_missing", 3),
            pytest.param(
                "neither",
                6,
                marks=pytest.mark.xfail(
                    reason=("returns 3 - pairs where both are null are dropped"),
                    strict=True,
                ),
            ),
        ],
    )
    @parameterize_batch_for_data_sources(
        data_source_configs=SQL_DATA_SOURCES,
        data=DATA_FRAME_WITH_NULLS,
    )
    def test_ignore_row_if__sql(
        self, batch_for_datasource: Batch, ignore_row_if, unexpected_count
    ) -> None:
        """This test captures a bug with SQL data sources and the ignore_row_if condition,
        where column pairs are dropped if both values are null.
        """
        batch = batch_for_datasource
        metric = ColumnPairValuesInSetUnexpectedCount(
            value_pairs_set=NO_MATCH_PAIR_SET,
            column_A=COL_A_WITH_NULLS,
            column_B=COL_B_WITH_NULLS,
            ignore_row_if=ignore_row_if,
        )
        result = batch.compute_metrics(metric)
        assert result.value == unexpected_count
TestColumnPairValuesInSetUnexpectedValues
python
mwaskom__seaborn
seaborn/_core/scales.py
{ "start": 13028, "end": 13123 }
class ____(Scale):
    # Numeric, integral, can skip ticks/ticklabels
    ...


@dataclass
Discrete
python
pydata__xarray
xarray/namedarray/_typing.py
{ "start": 6083, "end": 6359 }
class ____(
    _arrayfunction[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co]
):
    """
    Chunked duck array supporting NEP 18.

    Corresponds to np.ndarray.
    """

    @property
    def chunks(self) -> _Chunks: ...


@runtime_checkable
_chunkedarrayfunction
python
psf__black
src/black/parsing.py
{ "start": 3821, "end": 8824 }
class ____(Exception):
    """Raised when Black's generated code is not equivalent to the old AST."""


def _parse_single_version(
    src: str, version: tuple[int, int], *, type_comments: bool
) -> ast.AST:
    filename = "<unknown>"
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", SyntaxWarning)
        warnings.simplefilter("ignore", DeprecationWarning)
        return ast.parse(
            src, filename, feature_version=version, type_comments=type_comments
        )


def parse_ast(src: str) -> ast.AST:
    # TODO: support Python 4+ ;)
    versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)]

    first_error = ""
    for version in sorted(versions, reverse=True):
        try:
            return _parse_single_version(src, version, type_comments=True)
        except SyntaxError as e:
            if not first_error:
                first_error = str(e)

    # Try to parse without type comments
    for version in sorted(versions, reverse=True):
        try:
            return _parse_single_version(src, version, type_comments=False)
        except SyntaxError:
            pass

    raise SyntaxError(first_error)


def _normalize(lineend: str, value: str) -> str:
    # To normalize, we strip any leading and trailing space from
    # each line...
    stripped: list[str] = [i.strip() for i in value.splitlines()]
    normalized = lineend.join(stripped)
    # ...and remove any blank lines at the beginning and end of
    # the whole string
    return normalized.strip()


def stringify_ast(node: ast.AST) -> Iterator[str]:
    """Simple visitor generating strings to compare ASTs by content."""
    return _stringify_ast(node, [])


def _stringify_ast_with_new_parent(
    node: ast.AST, parent_stack: list[ast.AST], new_parent: ast.AST
) -> Iterator[str]:
    parent_stack.append(new_parent)
    yield from _stringify_ast(node, parent_stack)
    parent_stack.pop()


def _stringify_ast(node: ast.AST, parent_stack: list[ast.AST]) -> Iterator[str]:
    if (
        isinstance(node, ast.Constant)
        and isinstance(node.value, str)
        and node.kind == "u"
    ):
        # It's a quirk of history that we strip the u prefix over here. We used to
        # rewrite the AST nodes for Python version compatibility and we never copied
        # over the kind
        node.kind = None

    yield f"{' ' * len(parent_stack)}{node.__class__.__name__}("

    for field in sorted(node._fields):
        # TypeIgnore has only one field 'lineno' which breaks this comparison
        if isinstance(node, ast.TypeIgnore):
            break

        try:
            value: object = getattr(node, field)
        except AttributeError:
            continue

        yield f"{' ' * (len(parent_stack) + 1)}{field}="

        if isinstance(value, list):
            for item in value:
                # Ignore nested tuples within del statements, because we may insert
                # parentheses and they change the AST.
                if (
                    field == "targets"
                    and isinstance(node, ast.Delete)
                    and isinstance(item, ast.Tuple)
                ):
                    for elt in _unwrap_tuples(item):
                        yield from _stringify_ast_with_new_parent(
                            elt, parent_stack, node
                        )

                elif isinstance(item, ast.AST):
                    yield from _stringify_ast_with_new_parent(item, parent_stack, node)

        elif isinstance(value, ast.AST):
            yield from _stringify_ast_with_new_parent(value, parent_stack, node)

        else:
            normalized: object
            if (
                isinstance(node, ast.Constant)
                and field == "value"
                and isinstance(value, str)
                and len(parent_stack) >= 2
                # Any standalone string, ideally this would
                # exactly match black.nodes.is_docstring
                and isinstance(parent_stack[-1], ast.Expr)
            ):
                # Constant strings may be indented across newlines, if they are
                # docstrings; fold spaces after newlines when comparing. Similarly,
                # trailing and leading space may be removed.
                normalized = _normalize("\n", value)
            elif field == "type_comment" and isinstance(value, str):
                # Trailing whitespace in type comments is removed.
                normalized = value.rstrip()
            else:
                normalized = value
            yield (
                f"{' ' * (len(parent_stack) + 1)}{normalized!r}, #"
                f" {value.__class__.__name__}"
            )

    yield f"{' ' * len(parent_stack)}) # /{node.__class__.__name__}"


def _unwrap_tuples(node: ast.Tuple) -> Iterator[ast.AST]:
    for elt in node.elts:
        if isinstance(elt, ast.Tuple):
            yield from _unwrap_tuples(elt)
        else:
            yield elt
ASTSafetyError
python
apache__avro
lang/py/avro/errors.py
{ "start": 3777, "end": 3874 }
class ____(AvroException):
    """Raised when a protocol failed to parse."""
ProtocolParseException
python
numba__numba
numba/tests/test_unicode.py
{ "start": 5936, "end": 7317 }
class ____(MemoryLeakMixin, TestCase):
    def setUp(self):
        super(BaseTest, self).setUp()


UNICODE_EXAMPLES = [
    '',
    'ascii',
    '12345',
    '1234567890',
    '¡Y tú quién te crees?',
    '🐍⚡',
    '大处着眼,小处着手。',
]

UNICODE_ORDERING_EXAMPLES = [
    '',
    'a' 'aa',
    'aaa',
    'b',
    'aab',
    'ab',
    'asc',
    'ascih',
    'ascii',
    'ascij',
    '大处着眼,小处着手',
    '大处着眼,小处着手。',
    '大处着眼,小处着手。🐍⚡',
]

UNICODE_COUNT_EXAMPLES = [
    ('', ''),
    ('', 'ascii'),
    ('ascii', ''),
    ('asc ii', ' '),
    ('ascii', 'ci'),
    ('ascii', 'ascii'),
    ('ascii', 'Ă'),
    ('ascii', '大处'),
    ('ascii', 'étú?'),
    ('', '大处 着眼,小处着手。大大大处'),
    ('大处 着眼,小处着手。大大大处', ''),
    ('大处 着眼,小处着手。大大大处', ' '),
    ('大处 着眼,小处着手。大大大处', 'ci'),
    ('大处 着眼,小处着手。大大大处', '大处大处'),
    ('大处 着眼,小处着手。大大大处', '大处 着眼,小处着手。大大大处'),
    ('大处 着眼,小处着手。大大大处', 'Ă'),
    ('大处 着眼,小处着手。大大大处', '大处'),
    ('大处 着眼,小处着手。大大大处', 'étú?'),
    ('', 'tú quién te crees?'),
    ('tú quién te crees?', ''),
    ('tú quién te crees?', ' '),
    ('tú quién te crees?', 'ci'),
    ('tú quién te crees?', 'tú quién te crees?'),
    ('tú quién te crees?', 'Ă'),
    ('tú quién te crees?', '大处'),
    ('tú quién te crees?', 'étú?'),
    ('abababab', 'a'),
    ('abababab', 'ab'),
    ('abababab', 'aba'),
    ('aaaaaaaaaa', 'aaa'),
    ('aaaaaaaaaa', 'aĂ'),
    ('aabbaaaabbaa', 'aa')
]
BaseTest
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_lookup_py39.py
{ "start": 4968, "end": 5481 }
class ____:
    __is_annotated_types_grouped_metadata__ = True

    def __iter__(self):
        return iter([st.just(sentinel)])


@given(...)
def test_grouped_protocol_strategy(x: typing.Annotated[int, LazyStrategyAnnotation()]):
    assert x is sentinel


def test_collections_abc_callable_none():
    # https://github.com/HypothesisWorks/hypothesis/issues/4192
    s = st.from_type(collections.abc.Callable[[None], None])
    assert_all_examples(s, lambda x: callable(x) and x(None) is None)
LazyStrategyAnnotation