Unnamed: 0
int64
0
2.93k
code
stringlengths
101
62.2k
docs
stringlengths
51
10.7k
doc_len
int64
4
1.74k
words
int64
4
4.82k
lang
stringclasses
1 value
prompt
stringlengths
320
71.2k
200
def hvac_modes(self) -> list[HVACMode]: hvac_state = HVAC_MODES_MAPPING[self._climate.get_hvac_state()] return [HVACMode.AUTO, hvac_state]
Return the list of available hvac operation modes. HEAT and COOL mode are exclusive. End user has to enable a mode manually within the Somfy application. So only one mode can be displayed. Auto mode is a scheduler.
38
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def hvac_modes(self) -> list[HVACMode]: hvac_state = HVAC_MODES_MAPPING[self._climate.get_hvac_state()] return [HVACMode.AUTO, hvac_state] ``` ###Assistant : Return the list of available hvac operation modes. HEAT and COOL mode are exclusive. End user has to enable a mode manually within the Somfy application. So only one mode can be displayed. Auto mode is a scheduler.
201
def _assert_expected_task_states(self, dagrun, expected_states): tis = dagrun.get_task_instances() for ti in tis: try: expected_state = expected_states[ti.task_id] except KeyError: raise ValueError(f"Invalid task id {ti.task_id} found!") else: assert ti.state == expected_state all_downstream_skipped_states = { "short_circuit": State.SUCCESS, "op1": State.SKIPPED, "op2": State.SKIPPED, } all_success_states = {"short_circuit": State.SUCCESS, "op1": State.SUCCESS, "op2": State.SUCCESS}
Helper function that asserts `TaskInstances` of a given `task_id` are in a given state.
14
46
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _assert_expected_task_states(self, dagrun, expected_states): tis = dagrun.get_task_instances() for ti in tis: try: expected_state = expected_states[ti.task_id] except KeyError: raise ValueError(f"Invalid task id {ti.task_id} found!") else: assert ti.state == expected_state all_downstream_skipped_states = { "short_circuit": State.SUCCESS, "op1": State.SKIPPED, "op2": State.SKIPPED, } all_success_states = {"short_circuit": State.SUCCESS, "op1": State.SUCCESS, "op2": State.SUCCESS} ``` ###Assistant : Helper function that asserts `TaskInstances` of a given `task_id` are in a given state.
202
def set_customer_info(fieldname, customer, value=""): if fieldname == "loyalty_program": frappe.db.set_value("Customer", customer, "loyalty_program", value) contact = frappe.get_cached_value("Customer", customer, "customer_primary_contact") if not contact: contact = frappe.db.sql( , (customer), as_dict=1, ) contact = contact[0].get("parent") if contact else None if not contact: new_contact = frappe.new_doc("Contact") new_contact.is_primary_contact = 1 new_contact.first_name = customer new_contact.set("links", [{"link_doctype": "Customer", "link_name": customer}]) new_contact.save() contact = new_contact.name frappe.db.set_value("Customer", customer, "customer_primary_contact", contact) contact_doc = frappe.get_doc("Contact", contact) if fieldname == "email_id": contact_doc.set("email_ids", [{"email_id": value, "is_primary": 1}]) frappe.db.set_value("Customer", customer, "email_id", value) elif fieldname == "mobile_no": contact_doc.set("phone_nos", [{"phone": value, "is_primary_mobile_no": 1}]) frappe.db.set_value("Customer", customer, "mobile_no", value) contact_doc.save() @frappe.whitelist()
SELECT parent FROM `tabDynamic Link` WHERE parenttype = 'Contact' AND parentfield = 'links' AND link_doctype = 'Customer' AND link_name = %s
21
91
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_customer_info(fieldname, customer, value=""): if fieldname == "loyalty_program": frappe.db.set_value("Customer", customer, "loyalty_program", value) contact = frappe.get_cached_value("Customer", customer, "customer_primary_contact") if not contact: contact = frappe.db.sql( , (customer), as_dict=1, ) contact = contact[0].get("parent") if contact else None if not contact: new_contact = frappe.new_doc("Contact") new_contact.is_primary_contact = 1 new_contact.first_name = customer new_contact.set("links", [{"link_doctype": "Customer", "link_name": customer}]) new_contact.save() contact = new_contact.name frappe.db.set_value("Customer", customer, "customer_primary_contact", contact) contact_doc = frappe.get_doc("Contact", contact) if fieldname == "email_id": contact_doc.set("email_ids", [{"email_id": value, "is_primary": 1}]) frappe.db.set_value("Customer", customer, "email_id", value) elif fieldname == "mobile_no": contact_doc.set("phone_nos", [{"phone": value, "is_primary_mobile_no": 1}]) frappe.db.set_value("Customer", customer, "mobile_no", value) contact_doc.save() @frappe.whitelist() ``` ###Assistant : SELECT parent FROM `tabDynamic Link` WHERE parenttype = 'Contact' AND parentfield = 'links' AND link_doctype = 'Customer' AND link_name = %s
203
def test_unpublish_not_include_children_view_post(self): # Post to the unpublish page response = self.client.post( reverse("wagtailadmin_pages:unpublish", args=(self.test_page.id,)), {} ) # Should be redirected to explorer page self.assertRedirects( response, reverse("wagtailadmin_explore", args=(self.root_page.id,)) ) # Check that the page was unpublished self.assertFalse(SimplePage.objects.get(id=self.test_page.id).live) # Check that the descendant pages were not unpublished self.assertTrue(SimplePage.objects.get(id=self.test_child_page.id).live) self.assertTrue(SimplePage.objects.get(id=self.test_another_child_page.id).live)
This posts to the unpublish view and checks that the page was unpublished but its descendants were not
18
46
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_unpublish_not_include_children_view_post(self): # Post to the unpublish page response = self.client.post( reverse("wagtailadmin_pages:unpublish", args=(self.test_page.id,)), {} ) # Should be redirected to explorer page self.assertRedirects( response, reverse("wagtailadmin_explore", args=(self.root_page.id,)) ) # Check that the page was unpublished self.assertFalse(SimplePage.objects.get(id=self.test_page.id).live) # Check that the descendant pages were not unpublished self.assertTrue(SimplePage.objects.get(id=self.test_child_page.id).live) self.assertTrue(SimplePage.objects.get(id=self.test_another_child_page.id).live) ``` ###Assistant : This posts to the unpublish view and checks that the page was unpublished but its descendants were not
204
def _get_data_from_filepath(self, filepath_or_buffer): # if it is a string but the file does not exist, it might be a JSON string filepath_or_buffer = stringify_path(filepath_or_buffer) if ( not isinstance(filepath_or_buffer, str) or is_url(filepath_or_buffer) or is_fsspec_url(filepath_or_buffer) or file_exists(filepath_or_buffer) ): self.handles = get_handle( filepath_or_buffer, "r", encoding=self.encoding, compression=self.compression, storage_options=self.storage_options, errors=self.encoding_errors, ) filepath_or_buffer = self.handles.handle elif ( isinstance(filepath_or_buffer, str) and filepath_or_buffer.lower().endswith( (".json",) + tuple(f".json{c}" for c in _extension_to_compression) ) and not file_exists(filepath_or_buffer) ): raise FileNotFoundError(f"File {filepath_or_buffer} does not exist") return filepath_or_buffer
The function read_json accepts three input types: 1. filepath (string-like) 2. file-like object (e.g. open file object, StringIO) 3. JSON string This method turns (1) into (2) to simplify the rest of the processing. It returns input types (2) and (3) unchanged. It raises FileNotFoundError if the input is a string ending in one of .json, .json.gz, .json.bz2, etc. but no such file exists.
64
75
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_data_from_filepath(self, filepath_or_buffer): # if it is a string but the file does not exist, it might be a JSON string filepath_or_buffer = stringify_path(filepath_or_buffer) if ( not isinstance(filepath_or_buffer, str) or is_url(filepath_or_buffer) or is_fsspec_url(filepath_or_buffer) or file_exists(filepath_or_buffer) ): self.handles = get_handle( filepath_or_buffer, "r", encoding=self.encoding, compression=self.compression, storage_options=self.storage_options, errors=self.encoding_errors, ) filepath_or_buffer = self.handles.handle elif ( isinstance(filepath_or_buffer, str) and filepath_or_buffer.lower().endswith( (".json",) + tuple(f".json{c}" for c in _extension_to_compression) ) and not file_exists(filepath_or_buffer) ): raise FileNotFoundError(f"File {filepath_or_buffer} does not exist") return filepath_or_buffer ``` ###Assistant : The function read_json accepts three input types: 1. filepath (string-like) 2. file-like object (e.g. open file object, StringIO) 3. JSON string This method turns (1) into (2) to simplify the rest of the processing. It returns input types (2) and (3) unchanged. It raises FileNotFoundError if the input is a string ending in one of .json, .json.gz, .json.bz2, etc. but no such file exists.
205
def _run_pip(args, additional_paths=None): # Run the bootstraping in a subprocess to avoid leaking any state that happens # after pip has executed. Particulary, this avoids the case when pip holds onto # the files in *additional_paths*, preventing us to remove them at the end of the # invocation. code = f return subprocess.run([sys.executable, '-W', 'ignore::DeprecationWarning', "-c", code], check=True).returncode
import runpy import sys sys.path = {additional_paths or []} + sys.path sys.argv[1:] = {args} runpy.run_module("pip", run_name="__main__", alter_sys=True)
17
58
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _run_pip(args, additional_paths=None): # Run the bootstraping in a subprocess to avoid leaking any state that happens # after pip has executed. Particulary, this avoids the case when pip holds onto # the files in *additional_paths*, preventing us to remove them at the end of the # invocation. code = f return subprocess.run([sys.executable, '-W', 'ignore::DeprecationWarning', "-c", code], check=True).returncode ``` ###Assistant : import runpy import sys sys.path = {additional_paths or []} + sys.path sys.argv[1:] = {args} runpy.run_module("pip", run_name="__main__", alter_sys=True)
206
def test_pprint_heap_allocated_type(): module_name = "xxlimited" if sys.version_info < (3, 10) else "xxlimited_35" expected_output = ( "xxlimited.Null" if sys.version_info < (3, 11) else "xxlimited_35.Null" ) xxlimited = pytest.importorskip(module_name) output = pretty.pretty(xxlimited.Null) assert output == expected_output
Test that pprint works for heap allocated types.
8
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_pprint_heap_allocated_type(): module_name = "xxlimited" if sys.version_info < (3, 10) else "xxlimited_35" expected_output = ( "xxlimited.Null" if sys.version_info < (3, 11) else "xxlimited_35.Null" ) xxlimited = pytest.importorskip(module_name) output = pretty.pretty(xxlimited.Null) assert output == expected_output ``` ###Assistant : Test that pprint works for heap allocated types.
207
def test_orderby_percentile_with_many_fields_transactions_unsupported_fields(self): response = self.get_response( self.organization.slug, field=[ "p50(sentry.transactions.measurements.lcp)", "sum(user_misery)", ], statsPeriod="1h", interval="1h", datasource="snuba", groupBy=["project_id", "transaction"], orderBy="p50(sentry.transactions.measurements.lcp)", ) assert response.status_code == 400 assert ( response.json()["detail"] == "Multi-field select order by queries is not supported for metric user_misery" )
Test that contains a field in the `select` that is performance related but currently not supported should return a 400
20
37
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_orderby_percentile_with_many_fields_transactions_unsupported_fields(self): response = self.get_response( self.organization.slug, field=[ "p50(sentry.transactions.measurements.lcp)", "sum(user_misery)", ], statsPeriod="1h", interval="1h", datasource="snuba", groupBy=["project_id", "transaction"], orderBy="p50(sentry.transactions.measurements.lcp)", ) assert response.status_code == 400 assert ( response.json()["detail"] == "Multi-field select order by queries is not supported for metric user_misery" ) ``` ###Assistant : Test that contains a field in the `select` that is performance related but currently not supported should return a 400
208
def generate_dependency_map(integration_targets): # type: (t.List[IntegrationTarget]) -> t.Dict[str, t.Set[IntegrationTarget]] targets_dict = dict((target.name, target) for target in integration_targets) target_dependencies = analyze_integration_target_dependencies(integration_targets) dependency_map = {} # type: t.Dict[str, t.Set[IntegrationTarget]] invalid_targets = set() for dependency, dependents in target_dependencies.items(): dependency_target = targets_dict.get(dependency) if not dependency_target: invalid_targets.add(dependency) continue for dependent in dependents: if dependent not in dependency_map: dependency_map[dependent] = set() dependency_map[dependent].add(dependency_target) if invalid_targets: raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets))) return dependency_map
Analyze the given list of integration test targets and return a dictionary expressing target names and the targets on which they depend.
22
67
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate_dependency_map(integration_targets): # type: (t.List[IntegrationTarget]) -> t.Dict[str, t.Set[IntegrationTarget]] targets_dict = dict((target.name, target) for target in integration_targets) target_dependencies = analyze_integration_target_dependencies(integration_targets) dependency_map = {} # type: t.Dict[str, t.Set[IntegrationTarget]] invalid_targets = set() for dependency, dependents in target_dependencies.items(): dependency_target = targets_dict.get(dependency) if not dependency_target: invalid_targets.add(dependency) continue for dependent in dependents: if dependent not in dependency_map: dependency_map[dependent] = set() dependency_map[dependent].add(dependency_target) if invalid_targets: raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets))) return dependency_map ``` ###Assistant : Analyze the given list of integration test targets and return a dictionary expressing target names and the targets on which they depend.
209
def text(self) -> str: if self.error is None: return str(self) return self.error.databaseText()
Get a short text description of the error. This is a string suitable to show to the user as error message.
21
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def text(self) -> str: if self.error is None: return str(self) return self.error.databaseText() ``` ###Assistant : Get a short text description of the error. This is a string suitable to show to the user as error message.
210
def test_recover_start_from_replica_actor_names(serve_instance): # Test failed to deploy with total of 2 replicas, # but first constructor call fails.
Test controller is able to recover starting -> running replicas from actor names.
13
18
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_recover_start_from_replica_actor_names(serve_instance): # Test failed to deploy with total of 2 replicas, # but first constructor call fails. ``` ###Assistant : Test controller is able to recover starting -> running replicas from actor names.
211
def associated_entities(self) -> list[dict[str, Any]]: ha_entity_registry = self.device.gateway.ha_entity_registry zha_device_registry = self.device.gateway.device_registry return [ GroupEntityReference( ha_entity_registry.async_get(entity_ref.reference_id).name, ha_entity_registry.async_get(entity_ref.reference_id).original_name, entity_ref.reference_id, )._asdict() for entity_ref in zha_device_registry.get(self.device.ieee) if list(entity_ref.cluster_channels.values())[ 0 ].cluster.endpoint.endpoint_id == self.endpoint_id ]
Return the list of entities that were derived from this endpoint.
11
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def associated_entities(self) -> list[dict[str, Any]]: ha_entity_registry = self.device.gateway.ha_entity_registry zha_device_registry = self.device.gateway.device_registry return [ GroupEntityReference( ha_entity_registry.async_get(entity_ref.reference_id).name, ha_entity_registry.async_get(entity_ref.reference_id).original_name, entity_ref.reference_id, )._asdict() for entity_ref in zha_device_registry.get(self.device.ieee) if list(entity_ref.cluster_channels.values())[ 0 ].cluster.endpoint.endpoint_id == self.endpoint_id ] ``` ###Assistant : Return the list of entities that were derived from this endpoint.
212
def getheader(self, name, default=None): if self.headers is None: raise ResponseNotReady() headers = self.headers.get_all(name) or default if isinstance(headers, str) or not hasattr(headers, '__iter__'): return headers else: return ', '.join(headers)
Returns the value of the header matching *name*. If there are multiple matching headers, the values are combined into a single string separated by commas and spaces. If no matching header is found, returns *default* or None if the *default* is not specified. If the headers are unknown, raises http.client.ResponseNotReady.
50
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getheader(self, name, default=None): if self.headers is None: raise ResponseNotReady() headers = self.headers.get_all(name) or default if isinstance(headers, str) or not hasattr(headers, '__iter__'): return headers else: return ', '.join(headers) ``` ###Assistant : Returns the value of the header matching *name*. If there are multiple matching headers, the values are combined into a single string separated by commas and spaces. If no matching header is found, returns *default* or None if the *default* is not specified. If the headers are unknown, raises http.client.ResponseNotReady.
213
def test_generate_pipeline_code_2(): pipeline = [ 'KNeighborsClassifier', [ 'CombineDFs', [ 'GradientBoostingClassifier', 'input_matrix', 38.0, 5, 5, 5, 0.05, 0.5], [ 'CombineDFs', [ 'MinMaxScaler', 'input_matrix' ], ['ZeroCount', [ 'MaxAbsScaler', 'input_matrix' ] ] ] ], 18, 'uniform', 2 ] expected_code = assert expected_code == generate_pipeline_code(pipeline, tpot_obj.operators)
Assert that generate_pipeline_code() returns the correct code given a specific pipeline with two CombineDFs.make_pipeline( make_union( StackingEstimator(estimator=GradientBoostingClassifier(learning_rate=38.0, max_depth=5, max_features=5, min_samples_leaf=5, min_samples_split=0.05, n_estimators=0.5)), make_union( MinMaxScaler(), make_pipeline( MaxAbsScaler(), ZeroCount() ) ) ), KNeighborsClassifier(n_neighbors=18, p="uniform", weights=2) )
33
42
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_generate_pipeline_code_2(): pipeline = [ 'KNeighborsClassifier', [ 'CombineDFs', [ 'GradientBoostingClassifier', 'input_matrix', 38.0, 5, 5, 5, 0.05, 0.5], [ 'CombineDFs', [ 'MinMaxScaler', 'input_matrix' ], ['ZeroCount', [ 'MaxAbsScaler', 'input_matrix' ] ] ] ], 18, 'uniform', 2 ] expected_code = assert expected_code == generate_pipeline_code(pipeline, tpot_obj.operators) ``` ###Assistant : Assert that generate_pipeline_code() returns the correct code given a specific pipeline with two CombineDFs.make_pipeline( make_union( StackingEstimator(estimator=GradientBoostingClassifier(learning_rate=38.0, max_depth=5, max_features=5, min_samples_leaf=5, min_samples_split=0.05, n_estimators=0.5)), make_union( MinMaxScaler(), make_pipeline( MaxAbsScaler(), ZeroCount() ) ) ), KNeighborsClassifier(n_neighbors=18, p="uniform", weights=2) )
214
def _script_names(dist, script_name, is_gui): # type: (Distribution, str, bool) -> List[str] if dist_in_usersite(dist): bin_dir = get_bin_user() else: bin_dir = get_bin_prefix() exe_name = os.path.join(bin_dir, script_name) paths_to_remove = [exe_name] if WINDOWS: paths_to_remove.append(exe_name + '.exe') paths_to_remove.append(exe_name + '.exe.manifest') if is_gui: paths_to_remove.append(exe_name + '-script.pyw') else: paths_to_remove.append(exe_name + '-script.py') return paths_to_remove
Create the fully qualified name of the files created by {console,gui}_scripts for the given ``dist``. Returns the list of file names
21
46
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _script_names(dist, script_name, is_gui): # type: (Distribution, str, bool) -> List[str] if dist_in_usersite(dist): bin_dir = get_bin_user() else: bin_dir = get_bin_prefix() exe_name = os.path.join(bin_dir, script_name) paths_to_remove = [exe_name] if WINDOWS: paths_to_remove.append(exe_name + '.exe') paths_to_remove.append(exe_name + '.exe.manifest') if is_gui: paths_to_remove.append(exe_name + '-script.pyw') else: paths_to_remove.append(exe_name + '-script.py') return paths_to_remove ``` ###Assistant : Create the fully qualified name of the files created by {console,gui}_scripts for the given ``dist``. Returns the list of file names
215
def bbox_center(boxes): boxes_cx = (boxes[..., 0] + boxes[..., 2]) / 2 boxes_cy = (boxes[..., 1] + boxes[..., 3]) / 2 return paddle.stack([boxes_cx, boxes_cy], axis=-1)
Get bbox centers from boxes. Args: boxes (Tensor): boxes with shape (..., 4), "xmin, ymin, xmax, ymax" format. Returns: Tensor: boxes centers with shape (..., 2), "cx, cy" format.
29
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bbox_center(boxes): boxes_cx = (boxes[..., 0] + boxes[..., 2]) / 2 boxes_cy = (boxes[..., 1] + boxes[..., 3]) / 2 return paddle.stack([boxes_cx, boxes_cy], axis=-1) ``` ###Assistant : Get bbox centers from boxes. Args: boxes (Tensor): boxes with shape (..., 4), "xmin, ymin, xmax, ymax" format. Returns: Tensor: boxes centers with shape (..., 2), "cx, cy" format.
216
def predict(self, input): input_names = self.predictor.get_input_names() input_tensor = self.predictor.get_input_handle(input_names[0]) output_names = self.predictor.get_output_names() output_tensor = self.predictor.get_output_handle(output_names[0]) # preprocess self.recognize_times.preprocess_time_s.start() if type(input) == str: inputs = self.preprocess_video(input) else: inputs = self.preprocess_frames(input) self.recognize_times.preprocess_time_s.end() inputs = np.expand_dims( inputs, axis=0).repeat( self.batch_size, axis=0).copy() input_tensor.copy_from_cpu(inputs) # model prediction self.recognize_times.inference_time_s.start() self.predictor.run() self.recognize_times.inference_time_s.end() output = output_tensor.copy_to_cpu() # postprocess self.recognize_times.postprocess_time_s.start() classes, scores = self.postprocess(output) self.recognize_times.postprocess_time_s.end() return classes, scores
Args: input (str) or (list): video file path or image data list Returns: results (dict):
15
58
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def predict(self, input): input_names = self.predictor.get_input_names() input_tensor = self.predictor.get_input_handle(input_names[0]) output_names = self.predictor.get_output_names() output_tensor = self.predictor.get_output_handle(output_names[0]) # preprocess self.recognize_times.preprocess_time_s.start() if type(input) == str: inputs = self.preprocess_video(input) else: inputs = self.preprocess_frames(input) self.recognize_times.preprocess_time_s.end() inputs = np.expand_dims( inputs, axis=0).repeat( self.batch_size, axis=0).copy() input_tensor.copy_from_cpu(inputs) # model prediction self.recognize_times.inference_time_s.start() self.predictor.run() self.recognize_times.inference_time_s.end() output = output_tensor.copy_to_cpu() # postprocess self.recognize_times.postprocess_time_s.start() classes, scores = self.postprocess(output) self.recognize_times.postprocess_time_s.end() return classes, scores ``` ###Assistant : Args: input (str) or (list): video file path or image data list Returns: results (dict):
217
def deep_deconstruct(self, obj): if isinstance(obj, list): return [self.deep_deconstruct(value) for value in obj] elif isinstance(obj, tuple): return tuple(self.deep_deconstruct(value) for value in obj) elif isinstance(obj, dict): return {key: self.deep_deconstruct(value) for key, value in obj.items()} elif isinstance(obj, functools.partial): return ( obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords), ) elif isinstance(obj, COMPILED_REGEX_TYPE): return RegexObject(obj) elif isinstance(obj, type): # If this is a type that implements 'deconstruct' as an instance method, # avoid treating this as being deconstructible itself - see #22951 return obj elif hasattr(obj, "deconstruct"): deconstructed = obj.deconstruct() if isinstance(obj, models.Field): # we have a field which also returns a name deconstructed = deconstructed[1:] path, args, kwargs = deconstructed return ( path, [self.deep_deconstruct(value) for value in args], {key: self.deep_deconstruct(value) for key, value in kwargs.items()}, ) else: return obj
Recursive deconstruction for a field and its arguments. Used for full comparison for rename/alter; sometimes a single-level deconstruction will not compare correctly.
22
121
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deep_deconstruct(self, obj): if isinstance(obj, list): return [self.deep_deconstruct(value) for value in obj] elif isinstance(obj, tuple): return tuple(self.deep_deconstruct(value) for value in obj) elif isinstance(obj, dict): return {key: self.deep_deconstruct(value) for key, value in obj.items()} elif isinstance(obj, functools.partial): return ( obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords), ) elif isinstance(obj, COMPILED_REGEX_TYPE): return RegexObject(obj) elif isinstance(obj, type): # If this is a type that implements 'deconstruct' as an instance method, # avoid treating this as being deconstructible itself - see #22951 return obj elif hasattr(obj, "deconstruct"): deconstructed = obj.deconstruct() if isinstance(obj, models.Field): # we have a field which also returns a name deconstructed = deconstructed[1:] path, args, kwargs = deconstructed return ( path, [self.deep_deconstruct(value) for value in args], {key: self.deep_deconstruct(value) for key, value in kwargs.items()}, ) else: return obj ``` ###Assistant : Recursive deconstruction for a field and its arguments. Used for full comparison for rename/alter; sometimes a single-level deconstruction will not compare correctly.
218
def test_cancellation(self): deferred: "Deferred[str]" = Deferred() wrapper_deferred = stop_cancellation(deferred) # Cancel the new `Deferred`. wrapper_deferred.cancel() self.assertTrue(wrapper_deferred.called) self.failureResultOf(wrapper_deferred, CancelledError) self.assertFalse( deferred.called, "Original `Deferred` was unexpectedly cancelled." ) # Now make the inner `Deferred` fail. # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed # in logs. deferred.errback(ValueError("abc")) self.assertIsNone(deferred.result, "`Failure` was not consumed")
Test that cancellation of the new `Deferred` leaves the original running.
11
54
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_cancellation(self): deferred: "Deferred[str]" = Deferred() wrapper_deferred = stop_cancellation(deferred) # Cancel the new `Deferred`. wrapper_deferred.cancel() self.assertTrue(wrapper_deferred.called) self.failureResultOf(wrapper_deferred, CancelledError) self.assertFalse( deferred.called, "Original `Deferred` was unexpectedly cancelled." ) # Now make the inner `Deferred` fail. # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed # in logs. deferred.errback(ValueError("abc")) self.assertIsNone(deferred.result, "`Failure` was not consumed") ``` ###Assistant : Test that cancellation of the new `Deferred` leaves the original running.
219
def getgeneratorlocals(generator): if not isgenerator(generator): raise TypeError("{!r} is not a Python generator".format(generator)) frame = getattr(generator, "gi_frame", None) if frame is not None: return generator.gi_frame.f_locals else: return {} # ------------------------------------------------ coroutine introspection CORO_CREATED = 'CORO_CREATED' CORO_RUNNING = 'CORO_RUNNING' CORO_SUSPENDED = 'CORO_SUSPENDED' CORO_CLOSED = 'CORO_CLOSED'
Get the mapping of generator local variables to their current values. A dict is returned, with the keys the local variable names and values the bound values.
27
43
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getgeneratorlocals(generator): if not isgenerator(generator): raise TypeError("{!r} is not a Python generator".format(generator)) frame = getattr(generator, "gi_frame", None) if frame is not None: return generator.gi_frame.f_locals else: return {} # ------------------------------------------------ coroutine introspection CORO_CREATED = 'CORO_CREATED' CORO_RUNNING = 'CORO_RUNNING' CORO_SUSPENDED = 'CORO_SUSPENDED' CORO_CLOSED = 'CORO_CLOSED' ``` ###Assistant : Get the mapping of generator local variables to their current values. A dict is returned, with the keys the local variable names and values the bound values.
220
async def test_last_bin_contains_end_date(client, route): response = await client.post( f"/{route}/history", json=dict( history_start=str(dt), history_end=str(dt.add(days=1, minutes=30)), history_interval_seconds=timedelta(days=1).total_seconds(), ), ) assert response.status_code == status.HTTP_200_OK parsed = pydantic.parse_obj_as(List[responses.HistoryResponse], response.json()) assert len(parsed) == 2 assert parsed[0].interval_start == dt assert parsed[0].interval_end == dt.add(days=1) assert parsed[1].interval_start == dt.add(days=1) assert parsed[1].interval_end == dt.add(days=2)
The last bin contains the end date, so its own end could be after the history end
17
44
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_last_bin_contains_end_date(client, route): response = await client.post( f"/{route}/history", json=dict( history_start=str(dt), history_end=str(dt.add(days=1, minutes=30)), history_interval_seconds=timedelta(days=1).total_seconds(), ), ) assert response.status_code == status.HTTP_200_OK parsed = pydantic.parse_obj_as(List[responses.HistoryResponse], response.json()) assert len(parsed) == 2 assert parsed[0].interval_start == dt assert parsed[0].interval_end == dt.add(days=1) assert parsed[1].interval_start == dt.add(days=1) assert parsed[1].interval_end == dt.add(days=2) ``` ###Assistant : The last bin contains the end date, so its own end could be after the history end
221
def _validate_attributes(self): # Run config if not isinstance(self.run_config, RunConfig): raise ValueError( f"`run_config` should be an instance of `ray.air.RunConfig`, " f"found {type(self.run_config)} with value `{self.run_config}`." ) # Scaling config # Todo: move to ray.air.ScalingConfig if not isinstance(self.scaling_config, dict): raise ValueError( f"`scaling_config` should be an instance of `dict`, " f"found {type(self.scaling_config)} with value `{self.scaling_config}`." ) # Datasets if not isinstance(self.datasets, dict): raise ValueError( f"`datasets` should be a dict mapping from a string to " f"`ray.data.Dataset` objects, " f"found {type(self.datasets)} with value `{self.datasets}`." ) elif any( not isinstance(ds, ray.data.Dataset) and not callable(ds) for ds in self.datasets.values() ): raise ValueError( f"At least one value in the `datasets` dict is not a " f"`ray.data.Dataset`: {self.datasets}" ) # Preprocessor if self.preprocessor is not None and not isinstance( self.preprocessor, ray.data.Preprocessor ): raise ValueError( f"`preprocessor` should be an instance of `ray.data.Preprocessor`, " f"found {type(self.preprocessor)} with value `{self.preprocessor}`." ) if self.resume_from_checkpoint is not None and not isinstance( self.resume_from_checkpoint, ray.air.Checkpoint ): raise ValueError( f"`resume_from_checkpoint` should be an instance of " f"`ray.air.Checkpoint`, found {type(self.resume_from_checkpoint)} " f"with value `{self.resume_from_checkpoint}`." )
Called on __init()__ to validate trainer attributes.
7
168
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _validate_attributes(self): # Run config if not isinstance(self.run_config, RunConfig): raise ValueError( f"`run_config` should be an instance of `ray.air.RunConfig`, " f"found {type(self.run_config)} with value `{self.run_config}`." ) # Scaling config # Todo: move to ray.air.ScalingConfig if not isinstance(self.scaling_config, dict): raise ValueError( f"`scaling_config` should be an instance of `dict`, " f"found {type(self.scaling_config)} with value `{self.scaling_config}`." ) # Datasets if not isinstance(self.datasets, dict): raise ValueError( f"`datasets` should be a dict mapping from a string to " f"`ray.data.Dataset` objects, " f"found {type(self.datasets)} with value `{self.datasets}`." ) elif any( not isinstance(ds, ray.data.Dataset) and not callable(ds) for ds in self.datasets.values() ): raise ValueError( f"At least one value in the `datasets` dict is not a " f"`ray.data.Dataset`: {self.datasets}" ) # Preprocessor if self.preprocessor is not None and not isinstance( self.preprocessor, ray.data.Preprocessor ): raise ValueError( f"`preprocessor` should be an instance of `ray.data.Preprocessor`, " f"found {type(self.preprocessor)} with value `{self.preprocessor}`." ) if self.resume_from_checkpoint is not None and not isinstance( self.resume_from_checkpoint, ray.air.Checkpoint ): raise ValueError( f"`resume_from_checkpoint` should be an instance of " f"`ray.air.Checkpoint`, found {type(self.resume_from_checkpoint)} " f"with value `{self.resume_from_checkpoint}`." ) ``` ###Assistant : Called on __init()__ to validate trainer attributes.
222
def longitude(self) -> float | None: if ( self.extra_state_attributes is not None and ATTR_LONGITUDE in self.extra_state_attributes ): longitude: float = self.extra_state_attributes[ATTR_LONGITUDE] return longitude return None
Return longitude if provided in extra_state_attributes or None.
8
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def longitude(self) -> float | None: if ( self.extra_state_attributes is not None and ATTR_LONGITUDE in self.extra_state_attributes ): longitude: float = self.extra_state_attributes[ATTR_LONGITUDE] return longitude return None ``` ###Assistant : Return longitude if provided in extra_state_attributes or None.
223
def user_cache_dir(self) -> str: path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA")) return self._append_parts(path, opinion_value="Cache")
:return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g. ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version``
16
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def user_cache_dir(self) -> str: path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA")) return self._append_parts(path, opinion_value="Cache") ``` ###Assistant : :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g. ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version``
224
def get_project_config(project, full_config=True, project_keys=None): with sentry_sdk.push_scope() as scope: scope.set_tag("project", project.id) with metrics.timer("relay.config.get_project_config.duration"): return _get_project_config(project, full_config=full_config, project_keys=project_keys)
Constructs the ProjectConfig information. :param project: The project to load configuration for. Ensure that organization is bound on this object; otherwise it will be loaded from the database. :param full_config: True if only the full config is required, False if only the restricted (for external relays) is required (default True, i.e. full configuration) :param project_keys: Pre-fetched project keys for performance. However, if no project keys are provided it is assumed that the config does not need to contain auth information (this is the case when used in python's StoreView) :return: a ProjectConfig object for the given project
97
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_project_config(project, full_config=True, project_keys=None): with sentry_sdk.push_scope() as scope: scope.set_tag("project", project.id) with metrics.timer("relay.config.get_project_config.duration"): return _get_project_config(project, full_config=full_config, project_keys=project_keys) ``` ###Assistant : Constructs the ProjectConfig information. :param project: The project to load configuration for. Ensure that organization is bound on this object; otherwise it will be loaded from the database. :param full_config: True if only the full config is required, False if only the restricted (for external relays) is required (default True, i.e. full configuration) :param project_keys: Pre-fetched project keys for performance. However, if no project keys are provided it is assumed that the config does not need to contain auth information (this is the case when used in python's StoreView) :return: a ProjectConfig object for the given project
225
def _query_kg(self, sparql_query): try: response = self.knowledge_graph.query(sparql_query=sparql_query) # unpack different answer styles if isinstance(response, list): if len(response) == 0: result = "" else: result = [] for x in response: for k, v in x.items(): result.append(v["value"]) elif isinstance(response, bool): result = str(response) elif "count" in response[0]: result = str(int(response[0]["count"]["value"])) else: result = "" except Exception: result = "" return result, sparql_query
Execute a single SPARQL query on the knowledge graph to retrieve an answer and unpack different answer styles for boolean queries, count queries, and list queries. :param sparql_query: SPARQL query that shall be executed on the knowledge graph
38
61
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _query_kg(self, sparql_query): try: response = self.knowledge_graph.query(sparql_query=sparql_query) # unpack different answer styles if isinstance(response, list): if len(response) == 0: result = "" else: result = [] for x in response: for k, v in x.items(): result.append(v["value"]) elif isinstance(response, bool): result = str(response) elif "count" in response[0]: result = str(int(response[0]["count"]["value"])) else: result = "" except Exception: result = "" return result, sparql_query ``` ###Assistant : Execute a single SPARQL query on the knowledge graph to retrieve an answer and unpack different answer styles for boolean queries, count queries, and list queries. :param sparql_query: SPARQL query that shall be executed on the knowledge graph
226
def _path_importer_cache(cls, path): if path == '': try: path = _os.getcwd() except FileNotFoundError: # Don't cache the failure as the cwd can easily change to # a valid directory later on. return None try: finder = sys.path_importer_cache[path] except KeyError: finder = cls._path_hooks(path) sys.path_importer_cache[path] = finder return finder
Get the finder for the path entry from sys.path_importer_cache. If the path entry is not in the cache, find the appropriate finder and cache it. If no finder is available, store None.
32
47
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _path_importer_cache(cls, path): if path == '': try: path = _os.getcwd() except FileNotFoundError: # Don't cache the failure as the cwd can easily change to # a valid directory later on. return None try: finder = sys.path_importer_cache[path] except KeyError: finder = cls._path_hooks(path) sys.path_importer_cache[path] = finder return finder ``` ###Assistant : Get the finder for the path entry from sys.path_importer_cache. If the path entry is not in the cache, find the appropriate finder and cache it. If no finder is available, store None.
227
def test_publish_parts(self): import docutils self.assertNotEqual( docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE, "cmsreference" ) source = "reST, `interpreted text`, default role." markup = "<p>reST, <cite>interpreted text</cite>, default role.</p>\n" parts = docutils.core.publish_parts(source=source, writer_name="html4css1") self.assertEqual(parts["fragment"], markup)
Django shouldn't break the default role for interpreted text when ``publish_parts`` is used directly, by setting it to ``cmsreference`` (#6681).
20
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_publish_parts(self): import docutils self.assertNotEqual( docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE, "cmsreference" ) source = "reST, `interpreted text`, default role." markup = "<p>reST, <cite>interpreted text</cite>, default role.</p>\n" parts = docutils.core.publish_parts(source=source, writer_name="html4css1") self.assertEqual(parts["fragment"], markup) ``` ###Assistant : Django shouldn't break the default role for interpreted text when ``publish_parts`` is used directly, by setting it to ``cmsreference`` (#6681).
228
def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id depth_map = setup_info.depth_map # Try at "B" backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id, depth_map["B"], limit=100) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertListEqual( backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"] ) # Try at "A" backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] # Event "2" has a depth of 2 but is not included here because we only # know the approximate depth of 5 from our event "3". self.assertListEqual(backfill_event_ids, ["b3", "b2", "b1"])
Test to make sure only backfill points that are older and come before the `current_depth` are returned.
17
88
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id depth_map = setup_info.depth_map # Try at "B" backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id, depth_map["B"], limit=100) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertListEqual( backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"] ) # Try at "A" backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] # Event "2" has a depth of 2 but is not included here because we only # know the approximate depth of 5 from our event "3". self.assertListEqual(backfill_event_ids, ["b3", "b2", "b1"]) ``` ###Assistant : Test to make sure only backfill points that are older and come before the `current_depth` are returned.
229
def theano_code(expr, cache=None, **kwargs): sympy_deprecation_warning( , deprecated_since_version="1.8", active_deprecations_target='theanocode-deprecated') if not theano: raise ImportError("theano is required for theano_code") if cache is None: cache = global_cache return TheanoPrinter(cache=cache, settings={}).doprint(expr, **kwargs)
Convert a SymPy expression into a Theano graph variable. .. deprecated:: 1.8 ``sympy.printing.theanocode`` is deprecated. Theano has been renamed to Aesara. Use ``sympy.printing.aesaracode`` instead. See :ref:`theanocode-deprecated` for more information. Parameters ========== expr : sympy.core.expr.Expr SymPy expression object to convert. cache : dict Cached Theano variables (see :class:`TheanoPrinter.cache <TheanoPrinter>`). Defaults to the module-level global cache. dtypes : dict Passed to :meth:`.TheanoPrinter.doprint`. broadcastables : dict Passed to :meth:`.TheanoPrinter.doprint`. Returns ======= theano.gof.graph.Variable A variable corresponding to the expression's value in a Theano symbolic expression graph. sympy.printing.theanocode is deprecated. Theano has been renamed to Aesara. Use sympy.printing.aesaracode instead.
94
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def theano_code(expr, cache=None, **kwargs): sympy_deprecation_warning( , deprecated_since_version="1.8", active_deprecations_target='theanocode-deprecated') if not theano: raise ImportError("theano is required for theano_code") if cache is None: cache = global_cache return TheanoPrinter(cache=cache, settings={}).doprint(expr, **kwargs) ``` ###Assistant : Convert a SymPy expression into a Theano graph variable. .. deprecated:: 1.8 ``sympy.printing.theanocode`` is deprecated. Theano has been renamed to Aesara. Use ``sympy.printing.aesaracode`` instead. See :ref:`theanocode-deprecated` for more information. Parameters ========== expr : sympy.core.expr.Expr SymPy expression object to convert. cache : dict Cached Theano variables (see :class:`TheanoPrinter.cache <TheanoPrinter>`). Defaults to the module-level global cache. dtypes : dict Passed to :meth:`.TheanoPrinter.doprint`. broadcastables : dict Passed to :meth:`.TheanoPrinter.doprint`. Returns ======= theano.gof.graph.Variable A variable corresponding to the expression's value in a Theano symbolic expression graph. sympy.printing.theanocode is deprecated. Theano has been renamed to Aesara. Use sympy.printing.aesaracode instead.
230
def _validate_axes_lengths(self): if self._row_lengths_cache is not None and len(self.index) > 0: # An empty frame can have 0 rows but a nonempty index. If the frame # does have rows, the number of rows must equal the size of the # index. num_rows = sum(self._row_lengths_cache) if num_rows > 0: ErrorMessage.catch_bugs_and_request_email( num_rows != len(self._index_cache), f"Row lengths: {num_rows} != {len(self._index_cache)}", ) ErrorMessage.catch_bugs_and_request_email( any(val < 0 for val in self._row_lengths_cache), f"Row lengths cannot be negative: {self._row_lengths_cache}", ) if self._column_widths_cache is not None and len(self.columns) > 0: # An empty frame can have 0 column but a nonempty column index. If # the frame does have columns, the number of columns must equal the # size of the columns. num_columns = sum(self._column_widths_cache) if num_columns > 0: ErrorMessage.catch_bugs_and_request_email( num_columns != len(self._columns_cache), f"Column widths: {num_columns} != {len(self._columns_cache)}", ) ErrorMessage.catch_bugs_and_request_email( any(val < 0 for val in self._column_widths_cache), f"Column widths cannot be negative: {self._column_widths_cache}", )
Validate that labels are split correctly if split is known.
10
147
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _validate_axes_lengths(self): if self._row_lengths_cache is not None and len(self.index) > 0: # An empty frame can have 0 rows but a nonempty index. If the frame # does have rows, the number of rows must equal the size of the # index. num_rows = sum(self._row_lengths_cache) if num_rows > 0: ErrorMessage.catch_bugs_and_request_email( num_rows != len(self._index_cache), f"Row lengths: {num_rows} != {len(self._index_cache)}", ) ErrorMessage.catch_bugs_and_request_email( any(val < 0 for val in self._row_lengths_cache), f"Row lengths cannot be negative: {self._row_lengths_cache}", ) if self._column_widths_cache is not None and len(self.columns) > 0: # An empty frame can have 0 column but a nonempty column index. If # the frame does have columns, the number of columns must equal the # size of the columns. num_columns = sum(self._column_widths_cache) if num_columns > 0: ErrorMessage.catch_bugs_and_request_email( num_columns != len(self._columns_cache), f"Column widths: {num_columns} != {len(self._columns_cache)}", ) ErrorMessage.catch_bugs_and_request_email( any(val < 0 for val in self._column_widths_cache), f"Column widths cannot be negative: {self._column_widths_cache}", ) ``` ###Assistant : Validate that labels are split correctly if split is known.
231
def test_get_name_capability_sid(): cap_sid = "S-1-15-3-1024-1065365936-1281604716-3511738428-1654721687-432734479-3232135806-4053264122-3456934681" sid_obj = win32security.ConvertStringSidToSid(cap_sid) assert salt.utils.win_dacl.get_name(sid_obj) is None
Test get_name with a compatibility SID. Should return `None` as we want to ignore these SIDs
16
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_name_capability_sid(): cap_sid = "S-1-15-3-1024-1065365936-1281604716-3511738428-1654721687-432734479-3232135806-4053264122-3456934681" sid_obj = win32security.ConvertStringSidToSid(cap_sid) assert salt.utils.win_dacl.get_name(sid_obj) is None ``` ###Assistant : Test get_name with a compatibility SID. Should return `None` as we want to ignore these SIDs
232
def entry_points(group=None): eps = importlib.metadata.entry_points() if group: try: return eps.select(group=group) except AttributeError: return eps.get(group, []) return eps
Returns an iterable of entrypoints. For compatibility with Python 3.8/3.9. In 3.10 the return type changed from a dict to an ``importlib.metadata.EntryPoints``. This compatibility utility can be removed once Python 3.10 is the minimum.
34
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def entry_points(group=None): eps = importlib.metadata.entry_points() if group: try: return eps.select(group=group) except AttributeError: return eps.get(group, []) return eps ``` ###Assistant : Returns an iterable of entrypoints. For compatibility with Python 3.8/3.9. In 3.10 the return type changed from a dict to an ``importlib.metadata.EntryPoints``. This compatibility utility can be removed once Python 3.10 is the minimum.
233
def elliptic_curve(self) -> Optional[str]: key = self._private_key() if isinstance(key, EllipticCurvePrivateKey): return key.curve.name return None
:returns: If the private key is an elliptic key, the name of its curve. :rtype: str
16
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def elliptic_curve(self) -> Optional[str]: key = self._private_key() if isinstance(key, EllipticCurvePrivateKey): return key.curve.name return None ``` ###Assistant : :returns: If the private key is an elliptic key, the name of its curve. :rtype: str
234
def page_type_display_name(self): if not self.specific_class or self.is_root(): return "" else: return self.specific_class.get_verbose_name()
A human-readable version of this page's type
7
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def page_type_display_name(self): if not self.specific_class or self.is_root(): return "" else: return self.specific_class.get_verbose_name() ``` ###Assistant : A human-readable version of this page's type
235
def save(self, fname, **kwargs) -> Plot: # TODO expose important keyword arugments in our signature? self.plot().save(fname, **kwargs) return self
Render the plot and write it to a buffer or file on disk. Parameters ---------- fname : str, path, or buffer Location on disk to save the figure, or a buffer to write into. Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.
41
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save(self, fname, **kwargs) -> Plot: # TODO expose important keyword arugments in our signature? self.plot().save(fname, **kwargs) return self ``` ###Assistant : Render the plot and write it to a buffer or file on disk. Parameters ---------- fname : str, path, or buffer Location on disk to save the figure, or a buffer to write into. Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.
236
def get_install_candidate(self, link_evaluator, link): # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate] is_candidate, result = link_evaluator.evaluate_link(link) if not is_candidate: if result: self._log_skipped_link(link, reason=result) return None return InstallationCandidate( name=link_evaluator.project_name, link=link, version=result, )
If the link is a candidate for install, convert it to an InstallationCandidate and return it. Otherwise, return None.
19
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_install_candidate(self, link_evaluator, link): # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate] is_candidate, result = link_evaluator.evaluate_link(link) if not is_candidate: if result: self._log_skipped_link(link, reason=result) return None return InstallationCandidate( name=link_evaluator.project_name, link=link, version=result, ) ``` ###Assistant : If the link is a candidate for install, convert it to an InstallationCandidate and return it. Otherwise, return None.
237
def bar(self, x=None, y=None, **kwargs) -> PlotAccessor: return self(kind="bar", x=x, y=y, **kwargs)
Vertical bar plot. A bar plot is a plot that presents categorical data with rectangular bars with lengths proportional to the values that they represent. A bar plot shows comparisons among discrete categories. One axis of the plot shows the specific categories being compared, and the other axis represents a measured value.
52
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bar(self, x=None, y=None, **kwargs) -> PlotAccessor: return self(kind="bar", x=x, y=y, **kwargs) ``` ###Assistant : Vertical bar plot. A bar plot is a plot that presents categorical data with rectangular bars with lengths proportional to the values that they represent. A bar plot shows comparisons among discrete categories. One axis of the plot shows the specific categories being compared, and the other axis represents a measured value.
238
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): input_shape = shape(y_pred) num_samples, num_steps = input_shape[0], input_shape[1] y_pred = tf.math.log( tf.compat.v1.transpose(y_pred, perm=[1, 0, 2]) + epsilon() ) input_length = tf.cast(input_length, tf.int32) if greedy: (decoded, log_prob) = tf.nn.ctc_greedy_decoder( inputs=y_pred, sequence_length=input_length ) else: (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder( inputs=y_pred, sequence_length=input_length, beam_width=beam_width, top_paths=top_paths, ) decoded_dense = [] for st in decoded: st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps)) decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1)) return (decoded_dense, log_prob) # HIGH ORDER FUNCTIONS @keras_export("keras.backend.map_fn") @doc_controls.do_not_generate_docs
Decodes the output of a softmax. Can use either greedy search (also known as best path) or a constrained dictionary search. Args: y_pred: tensor `(samples, time_steps, num_categories)` containing the prediction, or output of the softmax. input_length: tensor `(samples, )` containing the sequence length for each batch item in `y_pred`. greedy: perform much faster best-path search if `true`. This does not use a dictionary. beam_width: if `greedy` is `false`: a beam search decoder will be used with a beam of this width. top_paths: if `greedy` is `false`, how many of the most probable paths will be returned. Returns: Tuple: List: if `greedy` is `true`, returns a list of one element that contains the decoded sequence. If `false`, returns the `top_paths` most probable decoded sequences. Each decoded sequence has shape (samples, time_steps). Important: blank labels are returned as `-1`. Tensor `(top_paths, )` that contains the log probability of each decoded sequence.
149
71
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): input_shape = shape(y_pred) num_samples, num_steps = input_shape[0], input_shape[1] y_pred = tf.math.log( tf.compat.v1.transpose(y_pred, perm=[1, 0, 2]) + epsilon() ) input_length = tf.cast(input_length, tf.int32) if greedy: (decoded, log_prob) = tf.nn.ctc_greedy_decoder( inputs=y_pred, sequence_length=input_length ) else: (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder( inputs=y_pred, sequence_length=input_length, beam_width=beam_width, top_paths=top_paths, ) decoded_dense = [] for st in decoded: st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps)) decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1)) return (decoded_dense, log_prob) # HIGH ORDER FUNCTIONS @keras_export("keras.backend.map_fn") @doc_controls.do_not_generate_docs ``` ###Assistant : Decodes the output of a softmax. Can use either greedy search (also known as best path) or a constrained dictionary search. Args: y_pred: tensor `(samples, time_steps, num_categories)` containing the prediction, or output of the softmax. input_length: tensor `(samples, )` containing the sequence length for each batch item in `y_pred`. greedy: perform much faster best-path search if `true`. This does not use a dictionary. beam_width: if `greedy` is `false`: a beam search decoder will be used with a beam of this width. top_paths: if `greedy` is `false`, how many of the most probable paths will be returned. Returns: Tuple: List: if `greedy` is `true`, returns a list of one element that contains the decoded sequence. If `false`, returns the `top_paths` most probable decoded sequences. Each decoded sequence has shape (samples, time_steps). Important: blank labels are returned as `-1`. Tensor `(top_paths, )` that contains the log probability of each decoded sequence.
239
def display_time_updates(bar): threading.Thread(target=_show_time_updates, args=(bar,)).start()
Start displaying the progress `bar` in a notebook. Parameters ---------- bar : tqdm.tqdm The progress bar wrapper to display in a notebook cell.
23
4
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def display_time_updates(bar): threading.Thread(target=_show_time_updates, args=(bar,)).start() ``` ###Assistant : Start displaying the progress `bar` in a notebook. Parameters ---------- bar : tqdm.tqdm The progress bar wrapper to display in a notebook cell.
240
def remove(name=None, pkgs=None, **kwargs): targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets) errors = [] # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the fileset or rpm package(s) for target in targets: try: named, versionpkg, rpmpkg = _check_pkg(target) except CommandExecutionError as exc: if exc.info: errors.append(exc.info["errors"]) continue if rpmpkg: # assume use dnf or yum cmdflags = " -y remove " if pathlib.Path("/opt/freeware/bin/dnf").is_file(): cmdexe = "/opt/freeware/bin/dnf" elif pathlib.Path("/opt/freeware/bin/yum").is_file(): cmdexe = "/opt/freeware/bin/yum" elif pathlib.Path("/usr/bin/yum").is_file(): cmdexe = "/usr/bin/yum" else: cmdexe = "/usr/bin/rpm" cmdflags = " -e " cmd = [cmdexe, cmdflags, named] out = __salt__["cmd.run_all"](cmd, python_shell=False) else: cmd = ["/usr/sbin/installp", "-u", named] out = __salt__["cmd.run_all"](cmd, python_shell=False) # Get a list of the packages after the uninstall __context__.pop("pkg.list_pkgs", None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( "Problems encountered removing filesets(s)/package(s)", info={"changes": ret, "errors": errors}, ) return ret
Remove specified fileset(s)/rpm package(s). name The name of the fileset or rpm package to be deleted. .. versionadded:: 3005 preference to install rpm packages are to use in the following order: /opt/freeware/bin/dnf /opt/freeware/bin/yum /usr/bin/yum /usr/bin/rpm Multiple Package Options: pkgs A list of filesets and/or rpm packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <fileset/rpm package name> salt '*' pkg.remove tcsh salt '*' pkg.remove xlC.rte salt '*' pkg.remove Firefox.base.adt salt '*' pkg.remove pkgs='["foo", "bar"]'
101
157
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def remove(name=None, pkgs=None, **kwargs): targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets) errors = [] # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the fileset or rpm package(s) for target in targets: try: named, versionpkg, rpmpkg = _check_pkg(target) except CommandExecutionError as exc: if exc.info: errors.append(exc.info["errors"]) continue if rpmpkg: # assume use dnf or yum cmdflags = " -y remove " if pathlib.Path("/opt/freeware/bin/dnf").is_file(): cmdexe = "/opt/freeware/bin/dnf" elif pathlib.Path("/opt/freeware/bin/yum").is_file(): cmdexe = "/opt/freeware/bin/yum" elif pathlib.Path("/usr/bin/yum").is_file(): cmdexe = "/usr/bin/yum" else: cmdexe = "/usr/bin/rpm" cmdflags = " -e " cmd = [cmdexe, cmdflags, named] out = __salt__["cmd.run_all"](cmd, python_shell=False) else: cmd = ["/usr/sbin/installp", "-u", named] out = __salt__["cmd.run_all"](cmd, python_shell=False) # Get a list of the packages after the uninstall __context__.pop("pkg.list_pkgs", None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( "Problems encountered removing filesets(s)/package(s)", info={"changes": ret, "errors": errors}, ) return ret ``` ###Assistant : Remove specified fileset(s)/rpm package(s). name The name of the fileset or rpm package to be deleted. .. versionadded:: 3005 preference to install rpm packages are to use in the following order: /opt/freeware/bin/dnf /opt/freeware/bin/yum /usr/bin/yum /usr/bin/rpm Multiple Package Options: pkgs A list of filesets and/or rpm packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <fileset/rpm package name> salt '*' pkg.remove tcsh salt '*' pkg.remove xlC.rte salt '*' pkg.remove Firefox.base.adt salt '*' pkg.remove pkgs='["foo", "bar"]'
241
def _from_ordinalf(x, tz=None): tz = _get_tzinfo(tz) dt = (np.datetime64(get_epoch()) + np.timedelta64(int(np.round(x * MUSECONDS_PER_DAY)), 'us')) if dt < np.datetime64('0001-01-01') or dt >= np.datetime64('10000-01-01'): raise ValueError(f'Date ordinal {x} converts to {dt} (using ' f'epoch {get_epoch()}), but Matplotlib dates must be ' 'between year 0001 and 9999.') # convert from datetime64 to datetime: dt = dt.tolist() # datetime64 is always UTC: dt = dt.replace(tzinfo=dateutil.tz.gettz('UTC')) # but maybe we are working in a different timezone so move. dt = dt.astimezone(tz) # fix round off errors if np.abs(x) > 70 * 365: # if x is big, round off to nearest twenty microseconds. # This avoids floating point roundoff error ms = round(dt.microsecond / 20) * 20 if ms == 1000000: dt = dt.replace(microsecond=0) + datetime.timedelta(seconds=1) else: dt = dt.replace(microsecond=ms) return dt # a version of _from_ordinalf that can operate on numpy arrays _from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf, otypes="O") # a version of dateutil.parser.parse that can operate on numpy arrays _dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
Convert Gregorian float of the date, preserving hours, minutes, seconds and microseconds. Return value is a `.datetime`. The input date *x* is a float in ordinal days at UTC, and the output will be the specified `.datetime` object corresponding to that time in timezone *tz*, or if *tz* is ``None``, in the timezone specified in :rc:`timezone`.
56
156
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _from_ordinalf(x, tz=None): tz = _get_tzinfo(tz) dt = (np.datetime64(get_epoch()) + np.timedelta64(int(np.round(x * MUSECONDS_PER_DAY)), 'us')) if dt < np.datetime64('0001-01-01') or dt >= np.datetime64('10000-01-01'): raise ValueError(f'Date ordinal {x} converts to {dt} (using ' f'epoch {get_epoch()}), but Matplotlib dates must be ' 'between year 0001 and 9999.') # convert from datetime64 to datetime: dt = dt.tolist() # datetime64 is always UTC: dt = dt.replace(tzinfo=dateutil.tz.gettz('UTC')) # but maybe we are working in a different timezone so move. dt = dt.astimezone(tz) # fix round off errors if np.abs(x) > 70 * 365: # if x is big, round off to nearest twenty microseconds. # This avoids floating point roundoff error ms = round(dt.microsecond / 20) * 20 if ms == 1000000: dt = dt.replace(microsecond=0) + datetime.timedelta(seconds=1) else: dt = dt.replace(microsecond=ms) return dt # a version of _from_ordinalf that can operate on numpy arrays _from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf, otypes="O") # a version of dateutil.parser.parse that can operate on numpy arrays _dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse) ``` ###Assistant : Convert Gregorian float of the date, preserving hours, minutes, seconds and microseconds. Return value is a `.datetime`. The input date *x* is a float in ordinal days at UTC, and the output will be the specified `.datetime` object corresponding to that time in timezone *tz*, or if *tz* is ``None``, in the timezone specified in :rc:`timezone`.
242
def getdoc(object): try: doc = object.__doc__ except AttributeError: return None if doc is None: try: doc = _finddoc(object) except (AttributeError, TypeError): return None if not isinstance(doc, str): return None return cleandoc(doc)
Get the documentation string for an object. All tabs are expanded to spaces. To clean up docstrings that are indented to line up with blocks of code, any whitespace than can be uniformly removed from the second line onwards is removed.
41
31
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getdoc(object): try: doc = object.__doc__ except AttributeError: return None if doc is None: try: doc = _finddoc(object) except (AttributeError, TypeError): return None if not isinstance(doc, str): return None return cleandoc(doc) ``` ###Assistant : Get the documentation string for an object. All tabs are expanded to spaces. To clean up docstrings that are indented to line up with blocks of code, any whitespace than can be uniformly removed from the second line onwards is removed.
243
def load_config_with_kwargs(cls, kwargs): assert_is_a_marshmallow_class(cls) schema = cls.Schema() fields = schema.fields.keys() return load_config(cls, **{k: v for k, v in kwargs.items() if k in fields}), { k: v for k, v in kwargs.items() if k not in fields }
Takes a marshmallow class and dict of parameter values and appropriately instantiantes the schema.
14
37
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_config_with_kwargs(cls, kwargs): assert_is_a_marshmallow_class(cls) schema = cls.Schema() fields = schema.fields.keys() return load_config(cls, **{k: v for k, v in kwargs.items() if k in fields}), { k: v for k, v in kwargs.items() if k not in fields } ``` ###Assistant : Takes a marshmallow class and dict of parameter values and appropriately instantiantes the schema.
244
def test_payment_refund_or_void_refund_called_txn_exist(refund_mock, payment): # given payment.charge_status = ChargeStatus.FULLY_CHARGED payment.save(update_fields=["charge_status"]) assert payment.can_refund() is True payment.captured_amount = payment.total payment.save(update_fields=["captured_amount"]) txn = payment.transactions.create( is_success=True, action_required=False, kind=TransactionKind.REFUND_ONGOING, amount=payment.captured_amount / 2, currency=payment.currency, token="test", gateway_response={}, ) # when gateway.payment_refund_or_void( payment, get_plugins_manager(), None, transaction_id=txn.token ) # then assert refund_mock.called_once() @patch("saleor.payment.gateway.refund")
Ensure that the refund method is called when the refund process is already ongoing but not covered full payment captured amount.
21
43
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_payment_refund_or_void_refund_called_txn_exist(refund_mock, payment): # given payment.charge_status = ChargeStatus.FULLY_CHARGED payment.save(update_fields=["charge_status"]) assert payment.can_refund() is True payment.captured_amount = payment.total payment.save(update_fields=["captured_amount"]) txn = payment.transactions.create( is_success=True, action_required=False, kind=TransactionKind.REFUND_ONGOING, amount=payment.captured_amount / 2, currency=payment.currency, token="test", gateway_response={}, ) # when gateway.payment_refund_or_void( payment, get_plugins_manager(), None, transaction_id=txn.token ) # then assert refund_mock.called_once() @patch("saleor.payment.gateway.refund") ``` ###Assistant : Ensure that the refund method is called when the refund process is already ongoing but not covered full payment captured amount.
245
def _all(self): groups = super(Deprecated, self).values() return EntryPoints(itertools.chain.from_iterable(groups))
Reconstruct a list of all entrypoints from the groups.
9
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _all(self): groups = super(Deprecated, self).values() return EntryPoints(itertools.chain.from_iterable(groups)) ``` ###Assistant : Reconstruct a list of all entrypoints from the groups.
246
def readlines(self, sizehint=None, keepends=True): data = self.read() return data.splitlines(keepends)
Read all lines available on the input stream and return them as a list. Line breaks are implemented using the codec's decoder method and are included in the list entries. sizehint, if given, is ignored since there is no efficient way to finding the true end-of-line.
46
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def readlines(self, sizehint=None, keepends=True): data = self.read() return data.splitlines(keepends) ``` ###Assistant : Read all lines available on the input stream and return them as a list. Line breaks are implemented using the codec's decoder method and are included in the list entries. sizehint, if given, is ignored since there is no efficient way to finding the true end-of-line.
247
async def notify_clients(cls) -> None: while not cls.STOP: await asyncio.sleep(cls.UPDATE_INTERVALS) if cls.EVENT_QUEUE: await cls.broadcast_estimations()
Notify clients about events statuses in the queue periodically.
9
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def notify_clients(cls) -> None: while not cls.STOP: await asyncio.sleep(cls.UPDATE_INTERVALS) if cls.EVENT_QUEUE: await cls.broadcast_estimations() ``` ###Assistant : Notify clients about events statuses in the queue periodically.
248
def _readPyPIFile(self): # Complex stuff, pylint: disable=too-many-branches,too-many-statements if self.used_modules is None: pyi_filename = self.getPyIFilename() if os.path.exists(pyi_filename): pyi_deps = OrderedSet() # Flag signalling multiline import handling in_import = False in_import_part = "" for line in getFileContentByLine(pyi_filename): line = line.strip() if not in_import: if line.startswith("import "): imported = line[7:] pyi_deps.add(imported) elif line.startswith("from "): parts = line.split(None, 3) assert parts[0] == "from" assert parts[2] == "import" origin_name = parts[1] if origin_name == "typing": continue if origin_name == ".": origin_name = self.getFullName() else: dot_count = 0 while origin_name.startswith("."): origin_name = origin_name[1:] dot_count += 1 if dot_count > 0: if origin_name: origin_name = ( self.getFullName() .getRelativePackageName(level=dot_count + 1) .getChildNamed(origin_name) ) else: origin_name = ( self.getFullName().getRelativePackageName( level=dot_count + 1 ) ) if origin_name != self.getFullName(): pyi_deps.add(origin_name) imported = parts[3] if imported.startswith("("): # Handle multiline imports if not imported.endswith(")"): in_import = True imported = imported[1:] in_import_part = origin_name assert in_import_part, ( "Multiline part in file %s cannot be empty" % pyi_filename ) else: in_import = False imported = imported[1:-1] assert imported if imported == "*": continue for name in imported.split(","): if name: name = name.strip() pyi_deps.add(origin_name + "." + name) else: # In import imported = line if imported.endswith(")"): imported = imported[0:-1] in_import = False for name in imported.split(","): name = name.strip() if name: pyi_deps.add(in_import_part + "." + name) if "typing" in pyi_deps: pyi_deps.discard("typing") if "__future__" in pyi_deps: pyi_deps.discard("__future__") if self.getFullName() in pyi_deps: pyi_deps.discard(self.getFullName()) if self.getFullName().getPackageName() in pyi_deps: pyi_deps.discard(self.getFullName().getPackageName()) self.used_modules = tuple((pyi_dep, None) for pyi_dep in pyi_deps) else: self.used_modules = ()
Read the .pyi file if present and scan for dependencies.
10
244
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _readPyPIFile(self): # Complex stuff, pylint: disable=too-many-branches,too-many-statements if self.used_modules is None: pyi_filename = self.getPyIFilename() if os.path.exists(pyi_filename): pyi_deps = OrderedSet() # Flag signalling multiline import handling in_import = False in_import_part = "" for line in getFileContentByLine(pyi_filename): line = line.strip() if not in_import: if line.startswith("import "): imported = line[7:] pyi_deps.add(imported) elif line.startswith("from "): parts = line.split(None, 3) assert parts[0] == "from" assert parts[2] == "import" origin_name = parts[1] if origin_name == "typing": continue if origin_name == ".": origin_name = self.getFullName() else: dot_count = 0 while origin_name.startswith("."): origin_name = origin_name[1:] dot_count += 1 if dot_count > 0: if origin_name: origin_name = ( self.getFullName() .getRelativePackageName(level=dot_count + 1) .getChildNamed(origin_name) ) else: origin_name = ( self.getFullName().getRelativePackageName( level=dot_count + 1 ) ) if origin_name != self.getFullName(): pyi_deps.add(origin_name) imported = parts[3] if imported.startswith("("): # Handle multiline imports if not imported.endswith(")"): in_import = True imported = imported[1:] in_import_part = origin_name assert in_import_part, ( "Multiline part in file %s cannot be empty" % pyi_filename ) else: in_import = False imported = imported[1:-1] assert imported if imported == "*": continue for name in imported.split(","): if name: name = name.strip() pyi_deps.add(origin_name + "." + name) else: # In import imported = line if imported.endswith(")"): imported = imported[0:-1] in_import = False for name in imported.split(","): name = name.strip() if name: pyi_deps.add(in_import_part + "." + name) if "typing" in pyi_deps: pyi_deps.discard("typing") if "__future__" in pyi_deps: pyi_deps.discard("__future__") if self.getFullName() in pyi_deps: pyi_deps.discard(self.getFullName()) if self.getFullName().getPackageName() in pyi_deps: pyi_deps.discard(self.getFullName().getPackageName()) self.used_modules = tuple((pyi_dep, None) for pyi_dep in pyi_deps) else: self.used_modules = () ``` ###Assistant : Read the .pyi file if present and scan for dependencies.
249
def topological_sort(self): result = [] # Make a shallow copy of the adjacency list alist = {} for k, v in self.adjacency_list.items(): alist[k] = v[:] while True: # See what we can remove in this run to_remove = [] for k, v in list(alist.items())[:]: if not v: to_remove.append(k) del alist[k] if not to_remove: # What's left in alist (if anything) is a cycle. break # Remove from the adjacency list of others for k, v in alist.items(): alist[k] = [(d, r) for d, r in v if d not in to_remove] logger.debug('Moving to result: %s', ['%s (%s)' % (d.name, d.version) for d in to_remove]) result.extend(to_remove) return result, list(alist.keys())
Perform a topological sort of the graph. :return: A tuple, the first element of which is a topologically sorted list of distributions, and the second element of which is a list of distributions that cannot be sorted because they have circular dependencies and so form a cycle.
47
108
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def topological_sort(self): result = [] # Make a shallow copy of the adjacency list alist = {} for k, v in self.adjacency_list.items(): alist[k] = v[:] while True: # See what we can remove in this run to_remove = [] for k, v in list(alist.items())[:]: if not v: to_remove.append(k) del alist[k] if not to_remove: # What's left in alist (if anything) is a cycle. break # Remove from the adjacency list of others for k, v in alist.items(): alist[k] = [(d, r) for d, r in v if d not in to_remove] logger.debug('Moving to result: %s', ['%s (%s)' % (d.name, d.version) for d in to_remove]) result.extend(to_remove) return result, list(alist.keys()) ``` ###Assistant : Perform a topological sort of the graph. :return: A tuple, the first element of which is a topologically sorted list of distributions, and the second element of which is a list of distributions that cannot be sorted because they have circular dependencies and so form a cycle.
250
def soft_reset(self) -> None: self.length = 0 self.episode_id = random.randrange(int(2e9)) self.total_reward = 0.0 self.agent_rewards = defaultdict(float) self._agent_reward_history = defaultdict(list)
Clears rewards and metrics, but retains RNN and other state. This is used to carry state across multiple logical episodes in the same env (i.e., if `soft_horizon` is set).
29
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def soft_reset(self) -> None: self.length = 0 self.episode_id = random.randrange(int(2e9)) self.total_reward = 0.0 self.agent_rewards = defaultdict(float) self._agent_reward_history = defaultdict(list) ``` ###Assistant : Clears rewards and metrics, but retains RNN and other state. This is used to carry state across multiple logical episodes in the same env (i.e., if `soft_horizon` is set).
251
def get_ordering_field(self, field_name): try: field = self.opts.get_field(field_name) return field.name except FieldDoesNotExist: # See whether field_name is a name of a non-field # that allows sorting. if callable(field_name): attr = field_name elif hasattr(self.model_admin, field_name): attr = getattr(self.model_admin, field_name) else: attr = getattr(self.model, field_name) return getattr(attr, "admin_order_field", None)
Returns the proper model field name corresponding to the given field_name to use for ordering. field_name may either be the name of a proper model field or the name of a method (on the admin or model) or a callable with the 'admin_order_field' attribute. Returns None if no proper model field name can be matched.
55
46
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_ordering_field(self, field_name): try: field = self.opts.get_field(field_name) return field.name except FieldDoesNotExist: # See whether field_name is a name of a non-field # that allows sorting. if callable(field_name): attr = field_name elif hasattr(self.model_admin, field_name): attr = getattr(self.model_admin, field_name) else: attr = getattr(self.model, field_name) return getattr(attr, "admin_order_field", None) ``` ###Assistant : Returns the proper model field name corresponding to the given field_name to use for ordering. field_name may either be the name of a proper model field or the name of a method (on the admin or model) or a callable with the 'admin_order_field' attribute. Returns None if no proper model field name can be matched.
252
def _check_ordering(self, obj): # ordering = None if obj.ordering is None: # The default value is None return [] elif not isinstance(obj.ordering, (list, tuple)): return must_be( "a list or tuple", option="ordering", obj=obj, id="admin.E031" ) else: return list( chain.from_iterable( self._check_ordering_item(obj, field_name, "ordering[%d]" % index) for index, field_name in enumerate(obj.ordering) ) )
Check that ordering refers to existing fields or is random.
10
50
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_ordering(self, obj): # ordering = None if obj.ordering is None: # The default value is None return [] elif not isinstance(obj.ordering, (list, tuple)): return must_be( "a list or tuple", option="ordering", obj=obj, id="admin.E031" ) else: return list( chain.from_iterable( self._check_ordering_item(obj, field_name, "ordering[%d]" % index) for index, field_name in enumerate(obj.ordering) ) ) ``` ###Assistant : Check that ordering refers to existing fields or is random.
253
def parsing_hooks(cls) -> Tuple[Type["Block"], Type["Sentence"], Type["Statements"]]: return Block, Sentence, Statements
Returns object types that this class should be able to `parse` recusrively. The order of the objects indicates the order in which the parser should try to parse each subitem. :returns: A list of Parsable classes. :rtype list:
38
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parsing_hooks(cls) -> Tuple[Type["Block"], Type["Sentence"], Type["Statements"]]: return Block, Sentence, Statements ``` ###Assistant : Returns object types that this class should be able to `parse` recusrively. The order of the objects indicates the order in which the parser should try to parse each subitem. :returns: A list of Parsable classes. :rtype list:
254
def _cast_inplace(terms, acceptable_dtypes, dtype) -> None: dt = np.dtype(dtype) for term in terms: if term.type in acceptable_dtypes: continue try: new_value = term.value.astype(dt) except AttributeError: new_value = dt.type(term.value) term.update(new_value)
Cast an expression inplace. Parameters ---------- terms : Op The expression that should cast. acceptable_dtypes : list of acceptable numpy.dtype Will not cast if term's dtype in this list. dtype : str or numpy.dtype The dtype to cast to.
39
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _cast_inplace(terms, acceptable_dtypes, dtype) -> None: dt = np.dtype(dtype) for term in terms: if term.type in acceptable_dtypes: continue try: new_value = term.value.astype(dt) except AttributeError: new_value = dt.type(term.value) term.update(new_value) ``` ###Assistant : Cast an expression inplace. Parameters ---------- terms : Op The expression that should cast. acceptable_dtypes : list of acceptable numpy.dtype Will not cast if term's dtype in this list. dtype : str or numpy.dtype The dtype to cast to.
255
def _deployment_created(self, external_id, request): payload = request.data["payload"] vercel_project_id = ( payload["projectId"] if payload.get("projectId") else payload["project"]["id"] ) # Only create releases for production deploys for now if payload["target"] != "production": logger.info( f"Ignoring deployment for environment: {payload['target']}", extra={"external_id": external_id, "vercel_project_id": vercel_project_id}, ) return self.respond(status=204) logging_params = {"external_id": external_id, "vercel_project_id": vercel_project_id} org_integrations = OrganizationIntegration.objects.select_related("organization").filter( integration__external_id=external_id, integration__provider=self.provider ) if not org_integrations: logger.info("Integration not found", extra=logging_params) return self.respond({"detail": "Integration not found"}, status=404) # for each org integration, search the configs to find one that matches the vercel project of the webhook for org_integration in org_integrations: project_mappings = org_integration.config.get("project_mappings") or [] matched_mappings = list(filter(lambda x: x[1] == vercel_project_id, project_mappings)) if matched_mappings: organization = org_integration.organization sentry_project_id = matched_mappings[0][0] logging_params["organization_id"] = organization.id logging_params["project_id"] = sentry_project_id try: release_payload, token = get_payload_and_token( payload, organization.id, sentry_project_id ) except Project.DoesNotExist: logger.info("Project not found", extra=logging_params) return self.respond({"detail": "Project not found"}, status=404) except SentryAppInstallationForProvider.DoesNotExist: logger.info("Installation not found", extra=logging_params) return self.respond({"detail": "Installation not found"}, status=404) except SentryAppInstallationToken.DoesNotExist: logger.info("Token not found", extra=logging_params) return self.respond({"detail": "Token not found"}, status=404) except NoCommitFoundError: logger.info("No commit found", extra=logging_params) return self.respond({"detail": "No commit found"}, status=404) except MissingRepositoryError: logger.info("Could not determine repository", extra=logging_params) return self.respond({"detail": "Could not determine repository"}, status=400) url = absolute_uri(f"/api/0/organizations/{organization.slug}/releases/") headers = { "Accept": "application/json", "Authorization": f"Bearer {token}", "User-Agent": f"sentry_vercel/{VERSION}", } json_error = None # create the basic release payload without refs no_ref_payload = release_payload.copy() del no_ref_payload["refs"] with http.build_session() as session: try: resp = session.post(url, json=no_ref_payload, headers=headers) json_error = safe_json_parse(resp) resp.raise_for_status() except RequestException as e: # errors here should be uncommon but we should be aware of them logger.error( f"Error creating release: {e} - {json_error}", extra=logging_params, exc_info=True, ) # 400 probably isn't the right status code but oh well return self.respond({"detail": f"Error creating release: {e}"}, status=400) # set the refs try: resp = session.post( url, json=release_payload, headers=headers, ) json_error = safe_json_parse(resp) resp.raise_for_status() except RequestException as e: # errors will probably be common if the user doesn't have repos set up logger.info( f"Error setting refs: {e} - {json_error}", extra=logging_params, exc_info=True, ) # 400 probably isn't the right status code but oh well return self.respond({"detail": f"Error setting refs: {e}"}, status=400) # we are going to quit after the first project match as there shouldn't be multiple matches return self.respond(status=201) return self.respond(status=204)
Steps: 1. Find all org integrations that match the external id 2. Search the configs to find one that matches the vercel project of the webhook 3. Look up the Sentry project that matches 4. Look up the connected internal integration 5. Find the token associated with that installation 6. Determine the commit sha and repo based on what provider is used 7. Create the release using the token WITHOUT refs 8. Update the release with refs
77
360
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _deployment_created(self, external_id, request): payload = request.data["payload"] vercel_project_id = ( payload["projectId"] if payload.get("projectId") else payload["project"]["id"] ) # Only create releases for production deploys for now if payload["target"] != "production": logger.info( f"Ignoring deployment for environment: {payload['target']}", extra={"external_id": external_id, "vercel_project_id": vercel_project_id}, ) return self.respond(status=204) logging_params = {"external_id": external_id, "vercel_project_id": vercel_project_id} org_integrations = OrganizationIntegration.objects.select_related("organization").filter( integration__external_id=external_id, integration__provider=self.provider ) if not org_integrations: logger.info("Integration not found", extra=logging_params) return self.respond({"detail": "Integration not found"}, status=404) # for each org integration, search the configs to find one that matches the vercel project of the webhook for org_integration in org_integrations: project_mappings = org_integration.config.get("project_mappings") or [] matched_mappings = list(filter(lambda x: x[1] == vercel_project_id, project_mappings)) if matched_mappings: organization = org_integration.organization sentry_project_id = matched_mappings[0][0] logging_params["organization_id"] = organization.id logging_params["project_id"] = sentry_project_id try: release_payload, token = get_payload_and_token( payload, organization.id, sentry_project_id ) except Project.DoesNotExist: logger.info("Project not found", extra=logging_params) return self.respond({"detail": "Project not found"}, status=404) except SentryAppInstallationForProvider.DoesNotExist: logger.info("Installation not found", extra=logging_params) return self.respond({"detail": "Installation not found"}, status=404) except SentryAppInstallationToken.DoesNotExist: logger.info("Token not found", extra=logging_params) return self.respond({"detail": "Token not found"}, status=404) except NoCommitFoundError: logger.info("No commit found", extra=logging_params) return self.respond({"detail": "No commit found"}, status=404) except MissingRepositoryError: logger.info("Could not determine repository", extra=logging_params) return self.respond({"detail": "Could not determine repository"}, status=400) url = absolute_uri(f"/api/0/organizations/{organization.slug}/releases/") headers = { "Accept": "application/json", "Authorization": f"Bearer {token}", "User-Agent": f"sentry_vercel/{VERSION}", } json_error = None # create the basic release payload without refs no_ref_payload = release_payload.copy() del no_ref_payload["refs"] with http.build_session() as session: try: resp = session.post(url, json=no_ref_payload, headers=headers) json_error = safe_json_parse(resp) resp.raise_for_status() except RequestException as e: # errors here should be uncommon but we should be aware of them logger.error( f"Error creating release: {e} - {json_error}", extra=logging_params, exc_info=True, ) # 400 probably isn't the right status code but oh well return self.respond({"detail": f"Error creating release: {e}"}, status=400) # set the refs try: resp = session.post( url, json=release_payload, headers=headers, ) json_error = safe_json_parse(resp) resp.raise_for_status() except RequestException as e: # errors will probably be common if the user doesn't have repos set up logger.info( f"Error setting refs: {e} - {json_error}", extra=logging_params, exc_info=True, ) # 400 probably isn't the right status code but oh well return self.respond({"detail": f"Error setting refs: {e}"}, status=400) # we are going to quit after the first project match as there shouldn't be multiple matches return self.respond(status=201) return self.respond(status=204) ``` ###Assistant : Steps: 1. Find all org integrations that match the external id 2. Search the configs to find one that matches the vercel project of the webhook 3. Look up the Sentry project that matches 4. Look up the connected internal integration 5. Find the token associated with that installation 6. Determine the commit sha and repo based on what provider is used 7. Create the release using the token WITHOUT refs 8. Update the release with refs
256
def _app_user(self) -> User | None: return self.user if isinstance(self.user, User) else None
The user, if they are represented persistently in our app.
10
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _app_user(self) -> User | None: return self.user if isinstance(self.user, User) else None ``` ###Assistant : The user, if they are represented persistently in our app.
257
def new_locator(self, nx, nx1=None): return AxesLocator(self, nx, 0, nx1 if nx1 is not None else nx + 1, 1)
Create a new `.AxesLocator` for the specified cell. Parameters ---------- nx, nx1 : int Integers specifying the column-position of the cell. When *nx1* is None, a single *nx*-th column is specified. Otherwise, location of columns spanning between *nx* to *nx1* (but excluding *nx1*-th column) is specified.
46
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def new_locator(self, nx, nx1=None): return AxesLocator(self, nx, 0, nx1 if nx1 is not None else nx + 1, 1) ``` ###Assistant : Create a new `.AxesLocator` for the specified cell. Parameters ---------- nx, nx1 : int Integers specifying the column-position of the cell. When *nx1* is None, a single *nx*-th column is specified. Otherwise, location of columns spanning between *nx* to *nx1* (but excluding *nx1*-th column) is specified.
258
def test_edit_cases(self) -> None: self.login("hamlet") hamlet = self.example_user("hamlet") msg_id = self.send_stream_message( self.example_user("hamlet"), "Denmark", topic_name="topic 1", content="content 1" ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 2", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 1") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual( set(history[0].keys()), { "timestamp", "prev_content", "user_id", "prev_rendered_content", "prev_rendered_content_version", }, ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "topic": "topic 2", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 1") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual(set(history[0].keys()), {"timestamp", LEGACY_PREV_TOPIC, "user_id"}) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 3", "topic": "topic 3", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 2") self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 2") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual( set(history[0].keys()), { "timestamp", LEGACY_PREV_TOPIC, "prev_content", "user_id", "prev_rendered_content", "prev_rendered_content_version", }, ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 4", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 3") self.assertEqual(history[0]["user_id"], hamlet.id) self.login("iago") result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "topic": "topic 4", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 3") self.assertEqual(history[0]["user_id"], self.example_user("iago").id) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 3") self.assertEqual(history[2][LEGACY_PREV_TOPIC], "topic 2") self.assertEqual(history[3][LEGACY_PREV_TOPIC], "topic 1") self.assertEqual(history[1]["prev_content"], "content 3") self.assertEqual(history[2]["prev_content"], "content 2") self.assertEqual(history[4]["prev_content"], "content 1") # Now, we verify that the edit history data sent back has the # correct filled-out fields message_edit_history = self.client_get(f"/json/messages/{msg_id}/history") json_response = orjson.loads(message_edit_history.content) # We reverse the message history view output so that the IDs line up with the above. message_history = list(reversed(json_response["message_history"])) i = 0 for entry in message_history: expected_entries = {"content", "rendered_content", "topic", "timestamp", "user_id"} if i in {0, 2, 3}: expected_entries.add("prev_topic") if i in {1, 2, 4}: expected_entries.add("prev_content") expected_entries.add("prev_rendered_content") expected_entries.add("content_html_diff") i += 1 self.assertEqual(expected_entries, set(entry.keys())) self.assert_length(message_history, 6) self.assertEqual(message_history[0]["prev_topic"], "topic 3") self.assertEqual(message_history[0]["topic"], "topic 4") self.assertEqual(message_history[1]["topic"], "topic 3") self.assertEqual(message_history[2]["topic"], "topic 3") self.assertEqual(message_history[2]["prev_topic"], "topic 2") self.assertEqual(message_history[3]["topic"], "topic 2") self.assertEqual(message_history[3]["prev_topic"], "topic 1") self.assertEqual(message_history[4]["topic"], "topic 1") self.assertEqual(message_history[0]["content"], "content 4") self.assertEqual(message_history[1]["content"], "content 4") self.assertEqual(message_history[1]["prev_content"], "content 3") self.assertEqual(message_history[2]["content"], "content 3") self.assertEqual(message_history[2]["prev_content"], "content 2") self.assertEqual(message_history[3]["content"], "content 2") self.assertEqual(message_history[4]["content"], "content 2") self.assertEqual(message_history[4]["prev_content"], "content 1") self.assertEqual(message_history[5]["content"], "content 1") self.assertEqual(message_history[5]["topic"], "topic 1")
This test verifies the accuracy of construction of Zulip's edit history data structures.
13
310
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_edit_cases(self) -> None: self.login("hamlet") hamlet = self.example_user("hamlet") msg_id = self.send_stream_message( self.example_user("hamlet"), "Denmark", topic_name="topic 1", content="content 1" ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 2", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 1") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual( set(history[0].keys()), { "timestamp", "prev_content", "user_id", "prev_rendered_content", "prev_rendered_content_version", }, ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "topic": "topic 2", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 1") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual(set(history[0].keys()), {"timestamp", LEGACY_PREV_TOPIC, "user_id"}) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 3", "topic": "topic 3", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 2") self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 2") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual( set(history[0].keys()), { "timestamp", LEGACY_PREV_TOPIC, "prev_content", "user_id", "prev_rendered_content", "prev_rendered_content_version", }, ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 4", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 3") self.assertEqual(history[0]["user_id"], hamlet.id) self.login("iago") result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "topic": "topic 4", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 3") self.assertEqual(history[0]["user_id"], self.example_user("iago").id) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 3") self.assertEqual(history[2][LEGACY_PREV_TOPIC], "topic 2") self.assertEqual(history[3][LEGACY_PREV_TOPIC], "topic 1") self.assertEqual(history[1]["prev_content"], "content 3") self.assertEqual(history[2]["prev_content"], "content 2") self.assertEqual(history[4]["prev_content"], "content 1") # Now, we verify that the edit history data sent back has the # correct filled-out fields message_edit_history = self.client_get(f"/json/messages/{msg_id}/history") json_response = orjson.loads(message_edit_history.content) # We reverse the message history view output so that the IDs line up with the above. message_history = list(reversed(json_response["message_history"])) i = 0 for entry in message_history: expected_entries = {"content", "rendered_content", "topic", "timestamp", "user_id"} if i in {0, 2, 3}: expected_entries.add("prev_topic") if i in {1, 2, 4}: expected_entries.add("prev_content") expected_entries.add("prev_rendered_content") expected_entries.add("content_html_diff") i += 1 self.assertEqual(expected_entries, set(entry.keys())) self.assert_length(message_history, 6) self.assertEqual(message_history[0]["prev_topic"], "topic 3") self.assertEqual(message_history[0]["topic"], "topic 4") self.assertEqual(message_history[1]["topic"], "topic 3") self.assertEqual(message_history[2]["topic"], "topic 3") self.assertEqual(message_history[2]["prev_topic"], "topic 2") self.assertEqual(message_history[3]["topic"], "topic 2") self.assertEqual(message_history[3]["prev_topic"], "topic 1") self.assertEqual(message_history[4]["topic"], "topic 1") self.assertEqual(message_history[0]["content"], "content 4") self.assertEqual(message_history[1]["content"], "content 4") self.assertEqual(message_history[1]["prev_content"], "content 3") self.assertEqual(message_history[2]["content"], "content 3") self.assertEqual(message_history[2]["prev_content"], "content 2") self.assertEqual(message_history[3]["content"], "content 2") self.assertEqual(message_history[4]["content"], "content 2") self.assertEqual(message_history[4]["prev_content"], "content 1") self.assertEqual(message_history[5]["content"], "content 1") self.assertEqual(message_history[5]["topic"], "topic 1") ``` ###Assistant : This test verifies the accuracy of construction of Zulip's edit history data structures.
259
def get_project_name(doctype, txt, searchfield, start, page_len, filters): doctype = "Project" cond = "" if filters and filters.get("customer"): cond = % ( frappe.db.escape(filters.get("customer")) ) fields = get_fields(doctype, ["name", "project_name"]) searchfields = frappe.get_meta(doctype).get_search_fields() searchfields = " or ".join(["`tabProject`." + field + " like %(txt)s" for field in searchfields]) return frappe.db.sql( .format( fields=", ".join(["`tabProject`.{0}".format(f) for f in fields]), cond=cond, scond=searchfields, match_cond=get_match_cond(doctype), start=start, page_len=page_len, ), {"txt": "%{0}%".format(txt), "_txt": txt.replace("%", "")}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
(`tabProject`.customer = %s or ifnull(`tabProject`.customer,"")="") andselect {fields} from `tabProject` where `tabProject`.status not in ('Completed', 'Cancelled') and {cond} {scond} {match_cond} order by (case when locate(%(_txt)s, `tabProject`.name) > 0 then locate(%(_txt)s, `tabProject`.name) else 99999 end), `tabProject`.idx desc, `tabProject`.name asc limit {page_len} offset {start}
41
69
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_project_name(doctype, txt, searchfield, start, page_len, filters): doctype = "Project" cond = "" if filters and filters.get("customer"): cond = % ( frappe.db.escape(filters.get("customer")) ) fields = get_fields(doctype, ["name", "project_name"]) searchfields = frappe.get_meta(doctype).get_search_fields() searchfields = " or ".join(["`tabProject`." + field + " like %(txt)s" for field in searchfields]) return frappe.db.sql( .format( fields=", ".join(["`tabProject`.{0}".format(f) for f in fields]), cond=cond, scond=searchfields, match_cond=get_match_cond(doctype), start=start, page_len=page_len, ), {"txt": "%{0}%".format(txt), "_txt": txt.replace("%", "")}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs ``` ###Assistant : (`tabProject`.customer = %s or ifnull(`tabProject`.customer,"")="") andselect {fields} from `tabProject` where `tabProject`.status not in ('Completed', 'Cancelled') and {cond} {scond} {match_cond} order by (case when locate(%(_txt)s, `tabProject`.name) > 0 then locate(%(_txt)s, `tabProject`.name) else 99999 end), `tabProject`.idx desc, `tabProject`.name asc limit {page_len} offset {start}
260
def test_autosuggest_at_EOL(text, cursor, suggestion, called): event = make_event(text, cursor, suggestion) event.current_buffer.insert_text = Mock() _apply_autosuggest(event) if called: event.current_buffer.insert_text.assert_called() else: event.current_buffer.insert_text.assert_not_called() # event.current_buffer.document.get_end_of_line_position.assert_called()
test that autosuggest is only applied at end of line.
10
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_autosuggest_at_EOL(text, cursor, suggestion, called): event = make_event(text, cursor, suggestion) event.current_buffer.insert_text = Mock() _apply_autosuggest(event) if called: event.current_buffer.insert_text.assert_called() else: event.current_buffer.insert_text.assert_not_called() # event.current_buffer.document.get_end_of_line_position.assert_called() ``` ###Assistant : test that autosuggest is only applied at end of line.
261
def stack3(x, filters, blocks, stride1=2, groups=32, name=None): x = block3(x, filters, stride=stride1, groups=groups, name=name + "_block1") for i in range(2, blocks + 1): x = block3( x, filters, groups=groups, conv_shortcut=False, name=name + "_block" + str(i), ) return x @keras_export( "keras.applications.resnet50.ResNet50", "keras.applications.resnet.ResNet50", "keras.applications.ResNet50", )
A set of stacked residual blocks. Args: x: input tensor. filters: integer, filters of the bottleneck layer in a block. blocks: integer, blocks in the stacked blocks. stride1: default 2, stride of the first layer in the first block. groups: default 32, group size for grouped convolution. name: string, stack label. Returns: Output tensor for the stacked blocks.
58
43
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def stack3(x, filters, blocks, stride1=2, groups=32, name=None): x = block3(x, filters, stride=stride1, groups=groups, name=name + "_block1") for i in range(2, blocks + 1): x = block3( x, filters, groups=groups, conv_shortcut=False, name=name + "_block" + str(i), ) return x @keras_export( "keras.applications.resnet50.ResNet50", "keras.applications.resnet.ResNet50", "keras.applications.ResNet50", ) ``` ###Assistant : A set of stacked residual blocks. Args: x: input tensor. filters: integer, filters of the bottleneck layer in a block. blocks: integer, blocks in the stacked blocks. stride1: default 2, stride of the first layer in the first block. groups: default 32, group size for grouped convolution. name: string, stack label. Returns: Output tensor for the stacked blocks.
262
def is_mixed(self) -> bool: warnings.warn( "Index.is_mixed is deprecated and will be removed in a future version. " "Check index.inferred_type directly instead.", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) return self.inferred_type in ["mixed"]
Check if the Index holds data with mixed data types. Returns ------- bool Whether or not the Index holds data with mixed data types. See Also -------- is_boolean : Check if the Index only consists of booleans. is_integer : Check if the Index only consists of integers. is_floating : Check if the Index is a floating type. is_numeric : Check if the Index only consists of numeric data. is_object : Check if the Index is of the object dtype. is_categorical : Check if the Index holds categorical data. is_interval : Check if the Index holds Interval objects. Examples -------- >>> idx = pd.Index(['a', np.nan, 'b']) >>> idx.is_mixed() True >>> idx = pd.Index([1.0, 2.0, 3.0, 5.0]) >>> idx.is_mixed() False
118
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_mixed(self) -> bool: warnings.warn( "Index.is_mixed is deprecated and will be removed in a future version. " "Check index.inferred_type directly instead.", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) return self.inferred_type in ["mixed"] ``` ###Assistant : Check if the Index holds data with mixed data types. Returns ------- bool Whether or not the Index holds data with mixed data types. See Also -------- is_boolean : Check if the Index only consists of booleans. is_integer : Check if the Index only consists of integers. is_floating : Check if the Index is a floating type. is_numeric : Check if the Index only consists of numeric data. is_object : Check if the Index is of the object dtype. is_categorical : Check if the Index holds categorical data. is_interval : Check if the Index holds Interval objects. Examples -------- >>> idx = pd.Index(['a', np.nan, 'b']) >>> idx.is_mixed() True >>> idx = pd.Index([1.0, 2.0, 3.0, 5.0]) >>> idx.is_mixed() False
263
def responder(request): # Find an available port with socket.socket() as sock: sock.bind(("localhost", 0)) port = sock.getsockname()[1] server_process = multiprocessing.Process( target=process_server, args=(request.param, port) ) server_process.start() yield port server_process.join(10) server_process.terminate() kill_time = 5 wait_time = 0 while server_process.is_alive(): if wait_time > kill_time: server_process.kill() break else: wait_time += 0.1 time.sleep(0.1) server_process.close() @pytest.mark.parametrize( "responder, read_method, parquet_engine", [ (CSVUserAgentResponder, pd.read_csv, None), (JSONUserAgentResponder, pd.read_json, None), (ParquetPyArrowUserAgentResponder, pd.read_parquet, "pyarrow"), pytest.param( ParquetFastParquetUserAgentResponder, pd.read_parquet, "fastparquet", # TODO(ArrayManager) fastparquet marks=[ td.skip_array_manager_not_yet_implemented, pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10"), ], ), (PickleUserAgentResponder, pd.read_pickle, None), (StataUserAgentResponder, pd.read_stata, None), (GzippedCSVUserAgentResponder, pd.read_csv, None), (GzippedJSONUserAgentResponder, pd.read_json, None), ], indirect=["responder"], )
Fixture that starts a local http server in a separate process on localhost and returns the port. Running in a separate process instead of a thread to allow termination/killing of http server upon cleanup.
34
93
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def responder(request): # Find an available port with socket.socket() as sock: sock.bind(("localhost", 0)) port = sock.getsockname()[1] server_process = multiprocessing.Process( target=process_server, args=(request.param, port) ) server_process.start() yield port server_process.join(10) server_process.terminate() kill_time = 5 wait_time = 0 while server_process.is_alive(): if wait_time > kill_time: server_process.kill() break else: wait_time += 0.1 time.sleep(0.1) server_process.close() @pytest.mark.parametrize( "responder, read_method, parquet_engine", [ (CSVUserAgentResponder, pd.read_csv, None), (JSONUserAgentResponder, pd.read_json, None), (ParquetPyArrowUserAgentResponder, pd.read_parquet, "pyarrow"), pytest.param( ParquetFastParquetUserAgentResponder, pd.read_parquet, "fastparquet", # TODO(ArrayManager) fastparquet marks=[ td.skip_array_manager_not_yet_implemented, pytest.mark.xfail(PY310, reason="fastparquet failing on 3.10"), ], ), (PickleUserAgentResponder, pd.read_pickle, None), (StataUserAgentResponder, pd.read_stata, None), (GzippedCSVUserAgentResponder, pd.read_csv, None), (GzippedJSONUserAgentResponder, pd.read_json, None), ], indirect=["responder"], ) ``` ###Assistant : Fixture that starts a local http server in a separate process on localhost and returns the port. Running in a separate process instead of a thread to allow termination/killing of http server upon cleanup.
264
def _pre_setup(self): super()._pre_setup() if self.available_apps is not None: apps.set_available_apps(self.available_apps) setting_changed.send( sender=settings._wrapped.__class__, setting="INSTALLED_APPS", value=self.available_apps, enter=True, ) for db_name in self._databases_names(include_mirrors=False): emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name) try: self._fixture_setup() except Exception: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send( sender=settings._wrapped.__class__, setting="INSTALLED_APPS", value=settings.INSTALLED_APPS, enter=False, ) raise # Clear the queries_log so that it's less likely to overflow (a single # test probably won't execute 9K queries). If queries_log overflows, # then assertNumQueries() doesn't work. for db_name in self._databases_names(include_mirrors=False): connections[db_name].queries_log.clear()
Perform pre-test setup: * If the class has an 'available_apps' attribute, restrict the app registry to these applications, then fire the post_migrate signal -- it must run with the correct set of applications for the test case. * If the class has a 'fixtures' attribute, install those fixtures.
48
72
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _pre_setup(self): super()._pre_setup() if self.available_apps is not None: apps.set_available_apps(self.available_apps) setting_changed.send( sender=settings._wrapped.__class__, setting="INSTALLED_APPS", value=self.available_apps, enter=True, ) for db_name in self._databases_names(include_mirrors=False): emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name) try: self._fixture_setup() except Exception: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send( sender=settings._wrapped.__class__, setting="INSTALLED_APPS", value=settings.INSTALLED_APPS, enter=False, ) raise # Clear the queries_log so that it's less likely to overflow (a single # test probably won't execute 9K queries). If queries_log overflows, # then assertNumQueries() doesn't work. for db_name in self._databases_names(include_mirrors=False): connections[db_name].queries_log.clear() ``` ###Assistant : Perform pre-test setup: * If the class has an 'available_apps' attribute, restrict the app registry to these applications, then fire the post_migrate signal -- it must run with the correct set of applications for the test case. * If the class has a 'fixtures' attribute, install those fixtures.
265
def test_multiple_actions_form(self): action_data = { ACTION_CHECKBOX_NAME: [self.s1.pk], # Two different actions selected on the two forms... "action": ["external_mail", "delete_selected"], # ...but "go" was clicked on the top form. "index": 0, } self.client.post( reverse("admin:admin_views_externalsubscriber_changelist"), action_data ) # The action sends mail rather than deletes. self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, "Greetings from a function action")
Actions come from the form whose submit button was pressed (#10618).
11
51
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_multiple_actions_form(self): action_data = { ACTION_CHECKBOX_NAME: [self.s1.pk], # Two different actions selected on the two forms... "action": ["external_mail", "delete_selected"], # ...but "go" was clicked on the top form. "index": 0, } self.client.post( reverse("admin:admin_views_externalsubscriber_changelist"), action_data ) # The action sends mail rather than deletes. self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, "Greetings from a function action") ``` ###Assistant : Actions come from the form whose submit button was pressed (#10618).
266
def test_empty_message(self) -> None: self.login("hamlet") othello = self.example_user("othello") result = self.client_post( "/json/messages", {"type": "private", "content": " ", "to": othello.email}, ) self.assert_json_error(result, "Message must not be empty")
Sending a message that is empty or only whitespace should fail
11
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_empty_message(self) -> None: self.login("hamlet") othello = self.example_user("othello") result = self.client_post( "/json/messages", {"type": "private", "content": " ", "to": othello.email}, ) self.assert_json_error(result, "Message must not be empty") ``` ###Assistant : Sending a message that is empty or only whitespace should fail
267
def freqai_feature_engineering_generic(self, dataframe, **kwargs): dataframe["%-pct-change"] = dataframe["close"].pct_change() dataframe["%-raw_volume"] = dataframe["volume"] dataframe["%-raw_price"] = dataframe["close"] return dataframe
This optional function will be called for all include_timeframes (including corr_pairs). After that, the features will be shifted by the number of candles in the include_shifted_candles. :param df: strategy dataframe which will receive the features dataframe["%-pct-change"] = dataframe["close"].pct_change()
38
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def freqai_feature_engineering_generic(self, dataframe, **kwargs): dataframe["%-pct-change"] = dataframe["close"].pct_change() dataframe["%-raw_volume"] = dataframe["volume"] dataframe["%-raw_price"] = dataframe["close"] return dataframe ``` ###Assistant : This optional function will be called for all include_timeframes (including corr_pairs). After that, the features will be shifted by the number of candles in the include_shifted_candles. :param df: strategy dataframe which will receive the features dataframe["%-pct-change"] = dataframe["close"].pct_change()
268
def next_sample(uid): return next(_SHARED_SEQUENCES[uid]) @keras_export("keras.utils.GeneratorEnqueuer")
Gets the next value from the generator `uid`. To allow multiple generators to be used at the same time, we use `uid` to get a specific one. A single generator would cause the validation to overwrite the training generator. Args: uid: int, generator identifier Returns: The next value of generator `uid`.
51
5
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def next_sample(uid): return next(_SHARED_SEQUENCES[uid]) @keras_export("keras.utils.GeneratorEnqueuer") ``` ###Assistant : Gets the next value from the generator `uid`. To allow multiple generators to be used at the same time, we use `uid` to get a specific one. A single generator would cause the validation to overwrite the training generator. Args: uid: int, generator identifier Returns: The next value of generator `uid`.
269
def check_settings(base_url=None): if base_url is None: base_url = settings.STATIC_URL if not base_url: raise ImproperlyConfigured( "You're using the staticfiles app " "without having set the required STATIC_URL setting.") if settings.MEDIA_URL == base_url: raise ImproperlyConfigured( "The MEDIA_URL and STATIC_URL settings must have different values" ) if (settings.DEBUG and settings.MEDIA_URL and settings.STATIC_URL and settings.MEDIA_URL.startswith(settings.STATIC_URL)): raise ImproperlyConfigured( "runserver can't serve media if MEDIA_URL is within STATIC_URL." ) if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and (settings.MEDIA_ROOT == settings.STATIC_ROOT)): raise ImproperlyConfigured( "The MEDIA_ROOT and STATIC_ROOT settings must have different values" )
Check if the staticfiles settings have sane values.
8
83
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_settings(base_url=None): if base_url is None: base_url = settings.STATIC_URL if not base_url: raise ImproperlyConfigured( "You're using the staticfiles app " "without having set the required STATIC_URL setting.") if settings.MEDIA_URL == base_url: raise ImproperlyConfigured( "The MEDIA_URL and STATIC_URL settings must have different values" ) if (settings.DEBUG and settings.MEDIA_URL and settings.STATIC_URL and settings.MEDIA_URL.startswith(settings.STATIC_URL)): raise ImproperlyConfigured( "runserver can't serve media if MEDIA_URL is within STATIC_URL." ) if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and (settings.MEDIA_ROOT == settings.STATIC_ROOT)): raise ImproperlyConfigured( "The MEDIA_ROOT and STATIC_ROOT settings must have different values" ) ``` ###Assistant : Check if the staticfiles settings have sane values.
270
def kubernetes_dict(self, **kwargs) -> Dict: config = self.dict(**kwargs) for idx, deployment in enumerate(config["deployments"]): if isinstance(deployment.get("ray_actor_options"), dict): # JSON-serialize ray_actor_options' resources dictionary if isinstance(deployment["ray_actor_options"].get("resources"), dict): deployment["ray_actor_options"]["resources"] = json.dumps( deployment["ray_actor_options"]["resources"] ) # JSON-serialize ray_actor_options' runtime_env dictionary if isinstance(deployment["ray_actor_options"].get("runtime_env"), dict): deployment["ray_actor_options"]["runtime_env"] = json.dumps( deployment["ray_actor_options"]["runtime_env"] ) # Convert ray_actor_options' keys deployment["ray_actor_options"] = dict_keys_snake_to_camel_case( deployment["ray_actor_options"] ) # JSON-serialize user_config dictionary if isinstance(deployment.get("user_config"), dict): deployment["user_config"] = json.dumps(deployment["user_config"]) # Convert deployment's keys config["deployments"][idx] = dict_keys_snake_to_camel_case(deployment) # Convert top-level runtime_env if isinstance(config.get("runtime_env"), dict): config["runtime_env"] = json.dumps(config["runtime_env"]) # Convert top-level option's keys config = dict_keys_snake_to_camel_case(config) return config @PublicAPI(stability="beta")
Returns dictionary in Kubernetes format. Dictionary can be yaml-dumped to a Serve config file directly and then copy-pasted into a RayService Kubernetes config. Args: all kwargs are passed directly into schema's dict() function.
33
89
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def kubernetes_dict(self, **kwargs) -> Dict: config = self.dict(**kwargs) for idx, deployment in enumerate(config["deployments"]): if isinstance(deployment.get("ray_actor_options"), dict): # JSON-serialize ray_actor_options' resources dictionary if isinstance(deployment["ray_actor_options"].get("resources"), dict): deployment["ray_actor_options"]["resources"] = json.dumps( deployment["ray_actor_options"]["resources"] ) # JSON-serialize ray_actor_options' runtime_env dictionary if isinstance(deployment["ray_actor_options"].get("runtime_env"), dict): deployment["ray_actor_options"]["runtime_env"] = json.dumps( deployment["ray_actor_options"]["runtime_env"] ) # Convert ray_actor_options' keys deployment["ray_actor_options"] = dict_keys_snake_to_camel_case( deployment["ray_actor_options"] ) # JSON-serialize user_config dictionary if isinstance(deployment.get("user_config"), dict): deployment["user_config"] = json.dumps(deployment["user_config"]) # Convert deployment's keys config["deployments"][idx] = dict_keys_snake_to_camel_case(deployment) # Convert top-level runtime_env if isinstance(config.get("runtime_env"), dict): config["runtime_env"] = json.dumps(config["runtime_env"]) # Convert top-level option's keys config = dict_keys_snake_to_camel_case(config) return config @PublicAPI(stability="beta") ``` ###Assistant : Returns dictionary in Kubernetes format. Dictionary can be yaml-dumped to a Serve config file directly and then copy-pasted into a RayService Kubernetes config. Args: all kwargs are passed directly into schema's dict() function.
271
def reload_images(self, group_method, img_list): logger.info("Preparing to group...") if group_method == 'group_blur': filename_list, image_list = self._get_images() blurs = [self.estimate_blur(img) for img in image_list] temp_list = list(zip(filename_list, blurs)) elif group_method == 'group_blur_fft': filename_list, image_list = self._get_images() fft_blurs = [self.estimate_blur_fft(img) for img in image_list] temp_list = list(zip(filename_list, fft_blurs)) elif group_method == 'group_face_cnn': filename_list, image_list, landmarks = self._get_landmarks() temp_list = list(zip(filename_list, landmarks)) elif group_method == 'group_face_yaw': filename_list, image_list, landmarks = self._get_landmarks() yaws = [self.calc_landmarks_face_yaw(mark) for mark in landmarks] temp_list = list(zip(filename_list, yaws)) elif group_method == 'group_hist': filename_list, image_list = self._get_images() histograms = [cv2.calcHist([img], [0], None, [256], [0, 256]) for img in image_list] temp_list = list(zip(filename_list, histograms)) elif group_method == 'group_black_pixels': filename_list, image_list = self._get_images() black_pixels = [np.ndarray.all(img == [0, 0, 0], axis=2).sum()/img.size*100*3 for img in image_list] temp_list = list(zip(filename_list, black_pixels)) else: raise ValueError(f"{group_method} group_method not found.") return self.splice_lists(img_list, temp_list)
Reloads the image list by replacing the comparative values with those that the chosen grouping method expects. :param group_method: str name of the grouping method that will be used. :param img_list: image list that has been sorted by one of the sort methods. :return: img_list but with the comparative values that the chosen grouping method expects.
56
135
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def reload_images(self, group_method, img_list): logger.info("Preparing to group...") if group_method == 'group_blur': filename_list, image_list = self._get_images() blurs = [self.estimate_blur(img) for img in image_list] temp_list = list(zip(filename_list, blurs)) elif group_method == 'group_blur_fft': filename_list, image_list = self._get_images() fft_blurs = [self.estimate_blur_fft(img) for img in image_list] temp_list = list(zip(filename_list, fft_blurs)) elif group_method == 'group_face_cnn': filename_list, image_list, landmarks = self._get_landmarks() temp_list = list(zip(filename_list, landmarks)) elif group_method == 'group_face_yaw': filename_list, image_list, landmarks = self._get_landmarks() yaws = [self.calc_landmarks_face_yaw(mark) for mark in landmarks] temp_list = list(zip(filename_list, yaws)) elif group_method == 'group_hist': filename_list, image_list = self._get_images() histograms = [cv2.calcHist([img], [0], None, [256], [0, 256]) for img in image_list] temp_list = list(zip(filename_list, histograms)) elif group_method == 'group_black_pixels': filename_list, image_list = self._get_images() black_pixels = [np.ndarray.all(img == [0, 0, 0], axis=2).sum()/img.size*100*3 for img in image_list] temp_list = list(zip(filename_list, black_pixels)) else: raise ValueError(f"{group_method} group_method not found.") return self.splice_lists(img_list, temp_list) ``` ###Assistant : Reloads the image list by replacing the comparative values with those that the chosen grouping method expects. :param group_method: str name of the grouping method that will be used. :param img_list: image list that has been sorted by one of the sort methods. :return: img_list but with the comparative values that the chosen grouping method expects.
272
def get_sales_orders(quotations): if not quotations: return [] quotation_names = [q.name for q in quotations] return frappe.db.sql( .format( ", ".join(["%s"] * len(quotation_names)) ), tuple(quotation_names), as_dict=1, ) # nosec
SELECT so.`name`, so.`base_grand_total`, soi.prevdoc_docname as quotation FROM `tabSales Order` so, `tabSales Order Item` soi WHERE so.docstatus=1 AND so.name = soi.parent AND soi.prevdoc_docname in ({0})
24
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_sales_orders(quotations): if not quotations: return [] quotation_names = [q.name for q in quotations] return frappe.db.sql( .format( ", ".join(["%s"] * len(quotation_names)) ), tuple(quotation_names), as_dict=1, ) # nosec ``` ###Assistant : SELECT so.`name`, so.`base_grand_total`, soi.prevdoc_docname as quotation FROM `tabSales Order` so, `tabSales Order Item` soi WHERE so.docstatus=1 AND so.name = soi.parent AND soi.prevdoc_docname in ({0})
273
def download_extract(name, folder=None): fname = download(name) base_dir = os.path.dirname(fname) data_dir, ext = os.path.splitext(fname) if ext == '.zip': fp = zipfile.ZipFile(fname, 'r') elif ext in ('.tar', '.gz'): fp = tarfile.open(fname, 'r') else: assert False, 'Only zip/tar files can be extracted.' fp.extractall(base_dir) return os.path.join(base_dir, folder) if folder else data_dir
Download and extract a zip/tar file. Defined in :numref:`sec_utils`
9
47
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def download_extract(name, folder=None): fname = download(name) base_dir = os.path.dirname(fname) data_dir, ext = os.path.splitext(fname) if ext == '.zip': fp = zipfile.ZipFile(fname, 'r') elif ext in ('.tar', '.gz'): fp = tarfile.open(fname, 'r') else: assert False, 'Only zip/tar files can be extracted.' fp.extractall(base_dir) return os.path.join(base_dir, folder) if folder else data_dir ``` ###Assistant : Download and extract a zip/tar file. Defined in :numref:`sec_utils`
274
def sample_weights_mismatch(self): # If there is a mismatch between sample weight mode and the placeholders # created, then recompile the sub-graphs that depend on sample weights. return ( self.sample_weight_mode is not None and self.sample_weight is None ) or ( self.sample_weight_mode is None and self.sample_weight is not None )
Check if the sample weight and the mode match or not.
11
48
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sample_weights_mismatch(self): # If there is a mismatch between sample weight mode and the placeholders # created, then recompile the sub-graphs that depend on sample weights. return ( self.sample_weight_mode is not None and self.sample_weight is None ) or ( self.sample_weight_mode is None and self.sample_weight is not None ) ``` ###Assistant : Check if the sample weight and the mode match or not.
275
def shebang(self, line, cell): # Create the event loop in which to run script magics # this operates on a background thread if self.event_loop is None: if sys.platform == "win32": # don't override the current policy, # just create an event loop event_loop = asyncio.WindowsProactorEventLoopPolicy().new_event_loop() else: event_loop = asyncio.new_event_loop() self.event_loop = event_loop # start the loop in a background thread asyncio_thread = Thread(target=event_loop.run_forever, daemon=True) asyncio_thread.start() else: event_loop = self.event_loop
Run a cell via a shell command The `%%script` line is like the #! line of script, specifying a program (bash, perl, ruby, etc.) with which to run. The rest of the cell is run by that program. Examples -------- :: In [1]: %%script bash ...: for i in 1 2 3; do ...: echo $i ...: done 1 2 3
61
69
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def shebang(self, line, cell): # Create the event loop in which to run script magics # this operates on a background thread if self.event_loop is None: if sys.platform == "win32": # don't override the current policy, # just create an event loop event_loop = asyncio.WindowsProactorEventLoopPolicy().new_event_loop() else: event_loop = asyncio.new_event_loop() self.event_loop = event_loop # start the loop in a background thread asyncio_thread = Thread(target=event_loop.run_forever, daemon=True) asyncio_thread.start() else: event_loop = self.event_loop ``` ###Assistant : Run a cell via a shell command The `%%script` line is like the #! line of script, specifying a program (bash, perl, ruby, etc.) with which to run. The rest of the cell is run by that program. Examples -------- :: In [1]: %%script bash ...: for i in 1 2 3; do ...: echo $i ...: done 1 2 3
276
def statistics(self): try: return self._local.statistics except AttributeError: self._local.statistics = {} return self._local.statistics
Return a dictionary of runtime statistics. This dictionary will be empty when the controller has never been ran. When it is running or has ran previously it should have (but may not) have useful and/or informational keys and values when running is underway and/or completed. .. warning:: The keys in this dictionary **should** be some what stable (not changing), but there existence **may** change between major releases as new statistics are gathered or removed so before accessing keys ensure that they actually exist and handle when they do not. .. note:: The values in this dictionary are local to the thread running call (so if multiple threads share the same retrying object - either directly or indirectly) they will each have there own view of statistics they have collected (in the future we may provide a way to aggregate the various statistics from each thread).
145
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def statistics(self): try: return self._local.statistics except AttributeError: self._local.statistics = {} return self._local.statistics ``` ###Assistant : Return a dictionary of runtime statistics. This dictionary will be empty when the controller has never been ran. When it is running or has ran previously it should have (but may not) have useful and/or informational keys and values when running is underway and/or completed. .. warning:: The keys in this dictionary **should** be some what stable (not changing), but there existence **may** change between major releases as new statistics are gathered or removed so before accessing keys ensure that they actually exist and handle when they do not. .. note:: The values in this dictionary are local to the thread running call (so if multiple threads share the same retrying object - either directly or indirectly) they will each have there own view of statistics they have collected (in the future we may provide a way to aggregate the various statistics from each thread).
277
def send(self, sender, **named): if ( not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS ): return [] return [ (receiver, receiver(signal=self, sender=sender, **named)) for receiver in self._live_receivers(sender) ]
Send signal from sender to all connected receivers. If any receiver raises an error, the error propagates back through send, terminating the dispatch loop. So it's possible that all receivers won't be called if an error is raised. Arguments: sender The sender of the signal. Either a specific object or None. named Named arguments which will be passed to receivers. Return a list of tuple pairs [(receiver, response), ... ].
70
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def send(self, sender, **named): if ( not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS ): return [] return [ (receiver, receiver(signal=self, sender=sender, **named)) for receiver in self._live_receivers(sender) ] ``` ###Assistant : Send signal from sender to all connected receivers. If any receiver raises an error, the error propagates back through send, terminating the dispatch loop. So it's possible that all receivers won't be called if an error is raised. Arguments: sender The sender of the signal. Either a specific object or None. named Named arguments which will be passed to receivers. Return a list of tuple pairs [(receiver, response), ... ].
278
def deploy_ray_func(func, *args): # pragma: no cover result = func(*args) ip = get_node_ip_address() if isinstance(result, pandas.DataFrame): return result, len(result), len(result.columns), ip elif all(isinstance(r, pandas.DataFrame) for r in result): return [i for r in result for i in [r, len(r), len(r.columns), ip]] else: return [i for r in result for i in [r, None, None, ip]]
Execute a function on an axis partition in a worker process. Parameters ---------- func : callable Function to be executed on an axis partition. *args : iterable Additional arguments that need to passed in ``func``. Returns ------- list The result of the function ``func`` and metadata for it. Notes ----- Ray functions are not detected by codecov (thus pragma: no cover).
61
55
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deploy_ray_func(func, *args): # pragma: no cover result = func(*args) ip = get_node_ip_address() if isinstance(result, pandas.DataFrame): return result, len(result), len(result.columns), ip elif all(isinstance(r, pandas.DataFrame) for r in result): return [i for r in result for i in [r, len(r), len(r.columns), ip]] else: return [i for r in result for i in [r, None, None, ip]] ``` ###Assistant : Execute a function on an axis partition in a worker process. Parameters ---------- func : callable Function to be executed on an axis partition. *args : iterable Additional arguments that need to passed in ``func``. Returns ------- list The result of the function ``func`` and metadata for it. Notes ----- Ray functions are not detected by codecov (thus pragma: no cover).
279
def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, ViltEncoder): module.gradient_checkpointing = value VILT_START_DOCSTRING = r VILT_INPUTS_DOCSTRING = r VILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING = r @add_start_docstrings( "The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.", VILT_START_DOCSTRING, )
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ViltConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See [`ViltFeatureExtractor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*): Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `pixel_values` into patch embeddings. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See [`ViltFeatureExtractor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, num_images, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*): Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `pixel_values` into patch embeddings. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
802
36
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, ViltEncoder): module.gradient_checkpointing = value VILT_START_DOCSTRING = r VILT_INPUTS_DOCSTRING = r VILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING = r @add_start_docstrings( "The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.", VILT_START_DOCSTRING, ) ``` ###Assistant : This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ViltConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See [`ViltFeatureExtractor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*): Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `pixel_values` into patch embeddings. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See [`ViltFeatureExtractor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, num_images, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*): Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `pixel_values` into patch embeddings. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
280
def needs_document_end_workaround(self): if objects.backend == usertypes.Backend.QtWebKit: return False assert objects.backend == usertypes.Backend.QtWebEngine, objects.backend broken_scripts = [ ('http://userstyles.org', None), ('https://github.com/ParticleCore', 'Iridium'), ] return any(self._matches_id(namespace=namespace, name=name) for namespace, name in broken_scripts)
Check whether to force @run-at document-end. This needs to be done on QtWebEngine for known-broken scripts. On Qt 5.12, accessing the DOM isn't possible with "@run-at document-start". It was documented to be impossible before, but seems to work fine. However, some scripts do DOM access with "@run-at document-start". Fix those by forcing them to use document-end instead.
57
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def needs_document_end_workaround(self): if objects.backend == usertypes.Backend.QtWebKit: return False assert objects.backend == usertypes.Backend.QtWebEngine, objects.backend broken_scripts = [ ('http://userstyles.org', None), ('https://github.com/ParticleCore', 'Iridium'), ] return any(self._matches_id(namespace=namespace, name=name) for namespace, name in broken_scripts) ``` ###Assistant : Check whether to force @run-at document-end. This needs to be done on QtWebEngine for known-broken scripts. On Qt 5.12, accessing the DOM isn't possible with "@run-at document-start". It was documented to be impossible before, but seems to work fine. However, some scripts do DOM access with "@run-at document-start". Fix those by forcing them to use document-end instead.
281
def _temperature_unit(self) -> str: if ( weather_option_temperature_unit := self._weather_option_temperature_unit ) is not None: return weather_option_temperature_unit return self._default_temperature_unit
Return the converted unit of measurement for temperature. Should not be set by integrations.
14
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _temperature_unit(self) -> str: if ( weather_option_temperature_unit := self._weather_option_temperature_unit ) is not None: return weather_option_temperature_unit return self._default_temperature_unit ``` ###Assistant : Return the converted unit of measurement for temperature. Should not be set by integrations.
282
def fontName(self, fontprop): if isinstance(fontprop, str): filenames = [fontprop] elif mpl.rcParams['pdf.use14corefonts']: filenames = _fontManager._find_fonts_by_props( fontprop, fontext='afm', directory=RendererPdf._afm_font_dir ) else: filenames = _fontManager._find_fonts_by_props(fontprop) first_Fx = None for fname in filenames: Fx = self.fontNames.get(fname) if not first_Fx: first_Fx = Fx if Fx is None: Fx = next(self._internal_font_seq) self.fontNames[fname] = Fx _log.debug('Assigning font %s = %r', Fx, fname) if not first_Fx: first_Fx = Fx # find_fontsprop's first value always adheres to # findfont's value, so technically no behaviour change return first_Fx
Select a font based on fontprop and return a name suitable for Op.selectfont. If fontprop is a string, it will be interpreted as the filename of the font.
28
78
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fontName(self, fontprop): if isinstance(fontprop, str): filenames = [fontprop] elif mpl.rcParams['pdf.use14corefonts']: filenames = _fontManager._find_fonts_by_props( fontprop, fontext='afm', directory=RendererPdf._afm_font_dir ) else: filenames = _fontManager._find_fonts_by_props(fontprop) first_Fx = None for fname in filenames: Fx = self.fontNames.get(fname) if not first_Fx: first_Fx = Fx if Fx is None: Fx = next(self._internal_font_seq) self.fontNames[fname] = Fx _log.debug('Assigning font %s = %r', Fx, fname) if not first_Fx: first_Fx = Fx # find_fontsprop's first value always adheres to # findfont's value, so technically no behaviour change return first_Fx ``` ###Assistant : Select a font based on fontprop and return a name suitable for Op.selectfont. If fontprop is a string, it will be interpreted as the filename of the font.
283
def center_to_corners_format(x): x_center, y_center, width, height = x.unbind(-1) boxes = [(x_center - 0.5 * width), (y_center - 0.5 * height), (x_center + 0.5 * width), (y_center + 0.5 * height)] return torch.stack(boxes, dim=-1)
Converts a PyTorch tensor of bounding boxes of center format (center_x, center_y, width, height) to corners format (left, top, right, bottom).
21
33
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def center_to_corners_format(x): x_center, y_center, width, height = x.unbind(-1) boxes = [(x_center - 0.5 * width), (y_center - 0.5 * height), (x_center + 0.5 * width), (y_center + 0.5 * height)] return torch.stack(boxes, dim=-1) ``` ###Assistant : Converts a PyTorch tensor of bounding boxes of center format (center_x, center_y, width, height) to corners format (left, top, right, bottom).
284
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): raise NotImplementedError( "subclasses of BaseCache must provide an add() method" )
Set a value in the cache if the key does not already exist. If timeout is given, use that timeout for the key; otherwise use the default cache timeout. Return True if the value was stored, False otherwise.
38
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): raise NotImplementedError( "subclasses of BaseCache must provide an add() method" ) ``` ###Assistant : Set a value in the cache if the key does not already exist. If timeout is given, use that timeout for the key; otherwise use the default cache timeout. Return True if the value was stored, False otherwise.
285
def normalized_laplacian_matrix(G, nodelist=None, weight="weight"): r import numpy as np import scipy as sp import scipy.sparse # call as sp.sparse if nodelist is None: nodelist = list(G) A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") n, m = A.shape diags = A.sum(axis=1) # TODO: rm csr_array wrapper when spdiags can produce arrays D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, m, n, format="csr")) L = D - A with sp.errstate(divide="ignore"): diags_sqrt = 1.0 / np.sqrt(diags) diags_sqrt[np.isinf(diags_sqrt)] = 0 # TODO: rm csr_array wrapper when spdiags can produce arrays DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format="csr")) return DH @ (L @ DH)
Returns the normalized Laplacian matrix of G. The normalized graph Laplacian is the matrix .. math:: N = D^{-1/2} L D^{-1/2} where `L` is the graph Laplacian and `D` is the diagonal matrix of node degrees [1]_. Parameters ---------- G : graph A NetworkX graph nodelist : list, optional The rows and columns are ordered according to the nodes in nodelist. If nodelist is None, then the ordering is produced by G.nodes(). weight : string or None, optional (default='weight') The edge data key used to compute each value in the matrix. If None, then each edge has weight 1. Returns ------- N : SciPy sparse array The normalized Laplacian matrix of G. Notes ----- For MultiGraph, the edges weights are summed. See :func:`to_numpy_array` for other options. If the Graph contains selfloops, D is defined as ``diag(sum(A, 1))``, where A is the adjacency matrix [2]_. See Also -------- laplacian_matrix normalized_laplacian_spectrum References ---------- .. [1] Fan Chung-Graham, Spectral Graph Theory, CBMS Regional Conference Series in Mathematics, Number 92, 1997. .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98, March 2007.
190
94
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def normalized_laplacian_matrix(G, nodelist=None, weight="weight"): r import numpy as np import scipy as sp import scipy.sparse # call as sp.sparse if nodelist is None: nodelist = list(G) A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") n, m = A.shape diags = A.sum(axis=1) # TODO: rm csr_array wrapper when spdiags can produce arrays D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, m, n, format="csr")) L = D - A with sp.errstate(divide="ignore"): diags_sqrt = 1.0 / np.sqrt(diags) diags_sqrt[np.isinf(diags_sqrt)] = 0 # TODO: rm csr_array wrapper when spdiags can produce arrays DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format="csr")) return DH @ (L @ DH) ``` ###Assistant : Returns the normalized Laplacian matrix of G. The normalized graph Laplacian is the matrix .. math:: N = D^{-1/2} L D^{-1/2} where `L` is the graph Laplacian and `D` is the diagonal matrix of node degrees [1]_. Parameters ---------- G : graph A NetworkX graph nodelist : list, optional The rows and columns are ordered according to the nodes in nodelist. If nodelist is None, then the ordering is produced by G.nodes(). weight : string or None, optional (default='weight') The edge data key used to compute each value in the matrix. If None, then each edge has weight 1. Returns ------- N : SciPy sparse array The normalized Laplacian matrix of G. Notes ----- For MultiGraph, the edges weights are summed. See :func:`to_numpy_array` for other options. If the Graph contains selfloops, D is defined as ``diag(sum(A, 1))``, where A is the adjacency matrix [2]_. See Also -------- laplacian_matrix normalized_laplacian_spectrum References ---------- .. [1] Fan Chung-Graham, Spectral Graph Theory, CBMS Regional Conference Series in Mathematics, Number 92, 1997. .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98, March 2007.
286
def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None): i_h, i_w = image.shape[:2] p_h, p_w = patch_size if p_h > i_h: raise ValueError( "Height of the patch should be less than the height of the image." ) if p_w > i_w: raise ValueError( "Width of the patch should be less than the width of the image." ) image = check_array(image, allow_nd=True) image = image.reshape((i_h, i_w, -1)) n_colors = image.shape[-1] extracted_patches = _extract_patches( image, patch_shape=(p_h, p_w, n_colors), extraction_step=1 ) n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches) if max_patches: rng = check_random_state(random_state) i_s = rng.randint(i_h - p_h + 1, size=n_patches) j_s = rng.randint(i_w - p_w + 1, size=n_patches) patches = extracted_patches[i_s, j_s, 0] else: patches = extracted_patches patches = patches.reshape(-1, p_h, p_w, n_colors) # remove the color dimension if useless if patches.shape[-1] == 1: return patches.reshape((n_patches, p_h, p_w)) else: return patches
Reshape a 2D image into a collection of patches. The resulting patches are allocated in a dedicated array. Read more in the :ref:`User Guide <image_feature_extraction>`. Parameters ---------- image : ndarray of shape (image_height, image_width) or \ (image_height, image_width, n_channels) The original image data. For color images, the last dimension specifies the channel: a RGB image would have `n_channels=3`. patch_size : tuple of int (patch_height, patch_width) The dimensions of one patch. max_patches : int or float, default=None The maximum number of patches to extract. If `max_patches` is a float between 0 and 1, it is taken to be a proportion of the total number of patches. random_state : int, RandomState instance, default=None Determines the random number generator used for random sampling when `max_patches` is not None. Use an int to make the randomness deterministic. See :term:`Glossary <random_state>`. Returns ------- patches : array of shape (n_patches, patch_height, patch_width) or \ (n_patches, patch_height, patch_width, n_channels) The collection of patches extracted from the image, where `n_patches` is either `max_patches` or the total number of patches that can be extracted. Examples -------- >>> from sklearn.datasets import load_sample_image >>> from sklearn.feature_extraction import image >>> # Use the array data from the first image in this dataset: >>> one_image = load_sample_image("china.jpg") >>> print('Image shape: {}'.format(one_image.shape)) Image shape: (427, 640, 3) >>> patches = image.extract_patches_2d(one_image, (2, 2)) >>> print('Patches shape: {}'.format(patches.shape)) Patches shape: (272214, 2, 2, 3) >>> # Here are just two of these patches: >>> print(patches[1]) [[[174 201 231] [174 201 231]] [[173 200 230] [173 200 230]]] >>> print(patches[800]) [[[187 214 243] [188 215 244]] [[187 214 243] [188 215 244]]]
266
136
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None): i_h, i_w = image.shape[:2] p_h, p_w = patch_size if p_h > i_h: raise ValueError( "Height of the patch should be less than the height of the image." ) if p_w > i_w: raise ValueError( "Width of the patch should be less than the width of the image." ) image = check_array(image, allow_nd=True) image = image.reshape((i_h, i_w, -1)) n_colors = image.shape[-1] extracted_patches = _extract_patches( image, patch_shape=(p_h, p_w, n_colors), extraction_step=1 ) n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches) if max_patches: rng = check_random_state(random_state) i_s = rng.randint(i_h - p_h + 1, size=n_patches) j_s = rng.randint(i_w - p_w + 1, size=n_patches) patches = extracted_patches[i_s, j_s, 0] else: patches = extracted_patches patches = patches.reshape(-1, p_h, p_w, n_colors) # remove the color dimension if useless if patches.shape[-1] == 1: return patches.reshape((n_patches, p_h, p_w)) else: return patches ``` ###Assistant : Reshape a 2D image into a collection of patches. The resulting patches are allocated in a dedicated array. Read more in the :ref:`User Guide <image_feature_extraction>`. Parameters ---------- image : ndarray of shape (image_height, image_width) or \ (image_height, image_width, n_channels) The original image data. For color images, the last dimension specifies the channel: a RGB image would have `n_channels=3`. patch_size : tuple of int (patch_height, patch_width) The dimensions of one patch. max_patches : int or float, default=None The maximum number of patches to extract. If `max_patches` is a float between 0 and 1, it is taken to be a proportion of the total number of patches. random_state : int, RandomState instance, default=None Determines the random number generator used for random sampling when `max_patches` is not None. Use an int to make the randomness deterministic. See :term:`Glossary <random_state>`. Returns ------- patches : array of shape (n_patches, patch_height, patch_width) or \ (n_patches, patch_height, patch_width, n_channels) The collection of patches extracted from the image, where `n_patches` is either `max_patches` or the total number of patches that can be extracted. Examples -------- >>> from sklearn.datasets import load_sample_image >>> from sklearn.feature_extraction import image >>> # Use the array data from the first image in this dataset: >>> one_image = load_sample_image("china.jpg") >>> print('Image shape: {}'.format(one_image.shape)) Image shape: (427, 640, 3) >>> patches = image.extract_patches_2d(one_image, (2, 2)) >>> print('Patches shape: {}'.format(patches.shape)) Patches shape: (272214, 2, 2, 3) >>> # Here are just two of these patches: >>> print(patches[1]) [[[174 201 231] [174 201 231]] [[173 200 230] [173 200 230]]] >>> print(patches[800]) [[[187 214 243] [188 215 244]] [[187 214 243] [188 215 244]]]
287
def refresh_from_db(self, using=None, fields=None): if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, "_prefetched_objects_cache", ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' "are not allowed in fields." % LOOKUP_SEP ) hints = {"instance": self} db_instance_qs = self.__class__._base_manager.db_manager( using, hints=hints ).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [ f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields ] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db
Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method.
85
165
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def refresh_from_db(self, using=None, fields=None): if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, "_prefetched_objects_cache", ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' "are not allowed in fields." % LOOKUP_SEP ) hints = {"instance": self} db_instance_qs = self.__class__._base_manager.db_manager( using, hints=hints ).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [ f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields ] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db ``` ###Assistant : Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method.
288
def set_tunnel(self, host, port=None, headers=None): if self.sock: raise RuntimeError("Can't set up tunnel for established connection") self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: self._tunnel_headers = headers else: self._tunnel_headers.clear()
Set up host and port for HTTP CONNECT tunnelling. In a connection that uses HTTP CONNECT tunneling, the host passed to the constructor is used as a proxy server that relays all communication to the endpoint passed to `set_tunnel`. This done by sending an HTTP CONNECT request to the proxy server when the connection is established. This method must be called before the HTTP connection has been established. The headers argument should be a mapping of extra HTTP headers to send with the CONNECT request.
85
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_tunnel(self, host, port=None, headers=None): if self.sock: raise RuntimeError("Can't set up tunnel for established connection") self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: self._tunnel_headers = headers else: self._tunnel_headers.clear() ``` ###Assistant : Set up host and port for HTTP CONNECT tunnelling. In a connection that uses HTTP CONNECT tunneling, the host passed to the constructor is used as a proxy server that relays all communication to the endpoint passed to `set_tunnel`. This done by sending an HTTP CONNECT request to the proxy server when the connection is established. This method must be called before the HTTP connection has been established. The headers argument should be a mapping of extra HTTP headers to send with the CONNECT request.
289
def explicit_device_get_scope() -> Iterator[None]: state = transfer_guard_lib.thread_local_state() prev = state.explicit_device_get state.explicit_device_get = True try: yield finally: state.explicit_device_get = prev
Indicates that the current context is an explicit device_get() call.
10
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def explicit_device_get_scope() -> Iterator[None]: state = transfer_guard_lib.thread_local_state() prev = state.explicit_device_get state.explicit_device_get = True try: yield finally: state.explicit_device_get = prev ``` ###Assistant : Indicates that the current context is an explicit device_get() call.
290
def accessory_info(self) -> Service: return self.accessory.services.first( service_type=ServicesTypes.ACCESSORY_INFORMATION )
Information about the make and model of an accessory.
9
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def accessory_info(self) -> Service: return self.accessory.services.first( service_type=ServicesTypes.ACCESSORY_INFORMATION ) ``` ###Assistant : Information about the make and model of an accessory.
291
def _always_object(classes): if object not in classes: return classes + (object,) return classes
Ensure object appears in the mro even for old-style classes.
10
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _always_object(classes): if object not in classes: return classes + (object,) return classes ``` ###Assistant : Ensure object appears in the mro even for old-style classes.
292
def addIncludedDataFilesFromFileOptions(): for included_datafile in _addIncludedDataFilesFromFileOptions(): addIncludedDataFile(included_datafile)
Early data files, from user options that work with file system.
11
7
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def addIncludedDataFilesFromFileOptions(): for included_datafile in _addIncludedDataFilesFromFileOptions(): addIncludedDataFile(included_datafile) ``` ###Assistant : Early data files, from user options that work with file system.
293
def _attributes(**kwargs) -> dict[str, str]: return {key: str(value) for key, value in kwargs.items() if value is not None}
Return the given kwargs as a dictionary with values converted to strings. Items with a value of None will be omitted.
21
18
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _attributes(**kwargs) -> dict[str, str]: return {key: str(value) for key, value in kwargs.items() if value is not None} ``` ###Assistant : Return the given kwargs as a dictionary with values converted to strings. Items with a value of None will be omitted.
294
def test_charpp(self): dll = CDLL(_ctypes_test.__file__) func = dll._testfunc_c_p_p func.restype = c_char_p argv = (c_char_p * 2)() argc = c_int( 2 ) argv[0] = b'hello' argv[1] = b'world' result = func( byref(argc), argv ) self.assertEqual(result, b'world')
Test that a character pointer-to-pointer is correctly passed
8
35
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_charpp(self): dll = CDLL(_ctypes_test.__file__) func = dll._testfunc_c_p_p func.restype = c_char_p argv = (c_char_p * 2)() argc = c_int( 2 ) argv[0] = b'hello' argv[1] = b'world' result = func( byref(argc), argv ) self.assertEqual(result, b'world') ``` ###Assistant : Test that a character pointer-to-pointer is correctly passed
295
def test_error_message_unsigned(self): # Ensure to test for potential overflow in the case of: # x - y # and # y - x x = np.asarray([0, 1, 8], dtype='uint8') y = np.asarray([4, 4, 4], dtype='uint8') with pytest.raises(AssertionError) as exc_info: assert_allclose(x, y, atol=3) msgs = str(exc_info.value).split('\n') assert_equal(msgs[4], 'Max absolute difference: 4')
Check the the message is formatted correctly when overflow can occur (gh21768)
12
50
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_error_message_unsigned(self): # Ensure to test for potential overflow in the case of: # x - y # and # y - x x = np.asarray([0, 1, 8], dtype='uint8') y = np.asarray([4, 4, 4], dtype='uint8') with pytest.raises(AssertionError) as exc_info: assert_allclose(x, y, atol=3) msgs = str(exc_info.value).split('\n') assert_equal(msgs[4], 'Max absolute difference: 4') ``` ###Assistant : Check the the message is formatted correctly when overflow can occur (gh21768)
296
def _join_or_get_room(self, room_id_or_alias): rooms = self._client.get_rooms() if room_id_or_alias in rooms: _LOGGER.debug("Already in room %s", room_id_or_alias) return rooms[room_id_or_alias] for room in rooms.values(): if room.room_id not in self._aliases_fetched_for: room.update_aliases() self._aliases_fetched_for.add(room.room_id) if ( room_id_or_alias in room.aliases or room_id_or_alias == room.canonical_alias ): _LOGGER.debug( "Already in room %s (known as %s)", room.room_id, room_id_or_alias ) return room room = self._client.join_room(room_id_or_alias) _LOGGER.info("Joined room %s (known as %s)", room.room_id, room_id_or_alias) return room
Join a room or get it, if we are already in the room. We can't just always call join_room(), since that seems to crash the client if we're already in the room.
32
64
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _join_or_get_room(self, room_id_or_alias): rooms = self._client.get_rooms() if room_id_or_alias in rooms: _LOGGER.debug("Already in room %s", room_id_or_alias) return rooms[room_id_or_alias] for room in rooms.values(): if room.room_id not in self._aliases_fetched_for: room.update_aliases() self._aliases_fetched_for.add(room.room_id) if ( room_id_or_alias in room.aliases or room_id_or_alias == room.canonical_alias ): _LOGGER.debug( "Already in room %s (known as %s)", room.room_id, room_id_or_alias ) return room room = self._client.join_room(room_id_or_alias) _LOGGER.info("Joined room %s (known as %s)", room.room_id, room_id_or_alias) return room ``` ###Assistant : Join a room or get it, if we are already in the room. We can't just always call join_room(), since that seems to crash the client if we're already in the room.
297
def _is_function_class_equation(func_class, f, symbol): if f.is_Mul or f.is_Add: return all(_is_function_class_equation(func_class, arg, symbol) for arg in f.args) if f.is_Pow: if not f.exp.has(symbol): return _is_function_class_equation(func_class, f.base, symbol) else: return False if not f.has(symbol): return True if isinstance(f, func_class): try: g = Poly(f.args[0], symbol) return g.degree() <= 1 except PolynomialError: return False else: return False
Tests whether the equation is an equation of the given function class. The given equation belongs to the given function class if it is comprised of functions of the function class which are multiplied by or added to expressions independent of the symbol. In addition, the arguments of all such functions must be linear in the symbol as well. Examples ======== >>> from sympy.solvers.solveset import _is_function_class_equation >>> from sympy import tan, sin, tanh, sinh, exp >>> from sympy.abc import x >>> from sympy.functions.elementary.trigonometric import TrigonometricFunction >>> from sympy.functions.elementary.hyperbolic import HyperbolicFunction >>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x) False >>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x) True >>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x) False >>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x) True >>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x) True
123
52
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _is_function_class_equation(func_class, f, symbol): if f.is_Mul or f.is_Add: return all(_is_function_class_equation(func_class, arg, symbol) for arg in f.args) if f.is_Pow: if not f.exp.has(symbol): return _is_function_class_equation(func_class, f.base, symbol) else: return False if not f.has(symbol): return True if isinstance(f, func_class): try: g = Poly(f.args[0], symbol) return g.degree() <= 1 except PolynomialError: return False else: return False ``` ###Assistant : Tests whether the equation is an equation of the given function class. The given equation belongs to the given function class if it is comprised of functions of the function class which are multiplied by or added to expressions independent of the symbol. In addition, the arguments of all such functions must be linear in the symbol as well. Examples ======== >>> from sympy.solvers.solveset import _is_function_class_equation >>> from sympy import tan, sin, tanh, sinh, exp >>> from sympy.abc import x >>> from sympy.functions.elementary.trigonometric import TrigonometricFunction >>> from sympy.functions.elementary.hyperbolic import HyperbolicFunction >>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x) False >>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x) True >>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x) False >>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x) True >>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x) True
298
def get_feature_names_out(self, input_features=None): class_name = self.__class__.__name__.lower() return np.asarray([f"{class_name}0"], dtype=object)
Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Ignored. Returns ------- feature_names_out : ndarray of str objects An ndarray with one string i.e. ["isotonicregression0"].
32
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_feature_names_out(self, input_features=None): class_name = self.__class__.__name__.lower() return np.asarray([f"{class_name}0"], dtype=object) ``` ###Assistant : Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Ignored. Returns ------- feature_names_out : ndarray of str objects An ndarray with one string i.e. ["isotonicregression0"].
299
def get_distance(self, f, dist_val, lookup_type): # Getting the distance parameter value = dist_val[0] # Shorthand boolean flags. geodetic = f.geodetic(self.connection) geography = f.geography if isinstance(value, Distance): if geography: dist_param = value.m elif geodetic: if lookup_type == "dwithin": raise ValueError( "Only numeric values of degree units are " "allowed on geographic DWithin queries." ) dist_param = value.m else: dist_param = getattr( value, Distance.unit_attname(f.units_name(self.connection)) ) else: # Assuming the distance is in the units of the field. dist_param = value return [dist_param]
Retrieve the distance parameters for the given geometry field, distance lookup value, and the distance lookup type. This is the most complex implementation of the spatial backends due to what is supported on geodetic geometry columns vs. what's available on projected geometry columns. In addition, it has to take into account the geography column type.
55
80
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_distance(self, f, dist_val, lookup_type): # Getting the distance parameter value = dist_val[0] # Shorthand boolean flags. geodetic = f.geodetic(self.connection) geography = f.geography if isinstance(value, Distance): if geography: dist_param = value.m elif geodetic: if lookup_type == "dwithin": raise ValueError( "Only numeric values of degree units are " "allowed on geographic DWithin queries." ) dist_param = value.m else: dist_param = getattr( value, Distance.unit_attname(f.units_name(self.connection)) ) else: # Assuming the distance is in the units of the field. dist_param = value return [dist_param] ``` ###Assistant : Retrieve the distance parameters for the given geometry field, distance lookup value, and the distance lookup type. This is the most complex implementation of the spatial backends due to what is supported on geodetic geometry columns vs. what's available on projected geometry columns. In addition, it has to take into account the geography column type.