Dataset Viewer (auto-converted to Parquet)

Columns:
  id: int64, values from 11 to 59.9k
  original: string, lengths from 33 to 150k characters
  modified: string, lengths from 37 to 150k characters

Each row below is listed as its id, followed by that row's original and modified code snippets.
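Given the three-column layout above (id, original, modified), the table can be read programmatically. The snippet below is a minimal sketch using the Hugging Face `datasets` library; the repository id and the split name are placeholders, since neither is shown in this excerpt.

# Minimal sketch: load the table and inspect one (original, modified) pair.
# "user/code-pairs" is a placeholder repository id and "train" an assumed
# split name -- neither appears in this excerpt.
from datasets import load_dataset

ds = load_dataset("user/code-pairs", split="train")

row = ds[0]
print(row["id"])               # integer row id (e.g. 45191)
print(row["original"][:120])   # start of the original code snippet
print(row["modified"][:120])   # start of the modified code snippet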
id: 45,191
def test_drop_duplicates(): frame_data = { "A": list(range(3)) * 2, "B": list(range(1, 4)) * 2, "C": list(range(6)), } modin_df = pd.DataFrame(frame_data) pandas_df = pandas.DataFrame(frame_data) # noqa F841 df_equals( modin_df.drop_duplicates(subset=["A", "B"], keep="first", inplace=False), pandas_df.drop_duplicates(subset=["A", "B"], keep="first", inplace=False), ) df_equals( modin_df.drop_duplicates(subset=["A", "B"], keep="last", inplace=False), pandas_df.drop_duplicates(subset=["A", "B"], keep="last", inplace=False), ) df_equals( modin_df.drop_duplicates(subset=["A", "B"], keep=False, inplace=False), pandas_df.drop_duplicates(subset=["A", "B"], keep=False, inplace=False), ) df_equals(modin_df.drop_duplicates(inplace=False), pandas_df) modin_df.drop_duplicates(subset=["A", "B"], inplace=True) df_equals(modin_df, pandas_df.drop_duplicates(subset=["A", "B"], inplace=False)) modin_df = pd.DataFrame(frame_data) modin_df.drop_duplicates(subset=["A", "B"], keep=False, inplace=True) df_equals(modin_df, pandas.DataFrame({"A": [], "B": [], "C": []}))
def test_drop_duplicates(): frame_data = { "A": list(range(3)) * 2, "B": list(range(1, 4)) * 2, "C": list(range(6)), } modin_df = pd.DataFrame(frame_data) pandas_df = pandas.DataFrame(frame_data) df_equals( modin_df.drop_duplicates(subset=["A", "B"], keep="first", inplace=False), pandas_df.drop_duplicates(subset=["A", "B"], keep="first", inplace=False), ) df_equals( modin_df.drop_duplicates(subset=["A", "B"], keep="last", inplace=False), pandas_df.drop_duplicates(subset=["A", "B"], keep="last", inplace=False), ) df_equals( modin_df.drop_duplicates(subset=["A", "B"], keep=False, inplace=False), pandas_df.drop_duplicates(subset=["A", "B"], keep=False, inplace=False), ) df_equals(modin_df.drop_duplicates(inplace=False), pandas_df) modin_df.drop_duplicates(subset=["A", "B"], inplace=True) df_equals(modin_df, pandas_df.drop_duplicates(subset=["A", "B"], inplace=False)) modin_df = pd.DataFrame(frame_data) modin_df.drop_duplicates(subset=["A", "B"], keep=False, inplace=True) df_equals(modin_df, pandas.DataFrame({"A": [], "B": [], "C": []}))
id: 9,363
def test_wrap_var_set(): assert not isinstance(wrap_var(set(['foo'])), AnsibleUnsafe) for item in wrap_var(set(['foo'])): assert isinstance(item, AnsibleUnsafe)
def test_wrap_var_set(): assert isinstance(wrap_var(set(['foo'])), set) for item in wrap_var(set(['foo'])): assert isinstance(item, AnsibleUnsafe)
id: 24,704
def _declare_qos_parameteres( entity_type: Union[Type[Publisher], Type[Subscription]], node: 'Node', topic_name: Text, qos: QoSProfile, options: QoSOverridingOptions ) -> QoSProfile: """ Declare qos parameters for a Publisher or a Subscription. :param entity_type: Either `rclpy.node.Publisher` or `rclpy.node.Subscription`. :param node: Node used to declare the parameters. :param topic_name: Topic name of the entity being created. :param qos: Default qos settings of the entity being created, that will be overriden with the user provided qos parameter overrides. :param options: Options that indicates which parameters are going to be declared. """ if not issubclass(entity_type, (Publisher, Subscription)): raise TypeError('Argument `entity_type` should be a subclass of Publisher or Subscription') entity_type_str = 'publisher' if issubclass(entity_type, Publisher) else Subscription id_suffix = '' if options.entity_id is None else f'_{options.entity_id}' name = f'qos_overrides.{topic_name}.{entity_type_str}{id_suffix}.' '{}' description = '{}' f' for {entity_type_str} `{topic_name}` with id `{options.entity_id}`' allowed_policies = _get_allowed_policies(entity_type) for policy in options.policy_kinds: if policy not in allowed_policies: continue policy_name = policy.name.lower() descriptor = ParameterDescriptor() descriptor.description = description.format(policy_name) descriptor.read_only = True param = node.declare_parameter( name.format(policy_name), _get_qos_policy_parameter(qos, policy), descriptor) _override_qos_policy_with_param(qos, policy, param) if options.callback is not None and not options.callback(qos): raise InvalidQosOverridesError( description.format('Provided qos overrides') + ', are not valid')
def _declare_qos_parameters( entity_type: Union[Type[Publisher], Type[Subscription]], node: 'Node', topic_name: Text, qos: QoSProfile, options: QoSOverridingOptions ) -> QoSProfile: """ Declare qos parameters for a Publisher or a Subscription. :param entity_type: Either `rclpy.node.Publisher` or `rclpy.node.Subscription`. :param node: Node used to declare the parameters. :param topic_name: Topic name of the entity being created. :param qos: Default qos settings of the entity being created, that will be overriden with the user provided qos parameter overrides. :param options: Options that indicates which parameters are going to be declared. """ if not issubclass(entity_type, (Publisher, Subscription)): raise TypeError('Argument `entity_type` should be a subclass of Publisher or Subscription') entity_type_str = 'publisher' if issubclass(entity_type, Publisher) else Subscription id_suffix = '' if options.entity_id is None else f'_{options.entity_id}' name = f'qos_overrides.{topic_name}.{entity_type_str}{id_suffix}.' '{}' description = '{}' f' for {entity_type_str} `{topic_name}` with id `{options.entity_id}`' allowed_policies = _get_allowed_policies(entity_type) for policy in options.policy_kinds: if policy not in allowed_policies: continue policy_name = policy.name.lower() descriptor = ParameterDescriptor() descriptor.description = description.format(policy_name) descriptor.read_only = True param = node.declare_parameter( name.format(policy_name), _get_qos_policy_parameter(qos, policy), descriptor) _override_qos_policy_with_param(qos, policy, param) if options.callback is not None and not options.callback(qos): raise InvalidQosOverridesError( description.format('Provided qos overrides') + ', are not valid')
id: 3,123
def test_win_type_freq_return_deprecation(): freq_roll = Series(range(2), index=date_range("2020", periods=2)).rolling("2s") with tm.assert_produces_warning(FutureWarning): freq_roll.win_type
def test_win_type_freq_return_deprecation(): freq_roll = Series(range(2), index=date_range("2020", periods=2)).rolling("2s") with tm.assert_produces_warning(FutureWarning): assert freq_roll.win_type == "freq"
id: 19,831
def populate_counts(sf, schema, objs_cached, logger): objects_to_count = [objname for objname in objs_cached] counts, transports_errors, salesforce_errors = count_sobjects(sf, objects_to_count) errors = transports_errors + salesforce_errors for error in errors[0:10]: logger.warning(f"Error counting SObjects: {error}") if len(errors) > 10: logger.warning(f"{len(errors)} more counting errors surpressed") for objname, count in counts.items(): schema[objname].count = count schema.session.flush() return counts.items()
def populate_counts(sf, schema, objs_cached, logger): objects_to_count = [objname for objname in objs_cached] counts, transports_errors, salesforce_errors = count_sobjects(sf, objects_to_count) errors = transports_errors + salesforce_errors for error in errors[0:10]: logger.warning(f"Error counting SObjects: {error}") if len(errors) > 10: logger.warning(f"{len(errors)} more counting errors suppressed") for objname, count in counts.items(): schema[objname].count = count schema.session.flush() return counts.items()
id: 31,982
def main(): install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging) option = option_handler() packs_artifacts_path = option.packs_artifacts_path id_set_path = option.id_set_path extract_destination_path = option.extract_path storage_bucket_name = option.bucket_name service_account = option.service_account target_packs = option.pack_names if option.pack_names else "" build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4()) override_all_packs = option.override_all_packs signature_key = option.key_string packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {} storage_base_path = option.storage_base_path remove_test_playbooks = option.remove_test_playbooks is_bucket_upload_flow = option.bucket_upload private_bucket_name = option.private_bucket_name ci_branch = option.ci_branch force_upload = option.force_upload marketplace = option.marketplace is_create_dependencies_zip = option.create_dependencies_zip # google cloud storage client initialized storage_client = init_storage_client(service_account) storage_bucket = storage_client.bucket(storage_bucket_name) # Relevant when triggering test upload flow if storage_bucket_name: GCPConfig.PRODUCTION_BUCKET = storage_bucket_name # download and extract index from public bucket index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket, extract_destination_path, storage_base_path) # content repo client initialized content_repo = get_content_git_client(CONTENT_ROOT_PATH) current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path, is_bucket_upload_flow, ci_branch) # detect packs to upload pack_names = get_packs_names(target_packs, previous_commit_hash) extract_packs_artifacts(packs_artifacts_path, extract_destination_path) packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name), marketplace) for pack_name in pack_names if os.path.exists(os.path.join(extract_destination_path, pack_name))] diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash)) # taking care of private packs is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content( index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names, storage_base_path ) if not option.override_all_packs: check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash, storage_bucket, is_private_content_updated) # initiate the statistics handler for marketplace packs statistics_handler = StatisticsHandler(service_account, index_folder_path) # clean index and gcs from non existing or invalid packs clean_non_existing_packs(index_folder_path, private_packs, storage_bucket, storage_base_path) # Packages that depend on new packs that are not in the previous index.json packs_missing_dependencies = [] # starting iteration over packs for pack in packs_list: if not prepare_and_zip_pack(pack, signature_key, remove_test_playbooks): continue task_status = pack.upload_integration_images(storage_bucket, storage_base_path, diff_files_list, True) if not task_status: pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name pack.cleanup() continue task_status = pack.upload_author_image(storage_bucket, storage_base_path, diff_files_list, True) if not task_status: pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name pack.cleanup() continue task_status, modified_rn_files_paths, pack_was_modified = 
pack.detect_modified( content_repo, index_folder_path, current_commit_hash, previous_commit_hash) if not task_status: pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name pack.cleanup() continue task_status, is_missing_dependencies = pack.format_metadata(index_folder_path, packs_dependencies_mapping, build_number, current_commit_hash, pack_was_modified, statistics_handler, pack_names) if is_missing_dependencies: # If the pack is dependent on a new pack # (which is not yet in the index.zip as it might not have been iterated yet) # we will note that it is missing dependencies. # And finally after updating all the packages in index.zip - i.e. the new pack exists now. # We will go over the pack again to add what was missing. # See issue #37290 packs_missing_dependencies.append(pack) if not task_status: pack.status = PackStatus.FAILED_METADATA_PARSING.name pack.cleanup() continue task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified, modified_rn_files_paths) if not task_status: pack.status = PackStatus.FAILED_RELEASE_NOTES.name pack.cleanup() continue if not_updated_build: pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name pack.cleanup() continue task_status, skipped_upload, _ = pack.upload_to_storage(pack.zip_path, pack.latest_version, storage_bucket, override_all_packs or pack_was_modified, storage_base_path) if not task_status: pack.status = PackStatus.FAILED_UPLOADING_PACK.name pack.cleanup() continue task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path) if not task_status: pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name pack.cleanup() continue task_status = pack.prepare_for_index_upload() if not task_status: pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name pack.cleanup() continue task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path, pack_version=pack.latest_version, hidden_pack=pack.hidden) if not task_status: pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name pack.cleanup() continue # in case that pack already exist at cloud storage path and in index, don't show that the pack was changed if skipped_upload and exists_in_index and pack not in packs_missing_dependencies: pack.status = PackStatus.PACK_ALREADY_EXISTS.name pack.cleanup() continue pack.status = PackStatus.SUCCESS.name logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}") # Going over all packs that were marked as missing dependencies, # updating them with the new data for the new packs that were added to the index.zip for pack in packs_missing_dependencies: task_status, _ = pack.format_metadata(index_folder_path, packs_dependencies_mapping, build_number, current_commit_hash, False, statistics_handler, pack_names, format_dependencies_only=True) if not task_status: pack.status = PackStatus.FAILED_METADATA_REFORMATING.name pack.cleanup() continue task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path, pack_version=pack.latest_version, hidden_pack=pack.hidden) if not task_status: pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name pack.cleanup() continue pack.status = PackStatus.SUCCESS.name # upload core packs json to bucket create_corepacks_config(storage_bucket, build_number, index_folder_path, os.path.dirname(packs_artifacts_path), storage_base_path, marketplace) # finished iteration over content packs 
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path, index_blob=index_blob, build_number=build_number, private_packs=private_packs, current_commit_hash=current_commit_hash, index_generation=index_generation, force_upload=force_upload, previous_commit_hash=previous_commit_hash, landing_page_sections=statistics_handler.landing_page_sections, artifacts_dir=os.path.dirname(packs_artifacts_path), storage_bucket=storage_bucket, ) # get the lists of packs divided by their status successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list) # Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE) store_successful_and_failed_packs_in_ci_artifacts( packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs, updated_private_packs_ids, images_data=get_images_data(packs_list) ) # summary of packs status print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow) if is_create_dependencies_zip and is_create_dependencies_zip != 'false' and marketplace == 'xsoar': # handle packs with dependencies zip upload_packs_with_dependencies_zip(extract_destination_path, packs_dependencies_mapping, signature_key, storage_bucket, storage_base_path, id_set_path, packs_list, marketplace)
def main(): install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging) option = option_handler() packs_artifacts_path = option.packs_artifacts_path id_set_path = option.id_set_path extract_destination_path = option.extract_path storage_bucket_name = option.bucket_name service_account = option.service_account target_packs = option.pack_names if option.pack_names else "" build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4()) override_all_packs = option.override_all_packs signature_key = option.key_string packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {} storage_base_path = option.storage_base_path remove_test_playbooks = option.remove_test_playbooks is_bucket_upload_flow = option.bucket_upload private_bucket_name = option.private_bucket_name ci_branch = option.ci_branch force_upload = option.force_upload marketplace = option.marketplace is_create_dependencies_zip = option.create_dependencies_zip # google cloud storage client initialized storage_client = init_storage_client(service_account) storage_bucket = storage_client.bucket(storage_bucket_name) # Relevant when triggering test upload flow if storage_bucket_name: GCPConfig.PRODUCTION_BUCKET = storage_bucket_name # download and extract index from public bucket index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket, extract_destination_path, storage_base_path) # content repo client initialized content_repo = get_content_git_client(CONTENT_ROOT_PATH) current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path, is_bucket_upload_flow, ci_branch) # detect packs to upload pack_names = get_packs_names(target_packs, previous_commit_hash) extract_packs_artifacts(packs_artifacts_path, extract_destination_path) packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name), marketplace) for pack_name in pack_names if os.path.exists(os.path.join(extract_destination_path, pack_name))] diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash)) # taking care of private packs is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content( index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names, storage_base_path ) if not option.override_all_packs: check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash, storage_bucket, is_private_content_updated) # initiate the statistics handler for marketplace packs statistics_handler = StatisticsHandler(service_account, index_folder_path) # clean index and gcs from non existing or invalid packs clean_non_existing_packs(index_folder_path, private_packs, storage_bucket, storage_base_path) # Packages that depend on new packs that are not in the previous index.json packs_missing_dependencies = [] # starting iteration over packs for pack in packs_list: if not prepare_and_zip_pack(pack, signature_key, remove_test_playbooks): continue task_status = pack.upload_integration_images(storage_bucket, storage_base_path, diff_files_list, True) if not task_status: pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name pack.cleanup() continue task_status = pack.upload_author_image(storage_bucket, storage_base_path, diff_files_list, True) if not task_status: pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name pack.cleanup() continue task_status, modified_rn_files_paths, pack_was_modified = 
pack.detect_modified( content_repo, index_folder_path, current_commit_hash, previous_commit_hash) if not task_status: pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name pack.cleanup() continue task_status, is_missing_dependencies = pack.format_metadata(index_folder_path, packs_dependencies_mapping, build_number, current_commit_hash, pack_was_modified, statistics_handler, pack_names) if is_missing_dependencies: # If the pack is dependent on a new pack # (which is not yet in the index.zip as it might not have been iterated yet) # we will note that it is missing dependencies. # And finally after updating all the packages in index.zip - i.e. the new pack exists now. # We will go over the pack again to add what was missing. # See issue #37290 packs_missing_dependencies.append(pack) if not task_status: pack.status = PackStatus.FAILED_METADATA_PARSING.name pack.cleanup() continue task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified, modified_rn_files_paths) if not task_status: pack.status = PackStatus.FAILED_RELEASE_NOTES.name pack.cleanup() continue if not_updated_build: pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name pack.cleanup() continue task_status, skipped_upload, _ = pack.upload_to_storage(pack.zip_path, pack.latest_version, storage_bucket, override_all_packs or pack_was_modified, storage_base_path) if not task_status: pack.status = PackStatus.FAILED_UPLOADING_PACK.name pack.cleanup() continue task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path) if not task_status: pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name pack.cleanup() continue task_status = pack.prepare_for_index_upload() if not task_status: pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name pack.cleanup() continue task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path, pack_version=pack.latest_version, hidden_pack=pack.hidden) if not task_status: pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name pack.cleanup() continue # in case that pack already exist at cloud storage path and in index, don't show that the pack was changed if skipped_upload and exists_in_index and pack not in packs_missing_dependencies: pack.status = PackStatus.PACK_ALREADY_EXISTS.name pack.cleanup() continue pack.status = PackStatus.SUCCESS.name logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}") # Going over all packs that were marked as missing dependencies, # updating them with the new data for the new packs that were added to the index.zip for pack in packs_missing_dependencies: task_status, _ = pack.format_metadata(index_folder_path, packs_dependencies_mapping, build_number, current_commit_hash, False, statistics_handler, pack_names, format_dependencies_only=True) if not task_status: pack.status = PackStatus.FAILED_METADATA_REFORMATING.name pack.cleanup() continue task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path, pack_version=pack.latest_version, hidden_pack=pack.hidden) if not task_status: pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name pack.cleanup() continue pack.status = PackStatus.SUCCESS.name # upload core packs json to bucket create_corepacks_config(storage_bucket, build_number, index_folder_path, os.path.dirname(packs_artifacts_path), storage_base_path, marketplace) # finished iteration over content packs 
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path, index_blob=index_blob, build_number=build_number, private_packs=private_packs, current_commit_hash=current_commit_hash, index_generation=index_generation, force_upload=force_upload, previous_commit_hash=previous_commit_hash, landing_page_sections=statistics_handler.landing_page_sections, artifacts_dir=os.path.dirname(packs_artifacts_path), storage_bucket=storage_bucket, ) # get the lists of packs divided by their status successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list) # Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE) store_successful_and_failed_packs_in_ci_artifacts( packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs, updated_private_packs_ids, images_data=get_images_data(packs_list) ) # summary of packs status print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow) if is_create_dependencies_zip and marketplace == 'xsoar': # handle packs with dependencies zip upload_packs_with_dependencies_zip(extract_destination_path, packs_dependencies_mapping, signature_key, storage_bucket, storage_base_path, id_set_path, packs_list, marketplace)
id: 8,665
def configure(config): config.define_section('currency', CurrencySection, validate=False) config.currency.configure_setting('fixer_io_key', 'API key for fixer IO. Leave blank to use exchangeratesapi.io:') config.currency.configure_setting('enable_regex', 'automatically respond to regex matches:')
def configure(config): config.define_section('currency', CurrencySection, validate=False) config.currency.configure_setting('fixer_io_key', 'API key for fixer IO. Leave blank to use exchangeratesapi.io:') config.currency.configure_setting('enable_regex', 'Automatically respond to regex matches?')
id: 20,224
def process_missing(missing_ids): """Create missing school and alias objects and dump csv of additions. """ csv_out_data = [] csv_slug = '{}/schools_added_on_{}.csv'.format(ipeds_directory, datetime.date.today()) missing_data = process_datafiles(add_schools=missing_ids) for school_id in missing_data: create_school(int(school_id), missing_data[school_id]) data_row = missing_data[school_id] data_row['ID'] = school_id csv_out_data.append(data_row) header = sorted(csv_out_data[0].keys()) dump_csv(csv_slug, header, csv_out_data)
def process_missing(missing_ids): """Create missing school and alias objects and dump csv of additions.""" csv_out_data = [] csv_slug = '{}/schools_added_on_{}.csv'.format(ipeds_directory, datetime.date.today()) missing_data = process_datafiles(add_schools=missing_ids) for school_id in missing_data: create_school(int(school_id), missing_data[school_id]) data_row = missing_data[school_id] data_row['ID'] = school_id csv_out_data.append(data_row) header = sorted(csv_out_data[0].keys()) dump_csv(csv_slug, header, csv_out_data)
id: 30,938
def write_data(sheet, data_item, data_headers, workbook, bold, border): if not isinstance(data_item, list): data_item = [data_item] if not data_headers: data_headers = list(data_item[0].keys()) worksheet = workbook.add_worksheet(sheet) row = 0 col = 0 for key in data_headers: worksheet.write(row, col, key, bold) col += 1 for item in data_item: if len(item) > 0: col = 0 row += 1 for value in data_headers: if item.get(value): worksheet.write(row, col, item.get(value), border) col += 1 else: raise ValueError(f'The header "{value}" does not exist in the given data item.')
def write_data(sheet, data_item, data_headers, workbook, bold, border): if not isinstance(data_item, list): data_item = [data_item] if not data_headers: data_headers = list(data_item[0].keys()) worksheet = workbook.add_worksheet(sheet) row = 0 col = 0 for key in data_headers: worksheet.write(row, col, key, bold) col += 1 for item in data_item: if item: col = 0 row += 1 for value in data_headers: if item.get(value): worksheet.write(row, col, item.get(value), border) col += 1 else: raise ValueError(f'The header "{value}" does not exist in the given data item.')
id: 31,006
def get_pack_dir(branch: str, pr_number: str, repo: str) -> List[str]: """ Get a packs dir names from a contribution pull request changed files Args: branch: The contrib branch pr_number: The contrib PR repo: The contrib repo Returns: A list of packs dir names, if found. """ page = 1 list_packs_dir_names = [] while True: response = requests.get(f'https://api.github.com/repos/demisto/content/pulls/{pr_number}/files', params={'page': str(page)}) response.raise_for_status() files = response.json() if not files: break for pr_file in files: if pr_file['filename'].startswith('Packs/'): pack_dir_name = pr_file['filename'].split('/')[1] if pack_dir_name not in list_packs_dir_names: list_packs_dir_names.append(pack_dir_name) page += 1 return list_packs_dir_names
def get_pack_dir(branch: str, pr_number: str, repo: str) -> List[str]: """ Get packs dir names from a contribution pull request changed files Args: branch: The contrib branch pr_number: The contrib PR repo: The contrib repo Returns: A list of packs dir names, if found. """ page = 1 list_packs_dir_names = [] while True: response = requests.get(f'https://api.github.com/repos/demisto/content/pulls/{pr_number}/files', params={'page': str(page)}) response.raise_for_status() files = response.json() if not files: break for pr_file in files: if pr_file['filename'].startswith('Packs/'): pack_dir_name = pr_file['filename'].split('/')[1] if pack_dir_name not in list_packs_dir_names: list_packs_dir_names.append(pack_dir_name) page += 1 return list_packs_dir_names
id: 44,408
def states_to_numbers(hilbert: DiscreteHilbert, σ: Array) -> Array: """ Converts the configuration σ to a 64-bit integer labelling the Hilbert Space. .. Note:: Requires jax >= 0.3.17 and will crash on older versions. Args: hilbert: The Hilbert space σ: A single or a batch of configurations Returns: a single integer or a batch of integer indices. """ if module_version("jax") < (0, 3, 17): raise RuntimeError( "The jitted conversion of bit-strings to hilbert numbers" "is only supported with jax.__version__ >= 0.3.17, but you " f"have {module_version('jax')}" ) if not hilbert.is_indexable: raise ValueError( f"Hilbert space {hilbert} is too large to be indexed or " f"cannot be indexed at all." ) # calls back into python return jax.pure_callback( hilbert.states_to_numbers, jax.ShapeDtypeStruct(σ.shape[:-1], jnp.int64), σ, vectorized=True, )
def states_to_numbers(hilbert: DiscreteHilbert, σ: Array) -> Array: """ Converts the configuration σ to a 64-bit integer labelling the Hilbert Space. .. Note:: Requires jax >= 0.3.17 and will raise an exception on older versions. Args: hilbert: The Hilbert space σ: A single or a batch of configurations Returns: a single integer or a batch of integer indices. """ if module_version("jax") < (0, 3, 17): raise RuntimeError( "The jitted conversion of bit-strings to hilbert numbers" "is only supported with jax.__version__ >= 0.3.17, but you " f"have {module_version('jax')}" ) if not hilbert.is_indexable: raise ValueError( f"Hilbert space {hilbert} is too large to be indexed or " f"cannot be indexed at all." ) # calls back into python return jax.pure_callback( hilbert.states_to_numbers, jax.ShapeDtypeStruct(σ.shape[:-1], jnp.int64), σ, vectorized=True, )
id: 6,606
def get_or_make_bin(item_code, warehouse): bin_record = frappe.db.get_value('Bin', {'item_code': item_code, 'warehouse': warehouse}) if not bin_record: bin_obj = frappe.get_doc({ "doctype": "Bin", "item_code": item_code, "warehouse": warehouse, }) bin_obj.flags.ignore_permissions = 1 bin_obj.insert() bin_record = bin_obj.name return bin_record
def get_or_make_bin(item_code, warehouse) -> str: bin_record = frappe.db.get_value('Bin', {'item_code': item_code, 'warehouse': warehouse}) if not bin_record: bin_obj = frappe.get_doc({ "doctype": "Bin", "item_code": item_code, "warehouse": warehouse, }) bin_obj.flags.ignore_permissions = 1 bin_obj.insert() bin_record = bin_obj.name return bin_record
id: 13,911
def _find_excluded_ranges( lines: List[Tuple[int, str]], *, warnings: _ExclusionRangeWarnings, exclude_lines_by_pattern: Optional[str] = None, exclude_branches_by_pattern: Optional[str] = None, exclude_pattern_prefix: str, ) -> Callable[[int], bool]: """ Scan through all lines to find line ranges and branch ranges covered by exclusion markers. Example: >>> lines = [(11, '//PREFIX_EXCL_LINE'), (13, '//IGNORE_LINE'), (15, '//PREFIX_EXCL_START'), (18, '//PREFIX_EXCL_STOP'), ... (21, '//PREFIX_EXCL_BR_LINE'), (23, '//IGNORE_BR'), (25, '//PREFIX_EXCL_BR_START'), (28, '//PREFIX_EXCL_BR_STOP')] >>> [exclude_line, exclude_branch] = _find_excluded_ranges( ... lines, warnings=..., exclude_lines_by_pattern = '.*IGNORE_LINE', ... exclude_branches_by_pattern = '.*IGNORE_BR', exclude_pattern_prefix='PREFIX') >>> [lineno for lineno in range(30) if exclude_line(lineno)] [11, 13, 15, 16, 17] >>> [lineno for lineno in range(30) if exclude_branch(lineno)] [21, 23, 25, 26, 27] """ exclude_lines_by_pattern_regex = None if exclude_lines_by_pattern: exclude_lines_by_pattern_regex = re.compile(exclude_lines_by_pattern) exclude_branches_by_pattern_regex = None if exclude_branches_by_pattern: exclude_branches_by_pattern_regex = re.compile(exclude_branches_by_pattern) # possibly overlapping half-open ranges that are excluded exclude_line_ranges: List[Tuple[int, int]] = [] exclude_branch_ranges: List[Tuple[int, int]] = [] exclusion_stack_line = [] exclusion_stack_branch = [] for lineno, code in lines: if _EXCLUDE_FLAG in code: # process the exclusion marker # # header is a marker name like LCOV or GCOVR # # START flags are added to the exlusion stack # STOP flags remove a marker from the exclusion stack # line exclusion excl_line_pattern = re.compile( "(" + exclude_pattern_prefix + ")" + _EXCLUDE_LINE_PATTERN_POSTFIX ) for header, flag in excl_line_pattern.findall(code): if flag == "LINE": if exclusion_stack_line: warnings.line_after_start( lineno, f"{header}_EXCL_LINE", exclusion_stack_line[-1][1] ) else: exclude_line_ranges.append((lineno, lineno + 1)) if flag == "START": exclusion_stack_line.append((header, lineno)) elif flag == "STOP": if not exclusion_stack_line: warnings.stop_without_start( lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP" ) continue start_header, start_lineno = exclusion_stack_line.pop() if header != start_header: warnings.mismatched_start_stop( start_lineno, f"{start_header}_EXCL_START", lineno, f"{header}_EXCL_STOP", ) exclude_line_ranges.append((start_lineno, lineno)) else: # pragma: no cover pass # branche exclusion excl_branch_pattern = re.compile( "(" + exclude_pattern_prefix + ")" + _EXCLUDE_BRANCH_PATTERN_POSTFIX ) for header, flag in excl_branch_pattern.findall(code): if flag == "LINE": if exclusion_stack_branch: warnings.branch_after_start( lineno, f"{header}_EXCL_LINE", exclusion_stack_branch[-1][1] ) else: exclude_branch_ranges.append((lineno, lineno + 1)) if flag == "START": exclusion_stack_branch.append((header, lineno)) elif flag == "STOP": if not exclusion_stack_branch: warnings.stop_without_start( lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP" ) continue start_header, start_lineno = exclusion_stack_branch.pop() if header != start_header: warnings.mismatched_start_stop( start_lineno, f"{start_header}_EXCL_START", lineno, f"{header}_EXCL_STOP", ) exclude_branch_ranges.append((start_lineno, lineno)) else: # pragma: no cover pass if exclude_lines_by_pattern_regex: if exclude_lines_by_pattern_regex.match(code): exclude_line_ranges.append((lineno, lineno + 1)) if 
exclude_branches_by_pattern_regex: if exclude_branches_by_pattern_regex.match(code): exclude_branch_ranges.append((lineno, lineno + 1)) for header, lineno in exclusion_stack_line: warnings.start_without_stop( lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP" ) for header, lineno in exclusion_stack_branch: warnings.start_without_stop( lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP" ) return [ _make_is_in_any_range(exclude_line_ranges), _make_is_in_any_range(exclude_branch_ranges), ]
def _find_excluded_ranges( lines: List[Tuple[int, str]], *, warnings: _ExclusionRangeWarnings, exclude_lines_by_pattern: Optional[str] = None, exclude_branches_by_pattern: Optional[str] = None, exclude_pattern_prefix: str, ) -> Callable[[int], bool]: """ Scan through all lines to find line ranges and branch ranges covered by exclusion markers. Example: >>> lines = [(11, '//PREFIX_EXCL_LINE'), (13, '//IGNORE_LINE'), (15, '//PREFIX_EXCL_START'), (18, '//PREFIX_EXCL_STOP'), ... (21, '//PREFIX_EXCL_BR_LINE'), (23, '//IGNORE_BR'), (25, '//PREFIX_EXCL_BR_START'), (28, '//PREFIX_EXCL_BR_STOP')] >>> [exclude_line, exclude_branch] = _find_excluded_ranges( ... lines, warnings=..., exclude_lines_by_pattern = '.*IGNORE_LINE', ... exclude_branches_by_pattern = '.*IGNORE_BRANCH', exclude_pattern_prefix='PREFIX') >>> [lineno for lineno in range(30) if exclude_line(lineno)] [11, 13, 15, 16, 17] >>> [lineno for lineno in range(30) if exclude_branch(lineno)] [21, 23, 25, 26, 27] """ exclude_lines_by_pattern_regex = None if exclude_lines_by_pattern: exclude_lines_by_pattern_regex = re.compile(exclude_lines_by_pattern) exclude_branches_by_pattern_regex = None if exclude_branches_by_pattern: exclude_branches_by_pattern_regex = re.compile(exclude_branches_by_pattern) # possibly overlapping half-open ranges that are excluded exclude_line_ranges: List[Tuple[int, int]] = [] exclude_branch_ranges: List[Tuple[int, int]] = [] exclusion_stack_line = [] exclusion_stack_branch = [] for lineno, code in lines: if _EXCLUDE_FLAG in code: # process the exclusion marker # # header is a marker name like LCOV or GCOVR # # START flags are added to the exlusion stack # STOP flags remove a marker from the exclusion stack # line exclusion excl_line_pattern = re.compile( "(" + exclude_pattern_prefix + ")" + _EXCLUDE_LINE_PATTERN_POSTFIX ) for header, flag in excl_line_pattern.findall(code): if flag == "LINE": if exclusion_stack_line: warnings.line_after_start( lineno, f"{header}_EXCL_LINE", exclusion_stack_line[-1][1] ) else: exclude_line_ranges.append((lineno, lineno + 1)) if flag == "START": exclusion_stack_line.append((header, lineno)) elif flag == "STOP": if not exclusion_stack_line: warnings.stop_without_start( lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP" ) continue start_header, start_lineno = exclusion_stack_line.pop() if header != start_header: warnings.mismatched_start_stop( start_lineno, f"{start_header}_EXCL_START", lineno, f"{header}_EXCL_STOP", ) exclude_line_ranges.append((start_lineno, lineno)) else: # pragma: no cover pass # branche exclusion excl_branch_pattern = re.compile( "(" + exclude_pattern_prefix + ")" + _EXCLUDE_BRANCH_PATTERN_POSTFIX ) for header, flag in excl_branch_pattern.findall(code): if flag == "LINE": if exclusion_stack_branch: warnings.branch_after_start( lineno, f"{header}_EXCL_LINE", exclusion_stack_branch[-1][1] ) else: exclude_branch_ranges.append((lineno, lineno + 1)) if flag == "START": exclusion_stack_branch.append((header, lineno)) elif flag == "STOP": if not exclusion_stack_branch: warnings.stop_without_start( lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP" ) continue start_header, start_lineno = exclusion_stack_branch.pop() if header != start_header: warnings.mismatched_start_stop( start_lineno, f"{start_header}_EXCL_START", lineno, f"{header}_EXCL_STOP", ) exclude_branch_ranges.append((start_lineno, lineno)) else: # pragma: no cover pass if exclude_lines_by_pattern_regex: if exclude_lines_by_pattern_regex.match(code): exclude_line_ranges.append((lineno, lineno + 1)) if 
exclude_branches_by_pattern_regex: if exclude_branches_by_pattern_regex.match(code): exclude_branch_ranges.append((lineno, lineno + 1)) for header, lineno in exclusion_stack_line: warnings.start_without_stop( lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP" ) for header, lineno in exclusion_stack_branch: warnings.start_without_stop( lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP" ) return [ _make_is_in_any_range(exclude_line_ranges), _make_is_in_any_range(exclude_branch_ranges), ]
id: 5,578
def parse_metar(metar_text, year, month, station_metadata=station_info): """Parse a METAR report in text form into a list of named tuples. Parameters ---------- metar_text : str The METAR report station_metadata : dict Mapping of station identifiers to station metadata year : int Reported year of observation for constructing 'date_time' month : int Reported month of observation for constructing 'date_time' Returns ------- metar : namedtuple Named tuple of parsed METAR fields Notes ----- Returned data has named tuples with the following attributes: * 'station_id': Station Identifier (ex. KLOT) * 'latitude': Latitude of the observation, measured in degrees * 'longitude': Longitude of the observation, measured in degrees * 'elevation': Elevation of the observation above sea level, measured in meters * 'date_time': Date and time of the observation, datetime object * 'wind_direction': Direction the wind is coming from, measured in degrees * 'wind_speed': Wind speed, measured in knots * 'wind_gust': Wind gusts, measured in knots * 'current_wx1': Current weather (1 of 3) * 'current_wx2': Current weather (2 of 3) * 'current_wx3': Current weather (3 of 3) * 'skyc1': Sky cover (ex. FEW) * 'skylev1': Height of sky cover 1, measured in feet * 'skyc2': Sky cover (ex. OVC) * 'skylev2': Height of sky cover 2, measured in feet * 'skyc3': Sky cover (ex. FEW) * 'skylev3': Height of sky cover 3, measured in feet * 'skyc4': Sky cover (ex. CLR) * 'skylev4:': Height of sky cover 4, measured in feet * 'cloudcover': Cloud coverage measured in oktas, taken from maximum of sky cover values * 'temperature': Temperature, measured in degrees Celsius * 'dewpoint': Dewpoint, measured in degrees Celsius * 'altimeter': Altimeter value, measured in inches of mercury * 'current_wx1_symbol': Current weather symbol (1 of 3), WMO integer code from [WMO306]_ Attachment IV * 'current_wx2_symbol': Current weather symbol (2 of 3), WMO integer code from [WMO306]_ Attachment IV * 'current_wx3_symbol': Current weather symbol (3 of 3), WMO integer code from [WMO306]_ Attachment IV * 'visibility': Visibility distance, measured in meters * 'remarks': Remarks (unparsed) in the report """ from ..plots.wx_symbols import wx_code_to_numeric # Decode the data using the parser (built using Canopy) the parser utilizes a grammar # file which follows the format structure dictated by the WMO Handbook, but has the # flexibility to decode the METAR text when there are missing or incorrectly # encoded values tree = parse(metar_text) # Station ID which is used to find the latitude, longitude, and elevation station_id = tree.siteid.text.strip() # Extract the latitude and longitude values from 'master' dictionary try: info = station_metadata[station_id] lat = info.latitude lon = info.longitude elev = info.altitude except KeyError: lat = np.nan lon = np.nan elev = np.nan # Set the datetime, day, and time_utc try: day_time_utc = tree.datetime.text.strip() day = int(day_time_utc[0:2]) hour = int(day_time_utc[2:4]) minute = int(day_time_utc[4:6]) date_time = datetime(year, month, day, hour, minute) except ValueError: date_time = np.nan # Set the wind values wind_units = 'kts' try: # If there are missing wind values, set wind speed and wind direction to nan if ('/' in tree.wind.text) or (tree.wind.text == 'KT') or (tree.wind.text == ''): wind_dir = np.nan wind_spd = np.nan # If the wind direction is variable, set wind direction to nan but keep the wind speed else: wind_spd = float(tree.wind.wind_spd.text) if 'MPS' in tree.wind.text: wind_units = 'm/s' 
wind_spd = units.Quantity(wind_spd, wind_units).m_as('knots') if (tree.wind.wind_dir.text == 'VRB') or (tree.wind.wind_dir.text == 'VAR'): wind_dir = np.nan else: wind_dir = int(tree.wind.wind_dir.text) # If there are any errors, return nan except ValueError: wind_dir = np.nan wind_spd = np.nan # Parse out the wind gust field if 'G' in tree.wind.text: wind_gust = units.Quantity(float(tree.wind.gust.text.strip()[1:]), wind_units).m_as('knots') else: wind_gust = np.nan # Handle visibility try: if tree.vis.text.endswith('SM'): visibility = 0 # Strip off the SM and any whitespace around the value and any leading 'M' vis_str = tree.vis.text[:-2].strip().lstrip('M') # Case of e.g. 1 1/4SM if ' ' in vis_str: whole, vis_str = vis_str.split(maxsplit=1) visibility += int(whole) # Handle fraction regardless if '/' in vis_str: num, denom = vis_str.split('/', maxsplit=1) visibility += int(num) / int(denom) else: # Should be getting all cases of whole number without fraction visibility += int(vis_str) visibility = units.Quantity(visibility, 'miles').m_as('meter') # CAVOK means vis is "at least 10km" and no significant clouds or weather elif 'CAVOK' in tree.vis.text: visibility = 10000 elif not tree.vis.text or tree.vis.text.strip() == '////': visibility = np.nan else: # Only worry about the first 4 characters (digits) and ignore possible 'NDV' visibility = int(tree.vis.text.strip()[:4]) # If there are any errors, return nan except ValueError: visibility = np.nan # Set the weather symbols # If the weather symbol is missing, set values to nan current_wx = [] current_wx_symbol = [] if tree.curwx.text.strip() not in ('', '//', 'NSW'): current_wx = tree.curwx.text.strip().split() # Handle having e.g. '+' and 'TSRA' parsed into separate items if current_wx[0] in ('-', '+') and current_wx[1]: current_wx[0] += current_wx[1] current_wx.pop(1) current_wx_symbol = wx_code_to_numeric(current_wx).tolist() while len(current_wx) < 3: current_wx.append(np.nan) while len(current_wx_symbol) < 3: current_wx_symbol.append(0) # Set the sky conditions skyc = [np.nan] * 4 skylev = [np.nan] * 4 if tree.skyc.text[1:3] == 'VV': skyc[0] = 'VV' level = tree.skyc.text.strip()[2:5] skylev[0] = np.nan if '/' in level else 100 * int(level) else: for ind, part in enumerate(tree.skyc.text.strip().split(maxsplit=3)): cover = part[:3] level = part[3:6] # Strips off any ending text like in FEW017CB if '/' not in cover: skyc[ind] = cover if level and '/' not in level: with contextlib.suppress(ValueError): skylev[ind] = float(level) * 100 # Set the cloud cover variable (measured in oktas) if 'OVC' in tree.skyc.text or 'VV' in tree.skyc.text: cloudcover = 8 elif 'BKN' in tree.skyc.text: cloudcover = 6 elif 'SCT' in tree.skyc.text: cloudcover = 4 elif 'FEW' in tree.skyc.text: cloudcover = 2 elif ('SKC' in tree.skyc.text or 'NCD' in tree.skyc.text or 'NSC' in tree.skyc.text or 'CLR' in tree.skyc.text or 'CAVOK' in tree.vis.text): cloudcover = 0 else: cloudcover = 10 # Set the temperature and dewpoint temp = np.nan dewp = np.nan if tree.temp_dewp.text and tree.temp_dewp.text != ' MM/MM': with contextlib.suppress(ValueError): temp = float(tree.temp_dewp.temp.text[-2:]) if 'M' in tree.temp_dewp.temp.text: temp *= -1 with contextlib.suppress(ValueError): dewp = float(tree.temp_dewp.dewp.text[-2:]) if 'M' in tree.temp_dewp.dewp.text: dewp *= -1 # Set the altimeter value and sea level pressure if tree.altim.text: val = float(tree.altim.text.strip()[1:5]) altim = val / 100 if val > 1100 else units.Quantity(val, 'hPa').m_as('inHg') else: altim = np.nan 
# Strip off extraneous stuff off the remarks section remarks = tree.remarks.text.lstrip().rstrip('= ') if remarks.startswith('RMK'): remarks = remarks[3:].strip() # Returns a named tuple with all the relevant variables return Metar(station_id, lat, lon, elev, date_time, wind_dir, wind_spd, wind_gust, visibility, current_wx[0], current_wx[1], current_wx[2], skyc[0], skylev[0], skyc[1], skylev[1], skyc[2], skylev[2], skyc[3], skylev[3], cloudcover, temp, dewp, altim, current_wx_symbol[0], current_wx_symbol[1], current_wx_symbol[2], remarks)
def parse_metar(metar_text, year, month, station_metadata=station_info): """Parse a METAR report in text form into a list of named tuples. Parameters ---------- metar_text : str The METAR report station_metadata : dict Mapping of station identifiers to station metadata year : int Reported year of observation for constructing 'date_time' month : int Reported month of observation for constructing 'date_time' Returns ------- metar : namedtuple Named tuple of parsed METAR fields Notes ----- Returned data has named tuples with the following attributes: * 'station_id': Station Identifier (ex. KLOT) * 'latitude': Latitude of the observation, measured in degrees * 'longitude': Longitude of the observation, measured in degrees * 'elevation': Elevation of the observation above sea level, measured in meters * 'date_time': Date and time of the observation, datetime object * 'wind_direction': Direction the wind is coming from, measured in degrees * 'wind_speed': Wind speed, measured in knots * 'wind_gust': Wind gust, measured in knots * 'current_wx1': Current weather (1 of 3) * 'current_wx2': Current weather (2 of 3) * 'current_wx3': Current weather (3 of 3) * 'skyc1': Sky cover (ex. FEW) * 'skylev1': Height of sky cover 1, measured in feet * 'skyc2': Sky cover (ex. OVC) * 'skylev2': Height of sky cover 2, measured in feet * 'skyc3': Sky cover (ex. FEW) * 'skylev3': Height of sky cover 3, measured in feet * 'skyc4': Sky cover (ex. CLR) * 'skylev4:': Height of sky cover 4, measured in feet * 'cloudcover': Cloud coverage measured in oktas, taken from maximum of sky cover values * 'temperature': Temperature, measured in degrees Celsius * 'dewpoint': Dewpoint, measured in degrees Celsius * 'altimeter': Altimeter value, measured in inches of mercury * 'current_wx1_symbol': Current weather symbol (1 of 3), WMO integer code from [WMO306]_ Attachment IV * 'current_wx2_symbol': Current weather symbol (2 of 3), WMO integer code from [WMO306]_ Attachment IV * 'current_wx3_symbol': Current weather symbol (3 of 3), WMO integer code from [WMO306]_ Attachment IV * 'visibility': Visibility distance, measured in meters * 'remarks': Remarks (unparsed) in the report """ from ..plots.wx_symbols import wx_code_to_numeric # Decode the data using the parser (built using Canopy) the parser utilizes a grammar # file which follows the format structure dictated by the WMO Handbook, but has the # flexibility to decode the METAR text when there are missing or incorrectly # encoded values tree = parse(metar_text) # Station ID which is used to find the latitude, longitude, and elevation station_id = tree.siteid.text.strip() # Extract the latitude and longitude values from 'master' dictionary try: info = station_metadata[station_id] lat = info.latitude lon = info.longitude elev = info.altitude except KeyError: lat = np.nan lon = np.nan elev = np.nan # Set the datetime, day, and time_utc try: day_time_utc = tree.datetime.text.strip() day = int(day_time_utc[0:2]) hour = int(day_time_utc[2:4]) minute = int(day_time_utc[4:6]) date_time = datetime(year, month, day, hour, minute) except ValueError: date_time = np.nan # Set the wind values wind_units = 'kts' try: # If there are missing wind values, set wind speed and wind direction to nan if ('/' in tree.wind.text) or (tree.wind.text == 'KT') or (tree.wind.text == ''): wind_dir = np.nan wind_spd = np.nan # If the wind direction is variable, set wind direction to nan but keep the wind speed else: wind_spd = float(tree.wind.wind_spd.text) if 'MPS' in tree.wind.text: wind_units = 'm/s' 
wind_spd = units.Quantity(wind_spd, wind_units).m_as('knots') if (tree.wind.wind_dir.text == 'VRB') or (tree.wind.wind_dir.text == 'VAR'): wind_dir = np.nan else: wind_dir = int(tree.wind.wind_dir.text) # If there are any errors, return nan except ValueError: wind_dir = np.nan wind_spd = np.nan # Parse out the wind gust field if 'G' in tree.wind.text: wind_gust = units.Quantity(float(tree.wind.gust.text.strip()[1:]), wind_units).m_as('knots') else: wind_gust = np.nan # Handle visibility try: if tree.vis.text.endswith('SM'): visibility = 0 # Strip off the SM and any whitespace around the value and any leading 'M' vis_str = tree.vis.text[:-2].strip().lstrip('M') # Case of e.g. 1 1/4SM if ' ' in vis_str: whole, vis_str = vis_str.split(maxsplit=1) visibility += int(whole) # Handle fraction regardless if '/' in vis_str: num, denom = vis_str.split('/', maxsplit=1) visibility += int(num) / int(denom) else: # Should be getting all cases of whole number without fraction visibility += int(vis_str) visibility = units.Quantity(visibility, 'miles').m_as('meter') # CAVOK means vis is "at least 10km" and no significant clouds or weather elif 'CAVOK' in tree.vis.text: visibility = 10000 elif not tree.vis.text or tree.vis.text.strip() == '////': visibility = np.nan else: # Only worry about the first 4 characters (digits) and ignore possible 'NDV' visibility = int(tree.vis.text.strip()[:4]) # If there are any errors, return nan except ValueError: visibility = np.nan # Set the weather symbols # If the weather symbol is missing, set values to nan current_wx = [] current_wx_symbol = [] if tree.curwx.text.strip() not in ('', '//', 'NSW'): current_wx = tree.curwx.text.strip().split() # Handle having e.g. '+' and 'TSRA' parsed into separate items if current_wx[0] in ('-', '+') and current_wx[1]: current_wx[0] += current_wx[1] current_wx.pop(1) current_wx_symbol = wx_code_to_numeric(current_wx).tolist() while len(current_wx) < 3: current_wx.append(np.nan) while len(current_wx_symbol) < 3: current_wx_symbol.append(0) # Set the sky conditions skyc = [np.nan] * 4 skylev = [np.nan] * 4 if tree.skyc.text[1:3] == 'VV': skyc[0] = 'VV' level = tree.skyc.text.strip()[2:5] skylev[0] = np.nan if '/' in level else 100 * int(level) else: for ind, part in enumerate(tree.skyc.text.strip().split(maxsplit=3)): cover = part[:3] level = part[3:6] # Strips off any ending text like in FEW017CB if '/' not in cover: skyc[ind] = cover if level and '/' not in level: with contextlib.suppress(ValueError): skylev[ind] = float(level) * 100 # Set the cloud cover variable (measured in oktas) if 'OVC' in tree.skyc.text or 'VV' in tree.skyc.text: cloudcover = 8 elif 'BKN' in tree.skyc.text: cloudcover = 6 elif 'SCT' in tree.skyc.text: cloudcover = 4 elif 'FEW' in tree.skyc.text: cloudcover = 2 elif ('SKC' in tree.skyc.text or 'NCD' in tree.skyc.text or 'NSC' in tree.skyc.text or 'CLR' in tree.skyc.text or 'CAVOK' in tree.vis.text): cloudcover = 0 else: cloudcover = 10 # Set the temperature and dewpoint temp = np.nan dewp = np.nan if tree.temp_dewp.text and tree.temp_dewp.text != ' MM/MM': with contextlib.suppress(ValueError): temp = float(tree.temp_dewp.temp.text[-2:]) if 'M' in tree.temp_dewp.temp.text: temp *= -1 with contextlib.suppress(ValueError): dewp = float(tree.temp_dewp.dewp.text[-2:]) if 'M' in tree.temp_dewp.dewp.text: dewp *= -1 # Set the altimeter value and sea level pressure if tree.altim.text: val = float(tree.altim.text.strip()[1:5]) altim = val / 100 if val > 1100 else units.Quantity(val, 'hPa').m_as('inHg') else: altim = np.nan 
# Strip off extraneous stuff off the remarks section remarks = tree.remarks.text.lstrip().rstrip('= ') if remarks.startswith('RMK'): remarks = remarks[3:].strip() # Returns a named tuple with all the relevant variables return Metar(station_id, lat, lon, elev, date_time, wind_dir, wind_spd, wind_gust, visibility, current_wx[0], current_wx[1], current_wx[2], skyc[0], skylev[0], skyc[1], skylev[1], skyc[2], skylev[2], skyc[3], skylev[3], cloudcover, temp, dewp, altim, current_wx_symbol[0], current_wx_symbol[1], current_wx_symbol[2], remarks)
id: 42,005
def _run_iteration( zmap: Dict[complex, Union[int, float]], coordinates: List[complex], overshoot: float = 0.0 ) -> Tuple[Dict[complex, Union[int, float]], float]: max_fractional_delta = 0.0 for coord in coordinates: current_val = zmap.get(coord, None) max_neighbor = -np.inf min_neighbor = np.inf sum_neighbors = 0 n_neighbors = 0 for offset in NEIGHBOR_OFFSETS: neighbor = zmap.get(coord + offset, None) if neighbor is None: # off the edge or not filled in continue sum_neighbors += neighbor # type: ignore n_neighbors += 1 if current_val is not None: max_neighbor = max(max_neighbor, neighbor) min_neighbor = min(min_neighbor, neighbor) # fill value is just mean of its neighbors new_val = sum_neighbors / n_neighbors if current_val is None: zmap[coord] = new_val max_fractional_delta = 1.0 else: zmap[coord] = (1 + overshoot) * new_val - overshoot * current_val if max_neighbor > min_neighbor: fractional_delta = abs(new_val - current_val) / (max_neighbor - min_neighbor) max_fractional_delta = max(overshoot, fractional_delta) return zmap, max_fractional_delta
def _run_iteration( zmap: Dict[complex, Union[int, float]], coordinates: List[complex], overshoot: float = 0.0 ) -> Tuple[Dict[complex, Union[int, float]], float]: max_fractional_delta = 0.0 for coord in coordinates: current_val = zmap.get(coord, None) max_neighbor = -np.inf min_neighbor = np.inf sum_neighbors = 0 n_neighbors = 0 for offset in NEIGHBOR_OFFSETS: neighbor = zmap.get(coord + offset, None) if neighbor is None: # off the edge or not filled in continue sum_neighbors += neighbor # type: ignore n_neighbors += 1 if current_val is not None: max_neighbor = max(max_neighbor, neighbor) min_neighbor = min(min_neighbor, neighbor) # fill value is just mean of its neighbors new_val = sum_neighbors / n_neighbors if current_val is None: zmap[coord] = new_val max_fractional_delta = 1.0 else: zmap[coord] = (1 + overshoot) * new_val - overshoot * current_val if max_neighbor > min_neighbor: fractional_delta = abs(new_val - current_val) / (max_neighbor - min_neighbor) max_fractional_delta = max(overshoot, fractional_delta) return max_fractional_delta
id: 38,902
def field_singleton_schema( # noqa: C901 (ignore complexity) field: Field, *, by_alias: bool, model_name_map: Dict[Type['BaseModel'], str], schema_overrides: bool = False, ref_prefix: Optional[str] = None, known_models: Set[Type['BaseModel']], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ This function is indirectly used by ``field_schema()``, you should probably be using that function. Take a single Pydantic ``Field``, and return its schema and any additional definitions from sub-models. """ ref_prefix = ref_prefix or default_prefix definitions: Dict[str, Any] = {} if field.sub_fields: return field_singleton_sub_fields_schema( field.sub_fields, by_alias=by_alias, model_name_map=model_name_map, schema_overrides=schema_overrides, ref_prefix=ref_prefix, known_models=known_models, ) if field.type_ is Any: return {}, definitions # no restrictions if is_callable_type(field.type_): raise SkipField(f'Callable {field.name} was excluded from schema since JSON schema has no equivalent type.') f_schema: Dict[str, Any] = {} if field.schema is not None and field.schema.const: f_schema['const'] = field.default if issubclass(field.type_, Enum): f_schema.update({'enum': [item.value for item in field.type_]}) # Don't return immediately, to allow adding specific types for field_name, schema_name in validation_attribute_to_schema_keyword.items(): field_value = getattr(field.type_, field_name, None) if field_value is not None: if field_name == 'regex': field_value = field_value.pattern f_schema[schema_name] = field_value for type_, t_schema in field_class_to_schema_enum_enabled: if issubclass(field.type_, type_): f_schema.update(t_schema) break # Return schema, with or without enum definitions if f_schema: return f_schema, definitions for type_, t_schema in field_class_to_schema_enum_disabled: if issubclass(field.type_, type_): return t_schema, definitions # Handle dataclass-based models field_type = field.type_ if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), pydantic.BaseModel): field_type = cast(Type['dataclasses.DataclassType'], field_type) field_type = field_type.__pydantic_model__ if issubclass(field_type, pydantic.BaseModel): model_name = model_name_map[field_type] if field_type not in known_models: sub_schema, sub_definitions = model_process_schema( field_type, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix, known_models=known_models, ) definitions.update(sub_definitions) definitions[model_name] = sub_schema else: definitions[model_name] = None schema_ref = {'$ref': f'{ref_prefix}{model_name}'} if not schema_overrides: return schema_ref, definitions else: return {'allOf': [schema_ref]}, definitions raise ValueError(f'Value not declarable with JSON Schema, field: {field}')
def field_singleton_schema( # noqa: C901 (ignore complexity) field: Field, *, by_alias: bool, model_name_map: Dict[Type['BaseModel'], str], schema_overrides: bool = False, ref_prefix: Optional[str] = None, known_models: Set[Type['BaseModel']], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ This function is indirectly used by ``field_schema()``, you should probably be using that function. Take a single Pydantic ``Field``, and return its schema and any additional definitions from sub-models. """ ref_prefix = ref_prefix or default_prefix definitions: Dict[str, Any] = {} if field.sub_fields: return field_singleton_sub_fields_schema( field.sub_fields, by_alias=by_alias, model_name_map=model_name_map, schema_overrides=schema_overrides, ref_prefix=ref_prefix, known_models=known_models, ) if field.type_ is Any: return {}, definitions # no restrictions if is_callable_type(field.type_): raise SkipField(f'Callable {field.name} was excluded from schema since JSON schema has no equivalent type.') f_schema: Dict[str, Any] = {} if field.schema is not None and field.schema.const: f_schema['const'] = field.default if issubclass(field.type_, Enum): f_schema.update({'enum': [item.value for item in field.type_]}) # Don't return immediately, to allow adding specific types for field_name, schema_name in validation_attribute_to_schema_keyword.items(): field_value = getattr(field.type_, field_name, None) if field_value is not None: if field_name == 'regex': field_value = field_value.pattern f_schema[schema_name] = field_value for type_, t_schema in field_class_to_schema_enum_enabled: if issubclass(field.type_, type_): f_schema.update(t_schema) break # Return schema, with or without enum definitions if f_schema: return f_schema, definitions for type_, t_schema in field_class_to_schema_enum_disabled: if issubclass(field.type_, type_): return t_schema, definitions # Handle dataclass-based models field_type = field.type_ if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), pydantic.BaseModel): field_type = cast(Type['dataclasses.DataclassType'], field_type) field_type = field_type.__pydantic_model__ if issubclass(field_type, pydantic.BaseModel): model_name = model_name_map[field_type] if field_type not in known_models: sub_schema, sub_definitions = model_process_schema( field_type, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix, known_models=known_models, ) definitions.update(sub_definitions) definitions[model_name] = sub_schema else: definitions[model_name] = None schema_ref = {'$ref': ref_prefix + model_name} if not schema_overrides: return schema_ref, definitions else: return {'allOf': [schema_ref]}, definitions raise ValueError(f'Value not declarable with JSON Schema, field: {field}')
44,177
def catch_warn_ExpvalCost(ansatz, hamiltonian, device, **kwargs): """Computes the ExpvalCost and catches the initial deprecation warning.""" with pytest.warns(UserWarning, match="will be deprecated,"): res = qml.ExpvalCost(ansatz, hamiltonian, device, **kwargs) return res
def catch_warn_ExpvalCost(ansatz, hamiltonian, device, **kwargs): """Computes the ExpvalCost and catches the initial deprecation warning.""" with pytest.warns(UserWarning, match="is deprecated,"): res = qml.ExpvalCost(ansatz, hamiltonian, device, **kwargs) return res
4,560
def clean(signals, sessions=None, detrend=True, standardize='zscore', confounds=None, standardize_confounds=True, filter="butterworth", low_pass=None, high_pass=None, t_r=2.5, ensure_finite=False): """Improve SNR on masked fMRI signals. This function can do several things on the input signals, in the following order: - detrend - low- and high-pass filter - remove confounds - standardize Low-pass filtering improves specificity. High-pass filtering should be kept small, to keep some sensitivity. Filtering is only meaningful on evenly-sampled signals. According to Lindquist et al. (2018), removal of confounds will be done orthogonally to temporal filters (low- and/or high-pass filters), if both are specified. Parameters ---------- signals: numpy.ndarray Timeseries. Must have shape (instant number, features number). This array is not modified. sessions : numpy array, optional Add a session level to the cleaning process. Each session will be cleaned independently. Must be a 1D array of n_samples elements. confounds: numpy.ndarray, str, DataFrame or list of Confounds timeseries. Shape must be (instant number, confound number), or just (instant number,) The number of time instants in signals and confounds must be identical (i.e. signals.shape[0] == confounds.shape[0]). If a string is provided, it is assumed to be the name of a csv file containing signals as columns, with an optional one-line header. If a list is provided, all confounds are removed from the input signal, as if all were in the same array. t_r: float Repetition time, in second (sampling period). Set to None if not. filter: {'butterworth', False} Filtering methods. 'butterworth': perform butterworth filtering. False : Do not perform filtering. low_pass, high_pass: float Respectively high and low cutoff frequencies, in Hertz. detrend: bool If detrending should be applied on timeseries (before confound removal) standardize: {'zscore', 'psc', False}, default is 'zscore' Strategy to standardize the signal. 'zscore': the signal is z-scored. Timeseries are shifted to zero mean and scaled to unit variance. 'psc': Timeseries are shifted to zero mean value and scaled to percent signal change (as compared to original mean signal). False : Do not standardize the data. standardize_confounds: boolean, optional, default is True If standardize_confounds is True, the confounds are z-scored: their mean is put to 0 and their variance to 1 in the time dimension. ensure_finite: bool If True, the non-finite values (NANs and infs) found in the data will be replaced by zeros. Returns ------- cleaned_signals: numpy.ndarray Input signals, cleaned. Same shape as `signals`. Notes ----- Confounds removal is based on a projection on the orthogonal of the signal space. See `Friston, K. J., A. P. Holmes, K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak. "Statistical Parametric Maps in Functional Imaging: A General Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210. <http://dx.doi.org/10.1002/hbm.460020402>`_ Orthogonalization between temporal filters and confound removal is based on suggestions in `Lindquist, M., Geuter, S., Wager, T., & Caffo, B. (2018). Modular preprocessing pipelines can reintroduce artifacts into fMRI data. bioRxiv, 407676. 
<http://dx.doi.org/10.1101/407676>`_ See Also -------- nilearn.image.clean_img """ # Read confounds and signals signals, confounds = _sanitize_inputs(signals, confounds, ensure_finite) # check if filter parameters are satisfied _ = _check_filter_parameters(filter, low_pass, high_pass, t_r) # Restrict the signal to the orthogonal of the confounds if sessions is not None: signals = _process_session(signals, sessions, detrend, standardize, confounds, low_pass, high_pass, t_r) # Detrend # Detrend and filtering should apply to confounds, if confound presents # keep filters orthogonal (according to Lindquist et al. (2018)) if detrend: mean_signals = signals.mean(axis=0) signals = _standardize(signals, standardize=False, detrend=detrend) if confounds is not None: confounds = _standardize(confounds, standardize=False, detrend=detrend) # Apply low- and high-pass filters if filter == "butterworth" and t_r is not None: # this change anticipates extra filtering methods signals = butterworth(signals, sampling_rate=1. / t_r, low_pass=low_pass, high_pass=high_pass) if confounds is not None: # Apply low- and high-pass filters to keep filters orthogonal # (according to Lindquist et al. (2018)) confounds = butterworth(confounds, sampling_rate=1. / t_r, low_pass=low_pass, high_pass=high_pass) # if filter == "cosine": # ... # Remove confounds if confounds is not None: confounds = _standardize(confounds, standardize=standardize_confounds, detrend=False) if not standardize_confounds: # Improve numerical stability by controlling the range of # confounds. We don't rely on _standardize as it removes any # constant contribution to confounds. confound_max = np.max(np.abs(confounds), axis=0) confound_max[confound_max == 0] = 1 confounds /= confound_max # Pivoting in qr decomposition was added in scipy 0.10 Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True) Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float64).eps * 100.] signals -= Q.dot(Q.T).dot(signals) # Standardize if detrend and (standardize == 'psc'): # If the signal is detrended, we have to know the original mean # signal to calculate the psc. signals = _standardize(signals + mean_signals, standardize=standardize, detrend=False) else: signals = _standardize(signals, standardize=standardize, detrend=False) return signals
def clean(signals, sessions=None, detrend=True, standardize='zscore', confounds=None, standardize_confounds=True, filter='butterworth', low_pass=None, high_pass=None, t_r=2.5, ensure_finite=False): """Improve SNR on masked fMRI signals. This function can do several things on the input signals, in the following order: - detrend - low- and high-pass filter - remove confounds - standardize Low-pass filtering improves specificity. High-pass filtering should be kept small, to keep some sensitivity. Filtering is only meaningful on evenly-sampled signals. According to Lindquist et al. (2018), removal of confounds will be done orthogonally to temporal filters (low- and/or high-pass filters), if both are specified. Parameters ---------- signals: numpy.ndarray Timeseries. Must have shape (instant number, features number). This array is not modified. sessions : numpy array, optional Add a session level to the cleaning process. Each session will be cleaned independently. Must be a 1D array of n_samples elements. confounds: numpy.ndarray, str, DataFrame or list of Confounds timeseries. Shape must be (instant number, confound number), or just (instant number,) The number of time instants in signals and confounds must be identical (i.e. signals.shape[0] == confounds.shape[0]). If a string is provided, it is assumed to be the name of a csv file containing signals as columns, with an optional one-line header. If a list is provided, all confounds are removed from the input signal, as if all were in the same array. t_r: float Repetition time, in second (sampling period). Set to None if not. filter: {'butterworth', False} Filtering methods. 'butterworth': perform butterworth filtering. False : Do not perform filtering. low_pass, high_pass: float Respectively high and low cutoff frequencies, in Hertz. detrend: bool If detrending should be applied on timeseries (before confound removal) standardize: {'zscore', 'psc', False}, default is 'zscore' Strategy to standardize the signal. 'zscore': the signal is z-scored. Timeseries are shifted to zero mean and scaled to unit variance. 'psc': Timeseries are shifted to zero mean value and scaled to percent signal change (as compared to original mean signal). False : Do not standardize the data. standardize_confounds: boolean, optional, default is True If standardize_confounds is True, the confounds are z-scored: their mean is put to 0 and their variance to 1 in the time dimension. ensure_finite: bool If True, the non-finite values (NANs and infs) found in the data will be replaced by zeros. Returns ------- cleaned_signals: numpy.ndarray Input signals, cleaned. Same shape as `signals`. Notes ----- Confounds removal is based on a projection on the orthogonal of the signal space. See `Friston, K. J., A. P. Holmes, K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak. "Statistical Parametric Maps in Functional Imaging: A General Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210. <http://dx.doi.org/10.1002/hbm.460020402>`_ Orthogonalization between temporal filters and confound removal is based on suggestions in `Lindquist, M., Geuter, S., Wager, T., & Caffo, B. (2018). Modular preprocessing pipelines can reintroduce artifacts into fMRI data. bioRxiv, 407676. 
<http://dx.doi.org/10.1101/407676>`_ See Also -------- nilearn.image.clean_img """ # Read confounds and signals signals, confounds = _sanitize_inputs(signals, confounds, ensure_finite) # check if filter parameters are satisfied _ = _check_filter_parameters(filter, low_pass, high_pass, t_r) # Restrict the signal to the orthogonal of the confounds if sessions is not None: signals = _process_session(signals, sessions, detrend, standardize, confounds, low_pass, high_pass, t_r) # Detrend # Detrend and filtering should apply to confounds, if confound presents # keep filters orthogonal (according to Lindquist et al. (2018)) if detrend: mean_signals = signals.mean(axis=0) signals = _standardize(signals, standardize=False, detrend=detrend) if confounds is not None: confounds = _standardize(confounds, standardize=False, detrend=detrend) # Apply low- and high-pass filters if filter == "butterworth" and t_r is not None: # this change anticipates extra filtering methods signals = butterworth(signals, sampling_rate=1. / t_r, low_pass=low_pass, high_pass=high_pass) if confounds is not None: # Apply low- and high-pass filters to keep filters orthogonal # (according to Lindquist et al. (2018)) confounds = butterworth(confounds, sampling_rate=1. / t_r, low_pass=low_pass, high_pass=high_pass) # if filter == "cosine": # ... # Remove confounds if confounds is not None: confounds = _standardize(confounds, standardize=standardize_confounds, detrend=False) if not standardize_confounds: # Improve numerical stability by controlling the range of # confounds. We don't rely on _standardize as it removes any # constant contribution to confounds. confound_max = np.max(np.abs(confounds), axis=0) confound_max[confound_max == 0] = 1 confounds /= confound_max # Pivoting in qr decomposition was added in scipy 0.10 Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True) Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float64).eps * 100.] signals -= Q.dot(Q.T).dot(signals) # Standardize if detrend and (standardize == 'psc'): # If the signal is detrended, we have to know the original mean # signal to calculate the psc. signals = _standardize(signals + mean_signals, standardize=standardize, detrend=False) else: signals = _standardize(signals, standardize=standardize, detrend=False) return signals
14,255
def get_sim_steps( time: Union[Real, Decimal], units: str = "step", round_mode: str = "error" ) -> int: """Calculates the number of simulation time steps for a given amount of *time*. Args: time: The value to convert to simulation time steps. units: String specifying the units of the result (one of ``'step'``, ``'fs'``, ``'ps'``, ``'ns'``, ``'us'``, ``'ms'``, ``'sec'``). ``'step'`` means time is already in simulation time steps. round_mode: String specifying how to handle time values that sit between time steps (one of ``'error'``, ``'round'``, ``'ceil'``, ``'floor'``). Returns: The number of simulation time steps. When *round_mode* is ``"error"``, a :exc:`ValueError` is thrown if the value cannot be accurately represented in terms of simulator time steps. When *round_mode* is ``"round"``, ``"ceil"``, or ``"floor"``, the corresponding rounding function from the standard library will be used to round to a simulator time step. .. versionchanged:: 1.5 Support ``'step'`` as the *units* argument to mean "simulator time step". .. versionchanged:: 1.6 Support rounding modes. """ if units not in (None, "step"): result = _ldexp10(time, _get_log_time_scale(units) - _get_simulator_precision()) else: result = time if units is None: warnings.warn( 'Using units=None is deprecated, use units="step" instead.', DeprecationWarning, stacklevel=2) units="step" # don't propagate deprecated value if round_mode == "error": result_rounded = math.floor(result) if result_rounded != result: precision = _get_simulator_precision() raise ValueError( f"Unable to accurately represent {time}({units}) with the simulator precision of 1e{precision}" ) elif round_mode == "ceil": result_rounded = math.ceil(result) elif round_mode == "round": result_rounded = round(result) elif round_mode == "floor": result_rounded = math.floor(result) else: raise ValueError(f"invalid round_mode specifier: {round_mode}") return result_rounded
def get_sim_steps( time: Union[Real, Decimal], units: str = "step", round_mode: str = "error" ) -> int: """Calculates the number of simulation time steps for a given amount of *time*. Args: time: The value to convert to simulation time steps. units: String specifying the units of the result (one of ``'step'``, ``'fs'``, ``'ps'``, ``'ns'``, ``'us'``, ``'ms'``, ``'sec'``). ``'step'`` means time is already in simulation time steps. round_mode: String specifying how to handle time values that sit between time steps (one of ``'error'``, ``'round'``, ``'ceil'``, ``'floor'``). Returns: The number of simulation time steps. When *round_mode* is ``"error"``, a :exc:`ValueError` is thrown if the value cannot be accurately represented in terms of simulator time steps. When *round_mode* is ``"round"``, ``"ceil"``, or ``"floor"``, the corresponding rounding function from the standard library will be used to round to a simulator time step. .. versionchanged:: 1.5 Support ``'step'`` as the *units* argument to mean "simulator time step". .. versionchanged:: 1.6 Support rounding modes. """ if units not in (None, "step"): result = _ldexp10(time, _get_log_time_scale(units) - _get_simulator_precision()) else: result = time if units is None: warnings.warn( 'Using units=None is deprecated, use units="step" instead.', DeprecationWarning, stacklevel=2) units="step" # don't propagate deprecated value if round_mode == "error": result_rounded = math.floor(result) if result_rounded != result: precision = _get_simulator_precision() raise ValueError( f"Unable to accurately represent {time}({units}) with the simulator precision of 1e{precision}" ) elif round_mode == "ceil": result_rounded = math.ceil(result) elif round_mode == "round": result_rounded = round(result) elif round_mode == "floor": result_rounded = math.floor(result) else: raise ValueError(f"Invalid round_mode specifier: {round_mode}") return result_rounded
14,125
def _continuous_to_discrete_coords(total_bounds, bounds, p): """ Calculates mid points & ranges of geoms and returns as discrete coords Parameters ---------- total_bounds : Total bounds of geometries - array bounds : Bounds of each geometry - array p : The number of iterations used in constructing the Hilbert curve Returns --------- Discrete two-dimensional numpy array Two-dimensional array Array of hilbert distances for each geom """ # Hilbert Side len side_length = 2 ** p # Calculate x and y range of total bound coords - returns array xmin, ymin, xmax, ymax = total_bounds # Calculate mid points for x and y bound coords - returns array x_mids = (bounds[:, 0] + bounds[:, 2]) / 2.0 y_mids = (bounds[:, 1] + bounds[:, 3]) / 2.0 # Transform continuous int to discrete int for each dimension x_int = _continuous_to_discrete(x_mids, (xmin, xmax), side_length) y_int = _continuous_to_discrete(y_mids, (ymin, ymax), side_length) return x_int, y_int
def _continuous_to_discrete_coords(total_bounds, bounds, p): """ Calculates mid points & ranges of geoms and returns as discrete coords Parameters ---------- total_bounds : Total bounds of geometries - array bounds : Bounds of each geometry - array p : The number of iterations used in constructing the Hilbert curve Returns --------- Discrete two-dimensional numpy array Two-dimensional array Array of hilbert distances for each geom """ # Hilbert Side len side_length = 2 ** p # Calculate x and y range of total bound coords - returns array xmin, ymin, xmax, ymax = total_bounds # Calculate mid points for x and y bound coords - returns array x_mids = (bounds[:, 0] + bounds[:, 2]) / 2.0 y_mids = (bounds[:, 1] + bounds[:, 3]) / 2.0 # Transform continuous value to discrete integer for each dimension x_int = _continuous_to_discrete(x_mids, (xmin, xmax), side_length) y_int = _continuous_to_discrete(y_mids, (ymin, ymax), side_length) return x_int, y_int
6,585
def execute(): click.secho( "E-Invoicing Integration is moved to a separate app and will be removed from ERPNext in version-14.\n" "Please install the app to continue using the integration: https://github.com/frappe/erpnext_gst_compliance", fg="yellow", )
def execute(): click.secho( "Indian E-Invoicing integration is moved to a separate app and will be removed from ERPNext in version-14.\n" "Please install the app to continue using the integration: https://github.com/frappe/erpnext_gst_compliance", fg="yellow", )
20,458
def merge_stock_location_path_stock_rule(env): openupgrade.logged_query( env.cr, """ INSERT INTO stock_rule (name, active, action, sequence, company_id, location_id, location_src_id, route_id, procure_method, route_sequence, picking_type_id, delay, propagate, warehouse_id, auto, create_uid, create_date, write_uid, write_date, %s) SELECT name, active, 'push' AS action, sequence, company_id, location_dest_id, location_from_id, route_id, 'make_to_stock' AS procure_method, route_sequence, picking_type_id, delay, propagate, warehouse_id, auto, create_uid, create_date, write_uid, write_date, id FROM stock_location_path """, (AsIs(openupgrade.get_legacy_name('loc_path_id')), ), ) openupgrade.logged_query( env.cr, """ UPDATE ir_model_data imd SET model = 'stock.rule', res_id = sr.id FROM stock_rule sr WHERE imd.res_id = sr.%s AND model = 'stock.location.path' """, (AsIs(openupgrade.get_legacy_name('loc_path_id')), ), ) env.cr.execute( """ SELECT DISTINCT sm.rule_id, sr.id FROM stock_move sm INNER JOIN stock_rule sr ON sm.%s = sr.%s WHERE sr.%s IS NOT NULL AND sm.rule_id IS NOT NULL """, ( AsIs(openupgrade.get_legacy_name('push_rule_id')), AsIs(openupgrade.get_legacy_name('loc_path_id')), AsIs(openupgrade.get_legacy_name('loc_path_id')), ), ) rules_to_merge = env.cr.fetchall() openupgrade.logged_query( env.cr, """ UPDATE stock_move sm SET rule_id = sr.id FROM stock_rule sr WHERE sm.%s = sr.%s AND sr.%s IS NOT NULL AND sm.rule_id IS NULL """, ( AsIs(openupgrade.get_legacy_name('push_rule_id')), AsIs(openupgrade.get_legacy_name('loc_path_id')), AsIs(openupgrade.get_legacy_name('loc_path_id')), ), ) for row in rules_to_merge: openupgrade_merge_records.merge_records( env, 'stock.rule', [row[1]], row[0], ) pull_push_rule_ids = list(set([r[0] for r in rules_to_merge])) if pull_push_rule_ids: openupgrade.logged_query( env.cr, """ UPDATE stock_rule SET action = 'pull_push' WHERE id in %s""", (tuple(pull_push_rule_ids), ), )
def merge_stock_location_path_stock_rule(env): openupgrade.logged_query( env.cr, """ INSERT INTO stock_rule (name, active, action, sequence, company_id, location_id, location_src_id, route_id, procure_method, route_sequence, picking_type_id, delay, propagate, warehouse_id, auto, create_uid, create_date, write_uid, write_date, %s) SELECT name, active, 'push' AS action, sequence, company_id, location_dest_id, location_from_id, route_id, 'make_to_stock' AS procure_method, route_sequence, picking_type_id, delay, propagate, warehouse_id, auto, create_uid, create_date, write_uid, write_date, id FROM stock_location_path """, (AsIs(openupgrade.get_legacy_name('loc_path_id')), ), ) openupgrade.logged_query( env.cr, """ UPDATE ir_model_data imd SET model = 'stock.rule', res_id = sr.id FROM stock_rule sr WHERE imd.res_id = sr.%s AND model = 'stock.location.path' """, (AsIs(openupgrade.get_legacy_name('loc_path_id')), ), ) env.cr.execute( """ SELECT DISTINCT sm.rule_id, sr.id FROM stock_move sm INNER JOIN stock_rule sr ON sm.%s = sr.%s WHERE sr.%s IS NOT NULL AND sm.rule_id IS NOT NULL """, ( AsIs(openupgrade.get_legacy_name('push_rule_id')), AsIs(openupgrade.get_legacy_name('loc_path_id')), AsIs(openupgrade.get_legacy_name('loc_path_id')), ), ) rules_to_merge = env.cr.fetchall() openupgrade.logged_query( env.cr, """ UPDATE stock_move sm SET rule_id = sr.id FROM stock_rule sr WHERE sm.%s = sr.%s AND sr.%s IS NOT NULL AND sm.rule_id IS NULL """, ( AsIs(openupgrade.get_legacy_name('push_rule_id')), AsIs(openupgrade.get_legacy_name('loc_path_id')), AsIs(openupgrade.get_legacy_name('loc_path_id')), ), ) for row in rules_to_merge: openupgrade_merge_records.merge_records( env, 'stock.rule', [row[1]], row[0], ) pull_push_rule_ids = tuple(set([r[0] for r in rules_to_merge])) if pull_push_rule_ids: openupgrade.logged_query( env.cr, """ UPDATE stock_rule SET action = 'pull_push' WHERE id in %s""", (tuple(pull_push_rule_ids), ), )
31,722
def get_remote_data_command(client: Client, params: Dict[str, Any], args: Dict) -> GetRemoteDataResponse: """ get-remote-data command: Returns an updated incident and entries If offense's events were updated in the long running container, update the demisto incident. Args: client (Client): QRadar client to perform the API calls. params (Dict): Demisto params. args (Dict): id: Offense id to retrieve. lastUpdate: When was the last time we data was retrieved in Epoch. Returns: GetRemoteDataResponse. """ remote_args = GetRemoteDataArgs(args) ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets')) offense_id = remote_args.remote_incident_id offense = client.offenses_list(offense_id=offense_id) offense_last_update = get_time_parameter(offense.get('last_persisted_time')) mirror_options = params.get('mirror_options') context_data = get_integration_context() processed_offenses = print_mirror_events_stats(context_data, f"Starting Get Remote Data For " f"Offense {str(offense.get('id'))}") # versions below 6.1 compatibility last_update = get_time_parameter(args.get('lastUpdate')) if last_update and last_update > offense_last_update and str(offense.get("id")) not in processed_offenses: demisto.debug('Nothing new in the ticket') return GetRemoteDataResponse({'id': offense_id, 'in_mirror_error': ''}, []) demisto.debug(f'Updating offense. Offense last update was {offense_last_update}') entries = [] if offense.get('status') == 'CLOSED' and argToBoolean(params.get('close_incident', False)): demisto.debug(f'Offense is closed: {offense}') if closing_reason := offense.get('closing_reason_id', ''): closing_reason = client.closing_reasons_list(closing_reason).get('text') offense_close_time = offense.get('close_time', '') closed_offense_notes = client.offense_notes_list(offense_id, f'items={DEFAULT_RANGE_VALUE}', filter_=f'create_time >= {offense_close_time}') # In QRadar UI, when you close a reason, a note is added with the reason and more details. Try to get note # if exists, else fallback to closing reason only, as closing QRadar through an API call does not create a note. 
close_reason_with_note = next((note.get('note_text') for note in closed_offense_notes if note.get('note_text').startswith('This offense was closed with reason:')), closing_reason) if not close_reason_with_note: print_debug_msg(f'Could not find closing reason or closing note for offense with offense id {offense_id}') close_reason_with_note = 'Unknown closing reason from QRadar' else: close_reason_with_note = f'From QRadar: {close_reason_with_note}' entries.append({ 'Type': EntryType.NOTE, 'Contents': { 'dbotIncidentClose': True, 'closeReason': close_reason_with_note }, 'ContentsFormat': EntryFormat.JSON }) if mirror_options == MIRROR_OFFENSE_AND_EVENTS: offenses_with_updated_events = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []) offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) max_retries = MAX_FETCH_EVENT_RETIRES * (len(offenses_waiting_for_update) + 3) is_waiting_to_be_updated = True evented_offense = None retries = 0 while ((not evented_offense) or is_waiting_to_be_updated) and retries < max_retries: if retries != 0: time.sleep(FAILURE_SLEEP) ctx = get_integration_context() context_data = ctx.copy() print_mirror_events_stats(context_data, f"Get Remote Data Loop for id {offense.get('id')}, retry {retries}") retries += 1 offenses_with_updated_events = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []) offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) evented_offense = [evented_offense for evented_offense in offenses_with_updated_events if str(evented_offense.get('id')) == str(offense.get("id"))] is_waiting_to_be_updated = any([True for waiting_offense in offenses_waiting_for_update if str(waiting_offense.get('id')) == str(offense.get("id"))]) if evented_offense: demisto.debug(f"Mirror Events: Offense {offense.get('id')} events were updated, updating incident.") if evented_offense[0].get('events'): offense['events'] = evented_offense[0].get('events') demisto.debug(f"Mirror Events: Offense {offense.get('id')} now has {offense.get('events')} " f"fetched events.") offenses_with_updated_events.remove(evented_offense[0]) resubmitted_offenses_ids = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, []).copy() if offense.get("id") in resubmitted_offenses_ids: resubmitted_offenses_ids.remove(offense.get("id")) context_data[RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY] = resubmitted_offenses_ids context_data[UPDATED_MIRRORED_OFFENSES_CTX_KEY] = offenses_with_updated_events print_mirror_events_stats(context_data, f"Get Remote Data End for id {offense.get('id')}") set_integration_context(context_data) enriched_offense = enrich_offenses_result(client, offense, ip_enrich, asset_enrich) final_offense_data = sanitize_outputs(enriched_offense)[0] return GetRemoteDataResponse(final_offense_data, entries)
def get_remote_data_command(client: Client, params: Dict[str, Any], args: Dict) -> GetRemoteDataResponse: """ get-remote-data command: Returns an updated incident and entries If offense's events were updated in the long running container, update the demisto incident. Args: client (Client): QRadar client to perform the API calls. params (Dict): Demisto params. args (Dict): id: Offense id to retrieve. lastUpdate: When was the last time we data was retrieved in Epoch. Returns: GetRemoteDataResponse. """ remote_args = GetRemoteDataArgs(args) ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets')) offense_id = remote_args.remote_incident_id offense = client.offenses_list(offense_id=offense_id) offense_last_update = get_time_parameter(offense.get('last_persisted_time')) mirror_options = params.get('mirror_options') context_data = get_integration_context() processed_offenses = print_mirror_events_stats(context_data, f"Starting Get Remote Data For " f"Offense {str(offense.get('id'))}") # versions below 6.1 compatibility last_update = get_time_parameter(args.get('lastUpdate')) if last_update and last_update > offense_last_update and str(offense.get("id")) not in processed_offenses: demisto.debug('Nothing new in the ticket') return GetRemoteDataResponse({'id': offense_id, 'in_mirror_error': ''}, []) demisto.debug(f'Updating offense. Offense last update was {offense_last_update}') entries = [] if offense.get('status') == 'CLOSED' and argToBoolean(params.get('close_incident', False)): demisto.debug(f'Offense is closed: {offense}') if closing_reason := offense.get('closing_reason_id', ''): closing_reason = client.closing_reasons_list(closing_reason).get('text') offense_close_time = offense.get('close_time', '') closed_offense_notes = client.offense_notes_list(offense_id, f'items={DEFAULT_RANGE_VALUE}', filter_=f'create_time >= {offense_close_time}') # In QRadar UI, when you close a reason, a note is added with the reason and more details. Try to get note # if exists, else fallback to closing reason only, as closing QRadar through an API call does not create a note. 
close_reason_with_note = next((note.get('note_text') for note in closed_offense_notes if note.get('note_text').startswith('This offense was closed with reason:')), closing_reason) if not close_reason_with_note: print_debug_msg(f'Could not find closing reason or closing note for offense with offense id {offense_id}') close_reason_with_note = 'Unknown closing reason from QRadar' else: close_reason_with_note = f'From QRadar: {close_reason_with_note}' entries.append({ 'Type': EntryType.NOTE, 'Contents': { 'dbotIncidentClose': True, 'closeReason': close_reason_with_note }, 'ContentsFormat': EntryFormat.JSON }) if mirror_options == MIRROR_OFFENSE_AND_EVENTS: offenses_with_updated_events = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []) offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) max_retries = MAX_FETCH_EVENT_RETIRES * (len(offenses_waiting_for_update) + 3) is_waiting_to_be_updated = True evented_offense = None retries = 0 while ((not evented_offense) or is_waiting_to_be_updated) and retries < max_retries: if retries != 0: time.sleep(FAILURE_SLEEP) context_data = get_integration_context().copy() print_mirror_events_stats(context_data, f"Get Remote Data Loop for id {offense.get('id')}, retry {retries}") retries += 1 offenses_with_updated_events = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []) offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) evented_offense = [evented_offense for evented_offense in offenses_with_updated_events if str(evented_offense.get('id')) == str(offense.get("id"))] is_waiting_to_be_updated = any([True for waiting_offense in offenses_waiting_for_update if str(waiting_offense.get('id')) == str(offense.get("id"))]) if evented_offense: demisto.debug(f"Mirror Events: Offense {offense.get('id')} events were updated, updating incident.") if evented_offense[0].get('events'): offense['events'] = evented_offense[0].get('events') demisto.debug(f"Mirror Events: Offense {offense.get('id')} now has {offense.get('events')} " f"fetched events.") offenses_with_updated_events.remove(evented_offense[0]) resubmitted_offenses_ids = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, []).copy() if offense.get("id") in resubmitted_offenses_ids: resubmitted_offenses_ids.remove(offense.get("id")) context_data[RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY] = resubmitted_offenses_ids context_data[UPDATED_MIRRORED_OFFENSES_CTX_KEY] = offenses_with_updated_events print_mirror_events_stats(context_data, f"Get Remote Data End for id {offense.get('id')}") set_integration_context(context_data) enriched_offense = enrich_offenses_result(client, offense, ip_enrich, asset_enrich) final_offense_data = sanitize_outputs(enriched_offense)[0] return GetRemoteDataResponse(final_offense_data, entries)
6,077
def matchQueue(jobJDL, queueDict, fullMatch=False): """ Match the job description to the queue definition :param str job: JDL job description :param bool fullMatch: test matching on all the criteria :param dict queueDict: queue parameters dictionary :return: S_OK/S_ERROR, Value - result of matching, S_OK if matched or S_ERROR with the reason for no match """ # Check the job description validity job = ClassAd(jobJDL) if not job.isOK(): return S_ERROR('Invalid job description') noMatchReasons = [] # Check job requirements to resource # 1. CPUTime cpuTime = job.getAttributeInt('CPUTime') if not cpuTime: cpuTime = 84600 if cpuTime and cpuTime > queueDict.get('CPUTime', 0.): noMatchReasons.append('Job CPUTime requirement not satisfied') if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 2. Multi-value match requirements for parameter in ['Site', 'GridCE', 'Platform', 'GridMiddleware', 'PilotType', 'SubmitPool', 'JobType']: if parameter in queueDict: valueSet = set(job.getListFromExpression(parameter)) if not valueSet: valueSet = set(job.getListFromExpression('%ss' % parameter)) queueSet = set(fromChar(queueDict[parameter])) if valueSet and queueSet and not valueSet.intersection(queueSet): valueToPrint = ','.join(valueSet) if len(valueToPrint) > 20: valueToPrint = "%s..." % valueToPrint[:20] noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint)) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 3. Banned multi-value match requirements for par in ['Site', 'GridCE', 'Platform', 'GridMiddleware', 'PilotType', 'SubmitPool', 'JobType']: parameter = "Banned%s" % par if par in queueDict: valueSet = set(job.getListFromExpression(parameter)) if not valueSet: valueSet = set(job.getListFromExpression('%ss' % parameter)) queueSet = set(fromChar(queueDict[par])) if valueSet and queueSet and valueSet.issubset(queueSet): valueToPrint = ','.join(valueSet) if len(valueToPrint) > 20: valueToPrint = "%s..." % valueToPrint[:20] noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint)) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 4. Tags tags = set(job.getListFromExpression('Tag')) nProc = job.getAttributeInt('NumberOfProcessors') if nProc and nProc > 1: tags.add('MultiProcessor') wholeNode = job.getAttributeString('WholeNode') if wholeNode: tags.add('WholeNode') queueTags = set(queueDict.get('Tags', [])) if not tags.issubset(queueTags): noMatchReasons.append('Job Tag %s not satisfied' % ','.join(tags)) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 4. MultiProcessor requirements if nProc and nProc > int(queueDict.get('NumberOfProcessors', 1)): noMatchReasons.append('Job NumberOfProcessors %d requirement not satisfied' % nProc) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 5. RAM ram = job.getAttributeInt('RAM') if ram and ram > int(queueDict['MaxRAM']): noMatchReasons.append('Job RAM %d requirement not satisfied' % ram) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # Check resource requirements to job # 1. 
OwnerGroup - rare case but still if "OwnerGroup" in queueDict: result = getProxyInfo(disableVOMS=True) if not result['OK']: return S_ERROR('No valid proxy available') ownerGroup = result['Value']['group'] if ownerGroup != queueDict['OwnerGroup']: noMatchReasons.append('Resource OwnerGroup %s requirement not satisfied' % queueDict['OwnerGroup']) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 2. Required tags requiredTags = set(queueDict.get('RequiredTags', [])) if not requiredTags.issubset(tags): noMatchReasons.append('Resource RequiredTags %s not satisfied' % ','.join(requiredTags)) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 3. RunningLimit site = queueDict['Site'] opsHelper = Operations() result = opsHelper.getSections('JobScheduling/RunningLimit') if result['OK'] and site in result['Value']: result = opsHelper.getSections('JobScheduling/RunningLimit/%s' % site) if result['OK']: for parameter in result['Value']: value = job.getAttributeString(parameter) if value and opsHelper.getValue('JobScheduling/RunningLimit/%s/%s/%s' % (site, parameter, value), 1) == 0: noMatchReasons.append('Resource operational %s requirement not satisfied' % parameter) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) if noMatchReasons: return S_OK({'Match': False, 'Reason': noMatchReasons}) return S_OK({'Match': True, 'Reason': noMatchReasons})
def matchQueue(jobJDL, queueDict, fullMatch=False): """ Match the job description to the queue definition :param str job: JDL job description :param bool fullMatch: test matching on all the criteria :param dict queueDict: queue parameters dictionary :return: S_OK/S_ERROR, Value - result of matching, S_OK if matched or S_ERROR with the reason for no match """ # Check the job description validity job = ClassAd(jobJDL) if not job.isOK(): return S_ERROR('Invalid job description') noMatchReasons = [] # Check job requirements to resource # 1. CPUTime cpuTime = job.getAttributeInt('CPUTime') if not cpuTime: cpuTime = 84600 if cpuTime and cpuTime > queueDict.get('CPUTime', 0.): noMatchReasons.append('Job CPUTime requirement not satisfied') if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 2. Multi-value match requirements for parameter in ['Site', 'GridCE', 'Platform', 'GridMiddleware', 'PilotType', 'SubmitPool', 'JobType']: if parameter in queueDict: valueSet = set(job.getListFromExpression(parameter)) if not valueSet: valueSet = set(job.getListFromExpression('%ss' % parameter)) queueSet = set(fromChar(queueDict[parameter])) if valueSet and queueSet and not valueSet.intersection(queueSet): valueToPrint = ','.join(valueSet) if len(valueToPrint) > 20: valueToPrint = "%s..." % valueToPrint[:20] noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint)) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 3. Banned multi-value match requirements for par in ['Site', 'GridCE', 'Platform', 'GridMiddleware', 'PilotType', 'SubmitPool', 'JobType']: parameter = "Banned%s" % par if par in queueDict: valueSet = set(job.getListFromExpression(parameter)) if not valueSet: valueSet = set(job.getListFromExpression('%ss' % parameter)) queueSet = set(fromChar(queueDict[par])) if valueSet and queueSet and valueSet.issubset(queueSet): valueToPrint = ','.join(valueSet) if len(valueToPrint) > 20: valueToPrint = "%s..." % valueToPrint[:20] noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint)) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 4. Tags tags = set(job.getListFromExpression('Tag')) nProc = job.getAttributeInt('NumberOfProcessors') if nProc and nProc > 1: tags.add('MultiProcessor') wholeNode = job.getAttributeString('WholeNode') if wholeNode: tags.add('WholeNode') queueTags = set(queueDict.get('Tags', [])) if not tags.issubset(queueTags): noMatchReasons.append('Job Tag %s not satisfied' % ','.join(tags)) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 4. MultiProcessor requirements if nProc and nProc > int(queueDict.get('NumberOfProcessors', 1)): noMatchReasons.append('Job NumberOfProcessors %d requirement not satisfied' % nProc) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 5. RAM ram = job.getAttributeInt('RAM') if ram and ram > int(queueDict['MaxRAM']): noMatchReasons.append('Job RAM %d requirement not satisfied' % ram) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # Check resource requirements to job # 1. 
OwnerGroup - rare case but still if "OwnerGroup" in queueDict: result = getProxyInfo(disableVOMS=True) if not result['OK']: return S_ERROR('No valid proxy available') ownerGroup = result['Value']['group'] if ownerGroup != queueDict['OwnerGroup']: noMatchReasons.append('Resource OwnerGroup %s requirement not satisfied' % queueDict['OwnerGroup']) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 2. Required tags requiredTags = set(queueDict.get('RequiredTags', [])) if not requiredTags.issubset(tags): noMatchReasons.append('Resource RequiredTags %s not satisfied' % ','.join(requiredTags)) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) # 3. RunningLimit site = queueDict['Site'] opsHelper = Operations() result = opsHelper.getSections('JobScheduling/RunningLimit') if result['OK'] and site in result['Value']: result = opsHelper.getSections('JobScheduling/RunningLimit/%s' % site) if result['OK']: for parameter in result['Value']: value = job.getAttributeString(parameter) if value and opsHelper.getValue('JobScheduling/RunningLimit/%s/%s/%s' % (site, parameter, value), 1) == 0: noMatchReasons.append('Resource operational %s requirement not satisfied' % parameter) if not fullMatch: return S_OK({'Match': False, 'Reason': noMatchReasons[0]}) if noMatchReasons: return S_OK({'Match': False, 'Reason': noMatchReasons}) return S_OK({'Match': not bool(noMatchReasons), 'Reason': noMatchReasons})
25,968
def get_data_service_client(cli_ctx, service_type, account_name, account_key, connection_string=None, sas_token=None, socket_timeout=None, token_credential=None, endpoint_suffix=None, location_mode=None): logger.debug('Getting data service client service_type=%s', service_type.__name__) try: if account_name: account_name = account_name.split('.', 2)[0] client_kwargs = {'account_name': account_name, 'account_key': account_key, 'connection_string': connection_string, 'sas_token': sas_token} if socket_timeout: client_kwargs['socket_timeout'] = socket_timeout if token_credential: client_kwargs['token_credential'] = token_credential if endpoint_suffix: client_kwargs['endpoint_suffix'] = endpoint_suffix client = service_type(**client_kwargs) if location_mode: client.location_mode = location_mode if 'Blob' in service_type.__name__: service = 'blob' elif 'File' in service_type.__name__: service = 'file' elif 'Queue' in service_type.__name__: service = 'queue' elif 'Table' in service_type.__name__: service = 'table' else: raise CLIError("Invalid service type.") if account_name and len(account_name.split('.', 2)) == 2: dns = account_name.split('.', 2)[1] client.primary_endpoint = "{}.{}.{}.{}".format(client.primary_endpoint.split('.', 1)[0], dns, service, endpoint_suffix) client.secondary_endpoint = "{}.{}.{}.{}".format(client.secondary_endpoint.split('.', 1)[0], dns, service, endpoint_suffix) except ValueError as exc: _ERROR_STORAGE_MISSING_INFO = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common._error#_ERROR_STORAGE_MISSING_INFO') if _ERROR_STORAGE_MISSING_INFO in str(exc): raise ValueError(exc) raise CLIError('Unable to obtain data client. Check your connection parameters.') # TODO: enable Fiddler client.request_callback = _get_add_headers_callback(cli_ctx) return client
def get_data_service_client(cli_ctx, service_type, account_name, account_key, connection_string=None, sas_token=None, socket_timeout=None, token_credential=None, endpoint_suffix=None, location_mode=None): logger.debug('Getting data service client service_type=%s', service_type.__name__) try: client_kwargs = {'account_name': account_name.split('.', 2)[0] if account_name else account_name, 'account_key': account_key, 'connection_string': connection_string, 'sas_token': sas_token} if socket_timeout: client_kwargs['socket_timeout'] = socket_timeout if token_credential: client_kwargs['token_credential'] = token_credential if endpoint_suffix: client_kwargs['endpoint_suffix'] = endpoint_suffix client = service_type(**client_kwargs) if location_mode: client.location_mode = location_mode if 'Blob' in service_type.__name__: service = 'blob' elif 'File' in service_type.__name__: service = 'file' elif 'Queue' in service_type.__name__: service = 'queue' elif 'Table' in service_type.__name__: service = 'table' else: raise CLIError("Invalid service type.") if account_name and len(account_name.split('.', 2)) == 2: dns = account_name.split('.', 2)[1] client.primary_endpoint = "{}.{}.{}.{}".format(client.primary_endpoint.split('.', 1)[0], dns, service, endpoint_suffix) client.secondary_endpoint = "{}.{}.{}.{}".format(client.secondary_endpoint.split('.', 1)[0], dns, service, endpoint_suffix) except ValueError as exc: _ERROR_STORAGE_MISSING_INFO = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common._error#_ERROR_STORAGE_MISSING_INFO') if _ERROR_STORAGE_MISSING_INFO in str(exc): raise ValueError(exc) raise CLIError('Unable to obtain data client. Check your connection parameters.') # TODO: enable Fiddler client.request_callback = _get_add_headers_callback(cli_ctx) return client
54,216
def group_settings_greedy(settings: Iterable[InitObsSetting]) \ -> Dict[InitObsSetting, List[InitObsSetting]]: """ Group a list of settings which can be simultaneously measured via a greedy algorithm. We construct a dictionary keyed by `max_setting` (see docstrings for `_max_weight_state` and `_max_weight_observable`) where the value is a list of settings compatible with `max_setting`. For each new setting, we try to find an existing group to add it and update `max_setting` for that group if necessary. Otherwise, we make a new group. In practice, this greedy algorithm performs comparably to something more complicated by solving the clique cover problem on a graph of simultaneously-measurable settings. Args: settings: The settings to group. Returns: A dictionary keyed by `max_setting` which need not exist in the input list of settings. Each dictionary value is a list of settings compatible with `max_setting`. """ grouped_settings = {} # type: Dict[InitObsSetting, List[InitObsSetting]] for setting in settings: for max_setting, simul_settings in grouped_settings.items(): trial_grouped_settings = simul_settings + [setting] new_max_weight_state = _max_weight_state( stg.init_state for stg in trial_grouped_settings) new_max_weight_obs = _max_weight_observable( stg.observable for stg in trial_grouped_settings) # max_weight_xxx returns None if the set of xxx's aren't compatible, # so the following conditional is True if setting can # be inserted into the current group. if (new_max_weight_state is not None and new_max_weight_obs is not None): del grouped_settings[max_setting] new_max_setting = InitObsSetting(new_max_weight_state, new_max_weight_obs) grouped_settings[new_max_setting] = trial_grouped_settings break else: # made it through entire dict without finding an existing group # Strip coefficients before using as key new_max_weight_obs = setting.observable.with_coefficient(1.0) new_max_setting = InitObsSetting(setting.init_state, new_max_weight_obs) grouped_settings[new_max_setting] = [setting] return grouped_settings
def group_settings_greedy(settings: Iterable[InitObsSetting]) \ -> Dict[InitObsSetting, List[InitObsSetting]]: """ Group a list of settings which can be simultaneously measured via a greedy algorithm. We construct a dictionary keyed by `max_setting` (see docstrings for `_max_weight_state` and `_max_weight_observable`) where the value is a list of settings compatible with `max_setting`. For each new setting, we try to find an existing group to add it and update `max_setting` for that group if necessary. Otherwise, we make a new group. In practice, this greedy algorithm performs comparably to something more complicated by solving the clique cover problem on a graph of simultaneously-measurable settings. Args: settings: The settings to group. Returns: A dictionary keyed by `max_setting` which need not exist in the input list of settings. Each dictionary value is a list of settings compatible with `max_setting`. """ grouped_settings = {} # type: Dict[InitObsSetting, List[InitObsSetting]] for setting in settings: for max_setting, simul_settings in grouped_settings.items(): trial_grouped_settings = simul_settings + [setting] new_max_weight_state = _max_weight_state( stg.init_state for stg in trial_grouped_settings) new_max_weight_obs = _max_weight_observable( stg.observable for stg in trial_grouped_settings) # max_weight_xxx returns None if the set of xxx's aren't compatible, # so the following conditional is True if setting can # be inserted into the current group. if (new_max_weight_state is not None and new_max_weight_obs is not None): del grouped_settings[max_setting] new_max_setting = InitObsSetting(new_max_weight_state, new_max_weight_obs) grouped_settings[new_max_setting] = trial_grouped_settings break else: # made it through entire dict without finding a compatible group, # thus a new group needs to be created # Strip coefficients before using as key new_max_weight_obs = setting.observable.with_coefficient(1.0) new_max_setting = InitObsSetting(setting.init_state, new_max_weight_obs) grouped_settings[new_max_setting] = [setting] return grouped_settings
20,273
def unholder(item): """Get the held itme of an object holder of list of object holers.""" if isinstance(item, list): return [i.held_object if hasattr(i, 'held_object') else i for i in item] if hasattr(item, 'held_object'): return item.held_object return item
def unholder(item): """Get the held item of an object holder or list of object holders.""" if isinstance(item, list): return [i.held_object if hasattr(i, 'held_object') else i for i in item] if hasattr(item, 'held_object'): return item.held_object return item
40,426
def test_graph_store_conversion(): graph_store = MyGraphStore() edge_index = get_edge_index(100, 100, 300) edge_index = sort_edge_index(edge_index, sort_by_row=False) adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100)) coo = (edge_index[0], edge_index[1]) csr = adj.csr()[:2] csc = adj.csc()[-2::-1] # Put all edge indices: graph_store.put_edge_index(edge_index=coo, edge_type=('v', '1', 'v'), layout='coo', num_nodes=(100, 100), is_sorted=True) graph_store.put_edge_index(edge_index=csr, edge_type=('v', '2', 'v'), layout='csr', num_nodes=(100, 100)) graph_store.put_edge_index(edge_index=csc, edge_type=('v', '3', 'v'), layout='csc', num_nodes=(100, 100)) def assert_edge_index_equal(expected: torch.Tensor, actual: torch.Tensor): assert torch.equal(sort_edge_index(expected), sort_edge_index(actual)) # Convert to COO: row_dict, col_dict, perm_dict = graph_store.coo() assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 for key in row_dict.keys(): actual = torch.stack((row_dict[key], col_dict[key])) assert_edge_index_equal(actual, edge_index) assert perm_dict[key] is None # Convert to CSR: row_dict, col_dict, perm_dict = graph_store.csr() assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 for key in row_dict: assert torch.equal(row_dict[key], csr[0]) assert torch.equal(col_dict[key], csr[1]) if key == ('v', '1', 'v'): assert perm_dict[key] is not None # Convert to CSC: row_dict, col_dict, perm_dict = graph_store.csc() assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 for key in row_dict: assert torch.equal(row_dict[key], csc[0]) assert torch.equal(col_dict[key], csc[1]) assert perm_dict[key] is None
def test_graph_store_conversion(): graph_store = MyGraphStore() edge_index = get_edge_index(100, 100, 300) edge_index = sort_edge_index(edge_index, sort_by_row=False) adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100)) coo = (edge_index[0], edge_index[1]) csr = adj.csr()[:2] csc = adj.csc()[-2::-1] # Put all edge indices: graph_store.put_edge_index(edge_index=coo, edge_type=('v', '1', 'v'), layout='coo', num_nodes=(100, 100), is_sorted=True) graph_store.put_edge_index(edge_index=csr, edge_type=('v', '2', 'v'), layout='csr', num_nodes=(100, 100)) graph_store.put_edge_index(edge_index=csc, edge_type=('v', '3', 'v'), layout='csc', num_nodes=(100, 100)) def assert_edge_index_equal(expected: torch.Tensor, actual: torch.Tensor): assert torch.equal(sort_edge_index(expected), sort_edge_index(actual)) # Convert to COO: row_dict, col_dict, perm_dict = graph_store.coo() assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 for key in row_dict.keys(): actual = torch.stack((row_dict[key], col_dict[key])) assert_edge_index_equal(actual, edge_index) assert perm_dict[key] is None # Convert to CSR: row_dict, col_dict, perm_dict = graph_store.csr() assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 for key in row_dict: assert torch.equal(row_dict[key], csr[0]) assert torch.equal(col_dict[key], csr[1]) if key == ('v', '1', 'v'): assert perm_dict[key] is not None # Convert to CSC: row_dict, colptr_dict, perm_dict = graph_store.csc() assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 for key in row_dict: assert torch.equal(row_dict[key], csc[0]) assert torch.equal(col_dict[key], csc[1]) assert perm_dict[key] is None
58,329
def rk4(f, x, t, dt, stages=4, s=0.0): """Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers. The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0 convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama scheme for SDEs (s > 0.0) with strong / weak convergence order 1.0 for SDEs with additive noise as defined in the below. See `bib.grudzien2020numerical`. Parameters ---------- f : function The time derivative of the dynamical system. Must be of the form `f(t, x)` x : ndarray or float State vector of the forcing term t : float Starting time of the integration dt : float Integration time step. stages : int, optional The number of stages of the RK method. Default: 4. When stages=1, this becomes Euler / Euler-Maruyama. s : float The diffusion coefficient for models with additive noise. Default: 0 for deterministic integration. Returns ------- ndarray State vector at the new time, `t+dt` """ if s > 0.0: # non-trivial diffusion, this defines the SDE integration with additive noise # generate perturbation for Brownian motion dims = np.shape(x) if len(dims) > 1: N_e, N_x , = dims W = np.sqrt(dt) * np.random.standard_normal(N_e, N_x) else: N_x , = dims W = np.sqrt(dt) * np.random.standard_normal(N_x) if stages >=1: k1 = dt * f(t , x) + s * W # noqa if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) + s * W # noqa if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) + s * W # noqa if stages ==4: # noqa k3 = dt * f(t+dt/2.0, x+k2/2.0) + s * W # noqa k4 = dt * f(t+dt , x+k3) + s * W # noqa if stages ==1: return x + k1 # noqa elif stages ==2: return x + k2 # noqa elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa else: raise NotImplementedError # noqa else: # deterministic integration if stages >=1: k1 = dt * f(t , x) # noqa if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) # noqa if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) # noqa if stages ==4: # noqa k3 = dt * f(t+dt/2.0, x+k2/2.0) # noqa k4 = dt * f(t+dt , x+k3) # noqa if stages ==1: return x + k1 # noqa elif stages ==2: return x + k2 # noqa elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa else: raise NotImplementedError # noqa # fmt: on
def rk4(f, x, t, dt, stages=4, s=0.0): """Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers. The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0 convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama schemefor SDEs (s > 0.0) with strong / weak convergence order 1.0 for SDEs with additive noise as defined in the below. See `bib.grudzien2020numerical`. Parameters ---------- f : function The time derivative of the dynamical system. Must be of the form `f(t, x)` x : ndarray or float State vector of the forcing term t : float Starting time of the integration dt : float Integration time step. stages : int, optional The number of stages of the RK method. When stages=1, this becomes the Euler (-Maruyama) scheme. Default: 4. s : float The diffusion coeffient for models with additive noise. Default: 0 for deterministic integration. Returns ------- ndarray State vector at the new time, `t+dt` """ if s > 0.0: # non-trivial diffusion, this defines the SDE integration with additive noise # generate perturbation for Brownian motion dims = np.shape(x) if len(dims) > 1: N_e, N_x , = dims W = np.sqrt(dt) * np.random.standard_normal(N_e, N_x) else: N_x , = dims W = np.sqrt(dt) * np.random.standard_normal(N_x) if stages >=1: k1 = dt * f(t , x) + s * W # noqa if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) + s * W # noqa if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) + s * W # noqa if stages ==4: # noqa k3 = dt * f(t+dt/2.0, x+k2/2.0) + s * W # noqa k4 = dt * f(t+dt , x+k3) + s * W # noqa if stages ==1: return x + k1 # noqa elif stages ==2: return x + k2 # noqa elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa else: raise NotImplementedError # noqa else: # deterministic integration if stages >=1: k1 = dt * f(t , x) # noqa if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) # noqa if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) # noqa if stages ==4: # noqa k3 = dt * f(t+dt/2.0, x+k2/2.0) # noqa k4 = dt * f(t+dt , x+k3) # noqa if stages ==1: return x + k1 # noqa elif stages ==2: return x + k2 # noqa elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa else: raise NotImplementedError # noqa # fmt: on
53,266
def boris_push_relativistic(x, v, B, E, q, m, dt): r""" The explicit Boris pusher, including realtivistic corrections. Parameters ---------- x : np.ndarray particle position at full timestep, in SI (meter) units. v : np.ndarray particle velocity at half timestep, in SI (meter/second) units. B : np.ndarray magnetic field at full timestep, in SI (tesla) units. E : float electric field at full timestep, in SI (V/m) units. q : float particle charge, in SI (Coulomb) units. m : float particle mass, in SI (kg) units. dt : float timestep, in SI (second) units. Notes ---------- The Boris algorithm is the standard energy conserving algorithm for particle movement in plasma physics. See [1]_ for more details, and [2]_ for a nice overview. Conceptually, the algorithm has three phases: 1. Add half the impulse from electric field. 2. Rotate the particle velocity about the direction of the magnetic field. 3. Add the second half of the impulse from the electric field. This ends up causing the magnetic field action to be properly "centered" in time, and the algorithm, being a symplectic integrator, conserves energy. References ---------- .. [1] C. K. Birdsall, A. B. Langdon, "Plasma Physics via Computer Simulation", 2004, p. 58-63 .. [2] L. Brieda, "Particle Push in Magnetic Field (Boris Method)", https://www.particleincell.com/2011/vxb-rotation/ """ c = constants.c.si.value γ = 1 / np.sqrt(1 - (v / c) ** 2) uvel = v * γ uvel_minus = uvel + q * E * dt / (2 * m) γ1 = np.sqrt(1 + (uvel_minus / c) ** 2) # Birdsall has a factor of c incorrect in the definiton of t? # See this source: https://www.sciencedirect.com/science/article/pii/S163107211400148X t = q * B * dt / (2 * γ1 * m) s = 2 * t / (1 + (t * t).sum(axis=1, keepdims=True)) uvel_prime = uvel_minus + np.cross(uvel_minus.si.value, t) uvel_plus = uvel_minus + np.cross(uvel_prime.si.value, s) uvel_new = uvel_plus + +q * E * dt / (2 * m) # You can show that this expression is equivalent to calculating # v_new then calculating γnew using the usual formula γ2 = np.sqrt(1 + (uvel_new / c) ** 2) # Update the velocities of the particles that are being pushed v[...] = uvel_new / γ2 x += v * dt
def boris_push_relativistic(x, v, B, E, q, m, dt): r""" The explicit Boris pusher, including realtivistic corrections. Parameters ---------- x : np.ndarray particle position at full timestep, in SI (meter) units. v : np.ndarray particle velocity at half timestep, in SI (meter/second) units. B : np.ndarray magnetic field at full timestep, in SI (tesla) units. E : float electric field at full timestep, in SI (V/m) units. q : float particle charge, in SI (Coulomb) units. m : float particle mass, in SI (kg) units. dt : float timestep, in SI (second) units. Notes ---------- For the basic overview of this algorithm, see `boris_push`. This version, based on [1]_, applies relativistic corrections such as TODO. Keep in mind that the non-relativistic version will be slightly faster if you don't encounter velocities in relativistic regimes. References ---------- .. [1] C. K. Birdsall, A. B. Langdon, "Plasma Physics via Computer Simulation", 2004, p. 58-63 """ c = constants.c.si.value γ = 1 / np.sqrt(1 - (v / c) ** 2) uvel = v * γ uvel_minus = uvel + q * E * dt / (2 * m) γ1 = np.sqrt(1 + (uvel_minus / c) ** 2) # Birdsall has a factor of c incorrect in the definiton of t? # See this source: https://www.sciencedirect.com/science/article/pii/S163107211400148X t = q * B * dt / (2 * γ1 * m) s = 2 * t / (1 + (t * t).sum(axis=1, keepdims=True)) uvel_prime = uvel_minus + np.cross(uvel_minus.si.value, t) uvel_plus = uvel_minus + np.cross(uvel_prime.si.value, s) uvel_new = uvel_plus + +q * E * dt / (2 * m) # You can show that this expression is equivalent to calculating # v_new then calculating γnew using the usual formula γ2 = np.sqrt(1 + (uvel_new / c) ** 2) # Update the velocities of the particles that are being pushed v[...] = uvel_new / γ2 x += v * dt
1,217
def needs_nibabel_data(subdir=None): """ Decorator for tests needing nibabel-data Parameters ---------- subdir : None or str Subdirectory we need in nibabel-data directory. If None, only require nibabel-data directory itself. Returns ------- skip_dec : decorator Decorator skipping tests if required directory not present """ nibabel_data = get_nibabel_data() if nibabel_data == '': return pytest.mark.skipif(True, reason="Need nibabel-data directory for this test") if subdir is None: return pytest.mark.skipif(False, reason="todo") required_path = pjoin(nibabel_data, subdir) # Path should not be empty (as is the case for not-updated submodules) have_files = exists(required_path) and len(listdir(required_path)) > 0 return pytest.mark.skipif(not have_files, reason="Need files in {0} for these tests".format(required_path))
def needs_nibabel_data(subdir=None): """ Decorator for tests needing nibabel-data Parameters ---------- subdir : None or str Subdirectory we need in nibabel-data directory. If None, only require nibabel-data directory itself. Returns ------- skip_dec : decorator Decorator skipping tests if required directory not present """ nibabel_data = get_nibabel_data() if nibabel_data == '': return pytest.mark.skipif(True, reason="Need nibabel-data directory for this test") if subdir is None: return pytest.mark.skipif(False, reason="todo") required_path = pjoin(nibabel_data, subdir) # Path should not be empty (as is the case for not-updated submodules) have_files = exists(required_path) and len(listdir(required_path)) > 0 return pytest.mark.skipif(not have_files, reason="Need files in {0} for these tests".format(required_path))
57,843
def main() -> None: try: arguments = demisto.args() api_key = demisto.params().get('apikey') base_url = urljoin(demisto.params()['url'], '/api/') verify_certificate = not demisto.params().get('insecure', False) first_fetch_time = arg_to_timestamp( arg=demisto.params().get('first_fetch', '1 days'), arg_name='First fetch time', required=True ) assert isinstance(first_fetch_time, int) proxy = demisto.params().get('proxy', False) page = arguments.get('page', "1") page_count_no = arguments.get('max', "25") demisto.debug(f'Command being called is {demisto.command()}') params = {'page': page, 'max': page_count_no} headers = { 'Authorization': f'Bearer {api_key}' } client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy) if demisto.command() == 'test-module': result = test_module_command(client) return_results(result) elif demisto.command() == 'fetch-incidents': # Set and define the fetch incidents command to run after activated via integration settings. fetch_incident_command = demisto.params().get('fetch_incident_command') max_results = arg_to_int( arg=demisto.params().get('max_fetch'), arg_name='max_fetch', required=False ) if not max_results or max_results > MAX_INCIDENTS_TO_FETCH: max_results = MAX_INCIDENTS_TO_FETCH next_run, incidents = fetch_incidents( client=client, max_results=max_results, last_run=demisto.getLastRun(), # getLastRun() gets the last run dict first_fetch_time=first_fetch_time, command_type=fetch_incident_command ) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == 'gra-fetch-users': fetch_records(client, '/users', 'Gra.Users', 'employeeId', params) elif demisto.command() == 'gra-fetch-accounts': fetch_records(client, '/accounts', 'Gra.Accounts', 'id', params) elif demisto.command() == 'gra-fetch-active-resource-accounts': resource_name = arguments.get('resource_name', 'Windows Security') active_resource_url = '/resources/' + resource_name + '/accounts' fetch_records(client, active_resource_url, 'Gra.Active.Resource.Accounts', 'id', params) elif demisto.command() == 'gra-fetch-user-accounts': employee_id = arguments.get('employee_id') user_account_url = '/users/' + employee_id + '/accounts' fetch_records(client, user_account_url, 'Gra.User.Accounts', 'id', params) elif demisto.command() == 'gra-fetch-resource-highrisk-accounts': res_name = arguments.get('Resource_name', 'Windows Security') high_risk_account_resource_url = '/resources/' + res_name + '/accounts/highrisk' fetch_records(client, high_risk_account_resource_url, 'Gra.Resource.Highrisk.Accounts', 'id', params) elif demisto.command() == 'gra-fetch-hpa': fetch_records(client, '/accounts/highprivileged', 'Gra.Hpa', 'id', params) elif demisto.command() == 'gra-fetch-resource-hpa': resource_name = arguments.get('Resource_name', 'Windows Security') resource_hpa = '/resources/' + resource_name + '/accounts/highprivileged' fetch_records(client, resource_hpa, 'Gra.Resource.Hpa', 'id', params) elif demisto.command() == 'gra-fetch-orphan-accounts': fetch_records(client, '/accounts/orphan', 'Gra.Orphan.Accounts', 'id', params) elif demisto.command() == 'gra-fetch-resource-orphan-accounts': resource_name = arguments.get('resource_name', 'Windows Security') resource_orphan = '/resources/' + resource_name + '/accounts/orphan' fetch_records(client, resource_orphan, 'Gra.Resource.Orphan.Accounts', 'id', params) elif demisto.command() == 'gra-user-activities': employee_id = arguments.get('employee_id') user_activities_url = '/user/' + employee_id + '/activity' 
fetch_records(client, user_activities_url, 'Gra.User.Activity', 'employee_id', params) elif demisto.command() == 'gra-fetch-users-details': employee_id = arguments.get('employee_id') fetch_records(client, '/users/' + employee_id, 'Gra.User', 'employeeId', params) elif demisto.command() == 'gra-highRisk-users': fetch_records(client, '/users/highrisk', 'Gra.Highrisk.Users', 'employeeId', params) elif demisto.command() == 'gra-cases': status = arguments.get('status') cases_url = '/cases/' + status fetch_records(client, cases_url, 'Gra.Cases', 'caseId', params) elif demisto.command() == 'gra-user-anomalies': employee_id = arguments.get('employee_id') anomaly_url = '/users/' + employee_id + '/anomalies/' fetch_records(client, anomaly_url, 'Gra.User.Anomalies', 'anomaly_name', params) elif demisto.command() == 'gra-case-action': action = arguments.get('action') caseId = arguments.get('caseId') subOption = arguments.get('subOption') caseComment = arguments.get('caseComment') riskAcceptDate = arguments.get('riskAcceptDate') cases_url = '/cases/' + action if action == 'riskManageCase': post_url = '{"caseId":' + caseId + ',"subOption":"' + subOption + '","caseComment":"' + caseComment +\ '","riskAcceptDate":"' + riskAcceptDate + '"}' else: post_url = '{"caseId":' + caseId + ',"subOption":"' + subOption + '","caseComment":"' + \ caseComment + '"}' fetch_post_records(client, cases_url, 'Gra.Case.Action', 'caseId', params, post_url) elif demisto.command() == 'gra-case-action-anomaly': action = arguments.get('action') caseId = arguments.get('caseId') anomalyNames = arguments.get('anomalyNames') subOption = arguments.get('subOption') caseComment = arguments.get('caseComment') riskAcceptDate = arguments.get('riskAcceptDate') cases_url = '/cases/' + action if action == 'riskAcceptCaseAnomaly': post_url = '{"caseId":' + caseId + ',"anomalyNames":' + anomalyNames + ',"subOption":"' + subOption +\ '","caseComment":"' + caseComment + '","riskAcceptDate":"' + riskAcceptDate + '"}' else: post_url = '{"caseId":' + caseId + ',"anomalyNames":"' + anomalyNames + '","subOption":"' + \ subOption + '","caseComment":"' + caseComment + '"}' fetch_post_records(client, cases_url, 'Gra.Cases.Action.Anomaly', 'caseId', params, post_url) elif demisto.command() == 'gra-investigate-anomaly-summary': fromDate = arguments.get('fromDate') toDate = arguments.get('toDate') modelName = arguments.get('modelName') if fromDate is not None and toDate is not None: investigateAnomaly_url = '/investigateAnomaly/anomalySummary/' + modelName + '?fromDate=' + fromDate \ + ' 00:00:00&toDate=' + toDate + ' 23:59:59' else: investigateAnomaly_url = '/investigateAnomaly/anomalySummary/' + modelName fetch_records(client, investigateAnomaly_url, 'Gra.Investigate.Anomaly.Summary', 'modelId', params) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
def main() -> None: try: arguments = demisto.args() api_key = demisto.params().get('apikey') base_url = urljoin(demisto.params()['url'], '/api/') verify_certificate = not demisto.params().get('insecure', False) first_fetch_time = arg_to_timestamp( arg=demisto.params().get('first_fetch', '1 days'), arg_name='First fetch time', required=True ) assert isinstance(first_fetch_time, int) proxy = demisto.params().get('proxy', False) page = arguments.get('page', "1") page_count_no = arguments.get('max', "25") demisto.debug(f'Command being called is {demisto.command()}') params = {'page': page, 'max': page_count_no} headers = { 'Authorization': f'Bearer {api_key}' } client = Client( base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy) if demisto.command() == 'test-module': result = test_module_command(client) return_results(result) elif demisto.command() == 'fetch-incidents': # Set and define the fetch incidents command to run after activated via integration settings. fetch_incident_command = demisto.params().get('fetch_incident_command') max_results = arg_to_int( arg=demisto.params().get('max_fetch'), arg_name='max_fetch', required=False ) if not max_results or max_results > MAX_INCIDENTS_TO_FETCH: max_results = MAX_INCIDENTS_TO_FETCH next_run, incidents = fetch_incidents( client=client, max_results=max_results, last_run=demisto.getLastRun(), # getLastRun() gets the last run dict first_fetch_time=first_fetch_time, command_type=fetch_incident_command ) demisto.setLastRun(next_run) demisto.incidents(incidents) elif demisto.command() == 'gra-fetch-users': fetch_records(client, '/users', 'Gra.Users', 'employeeId', params) elif demisto.command() == 'gra-fetch-accounts': fetch_records(client, '/accounts', 'Gra.Accounts', 'id', params) elif demisto.command() == 'gra-fetch-active-resource-accounts': resource_name = arguments.get('resource_name', 'Windows Security') active_resource_url = '/resources/' + resource_name + '/accounts' fetch_records(client, active_resource_url, 'Gra.Active.Resource.Accounts', 'id', params) elif demisto.command() == 'gra-fetch-user-accounts': employee_id = arguments.get('employee_id') user_account_url = '/users/' + employee_id + '/accounts' fetch_records(client, user_account_url, 'Gra.User.Accounts', 'id', params) elif demisto.command() == 'gra-fetch-resource-highrisk-accounts': res_name = arguments.get('Resource_name', 'Windows Security') high_risk_account_resource_url = '/resources/' + res_name + '/accounts/highrisk' fetch_records(client, high_risk_account_resource_url, 'Gra.Resource.Highrisk.Accounts', 'id', params) elif demisto.command() == 'gra-fetch-hpa': fetch_records(client, '/accounts/highprivileged', 'Gra.Hpa', 'id', params) elif demisto.command() == 'gra-fetch-resource-hpa': resource_name = arguments.get('Resource_name', 'Windows Security') resource_hpa = '/resources/' + resource_name + '/accounts/highprivileged' fetch_records(client, resource_hpa, 'Gra.Resource.Hpa', 'id', params) elif demisto.command() == 'gra-fetch-orphan-accounts': fetch_records(client, '/accounts/orphan', 'Gra.Orphan.Accounts', 'id', params) elif demisto.command() == 'gra-fetch-resource-orphan-accounts': resource_name = arguments.get('resource_name', 'Windows Security') resource_orphan = '/resources/' + resource_name + '/accounts/orphan' fetch_records(client, resource_orphan, 'Gra.Resource.Orphan.Accounts', 'id', params) elif demisto.command() == 'gra-user-activities': employee_id = arguments.get('employee_id') user_activities_url = '/user/' + employee_id + '/activity' 
fetch_records(client, user_activities_url, 'Gra.User.Activity', 'employee_id', params) elif demisto.command() == 'gra-fetch-users-details': employee_id = arguments.get('employee_id') fetch_records(client, '/users/' + employee_id, 'Gra.User', 'employeeId', params) elif demisto.command() == 'gra-highRisk-users': fetch_records(client, '/users/highrisk', 'Gra.Highrisk.Users', 'employeeId', params) elif demisto.command() == 'gra-cases': status = arguments.get('status') cases_url = '/cases/' + status fetch_records(client, cases_url, 'Gra.Cases', 'caseId', params) elif demisto.command() == 'gra-user-anomalies': employee_id = arguments.get('employee_id') anomaly_url = '/users/' + employee_id + '/anomalies/' fetch_records(client, anomaly_url, 'Gra.User.Anomalies', 'anomaly_name', params) elif demisto.command() == 'gra-case-action': action = arguments.get('action') caseId = arguments.get('caseId') subOption = arguments.get('subOption') caseComment = arguments.get('caseComment') riskAcceptDate = arguments.get('riskAcceptDate') cases_url = '/cases/' + action post_url = { "caseId": caseId, "subOption": subOption, "caseComment": caseComment, } if action == 'riskManageCase': post_url["riskAcceptDate"] = riskAcceptDate fetch_post_records(client, cases_url, 'Gra.Case.Action', 'caseId', params, json.dumps(post_url)) elif demisto.command() == 'gra-case-action-anomaly': action = arguments.get('action') caseId = arguments.get('caseId') anomalyNames = arguments.get('anomalyNames') subOption = arguments.get('subOption') caseComment = arguments.get('caseComment') riskAcceptDate = arguments.get('riskAcceptDate') cases_url = '/cases/' + action if action == 'riskAcceptCaseAnomaly': post_url = '{"caseId":' + caseId + ',"anomalyNames":' + anomalyNames + ',"subOption":"' + subOption +\ '","caseComment":"' + caseComment + '","riskAcceptDate":"' + riskAcceptDate + '"}' else: post_url = '{"caseId":' + caseId + ',"anomalyNames":"' + anomalyNames + '","subOption":"' + \ subOption + '","caseComment":"' + caseComment + '"}' fetch_post_records(client, cases_url, 'Gra.Cases.Action.Anomaly', 'caseId', params, post_url) elif demisto.command() == 'gra-investigate-anomaly-summary': fromDate = arguments.get('fromDate') toDate = arguments.get('toDate') modelName = arguments.get('modelName') if fromDate is not None and toDate is not None: investigateAnomaly_url = '/investigateAnomaly/anomalySummary/' + modelName + '?fromDate=' + fromDate \ + ' 00:00:00&toDate=' + toDate + ' 23:59:59' else: investigateAnomaly_url = '/investigateAnomaly/anomalySummary/' + modelName fetch_records(client, investigateAnomaly_url, 'Gra.Investigate.Anomaly.Summary', 'modelId', params) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
57,765
def test_module(client: Client) -> str: """Tests API connectivity and authentication' Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful. Raises exceptions if something goes wrong. :type client: ``Client`` :param Client: GreatHorn client to use :return: 'ok' if test passed, anything else will fail the test. :rtype: ``str`` """ try: client.get_policy() except DemistoException as e: if 'Forbidden' in str(e): return 'Authorization Error: make sure API Key is correctly set' else: raise e return 'ok'
def test_module(client: Client) -> str: """Tests API connectivity and authentication' Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful. Raises exceptions if something goes wrong. :type client: ``Client`` :param Client: GreatHorn client to use :return: 'ok' if test passed, anything else will fail the test. :rtype: ``str`` """ try: client.get_policy() except DemistoException as e: if 'Forbidden' in str(e): raise 'Authorization Error: make sure API Key is correctly set' else: raise e return 'ok'
31,228
def get_connector_runs(client: Client, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]: """Get Connector Runs command. Args: client: Client which connects to api Returns: Human Readable Entry Context Raw Data """ connector_id = demisto.getArg("connector_id") url_suffix = '/connectors/%s/connector_runs' % connector_id human_readable = [] context: Dict[str, Any] = {} connectors: List[Dict[str, Any]] = client.http_request(message='GET', suffix=url_suffix) if connectors: keys = [ "id", "start_time", "end_time", "success", "total_payload_count", "processed_palyoad_count", "failed_payload_count", "processed_assets_count", "assets_with_tags_reset_count", "processed_scanner_vuln_count", "created_scanner_vuln_count", "closed_scanner_vuln_count", "autoclosed_scanner_vuln_count", "reopened_scanner_vuln_count", "closed_vuln_count", "autoclosed_vuln_count", "reopened_vuln_count" ] context_list = parse_response(connectors, keys, keys) for connector in connectors: curr_dict = { "id": connector.get("id"), "start_time": connector.get("start_time"), "end_time": connector.get("end_time"), "success": connector.get("success"), "total_payload_count": connector.get("total_payload_count"), "processed_payload_count": connector.get("total_payload_count"), "failed_payload_count": connector.get("failed_payload_count"), "processed_assets_count": connector.get("processed_assets_count"), "assets_with_tags_reset_count": connector.get("assets_with_tags_reset_count"), "processed_scanner_vuln_count": connector.get("processed_scanner_vuln_count"), "updated_scanner_vuln_count": connector.get("updated_scanner_vuln_count"), "created_scanner_vuln_count": connector.get("created_scanner_vuln_count"), "closed_scanner_vuln_count": connector.get("closed_scanner_vuln_count"), "autoclosed_scanner_vuln_count": connector.get("autoclosed_scanner_vuln_count"), "reopened_scanner_vuln_count": connector.get("reopened_scanner_vuln_count"), "closed_vuln_count": connector.get("closed_vuln_count"), "autoclosed_vuln_count": connector.get("closed_vuln_count"), "reopened_vuln_count": connector.get("reopened_vuln_count") } human_readable.append(curr_dict) context = { 'Kenna.ConnectorRunsList(val.ID === obj.ID)': context_list } human_readable_markdown = tableToMarkdown('Kenna Connector Runs', human_readable, removeNull=True) else: human_readable_markdown = "no connectors in get response." return human_readable_markdown, context, connectors
def get_connector_runs(client: Client, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]: """Get Connector Runs command. Args: client: Client which connects to api Returns: Human Readable Entry Context Raw Data """ connector_id = str(args.get("connector_id")) url_suffix = '/connectors/%s/connector_runs' % connector_id human_readable = [] context: Dict[str, Any] = {} connectors: List[Dict[str, Any]] = client.http_request(message='GET', suffix=url_suffix) if connectors: keys = [ "id", "start_time", "end_time", "success", "total_payload_count", "processed_palyoad_count", "failed_payload_count", "processed_assets_count", "assets_with_tags_reset_count", "processed_scanner_vuln_count", "created_scanner_vuln_count", "closed_scanner_vuln_count", "autoclosed_scanner_vuln_count", "reopened_scanner_vuln_count", "closed_vuln_count", "autoclosed_vuln_count", "reopened_vuln_count" ] context_list = parse_response(connectors, keys, keys) for connector in connectors: curr_dict = { "id": connector.get("id"), "start_time": connector.get("start_time"), "end_time": connector.get("end_time"), "success": connector.get("success"), "total_payload_count": connector.get("total_payload_count"), "processed_payload_count": connector.get("total_payload_count"), "failed_payload_count": connector.get("failed_payload_count"), "processed_assets_count": connector.get("processed_assets_count"), "assets_with_tags_reset_count": connector.get("assets_with_tags_reset_count"), "processed_scanner_vuln_count": connector.get("processed_scanner_vuln_count"), "updated_scanner_vuln_count": connector.get("updated_scanner_vuln_count"), "created_scanner_vuln_count": connector.get("created_scanner_vuln_count"), "closed_scanner_vuln_count": connector.get("closed_scanner_vuln_count"), "autoclosed_scanner_vuln_count": connector.get("autoclosed_scanner_vuln_count"), "reopened_scanner_vuln_count": connector.get("reopened_scanner_vuln_count"), "closed_vuln_count": connector.get("closed_vuln_count"), "autoclosed_vuln_count": connector.get("closed_vuln_count"), "reopened_vuln_count": connector.get("reopened_vuln_count") } human_readable.append(curr_dict) context = { 'Kenna.ConnectorRunsList(val.ID === obj.ID)': context_list } human_readable_markdown = tableToMarkdown('Kenna Connector Runs', human_readable, removeNull=True) else: human_readable_markdown = "no connectors in get response." return human_readable_markdown, context, connectors
31,366
def is_there_private_packs_to_upload(public_index_json, private_index_path): """ Checks if there are private packs that should be uploaded. The check compares the private index with the public one to verify if Content commit hash of each private pack in those files (private and public index files) are equal. If there is one private pack that has a different content commit hash, it tells us that this pack was updated and should be uploaded. So, an upload flow should NOT be skipped. Args: public_index_json (dict) : The public index file. private_index_path : Path to where the private index is located. Returns: (bool) True is there is at least one private pack that should be upload. False otherwise (i.e there are no private packs that should upload) """ logging.debug("Checking if there are private packs to upload") with open(os.path.join(private_index_path, f"{GCPConfig.INDEX_NAME}.json")) as private_index_file: private_index_json = json.load(private_index_file) if was_private_pack_updated(private_index_json, public_index_json): logging.debug(f"There is at least one private pack that was updated, upload should not be skipped") return True return False
def is_there_private_packs_to_upload(public_index_json, private_index_path): """ Checks if there are private packs that should be uploaded. The check compares the private index with the public one to verify if Content commit hash of each private pack in those files (private and public index files) are equal. If there is one private pack that has a different content commit hash, it tells us that this pack was updated and should be uploaded. So, an upload flow should NOT be skipped. Args: public_index_json (dict) : The public index file. private_index_path : Path to where the private index.zip is located. Returns: (bool) True is there is at least one private pack that should be upload. False otherwise (i.e there are no private packs that should upload) """ logging.debug("Checking if there are private packs to upload") with open(os.path.join(private_index_path, f"{GCPConfig.INDEX_NAME}.json")) as private_index_file: private_index_json = json.load(private_index_file) if was_private_pack_updated(private_index_json, public_index_json): logging.debug(f"There is at least one private pack that was updated, upload should not be skipped") return True return False
5,862
def _dirstats_preprocessing(samples, normalize, axis): """ Preprocessing of input for directional stats functions. Performs input validation and if necesssary normalization. Used by directionalvar and directionalmean. Parameters ---------- samples : array Input array. Must be at least two-dimensional, and the last axis of the input must correspond with the dimensionality of the vector space. axis : int Axis along which the directional mean is computed. normalize: boolean If True, normalize the input to ensure that each observation is a unit vector. It the observations are already unit vectors, consider setting this to False to avoid unnecessary computation. """ samples = np.asarray(samples) if samples.ndim < 2: raise ValueError("samples must at least be two-dimensional. " f"Instead samples has shape: {samples.shape!r}") samples = np.moveaxis(samples, axis, 0) if normalize: samples = samples/np.linalg.norm(samples, axis=-1, keepdims=True) return samples
def _dirstats_preprocessing(samples, normalize, axis): """ Preprocessing of input for directional stats functions. Performs input validation and if necesssary normalization. Used by directionalvar and directionalmean. Parameters ---------- samples : array Input array. Must be at least two-dimensional, and the last axis of the input must correspond with the dimensionality of the vector space. axis : int Axis along which the directional statistic is computed. normalize: boolean If True, normalize the input to ensure that each observation is a unit vector. It the observations are already unit vectors, consider setting this to False to avoid unnecessary computation. """ samples = np.asarray(samples) if samples.ndim < 2: raise ValueError("samples must at least be two-dimensional. " f"Instead samples has shape: {samples.shape!r}") samples = np.moveaxis(samples, axis, 0) if normalize: samples = samples/np.linalg.norm(samples, axis=-1, keepdims=True) return samples
42,827
def backup_packages(backup_path, dry_run: bool = False, skip=False): """ Creates `packages` directory and places install list text files there. """ def run_cmd_if_no_dry_run(command, dest, dry_run) -> int: if dry_run: print_dry_run_copy_info(f"$ {command}", dest) # Return -1 for any processes depending on chained successful commands (npm) return -1 else: return run_cmd_write_stdout(command, dest) print_section_header("PACKAGES", Fore.BLUE) if not dry_run: overwrite_dir_prompt_if_needed(backup_path, skip) for mgr in ["gem"]: # deal with package managers that have spaces in them. print_pkg_mgr_backup(mgr) command = f"{mgr} list" dest = f"{backup_path}/{mgr.replace(' ', '-')}_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # brew print_pkg_mgr_backup("brew") command = f"brew bundle dump --file {backup_path}/brew_list.txt" dest = f"{backup_path}/brew_list.txt" if not dry_run: ret = run_cmd(command) if not ret: print_yellow("Package manager not present.") # cargo print_pkg_mgr_backup("cargo") command = "ls {}".format(home_prefix(".cargo/bin/")) dest = f"{backup_path}/cargo_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # pip print_pkg_mgr_backup("pip") command = "pip list --format=freeze" dest = f"{backup_path}/pip_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # pip3 print_pkg_mgr_backup("pip3") command = "pip3 list --format=freeze" dest = f"{backup_path}/pip3_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # npm print_pkg_mgr_backup("npm") command = "npm ls --global --parseable=true --depth=0" temp_file_path = f"{backup_path}/npm_temp_list.txt" # If command is successful, go to the next parsing step. npm_backup_cmd_success = run_cmd_if_no_dry_run(command, temp_file_path, dry_run) == 0 if npm_backup_cmd_success: npm_dest_file = f"{backup_path}/npm_list.txt" # Parse npm output with open(temp_file_path, mode="r+") as temp_file: # Skip first line of file temp_file.seek(1) with open(npm_dest_file, mode="w+") as dest: for line in temp_file: dest.write(line.split("/")[-1]) os.remove(temp_file_path) # atom package manager print_pkg_mgr_backup("Atom") command = "apm list --installed --bare" dest = f"{backup_path}/apm_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # vscode extensions print_pkg_mgr_backup("VSCode") command = "code --list-extensions --show-versions" dest = f"{backup_path}/vscode_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # macports print_pkg_mgr_backup("macports") command = "port installed requested" dest = f"{backup_path}/macports_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # system installs print_pkg_mgr_backup("System Applications") applications_path = get_applications_dir() command = "ls {}".format(applications_path) dest = f"{backup_path}/system_apps_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run)
def backup_packages(backup_path, dry_run: bool = False, skip=False): """ Creates `packages` directory and places install list text files there. """ def run_cmd_if_no_dry_run(command, dest, dry_run) -> int: if dry_run: print_dry_run_copy_info(f"$ {command}", dest) # Return -1 for any processes depending on chained successful commands (npm) return -1 else: return run_cmd_write_stdout(command, dest) print_section_header("PACKAGES", Fore.BLUE) if not dry_run: overwrite_dir_prompt_if_needed(backup_path, skip) for mgr in ["gem"]: # deal with package managers that have spaces in them. print_pkg_mgr_backup(mgr) command = f"{mgr} list" dest = f"{backup_path}/{mgr.replace(' ', '-')}_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # brew print_pkg_mgr_backup("brew") command = f"brew bundle dump --file {backup_path}/brew_list.txt" dest = f"{backup_path}/brew_list.txt" if not dry_run: ret = run_cmd(command) if not ret: print_yellow("brew package manager not found.") # cargo print_pkg_mgr_backup("cargo") command = "ls {}".format(home_prefix(".cargo/bin/")) dest = f"{backup_path}/cargo_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # pip print_pkg_mgr_backup("pip") command = "pip list --format=freeze" dest = f"{backup_path}/pip_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # pip3 print_pkg_mgr_backup("pip3") command = "pip3 list --format=freeze" dest = f"{backup_path}/pip3_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # npm print_pkg_mgr_backup("npm") command = "npm ls --global --parseable=true --depth=0" temp_file_path = f"{backup_path}/npm_temp_list.txt" # If command is successful, go to the next parsing step. npm_backup_cmd_success = run_cmd_if_no_dry_run(command, temp_file_path, dry_run) == 0 if npm_backup_cmd_success: npm_dest_file = f"{backup_path}/npm_list.txt" # Parse npm output with open(temp_file_path, mode="r+") as temp_file: # Skip first line of file temp_file.seek(1) with open(npm_dest_file, mode="w+") as dest: for line in temp_file: dest.write(line.split("/")[-1]) os.remove(temp_file_path) # atom package manager print_pkg_mgr_backup("Atom") command = "apm list --installed --bare" dest = f"{backup_path}/apm_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # vscode extensions print_pkg_mgr_backup("VSCode") command = "code --list-extensions --show-versions" dest = f"{backup_path}/vscode_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # macports print_pkg_mgr_backup("macports") command = "port installed requested" dest = f"{backup_path}/macports_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run) # system installs print_pkg_mgr_backup("System Applications") applications_path = get_applications_dir() command = "ls {}".format(applications_path) dest = f"{backup_path}/system_apps_list.txt" run_cmd_if_no_dry_run(command, dest, dry_run)
38,427
def register_keys(web3: Web3, keys: Optional[list]): def not_none(x): return x if x is not None else [] for key in not_none(keys): register_key(web3, key)
def register_keys(web3: Web3, keys: Optional[list]): def not_none(x): return x if x is not None else [] for key in keys or []: register_key(web3, key)
39,301
def vtk_points(points, deep=True): """Convert numpy or list of points to a vtkPoints object.""" if not isinstance(points, np.ndarray): points = np.array(points) # verify is numeric if not np.issubdtype(points.dtype, np.number): raise TypeError('Points must be a numeric type') # check dimensionality if points.ndim == 1: points = points.reshape((-1, 3)) elif points.ndim > 2: raise ValueError('Dimension of ``points`` should be 1 or 2, not ' f'{points.ndim}') # verify shape if points.shape[1] != 3: raise ValueError('Points array must contain three values per point. \n' f'Shape is {points.shape} and should be (X, 3)') # points must be contiguous if not points.flags['C_CONTIGUOUS']: points = np.ascontiguousarray(points) vtkpts = _vtk.vtkPoints() vtkpts.SetData(_vtk.numpy_to_vtk(points, deep=deep)) return vtkpts
def vtk_points(points, deep=True): """Convert numpy array or array-like to a vtkPoints object.""" if not isinstance(points, np.ndarray): points = np.array(points) # verify is numeric if not np.issubdtype(points.dtype, np.number): raise TypeError('Points must be a numeric type') # check dimensionality if points.ndim == 1: points = points.reshape((-1, 3)) elif points.ndim > 2: raise ValueError('Dimension of ``points`` should be 1 or 2, not ' f'{points.ndim}') # verify shape if points.shape[1] != 3: raise ValueError('Points array must contain three values per point. \n' f'Shape is {points.shape} and should be (X, 3)') # points must be contiguous if not points.flags['C_CONTIGUOUS']: points = np.ascontiguousarray(points) vtkpts = _vtk.vtkPoints() vtkpts.SetData(_vtk.numpy_to_vtk(points, deep=deep)) return vtkpts

Splits: 80% train, 10% validation, 10% test.


📦 Method-Level Change / Code Review Suggestion Dataset

📝 Overview

This dataset is designed for training or fine-tuning large language models (LLMs) on the task of automated code suggestion generation at the method level. Each entry in the dataset contains:

  • An original Python method extracted from a GitHub pull request
  • A revised version of the same method, incorporating the code review suggestions
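A minimal loading sketch with the 🤗 datasets library is shown below. The repository id is a placeholder and the column names (id, original, modified) are assumptions based on the preview above, so adjust both to match the published dataset.

```python
# Minimal sketch: load the dataset and inspect one row.
# "your-org/method-level-review-suggestions" is a placeholder repository id,
# and the column names below are assumptions -- adjust to the published schema.
from datasets import load_dataset

dataset = load_dataset("your-org/method-level-review-suggestions", split="train")

row = dataset[0]
print(row["id"])              # numeric identifier of the mined example
print(row["original"][:300])  # method before the review suggestion
print(row["modified"][:300])  # method after the suggestion was applied
```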

🎯 Purpose

To enable models to learn fine-grained, real-world code changes suggested during pull request reviews. Ideal for (see the formatting sketch after this list):

  • Method-level code generation
  • Code completion
  • Refactoring suggestions
  • Review automation
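For the fine-tuning use cases above, each row can be flattened into a simple input/target pair. The mapping below is only one possible format, not a prescribed one, and it again assumes the original/modified column names.

```python
# One possible supervised fine-tuning format; the prompt wording and the
# "original"/"modified" field names are assumptions, not a prescribed schema.
def to_training_example(row):
    prompt = (
        "Revise the following Python method as a code reviewer would:\n\n"
        + row["original"]
    )
    return {"prompt": prompt, "completion": row["modified"]}

# Applied lazily across a split loaded as in the snippet above:
# dataset = dataset.map(to_training_example)
```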

🔍 Source

Mined from public GitHub repositories using the GraphQL and REST APIs. Pull request review suggestions were extracted and aligned with method-level changes. For more on how suggestions work in GitHub PRs, see "Incorporating Feedback in Your Pull Request":
https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/incorporating-feedback-in-your-pull-request
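The mining pipeline itself is not distributed with the dataset. As an illustration only — not the code used to build it — the sketch below shows how review comments containing a suggestion block could be pulled for a single pull request via the GitHub REST API, assuming a personal access token in the GH_TOKEN environment variable (pagination omitted).

```python
# Illustrative sketch only (not the dataset's actual pipeline): fetch review
# comments for one PR and keep those that contain a GitHub suggestion block.
# Assumes a personal access token in GH_TOKEN; pagination is omitted.
import os
import requests

SUGGESTION_FENCE = "`" * 3 + "suggestion"  # opening fence of a suggestion block

def fetch_suggestions(owner: str, repo: str, pull_number: int):
    url = f"https://api.github.com/repos/{owner}/{repo}/pulls/{pull_number}/comments"
    headers = {
        "Authorization": f"Bearer {os.environ['GH_TOKEN']}",
        "Accept": "application/vnd.github+json",
    }
    comments = requests.get(url, headers=headers, timeout=30).json()
    results = []
    for comment in comments:
        body = comment.get("body", "")
        if SUGGESTION_FENCE in body:
            # Text between the opening fence and the next closing fence.
            block = body.split(SUGGESTION_FENCE, 1)[1]
            suggestion = block.split("`" * 3, 1)[0].strip("\n")
            results.append({
                "path": comment["path"],            # file the suggestion targets
                "diff_hunk": comment["diff_hunk"],  # surrounding diff context
                "suggestion": suggestion,           # proposed replacement lines
            })
    return results
```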
