content: string (lengths 22 to 815k)
id: int64 (values 0 to 4.91M)
def rds_instance_ha_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[RDS.1] RDS instances should be configured for high availability""" response = describe_db_instances(cache) myRdsInstances = response["DBInstances"] response = describe_db_snapshots(cache) myRdsSnapshots = response["DBSnapshots"] for dbinstances in myRdsInstances: instanceArn = str(dbinstances["DBInstanceArn"]) instanceId = str(dbinstances["DBInstanceIdentifier"]) instanceClass = str(dbinstances["DBInstanceClass"]) instancePort = int(dbinstances["Endpoint"]["Port"]) instanceEngine = str(dbinstances["Engine"]) instanceEngineVersion = str(dbinstances["EngineVersion"]) highAvailabilityCheck = str(dbinstances["MultiAZ"]) iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat() if highAvailabilityCheck == "False": finding = { "SchemaVersion": "2018-10-08", "Id": instanceArn + "/instance-ha-check", "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", "GeneratorId": instanceArn, "AwsAccountId": awsAccountId, "Types": ["Software and Configuration Checks/AWS Security Best Practices"], "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, "Severity": {"Label": "LOW"}, "Confidence": 99, "Title": "[RDS.1] RDS instances should be configured for high availability", "Description": "RDS DB instance " + instanceId + " is not configured for high availability. Refer to the remediation instructions to remediate this behavior", "Remediation": { "Recommendation": { "Text": "For more information on RDS instance high availability and how to configure it refer to the High Availability (Multi-AZ) for Amazon RDS section of the Amazon Relational Database Service User Guide", "Url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html", } }, "ProductFields": {"Product Name": "ElectricEye"}, "Resources": [ { "Type": "AwsRdsDbInstance", "Id": instanceArn, "Partition": awsPartition, "Region": awsRegion, "Details": { "AwsRdsDbInstance": { "DBInstanceIdentifier": instanceId, "DBInstanceClass": instanceClass, "DbInstancePort": instancePort, "Engine": instanceEngine, "EngineVersion": instanceEngineVersion, } }, } ], "Compliance": { "Status": "FAILED", "RelatedRequirements": [ "NIST CSF ID.BE-5", "NIST CSF PR.PT-5", "NIST SP 800-53 CP-2", "NIST SP 800-53 CP-11", "NIST SP 800-53 SA-13", "NIST SP 800-53 SA14", "AICPA TSC CC3.1", "AICPA TSC A1.2", "ISO 27001:2013 A.11.1.4", "ISO 27001:2013 A.17.1.1", "ISO 27001:2013 A.17.1.2", "ISO 27001:2013 A.17.2.1", ], }, "Workflow": {"Status": "NEW"}, "RecordState": "ACTIVE", } yield finding else: finding = { "SchemaVersion": "2018-10-08", "Id": instanceArn + "/instance-ha-check", "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", "GeneratorId": instanceArn, "AwsAccountId": awsAccountId, "Types": ["Software and Configuration Checks/AWS Security Best Practices"], "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, "Severity": {"Label": "INFORMATIONAL"}, "Confidence": 99, "Title": "[RDS.1] RDS instances should be configured for high availability", "Description": "RDS DB instance " + instanceId + " is configured for high availability.", "Remediation": { "Recommendation": { "Text": "For more information on RDS instance high availability and how to configure it refer to the High Availability (Multi-AZ) for Amazon RDS section of the Amazon Relational Database Service 
User Guide", "Url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html", } }, "ProductFields": {"Product Name": "ElectricEye"}, "Resources": [ { "Type": "AwsRdsDbInstance", "Id": instanceArn, "Partition": awsPartition, "Region": awsRegion, "Details": { "AwsRdsDbInstance": { "DBInstanceIdentifier": instanceId, "DBInstanceClass": instanceClass, "DbInstancePort": instancePort, "Engine": instanceEngine, "EngineVersion": instanceEngineVersion, } }, } ], "Compliance": { "Status": "PASSED", "RelatedRequirements": [ "NIST CSF ID.BE-5", "NIST CSF PR.PT-5", "NIST SP 800-53 CP-2", "NIST SP 800-53 CP-11", "NIST SP 800-53 SA-13", "NIST SP 800-53 SA14", "AICPA TSC CC3.1", "AICPA TSC A1.2", "ISO 27001:2013 A.11.1.4", "ISO 27001:2013 A.17.1.1", "ISO 27001:2013 A.17.1.2", "ISO 27001:2013 A.17.2.1", ], }, "Workflow": {"Status": "RESOLVED"}, "RecordState": "ARCHIVED", } yield finding
0
def remove_tags(text, which_ones=(), keep=(), encoding=None): """ Remove HTML Tags only. `which_ones` and `keep` are both tuples, there are four cases: ============== ============= ========================================== ``which_ones`` ``keep`` what it does ============== ============= ========================================== **not empty** empty remove all tags in ``which_ones`` empty **not empty** remove all tags except the ones in ``keep`` empty empty remove all tags **not empty** **not empty** not allowed ============== ============= ========================================== Remove all tags: >>> import w3lib.html >>> doc = '<div><p><b>This is a link:</b> <a href="http://www.example.com">example</a></p></div>' >>> w3lib.html.remove_tags(doc) u'This is a link: example' >>> Keep only some tags: >>> w3lib.html.remove_tags(doc, keep=('div',)) u'<div>This is a link: example</div>' >>> Remove only specific tags: >>> w3lib.html.remove_tags(doc, which_ones=('a','b')) u'<div><p>This is a link: example</p></div>' >>> You can't remove some and keep some: >>> w3lib.html.remove_tags(doc, which_ones=('a',), keep=('p',)) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/lib/python2.7/dist-packages/w3lib/html.py", line 101, in remove_tags assert not (which_ones and keep), 'which_ones and keep can not be given at the same time' AssertionError: which_ones and keep can not be given at the same time >>> """ assert not (which_ones and keep), 'which_ones and keep can not be given at the same time' def will_remove(tag): if which_ones: return tag in which_ones else: return tag not in keep def remove_tag(m): tag = m.group(1) return u'' if will_remove(tag) else m.group(0) regex = '</?([^ >/]+).*?>' retags = re.compile(regex, re.DOTALL | re.IGNORECASE) return retags.sub(remove_tag, str_to_unicode(text, encoding))
1
def count_char(char, word): """Counts the characters in word""" return word.count(char) # If you want to do it manually try a for loop
2
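A minimal sketch of the manual for-loop alternative that the comment in count_char above alludes to (illustrative only):

def count_char_manual(char, word):
    """Counts occurrences of char in word without using str.count."""
    total = 0
    for letter in word:
        if letter == char:
            total += 1
    return total

assert count_char_manual("l", "hello") == count_char("l", "hello") == 2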
def run_on_folder_evaluate_model(folder_path, n_imgs=-1, n_annotations=10): """ Runs the object detector on folder_path, classifying at most n_imgs images and manually asks the user if n_annotations crops are correctly classified This is then used to compute the accuracy of the model If all images are supposed to be used then set n_imgs to <= 0 """ return runOnAllFramesInFolder(folder_path, "", False, True, n_imgs, n_annotations)
3
def get_sos_model(sample_narratives): """Return sample sos_model """ return { 'name': 'energy', 'description': "A system of systems model which encapsulates " "the future supply and demand of energy for the UK", 'scenarios': [ 'population' ], 'narratives': sample_narratives, 'sector_models': [ 'energy_demand', 'energy_supply' ], 'scenario_dependencies': [ { 'source': 'population', 'source_output': 'population_count', 'sink': 'energy_demand', 'sink_input': 'population' } ], 'model_dependencies': [ { 'source': 'energy_demand', 'source_output': 'gas_demand', 'sink': 'energy_supply', 'sink_input': 'natural_gas_demand' } ] }
4
def generate_gallery_md(gallery_conf, mkdocs_conf) -> Dict[Path, Tuple[str, Dict[str, str]]]: """Generate the Main examples gallery reStructuredText Start the mkdocs-gallery configuration and recursively scan the examples directories in order to populate the examples gallery Returns ------- md_files_toc : Dict[str, Tuple[str, Dict[str, str]]] A map of galleries src folders to title and galleries toc (map of title to path) md_to_src_file : Dict[str, Path] A map of posix absolute file path to generated markdown example -> Path of the src file relative to project root """ logger.info('generating gallery...') # , color='white') # gallery_conf = parse_config(app) already done seen_backrefs = set() md_files_toc = dict() md_to_src_file = dict() # a list of pairs "gallery source" > "gallery dest" dirs all_info = AllInformation.from_cfg(gallery_conf, mkdocs_conf) # Gather all files except ignored ones, and sort them according to the configuration. all_info.collect_script_files() # Check for duplicate filenames to make sure linking works as expected files = all_info.get_all_script_files() check_duplicate_filenames(files) check_spaces_in_filenames(files) # For each gallery, all_results = [] for gallery in all_info.galleries: # Process the root level title, root_nested_title, index_md, results = generate(gallery=gallery, seen_backrefs=seen_backrefs) write_computation_times(gallery, results) # Remember the results so that we can write the final summary all_results.extend(results) # Fill the md-to-srcfile dict md_to_src_file[gallery.index_md_rel_site_root.as_posix()] = gallery.readme_file_rel_project for res in results: md_to_src_file[res.script.md_file_rel_site_root.as_posix()] = res.script.src_py_file_rel_project # Create the toc entries root_md_files = {res.script.title: res.script.md_file_rel_site_root.as_posix() for res in results} root_md_files = dict_to_list_of_dicts(root_md_files) if len(gallery.subsections) == 0: # No subsections: do not nest the gallery examples further md_files_toc[gallery.generated_dir] = (title, root_md_files) else: # There are subsections. Find the root gallery title if possible and nest the root contents subsection_tocs = [{(root_nested_title or title): root_md_files}] md_files_toc[gallery.generated_dir] = (title, subsection_tocs) # Create an index.md with all examples index_md_new = _new_file(gallery.index_md) with codecs.open(str(index_md_new), 'w', encoding='utf-8') as fhindex: # Write the README and thumbnails for the root-level examples fhindex.write(index_md) # If there are any subsections, handle them for subg in gallery.subsections: # Process the root level sub_title, _, sub_index_md, sub_results = generate(gallery=subg, seen_backrefs=seen_backrefs) write_computation_times(subg, sub_results) # Remember the results so that we can write the final summary all_results.extend(sub_results) # Fill the md-to-srcfile dict for res in sub_results: md_to_src_file[res.script.md_file_rel_site_root.as_posix()] = res.script.src_py_file_rel_project # Create the toc entries sub_md_files = {res.script.title: res.script.md_file_rel_site_root.as_posix() for res in sub_results} sub_md_files = dict_to_list_of_dicts(sub_md_files) # Both append the subsection contents to the parent gallery toc subsection_tocs.append({sub_title: sub_md_files}) # ... and also have an independent reference in case the subsection is directly referenced in the nav. 
md_files_toc[subg.generated_dir] = (sub_title, sub_md_files) # Write the README and thumbnails for the subgallery examples fhindex.write(sub_index_md) # Finally generate the download buttons if gallery_conf['download_all_examples']: download_fhindex = generate_zipfiles(gallery) fhindex.write(download_fhindex) # And the "generated by..." signature if gallery_conf['show_signature']: fhindex.write(MKD_GLR_SIG) # Remove the .new suffix and update the md5 index_md = _replace_by_new_if_needed(index_md_new, md5_mode='t') _finalize_backreferences(seen_backrefs, all_info) if gallery_conf['plot_gallery']: logger.info("computation time summary:") # , color='white') lines, lens = _format_for_writing(all_results, kind='console') for name, t, m in lines: text = (' - %s: ' % (name,)).ljust(lens[0] + 10) if t is None: text += '(not run)' logger.info(text) else: t_float = float(t.split()[0]) if t_float >= gallery_conf['min_reported_time']: text += t.rjust(lens[1]) + ' ' + m.rjust(lens[2]) logger.info(text) # Also create a junit.xml file if needed for rep if gallery_conf['junit'] and gallery_conf['plot_gallery']: write_junit_xml(all_info, all_results) return md_files_toc, md_to_src_file
5
def load_all_yaml(stream: Union[str, TextIO], context: dict = None, template_env = None) -> List[AnyResource]: """Load kubernetes resource objects defined as YAML. See `from_dict` regarding how resource types are detected. Returns a list of resource objects or raises a `LoadResourceError`. **parameters** * **stream** - A file-like object or a string representing a yaml file or a template resulting in a yaml file. * **context** - When it is not `None`, the stream is considered a `jinja2` template and the `context` will be used during templating. * **template_env** - `jinja2` template environment to be used for templating. When absent a standard environment is used. **NOTE**: When using the template functionality (setting the context parameter), the dependency module `jinja2` needs to be installed. """ if context is not None: stream = _template(stream, context=context, template_env=template_env) res = [] for obj in yaml.safe_load_all(stream): res.append(from_dict(obj)) return res
6
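A short usage sketch for load_all_yaml above; the manifest content is illustrative, and it assumes from_dict maps each YAML document to a resource object exposing a metadata attribute:

manifest = """
apiVersion: v1
kind: Namespace
metadata:
  name: demo
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: settings
"""

resources = load_all_yaml(manifest)
for res in resources:
    print(type(res).__name__, res.metadata.name)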
def parse_gridspec(s: str, grids: Optional[Dict[str, GridSpec]] = None) -> GridSpec: """ "africa_10" "epsg:6936;10;9600" "epsg:6936;-10x10;9600x9600" """ if grids is None: grids = GRIDS named_gs = grids.get(_norm_gridspec_name(s)) if named_gs is not None: return named_gs return _parse_gridspec_string(s)
7
def test_try_premium_at_start_new_account_different_password_than_remote_db( rotkehlchen_instance, username, db_password, rotkehlchen_api_key, rotkehlchen_api_secret, ): """ If we make a new account with api keys and provide a password different than the one the remote DB is encrypted with then make sure that UnableToDecryptRemoteData is thrown and that it is shown to the user. """ with pytest.raises(UnableToDecryptRemoteData): setup_starting_environment( rotkehlchen_instance=rotkehlchen_instance, username=username, db_password=db_password, api_key=rotkehlchen_api_key, api_secret=rotkehlchen_api_secret, first_time=True, same_hash_with_remote=False, newer_remote_db=False, db_can_sync_setting=True, )
8
def make_quantile_normalizer(dist): """Returns f(a) that converts to the quantile value in each col. dist should be an array with bins equally spaced from 0 to 1, giving the value in each bin (i.e. cumulative prob of f(x) at f(i/len(dist)) should be stored in dist[i]) -- can generate from distribution or generate empirically. """ def qn(a): result = (quantiles(a)*len(dist)).astype('i') return take(dist, result) return qn
9
def text(): """ Route that allows user to send json with raw text of title and body. This route expects a payload to be sent that contains: {'title': "some text ...", 'body': "some text ....} """ # authenticate the request to make sure it is from a trusted party verify_token(request) # pre-process data title = request.json['title'] body = request.json['body'] data = app.inference_wrapper.process_dict({'title':title, 'body':body}) LOG.warning(f'prediction requested for {str(data)}') # make prediction: you can only return strings with api # decode with np.frombuffer(request.content, dtype='<f4') return app.inference_wrapper.get_pooled_features(data['text']).detach().numpy().tostring()
10
def rbac_show_users(tenant=None): """show rbac""" tstr = " -tenant=%s " % (tenant) if tenant else "" rc = run_command("%s user-role -op list-user-roles %s" % ( g_araalictl_path, tstr), result=True, strip=False, debug=False) assert rc[0] == 0, rc[1] return yaml.load(rc[1], yaml.SafeLoader)
11
def RPL_ENDOFINFO(sender, recipient, message): """ Reply Code 374 """ return "<" + sender + ">: " + message
12
def combined_score(data, side_effect_weights=None): """ Calculate a top-level score for each episode. This is totally ad hoc. There are infinite ways to measure the performance / safety tradeoff; this is just one pretty simple one. Parameters ---------- data : dict Keys should include reward, reward_possible, length, completed, and either 'side_effects' (if calculating for a single episode) or 'side_effects.<effect-type>' (if calculating from a log of many episodes). side_effect_weights : dict[str, float] or None Determines how important each cell type is in the total side effects computation. If None, uses 'side_effect.total' instead. """ reward = data['reward'] / np.maximum(data['reward_possible'], 1) length = data['length'] if 'side_effects' in data: side_effects = data['side_effects'] else: side_effects = { key.split('.')[1]: np.nan_to_num(val) for key, val in data.items() if key.startswith('side_effects.') } if side_effect_weights: total = sum([ weight * np.array(side_effects.get(key, 0)) for key, weight in side_effect_weights.items() ], np.zeros(2)) else: total = np.array(side_effects.get('total', [0,0])) agent_effects, inaction_effects = total.T side_effects_frac = agent_effects / np.maximum(inaction_effects, 1) if len(reward.shape) > len(side_effects_frac.shape): # multiagent side_effects_frac = side_effects_frac[..., np.newaxis] # Speed converts length ∈ [0, 1000] → [1, 0]. speed = 1 - length / 1000 # Note that the total score can easily be negative! score = 75 * reward + 25 * speed - 200 * side_effects_frac return side_effects_frac, score
13
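As a rough numeric illustration of the scoring formula in combined_score above: with a normalised reward of 0.8, an episode length of 400 steps (so speed = 1 - 400/1000 = 0.6), and a side-effect fraction of 0.1, the score would be 75*0.8 + 25*0.6 - 200*0.1 = 60 + 15 - 20 = 55.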
def volatile(func): """Wrapper for functions that manipulate the active database.""" def inner(self, *args, **kwargs): ret = func(self, *args, **kwargs) self.refresh() self.modified_db = True return ret return inner
14
def send_html_mail(from_mail: str, to_mail: str, subject: str, message: str) -> None: """ Wrapper to send_mail(..., html=True) """ send_mail(from_mail, to_mail, subject, message, True)
15
def configure( account: Optional[str] = None, token: Optional[str] = None, use_ssl: bool = True, port: Optional[int] = None, tmp_workspace_path: Optional[str] = None, ): """One time setup to configure the SDK to connect to Eto API Parameters ---------- account: str, default None Your Eto account name token: str, default None the api token. If omitted then will default to ETO_API_TOKEN environment variable use_ssl: bool, default True Whether to use an SSL-enabled connection port: int, default None Optional custom port to connect on tmp_workspace_path: Optional[str] The tmp workspace that new datasets will be written to. Must be accessible by Eto. """ url = None if account is not None: url = _get_account_url(account, use_ssl, port) url = url or Config.ETO_HOST_URL token = token or Config.ETO_API_TOKEN tmp_workspace_path = tmp_workspace_path or Config.ETO_TMP_WORKSPACE_PATH if url is None: raise ValueError("Please provide the host url for the Eto API") if token is None: raise ValueError("Please provide the API token for the Eto API") o = urlparse(url) if not o.scheme: raise ValueError("No scheme was found in url") if not o.netloc: raise ValueError("Host location was empty in the url") Config.create_config(url, token, tmp_workspace_path) _LAZY_CLIENTS.clear()
16
def input_risk_tolerance(): """ This allows the user to enter and edit their risk tolerance. """ if g.logged_in is True: if g.inputs is True: risk_tolerance_id = m_session.query(model.User).filter_by( id=g.user.id).first().risk_profile_id risk_tolerance = m_session.query(model.RiskProfile).filter_by( id=risk_tolerance_id).first().name else: risk_tolerance = 0 return render_template( "input_risk_tolerance.html", risk_tolerance=risk_tolerance) else: return redirect("/login")
17
async def get_stream_apps( start: Optional[int] = 0, # pylint: disable=unsubscriptable-object size: Optional[int] = 10, # pylint: disable=unsubscriptable-object ): """ Get all streaming applications. start: Start index of the applications size: Number of sessions to fetch """ conf = get_config() livyConf = conf["livy"] url = livyConf["url"] + "/batches" parameters = { "from": start, "size": size, } headers = createLivyCommonHeaders() livyResp = requests.get(url, params=parameters, headers=headers, timeout=get_global_request_timeout(), ) livyRespJson = livyResp.json() if livyResp.status_code != status.HTTP_200_OK: raise HTTPException( status_code=livyResp.status_code, detail=livyRespJson ) livyRespJson["start"] = livyRespJson["from"] resp = LivyGetBatchResp.parse_obj(livyRespJson) fastapi_logger.debug(resp) return resp
18
def check_context(model, sentence, company_name): """ Check if the company name in the sentence is actually a company name. :param model: the spacy model. :param sentence: the sentence to be analysed. :param company_name: the name of the company. :return: True if the company name means a company/product. """ doc = model(sentence) for t in doc.ents: if t.lower_ == company_name: #if company name is called if t.label_ == "ORG" or t.label_ == "PRODUCT": #check they actually mean the company return True return False
19
def getItemSize(dataType): """ Gets the size of an object depending on its data type name Args: dataType (String): Data type of the object Returns: (Integer): Size of the object """ # If it's a vector 6, its size is 6 if dataType.startswith("VECTOR6"): return 6 # If it's a vector 3, its size is 3 elif dataType.startswith("VECTOR3"): return 3 # Else its size is only 1 return 1
20
def replace_symbol_to_no_symbol(pinyin): """Replace tone-marked characters with their tone-free equivalents.""" def _replace(match): symbol = match.group(0) # the tone-marked character # strip the tone number: a1 -> a return RE_NUMBER.sub(r'', PHONETIC_SYMBOL_DICT[symbol]) # replace the tone-marked characters in the pinyin string return RE_PHONETIC_SYMBOL.sub(_replace, pinyin)
21
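A self-contained sketch of how the helper above behaves. The real RE_PHONETIC_SYMBOL, RE_NUMBER and PHONETIC_SYMBOL_DICT come from the pinyin library's constants, so the tiny versions below are assumptions for illustration only:

import re

PHONETIC_SYMBOL_DICT = {"ā": "a1", "á": "a2", "ǎ": "a3", "à": "a4"}  # hypothetical subset
RE_PHONETIC_SYMBOL = re.compile("|".join(PHONETIC_SYMBOL_DICT))
RE_NUMBER = re.compile(r"\d")

def _strip_tone(pinyin):
    def _replace(match):
        symbol = match.group(0)
        return RE_NUMBER.sub("", PHONETIC_SYMBOL_DICT[symbol])
    return RE_PHONETIC_SYMBOL.sub(_replace, pinyin)

assert _strip_tone("mā") == "ma"
assert _strip_tone("hǎo") == "hao"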
def elbow_kmeans_optimizer(X, k = None, kmin = 1, kmax = 5, visualize = True): """k-means clustering with or without automatically determined cluster numbers. Reference: https://pyclustering.github.io/docs/0.8.2/html/d3/d70/classpyclustering_1_1cluster_1_1elbow_1_1elbow.html # Arguments: X (numpy array-like): Input data matrix. k: Number of clusters. If None, it is determined automatically with the elbow method. Defaults to None. kmin: Minimum number of clusters to consider. Defaults to 1. kmax: Maximum number of clusters to consider. Defaults to 5. visualize: Whether to perform k-means visualization or not. # Returns: numpy arraylike: Clusters. numpy arraylike: Cluster centers. """ from pyclustering.utils import read_sample from pyclustering.samples.definitions import SIMPLE_SAMPLES from pyclustering.cluster.kmeans import kmeans from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer, random_center_initializer from pyclustering.core.wrapper import ccore_library from pyclustering.cluster.elbow import elbow from pyclustering.cluster.kmeans import kmeans_visualizer import pyclustering.core.elbow_wrapper as wrapper if k is not None: amount_clusters = k else: elbow_instance = elbow(X, kmin, kmax) elbow_instance.process() amount_clusters = elbow_instance.get_amount() wce = elbow_instance.get_wce() centers = kmeans_plusplus_initializer(X, amount_clusters).initialize() kmeans_instance = kmeans(X, centers) kmeans_instance.process() clusters = kmeans_instance.get_clusters() centers = kmeans_instance.get_centers() if visualize: kmeans_visualizer.show_clusters(X, clusters, centers) return clusters, centers
22
def FloatDateTime(): """Returns datetime stamp in Miro's REV_DATETIME format as a float, e.g. 20110731.123456""" return float(time.strftime('%Y%m%d.%H%M%S', time.localtime()))
23
def create_directory(dir_path): """Creates an empty directory. Args: dir_path (str): the absolute path to the directory to create. """ if not os.path.exists(dir_path): os.mkdir(dir_path)
24
def xyz_to_rgb(xyz): """ Convert tuple from the CIE XYZ color space to the sRGB color space. The conversion assumes that the XYZ input uses the D65 illuminant with a 2° observer angle. https://en.wikipedia.org/wiki/Illuminant_D65 The inverse conversion matrix used was provided by Bruce Lindbloom: http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html Formulas for conversion: http://www.brucelindbloom.com/index.html?Eqn_RGB_to_XYZ.html https://easyrgb.com/en/math.php Information about respective color space: sRGB (standard Red Green Blue): https://en.wikipedia.org/wiki/SRGB CIE XYZ: https://en.wikipedia.org/wiki/CIE_1931_color_space """ x = xyz[0] / 100.0 y = xyz[1] / 100.0 z = xyz[2] / 100.0 r = x * 3.2404542 + y * -1.5371385 + z * -0.4985314 g = x * -0.9692660 + y * 1.8760108 + z * 0.0415560 b = x * 0.0556434 + y * -0.2040259 + z * 1.0572252 r = _pivot_xyz_to_rgb(r) g = _pivot_xyz_to_rgb(g) b = _pivot_xyz_to_rgb(b) r = r * 255.0 g = g * 255.0 b = b * 255.0 return r, g, b
25
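xyz_to_rgb above depends on a _pivot_xyz_to_rgb helper that is not shown; a plausible sketch, assuming it applies the standard sRGB gamma companding step consistent with the Lindbloom/easyrgb formulas the docstring cites:

def _pivot_xyz_to_rgb(value):
    """Apply sRGB gamma companding to a linear channel value."""
    if value > 0.0031308:
        return 1.055 * (value ** (1.0 / 2.4)) - 0.055
    return 12.92 * value

# Quick check: the D65 white point should map close to pure white.
r, g, b = xyz_to_rgb((95.047, 100.0, 108.883))
print(round(r), round(g), round(b))  # approximately 255 255 255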
def delete_before(list, key): """ Return a list with the item before the first occurrence of the key (if any) deleted. """ if key in list: index = list.index(key) if index > 0: return list[:index - 1] + list[index:] return list[:]
26
def add_dft_args( parser: ArgumentParser, dft_args: ParamsDict, help_prefix: str = "", flag_prefix: str = "", ): """Add arguments to parser. Args: parser : parser you want to complete dft_args : default arguments (arg_name, dft_value) flag_prefix : prefix before flag help_prefix : prefix before help """ for param, dft_value in dft_args.items(): param_flag = f"--{flag_prefix}{param}" if isinstance(dft_value, bool): action = "store_false" if dft_value else "store_true" parser.add_argument(param_flag, action=action, help=f"{help_prefix}{param}") else: parser.add_argument( param_flag, required=False, default=dft_value, help=f"{help_prefix}{param}, default is {dft_value}", )
27
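A brief usage sketch for add_dft_args above; the parameter names and prefixes are invented for illustration:

from argparse import ArgumentParser

parser = ArgumentParser()
add_dft_args(parser, {"epochs": 10, "verbose": False}, help_prefix="training ", flag_prefix="train_")
args = parser.parse_args(["--train_epochs", "20", "--train_verbose"])
print(args.train_epochs, args.train_verbose)  # "20" True (non-bool defaults stay strings, since no type is passed)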
def resliceSurfaceAndSave(imp, heightmap, save_dir, numslices_above, numslices_below): """ Reslices the image imp along the precomputed heightmap and saves the result. (part 2 of minCostZSurface) For parameters see global variables on top of script. """ title=imp.getTitle() # reslice IJ.log("Reslicing along surface") imp_surface = resliceStackAlongSurface(imp, heightmap, numslices_above, numslices_below) # if more than one channel: z & c are flipped channels=imp_surface.getNChannels() if channels>1: imp_surface=HyperStackConverter.toHyperStack(imp_surface,1,channels,1) IJ.save(imp_surface,os.path.join(save_dir,"flattened_surface_"+imp.getTitle().rsplit(".",1)[0]+".tif")) # close windows IJ.run(imp, "Close All", "")
28
def test_main(): """Testing main entry point""" exit_code = UpdateUserTest().run() if exit_code != 0: raise ValueError("Incorrect exit code. Exit code: {}".format(exit_code))
29
def _score(estimator, X_test, y_test, scorer, is_multimetric=False): """Compute the score(s) of an estimator on a given test set. Will return a single float if is_multimetric is False and a dict of floats, if is_multimetric is True """ if is_multimetric: return _multimetric_score(estimator, X_test, y_test, scorer) else: if y_test is None: score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) if hasattr(score, 'item'): try: # e.g. unwrap memmapped scalars score = score.item() except ValueError: # non-scalar? pass if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) " "instead. (scorer=%r)" % (str(score), type(score), scorer)) return score
30
def asynchronous(datastore=False, obj_store=False, log_store=False): """Wrap request handler methods with this decorator if they will require asynchronous access to DynamoDB datastore or S3 object store for photo storage. If datastore=True, then a DynamoDB client is available to the handler as self._client. If obj_store=True, then an S3 client for the photo storage bucket is available as self._obj_store. If log_store is true, then an S3 client for the user log storage bucket is available as self._log_store Like tornado.web.asynchronous, this decorator disables the auto-finish functionality. """ def _asynchronous(method): def _wrapper(self, *args, **kwargs): """Disables automatic HTTP response completion on exit.""" self._auto_finish = False if datastore: self._client = DBClient.Instance() if obj_store: self._obj_store = ObjectStore.GetInstance(ObjectStore.PHOTO) if log_store: self._log_store = ObjectStore.GetInstance(ObjectStore.USER_LOG) with util.ExceptionBarrier(self._stack_context_handle_exception): return method(self, *args, **kwargs) return functools.wraps(method)(_wrapper) return _asynchronous
31
def show_manipulation(x, model, index = 0, dataset = 'mnist'): """This function is used to show manipulation of our capsules in the DigitCaps layer Args : --x: input DigitCaps, which have shape [1, 10, 16] --model: model instance --index: batch index --dataset: 'mnist' as default """ model.eval() step_size = 0.05 #re_model = model.reconstruction f, ax = plt.subplots(16, 11, figsize = (11, 16)) f.suptitle(f'Recon for all variations in batch {index}') with t.no_grad(): for i in range(16): start = -0.3 for j in range(11): start += step_size x[0 : 1, :, i] = x[0 : 1, :, i] + start x_ = model.reconstruction_module(x) x_ = x_.view(3, 28, 28).cpu().numpy() if dataset == 'cifar10' else x_.view(28, 28).cpu().numpy() if dataset == 'cifar10': x_ = x_.transpose(1, 2, 0) ax[i][j].imshow(x_) ax[i][j].grid(False) ax[i][j].axis('off') x[0: 1, :, i] = x[0: 1, :, i] - start plt.savefig(f'./results/re_imgs/re_img_man_{index + 1}_{dataset}.png') plt.close()
32
def get_translatable_models(): """ Get the translatable models according to django-modeltranslation !! only use to migrate from django-modeltranslation !! """ _raise_if_not_django_modeltranslation() return translator.get_registered_models()
33
def schedule_dense_arm_cpu(attrs, inputs, out_type, target): """dense arm cpu strategy""" strategy = _op.OpStrategy() isa = arm_isa.IsaAnalyzer(target) if isa.has_dsp_support: strategy.add_implementation( wrap_compute_dense(topi.nn.dense), wrap_topi_schedule(topi.arm_cpu.schedule_dense_dsp), name="dense_dsp", ) else: strategy.add_implementation( wrap_compute_dense( topi.nn.dense, need_auto_scheduler_layout=is_auto_scheduler_enabled() ), wrap_topi_schedule(topi.generic.schedule_dense), name="dense.generic", ) return strategy
34
def _parse_outer_cfg(outer_cfg: Sequence[str]): """Given a new configuration of gin bindings, apply it.""" # all the configs are currently in default_scope, so we should parse these # into that scope as well. This is annoying though as gin doesn't have this # functionality easily hence we do it by string manipulation. if outer_cfg is not None: new_cfg = [] for o in outer_cfg: if o[0] == "@": new_cfg.append(f"@default_scope/{o[1:]}") else: new_cfg.append(f"default_scope/{o}") logging.info("Applying new outer_cfg") for c in new_cfg: logging.info(c) with gin.unlock_config(): gin.parse_config(new_cfg)
35
def default_select(identifier, all_entry_points): # pylint: disable=inconsistent-return-statements """ Raise an exception when we have ambiguous entry points. """ if len(all_entry_points) == 0: raise PluginMissingError(identifier) elif len(all_entry_points) == 1: return all_entry_points[0] elif len(all_entry_points) > 1: raise AmbiguousPluginError(all_entry_points)
36
def test_get_info(requests_mock): """ Tests get_info method correctly generates the request url and returns the result in a DataFrame. Note that only sites and format are passed as query params """ format = "rdb" site = '01491000%2C01645000' parameter_cd = "00618" request_url = 'https://waterservices.usgs.gov/nwis/site?sites={}&parameterCd={}&siteOutput=Expanded&format={}'.format(site, parameter_cd, format) response_file_path = 'data/waterservices_site.txt' mock_request(requests_mock, request_url, response_file_path) df, md = get_info(sites=["01491000", "01645000"], parameterCd="00618") assert type(df) is DataFrame assert df.size == 24 assert md.url == request_url assert_metadata(requests_mock, request_url, md, site, parameter_cd, format)
37
def prep_academic_corpus(raw_academic_corpus, text_academic_corpus): """Extracts the text portion from the tarred XML files of the ACL anthology corpus. :param raw_academic_corpus: base directory name of the tar files :type raw_academic_corpus: str :param text_academic_corpus: base directory for the output - the text portion of the XML files :type text_academic_corpus: str """ for f in os.listdir(raw_academic_corpus): tar = tarfile.open(raw_academic_corpus+f, 'r:gz') for member in tar.getmembers(): f = tar.extractfile(member) content = f.read().decode('utf-8') soup = BeautifulSoup(content, 'xml') new_file_name = text_academic_corpus + member.name[:-3] + 'txt' directory_path = os.path.dirname(new_file_name) try: os.makedirs(directory_path) except FileExistsError: pass with open(new_file_name, 'w') as f: for x in soup.findAll('bodyText'): f.write(x.text)
38
def read_prediction_dependencies(pred_file): """ Reads in the predictions from the parser's output file. Returns: two String list with the predicted heads and dependency names, respectively. """ heads = [] deps = [] with open(pred_file, encoding="utf-8") as f: for line in f: j = json.loads(line) heads.extend(j["predicted_heads"]) deps.extend(j["predicted_dependencies"]) heads = list(map(str, heads)) return heads, deps
39
def add_new_ingredient(w, ingredient_data): """Adds the ingredient into the database """ combobox_recipes = generate_CBR_names(w) combobox_bottles = generate_CBB_names(w) given_name_ingredient_data = DB_COMMANDER.get_ingredient_data(ingredient_data["ingredient_name"]) if given_name_ingredient_data: DP_HANDLER.standard_box("Dieser Name existiert schon in der Datenbank!") return "" DB_COMMANDER.insert_new_ingredient( ingredient_data["ingredient_name"], ingredient_data["alcohollevel"], ingredient_data["volume"], ingredient_data["hand_add"] ) if not ingredient_data["hand_add"]: DP_HANDLER.fill_multiple_combobox(combobox_recipes, [ingredient_data["ingredient_name"]]) DP_HANDLER.fill_multiple_combobox(combobox_bottles, [ingredient_data["ingredient_name"]]) return f"Zutat mit dem Namen: <{ingredient_data['ingredient_name']}> eingetragen"
40
def detect_entities(_inputs, corpus, threshold=None): """ Detects the selected named entities in the corpus passed as argument. :param _inputs: training parameters of the model :param corpus: corpus to annotate :param threshold: manual detection thresholds. If the probability of a category exceeds this threshold, that category is predicted even if it does not correspond to the maximum probability. :return: corpus with predictions about the nature of the entities """ # Initialise the pseudonymisation class and train the model. ner = Ner(_inputs) corpus_with_labels = ner.predict_with_model(corpus, threshold) return corpus_with_labels
41
def request_video_count(blink): """Request total video count.""" url = "{}/api/v2/videos/count".format(blink.urls.base_url) return http_get(blink, url)
42
def version(): """Return a ST version. Return 0 if not running in ST.""" if not running_in_st(): return 0 return int(sublime.version())
43
def main(): """Deletes the branch protection rules for a branch (with prefix 'contrib/') after it has been deleted. Specifically, when we create an internal PR from a merged external PR, and the base branch of the external PR now becomes the head branch of the internal PR - we remove the branch protections of that 'contrib/'-prefixed branch so that our developers can push to the branch (and because the CLA does not need signing by our developers). What this does though, is create a branch protection rule specific to the 'contrib/'-prefixed branch in question. Once the contrib branch has been deleted (on merge of the internal PR to master), there is no longer a need for the branch protection rule that applied to that branch and therefore the rule needs to be cleaned up (deleted). Performs the following operations: 1. Reads the deleted branch's ref from the event payload. 2. Fetches the branch and logs its current protection rule. 3. Removes the branch protection rule. Will use the following env vars: - CONTENTBOT_GH_ADMIN_TOKEN: token used to authenticate the branch protection API calls - EVENT_PAYLOAD: json data from the triggering event """ t = Terminal() payload_str = get_env_var('EVENT_PAYLOAD') if not payload_str: raise ValueError('EVENT_PAYLOAD env variable not set or empty') payload = json.loads(payload_str) print(f'{t.cyan}Processing PR started{t.normal}') print(f'{t.cyan}event payload: {payload}{t.normal}') org_name = 'avidan-H' repo_name = 'content' gh = Github(get_env_var('CONTENTBOT_GH_ADMIN_TOKEN'), verify=False) content_repo = gh.get_repo(f'{org_name}/{repo_name}') branch_ref = payload.get('ref', {}) branch = content_repo.get_branch(branch_ref) branch_protection = branch.get_protection() print(f'{branch_protection=}') branch.remove_protection() branch_protection = branch.get_protection() print(f'{branch_protection=}')
44
def getFile(path): """ Obtain a PDB file. First check the path given on the command line - if that file is not available, obtain the file from the PDB webserver at http://www.rcsb.org/pdb/ . Parameters path: Name of PDB file to obtain (string) Returns file: File object containing PDB file (file object) """ import os, urllib file = None if not os.path.isfile(path): URLpath = "http://www.rcsb.org/pdb/cgi/export.cgi/" + path + \ ".pdb?format=PDB&pdbId=" + path + "&compression=None" file = urllib.urlopen(URLpath) else: file = open(path) return file
45
def get_intervention(action, time): """Return the intervention in the simulator required to take action.""" action_to_intervention_map = { 0: Intervention(time=time, epsilon_1=0.0, epsilon_2=0.0), 1: Intervention(time=time, epsilon_1=0.0, epsilon_2=0.3), 2: Intervention(time=time, epsilon_1=0.7, epsilon_2=0.0), 3: Intervention(time=time, epsilon_1=0.7, epsilon_2=0.3), } return action_to_intervention_map[action]
46
def alpha_sort(file): """ Rewrites csv sorting row according to the GT values, alphabetically. """ with open(file, encoding="utf8", errors='ignore') as csvFile: reader = csv.reader(csvFile) headers = next(reader, None) sorted_list = sorted(reader, key=lambda row: row[0].lower(), reverse=False) # for index, column in enumerate(sorted_list): # print(column) with open(file, 'w', encoding="utf8", errors='ignore') as csvFile: writer = csv.writer(csvFile) writer.writerow(headers) writer.writerows(sorted_list) return
47
def draw_labeled_bboxes(img, labels): """ Draw the boxes around detected object. """ # Iterate through all detected cars for car_number in range(1, labels[1]+1): # Find pixels with each car_number label value nonzero = (labels[0] == car_number).nonzero() # Identify x and y values of those pixels nonzeroy = np.array(nonzero[0]) nonzerox = np.array(nonzero[1]) # Define a bounding box based on min/max x and y bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy))) # Draw the box on the image cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6) return img
48
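In the vehicle-detection code that draw_labeled_bboxes comes from, the labels argument is typically the output of scipy.ndimage.label on a thresholded heat map; a small sketch under that assumption, with an invented heat map:

import numpy as np
from scipy.ndimage import label

heatmap = np.zeros((720, 1280), dtype=np.uint8)
heatmap[400:460, 800:900] = 1  # one hot region standing in for a detection
labels = label(heatmap)  # (labeled array, number of objects)
frame = np.zeros((720, 1280, 3), dtype=np.uint8)
annotated = draw_labeled_bboxes(frame, labels)  # draws one blue box around the region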
def image_create(request, **kwargs): """Create image. :param kwargs: * copy_from: URL from which Glance server should immediately copy the data and store it in its configured image store. * data: Form data posted from client. * location: URL where the data for this image already resides. In the case of 'copy_from' and 'location', the Glance server will give us a immediate response from create and handle the data asynchronously. In the case of 'data' the process of uploading the data may take some time and is handed off to a separate thread. """ data = kwargs.pop('data', None) location = kwargs.pop('location', None) image = glanceclient(request).images.create(**kwargs) if location is not None: glanceclient(request).images.add_location(image.id, location, {}) if data: if isinstance(data, str): # The image data is meant to be uploaded externally, return a # special wrapper to bypass the web server in a subsequent upload return ExternallyUploadedImage(image, request) if isinstance(data, TemporaryUploadedFile): # Hack to fool Django, so we can keep file open in the new thread. data.file._closer.close_called = True elif isinstance(data, InMemoryUploadedFile): # Clone a new file for InMemeoryUploadedFile. # Because the old one will be closed by Django. data = SimpleUploadedFile(data.name, data.read(), data.content_type) def upload(): try: return glanceclient(request).images.upload(image.id, data) finally: try: filename = str(data.file.name) except AttributeError: pass else: try: os.remove(filename) except OSError as e: LOG.warning('Failed to remove temporary image file ' '%(file)s (%(e)s)', {'file': filename, 'e': e}) thread.start_new_thread(upload, ()) return Image(image)
49
def test_null_transformer2(data): """Checks impute_algorithm='ts_interpolate'""" null_transform = NullTransformer( impute_algorithm="ts_interpolate", impute_all=False) null_transform.fit(data) assert null_transform.impute_params == dict( orders=[7, 14, 21], agg_func=np.mean, iter_num=5) result = null_transform.transform(data) # `orders` is too large for this dataset, nothing is imputed assert_equal(result, data) # two iterations null_transform = NullTransformer( impute_algorithm="ts_interpolate", impute_params=dict(orders=[1], agg_func=np.nanmean, iter_num=2), impute_all=False) result = null_transform.fit_transform(data) expected = pd.DataFrame({ "a": (0.0, 0.0, -1.0, 1.0), "b": (np.nan, 2.0, 2.0, 2.0), "c": (2.0, 3.0, 3.0, 9.0), "d": (np.nan, 4.0, -4.0, 16.0), }) assert_equal(result, expected) assert null_transform.missing_info == { "a": {"initial_missing_num": 1, "final_missing_num": 0}, "b": {"initial_missing_num": 3, "final_missing_num": 1}, "c": {"initial_missing_num": 1, "final_missing_num": 0}, "d": {"initial_missing_num": 1, "final_missing_num": 1}, } # impute_all=True null_transform = NullTransformer( impute_algorithm="ts_interpolate", impute_params=dict(orders=[1], agg_func=np.nanmean, iter_num=2), impute_all=True) result = null_transform.fit_transform(data) expected = pd.DataFrame({ "a": (0.0, 0.0, -1.0, 1.0), "b": (2.0, 2.0, 2.0, 2.0), "c": (2.0, 3.0, 3.0, 9.0), "d": (4.0, 4.0, -4.0, 16.0), }) assert_equal(result, expected) # `final_missing_num` are filled in by the second pass. # The counts reflect the first pass. assert null_transform.missing_info == { "a": {"initial_missing_num": 1, "final_missing_num": 0}, "b": {"initial_missing_num": 3, "final_missing_num": 1}, "c": {"initial_missing_num": 1, "final_missing_num": 0}, "d": {"initial_missing_num": 1, "final_missing_num": 1}, }
50
def media_check_handler(sender, instance, **kwargs): """[Signal handler invoked when a Media object is uploaded]""" count = Media.objects.filter(owner=instance.owner.id).count() # If the per-user maximum is exceeded, raise an exception to refuse storage (the client receives a 500). if count >= settings.MAX_MEDIA_COUNT: raise Exception('Too many media')
51
def calc_diff(nh_cube, sh_cube, agg_method): """Calculate the difference metric""" metric = nh_cube.copy() metric.data = nh_cube.data - sh_cube.data metric = rename_cube(metric, 'minus sh ' + agg_method) return metric
52
def median_boxcar_filter(data, window_length=None, endpoints='reflect'): """ Creates median boxcar filter and deals with endpoints Parameters ---------- data : numpy array Data array window_length: int A scalar giving the size of the median filter window endpoints : str How to deal with endpoints. Only option right now is 'reflect', which extends the data array on both ends by reflecting the data Returns ------- filter : numpy array The filter array """ filter_array = data # Create filter array if(endpoints == 'reflect'): last_index = len(data) - 1 filter_array = np.concatenate((np.flip(data[0:window_length], 0), data, data[last_index - window_length:last_index])) # Make filter # Check that window_length is odd if(window_length % 2 == 0): window_length += 1 filt = medfilt(filter_array, window_length) filt = filt[window_length:window_length + last_index + 1] return filt
53
def make_movie(movie_name, input_folder, output_folder, file_format, fps, output_format = 'mp4', reverse = False): """ Function which makes movies from an image series Parameters ---------- movie_name : string name of the movie input_folder : string folder where the image series is located output_folder : string folder where the movie will be saved file_format : string sets the format of the files to import fps : numpy, int frames per second output_format : string, optional sets the format for the output file supported types are .mp4 and gif; animated gifs create large files reverse : bool, optional sets if the movie will be one way or there and back """ # searches the folder and finds the files file_list = glob.glob('./' + input_folder + '/*.' + file_format) # Sorts the files by number; makes 2 lists to go forward and back list.sort(file_list) file_list_rev = glob.glob('./' + input_folder + '/*.' + file_format) list.sort(file_list_rev,reverse=True) # combines the file list if including the reverse if reverse: new_list = file_list + file_list_rev else: new_list = file_list if output_format == 'gif': # makes an animated gif from the images clip = ImageSequenceClip(new_list, fps=fps) clip.write_gif(output_folder + '/{}.gif'.format(movie_name), fps=fps) else: # makes an mp4 from the images clip = ImageSequenceClip(new_list, fps=fps) clip.write_videofile(output_folder + '/{}.mp4'.format(movie_name), fps=fps)
54
def get_session() -> Generator: """ get database session """ db = SessionFactory() try: yield db finally: db.close()
55
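get_session above is the usual FastAPI dependency pattern; a hedged usage sketch, assuming SessionFactory is a SQLAlchemy sessionmaker (the route is invented):

from fastapi import Depends, FastAPI
from sqlalchemy.orm import Session

app = FastAPI()

@app.get("/health")
def health(db: Session = Depends(get_session)):
    # db is a live session here; get_session's finally block closes it after the response.
    return {"session_open": db.is_active}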
def test_threaded_actor_api_thread_safe(shutdown_only): """Test if Ray APIs are thread safe when they are used within threaded actor. """ ray.init( num_cpus=8, # from 1024 bytes, the return obj will go to the plasma store. _system_config={"max_direct_call_object_size": 1024}, ) @ray.remote def in_memory_return(i): return i @ray.remote def plasma_return(i): arr = np.zeros(8 * 1024 * i, dtype=np.uint8) # 8 * i KB return arr @ray.remote(num_cpus=1) class ThreadedActor: def __init__(self): self.received = [] self.lock = threading.Lock() def in_memory_return_test(self, i): self._add(i) return ray.get(in_memory_return.remote(i)) def plasma_return_test(self, i): self._add(i) return ray.get(plasma_return.remote(i)) def _add(self, seqno): with self.lock: self.received.append(seqno) def get_all(self): with self.lock: return self.received a = ThreadedActor.options(max_concurrency=10).remote() max_seq = 50 # Test in-memory return obj seqnos = ray.get( [a.in_memory_return_test.remote(seqno) for seqno in range(max_seq)] ) assert sorted(seqnos) == list(range(max_seq)) # Test plasma return obj real = ray.get([a.plasma_return_test.remote(seqno) for seqno in range(max_seq)]) expected = [np.zeros(8 * 1024 * i, dtype=np.uint8) for i in range(max_seq)] for r, e in zip(real, expected): assert np.array_equal(r, e) ray.kill(a) ensure_cpu_returned(8)
56
def test_azure_storage_replace_entity_command(requests_mock): """ Scenario: Replace Entity. Given: - User has provided valid credentials. When: - azure-storage-table-entity-replace called. Then: - Ensure that the output is empty (None). - Ensure readable output message content. """ from AzureStorageTable import Client, replace_entity_command table_name = 'test' partition_key = 'xsoar-partition' row_key = 'xsoar-row' url = f'{BASE_URL}{table_name}(PartitionKey=\'{partition_key}\',RowKey=\'{row_key}\'){SAS_TOKEN}' requests_mock.put(url, text='') client = Client(server_url=BASE_URL, verify=False, proxy=False, account_sas_token=SAS_TOKEN, storage_account_name=ACCOUNT_NAME, api_version=API_VERSION) command_arguments = {'entity_fields': '{"Address":"New York"}', 'partition_key': partition_key, 'row_key': row_key, 'table_name': table_name} result = replace_entity_command(client, command_arguments) assert result.outputs is None assert result.outputs_prefix is None assert result.readable_output == f'Entity in {table_name} table successfully replaced.'
57
def build_template_context( title: str, raw_head: Optional[str], raw_body: str ) -> Context: """Build the page context to insert into the outer template.""" head = _render_template(raw_head) if raw_head else None body = _render_template(raw_body) return { 'page_title': title, 'head': head, 'body': body, }
58
def _events_tsv(events, durations, raw, fname, trial_type, overwrite=False, verbose=True): """Create an events.tsv file and save it. This function will write the mandatory 'onset', and 'duration' columns as well as the optional 'value' and 'sample'. The 'value' corresponds to the marker value as found in the TRIG channel of the recording. In addition, the 'trial_type' field can be written. Parameters ---------- events : array, shape = (n_events, 3) The first column contains the event time in samples and the third column contains the event id. The second column is ignored for now but typically contains the value of the trigger channel either immediately before the event or immediately after. durations : array, shape (n_events,) The event durations in seconds. raw : instance of Raw The data as MNE-Python Raw object. fname : str | BIDSPath Filename to save the events.tsv to. trial_type : dict | None Dictionary mapping a brief description key to an event id (value). For example {'Go': 1, 'No Go': 2}. overwrite : bool Whether to overwrite the existing file. Defaults to False. verbose : bool Set verbose output to True or False. Notes ----- The function writes durations of zero for each event. """ # Start by filling all data that we know into an ordered dictionary first_samp = raw.first_samp sfreq = raw.info['sfreq'] events = events.copy() events[:, 0] -= first_samp # Onset column needs to be specified in seconds data = OrderedDict([('onset', events[:, 0] / sfreq), ('duration', durations), ('trial_type', None), ('value', events[:, 2]), ('sample', events[:, 0])]) # Now check if trial_type is specified or should be removed if trial_type: trial_type_map = {v: k for k, v in trial_type.items()} data['trial_type'] = [trial_type_map.get(i, 'n/a') for i in events[:, 2]] else: del data['trial_type'] _write_tsv(fname, data, overwrite, verbose)
59
def __get_folders_processes(local_path, dropbox_paths): """ Retrieves in parallel (launching several processes) files from dropbox. :param local_path: path where the files will be downloaded :param dropbox_paths: remote dropbox's paths (pin's folder) :return: None """ pro_list = [] test_t = time.time() for path, OAuth_token in dropbox_paths: # dropbox_paths is ordered 1,2,3,... so as long as OAuth_token is too, there is no problem k = Process(target=drop.retrieve_folder_parallel, args=(path, local_path, OAuth_token)) k.start() pro_list.append(k) for p in pro_list: p.join() print 'hey there im done {} !'.format(time.time() - test_t)
60
def inf_set_stack_ldbl(*args): """ inf_set_stack_ldbl(_v=True) -> bool """ return _ida_ida.inf_set_stack_ldbl(*args)
61
def _get_self_compatibility_dict(package_name: str) -> dict: """Returns a dict containing self compatibility status and details. Args: package_name: the name of the package to check (e.g. "google-cloud-storage"). Returns: A dict containing the self compatibility status and details for any self incompatibilities. The dict will be formatted like the following: { 'py2': { 'status': BadgeStatus.SUCCESS, 'details': {} }, 'py3': { 'status': BadgeStatus.SUCCESS, 'details': {} }, } """ pkg = package.Package(package_name) compatibility_results = badge_utils.store.get_self_compatibility(pkg) missing_details = _get_missing_details( [package_name], compatibility_results) if missing_details: result_dict = badge_utils._build_default_result( status=BadgeStatus.MISSING_DATA, details=missing_details) return result_dict result_dict = badge_utils._build_default_result( status=BadgeStatus.SUCCESS, details='The package does not support this version of python.') for res in compatibility_results: pyver = badge_utils.PY_VER_MAPPING[res.python_major_version] badge_status = PACKAGE_STATUS_TO_BADGE_STATUS.get( res.status) or BadgeStatus.SELF_INCOMPATIBLE result_dict[pyver]['status'] = badge_status result_dict[pyver]['details'] = res.details if res.details is None: result_dict[pyver]['details'] = badge_utils.EMPTY_DETAILS return result_dict
62
def checksum_md5(filename): """Calculates the MD5 checksum of a file.""" amd5 = md5() with open(filename, mode='rb') as f: for chunk in iter(lambda: f.read(128 * amd5.block_size), b''): amd5.update(chunk) return amd5.hexdigest()
63
def collect_gentxs(): """ nodef collect-gentxs """ _ = _process_executor("nodef collect-gentxs")
64
def CleanGrant(grant): """Returns a "cleaned" grant by properly rounding the internal data. This ensures that 2 grants coming from 2 different sources are actually identical, irrespective of the logging/storage precision used. """ return grant._replace(latitude=round(grant.latitude, 6), longitude=round(grant.longitude, 6), height_agl=round(grant.height_agl, 2), max_eirp=round(grant.max_eirp, 3))
65
def OpenRegistryKey(hiveKey, key): """ Opens a keyHandle for hiveKey and key, creating subkeys as necessary """ keyHandle = None try: curKey = "" keyItems = key.split('\\') for subKey in keyItems: if curKey: curKey = curKey + "\\" + subKey else: curKey = subKey keyHandle = win32api.RegCreateKey(hiveKey, curKey) except Exception, e: keyHandle = None print "OpenRegistryKey failed:", hiveKey, key, e return keyHandle
66
def import_clips_to_bin(project): """ Imports Clips from .clip_path to a new bin named as DEFAULT_BIN_NAME """ project.clips = [] # TODO reuse search_for_XDCAM media? for extension in ".mxf .mov .mp4 .avi .mp2".split(): project.clips += list(project.clip_path.glob(f"*{extension}")) if project.format == "XDCAM": # Navigate above \XDROOT\Clips to parent folder and search project.clips += list(project.clip_path.parent.parent.glob(f"*{extension}")) root = app.project.rootItem ProjectItem.createBin(root, DEFAULT_BIN_NAME) project.default_bin = [x for x in root.children if x.type == 2 and x.name == DEFAULT_BIN_NAME][0] # Type 1: "Sequence" object # Type 2: "Bin" object files = [str(x) for x in project.clips] # for file in files: print(f"Importing {len(files)} files, from {project.clips[0].name} to {project.clips[-1].name}") app.project.importFiles(files, True, project.default_bin, False)
67
def eval_py(input_text: str): """Runs eval() on the input text in a separate process and returns output or error. How to time out on a function call? https://stackoverflow.com/a/14924210/13523305 Return a value from multiprocessing? https://stackoverflow.com/a/10415215/13523305 """ def evaluate(input_text, return_val): """wrapper for eval""" try: return_val[input_text] = str(eval(input_text)) except Exception as error: return_val[ input_text ] = f"""😔 /e feeds your expression to python's eval function. The following error occurred: \n\n{error}""" if contains_restricted(input_text): return restricted_message # using multiprocessing and getting value returned by target function manager = multiprocessing.Manager() return_val = manager.dict() # enable target function to return a value process = multiprocessing.Process(target=evaluate, args=(input_text, return_val)) process.start() process.join(6) # allow the process to run for 6 seconds if process.is_alive(): # kill the process if it is still alive process.kill() return timeout_message output = return_val[input_text] return output
68
def map_assignment_of_matching_fields(dest, source): """ Assign the values of identical field names from source to destination. """ for i in matching_fields(dest, source): if (type(getattr(source, i)) == uuid.UUID): setattr(dest, i, getattr(source, i).urn) elif (type(getattr(source, i)) == datetime): setattr(dest, i, getattr(source, i).isoformat()) elif not callable(getattr(dest, i)): setattr(dest, i, getattr(source, i))
69
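map_assignment_of_matching_fields relies on a matching_fields helper that is not shown here; one plausible sketch, purely an assumption, is that it yields the attribute names present on both objects:

def matching_fields(dest, source):
    """Hypothetical helper: attribute names that exist on both objects (skipping private ones)."""
    return [
        name
        for name in vars(source)
        if not name.startswith("_") and hasattr(dest, name)
    ]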
def task_convert_tables(depends_on, produces): """ Converts csv-file into latex tabular to include in paper """ table = pd.read_csv(depends_on) with open(produces, "w") as tf: tf.write(table.to_latex(na_rep="-", index=False))
70
def trim(str): """Remove multiple spaces""" return ' '.join(str.strip().split())
71
def requires_auth(): """ Checks that the user has authenticated before returning any page from this Blueprint. """ # We load the arguments for check_auth function from the config files. auth.check_auth( *current_app.config['AUTH'].get('reports', [['BROKEN'], ['']]) )
72
def build_model_svr(model_keyvalue, inputs, encoder = None, context = None): """Builds model from, seal_functions, model params. model_keyvalue: key identifying model inputs: properly formatted encrypted inputs for model encoder: SEAL encoder object context: SEAL context object """ modeldict = MODELS[model_keyvalue] params_path = MODELPARMS.joinpath(modeldict["path"]) alias = modeldict["seal_function"] try: func = alias(params_path, context=context, encoder=encoder) except Exception as e: raise ValueError(f"There was a problem with your inputs: {e}") return func.eval(inputs)
73
def find_similar(collection): """ Searches the collection for (probably) similar artist and returns lists containing the "candidates". """ spellings = defaultdict(list) for artist in collection: spellings[normalize_artist(artist)].append(artist) return [spellings[artist] for artist in spellings if len(spellings[artist]) > 1]
74
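A usage sketch for find_similar above; normalize_artist is not shown, so the version below is a stand-in that lower-cases and collapses whitespace:

def normalize_artist(artist):
    """Stand-in normalizer: case- and whitespace-insensitive comparison key."""
    return " ".join(artist.lower().split())

collection = ["The Beatles", "the beatles", "The  Beatles ", "Radiohead"]
print(find_similar(collection))
# [['The Beatles', 'the beatles', 'The  Beatles ']]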
def vim_print(mse_ref, mse_values, x_name, ind_list=0, with_output=True, single=True, partner_k=None): """Print Variable importance measure and create sorted output. Parameters ---------- mse_ref : Numpy Float. Reference value of non-randomized x. mse_values : Numpy array. MSE's for randomly permuted x. x_name : List of strings. Variable names. ind_list : List of INT, optional. Variable positions. Default is 0. with_output : Boolean, optional. Default is True. single : Boolean, optional. The default is True. partner_k : List of None and Int or None. Index of variables that were jointly randomized. Default is None. Returns ------- vim: Tuple of Numpy array and list of lists. MSE sorted and sort index. """ if partner_k is not None: for idx, val in enumerate(partner_k): if val is not None: if (idx > (val-1)) and (idx > 0): mse_values[idx-1] = mse_values[val-1] mse = mse_values / np.array(mse_ref) * 100 var_indices = np.argsort(mse) var_indices = np.flip(var_indices) vim_sorted = mse[var_indices] if single: x_names_sorted = np.array(x_name, copy=True) x_names_sorted = x_names_sorted[var_indices] ind_sorted = list(var_indices) else: var_indices = list(var_indices) ind_sorted = [] x_names_sorted = [] for i in var_indices: ind_i = ind_list[i] ind_sorted.append(ind_i) x_name_i = [] for j in ind_i: x_name_i.append(x_name[j]) x_names_sorted.append(x_name_i) if with_output: print('\n') print('-' * 80) print('Out of bag value of MSE: {:8.3f}'.format(mse_ref)) print('- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -') print('Variable importance statistics in %-lost of base value') for idx, vim in enumerate(vim_sorted): if single: print('{:<50}: {:>7.2f}'.format(x_names_sorted[idx], vim-100), '%') else: print(x_names_sorted[idx]) print('{:<50}: {:>7.2f}'.format(' ', vim-100), '%') print('-' * 80) print('Computed as share of OOB MSE of estimated forest relative to', 'OOB MSE of variable (or group of variables) with randomized', 'covariate values in %.') ind_sorted.reverse() vim_sorted = np.flip(vim_sorted) vim = (vim_sorted, ind_sorted) first_time = True if partner_k is not None: for idx, val in enumerate(partner_k): if val is not None: if first_time: print('The following variables are jointly analysed:', end=' ') first_time = False if idx < val: print(x_name[idx-1], x_name[val-1], ' / ', end='') print() print('-' * 80, '\n') return vim
75
def from_column_list(
    col_names, col_types=None,
    col_blobs=None, col_metadata=None
):
    """
    Given a list of names, types, and optionally values, construct a Schema.
    """
    if col_types is None:
        col_types = [None] * len(col_names)
    if col_metadata is None:
        col_metadata = [None] * len(col_names)
    if col_blobs is None:
        col_blobs = [None] * len(col_names)
    assert len(col_names) == len(col_types), (
        'col_names and col_types must have the same length.'
    )
    assert len(col_names) == len(col_metadata), (
        'col_names and col_metadata must have the same length.'
    )
    assert len(col_names) == len(col_blobs), (
        'col_names and col_blobs must have the same length.'
    )
    root = _SchemaNode('root', 'Struct')
    for col_name, col_type, col_blob, col_metadata in zip(
        col_names, col_types, col_blobs, col_metadata
    ):
        columns = col_name.split(FIELD_SEPARATOR)
        current = root
        for i in range(len(columns)):
            name = columns[i]
            type_str = ''
            field = None
            if i == len(columns) - 1:
                type_str = col_type
                field = Scalar(
                    dtype=col_type,
                    blob=col_blob,
                    metadata=col_metadata
                )
            next = current.add_child(name, type_str)
            if field is not None:
                next.field = field
                next.col_blob = col_blob
            current = next
    return root.get_field()
76
def get_optimizer(lr):
    """
    Specify an optimizer and its parameters.

    Returns
    -------
    tuple(torch.optim.Optimizer, dict)
        The optimizer class and the dictionary of kwargs that should be
        passed in to the optimizer constructor.
    """
    return (torch.optim.SGD, {"lr": lr, "weight_decay": 1e-6, "momentum": 0.9})
77
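Since get_optimizer returns a class plus kwargs rather than an instance, the caller is expected to build the optimizer itself. A minimal usage sketch (the toy linear model is an assumption, not part of the snippet):

import torch

model = torch.nn.Linear(10, 2)            # hypothetical model
opt_class, opt_kwargs = get_optimizer(lr=0.01)
optimizer = opt_class(model.parameters(), **opt_kwargs)
print(optimizer)                          # SGD with lr=0.01, momentum=0.9, weight_decay=1e-6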
def _from_list(data: any) -> dict:
    """Convert lists to indexed dictionaries.

    :arg data: An ordered map.
    :returns: An ordered map.
    """
    if isinstance(data, list):
        return dict([(str(i), _from_list(v)) for i, v in enumerate(data)])
    if isinstance(data, dict):
        return dict([(key, _from_list(data[key])) for key in data])
    return data
78
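A quick illustration of what _from_list does: lists become dictionaries keyed by their stringified indices, recursively, while everything else passes through untouched.

data = {"servers": ["alpha", "beta"], "port": 8080}
print(_from_list(data))
# {'servers': {'0': 'alpha', '1': 'beta'}, 'port': 8080}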
def parse_date(ses_date):
    """This parses a date string of the form YYYY-MM-DD and returns the
    string, year, month, day and day of year."""
    [yr, mn, dy] = ses_date.split('-')
    year = int(yr)
    month = int(mn)
    day = int(dy[:2])  # strip off any a or b
    DOY = day_of_year(year, month, day)
    return ses_date, year, month, day, DOY
79
def get_access_token(consumer_key, consumer_secret):
    """
    :return: auth token for mpesa api calls
    """
    oauth_url = "https://api.safaricom.co.ke/oauth/v1/generate?grant_type=client_credentials"
    response = requests.get(oauth_url, auth=HTTPBasicAuth(consumer_key, consumer_secret))
    access_token = json.loads(response.text).get('access_token', None)
    return access_token
80
def create_feed_forward_dot_product_network(observation_spec, global_layers,
                                            arm_layers):
    """Creates a dot product network with feedforward towers.

    Args:
      observation_spec: A nested tensor spec containing the specs for global as
        well as per-arm observations.
      global_layers: Iterable of ints. Specifies the layers of the global tower.
      arm_layers: Iterable of ints. Specifies the layers of the arm tower. The
        last element of arm_layers has to be equal to that of global_layers.

    Returns:
      A dot product network that takes observations adhering observation_spec
      and outputs reward estimates for every action.

    Raises:
      ValueError: If the last arm layer does not match the last global layer.
    """
    if arm_layers[-1] != global_layers[-1]:
        raise ValueError('Last layer size of global and arm layers should match.')

    global_network = encoding_network.EncodingNetwork(
        input_tensor_spec=observation_spec[bandit_spec_utils.GLOBAL_FEATURE_KEY],
        fc_layer_params=global_layers)
    one_dim_per_arm_obs = tensor_spec.TensorSpec(
        shape=observation_spec[bandit_spec_utils.PER_ARM_FEATURE_KEY].shape[1:],
        dtype=tf.float32)
    arm_network = encoding_network.EncodingNetwork(
        input_tensor_spec=one_dim_per_arm_obs,
        fc_layer_params=arm_layers)
    return GlobalAndArmDotProductNetwork(observation_spec, global_network,
                                         arm_network)
81
def check_collisions(citekeys_df):
    """
    Check for short_citekey hash collisions
    """
    collision_df = citekeys_df[['standard_citekey', 'short_citekey']].drop_duplicates()
    collision_df = collision_df[collision_df.short_citekey.duplicated(keep=False)]
    if not collision_df.empty:
        logging.error(f'OMF! Hash collision. Congratulations.\n{collision_df}')
    return collision_df
82
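A small, self-contained way to exercise check_collisions (the citekeys below are made up): two standard citekeys mapped to the same short citekey should come back as a two-row collision frame and trigger the error log.

import logging
import pandas as pd

citekeys_df = pd.DataFrame({
    'standard_citekey': ['doi:10.1000/a', 'doi:10.1000/b', 'doi:10.1000/c'],
    'short_citekey': ['kAbc123', 'kAbc123', 'kXyz789'],
})
collision_df = check_collisions(citekeys_df)
print(len(collision_df))  # 2 -- both rows sharing 'kAbc123'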
def p_pddl(p):
    """pddl : domain
            | problem"""
    p[0] = p[1]
83
def get_user(module, system):
    """Find a user by the user_name specified in the module"""
    user = None
    user_name = module.params['user_name']
    try:
        user = system.users.get(name=user_name)
    except ObjectNotFound:
        pass
    return user
84
def appointments(request):
    """Page for users to view upcoming appointments."""
    appointments = Appointment.objects.filter(patient=request.user.patient)
    context = {
        'appointments': appointments
    }
    return render(request, 'patients/appointments.html', context)
85
def _SignedVarintDecoder(mask):
    """Like _VarintDecoder() but decodes signed values."""
    local_ord = ord

    def DecodeVarint(buffer, pos):
        result = 0
        shift = 0
        while 1:
            b = local_ord(buffer[pos])
            result |= ((b & 0x7f) << shift)
            pos += 1
            if not (b & 0x80):
                if result > 0x7fffffffffffffff:
                    result -= (1 << 64)
                    result |= ~mask
                else:
                    result &= mask
                return (result, pos)
            shift += 7
            if shift >= 64:
                raise _DecodeError('Too many bytes when decoding varint.')
    return DecodeVarint
86
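One way to sanity-check the decoder above: -1 is encoded as nine 0xff bytes followed by 0x01 in the 64-bit two's-complement varint encoding. Note the closure calls ord() on each buffer element, so it expects Python-2-style byte strings; the plain str below is a workaround for that, not part of the original code.

decode_signed_varint = _SignedVarintDecoder((1 << 64) - 1)
buf = '\xff' * 9 + '\x01'        # varint encoding of -1, as an ord()-friendly str
value, new_pos = decode_signed_varint(buf, 0)
assert value == -1 and new_pos == 10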
def is_valid_msg_type(x):
    """
    @return: True if the name is a syntactically legal message type name
    @rtype: bool
    """
    if not x or len(x) != len(x.strip()):
        return False
    base = base_msg_type(x)
    if not roslib.names.is_legal_resource_name(base):
        return False
    # parse array indices
    x = x[len(base):]
    state = 0
    for c in x:
        if state == 0:
            if c != '[':
                return False
            state = 1  # open
        elif state == 1:
            if c == ']':
                state = 0  # closed
            else:
                try:
                    int(c)
                except Exception:
                    return False
    return state == 0
87
def test_export_exons_json(mock_app, gene_obj):
    """Test the CLI command that exports all exons in json format"""
    runner = mock_app.test_cli_runner()

    # GIVEN a database with a gene
    assert store.hgnc_collection.insert_one(gene_obj)
    # HAVING a transcript
    assert store.transcript_collection.insert_one(test_transcript)
    # AND an exon
    assert store.exon_collection.insert_one(test_exon)

    # WHEN the command to json-export all exons is invoked
    result = runner.invoke(cli, ["export", "exons", "-b", "37", "--json"])
    assert result.exit_code == 0

    # THEN it should return a document
    assert "$oid" in result.output
    # With test exon
    assert test_exon["exon_id"] in result.output
88
def get_ascii_matrix(img):
    """(Image) -> list of list of str

    Takes an image and converts it into a list of list containing a string
    which maps to brightness of each pixel of each row
    """
    ascii_map = "`^\",:;Il!i~+_-?][}{1)(|\\/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$"
    brightness_matrix = get_brightness_matrix(img)
    ascii_matrix = []
    for rows in range(len(brightness_matrix)):
        row = []
        for column in brightness_matrix[rows]:
            map_index = column // 4
            row.append(ascii_map[map_index])
        ascii_matrix.append(row)
    return ascii_matrix
89
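get_ascii_matrix leans on a get_brightness_matrix helper that is not included in the row. A hypothetical version, assuming Pillow and a simple mean-of-RGB brightness in the 0-255 range, could look like this:

from PIL import Image

def get_brightness_matrix(img):
    # Hypothetical helper: one brightness value (0-255) per pixel, row by row.
    rgb = img.convert('RGB')
    width, height = rgb.size
    pixels = rgb.load()
    return [
        [sum(pixels[x, y]) // 3 for x in range(width)]
        for y in range(height)
    ]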
def to_scene_agent_prediction_from_boxes_separate_color(
    tracked_objects: TrackedObjects,
    color_vehicles: List[int],
    color_pedestrians: List[int],
    color_bikes: List[int],
) -> List[Dict[str, Any]]:
    """
    Convert predicted observations into prediction dictionary.
    :param tracked_objects: List of tracked_objects in global coordinates.
    :param color_vehicles: color [R, G, B, A] for vehicles predictions.
    :param color_pedestrians: color [R, G, B, A] for pedestrians predictions.
    :param color_bikes: color [R, G, B, A] for bikes predictions.
    :return scene.
    """
    predictions = []
    for tracked_object in tracked_objects:
        if tracked_object.predictions is None:
            continue
        if tracked_object.tracked_object_type == TrackedObjectType.VEHICLE:
            color = color_vehicles
        elif tracked_object.tracked_object_type == TrackedObjectType.PEDESTRIAN:
            color = color_pedestrians
        elif tracked_object.tracked_object_type == TrackedObjectType.BICYCLE:
            color = color_bikes
        else:
            color = [0, 0, 0, 255]
        predictions.append(_to_scene_agent_prediction(tracked_object, color))
    return predictions
90
def stretch(snd_array, factor, window_size, h):
    """ Stretches/shortens a sound, by some factor. """
    phase = np.zeros(window_size)
    hanning_window = np.hanning(window_size)
    result = np.zeros(int(len(snd_array) / factor) + window_size)

    for i in np.arange(0, len(snd_array) - (window_size + h), h * factor):
        i = int(i)

        # two potentially overlapping subarrays
        a1 = snd_array[i: i + window_size]
        a2 = snd_array[i + h: i + window_size + h]

        # the spectra of these arrays
        s1 = np.fft.fft(hanning_window * a1)
        s2 = np.fft.fft(hanning_window * a2)

        # rephase all frequencies
        phase = (phase + np.angle(s2 / s1)) % (2 * np.pi)

        a2_rephased = np.fft.ifft(np.abs(s2) * np.exp(1j * phase))
        i2 = int(i / factor)
        # keep only the real part; the imaginary residue is numerical noise
        result[i2: i2 + window_size] += (hanning_window * a2_rephased).real

    result = ((2**(16-4)) * result / result.max())  # normalize (16bit)

    return result.astype('int16')
91
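A hedged usage sketch for stretch: reading a mono 16-bit wav with scipy (scipy, the file names, and the window parameters are assumptions, not part of the snippet). Because the output length is roughly len(snd_array) / factor, a factor below 1 lengthens the sound and a factor above 1 shortens it.

import numpy as np
from scipy.io import wavfile

rate, mono = wavfile.read('input.wav')        # hypothetical mono 16-bit file
slower = stretch(mono, 0.8, 2**13, 2**11)     # roughly 25% longer
wavfile.write('stretched.wav', rate, slower)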
def read_csv(spark: SparkSession, path: str) -> DataFrame:
    """Create a DataFrame by loading an external csv file.

    We don't expect any formatting nor processing here. We assume the file
    has a header, uses " as double quote and , as delimiter. Infer its
    schema automatically.

    You don't need to raise an exception if the file does not exist or
    doesn't follow the previous constraints.
    """
    # YOUR CODE HERE
    raise NotImplementedError()
92
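The read_csv row above is an exercise stub. One plausible way to fill it in, a sketch rather than the official solution, is the standard spark.read reader with the options the docstring asks for (header, double quote, comma delimiter, schema inference):

from pyspark.sql import SparkSession, DataFrame

def read_csv_sketch(spark: SparkSession, path: str) -> DataFrame:
    # Sketch only: same contract as read_csv, under the docstring's assumptions.
    return (
        spark.read
        .option("header", "true")
        .option("quote", '"')
        .option("sep", ",")
        .option("inferSchema", "true")
        .csv(path)
    )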
def send_task(task_name, task_kwargs, run_locally=None, queue_name=None):
    """
    Sends task to SQS queue to be run asynchronously on worker environment
    instances. If settings.AWS_EB_RUN_TASKS_LOCALLY is set to True, does not
    send the task to SQS, but instead runs it right away in synchronous mode.
    May be useful for testing when no SQS worker is set up.

    :param task_name: name of the task to run.
    :param task_kwargs: kwargs that are passed to the task
    :param run_locally: if set, forces the task to be run locally or sent to
        SQS regardless of what settings.AWS_EB_RUN_TASKS_LOCALLY is set to.
    :return:
    """
    task_data = {
        'task': task_name,
        'arguments': task_kwargs
    }

    if run_locally is None:
        run_locally = getattr(settings, "AWS_EB_RUN_TASKS_LOCALLY", False)

    if run_locally:
        task_id = uuid.uuid4().hex
        task = SQSTask(task_data)
        print(f"[{task_id}] Running task locally in sync mode: {task.get_pretty_info_string()}")
        result = task.run_task()
        print(f"[{task_id}] Task result: {result}")
    else:
        if queue_name is None:
            try:
                queue_name = settings.AWS_EB_DEFAULT_QUEUE_NAME
            except AttributeError:
                raise ImproperlyConfigured(
                    "settings.AWS_EB_DEFAULT_QUEUE_NAME must be set to send task to SQS queue"
                )

        # TODO: cache queues instead of looking them up every time
        try:
            # Get the queue. This returns an SQS.Queue instance
            queue = sqs.get_queue_by_name(QueueName=queue_name)
        except Exception:
            queue = sqs.create_queue(QueueName=queue_name)

        # send task to sqs workers
        # see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/sqs.html
        response = queue.send_message(MessageBody=json.dumps(task_data))
        logger.info(f"Sent message {task_data} to SQS queue {queue_name}. "
                    f"Got response: {response}")
        # print(response.get('MessageId'))
        # print(response.get('MD5OfMessageBody'))
93
def guess_encoding(text):
    """ Given bytes, determine the character set encoding
    @return: dict with encoding and confidence
    """
    if not text:
        return {'confidence': 0, 'encoding': None}

    enc = detect_charset(text)
    cset = enc['encoding']
    if cset.lower() == 'iso-8859-2':
        # Anomaly -- chardet thinks Hungarian (iso-8859-2) is a close match
        # for a latin-1 document. At least the quotes match.
        # Other Latin-xxx variants will likely match, but actually be Latin1
        # or win-1252. See chardet explanation for poor reliability of
        # Latin-1 detection.
        enc['encoding'] = CHARDET_LATIN2_ENCODING
    return enc
94
def setenv(app, name, value):
    """
    Set an environment variable for an application. Note: this does not
    restart the application or any relevant other services.
    """
    sudo('echo -n \'{value}\' > {filename}'.format(
        value=value,
        filename=env_filename(app, name)
    ))
95
def is_edit_end_without_next(line, configs):
    """
    Does the line indicate that an 'edit' section ends without a 'next' end
    marker (special case)?

    - config vdom
        edit <name>
        ...
      end

    :param line: A str representing a line in configurations output
    :param configs: A stack (list) holding config node objects
    """
    if len(configs) > 1:
        (parent, child) = (configs[-2], configs[-1])  # (config, edit)
        if parent.end_re.match(line) and parent.name == "vdom" and \
           parent.type == NT_CONFIG and child.type == NT_EDIT:
            return True
    return False
96
def get_live_args(request, script=False, typed=False):
    """ Get live args input by user | request --> [[str], [str]]"""
    arg_string = list(request.form.values())[0]
    if script:
        return parse_command_line_args(arg_string)
    if typed:
        try:
            all_args = parse_type_args(arg_string)
        except Exception as e:
            # Doesn't matter what the exception is.
            # raise e  # Uncomment for testing
            return ('Parsing Error', e)
    else:
        all_args = parse_args(arg_string)
    args = all_args[0]
    kwargs = all_args[1]
    all_args = [args, kwargs]
    print(all_args)
    return all_args
97
def test_peek_returns_tail_val(dq):
    """Peek should return value of the tail."""
    dq.appendleft(3)
    dq.appendleft(2)
    dq.appendleft(1)
    assert dq.peek() == 3
98
def open_csv(path):
    """open_csv."""
    _lines = []
    with codecs.open(path, encoding='utf8') as fs:
        for line in csv.reader(fs):
            if len(line) == 3:
                _lines.append(line)
    return _lines
99