def OnOpen(self, event):
    # Ask the user to save any unsaved changes before opening a new file.
    if undo.stack().haschanged():
        save_choice = self.interfaces.get_save_request_from_user()
        if save_choice is None:
            return
        elif save_choice:
            post_command_event(self.main_window, self.main_window.SaveMsg)

    # Build the wildcard string for the file dialog.
    f2w = get_filetypes2wildcards(
        ["pys", "pysu", "xls", "xlsx", "ods", "all"])
    filetypes = f2w.keys()
    wildcards = f2w.values()
    wildcard = "|".join(wildcards)

    message = _("Choose file to open.")
    style = wx.OPEN
    default_filetype = config["default_open_filetype"]
    try:
        default_filterindex = filetypes.index(default_filetype)
    except ValueError:
        default_filterindex = 0

    get_fp_fidx = self.interfaces.get_filepath_findex_from_user
    filepath, filterindex = get_fp_fidx(wildcard, message, style,
                                        filterindex=default_filterindex)
    if filepath is None:
        return
    filetype = filetypes[filterindex]

    # Load the file into the grid.
    self.main_window.filepath = filepath
    post_command_event(self.main_window,
                       self.main_window.GridActionOpenMsg,
                       attr={"filepath": filepath, "filetype": filetype})

    # Update the window title.
    title_text = filepath.split("/")[-1] + " - pyspread"
    post_command_event(self.main_window, self.main_window.TitleMsg,
                       text=title_text)

    self.main_window.grid.ForceRefresh()
    if is_gtk():
        try:
            wx.Yield()
        except Exception:  # wx.Yield may fail if called re-entrantly
            pass

    # Mark the freshly loaded document as unchanged.
    undo.stack().clear()
    undo.stack().savepoint()
    try:
        post_command_event(self.main_window, self.ContentChangedMsg)
    except TypeError:
        # The main window may be closing already.
        pass
File open event handler
def preprocess_legislation(legislation_json):
    import os
    import pkg_resources
    import pandas as pd

    # NOTE: many string literals (CSV file names, column names, dict keys)
    # were lost from this function in extraction.  Keys such as 'start',
    # 'stop', 'value' and 'children' are reconstructed from a surviving
    # description fragment; empty strings and the comments below mark
    # literals that could not be recovered, and the legislation_json node
    # path ('imposition_indirecte' etc.) is assumed.
    default_config_files_directory = os.path.join(
        pkg_resources.get_distribution('').location)  # distribution name lost

    # Annual fuel prices.
    prix_annuel_carburants = pd.read_csv(
        os.path.join(default_config_files_directory, '', '', '', ''),  # path parts lost
        sep='')  # separator lost
    prix_annuel_carburants[''] = prix_annuel_carburants[''].astype(int)  # year column name lost
    prix_annuel_carburants = prix_annuel_carburants.set_index('')
    all_values = {}
    prix_carburants = {
        "@type": "Node",
        "description": "prix des carburants en euros par hectolitre",
        "children": {},
    }
    # One price column per period (1990-2008, 2009-2012, 2013-2014); the
    # column names were lost.
    prix_annuel = prix_annuel_carburants['']
    all_values[''] = []
    for year in range(1990, 2009):
        values1 = dict()
        values1['start'] = u'{}-01-01'.format(year)
        values1['stop'] = u'{}-12-31'.format(year)
        values1['value'] = prix_annuel.loc[year] * 100
        all_values[''].append(values1)
    prix_annuel = prix_annuel_carburants['']
    for year in range(2009, 2013):
        values2 = dict()
        values2['start'] = u'{}-01-01'.format(year)
        values2['stop'] = u'{}-12-31'.format(year)
        values2['value'] = prix_annuel.loc[year] * 100
        all_values[''].append(values2)
    prix_annuel = prix_annuel_carburants['']
    for year in range(2013, 2015):
        values3 = dict()
        values3['start'] = u'{}-01-01'.format(year)
        values3['stop'] = u'{}-12-31'.format(year)
        values3['value'] = prix_annuel.loc[year] * 100
        all_values[''].append(values3)
    prix_carburants['children'][''] = {
        "@type": "Parameter",
        "description": ''.replace('_', ' '),  # column name lost; replace args assumed
        "format": "float",
        "values": all_values['']
    }
    # Remaining fuel price columns (the eleven column names were lost).
    for element in ['', '', '', '', '', '', '', '', '', '', '']:
        assert element in prix_annuel_carburants.columns
        prix_annuel = prix_annuel_carburants[element]
        all_values[element] = []
        for year in range(1990, 2015):
            values = dict()
            values['start'] = u'{}-01-01'.format(year)
            values['stop'] = u'{}-12-31'.format(year)
            values['value'] = prix_annuel.loc[year] * 100
            all_values[element].append(values)
        prix_carburants['children'][element] = {
            "@type": "Parameter",
            "description": element.replace('_', ' '),  # replace args assumed
            "format": "float",
            "values": all_values[element]
        }
    legislation_json['children']['imposition_indirecte'][
        'children']['prix_carburants'] = prix_carburants

    # Average vehicle fleet size.
    parc_annuel_moyen_vp = pd.read_csv(
        os.path.join(default_config_files_directory, '', '', '', ''),  # path parts lost
        sep='')
    parc_annuel_moyen_vp = parc_annuel_moyen_vp.set_index('')
    values_parc = {}
    parc_vp = {
        "@type": "Node",
        "description": "taille moyenne du parc automobile en France métropolitaine en milliers de véhicules",
        "children": {},
    }
    for element in ['diesel', 'essence']:  # column names reconstructed from the descriptions
        taille_parc = parc_annuel_moyen_vp[element]
        values_parc[element] = []
        for year in range(1990, 2014):
            values = dict()
            values['start'] = u'{}-01-01'.format(year)
            values['stop'] = u'{}-12-31'.format(year)
            values['value'] = taille_parc.loc[year]
            values_parc[element].append(values)
        parc_vp['children'][element] = {
            "@type": "Parameter",
            "description": "nombre de véhicules particuliers immatriculés en France à motorisation " + element,
            "format": "float",
            "values": values_parc[element]
        }
    legislation_json['children']['imposition_indirecte'][
        'children']['parc_vp'] = parc_vp

    # Fuel quantities consumed.
    quantite_carbu_vp_france = pd.read_csv(
        os.path.join(default_config_files_directory, '', '', '', ''),  # path parts lost
        sep='')
    quantite_carbu_vp_france = quantite_carbu_vp_france.set_index('')
    values_quantite = {}
    quantite_carbu_vp = {
        "@type": "Node",
        "description": "quantite de carburants consommés en France métropolitaine",
        "children": {},
    }
    for element in ['diesel', 'essence']:
        quantite_carburants = quantite_carbu_vp_france[element]
        values_quantite[element] = []
        for year in range(1990, 2014):
            values = dict()
            values['start'] = u'{}-01-01'.format(year)
            values['stop'] = u'{}-12-31'.format(year)
            values['value'] = quantite_carburants.loc[year]
            values_quantite[element].append(values)
        quantite_carbu_vp['children'][element] = {
            "@type": "Parameter",
            "description": "consommation totale de " + element + " en France",
            "format": "float",
            "values": values_quantite[element]
        }
    legislation_json['children']['imposition_indirecte'][
        'children']['quantite_carbu_vp'] = quantite_carbu_vp

    # Shares of the different petrol (supercarburant) types.
    part_des_types_de_supercarburants = pd.read_csv(
        os.path.join(default_config_files_directory, '', '', ''),  # path parts lost
        sep='')
    del part_des_types_de_supercarburants['']  # dropped column name lost
    part_des_types_de_supercarburants = \
        part_des_types_de_supercarburants[
            part_des_types_de_supercarburants[''] > 0].copy()  # year column
    part_des_types_de_supercarburants[''] = \
        part_des_types_de_supercarburants[''].astype(int)
    part_des_types_de_supercarburants = \
        part_des_types_de_supercarburants.set_index('')
    # Normalise each share by the total minus a residual column (both column
    # names lost).
    cols = part_des_types_de_supercarburants.columns
    for element in cols:
        part_des_types_de_supercarburants[element] = (
            part_des_types_de_supercarburants[element] /
            (part_des_types_de_supercarburants[''] -
             part_des_types_de_supercarburants[''])
        )
    del part_des_types_de_supercarburants['']
    del part_des_types_de_supercarburants['']
    cols = part_des_types_de_supercarburants.columns
    part_des_types_de_supercarburants['somme'] = 0  # check column; name assumed
    for element in cols:
        part_des_types_de_supercarburants['somme'] += \
            part_des_types_de_supercarburants[element]
    assert (part_des_types_de_supercarburants['somme'] == 1).any(), \
        "The weighting of the shares did not work"

    # The loop below was lost in extraction; it is reconstructed by analogy
    # with the loops above, using keys ('super_plombe', 'sp_95', 'sp_98',
    # 'sp_e10') recovered from a garbled description fragment.
    values_part_supercarburants = {}
    part_type_supercaburant = {
        "@type": "Node",
        "description": "part de la consommation totale d'essence par type de supercarburant",  # text partially lost
        "children": {},
    }
    for element in ['super_plombe', 'sp_95', 'sp_98', 'sp_e10']:
        part_par_type = part_des_types_de_supercarburants[element]
        values_part_supercarburants[element] = []
        for year in part_des_types_de_supercarburants.index:
            values = dict()
            values['start'] = u'{}-01-01'.format(year)
            values['stop'] = u'{}-12-31'.format(year)
            values['value'] = part_par_type.loc[year]
            values_part_supercarburants[element].append(values)
        part_type_supercaburant['children'][element] = {
            "@type": "Parameter",
            "description": "part " + element + " dans la consommation totale d'essences",  # text assumed
            "format": "float",
            "values": values_part_supercarburants[element]
        }
    legislation_json['children']['imposition_indirecte'][
        'children']['part_type_supercarburants'] = part_type_supercaburant

    # Alcohol: masses of duties and consumption from national accounts.
    # The per-entry years were lost in extraction; each series is annual and
    # the surviving values are kept in order.
    def _annual_values(values):
        return [{'start': u'', 'stop': u'', 'value': v} for v in values]

    alcool_conso_et_vin = {
        "@type": "Node",
        "description": "alcools",
        "children": {},
    }
    alcool_conso_et_vin['children']['vin'] = {  # node name reconstructed from the parameter names
        "@type": "Node",
        "description": "Pour calculer le taux de taxation implicite sur le vin",
        "children": {
            "droit_cn_vin": {
                "@type": "Parameter",
                "description": u"Masse droit vin, vin mousseux, cidres et poirés selon comptabilité nationale",
                "format": "float",
                "values": _annual_values([129, 130, 129, 132, 133, 127, 127,
                                          127, 127, 125, 117, 119, 117, 114,
                                          117, 119, 118, 120, 122]),
            },
            "masse_conso_cn_vin": {
                "@type": "Parameter",
                "description": u"Masse consommation vin, vin mousseux, cidres et poirés selon comptabilité nationale",
                "format": "float",
                "values": _annual_values([7191, 7419, 7636, 8025, 8451, 8854,
                                          9168, 9476, 9695, 9985, 9933, 10002,
                                          10345, 10461, 10728, 11002, 11387,
                                          11407, 11515]),
            },
        },
    }
    alcool_conso_et_vin['children']['biere'] = {
        "@type": "Node",
        "description": "Pour calculer le taux de taxation implicite sur la bière",
        "children": {
            "droit_cn_biere": {
                "@type": "Parameter",
                "description": "Masse droit biere selon comptabilité nationale",
                "format": "float",
                "values": _annual_values([361, 366, 364, 365, 380, 359, 364,
                                          361, 370, 378, 364, 396, 382, 375,
                                          376, 375, 393, 783, 897]),
            },
            "masse_conso_cn_biere": {
                "@type": "Parameter",
                "description": u"Masse consommation biere selon comptabilité nationale",
                "format": "float",
                "values": _annual_values([2111, 2144, 2186, 2291, 2334, 2290,
                                          2327, 2405, 2554, 2484, 2466, 2486,
                                          2458, 2287, 2375, 2461, 2769, 2868,
                                          3321]),
            },
        },
    }
    alcool_conso_et_vin['children']['alcools_forts'] = {
        "@type": "Node",
        "description": "Pour calculer le taux de taxation implicite sur alcools forts",
        "children": {
            "droit_cn_alcools": {
                "@type": "Parameter",
                "description": "Masse droit alcool selon comptabilité nationale sans droits sur les produits intermediaires et cotisation spéciale alcool fort",
                "format": "float",
                "values": _annual_values([1872, 1957, 1932, 1891, 1908, 1842,
                                          1954, 1990, 2005, 2031, 2111, 2150,
                                          2225]),
            },
            "droit_cn_alcools_total": {
                "@type": "Parameter",
                "description": u"Masse droit alcool selon comptabilité nationale avec les differents droits",
                "format": "float",
                "values": _annual_values([2337, 2350, 2366, 2369, 2385, 2416,
                                          2514, 2503, 2453, 2409, 2352, 2477,
                                          2516, 2528, 2629, 2734, 3078, 2718,
                                          3022]),
            },
            "masse_conso_cn_alcools": {
                "@type": "Parameter",
                "description": u"Masse consommation alcool selon comptabilité nationale",
                "format": "float",
                "values": _annual_values([4893, 5075, 5065, 5123, 5234, 5558,
                                          5721, 5932, 5895, 5967, 5960, 6106,
                                          6142, 6147, 6342, 6618, 6680, 6996,
                                          7022]),
            },
        },
    }
    legislation_json['children']['imposition_indirecte'][
        'children']['alcool_conso_et_vin'] = alcool_conso_et_vin

    # Convert pre-2002 TICPE amounts from francs to euros.
    keys_ticpe = legislation_json['children']['imposition_indirecte'][
        'children']['ticpe']['children'].keys()
    for element in keys_ticpe:
        get_values = legislation_json['children']['imposition_indirecte'][
            'children']['ticpe']['children'][element]['values']
        for each_value in get_values:
            get_character = '{}'.format(each_value['start'])
            year = int(get_character[:4])
            if year < 2002:
                each_value['value'] = each_value['value'] / 6.55957
            else:
                each_value['value'] = each_value['value']
    return legislation_json
Preprocess the legislation parameters to add prices and amounts from national accounts
def contains_no_backer(self, addr):
    for i, p in self._pages.items():
        if i * self._page_size <= addr < (i + 1) * self._page_size:
            return (addr - i * self._page_size) in p
    return False
Tests if the address is contained in any page of paged memory, without considering memory backers. :param int addr: The address to test. :return: True if the address is included in one of the pages, False otherwise. :rtype: bool
def lambda_return(reward, value, length, discount, lambda_):
    # Mask out timesteps beyond each sequence's valid length.
    timestep = tf.range(reward.shape[1].value)
    mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
    sequence = mask * reward + discount * value * (1 - lambda_)
    discount = mask * discount * lambda_
    sequence = tf.stack([sequence, discount], 2)
    # Accumulate returns backwards in time with a reversed scan.
    return_ = tf.reverse(tf.transpose(tf.scan(
        lambda agg, cur: cur[0] + cur[1] * agg,
        tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
        tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
    # The check_numerics message literal was lost; placeholder used.
    return tf.check_numerics(tf.stop_gradient(return_), 'lambda_return')
TD-lambda returns.
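A minimal usage sketch, assuming the TensorFlow 1.x API implied by the `tf.scan`/`shape[1].value` idioms above; the shapes are [batch, time] and the numbers are illustrative.

import tensorflow as tf  # TF 1.x assumed, as in the function above

reward = tf.ones([2, 5])
value = tf.ones([2, 5])
length = tf.constant([5, 3])  # number of valid steps per sequence
returns = lambda_return(reward, value, length, discount=0.99, lambda_=0.95)
with tf.Session() as sess:
    print(sess.run(returns))  # shape (2, 5)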
def virtual_machines_list_all(**kwargs):
    result = {}
    # Utility keys reconstructed from the standard salt azurearm pattern.
    compconn = __utils__['azurearm.get_client']('compute', **kwargs)
    try:
        vms = __utils__['azurearm.paged_object_to_list'](
            compconn.virtual_machines.list_all()
        )
        for vm in vms:
            result[vm['name']] = vm
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
        result = {'error': str(exc)}
    return result
.. versionadded:: 2019.2.0 List all virtual machines within a subscription. CLI Example: .. code-block:: bash salt-call azurearm_compute.virtual_machines_list_all
def synthesize(vers, opts):
    parsed_version = parse_version(vers["version"])
    vers["capabilities"] = {}
    for name in opts["optional"]:
        vers["capabilities"][name] = check(parsed_version, name)
    for name in opts["required"]:
        have = check(parsed_version, name)
        vers["capabilities"][name] = have
        if not have:
            vers["error"] = (
                "client required capability `" + name
                + "` is not supported by this server"
            )
    return vers
Synthesize a capability enabled version response This is a very limited emulation for relatively recent feature sets
def sliver_reader(filename_end_mask="*[0-9].mhd",
                  sliver_reference_dir="~/data/medical/orig/sliver07/training/",
                  read_orig=True, read_seg=False):
    import re
    sliver_reference_dir = op.expanduser(sliver_reference_dir)
    orig_fnames = glob.glob(sliver_reference_dir + "*orig" + filename_end_mask)
    ref_fnames = glob.glob(sliver_reference_dir + "*seg" + filename_end_mask)
    orig_fnames.sort()
    ref_fnames.sort()
    for oname, rname in zip(orig_fnames, ref_fnames):
        vs_mm = None
        ref_data = None
        orig_data = None
        if read_orig:
            orig_data, metadata = io3d.datareader.read(oname)
            # Metadata key reconstructed; 'voxelsize_mm' is io3d's usual name.
            vs_mm = metadata['voxelsize_mm']
        if read_seg:
            ref_data, metadata = io3d.datareader.read(rname)
            vs_mm = metadata['voxelsize_mm']
        numeric_label = re.search(r".*g(\d+)", oname).group(1)
        yield numeric_label, vs_mm, oname, orig_data, rname, ref_data
Generator for reading sliver data from directory structure. :param filename_end_mask: file selection can be controlled with this parameter :param sliver_reference_dir: directory with sliver .mhd and .raw files :param read_orig: read image data if is set True :param read_seg: read segmentation data if is set True :return: numeric_label, vs_mm, oname, orig_data, rname, ref_data
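A hedged usage sketch; it assumes the SLIVER07 data is present under the default directory and that io3d is importable.

for numeric_label, vs_mm, oname, orig_data, rname, ref_data in sliver_reader(
        read_orig=True, read_seg=True):
    print(numeric_label, vs_mm, orig_data.shape, ref_data.shape)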
def anyword_substring_search_inner(query_word, target_words):
    for target_word in target_words:
        if target_word.startswith(query_word):
            return query_word
    return False
Return the query_word (a truthy value) if ANY target_word starts with it, else False.
def find_tf_idf(file_names=[], prev_file_path=None, dump_path=None):
    # NOTE: several string literals (print messages, file modes, paint()
    # colour args) were lost in extraction and are reconstructed below.
    tf_idf = []               # list of per-document term-frequency dicts
    df = defaultdict(int)     # document frequency per word
    prev_doc_count = 0
    prev_corpus_length = 0
    if prev_file_path:
        print(TAG, 'loading previous dump from', prev_file_path)
        df, tf_idf = pickle.load(open(prev_file_path, 'rb'))
        prev_doc_count = len(df)
        prev_corpus_length = len(tf_idf)
    for f in file_names:
        with open(f, 'r') as file1:
            for line in file1:
                wdict = defaultdict(int)
                for word in set(line.split()):
                    df[word] += 1
                for word in line.split():
                    wdict[word] += 1
                tf_idf.append(wdict)
    # Convert raw counts into tf-idf scores.
    for doc in tf_idf:
        for key in doc:
            true_idf = math.log(len(tf_idf) / df[key])
            true_tf = doc[key] / float(len(doc))
            doc[key] = true_tf * true_idf
    print(TAG, 'unique words =', len(df),
          ('(+' + str(len(df) - prev_doc_count) + ')') if prev_file_path else '')
    print(TAG, 'docs in corpus =', len(tf_idf),
          ('(+' + str(len(tf_idf) - prev_corpus_length) + ')') if prev_file_path else '')
    if dump_path:
        if dump_path[-8:] == 'tfidfpkl':  # extension check reconstructed from the docstring
            pickle.dump((df, tf_idf), open(dump_path, 'wb'),
                        protocol=pickle.HIGHEST_PROTOCOL)
            print(TAG, 'dumped to', dump_path)
    return df, tf_idf
Function to create a TF-IDF list of dictionaries for a corpus of docs. If you opt for dumping the data, you can provide a file path with the .tfidfpkl extension (a convention adopted here for clarity), and you can also regenerate the tf-idf list, overriding an old one, by mentioning its path.

@Args: --
file_names : paths of the files to be processed; these files are created using the twitter_streaming module.
prev_file_path : path of an old .tfidfpkl file, if available. (default=None)
dump_path : file path where to dump the generated lists. (default=None)

@returns: --
df : a dict of unique words in the corpus, with their document frequency as values.
tf_idf : the generated tf-idf list of dictionaries for the mentioned docs.
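A hedged usage sketch of the incremental workflow described above; the file paths are hypothetical.

# First run over an initial corpus, dumping the model:
df, tf_idf = find_tf_idf(file_names=['tweets_day1.txt'],   # hypothetical
                         dump_path='model.tfidfpkl')       # hypothetical
# Later, extend the model with new documents:
df, tf_idf = find_tf_idf(file_names=['tweets_day2.txt'],
                         prev_file_path='model.tfidfpkl',
                         dump_path='model.tfidfpkl')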
def cbpdnmd_ystep(k):
    if mp_W.shape[0] > 1:
        W = mp_W[k]
    else:
        W = mp_W
    AXU0 = mp_DX[k] - mp_S[k] + mp_Z_U0[k]
    AXU1 = mp_Z_X[k] + mp_Z_U1[k]
    mp_Z_Y0[k] = mp_xrho * AXU0 / (W**2 + mp_xrho)
    mp_Z_Y1[k] = sp.prox_l1(AXU1, (mp_lmbda / mp_xrho))
Do the Y step of the cbpdn stage. The only parameter is the slice index `k` and there are no return values; all inputs and outputs are from and to global variables.
def get(self, sid):
    return SessionContext(
        self._version,
        service_sid=self._solution['service_sid'],  # key reconstructed from the twilio pattern
        sid=sid,
    )
Constructs a SessionContext :param sid: The unique string that identifies the resource :returns: twilio.rest.proxy.v1.service.session.SessionContext :rtype: twilio.rest.proxy.v1.service.session.SessionContext
def choose_colour(self, title="Select Colour", **kwargs):
    return_data = self._run_zenity(title, ["--color-selection"], kwargs)
    if return_data.successful:
        converted_colour = ColourData.from_zenity_tuple_str(return_data.data)
        return DialogData(return_data.return_code, converted_colour)
    else:
        return DialogData(return_data.return_code, None)
Show a Colour Chooser dialog

Usage: C{dialog.choose_colour(title="Select Colour")}

@param title: window title for the dialog
@return: the dialog's exit code and the chosen colour, if any
@rtype: C{DialogData(int, Optional[ColourData])}
def _validate_first_message(cls, msg):
    data = cls._unpack_message(msg)
    logger.debug(data)
    if data != cls.RTM_HANDSHAKE:
        # Error message literal lost in extraction; reconstructed.
        raise SlackApiError('unexpected handshake data: {}'.format(data))
    logger.info('Handshake succeeded.')  # message literal lost; reconstructed
Check the first message matches the expected handshake. Note: The handshake is provided as :py:attr:`RTM_HANDSHAKE`. Arguments: msg (:py:class:`aiohttp.Message`): The message to validate. Raises: :py:class:`SlackApiError`: If the data doesn't match the expected handshake.
def _validateListedSubdirsExist(self, component):
    ok = True
    # The first half of this function (including the definitions of the
    # source and binary subdirectory lists) was lost in extraction; the
    # surviving warning fragment shows it mirrored the bin-directory loop
    # below for source directories.
    for d in source_subdirs:  # definition lost; assumed list of listed source dirs
        if not os.path.exists(os.path.join(component.path, d)):
            logger.warning(
                "directory \"%s\" doesn't exist but is listed in the module.json file of %s",
                d, component
            )
            ok = False
    for d in bin_subdirs:
        if not os.path.exists(os.path.join(component.path, d)):
            logger.warning(
                "bin directory \"%s\" doesn't exist but is listed in the module.json file of %s",
                d, component
            )
            ok = False
    return ok
Return true if all the subdirectories which this component lists in its module.json file exist (although their validity is otherwise not checked). If they don't, warning messages are printed.
def _increment_504_stat(self, request):
    for key in self.stats_dict:
        if key == 'lifetime':  # key reconstructed: lifetime counters need a unique value
            unique = request.url + str(time.time())
            self.stats_dict[key].increment(unique)
        else:
            self.stats_dict[key].increment()
    self.logger.debug("Incremented status_code stats")
Increments the 504 stat counters @param request: The scrapy request in the spider
def layer_uri(self, layer_name):
    layers = self.layers()
    for layer, extension in product(layers, EXTENSIONS):
        one_file = QFileInfo(
            self.uri.filePath(layer + '.' + extension))
        if one_file.exists():
            if one_file.baseName() == layer_name:
                return one_file.absoluteFilePath()
    # Returning None inside the loop (as the original did) aborted the
    # search after the first existing non-matching file.
    return None
Get layer URI. :param layer_name: The name of the layer to fetch. :type layer_name: str :return: The URI to the layer. :rtype: str .. versionadded:: 4.0
def variability_threshold(featuresdir,
                          outfile,
                          magbins=DEFAULT_MAGBINS,
                          maxobjects=None,
                          timecols=None,
                          magcols=None,
                          errcols=None,
                          lcformat='hat-sql',  # default literal lost; astrobase's usual default assumed
                          lcformatdir=None,
                          min_lcmad_stdev=5.0,
                          min_stetj_stdev=2.0,
                          min_iqr_stdev=2.0,
                          min_inveta_stdev=2.0,
                          verbose=True):
    # NOTE: several spans of this function (the feature-pickle reading, the
    # per-magbin setup, and parts of the threshold logic) were lost in
    # extraction; the comments below mark the gaps, and the dict keys are
    # reconstructed from the variable names they store.
    try:
        formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir)
        if formatinfo:
            (dfileglob, readerfunc, dtimecols, dmagcols, derrcols,
             magsarefluxes, normfunc) = formatinfo
        else:
            LOGERROR("can't figure out the light curve format")
            return None
    except Exception:  # except clause lost in extraction
        LOGERROR("can't figure out the light curve format")
        return None

    if timecols is None:
        timecols = dtimecols
    if magcols is None:
        magcols = dmagcols
    if errcols is None:
        errcols = derrcols

    pklist = glob.glob(os.path.join(featuresdir, 'varfeatures-*.pkl'))  # glob pattern assumed
    if maxobjects:
        pklist = pklist[:maxobjects]

    allobjects = {}

    for magcol in magcols:

        # If the stdev multipliers were given per magbin, take copies.
        if (isinstance(min_stetj_stdev, list) or
                isinstance(min_stetj_stdev, np.ndarray)):
            magcol_min_stetj_stdev = min_stetj_stdev[::]
        else:
            magcol_min_stetj_stdev = min_stetj_stdev

        if (isinstance(min_iqr_stdev, list) or
                isinstance(min_iqr_stdev, np.ndarray)):
            magcol_min_iqr_stdev = min_iqr_stdev[::]
        else:
            magcol_min_iqr_stdev = min_iqr_stdev

        if (isinstance(min_inveta_stdev, list) or
                isinstance(min_inveta_stdev, np.ndarray)):
            magcol_min_inveta_stdev = min_inveta_stdev[::]
        else:
            magcol_min_inveta_stdev = min_inveta_stdev

        LOGINFO('collecting sdssr, lcmad, stetsonj, iqr, inveta')  # message text lost

        # ... lost span: read the feature pickles, collect the objectids,
        # sdssr, lcmad, stetsonj, iqr and inveta arrays, initialise
        # allobjects[magcol] and the binned_* accumulator lists ...

        # Per-magbin loop header reconstructed; the bin-selection code and
        # the stetsonj/lcmad median/stdev computations were lost.
        for magi in range(len(magbins) - 1):

            # ... lost span: select thisbin_objectids, thisbin_sdssr,
            # thisbin_lcmad, thisbin_stetsonj, thisbin_iqr, thisbin_inveta
            # for this bin and compute thisbin_sdssr_median,
            # thisbin_stetsonj_median, thisbin_stetsonj_stdev,
            # thisbin_inveta_median, thisbin_inveta_stdev ...

            if thisbin_objectids.size > 49:  # minimum bin occupancy assumed

                # Stetson J branch reconstructed by analogy with the
                # surviving iqr branch below.
                if isinstance(magcol_min_stetj_stdev, float):
                    thisbin_min_stetj_stdev = magcol_min_stetj_stdev
                else:
                    thisbin_min_stetj_stdev = magcol_min_stetj_stdev[magi]
                    if not np.isfinite(thisbin_min_stetj_stdev):
                        LOGWARNING('provided stetsonj stdev multiplier is '
                                   'nan for magbin: %s, using 2.0' %
                                   thisbin_sdssr_median)  # message text lost
                        thisbin_min_stetj_stdev = 2.0
                        magcol_min_stetj_stdev[magi] = 2.0

                thisbin_objectids_thresh_stetsonj = thisbin_objectids[
                    thisbin_stetsonj > (
                        thisbin_stetsonj_median +
                        thisbin_min_stetj_stdev * thisbin_stetsonj_stdev
                    )
                ]

                thisbin_iqr_median = np.median(thisbin_iqr)
                thisbin_iqr_stdev = np.median(
                    np.abs(thisbin_iqr - thisbin_iqr_median)
                ) * 1.483
                binned_iqr_median.append(thisbin_iqr_median)
                binned_iqr_stdev.append(thisbin_iqr_stdev)

                if isinstance(magcol_min_iqr_stdev, float):
                    thisbin_objectids_thresh_iqr = thisbin_objectids[
                        thisbin_iqr > (thisbin_iqr_median +
                                       magcol_min_iqr_stdev *
                                       thisbin_iqr_stdev)
                    ]
                elif (isinstance(magcol_min_iqr_stdev, np.ndarray) or
                      isinstance(magcol_min_iqr_stdev, list)):
                    thisbin_min_iqr_stdev = magcol_min_iqr_stdev[magi]
                    if not np.isfinite(thisbin_min_iqr_stdev):
                        LOGWARNING('provided iqr stdev multiplier is nan '
                                   'for magbin: %s, using 2.0' %
                                   thisbin_sdssr_median)  # message text lost
                        thisbin_min_iqr_stdev = 2.0

                    # ... lost span: the thresholded-iqr selection for the
                    # array case and the inveta multiplier handling ending
                    # with: magcol_min_inveta_stdev[magi] = 2.0 ...

                thisbin_objectids_thresh_inveta = thisbin_objectids[
                    thisbin_inveta > (
                        thisbin_inveta_median +
                        thisbin_min_inveta_stdev * thisbin_inveta_stdev
                    )
                ]

            else:
                thisbin_objectids_thresh_stetsonj = (
                    np.array([], dtype=np.unicode_)
                )
                thisbin_objectids_thresh_iqr = (
                    np.array([], dtype=np.unicode_)
                )
                thisbin_objectids_thresh_inveta = (
                    np.array([], dtype=np.unicode_)
                )

            # Objects above threshold in all three measures for this bin.
            thisbin_objectids_thresh_all = reduce(
                np.intersect1d,
                (thisbin_objectids_thresh_stetsonj,
                 thisbin_objectids_thresh_iqr,
                 thisbin_objectids_thresh_inveta)
            )

            binned_objectids.append(thisbin_objectids)
            binned_sdssr.append(thisbin_sdssr)
            binned_lcmad.append(thisbin_lcmad)
            binned_stetsonj.append(thisbin_stetsonj)
            binned_iqr.append(thisbin_iqr)
            binned_inveta.append(thisbin_inveta)
            binned_count.append(thisbin_objectids.size)

            binned_objectids_thresh_stetsonj.append(
                thisbin_objectids_thresh_stetsonj
            )
            binned_objectids_thresh_iqr.append(
                thisbin_objectids_thresh_iqr
            )
            binned_objectids_thresh_inveta.append(
                thisbin_objectids_thresh_inveta
            )
            binned_objectids_thresh_all.append(
                thisbin_objectids_thresh_all
            )

        # Store the bin results (keys reconstructed from the variable names).
        allobjects[magcol]['magbins'] = magbins
        allobjects[magcol]['binned_objectids'] = binned_objectids
        allobjects[magcol]['binned_sdssr_median'] = binned_sdssr_median
        allobjects[magcol]['binned_sdssr'] = binned_sdssr
        allobjects[magcol]['binned_count'] = binned_count
        allobjects[magcol]['binned_lcmad'] = binned_lcmad
        allobjects[magcol]['binned_lcmad_median'] = binned_lcmad_median
        allobjects[magcol]['binned_lcmad_stdev'] = binned_lcmad_stdev
        allobjects[magcol]['binned_stetsonj'] = binned_stetsonj
        allobjects[magcol]['binned_stetsonj_median'] = binned_stetsonj_median
        allobjects[magcol]['binned_stetsonj_stdev'] = binned_stetsonj_stdev
        allobjects[magcol]['binned_iqr'] = binned_iqr
        allobjects[magcol]['binned_iqr_median'] = binned_iqr_median
        allobjects[magcol]['binned_iqr_stdev'] = binned_iqr_stdev
        allobjects[magcol]['binned_inveta'] = binned_inveta
        allobjects[magcol]['binned_inveta_median'] = binned_inveta_median
        allobjects[magcol]['binned_inveta_stdev'] = binned_inveta_stdev
        allobjects[magcol]['binned_objectids_thresh_stetsonj'] = (
            binned_objectids_thresh_stetsonj
        )
        allobjects[magcol]['binned_objectids_thresh_iqr'] = (
            binned_objectids_thresh_iqr
        )
        allobjects[magcol]['binned_objectids_thresh_inveta'] = (
            binned_objectids_thresh_inveta
        )
        allobjects[magcol]['binned_objectids_thresh_all'] = (
            binned_objectids_thresh_all
        )

        try:
            allobjects[magcol]['objectids_all_thresh_all_magbins'] = np.unique(
                np.concatenate(
                    allobjects[magcol]['binned_objectids_thresh_all']
                )
            )
        except ValueError:
            LOGWARNING('no objects above all thresholds in all magbins')  # message text lost
            allobjects[magcol]['objectids_all_thresh_all_magbins'] = (
                np.array([])
            )
        allobjects[magcol]['objectids_stetsonj_thresh_all_magbins'] = np.unique(
            np.concatenate(
                allobjects[magcol]['binned_objectids_thresh_stetsonj']
            )
        )
        allobjects[magcol]['objectids_inveta_thresh_all_magbins'] = np.unique(
            np.concatenate(allobjects[magcol]['binned_objectids_thresh_inveta'])
        )
        allobjects[magcol]['objectids_iqr_thresh_all_magbins'] = np.unique(
            np.concatenate(allobjects[magcol]['binned_objectids_thresh_iqr'])
        )
        allobjects[magcol]['min_lcmad_stdev'] = min_lcmad_stdev

    allobjects['magbins'] = magbins

    with open(outfile, 'wb') as outfd:
        pickle.dump(allobjects, outfd, protocol=pickle.HIGHEST_PROTOCOL)

    return allobjects
This generates a list of objects with Stetson J, IQR, and 1.0/eta above some threshold value to select them as potential variable stars. Use this to pare down the objects to review and put through period-finding. This does the thresholding per magnitude bin; this should be better than one single cut through the entire magnitude range. Set the magnitude bins using the magbins kwarg.

FIXME: implement a voting classifier here. This will choose variables based on the thresholds in IQR, Stetson, and inveta based on weighting carried over from the variability recovery sims.

Parameters
----------
featuresdir : str
    This is the directory containing variability feature pickles created by :py:func:`astrobase.lcproc.lcpfeatures.parallel_varfeatures` or similar.
outfile : str
    This is the output pickle file that will contain all the threshold information.
magbins : np.array of floats
    This sets the magnitude bins to use for calculating thresholds.
maxobjects : int or None
    This is the number of objects to process. If None, all objects with feature pickles in `featuresdir` will be processed.
timecols : list of str or None
    The timecol keys to use from the lcdict in calculating the thresholds.
magcols : list of str or None
    The magcol keys to use from the lcdict in calculating the thresholds.
errcols : list of str or None
    The errcol keys to use from the lcdict in calculating the thresholds.
lcformat : str
    This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
    If this is provided, gives the path to a directory where you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc.
min_lcmad_stdev,min_stetj_stdev,min_iqr_stdev,min_inveta_stdev : float or np.array
    These are all the standard deviation multipliers for the distributions of light curve standard deviation, Stetson J variability index, the light curve interquartile range, and 1/eta variability index respectively. These multipliers set the minimum values of these measures to use for selecting variable stars. If provided as floats, the same value will be used for all magbins. If provided as np.arrays of `size = magbins.size - 1`, they will be used to apply possibly different sigma cuts for each magbin.
verbose : bool
    If True, will report progress and warn about any problems.

Returns
-------
dict
    Contains all of the variability threshold information along with indices into the array of the object IDs chosen as variables.
def _get_names(self):
    names = self._read_name_file()
    names = self._compute_weights(names)
    return names
Get the list of first names. :return: A list of first name entries.
def main():
    if len(sys.argv) < 6:
        # Usage string aligned with the argument parsing below; the original
        # usage text listed username/password before namespace.
        print("Usage: %s server_url namespace username password classname"
              % sys.argv[0])
        print("Falling back to the built-in test defaults.")  # message text lost
        server_url = SERVER_URL
        namespace = TEST_NAMESPACE
        username = USERNAME
        password = PASSWORD
        classname = TEST_CLASS
    else:
        print("Using command line arguments.")  # message text lost
        server_url = sys.argv[1]
        namespace = sys.argv[2]
        username = sys.argv[3]
        password = sys.argv[4]
        classname = sys.argv[5]
    creds = (username, password)
    execute_request(server_url, creds, namespace, classname)
    return 0
Get arguments and call the execution function
def added(self):
    # The 'old'/'new' keys and the '{0}{1}.' prefix format are reconstructed
    # from the standard recursive dict-differ pattern.
    def _added(diffs, prefix):
        keys = []
        for key in diffs.keys():
            if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
                keys.extend(_added(diffs[key],
                                   prefix='{0}{1}.'.format(prefix, key)))
            elif diffs[key]['old'] == self.NONE_VALUE:
                if isinstance(diffs[key]['new'], dict):
                    keys.extend(
                        _added(diffs[key]['new'],
                               prefix='{0}{1}.'.format(prefix, key)))
                else:
                    keys.append('{0}{1}'.format(prefix, key))
        return keys

    return sorted(_added(self._diffs, prefix=''))
Returns all keys that have been added. If the keys are in child dictionaries they will be represented with . notation
def replace(path, value, **kwargs):
    return _gen_4spec(LCB_SDCMD_REPLACE, path, value,
                      create_path=False, **kwargs)
Replace an existing path. This works on any valid path if the path already exists. Valid only in :cb_bmeth:`mutate_in` :param path: The path to replace :param value: The new value
def pyside_load_ui(uifile, base_instance=None):
    form_class, base_class = load_ui_type(uifile)
    if not base_instance:
        typeName = form_class.__name__
        finalType = type(typeName, (form_class, base_class), {})
        base_instance = finalType()
    else:
        if not isinstance(base_instance, base_class):
            # Error message literal lost in extraction; reconstructed.
            raise RuntimeError(
                'base_instance is not an instance of %s' % type(base_class))
        typeName = type(base_instance).__name__
        base_instance.__class__ = type(typeName,
                                       (form_class, type(base_instance)),
                                       {})
    base_instance.setupUi(base_instance)
    return base_instance
Provide PyQt4.uic.loadUi functionality to PySide Args: uifile (str): Absolute path to .ui file base_instance (QWidget): The widget into which UI widgets are loaded Note: pysideuic is required for this to work with PySide. This seems to work correctly in Maya as well as outside of it as opposed to other implementations which involve overriding QUiLoader. Returns: QWidget: the base instance
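A hedged usage sketch; the `.ui` path is hypothetical, and PySide 1 with pysideuic is assumed, per the note above.

from PySide import QtGui

app = QtGui.QApplication([])
widget = pyside_load_ui('/path/to/dialog.ui')  # hypothetical .ui file
widget.show()
app.exec_()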
def find_types(observatory, match=None, trend=None, connection=None,
               **connection_kw):
    return sorted(connection.find_types(observatory, match=match),
                  key=lambda x: _type_priority(observatory, x, trend=trend))
Find the available data types for a given observatory. See also -------- gwdatafind.http.HTTPConnection.find_types FflConnection.find_types for details on the underlying method(s)
def get_scopes_for(self, user_provided_scopes):
    if user_provided_scopes is None:
        # Default to all registered scope helpers.
        user_provided_scopes = [app_part for app_part in self._oauth_scopes]
    elif isinstance(user_provided_scopes, str):
        user_provided_scopes = [user_provided_scopes]

    if not isinstance(user_provided_scopes, (list, tuple)):
        # Argument name in the message reconstructed from the signature.
        raise ValueError(
            "'user_provided_scopes' must be a list or a tuple of strings")

    scopes = set()
    for app_part in user_provided_scopes:
        for scope in self._oauth_scopes.get(app_part, [(app_part,)]):
            scopes.add(self._prefix_scope(scope))
    return list(scopes)
Returns a list of scopes needed for each of the scope_helpers provided, by adding the prefix to them if required :param user_provided_scopes: a list of scopes or scope helpers :type user_provided_scopes: list or tuple or str :return: scopes with url prefix added :rtype: list :raises ValueError: if unexpected datatype of scopes are passed
def get_route_name(resource_uri):
    resource_uri = resource_uri.strip()
    # Pattern reconstructed from the docstring: strip non-word characters.
    resource_uri = re.sub(r'\W', '', resource_uri)
    return resource_uri
Get route name from RAML resource URI. :param resource_uri: String representing RAML resource URI. :returns string: String with route name, which is :resource_uri: stripped of non-word characters.
def psd(tachogram_time, tachogram_data):
    init_time = tachogram_time[0]
    fin_time = tachogram_time[-1]
    # Spline-interpolate the tachogram onto an evenly sampled time axis.
    tck = interpol.splrep(tachogram_time, tachogram_data)
    interpolation_rate = 4
    nn_time_even = numpy.linspace(
        init_time, fin_time,
        int((fin_time - init_time) * interpolation_rate))  # int() added: linspace needs an integer count
    nn_tachogram_even = interpol.splev(nn_time_even, tck)
    # Welch periodogram with a Hanning window of at most 1000 samples.
    freq_axis, power_axis = scisignal.welch(
        nn_tachogram_even, interpolation_rate,
        window=scisignal.get_window("hanning",
                                    min(len(nn_tachogram_even), 1000)),
        nperseg=min(len(nn_tachogram_even), 1000))
    freqs = [round(val, 3) for val in freq_axis if val < 0.5]
    power = [round(val, 4) for val, freq in zip(power_axis, freq_axis)
             if freq < 0.5]
    return freqs, power
----- Brief ----- Determination of the Power Spectral Density Function (Fourier Domain) ----------- Description ----------- The Power Spectral Density Function allows to perceive the behavior of a given signal in terms of its frequency. This procedure costs the time resolution of the signal but may be important to extract features in a different domain appart from the time domain. This function constructs the Power Spectral Density Function in the frequency domain. ---------- Parameters ---------- tachogram_time : list X Axis of tachogram. tachogram_data : list Y Axis of tachogram. Returns ------- out : list, list Frequency and power axis.
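A hedged example feeding a synthetic tachogram through psd; the RR-interval values are illustrative, not physiological data.

import numpy as np

rr = np.random.normal(0.8, 0.05, 300)       # synthetic RR intervals (s)
tachogram_time = np.cumsum(rr).tolist()     # beat occurrence times
tachogram_data = rr.tolist()
freqs, power = psd(tachogram_time, tachogram_data)
print(freqs[:5], power[:5])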
def parsedeglat(latstr):
    # The unit tag and error text literals were lost in extraction and are
    # reconstructed here.
    deg = _parsesexagesimal(latstr, 'latitude', True)
    if abs(deg) > 90:
        raise ValueError('illegal latitude: ' + latstr)
    return deg * D2R
Parse a latitude formatted as sexagesimal degrees into an angle. This function converts a textual representation of a latitude, measured in degrees, into a floating point value measured in radians. The format of *latstr* is very limited: it may not have leading or trailing whitespace, and the components of the sexagesimal representation must be separated by colons. The input must therefore resemble something like ``"-00:12:34.5"``. A :exc:`ValueError` will be raised if the input does not resemble this template. Latitudes greater than 90 or less than -90 degrees are not allowed.
def container_rename(name, newname, remote_addr=None, cert=None, key=None,
                     verify_cert=True):
    container = container_get(
        name, remote_addr, cert, key, verify_cert, _raw=True
    )
    if container.status_code == CONTAINER_STATUS_RUNNING:
        raise SaltInvocationError(
            # Message reconstructed around the surviving fragment.
            "Can't rename the running container '{0}'.".format(name)
        )
    container.rename(newname, wait=True)
    return _pylxd_model_to_dict(container)
Rename a container

name :
    Name of the container to rename

newname :
    The new name of the container

remote_addr :
    A URL to a remote server; you also have to give cert and key if you provide remote_addr and it's a TCP address!

    Examples:
        https://myserver.lan:8443
        /var/lib/mysocket.sock

cert :
    PEM formatted SSL certificate.

    Examples:
        ~/.config/lxc/client.crt

key :
    PEM formatted SSL key.

    Examples:
        ~/.config/lxc/client.key

verify_cert : True
    Whether to verify the cert; this is True by default, but in most cases you want to set it off, as LXD normally uses self-signed certificates.
def next(self):
    if self.nextId is not None:
        return super(CapitainsCtsPassage, self).getTextualNode(
            subreference=self.nextId)
Next CapitainsCtsPassage (Interactive CapitainsCtsPassage)
def load_pos_model(lang="en", version="2"):
    src_dir = "pos{}".format(version)
    p = locate_resource(src_dir, lang)
    fh = _open(p)
    return dict(np.load(fh))
Return a part of speech tagger parameters for `lang` and of version `version` Args: lang (string): language code. version (string): version of the parameters to be used.
def set_visible_region(self, rectangles, count):
    if not isinstance(rectangles, basestring):
        raise TypeError("rectangles can only be an instance of type basestring")
    if not isinstance(count, baseinteger):
        raise TypeError("count can only be an instance of type baseinteger")
    self._call("setVisibleRegion", in_p=[rectangles, count])
Suggests a new visible region to this frame buffer. This region represents the area of the VM display which is a union of regions of all top-level windows of the guest operating system running inside the VM (if the Guest Additions for this system support this functionality). This information may be used by the frontends to implement the seamless desktop integration feature. The address of the provided array must be in the process space of this IFramebuffer object. The IFramebuffer implementation must make a copy of the provided array of rectangles. Method not yet implemented. in rectangles of type str Pointer to the @c RTRECT array. in count of type int Number of @c RTRECT elements in the @a rectangles array.
def save_tc_to_nii(strCsvCnfg, lgcTest=False, lstRat=None, lgcMdlRsp=False,
                   strPathHrf=None, lgcSaveRam=False):
    # NOTE: many string literals (path suffixes, nii name lists, print
    # messages, error texts) were lost in extraction; empty strings and
    # comments below mark the gaps.
    dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
    cfg = cls_set_config(dicCnfg)

    if strPathHrf is not None:
        cfg.strPathOut = cfg.strPathOut + ''  # suffix lost
    if lstRat is not None:
        cfg.strPathOut = cfg.strPathOut + ''  # suffix lost
        cfg.strPathMdl = cfg.strPathMdl + ''  # suffix lost
        lstRat.insert(0, 1.0)

    # Winner pRF parameter maps (the three file name suffixes were lost).
    lstWnrPrm = [cfg.strPathOut + '',
                 cfg.strPathOut + '',
                 cfg.strPathOut + '']
    errorMsg = ''  # error text lost
    assert os.path.isfile(lstWnrPrm[0]), errorMsg
    assert os.path.isfile(lstWnrPrm[1]), errorMsg
    assert os.path.isfile(lstWnrPrm[2]), errorMsg
    aryIntGssPrm = load_res_prm(lstWnrPrm,
                                lstFlsMsk=[cfg.strPathNiiMask])[0][0]

    lstPathBeta = [cfg.strPathOut + '']  # beta map name lost
    aryBetas = load_res_prm(lstPathBeta,
                            lstFlsMsk=[cfg.strPathNiiMask])[0][0]
    assert os.path.isfile(lstPathBeta[0]), errorMsg

    if lstRat is not None:
        lstPathRatio = [cfg.strPathOut + '']  # ratio map name lost
        aryRatio = load_res_prm(lstPathRatio,
                                lstFlsMsk=[cfg.strPathNiiMask])[0][0]
        assert os.path.isfile(lstPathRatio[0]), errorMsg

    aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func(
        cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100)

    # Restrict to voxels with variance.
    aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
    aryBetas = aryBetas[aryLgcVar, :]
    if lstRat is not None:
        aryRatio = aryRatio[aryLgcVar, :]

    aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX),
                                 int(cfg.varVslSpcSzeY)), cfg.varNum1,
                                cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2,
                                cfg.varExtYmin, cfg.varExtYmax,
                                cfg.varNumPrfSizes, cfg.varPrfStdMin,
                                cfg.varPrfStdMax, kwUnt='deg',  # unit keyword assumed
                                kwCrd=cfg.strKwCrd)
    lgcMdlInc = np.load(cfg.strPathMdl + '')  # file name lost
    aryMdlParams = aryMdlParams[lgcMdlInc, :]

    aryPrfTc = np.load(cfg.strPathMdl + '')  # file name lost
    aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp)
    if lgcMdlRsp:
        aryMdlRsp = np.load(cfg.strPathMdl + '')  # file name lost

    aryFitTc = np.zeros((aryFunc.shape), dtype=np.float32)
    if lgcMdlRsp:
        if lstRat is not None:
            aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0],
                                     aryMdlRsp.shape[1],
                                     aryMdlRsp.shape[3]), dtype=np.float32)
        else:
            aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0],
                                     aryMdlRsp.shape[1]), dtype=np.float32)

    vecVxlTst = np.zeros(aryIntGssPrm.shape[0])
    aryUnqRows, aryUnqInd = fnd_unq_rws(aryIntGssPrm, return_index=False,
                                        return_inverse=True)

    print('---Assigning models to voxels')  # message text lost
    for indRow, vecPrm in enumerate(aryUnqRows):
        lgcVxl = [aryUnqInd == indRow][0]
        if np.all(np.invert(lgcVxl)):
            print('---No voxels for this parameter row')  # message text lost
        vecVxlTst[lgcVxl] += 1
        lgcMdl = np.where(np.isclose(aryMdlParams, vecPrm,
                                     atol=0.01).all(axis=1))[0][0]
        if lgcMdl is None:
            print('---No model found for these parameters')  # message text lost
        aryMdlTc = aryPrfTc[lgcMdl, ...]
        aryWeights = aryBetas[lgcVxl, :]
        if lstRat is not None:
            aryVxlRatio = aryRatio[lgcVxl, :]
            indRat = [ind for ind, rat1 in enumerate(lstRat)
                      for rat2 in aryVxlRatio[:, 0]
                      if np.isclose(rat1, rat2)]
            indVxl = range(len(indRat))
            aryFitTcTmp = np.tensordot(aryWeights, aryMdlTc,
                                       axes=([1], [0]))
            aryFitTc[lgcVxl, :] = aryFitTcTmp[indVxl, indRat, :]
        else:
            aryFitTc[lgcVxl, :] = np.dot(aryWeights, aryMdlTc)
        if lgcMdlRsp:
            if lstRat is not None:
                aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :, indRat, :]
            else:
                aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :]

    errMsg = ''  # error text lost
    assert len(vecVxlTst) == np.sum(vecVxlTst), errMsg

    # Export empirical time courses (the nii name list was lost).
    lstNiiNames = []
    lstNiiNames = [cfg.strPathOut + strNii + '' for strNii in lstNiiNames]
    print('---Exporting empirical time courses')  # message text lost
    export_nii(aryFunc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
               aryAff, hdrMsk, outFormat='4D')  # format keyword assumed
    print('---Done.')
    if lgcSaveRam:
        strPthRamOut = cfg.strPathOut + '' + ''  # name parts lost
        imgNii = nb.Nifti1Image(
            np.expand_dims(np.expand_dims(aryFunc, axis=1), axis=1),
            affine=np.eye(4))
        nb.save(imgNii, strPthRamOut)

    # Export fitted time courses (the nii name list was lost).
    lstNiiNames = []
    lstNiiNames = [cfg.strPathOut + strNii + '' for strNii in lstNiiNames]
    print('---Exporting fitted time courses')  # message text lost
    export_nii(aryFitTc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
               aryAff, hdrMsk, outFormat='4D')
    print('---Done.')

    if lgcMdlRsp:
        strNpyName = cfg.strPathOut + '' + ''  # name parts lost
        print('---Saving fitted model responses')  # message text lost
        np.save(strNpyName, aryFitMdlRsp)
        print('---Done.')
        strNpyMskName = cfg.strPathOut + '' + ''  # name parts lost
        aryLgcMsk[aryLgcMsk] = aryLgcVar
        print('---Saving mask')  # message text lost
        np.save(strNpyMskName, aryLgcMsk)
        print('---Done.')

    if lgcSaveRam:
        strPthRamOut = cfg.strPathOut + '' + ''  # name parts lost
        imgNii = nb.Nifti1Image(
            np.expand_dims(np.expand_dims(aryFitTc, axis=1), axis=1),
            affine=np.eye(4))
        nb.save(imgNii, strPthRamOut)
Save empirical and fitted time courses to nii file format. Parameters ---------- strCsvCnfg : str Absolute file path of config file used for pRF fitting. lgcTest : boolean Whether this is a test (pytest). If yes, absolute path of pyprf libary will be prepended to config file paths. lstRat : None or list Ratio of size of center to size of suppressive surround. lgcMdlRsp : boolean Should the aperture responses for the winner model also be saved? strPathHrf : str or None: Path to npy file with custom hrf parameters. If None, defaults parameters were used. lgcSaveRam : boolean Whether to also save a nii file that uses little RAM. Notes ----- This function does not return any arguments but, instead, saves nii files to disk.
def delete_alias(self, index, name, params=None):
    for param in (index, name):
        if param in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument.")
    return self.transport.perform_request(
        "DELETE", _make_path(index, "_alias", name), params=params
    )
Delete specific alias. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_ :arg index: A comma-separated list of index names (supports wildcards); use `_all` for all indices :arg name: A comma-separated list of aliases to delete (supports wildcards); use `_all` to delete all aliases for the specified indices. :arg master_timeout: Specify timeout for connection to master :arg request_timeout: Explicit timeout for the operation
def send_response(self, body, set_content_type=True):
    settings = get_settings(self.application, force_instance=True)
    handler = settings[self.get_response_content_type()]
    content_type, data_bytes = handler.to_bytes(body)
    if set_content_type:
        # Header literals reconstructed from the docstring and the usual
        # content-negotiation pattern.
        self.set_header('Content-Type', content_type)
        self.add_header('Vary', 'Accept')
    self.write(data_bytes)
Serialize and send ``body`` in the response. :param dict body: the body to serialize :param bool set_content_type: should the :http:header:`Content-Type` header be set? Defaults to :data:`True`
def _check_keys(dictionary):
    for key in dictionary:
        if isinstance(dictionary[key], matlab.mio5_params.mat_struct):
            dictionary[key] = _todict(dictionary[key])
    return dictionary
Checks if entries in the dictionary are mat-objects. If so, _todict is called to change them to nested dictionaries.
def is_duplicate_of(self, other):
    if super(Spectrum, self).is_duplicate_of(other):
        return True
    row_matches = 0
    for ri, row in enumerate(self.get(self._KEYS.DATA, [])):
        lambda1, flux1 = tuple(row[0:2])
        if (self._KEYS.DATA not in other or
                ri > len(other[self._KEYS.DATA])):
            break
        lambda2, flux2 = tuple(other[self._KEYS.DATA][ri][0:2])
        minlambdalen = min(len(lambda1), len(lambda2))
        minfluxlen = min(len(flux1), len(flux2))
        if (lambda1[:minlambdalen + 1] == lambda2[:minlambdalen + 1] and
                flux1[:minfluxlen + 1] == flux2[:minfluxlen + 1] and
                float(flux1[:minfluxlen + 1]) != 0.0):
            row_matches += 1
        if row_matches >= 5:
            return True
        if ri >= 10:
            break
    return False
Check if spectrum is duplicate of another.
def save(self, savefile):
    with open(str(savefile), 'wb') as f:  # binary mode: the payload is mp3 bytes
        self.write_to_fp(f)
    log.debug("Saved to %s", savefile)
Do the TTS API request and write result to file. Args: savefile (string): The path and file name to save the ``mp3`` to. Raises: :class:`gTTSError`: When there's an error with the API request.
def _pick_state_im_name(state_name, im_name, use_full_path=False):
    initial_dir = os.getcwd()
    if (state_name is None) or (im_name is None):
        wid = tk.Tk()
        wid.withdraw()
    if state_name is None:
        state_name = tkfd.askopenfilename(
            initialdir=initial_dir,
            title='Select state')  # dialog title text lost; placeholder
        os.chdir(os.path.dirname(state_name))
    if im_name is None:
        im_name = tkfd.askopenfilename(
            initialdir=initial_dir,
            title='Select image')  # dialog title text lost; placeholder
    if (not use_full_path) and (os.path.dirname(im_name) != ''):
        im_path = os.path.dirname(im_name)
        os.chdir(im_path)
        im_name = os.path.basename(im_name)
    else:
        os.chdir(initial_dir)
    return state_name, im_name
If state_name or im_name is None, picks them interactively through Tk, and then sets with or without the full path. Parameters ---------- state_name : {string, None} The name of the state. If None, selected through Tk. im_name : {string, None} The name of the image. If None, selected through Tk. use_full_path : Bool, optional Set to True to return the names as full paths rather than relative paths. Default is False (relative path).
def verify_ticket_signature(self, data, sig):
    try:
        signature = base64.b64decode(sig)
    except TypeError as e:
        if hasattr(self, "debug"):
            print("Exception in function base64.b64decode. File %s" % (__file__,))
            print("%s" % e)
        return False
    # The digest computation and the RSA branch were lost in extraction;
    # they are reconstructed here by mirroring the surviving DSA branch.
    digest = hashlib.sha1(data).digest()  # digest choice assumed
    if isinstance(self.pub_key, RSA.RSA_pub):
        try:
            return self.pub_key.verify(digest, signature)
        except RSA.RSAError:
            return False
    if isinstance(self.pub_key, DSA.DSA_pub):
        try:
            return self.pub_key.verify_asn1(digest, signature)
        except DSA.DSAError as e:
            if hasattr(self, "debug"):
                print("Exception in function self.pub_key.verify_asn1(digest, signature). File %s" % (__file__,))
                print("%s" % e)
            return False
    return False
Verify ticket signature.
def new_connection(self):
    if not self.prepared:
        self.prepare()
    con = sqlite3.connect(self.path, isolation_level=self.isolation)
    con.row_factory = self.factory
    if self.text_fact:
        con.text_factory = self.text_fact
    return con
Make a new connection.
def _affine_mult(c:FlowField, m:AffineMatrix)->FlowField:
    "Multiply `c` by `m` - can adjust for rectangular shaped `c`."
    if m is None: return c
    size = c.flow.size()
    h,w = c.size
    m[0,1] *= h/w
    m[1,0] *= w/h
    c.flow = c.flow.view(-1,2)
    c.flow = torch.addmm(m[:2,2], c.flow, m[:2,:2].t()).view(size)
    return c
Multiply `c` by `m` - can adjust for rectangular shaped `c`.
def _session():
    # The config key and log/error message literals were lost in extraction
    # and are reconstructed below.
    profile_name = _cfg('profile_name')  # config key assumed
    if profile_name:
        log.info('Using the %s profile', profile_name)
    else:
        log.info('Using the default profile')
    try:
        return boto3.Session(profile_name=profile_name)
    except botocore.exceptions.ProfileNotFound as orig_exc:
        err_msg = 'Boto3 could not find the "{}" profile'.format(
            profile_name or 'default')
        config_error = salt.exceptions.SaltConfigurationError(err_msg)
        six.raise_from(config_error, orig_exc)
    except botocore.exceptions.NoRegionError as orig_exc:
        err_msg = (
            'Boto3 was unable to determine the AWS region from the "{}" profile'
        ).format(profile_name or 'default')
        config_error = salt.exceptions.SaltConfigurationError(err_msg)
        six.raise_from(config_error, orig_exc)
Return the boto3 session to use for the KMS client. If aws_kms:profile_name is set in the salt configuration, use that profile. Otherwise, fall back on the default aws profile. We use the boto3 profile system to avoid having to duplicate individual boto3 configuration settings in salt configuration.
def curl_couchdb(url, method='GET', base_url=BASE_URL, data=None):
    # Method names and the URL join were lost in extraction; 'GET' default
    # and '{}{}' join reconstructed from the branch structure.
    (username, password) = get_admin()
    if username is None:
        auth = None
    else:
        auth = (username, password)
    full_url = '{}{}'.format(base_url, url)
    if method == 'PUT':
        req = requests.put(full_url, auth=auth, data=data)
    elif method == 'DELETE':
        req = requests.delete(full_url, auth=auth)
    else:
        req = requests.get(full_url, auth=auth)
    if req.status_code not in [200, 201]:
        raise HTTPError('{}: {}'.format(req.status_code, req.text))
    return req
Launch a curl-style HTTP request on the CouchDB instance
def _infinite_iterator(self):
    while True:
        for crash_id in self._basic_iterator():
            if self._filter_disallowed_values(crash_id):
                continue
            yield crash_id
This iterator wraps the "_basic_iterator" when the configuration specifies that "number_of_submissions" is set to "forever". Whenever the "_basic_iterator" is exhausted, it is called again to restart the iteration. It is up to the implementation of the innermost iterator to define what starting over means: some iterators may repeat exactly what they did before, while others may iterate over new values.
def binboolflip(item):
    if item in [0, False, 1, True]:
        return int(item) if isinstance(item, bool) else bool(item)
    raise ValueError("Invalid item specified.")
Convert 0 or 1 to False or True (or vice versa). The converter works as follows: - 0 > False - False > 0 - 1 > True - True > 1 :type item: integer or boolean :param item: The item to convert. >>> binboolflip(0) False >>> binboolflip(False) 0 >>> binboolflip(1) True >>> binboolflip(True) 1 >>> binboolflip("foo") Traceback (most recent call last): ... ValueError: Invalid item specified.
def start_volume(name, force=False):
    # Gluster CLI string and status literal reconstructed; the stray '*' that
    # preceded this function was CLI-example residue.
    cmd = 'volume start {0}'.format(name)
    if force:
        cmd = '{0} force'.format(cmd)
    volinfo = info(name)
    if name not in volinfo:
        log.error("Cannot start non-existing volume %s", name)
        return False
    if not force and volinfo[name]['status'] == '1':  # '1' means started
        log.info("Volume %s already started", name)
        return True
    return _gluster(cmd)
Start a gluster volume name Volume name force Force the volume start even if the volume is started .. versionadded:: 2015.8.4 CLI Example: .. code-block:: bash salt '*' glusterfs.start mycluster
def _my_top_k(x, k):
    if k > 10:
        return tf.nn.top_k(x, k)
    values = []
    indices = []
    depth = tf.shape(x)[1]
    for i in range(k):
        values.append(tf.reduce_max(x, 1))
        argmax = tf.argmax(x, 1)
        indices.append(argmax)
        if i + 1 < k:
            # Mask out the current maximum so the next iteration finds the
            # next-largest entry.
            x += tf.one_hot(argmax, depth, -1e9)
    return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1))
GPU-compatible version of top-k that works for very small constant k. Calls argmax repeatedly. tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense, seems not to be, so if we use tf.nn.top_k, then both the top_k and its gradient go on cpu. Once this is not an issue, this function becomes obsolete and should be replaced by tf.nn.top_k. Args: x: a 2d Tensor. k: a small integer. Returns: values: a Tensor of shape [batch_size, k] indices: a int32 Tensor of shape [batch_size, k]
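A small equivalence check under the same TF 1.x assumption; for tiny k both paths should return the same values and indices.

import tensorflow as tf  # TF 1.x assumed

x = tf.constant([[0.1, 0.9, 0.3],
                 [0.5, 0.2, 0.8]])
vals, idxs = _my_top_k(x, k=2)
ref_vals, ref_idxs = tf.nn.top_k(x, k=2)
with tf.Session() as sess:
    print(sess.run([vals, ref_vals]))  # same values
    print(sess.run([idxs, ref_idxs]))  # same indices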
def shell(args):
    " A helper command to be used for shell integration "
    # Python 2 print statements; the quoted text of two lines and the sourced
    # file name were lost in extraction.
    print
    print "# makesite shell integration"              # text reconstructed
    print "# add the following to your shell profile"  # text reconstructed
    print "export MAKESITE_HOME=%s" % args.path
    print "source %s" % op.join(settings.BASEDIR, 'shell.sh')  # file name assumed
    print
A helper command to be used for shell integration
def get_label(self, label_name):
    for label in self.get_labels():
        if label.name == label_name:
            return label
Return the user's label that has a given name. :param label_name: The name to search for. :type label_name: str :return: A label that has a matching name or ``None`` if not found. :rtype: :class:`pytodoist.todoist.Label` >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> label = user.get_label('family')
def download(self, storagemodel: object, modeldefinition=None):
    if storagemodel.name is None:
        raise AzureStorageWrapException(
            storagemodel,
            "StorageBlobModel does not contain content nor content settings")
    else:
        # modeldefinition keys reconstructed from the surrounding calls.
        container_name = modeldefinition['container']
        blob_name = storagemodel.name
        try:
            if modeldefinition['blobservice'].exists(container_name,
                                                     blob_name):
                blob = modeldefinition['blobservice'].get_blob_to_bytes(
                    container_name=container_name,
                    blob_name=blob_name
                )
                storagemodel.__mergeblob__(blob)
        except Exception as e:
            # Message text reconstructed.
            msg = 'can not download blob from container {}: {}'.format(
                storagemodel._containername, e)
            raise AzureStorageWrapException(storagemodel, msg=msg)
    return storagemodel
Load a blob from storage into a StorageBlobModel instance
def files_info(self, *, id: str, **kwargs) -> SlackResponse:
    kwargs.update({"id": id})
    return self.api_call("files.info", http_verb="GET", params=kwargs)
Gets information about a team file. Args: id (str): The file id. e.g. 'F1234467890'
def transform_language_code(code):
    if code is None:
        return None
    # Separator and fallback key reconstructed from the docstring's
    # 'en-us'-style example.
    components = code.split('-', 2)
    language_code = components[0]
    try:
        country_code = components[1]
    except IndexError:
        country_code = '_'
    language_family = SUCCESSFACTORS_OCN_LANGUAGE_CODES.get(language_code)
    if not language_family:
        return None
    return language_family.get(country_code, language_family['_'])
Transform ISO language code (e.g. en-us) to the language name expected by SAPSF.
def add_distinguished_name_list(list_name):
    payload = {"jsonrpc": "2.0",
               "id": "ID0",
               "method": "add_policy_distinguished_names_list",
               "params": [{"list_name": list_name}]}
    # Proxy key reconstructed from the salt bluecoat_sslv pattern; the stray
    # '*' that preceded this function was CLI-example residue.
    response = __proxy__['bluecoat_sslv.call'](payload, True)
    return _validate_change_result(response)
Add a list of policy distinguished names. list_name(str): The name of the specific policy distinguished name list to add. CLI Example: .. code-block:: bash salt '*' bluecoat_sslv.add_distinguished_name_list MyDistinguishedList
def insert(self, key, value):
    if len(self.history) == self.maxsize:
        expectorate = self.history[0]
    else:
        expectorate = None
    self.history.append((key, value))
    if key in self:
        super().__getitem__(key).append(value)
    else:
        super().__setitem__(key, [value])
    # Once the history is full, discard the oldest value and drop its key
    # entirely if no values remain.
    if expectorate is not None:
        old_key, old_value = expectorate
        super().__getitem__(old_key).pop(0)
        if len(super().__getitem__(old_key)) == 0:
            super().__delitem__(old_key)
        return (old_key, old_value)
Adds a new key-value pair. Returns any discarded values.
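A hedged usage sketch; the container class name is hypothetical, but the insert/discard behaviour follows the code above.

d = BoundedMultiDict(maxsize=2)  # hypothetical class exposing insert() above
d.insert('a', 1)                 # nothing discarded yet -> returns None
d.insert('a', 2)                 # 'a' now maps to [1, 2]
old = d.insert('b', 3)           # history full: oldest pair is discarded
print(old)                       # ('a', 1); d['a'] == [2], d['b'] == [3]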
def absolute(self):
    return Timeseries(np.absolute(self), self.tspan, self.labels)
Calculate the absolute value element-wise. Returns: absolute (Timeseries): Absolute value. For complex input (a + b*j) gives sqrt(a**2 + b**2)
def setup(self):
    if self._formats:
        return
    basedir = self.basedir
    options = self.options
    crumbs = self.get_relative_breadcrumbs()
    fmts = list()
    for fmt_class in self.formats:
        fmt = fmt_class(basedir, options, crumbs)
        fmt.setup()
        fmts.append(fmt)
    self._formats = fmts
instantiates all report formats that have been added to this reporter, and calls their setup methods.
def remove_extra_delims(expr, ldelim="(", rdelim=")"):
    # Build the set of operator characters from the precedence table.
    op_group = ""
    for item1 in _OP_PREC:
        if isinstance(item1, list):
            for item2 in item1:
                op_group += item2
        else:
            op_group += item1
    iobj = zip([expr, ldelim, rdelim], ["expr", "ldelim", "rdelim"])
    for item, desc in iobj:
        if not isinstance(item, str):
            raise RuntimeError("Argument `{0}` is not valid".format(desc))
    if (len(ldelim) != 1) or ((len(ldelim) == 1) and (ldelim in op_group)):
        raise RuntimeError("Argument `ldelim` is not valid")
    if (len(rdelim) != 1) or ((len(rdelim) == 1) and (rdelim in op_group)):
        raise RuntimeError("Argument `rdelim` is not valid")
    if expr.count(ldelim) != expr.count(rdelim):
        raise RuntimeError("Mismatched delimiters")
    if not expr:
        return expr
    vchars = (
        "abcdefghijklmnopqrstuvwxyz"
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        ".0123456789"
        r"_()[]\{\}" + rdelim + ldelim + op_group
    )
    if any([item not in vchars for item in expr]) or ("__" in expr):
        raise RuntimeError("Argument `expr` is not valid")
    expr = _remove_consecutive_delims(expr, ldelim=ldelim, rdelim=rdelim)
    expr = expr.replace(ldelim + rdelim, "")
    return _remove_extra_delims(expr, ldelim=ldelim, rdelim=rdelim)
r""" Remove unnecessary delimiters in mathematical expressions. Delimiters (parenthesis, brackets, etc.) may be removed either because there are multiple consecutive delimiters enclosing a single expressions or because the delimiters are implied by operator precedence rules. Function names must start with a letter and then can contain alphanumeric characters and a maximum of one underscore :param expr: Mathematical expression :type expr: string :param ldelim: Single character left delimiter :type ldelim: string :param rdelim: Single character right delimiter :type rdelim: string :rtype: string :raises: * RuntimeError (Argument \`expr\` is not valid) * RuntimeError (Argument \`ldelim\` is not valid) * RuntimeError (Argument \`rdelim\` is not valid) * RuntimeError (Function name `*[function_name]*` is not valid) * RuntimeError (Mismatched delimiters)
def setPalette(self, palette):
    self._palette = XNodePalette(palette) if palette is not None else None
    self.setDirty()
Sets the palette for this node to the given palette. If None is provided, then the scene's palette will be used for this node.

:param palette | <XNodePalette> || None
def _unpack_batch_response(response):
    parser = Parser()
    message = _generate_faux_mime_message(parser, response)
    if not isinstance(message._payload, list):
        raise ValueError("Bad response: not multi-part")
    for subrequest in message._payload:
        status_line, rest = subrequest._payload.split("\n", 1)
        _, status, _ = status_line.split(" ", 2)
        sub_message = parser.parsestr(rest)
        payload = sub_message._payload
        msg_headers = dict(sub_message._headers)
        content_id = msg_headers.get("Content-ID")
        subresponse = requests.Response()
        subresponse.request = requests.Request(
            method="BATCH",
            url="contentid://{}".format(content_id)
        ).prepare()
        subresponse.status_code = int(status)
        subresponse.headers.update(msg_headers)
        subresponse._content = payload.encode("utf-8")
        yield subresponse
Convert requests.Response -> [(headers, payload)]. Creates a generator of tuples of emulating the responses to :meth:`requests.Session.request`. :type response: :class:`requests.Response` :param response: HTTP response / headers from a request.
def gen_rupture_getters(dstore, slc=slice(None), concurrent_tasks=1,
                        hdf5cache=None):
    if dstore.parent:
        dstore = dstore.parent
    # Dataset keys and the group-by field reconstructed from the openquake
    # datastore layout.
    csm_info = dstore['csm_info']
    trt_by_grp = csm_info.grp_by("trt")
    samples = csm_info.get_samples_by_grp()
    rlzs_by_gsim = csm_info.get_rlzs_by_gsim_grp()
    rup_array = dstore['ruptures'][slc]
    maxweight = numpy.ceil(len(rup_array) / (concurrent_tasks or 1))
    nr, ne = 0, 0
    for grp_id, arr in general.group_array(rup_array, 'grp_id').items():
        if not rlzs_by_gsim[grp_id]:
            continue
        for block in general.block_splitter(arr, maxweight):
            rgetter = RuptureGetter(
                hdf5cache or dstore.filename, numpy.array(block), grp_id,
                trt_by_grp[grp_id], samples[grp_id], rlzs_by_gsim[grp_id])
            rgetter.weight = getattr(block, 'weight', len(block))
            yield rgetter
            nr += len(block)
            ne += rgetter.num_events
    logging.info('Read %d ruptures and %d events', nr, ne)  # message text reconstructed
:yields: RuptureGetters
def __make_http_query(self, params, topkey=''):
    # The quoted bracket literals ('[', ']') and the '&' check were lost in
    # extraction and are reconstructed from the URL-encoding semantics.
    if len(params) == 0:
        return ""
    result = ""
    if type(params) is dict:
        for key in params.keys():
            newkey = quote(key)
            if topkey != '':
                newkey = topkey + quote('[' + key + ']')
            if type(params[key]) is dict:
                result += self.__make_http_query(params[key], newkey)
            elif type(params[key]) is list:
                i = 0
                for val in params[key]:
                    if type(val) is dict:
                        result += self.__make_http_query(
                            val, newkey + quote('[' + str(i) + ']'))
                    else:
                        result += newkey + \
                            quote('[' + str(i) + ']') + "=" + \
                            quote(str(val)) + "&"
                    i = i + 1
            elif type(params[key]) is bool:
                result += newkey + "=" + \
                    quote(str(params[key]).lower()) + "&"
            else:
                result += newkey + "=" + quote(str(params[key])) + "&"
    if result and (topkey == '') and (result[-1] == '&'):
        result = result[:-1]
    return result
Function to convert params into a URL-encoded query string. :param dict params: JSON dict sent by Authy. :param string topkey: params key :return string: URL-encoded query string.
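To illustrate the PHP-style bracket notation the builder emits, a hedged sketch (called from inside the owning class; the expected output is shown with brackets unencoded for readability):

params = {"user": {"name": "Bob", "roles": ["admin", "ops"]}, "dry_run": True}
query = self.__make_http_query(params)
# Expected shape (quote() percent-encodes the brackets):
#   user[name]=Bob&user[roles][0]=admin&user[roles][1]=ops&dry_run=true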
def zero_nan(self, *cols):
    if len(cols) == 0:
        self.warning("Can not convert zero values to nan: "
                     "no column name provided")
        return  # nothing to do without column names
    df = self._zero_nan(*cols)
    if df is None:
        self.err("Can not fill zero values with nan")
        return
    self.df = df
Converts zero values to nan values in the selected columns :param \*cols: names of the columns :type \*cols: str, at least one :example: ``ds.zero_nan("mycol1", "mycol2")``
def __calc_signed_volume(triangle): v321 = triangle[2][0] * triangle[1][1] * triangle[0][2] v231 = triangle[1][0] * triangle[2][1] * triangle[0][2] v312 = triangle[2][0] * triangle[0][1] * triangle[1][2] v132 = triangle[0][0] * triangle[2][1] * triangle[1][2] v213 = triangle[1][0] * triangle[0][1] * triangle[2][2] v123 = triangle[0][0] * triangle[1][1] * triangle[2][2] signed_volume = (-v321 + v231 + v312 - v132 - v213 + v123) / 6.0 return signed_volume
Calculate the signed volume of the given triangle, i.e. the signed volume of the tetrahedron it spans with the origin. :param triangle: list of three XYZ vertex lists :rtype: float
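The expression is the scalar triple product divided by six; summed over a closed, consistently oriented triangle mesh, these signed volumes give the enclosed mesh volume. A quick sanity check, assuming the helper is reachable as a plain module-level function:

# Tetrahedron with unit legs along the axes: volume should be 1/6
tri = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
assert abs(__calc_signed_volume(tri) - 1.0 / 6.0) < 1e-12

# Reversing the winding order flips the sign
assert abs(__calc_signed_volume(tri[::-1]) + 1.0 / 6.0) < 1e-12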
def getSwapStats(self, dev): if self._swapList is None: self._initSwapInfo() if dev in self._swapList: return self.getDevStats(dev) else: return None
Returns I/O stats for swap partition. @param dev: Device name for swap partition. @return: Dict of stats.
def search(cls, search_string, values_of='', group=whoosh.qparser.OrGroup,
           match_substrings=True, limit=None):
    index = Whooshee.get_or_create_index(_get_app(cls), cls)
    prepped_string = cls.prep_search_string(search_string, match_substrings)
    with index.searcher() as searcher:
        parser = whoosh.qparser.MultifieldParser(cls.schema.names(),
                                                 index.schema, group=group)
        query = parser.parse(prepped_string)
        results = searcher.search(query, limit=limit)
        if values_of:
            return [x[values_of] for x in results]
        return results
Searches the fields for given search_string. Returns the found records if 'values_of' is left empty, else the values of the given columns. :param search_string: The string to search for. :param values_of: If given, the method will not return the whole records, but only values of given column. Defaults to returning whole records. :param group: The whoosh group to use for searching. Defaults to :class:`whoosh.qparser.OrGroup` which searches for all words in all columns. :param match_substrings: ``True`` if you want to match substrings, ``False`` otherwise. :param limit: The number of the top records to be returned. Defaults to ``None`` and returns all records.
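A hedged usage sketch, assuming a whooshee-indexed model named `Entry` (hypothetical) with `title` and `content` fields:

# Whole records matching any of the words in any indexed column:
posts = Entry.search("python indexing")

# Only the titles of the five best matches, with substring matching:
titles = Entry.search("index", values_of="title", limit=5,
                      match_substrings=True)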
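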
def store_mapping(self, path): with open(path, "w") as writer: for key, value in self.mapping.iteritems(): writer.write("{}\t{}\n".format(key, value))
Store the current Id mappings into a TSV file.
def delete_files_or_folders(self, request, files_queryset, folders_queryset):
    opts = self.model._meta
    app_label = opts.app_label
    if not self.has_delete_permission(request):
        raise PermissionDenied
    current_folder = self._get_current_action_folder(
        request, files_queryset, folders_queryset)
    all_protected = []
    using = router.db_for_write(self.model)
    deletable_files, model_count_files, perms_needed_files, protected_files = get_deleted_objects(files_queryset, files_queryset.model._meta, request.user, self.admin_site, using)
    deletable_folders, model_count_folder, perms_needed_folders, protected_folders = get_deleted_objects(folders_queryset, folders_queryset.model._meta, request.user, self.admin_site, using)
    all_protected.extend(protected_files)
    all_protected.extend(protected_folders)
    all_deletable_objects = [deletable_files, deletable_folders]
    all_perms_needed = perms_needed_files.union(perms_needed_folders)
    # NOTE: the string literals below ('post', 'id', 'is_popup',
    # 'filer_admin_context', 'admin:index') were missing from the original
    # snippet and are reconstructed from Django admin conventions
    if request.POST.get('post'):
        if all_perms_needed:
            raise PermissionDenied
        n = files_queryset.count() + folders_queryset.count()
        if n:
            for f in files_queryset:
                self.log_deletion(request, f, force_text(f))
                f.delete()
            folder_ids = set()
            for folder in folders_queryset:
                folder_ids.add(folder.id)
                folder_ids.update(
                    folder.get_descendants().values_list('id', flat=True))
            for f in File.objects.filter(folder__in=folder_ids):
                self.log_deletion(request, f, force_text(f))
                f.delete()
            for f in folders_queryset:
                self.log_deletion(request, f, force_text(f))
                f.delete()
            self.message_user(request, _("Successfully deleted %(count)d files and/or folders.") % {"count": n, })
        return None
    if all_perms_needed or all_protected:
        title = _("Cannot delete files and/or folders")
    else:
        title = _("Are you sure?")
    context = self.admin_site.each_context(request)
    context.update({
        "title": title,
        "instance": current_folder,
        "breadcrumbs_action": _("Delete files and/or folders"),
        "deletable_objects": all_deletable_objects,
        "files_queryset": files_queryset,
        "folders_queryset": folders_queryset,
        "perms_lacking": all_perms_needed,
        "protected": all_protected,
        "opts": opts,
        'is_popup': popup_status(request),
        'filer_admin_context': AdminContext(request),
        "root_path": reverse('admin:index'),
        "app_label": app_label,
        "action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
    })
    return render(
        request,
        "admin/filer/delete_selected_files_confirmation.html",
        context
    )
Action which deletes the selected files and/or folders. This action first displays a confirmation page which shows all the deletable files and/or folders, or, if the user lacks permission on one of the related children (foreign keys), a "permission denied" message. Next, it deletes all selected files and/or folders and redirects back to the folder.
def b64_encode(self): encoded = _lib.NETSCAPE_SPKI_b64_encode(self._spki) result = _ffi.string(encoded) _lib.OPENSSL_free(encoded) return result
Generate a base64 encoded representation of this SPKI object. :return: The base64 encoded string. :rtype: :py:class:`bytes`
def set_password(self, raw_password): if raw_password is None: self.set_unusable_password() else: xmpp_backend.set_password(self.node, self.domain, raw_password)
Calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` for the user. If password is ``None``, calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_unusable_password`.
def enable_faulthandler(cls, signum=signal.SIGUSR1): with cls._lock: if not signum: cls._disable_faulthandler() return if not cls.file_handler or faulthandler is None: return cls.faulthandler_signum = signum dump_file = cls.file_handler.stream faulthandler.enable(file=dump_file, all_threads=True) faulthandler.register(signum, file=dump_file, all_threads=True, chain=False)
Enable dumping thread stack traces when the specified signal is received, similar to Java's handling of SIGQUIT. Note: this must be called from the surviving process in case of daemonization. Note that SIGQUIT does not work in all environments with a python process. :param int|None signum: Signal number to register for a full thread stack dump (use None to disable)
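A usage sketch (the class name `ProcessLogging` is a hypothetical stand-in for whatever class hosts this method): once enabled, sending the registered signal to the process dumps every thread's stack to the log file.

import signal

ProcessLogging.enable_faulthandler(signal.SIGUSR1)  # register the dump signal
# From a shell:  kill -USR1 <pid>   -> dumps all thread stacks to the log file
ProcessLogging.enable_faulthandler(None)            # disable again later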
def def_alignment(self, year):
    # NOTE: the selector and regex were missing from the original snippet;
    # the values below are hypothetical placeholders illustrating the intent
    # (matching an alignment like "4-3" or "3-4" in the scheme text)
    scheme_text = self._year_info_pq(year, 'Defensive Alignment').text()
    m = re.search(r'(\d-\d)', scheme_text, re.I)
    if m:
        return m.group(1)
    else:
        return None
Returns the name of the defensive alignment the team ran in the given year. :year: Int representing the season year. :returns: A string representing the defensive alignment.
def _inverse_lower_triangular(M): if M.get_shape().ndims != 3: raise ValueError("Number of dimensions for input is required to be 3.") D, N = tf.shape(M)[0], tf.shape(M)[1] I_DNN = tf.eye(N, dtype=M.dtype)[None, :, :] * tf.ones((D, 1, 1), dtype=M.dtype) return tf.matrix_triangular_solve(M, I_DNN)
Take inverse of lower triangular (e.g. Cholesky) matrix. This function broadcasts over the first index. :param M: Tensor with lower triangular structure of shape DxNxN :return: The inverse of the Cholesky decomposition. Same shape as input.
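The same batched inversion can be cross-checked in plain NumPy/SciPy, independently of the TensorFlow graph; the sketch below solves L X = I for each of the D stacked lower-triangular slices:

import numpy as np
from scipy.linalg import solve_triangular

D, N = 4, 3
rng = np.random.RandomState(0)
L = np.tril(rng.randn(D, N, N)) + 3.0 * np.eye(N)  # well-conditioned slices
L_inv = np.stack([solve_triangular(Li, np.eye(N), lower=True) for Li in L])
assert np.allclose(L_inv @ L, np.eye(N), atol=1e-10)  # broadcast identity check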
def extend(self, collection):
    l_ids = set(a.Id for a in self)
    for acces in collection:
        if acces.Id not in l_ids:
            list.append(self, acces)
            info = collection.get_info(Id=acces.Id)
            if info:
                self.infos[acces.Id] = info
Merges collections, ensuring uniqueness of Ids.
def draw_interface(objects, callback, callback_text):
    screen = curses.initscr()
    height, width = screen.getmaxyx()
    curses.noecho()
    curses.cbreak()
    curses.start_color()
    screen.keypad(1)
    curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_CYAN)
    highlightText = curses.color_pair(1)
    normalText = curses.A_NORMAL
    screen.border(0)
    curses.curs_set(0)
    max_row = height - 15
    box = curses.newwin(max_row + 2, int(width - 2), 1, 1)
    box.box()
    fmt = PartialFormatter()
    row_num = len(objects)
    pages = int(ceil(row_num / max_row))
    position = 1
    page = 1
    for i in range(1, max_row + 1):
        if row_num == 0:
            # NOTE: this message was garbled in the original snippet
            box.addstr(1, 1, "There aren't any strings", highlightText)
        else:
            # each object exposes a 'string' key (see docstring below)
            if (i + (max_row * (page - 1)) == position + (max_row * (page - 1))):
                box.addstr(i - (max_row * (page - 1)), 2, str(i) + " - " + objects[i - 1]['string'], highlightText)
            else:
                box.addstr(i - (max_row * (page - 1)), 2, str(i) + " - " + objects[i - 1]['string'], normalText)
            if i == row_num:
                break
    screen.refresh()
    box.refresh()
    x = screen.getch()
    curses.endwin()
    exit()
Draws an ncurses interface based on the given object list. Every object should have a "string" key, which is what's displayed on the screen; callback is called with the selected object. The rest of the code is modified from: https://stackoverflow.com/a/30834868
def create_user(**data):
    # NOTE: the key names, hash method and storage path were missing from the
    # original snippet; 'users.json' and 'sha256' are hypothetical placeholders
    if 'username' not in data or 'password' not in data:
        raise ValueError('username and password are required.')
    data['password'] = generate_password_hash(
        data.pop('password'), method='sha256')
    db_users = json.load(open('users.json'))
    db_users[data['username']] = data
    json.dump(db_users, open('users.json', 'w'))
    return data
Creates a user with an encrypted password.
def get_context(self, parent_context, data):
    if django.VERSION >= (1, 8):
        new_context = parent_context.new(data)
    else:
        # the settings keys mirror the attributes read from parent_context
        settings = {
            'autoescape': parent_context.autoescape,
            'current_app': parent_context.current_app,
            'use_l10n': parent_context.use_l10n,
            'use_tz': parent_context.use_tz,
        }
        new_context = Context(data, **settings)
    csrf_token = parent_context.get('csrf_token', None)
    if csrf_token is not None:
        new_context['csrf_token'] = csrf_token
    return new_context
Wrap the context data in a :class:`~django.template.Context` object. :param parent_context: The context of the parent template. :type parent_context: :class:`~django.template.Context` :param data: The result from :func:`get_context_data` :type data: dict :return: Context data. :rtype: :class:`~django.template.Context`
def stop_func_accept_retry_state(stop_func):
    if not six.callable(stop_func):
        return stop_func
    if func_takes_retry_state(stop_func):
        return stop_func

    @_utils.wraps(stop_func)
    def wrapped_stop_func(retry_state):
        # the deprecated call style passed (attempt_number, seconds) directly
        warn_about_non_retry_state_deprecation(
            'stop', stop_func, stacklevel=4)
        return stop_func(
            retry_state.attempt_number,
            retry_state.seconds_since_start,
        )
    return wrapped_stop_func
Wrap "stop" function to accept "retry_state" parameter.
def delete(self, url):
    # NOTE: the header names and the 401 payload were missing from the
    # original snippet; the values below are reconstructed guesses
    response = requests.delete(
        self.host + url,
        headers={
            'Authorization': 'Bearer ' + self.token,
            'x-falkonry-source': self.sourceHeader
        },
        verify=False
    )
    if response.status_code == 204:
        return None
    elif response.status_code == 401:
        raise Exception(json.dumps({'message': 'Unauthorized Access'}))
    else:
        raise Exception(response.content)
To make a DELETE request to Falkonry API server :param url: string
def _nsplit(self): north_pole_left = self.max_int(self.bits - 1) north_pole_right = 2 ** (self.bits - 1) straddling = False if self.upper_bound >= north_pole_right: if self.lower_bound > self.upper_bound: straddling = True elif self.lower_bound <= north_pole_left: straddling = True else: if self.lower_bound > self.upper_bound and self.lower_bound <= north_pole_left: straddling = True if straddling: a_upper_bound = north_pole_left - ((north_pole_left - self.lower_bound) % self.stride) a = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=self.lower_bound, upper_bound=a_upper_bound, uninitialized=self.uninitialized) b_lower_bound = a_upper_bound + self.stride b = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=b_lower_bound, upper_bound=self.upper_bound, uninitialized=self.uninitialized) return [ a, b ] else: return [ self.copy() ]
Split `self` at the north pole, which is the same as in signed arithmetic. :return: A list of split StridedIntervals
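To make the "north pole" concrete: in 8-bit arithmetic it sits between 0x7F (the largest signed-positive pattern) and 0x80 (the smallest signed-negative one). An interval that crosses that boundary must be cut in two; a plain-Python sketch of the straddle test for the non-wrapping case:

bits = 8
north_pole_left = 2 ** (bits - 1) - 1   # 0x7F
north_pole_right = 2 ** (bits - 1)      # 0x80

lower_bound, upper_bound = 0x70, 0x90
straddling = upper_bound >= north_pole_right and lower_bound <= north_pole_left
assert straddling  # split into [0x70, 0x7F] and [0x80, 0x90]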
def show_progress(self, n, total_runs):
    if self.report_progress:
        percentage, logger_name, log_level = self.report_progress
        # NOTE: the literal strings below ('print' and the format-string
        # fragments) were missing from the original snippet and are
        # reconstructed guesses
        if logger_name == 'print':
            logger = 'print'
        else:
            logger = logging.getLogger(logger_name)
        if n == -1:
            # width of the run counter, e.g. 3 digits for 100-999 runs
            digits = int(math.log10(total_runs + 0.1)) + 1
            self._format_string = 'PROGRESS: Finished %' + '%d' % digits + 'd/%d runs '
        fmt_string = self._format_string % (n + 1, total_runs) + '%s'
        reprint = log_level == 0
        progressbar(n, total_runs, percentage_step=percentage,
                    logger=logger, log_level=log_level,
                    fmt_string=fmt_string, reprint=reprint)
Displays a progress bar.
def getAnalysisRequestsBrains(self, **kwargs):
    # NOTE: the catalog index name was missing from the original snippet;
    # 'getBatchUID' is a reconstructed guess (the query filters by this batch)
    kwargs['getBatchUID'] = self.UID()
    catalog = getToolByName(self, CATALOG_ANALYSIS_REQUEST_LISTING)
    brains = catalog(kwargs)
    return brains
Return all the Analysis Requests brains linked to the Batch. kwargs are passed directly to the catalog.
def format_numeric_result(analysis, result, decimalmark='.', sciformat=1):
    try:
        result = float(result)
    except ValueError:
        return result
    if math.isnan(result):
        return result
    threshold = analysis.getExponentialFormatPrecision()
    precision = analysis.getPrecision(result)
    formatted = _format_decimal_or_sci(result, precision, threshold, sciformat)
    return formatDecimalMark(formatted, decimalmark)
Returns the formatted number part of a result value. This is responsible for deciding the precision and notation of numeric values, in accordance with the uncertainty. If a non-numeric result value is given, the value will be returned unchanged. The following rules apply if "Calculate precision from uncertainties" is enabled in the Analysis service: a) If the non-decimal number of digits of the result is above the service's ExponentialFormatPrecision, the result will be formatted in scientific notation. Example: Given an Analysis with an uncertainty of 37 for a range of results between 30000 and 40000, with an ExponentialFormatPrecision equal to 4 and a result of 32092, this method will return 3.2092E+04 b) If the number of digits of the integer part of the result is below the ExponentialFormatPrecision, the result will be formatted in decimal notation and the result will be rounded in accordance with the precision (calculated from the uncertainty). Example: Given an Analysis with an uncertainty of 0.22 for a range of results between 1 and 10, with an ExponentialFormatPrecision equal to 4 and a result of 5.234, this method will return 5.2 If "Calculate precision from Uncertainties" is disabled in the analysis service, the same rules described above apply, but the precision used for rounding the result is not calculated from the uncertainty; the fixed-length precision is used instead. For further details, visit https://jira.bikalabs.com/browse/LIMS-1334 The default decimal mark '.' will be replaced by the decimalmark specified. :param analysis: the analysis from which the uncertainty, precision and other additional info have to be retrieved :param result: result to be formatted; should be passed as a string to preserve the decimal precision :param decimalmark: decimal mark to use. By default '.' :param sciformat: 1. The sci notation has to be formatted as aE^+b 2. The sci notation has to be formatted as ax10^b 3. As 2, but with super html entity for exp 4. The sci notation has to be formatted as a·10^b 5. As 4, but with super html entity for exp By default 1 :returns: the formatted result as string
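The notation decision itself is easy to illustrate outside the LIMS objects; a simplified sketch of the rule (not the actual _format_decimal_or_sci implementation) that reproduces both examples from the docstring:

import math

def sketch_format(result, precision, threshold):
    # digits in the integer part of the result
    int_digits = int(math.floor(math.log10(abs(result)))) + 1 if result else 1
    if int_digits > threshold:
        return "{0:.{1}E}".format(result, precision)   # e.g. 3.2092E+04
    return "{0:.{1}f}".format(result, precision)       # e.g. 5.2

print(sketch_format(32092, 4, 4))  # -> 3.2092E+04 (5 digits > threshold 4)
print(sketch_format(5.234, 1, 4))  # -> 5.2        (1 digit <= threshold 4)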
def sync(self, hooks=True, async_hooks=True):
    active_repos = {}
    # NOTE: the permission key ('admin') and the dict keys below were missing
    # from the original snippet and are reconstructed from the attribute names
    github_repos = {repo.id: repo for repo in self.api.repositories()
                    if repo.permissions['admin']}
    for gh_repo_id, gh_repo in github_repos.items():
        active_repos[gh_repo_id] = {
            'id': gh_repo_id,
            'full_name': gh_repo.full_name,
            'description': gh_repo.description,
        }
    if hooks:
        self._sync_hooks(list(active_repos.keys()),
                         asynchronous=async_hooks)
    db_repos = Repository.query.filter(
        Repository.user_id == self.user_id,
        Repository.github_id.in_(github_repos.keys())
    )
    for repo in db_repos:
        gh_repo = github_repos.get(repo.github_id)
        if gh_repo and repo.name != gh_repo.full_name:
            repo.name = gh_repo.full_name
            db.session.add(repo)
    # release repositories that are no longer visible on GitHub
    Repository.query.filter(
        Repository.user_id == self.user_id,
        ~Repository.github_id.in_(github_repos.keys())
    ).update(dict(user_id=None, hook=None), synchronize_session=False)
    self.account.extra_data.update(dict(
        repos=active_repos,
        last_sync=iso_utcnow(),
    ))
    self.account.extra_data.changed()
    db.session.add(self.account)
Synchronize user repositories. :param bool hooks: True for syncing hooks. :param bool async_hooks: True for sending of an asynchronous task to sync hooks. .. note:: Syncing happens from GitHub's direction only. This means that we consider the information on GitHub as valid, and we overwrite our own state based on this information.
def transfer(ctx, _to='address', _value='uint256', returns=STATUS):
    # the parameter defaults above follow pyethereum's native-contract ABI
    # type-annotation convention; they were missing from the original snippet
    log.DEV('transfer called')  # hypothetical log message
    if ctx.accounts[ctx.msg_sender] >= _value:
        ctx.accounts[ctx.msg_sender] -= _value
        ctx.accounts[_to] += _value
        ctx.Transfer(ctx.msg_sender, _to, _value)
        return OK
    else:
        return INSUFFICIENTFUNDS
Standardized Contract API: function transfer(address _to, uint256 _value) returns (bool _success)
def has_unchecked_field(self, locator, **kwargs): kwargs["checked"] = False return self.has_selector("field", locator, **kwargs)
Checks if the page or current node has a radio button or checkbox with the given label, value, or id, that is currently unchecked. Args: locator (str): The label, name, or id of an unchecked field. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`. Returns: bool: Whether it exists.
def variance_inflation_factors(df):
    corr = np.corrcoef(df, rowvar=0)  # pairwise correlations of the columns
    corr_inv = np.linalg.inv(corr)    # precision matrix
    vifs = np.diagonal(corr_inv)      # VIF_i = [R^-1]_ii
    # the Series name was missing from the original snippet; 'VIF' is assumed
    return pd.Series(vifs, df.columns, name='VIF')
Computes the variance inflation factor (VIF) for each column in the df. Returns a pandas Series of VIFs Args: df: pandas DataFrame with columns to run diagnostics on
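A quick usage sketch: a column that is nearly a linear combination of the others gets a large VIF, while independent columns stay near 1 (the data below is made up for illustration):

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
df = pd.DataFrame({"a": rng.randn(200), "b": rng.randn(200)})
df["c"] = df["a"] + df["b"] + 0.01 * rng.randn(200)  # almost collinear

print(variance_inflation_factors(df))  # 'c' (and its parents) show inflated VIFs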
def _ParseEntryArrayObject(self, file_object, file_offset):
    # NOTE: the data type map name and error messages were missing from the
    # original snippet and are reconstructed from the surrounding conventions
    entry_array_object_map = self._GetDataTypeMap(
        'systemd_journal_entry_array_object')
    try:
        entry_array_object, _ = self._ReadStructureFromFileObject(
            file_object, file_offset, entry_array_object_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError((
            'Unable to parse entry array object at offset: 0x{0:08x} with '
            'error: {1!s}').format(file_offset, exception))
    if entry_array_object.object_type != self._OBJECT_TYPE_ENTRY_ARRAY:
        raise errors.ParseError('Unsupported object type: {0:d}.'.format(
            entry_array_object.object_type))
    if entry_array_object.object_flags != 0:
        raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(
            entry_array_object.object_flags))
    return entry_array_object
Parses an entry array object. Args: file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the entry array object relative to the start of the file-like object. Returns: systemd_journal_entry_array_object: entry array object. Raises: ParseError: if the entry array object cannot be parsed.
def weighted_n(self): if not self.is_weighted: return float(self.unweighted_n) return float(sum(self._cube_dict["result"]["measures"]["count"]["data"]))
float count of returned rows adjusted for weighting.
def get_execution_info(self, driver_id, function_descriptor): if self._worker.load_code_from_local: driver_id = ray.DriverID.nil() if not function_descriptor.is_actor_method(): self._load_function_from_local(driver_id, function_descriptor) else: with profiling.profile("wait_for_function"): self._wait_for_function(function_descriptor, driver_id) try: function_id = function_descriptor.function_id info = self._function_execution_info[driver_id][function_id] except KeyError as e: message = ("Error occurs in get_execution_info: " "driver_id: %s, function_descriptor: %s. Message: %s" % (driver_id, function_descriptor, e)) raise KeyError(message) return info
Get the FunctionExecutionInfo of a remote function. Args: driver_id: ID of the driver that the function belongs to. function_descriptor: The FunctionDescriptor of the function to get. Returns: A FunctionExecutionInfo object.
def start_replication(mysql_settings, binlog_pos_memory=(None, 2), **kwargs):
    # NOTE: the literal strings below (the error/log messages and the
    # 'connect_timeout', 'blocking', 'resume_stream', 'log_file', 'log_pos'
    # keys) were missing from the original snippet; the keyword names match
    # pymysql and pymysqlreplication.BinLogStreamReader
    if not isinstance(binlog_pos_memory, _bpm.BaseBinlogPosMemory):
        if not isinstance(binlog_pos_memory, (tuple, list)):
            raise ValueError('Invalid binlog position memory: %s'
                             % binlog_pos_memory)
        binlog_pos_memory = _bpm.FileBasedBinlogPosMemory(*binlog_pos_memory)
    mysql_settings.setdefault('connect_timeout', 5)
    kwargs.setdefault('blocking', True)
    kwargs.setdefault('resume_stream', True)
    with binlog_pos_memory:
        kwargs.setdefault('log_file', binlog_pos_memory.log_file)
        kwargs.setdefault('log_pos', binlog_pos_memory.log_pos)
        _logger.info('Start replication for %s with %s'
                     % (mysql_settings, kwargs))
        start_publishing(mysql_settings, **kwargs)
Start replication on server specified by *mysql_settings* Args: mysql_settings (dict): mysql settings that are used to connect to mysql via pymysql binlog_pos_memory (_bpm.BaseBinlogPosMemory): Binlog Position Memory; it should be an instance of a subclass of :py:class:`_bpm.BaseBinlogPosMemory`. If a tuple (str, float) is passed, it is used as the initialization parameters for the default :py:class:`_bpm.FileBasedBinlogPosMemory`. If the filename is None, it will be *`cwd`\mysqlbinlog2blinker.binlog.pos* **kwargs: any arguments that are accepted by :py:class:`pymysqlreplication.BinLogStreamReader`'s constructor
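A hedged usage sketch (connection values are placeholders): with a tuple for binlog_pos_memory, positions persist to the given file and are flushed every 2 seconds, so replication resumes from the stored position after a restart. server_id is BinLogStreamReader's identifier for this replica.

mysql_settings = {
    "host": "127.0.0.1",   # placeholder connection settings
    "port": 3306,
    "user": "repl",
    "passwd": "secret",
}

start_replication(mysql_settings, binlog_pos_memory=("binlog.pos", 2),
                  server_id=101)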
def find_file_in_zip(zip_file):
    # keep only entries with a .twb/.tds extension (see docstring below)
    candidate_files = filter(lambda x: x.split('.')[-1] in ('twb', 'tds'),
                             zip_file.namelist())
    for filename in candidate_files:
        with zip_file.open(filename) as xml_candidate:
            try:
                ET.parse(xml_candidate)
                return filename
            except ET.ParseError:
                # not a valid XML document; try the next candidate
                pass
Returns the twb/tds file from a Tableau packaged file format. Packaged files can contain cache entries which are also valid XML, so only look for files with a .tds or .twb extension.
def _configure_interrupt(self, function_name, timeout, container, is_debugging):
    def timer_handler():
        LOG.info("Execution of function %s was interrupted", function_name)
        self._container_manager.stop(container)

    def signal_handler(sig, frame):
        # NOTE: this handler was missing from the original snippet; it is
        # reconstructed to stop the container on Ctrl+C / SIGTERM
        LOG.info("Execution of function %s was interrupted", function_name)
        self._container_manager.stop(container)

    if is_debugging:
        LOG.debug("Setting up SIGTERM interrupt handler")
        signal.signal(signal.SIGTERM, signal_handler)
    else:
        timer = threading.Timer(timeout, timer_handler, ())
        timer.start()
        return timer
When a Lambda function is executing, we set up certain interrupt handlers to stop the execution. Usually, we set up a function timeout interrupt to kill the container after the timeout expires. If debugging though, we don't enforce a timeout; instead we set up a SIGTERM interrupt to catch Ctrl+C and terminate the container. :param string function_name: Name of the function we are running :param integer timeout: Timeout in seconds :param samcli.local.docker.container.Container container: Instance of a container to terminate :param bool is_debugging: Are we debugging? :return threading.Timer: Timer object, if we set up a timer. None otherwise
def fCPHASEs(self): return {tuple(es.targets): es.fCPHASE for es in self.edges_specs}
Get a dictionary of CPHASE fidelities (normalized to unity) from the specs, keyed by targets (qubit-qubit pairs). :return: A dictionary of CPHASE fidelities, normalized to unity. :rtype: Dict[tuple(int, int), float]
def error_codes(self): if self._error_codes is None: from .tcex_error_codes import TcExErrorCodes self._error_codes = TcExErrorCodes() return self._error_codes
ThreatConnect error codes.
def convert_tuple_type(cls, name, value):
    # field names follow the 'field_<index>' convention from the docstring
    names = ['field_' + str(i) for i in range(len(value))]
    values = [cls.convert_value(name, value)
              for name, value in zip(names, value)]
    return cls.generate_data_dict(names, values)
Converts a tuple to RECORD that contains n fields, each will be converted to its corresponding data type in bq and will be named 'field_<index>', where index is determined by the order of the tuple elements defined in cassandra.
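For example (hypothetical class name CassandraToBQ; the output shape assumes generate_data_dict zips names and values into a dict):

row_value = (42, "reading", 3.14)
record = CassandraToBQ.convert_tuple_type("sensor", row_value)
# -> {"field_0": 42, "field_1": "reading", "field_2": 3.14}
#    (each element converted via convert_value to its BigQuery type)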
def _check_vmware_player_requirements(self, player_version): player_version = int(player_version) if player_version < 6: raise VMwareError("Using VMware Player requires version 6 or above") elif player_version == 6: yield from self.check_vmrun_version(minimum_required_version="1.13.0") elif player_version == 7: yield from self.check_vmrun_version(minimum_required_version="1.14.0") elif player_version >= 12: yield from self.check_vmrun_version(minimum_required_version="1.15.0") self._host_type = "player"
Check minimum requirements to use VMware Player. VIX 1.13 was the release for Player 6. VIX 1.14 was the release for Player 7. VIX 1.15 was the release for Workstation Player 12. :param player_version: VMware Player major version.
def post(self, request, *args, **kwargs): return self.disapprove(request, *args, **kwargs)
Handles POST requests.
def rst(filename):
    # assume UTF-8 source files (the encoding literal was missing upstream)
    content = codecs.open(filename, encoding='utf-8').read()
    for regex, replacement in PYPI_RST_FILTERS:
        content = re.sub(regex, replacement, content)
    return content
Load rst file and sanitize it for PyPI. Remove unsupported github tags: - code-block directive - travis ci build badge
async def _get_smallest_env(self): async def slave_task(mgr_addr): r_manager = await self.env.connect(mgr_addr, timeout=TIMEOUT) ret = await r_manager.get_agents(addr=True) return mgr_addr, len(ret) sizes = await create_tasks(slave_task, self.addrs, flatten=False) return sorted(sizes, key=lambda x: x[1])[0][0]
Get address of the slave environment manager with the smallest number of agents.
def main():
    parser = argparse.ArgumentParser(
        description="[v" + __version__ + "] " + __doc__,
        prog="w3c_validator",
    )
    parser.add_argument(
        "--log", default="INFO",
        help="log level: DEBUG, INFO or ERROR (default: INFO)")
    parser.add_argument(
        "--version", action="version", version="%(prog)s " + __version__)
    parser.add_argument(
        "--verbose", help="increase output verbosity", action="store_true")
    parser.add_argument(
        "source", metavar="F", type=str, nargs="+", help="file or URL")
    args = parser.parse_args()
    logging.basicConfig(level=getattr(logging, args.log))
    LOGGER.info("Files to validate: \n {0}".format("\n ".join(args.source)))
    LOGGER.info("Number of files: {0}".format(len(args.source)))
    errors = 0
    warnings = 0
    for f in args.source:
        LOGGER.info("validating: %s ..." % f)
        retrys = 0
        while retrys < 2:
            result = validate(f, verbose=args.verbose)
            if result:
                break
            time.sleep(2)
            retrys += 1
            LOGGER.info("retrying: %s ..." % f)
        else:
            LOGGER.info("failed: %s" % f)
            errors += 1
            continue
        if f.endswith(".css"):
            errorcount = result["cssvalidation"]["result"]["errorcount"]
            warningcount = result["cssvalidation"]["result"]["warningcount"]
            errors += errorcount
            warnings += warningcount
            if errorcount > 0:
                LOGGER.info("errors: %d" % errorcount)
            if warningcount > 0:
                LOGGER.info("warnings: %d" % warningcount)
        else:
            for msg in result["messages"]:
                print_msg(msg)
                if msg["type"] == "error":
                    errors += 1
                else:
                    warnings += 1
    sys.exit(min(errors, 255))
Parse the command line and run the validator.