Dataset schema: content (string, lengths 22 to 815k), id (int64, values 0 to 4.91M)
def parse_metadata(metadata_filepath: Union[str, Path]) -> Dict:
    """Parse the metadata file retrieved from the BEACO2N site

    Args:
        metadata_filepath: Path of raw CSV metadata file
    Returns:
        dict: Dictionary of site metadata
    """
    metadata_filepath = Path(metadata_filepath).resolve()
    raw_metadata = pd.read_csv(metadata_filepath)

    site_metadata = aDict()
    try:
        for index, row in raw_metadata.iterrows():
            site_name = row["node_name_long"].lower().replace(" ", "")
            site_data = site_metadata[site_name]

            site_data["long_name"] = row["node_name_long"]
            site_data["id"] = row["id"]
            site_data["latitude"] = round(row["lat"], 5)
            site_data["longitude"] = round(row["lng"], 5)
            site_data["magl"] = check_nan(row["height_above_ground"])
            site_data["masl"] = check_nan(row["height_above_sea"])
            site_data["deployed"] = check_date(row["deployed"])
            site_data["node_folder_id"] = row["node_folder_id"]
    except Exception as e:
        raise ValueError(f"Can't read metadata file, please ensure it has expected columns. Error: {e}")

    # Convert to a normal dict
    metadata: Dict = site_metadata.to_dict()

    return metadata
100
def unit_norm(model, axis=0):
    """
    Constrains the weights incident to each hidden unit to have unit norm.

    Args:
        axis (int): axis along which to calculate weight norms.
        model: the model containing the weights on which the constraint is applied.
    """
    def apply_constraint(t: Tensor):
        w_data = None
        if isinstance(t, tf.Variable):
            w_data = t.value().detach()
        else:
            w_data = t.copy().detach()

        param_applied = w_data / (epsilon() + sqrt(reduce_sum(square(w_data), axis=axis, keepdims=True)))
        param_applied = param_applied.detach()
        return param_applied

    if is_tensor(model):
        model = apply_constraint(model)
    elif isinstance(model, Layer):
        for name, param in model.named_parameters():
            if 'bias' not in name and param is not None and param.trainable == True:
                param.assign(apply_constraint(param))
101
def responsive_units(spike_times, spike_clusters, event_times,
                     pre_time=[0.5, 0], post_time=[0, 0.5], alpha=0.05):
    """
    Determine responsive neurons by doing a Wilcoxon Signed-Rank test between a baseline period
    before a certain task event (e.g. stimulus onset) and a period after the task event.

    Parameters
    ----------
    spike_times : 1D array
        spike times (in seconds)
    spike_clusters : 1D array
        cluster ids corresponding to each event in `spikes`
    event_times : 1D array
        times (in seconds) of the events from the two groups
    pre_time : two-element array
        time (in seconds) preceding the event to get the baseline (e.g. [0.5, 0.2] would be a
        window starting 0.5 seconds before the event and ending at 0.2 seconds before the event)
    post_time : two-element array
        time (in seconds) to follow the event times
    alpha : float
        alpha to use for statistical significance

    Returns
    -------
    significant_units : ndarray
        an array with the indices of clusters that are significantly modulated
    stats : 1D array
        the statistic of the test that was performed
    p_values : ndarray
        the p-values of all the clusters
    cluster_ids : ndarray
        cluster ids of the p-values
    """

    # Get spike counts for baseline and event timewindow
    baseline_times = np.column_stack(((event_times - pre_time[0]), (event_times - pre_time[1])))
    baseline_counts, cluster_ids = get_spike_counts_in_bins(spike_times, spike_clusters,
                                                            baseline_times)
    times = np.column_stack(((event_times + post_time[0]), (event_times + post_time[1])))
    spike_counts, cluster_ids = get_spike_counts_in_bins(spike_times, spike_clusters, times)

    # Do statistics
    p_values = np.empty(spike_counts.shape[0])
    stats = np.empty(spike_counts.shape[0])
    for i in range(spike_counts.shape[0]):
        if np.sum(baseline_counts[i, :] - spike_counts[i, :]) == 0:
            p_values[i] = 1
            stats[i] = 0
        else:
            stats[i], p_values[i] = wilcoxon(baseline_counts[i, :], spike_counts[i, :])

    # Perform FDR correction for multiple testing
    sig_units, p_values, _, _ = multipletests(p_values, alpha, method='fdr_bh')
    significant_units = cluster_ids[sig_units]

    return significant_units, stats, p_values, cluster_ids
102
def create_link(seconds, image_name, size):
    """ Function returns temporary link to the image """
    token = signing.dumps([str(timezone.now() + timedelta(seconds=int(seconds))), image_name, size])
    return settings.SERVER_PATH + reverse("image:dynamic-image", kwargs={"token": token})
103
def test_paragraph_series_m_tb_ul_t_nl_ul_t_nl_ulb_nl_tb():
    """
    Test case:  Unordered list text newline unordered list (b) new line thematic break
    """

    # Arrange
    source_markdown = """- abc
- def
*
---
"""
    expected_tokens = [
        "[ulist(1,1):-::2:: ]",
        "[para(1,3):]",
        "[text(1,3):abc:]",
        "[end-para:::True]",
        "[li(2,1):2::]",
        "[para(2,3):\n]",
        "[text(2,3):def\n::\n]",
        "[text(3,1):*:]",
        "[end-para:::True]",
        "[end-ulist:::True]",
        "[tbreak(4,1):-::---]",
        "[BLANK(5,1):]",
    ]
    expected_gfm = """<ul>
<li>abc</li>
<li>def
*</li>
</ul>
<hr />"""

    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
104
def read_one_hot_labels(filename):
    """Read topic labels from file in one-hot form

    :param filename: name of input file
    :return: topic labels (one-hot DataFrame, M x N)
    """
    return pd.read_csv(filename, dtype=np.bool)
105
def make_randint_list(start, stop, length=10):
    """
    Makes a list of randomly generated integers

    Args:
        start: lowest integer to be generated randomly.
        stop: highest integer to be generated randomly.
        length: length of generated list.

    Returns:
        list of random numbers between start and stop of length length
    """
    return [randint(start, stop) for i in range(length)]
106
def merge(intervals: list[list[int]]) -> list[list[int]]:
    """Generate a new schedule with non-overlapping intervals by merging intervals which overlap

    Complexity:
        n = len(intervals)
        Time: O(nlogn) for the initial sort
        Space: O(n) for the worst case of no overlapping intervals

    Examples:
        >>> merge(intervals=[[1,3],[2,6],[8,10],[15,18]])
        [[1, 6], [8, 10], [15, 18]]
        >>> merge(intervals=[[1,4],[4,5]])
        [[1, 5]]
        >>> merge(intervals=[[1,4]])
        [[1, 4]]
    """
    ## EDGE CASES ##
    if len(intervals) <= 1:
        return intervals

    """ALGORITHM"""
    ## INITIALIZE VARS ##
    intervals.sort(key=lambda k: k[0])  # sort on start times
    # DS's/res
    merged_intervals = []

    # MERGE INTERVALS
    prev_interval, remaining_intervals = intervals[0], intervals[1:]
    for curr_interval in remaining_intervals:
        # if prev interval end >= curr interval start
        if prev_interval[1] >= curr_interval[0]:
            # adjust new prev interval
            prev_interval[1] = max(prev_interval[1], curr_interval[1])
        else:
            merged_intervals.append(prev_interval)
            prev_interval = curr_interval

    merged_intervals.append(prev_interval)

    return merged_intervals
107
def df_drop_duplicates(df, ignore_key_pattern="time"):
    """
    Drop duplicates from dataframe, ignoring columns whose keys contain the defined pattern.

    :param df:
    :param ignore_key_pattern:
    :return:
    """

    ks = df_drop_keys_contains(df, ignore_key_pattern)
    df = df.drop_duplicates(ks)
    return df
108
def get_mediawiki_flow_graph(limit, period):
    """
    :type limit int
    :type period int
    :rtype: list[dict]
    """
    # https://kibana5.wikia-inc.com/goto/e6ab16f694b625d5b87833ae794f5989
    # goreplay is running in RES (check SJC logs only)
    rows = ElasticsearchQuery(
        es_host=ELASTICSEARCH_HOST,
        period=period,
        index_prefix='logstash-mediawiki'
    ).query_by_string(
        query='"Wikia internal request" AND @fields.environment: "prod" '
              'AND @fields.datacenter: "sjc" '
              'AND @fields.http_url_path: *',
        fields=[
            '@context.source',
            '@fields.http_url_path',
        ],
        limit=limit
    )

    # extract required fields only
    # (u'user-permissions', 'api:query::users')
    # (u'1', 'nirvana:EmailControllerDiscussionReply::handle')
    rows = [
        (
            row.get('@context', {})['source'],
            normalize_mediawiki_url(row.get('@fields', {})['http_url_path'])
        )
        for row in rows
        if row.get('@context', {}).get('source') is not None
    ]

    # process the logs
    def _map(item):
        return '{}-{}'.format(item[0], item[1])

    def _reduce(items):
        first = items[0]
        source = first[0]
        target = first[1]

        return {
            'source': source if source != '1' else 'internal',
            'edge': 'http',
            'target': target,
            # the following is optional
            'metadata': '{:.3f} reqs per sec'.format(1. * len(items) / period)
        }

    return logs_map_and_reduce(rows, _map, _reduce)
109
def showsounding(ab2, rhoa, resp=None, mn2=None, islog=True, xlab=None):
    """
    Display a sounding curve (rhoa over ab/2) and an additional response.
    """
    if xlab is None:
        xlab = r'$\rho_a$ in $\Omega$m'

    ab2a = N.asarray(ab2)
    rhoa = N.asarray(rhoa)
    if mn2 is None:
        if islog:
            l1 = P.loglog(rhoa, ab2, 'rx-', label='observed')
        else:
            l1 = P.semilogy(rhoa, ab2, 'rx-', label='observed')

        P.hold(True)
        if resp is not None:
            if islog:
                l2 = P.loglog(resp, ab2, 'bo-', label='simulated')
            else:
                l2 = P.semilogy(resp, ab2, 'bo-', label='simulated')

            P.legend((l1, l2), ('obs', 'sim'), loc=0)
    else:
        for unmi in N.unique(mn2):
            if islog:
                l1 = P.loglog(rhoa[mn2 == unmi], ab2a[mn2 == unmi],
                              'rx-', label='observed')
            else:
                l1 = P.semilogy(rhoa[mn2 == unmi], ab2a[mn2 == unmi],
                                'rx-', label='observed')

            P.hold(True)
            if resp is not None:
                l2 = P.loglog(resp[mn2 == unmi], ab2a[mn2 == unmi],
                              'bo-', label='simulated')
                P.legend((l1, l2), ('obs', 'sim'))

    P.axis('tight')
    P.ylim((max(ab2), min(ab2)))
    locs = P.yticks()[0]
    if len(locs) < 2:
        locs = N.hstack((min(ab2), locs, max(ab2)))
    else:
        locs[0] = max(locs[0], min(ab2))
        locs[-1] = min(locs[-1], max(ab2))

    a = []
    for l in locs:
        a.append('%g' % rndig(l))

    P.yticks(locs, a)
    locs = P.xticks()[0]

    a = []
    for l in locs:
        a.append('%g' % rndig(l))

    P.xticks(locs, a)
    P.grid(which='both')
    P.xlabel(xlab)
    P.ylabel('AB/2 in m')
    # P.legend()
    P.show()
    return
110
def cfg_from_list(cfg_list):
    """Set config keys via list (e.g., from command line)."""
    from ast import literal_eval
    assert len(cfg_list) % 2 == 0
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = __C
        for subkey in key_list[:-1]:
            assert d.has_key(subkey)
            d = d[subkey]
        subkey = key_list[-1]
        assert d.has_key(subkey)
        try:
            value = literal_eval(v)
        except:
            # handle the case when v is a string literal
            value = v
        assert type(value) == type(d[subkey]), \
            'type {} does not match original type {}'.format(
                type(value), type(d[subkey]))
        d[subkey] = value
111
def bsplslib_Unperiodize(*args):
    """
    :param UDirection:
    :type UDirection: bool
    :param Degree:
    :type Degree: int
    :param Mults:
    :type Mults: TColStd_Array1OfInteger &
    :param Knots:
    :type Knots: TColStd_Array1OfReal &
    :param Poles:
    :type Poles: TColgp_Array2OfPnt
    :param Weights:
    :type Weights: TColStd_Array2OfReal &
    :param NewMults:
    :type NewMults: TColStd_Array1OfInteger &
    :param NewKnots:
    :type NewKnots: TColStd_Array1OfReal &
    :param NewPoles:
    :type NewPoles: TColgp_Array2OfPnt
    :param NewWeights:
    :type NewWeights: TColStd_Array2OfReal &
    :rtype: void
    """
    return _BSplSLib.bsplslib_Unperiodize(*args)
112
def genomic_del3_abs_37(genomic_del3_37_loc):
    """Create test fixture absolute copy number variation"""
    return {
        "type": "AbsoluteCopyNumber",
        "_id": "ga4gh:VAC.Pv9I4Dqk69w-tX0axaikVqid-pozxU74",
        "subject": genomic_del3_37_loc,
        "copies": {"type": "Number", "value": 2}
    }
113
def set_atommap(mol, num=0):
    """Set the atom map number for all atoms in the molecule.

    Parameters
    ----------
    mol : rdkit.Chem.rdchem.Mol
        A molecule.
    num : int
        The atom map number to set for all atoms. If 0, it will
        clear the atom map.
    """
    for atom in mol.GetAtoms():
        atom.SetAtomMapNum(num)
114
def write_data_to_file(training_data_files, out_file, image_shape, truth_dtype=np.uint8, subject_ids=None,
                       normalize=True, crop=True):
    """
    Takes in a set of training images and writes those images to an hdf5 file.
    :param training_data_files: List of tuples containing the training data files. The modalities should be listed in
    the same order in each tuple. The last item in each tuple must be the labeled image.
    Example: [('sub1-T1.nii.gz', 'sub1-T2.nii.gz', 'sub1-truth.nii.gz'),
              ('sub2-T1.nii.gz', 'sub2-T2.nii.gz', 'sub2-truth.nii.gz')]
    :param out_file: Where the hdf5 file will be written to.
    :param image_shape: Shape of the images that will be saved to the hdf5 file.
    :param truth_dtype: Default is 8-bit unsigned integer.
    :return: Location of the hdf5 file with the image data written to it.
    """
    n_samples = len(training_data_files)
    n_channels = len(training_data_files[0]) - 1  # n_channels is actually the number of modalities we have

    try:
        if not normalize:
            hdf5_file, data_storage, truth_storage, affine_storage, normalization_storage = \
                create_data_file(out_file, n_channels=n_channels, n_samples=n_samples, image_shape=image_shape,
                                 normalize=normalize, storage_names=('data', 'truth', 'index', 'normalization'))
        else:
            hdf5_file, data_storage, truth_storage, affine_storage = \
                create_data_file(out_file, n_channels=n_channels, n_samples=n_samples, image_shape=image_shape,
                                 normalize=normalize)
            normalization_storage = None
    except Exception as e:
        # If something goes wrong, delete the incomplete data file
        os.remove(out_file)
        raise e

    write_image_data_to_file(training_data_files, data_storage, truth_storage, image_shape,
                             truth_dtype=truth_dtype, n_channels=n_channels, affine_storage=affine_storage,
                             crop=crop, normalization_storage=normalization_storage)
    if subject_ids:
        hdf5_file.create_array(hdf5_file.root, 'subject_ids', obj=subject_ids)
    if normalize:
        normalize_data_storage(data_storage)
    hdf5_file.close()
    return out_file
115
def get_configinfo(env):
    """Returns a list of dictionaries containing the `name` and `options`
    of each configuration section. The value of `options` is a list of
    dictionaries containing the `name`, `value` and `modified` state of
    each configuration option. The `modified` value is True if the value
    differs from its default.

    :since: version 1.1.2
    """
    all_options = {}
    for (section, name), option in \
            Option.get_registry(env.compmgr).iteritems():
        all_options.setdefault(section, {})[name] = option
    sections = []
    for section in env.config.sections(env.compmgr):
        options = []
        for name, value in env.config.options(section, env.compmgr):
            registered = all_options.get(section, {}).get(name)
            if registered:
                default = registered.default
                normalized = registered.normalize(value)
            else:
                default = u''
                normalized = unicode(value)
            options.append({'name': name, 'value': value,
                            'modified': normalized != default})
        options.sort(key=lambda o: o['name'])
        sections.append({'name': section, 'options': options})
    sections.sort(key=lambda s: s['name'])
    return sections
116
def fabric_wired(ctx, obj):
    """DNA Center Fabric Wired API (version: 1.3.1).

    Wraps the DNA Center Fabric Wired API and exposes the API as native Python commands.
    """
    ctx.obj = obj.fabric_wired
117
def main():
    """ Run all the tests """
    files = utils.get_files(PATH)
    with open('results.txt', 'w') as f:
        f.write(f'{sys.version}\n')
        f.write(LINE)
        for exe in ['./sort1', './sort2', './sort3']:
            for file in files:
                cmd = subprocess.run(['time', exe, file], capture_output=True, text=True)
                if cmd.returncode != 0:
                    print("Error")
                    sys.exit(cmd.returncode)
                time = PATTERN.findall(cmd.stderr)[0]
                f.write(f'time {exe} {file} : {time}\n')
            f.write(LINE)
118
def given_energy(n, ef_energy):
    """
    Calculate and return the given energy from the supplied parameter values.

    How to use: pass arguments for the ef_energy and n parameters
    (keyword arguments are recommended for readability).

    Parameters:
        ef_energy (int): effective energy in Joule
        n (int): efficiency

    Returns:
        int: the value of given energy in Joule
    """
    gv_energy = ef_energy / n
    return gv_energy
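A quick worked check of the formula (numbers are illustrative, not from the original project): 400 J of effective energy at an efficiency of 0.8 requires 400 / 0.8 = 500 J of given energy.

# Illustrative usage of given_energy; keyword arguments, as the docstring recommends.
assert given_energy(n=0.8, ef_energy=400) == 500.0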
119
def departures(stop: location.Stop, day: date, fname: str, days: int = 1) -> None:
    """Departures from a given stop for a given date (range)"""
    servinglines = set((c.line, c.id) for c in stop.courses)
    deps: List[Tuple[datetime, schedule.TripStop]] = []

    for currdate in (day + timedelta(days=n) for n in range(days)):
        tripquery = schedule.Trip.query_for_date(stop._session, currdate) \
            .filter(
                (schedule.Trip.version_id == stop.version_id)
                & tuple_(schedule.Trip.line, schedule.Trip.course_id).in_(servinglines)
            ).options(
                load_only('version_id'), load_only('line'), load_only('id'),
                load_only('course_id'), load_only('line_dir'), load_only('timing_group'),
                load_only('departure_time'), load_only('arr_stop_id'),
                joinedload('course', innerjoin=True).load_only('version_id').load_only('line')
                    .load_only('id').load_only('line_dir').load_only('name'),
                joinedload('arr_stop').load_only('name')
            )

        trips = [(t, t.trip_stops(simple=True)) for t in tripquery.all()]
        # todo: we might be able to skip the trip_stops call at some point.
        for trip, tripstops in trips:
            deps.extend(
                (datetime.combine(currdate, datetime.min.time()) + ts.dep_time, ts)
                for n, ts in filter(
                    lambda n_ts: n_ts[1].stop == stop and n_ts[0] != len(tripstops),
                    enumerate(tripstops, start=1)
                ))

    rows = [("date", "time", "plat", "linenum", "direction")]
    rows.extend(
        (
            deptime.strftime("%Y-%m-%d"),
            deptime.strftime("%H:%M:%S"),
            ts.stop_point.name if ts.stop_point is not None else "",
            ts.trip.course.name,
            ts.trip.arr_stop.name
        ) for deptime, ts in sorted(deps, key=lambda d: d[0]))

    with open(fname, 'w', encoding='utf-8') as f:
        writer(f, delimiter=";", lineterminator='\n').writerows(rows)
120
def dt_getreferences_rmap_na():
    """
    >>> old_state = test_config.setup(cache=None, url="https://jwst-crds-dev.stsci.edu")
    >>> os.environ["CRDS_MAPPATH_SINGLE"] = test_config.TEST_DATA

    >>> heavy_client.getreferences({"META.INSTRUMENT.NAME":"NIRISS", "META.INSTRUMENT.DETECTOR":"NIS", "META.INSTRUMENT.FILTER":"BOGUS2"},
    ...    observatory="jwst", context="jwst_na_omit.pmap", ignore_cache=False)
    {'flat': 'NOT FOUND n/a'}

    >>> test_config.cleanup(old_state)

    >> config.get_crds_state()
    """
121
def sequence_sigmoid_cross_entropy(labels,
                                   logits,
                                   sequence_length,
                                   average_across_batch=True,
                                   average_across_timesteps=False,
                                   average_across_classes=True,
                                   sum_over_batch=False,
                                   sum_over_timesteps=True,
                                   sum_over_classes=False,
                                   time_major=False,
                                   stop_gradient_to_label=False,
                                   name=None):
    """Computes sigmoid cross entropy for each time step of sequence
    predictions.

    Args:
        labels: Target class distributions.

            - If :attr:`time_major` is `False` (default), this must be a\
            Tensor of shape `[batch_size, max_time(, num_classes)]`.

            - If `time_major` is `True`, this must be a Tensor of shape\
            `[max_time, batch_size(, num_classes)]`.

            Each row of `labels` should be a valid probability distribution,
            otherwise, the computation of the gradient will be incorrect.
        logits: Unscaled log probabilities having the same shape as with
            :attr:`labels`.
        sequence_length: A Tensor of shape `[batch_size]`. Time steps beyond
            the respective sequence lengths will have zero losses.
        average_across_timesteps (bool): If set, average the loss across
            the time dimension. Must not set `average_across_timesteps`
            and `sum_over_timesteps` at the same time.
        average_across_batch (bool): If set, average the loss across the
            batch dimension. Must not set `average_across_batch`'
            and `sum_over_batch` at the same time.
        average_across_classes (bool): If set, average the loss across the
            class dimension (if exists). Must not set
            `average_across_classes`' and `sum_over_classes` at
            the same time. Ignored if :attr:`logits` is a 2D Tensor.
        sum_over_timesteps (bool): If set, sum the loss across the
            time dimension. Must not set `average_across_timesteps`
            and `sum_over_timesteps` at the same time.
        sum_over_batch (bool): If set, sum the loss across the
            batch dimension. Must not set `average_across_batch`
            and `sum_over_batch` at the same time.
        sum_over_classes (bool): If set, sum the loss across the
            class dimension. Must not set `average_across_classes`
            and `sum_over_classes` at the same time. Ignored if
            :attr:`logits` is a 2D Tensor.
        time_major (bool): The shape format of the inputs. If `True`,
            :attr:`labels` and :attr:`logits` must have shape
            `[max_time, batch_size, ...]`. If `False` (default), they must
            have shape `[batch_size, max_time, ...]`.
        stop_gradient_to_label (bool): If set, gradient propagation to
            :attr:`labels` will be disabled.
        name (str, optional): A name for the operation.

    Returns:
        A Tensor containing the loss, of rank 0, 1, or 2 depending on the
        arguments
        :attr:`{average_across}/{sum_over}_{timesteps}/{batch}/{classes}`.
        For example, if the class dimension does not exist, and

        - If :attr:`sum_over_timesteps` and :attr:`average_across_batch`  \
        are `True` (default), the return Tensor is of rank 0.

        - If :attr:`average_across_batch` is `True` and other arguments are \
        `False`, the return Tensor is of shape `[max_time]`.
    """
    with tf.name_scope(name, "sequence_sigmoid_cross_entropy"):
        if stop_gradient_to_label:
            labels = tf.stop_gradient(labels)

        losses = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=labels, logits=logits)

        rank = shapes.get_rank(logits) or shapes.get_rank(labels)
        if rank is None:
            raise ValueError(
                'Cannot determine the rank of `logits` or `labels`.')

        losses = mask_and_reduce(
            losses,
            sequence_length,
            rank=rank,
            average_across_batch=average_across_batch,
            average_across_timesteps=average_across_timesteps,
            average_across_remaining=average_across_classes,
            sum_over_batch=sum_over_batch,
            sum_over_timesteps=sum_over_timesteps,
            sum_over_remaining=sum_over_classes,
            time_major=time_major)

        return losses
122
def stats(func):
    """Stats printing and exception handling decorator"""
    def inner(*args):
        try:
            code, decoded, res = func(*args)
        except ValueError as err:
            print(err)
        else:
            if FORMATTING:
                code_length = 0
                for el in code:
                    code_length += len(el)
                compression_rate = 24 * img.shape[0] * img.shape[1] / code_length
                print(f"Code length: {code_length}")
            else:
                compression_rate = 24 * img.shape[0] * img.shape[1] / len(code)
                code_length = len(code)
                print(f"Code length: {code_length}")

            # Convert RGB to YCbCr
            color_conv = RGBYCbCr()
            img_ycbcr = color_conv.forward(img)
            decoded_ycbcr = color_conv.forward(decoded)

            # Calculate MSE and PSNR, Y:U:V = 6:1:1
            MSE_y = ((img_ycbcr[:, :, 0].astype(int) - decoded_ycbcr[:, :, 0].astype(int)) ** 2).mean()
            MSE_u = ((img_ycbcr[:, :, 1].astype(int) - decoded_ycbcr[:, :, 1].astype(int)) ** 2).mean()
            MSE_v = ((img_ycbcr[:, :, 2].astype(int) - decoded_ycbcr[:, :, 2].astype(int)) ** 2).mean()

            PSNR_y = 10 * math.log10((255 * 255) / MSE_y)
            PSNR_u = 10 * math.log10((255 * 255) / MSE_u)
            PSNR_v = 10 * math.log10((255 * 255) / MSE_v)
            PSNR = (PSNR_y * 6 + PSNR_u + PSNR_v) / 8

            # Call the functions of SSIM, MS-SSIM, VIF
            D_1 = SSIM(channels=1)
            D_2 = MS_SSIM(channels=1)
            D_3 = VIFs(channels=3)  # spatial domain VIF

            # To get 4-dimension torch tensors, (N, 3, H, W), divide by 255 to let the range between (0,1)
            torch_decoded = torch.FloatTensor(decoded.astype(int).swapaxes(0, 2).swapaxes(1, 2)).unsqueeze(0) / 255
            torch_img = torch.FloatTensor(img.astype(int).swapaxes(0, 2).swapaxes(1, 2)).unsqueeze(0) / 255
            torch_decoded_ycbcr = torch.FloatTensor(decoded_ycbcr.astype(int).swapaxes(0, 2).swapaxes(1, 2)).unsqueeze(0) / 255
            torch_img_ycbcr = torch.FloatTensor(img_ycbcr.astype(int).swapaxes(0, 2).swapaxes(1, 2)).unsqueeze(0) / 255

            # Calculate SSIM, MS-SSIM, VIF
            # SSIM on luma channel
            SSIM_value = D_1(torch_decoded_ycbcr[:, [0], :, :], torch_img_ycbcr[:, [0], :, :], as_loss=False)
            # MS-SSIM on luma channel
            MS_SSIM_value = D_2(torch_decoded_ycbcr[:, [0], :, :], torch_img_ycbcr[:, [0], :, :], as_loss=False)
            # VIF on spatial domain
            VIF_value = D_3(torch_decoded, torch_img, as_loss=False)
            # print(D_3(torch_img, torch_img, as_loss=False))

            # Print out the results
            # print(f"Mean squared error: {MSE}")
            print(f"General PSNR: {PSNR}")
            print(f"SSIM: {SSIM_value}")
            print(f"MS_SSIM: {MS_SSIM_value}")
            print(f"VIF: {VIF_value}")
            print(f"Compression rate: {compression_rate} bits/nt")

            # plt.imshow(decoded)
            # plt.show()
            # io.imsave(str(compression_rate) + ".png", decoded)
            return code, decoded, res, compression_rate, PSNR, SSIM_value, MS_SSIM_value, VIF_value
    return inner
123
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    unload_ok = all(
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_unload(entry, platform)
                for platform in PLATFORMS
            ]
        )
    )
    if unload_ok:
        config_data = hass.data[DOMAIN].pop(entry.entry_id)
        await config_data[CONF_CLIENT].async_client_close()

    return unload_ok
124
def get_ucp_worker_info():
    """Gets information on the current UCX worker, obtained from
    `ucp_worker_print_info`.
    """
    return _get_ctx().ucp_worker_info()
125
def check_can_collect_payment(id):
    """
    Check if participant can collect payment, which is true if:
    - They have been signed up for a year
    - They have never collected payment before or their last collection was more than 5 months ago
    """
    select = "SELECT time_sign_up FROM SESSION_INFO WHERE user_id = (%s)"
    time_sign_up = db.execute(select, (id,), 1)
    one_year_after_sign_up = time_sign_up[0][0] + timedelta(weeks=43)

    select = "SELECT date_collected,next_collection from TASK_COMPLETED WHERE user_id = (%s)"
    date_collected = db.execute(select, (id,), 1)

    can_collect_payment = False
    #if one_year_after_sign_up < datetime.now() and user_type and next_collection[0][0] and next_collection[0][0] < datetime.now():
    if one_year_after_sign_up < datetime.now() and len(date_collected) >= 1 and (date_collected[0][0] == None or date_collected[0][0] < (datetime.now() - timedelta(weeks=22))):
        can_collect_payment = True
        date_collected = date_collected[0][0]
    elif len(date_collected) > 1:
        date_collected = date_collected[0][0]

    return (can_collect_payment, date_collected, time_sign_up)
126
def plus_tensor(wx, wy, wz=np.array([0, 0, 1])):
    """Calculate the plus polarization tensor for some basis.

    c.f. eq. 2 of https://arxiv.org/pdf/1710.03794.pdf
    """
    e_plus = np.outer(wx, wx) - np.outer(wy, wy)
    return e_plus
127
def duplicate_objects(dup_infos):
    """Duplicate an object with optional transformations.

    Args:
        dup_infos (list[dict]): A list of duplication infos. Each info is a
            dictionary, containing the following data:
            original (str): Name of the object to duplicate.
            name (str): Desired name for the duplicate.
            translation (f,f,f): Translation float tuple or None if not to change.
            rotation (f,f,f): Rotation float tuple or None if not to change.
            scale (f,f,f): 3d scale float tuple or None if not to change.

    Returns:
        list[tuple (str, str)]: The first element of each tuple contains the
            return 'code' of the operation, which can be
            - 'Ok' If no problem occured.
            - 'NotFound' If the original could not be found.
            - 'Renamed' If the name was changed by the editor.
            - 'Failed' If something else problematic happened.
            The second element is None, unless the editor 'Renamed' the object,
            in which case it contains the editor-assigned name.
            If the return value is 'Renamed', the calling function must assign
            the returned name to the original object in the Program or find a
            new fitting name and assign it to the duplicated object using the
            :func:`renameObject` function with the returned string as name.

    .. seealso:: :func:`renameObject` :func:`getFreeName`
    """
    infos_str = json.dumps(dup_infos)
    msg = "DuplicateObjects " + infos_str
    result = connection.send_message(msg)
    results = json.loads(result)  # parse the JSON reply from the editor
    return results
128
def determineOptimalStartingPoint(options, logger_proxy, logging_mutex):
    """
    If options.checkpoint is set to -1 then operations will start from the beginning but will skip steps to regenerate files
    For all other values of options.checkpoint, pre-generated data will be overwritten
    """
    flag = checkForNewData(options)
    # options.checkpoint = 0 # Nothing will be deleted and nothing will be regenerated. Computation will start from the beginning and skip the steps already computed.
    if flag == 2:
        pass  # No new RNA-Seq samples. PsiCLASS, FINDER and BRAKER2 runs have completed successfully. No need to change `options.checkpoint`, can go ahead with user request
    elif flag == 3:  # No new RNA-Seq samples, PsiCLASS, FINDER done but BRAKER NOT done.
        if options.checkpoint > 4:
            options.checkpoint = 4
    elif flag == 4:  # No new RNA-Seq samples, PsiCLASS done but FINDER and BRAKER NOT done
        if options.checkpoint > 3:
            options.checkpoint = 3
    elif flag == 5:
        if options.checkpoint > 2:
            options.checkpoint = 2

    if options.checkpoint == 0:
        pass  # Do Nothing
    elif options.checkpoint == 1:  # Align reads to reference genome (Will trigger removal of all alignments and start from beginning)
        os.system(f"rm -rf {options.output_star}/*")  # Delete contents of alignment folder
        os.system(f"rm -rf {options.output_assemblies_psiclass_terminal_exon_length_modified}/*")  # Delete contents of assembly folder
        os.system(f"rm -rf {options.output_braker}/*")  # Delete contents of BRAKER2 folder
        os.system(f"rm -rf {options.output_assemblies_psiclass_terminal_exon_length_modified}/braker.gtf")
    elif options.checkpoint == 2:  # Assemble with PsiCLASS (Will remove all assemblies)
        os.system(f"rm -rf {options.output_star}/*_counts_all_info.pkl")  # Removing genomic read counts file
        os.system(f"rm -rf {options.output_assemblies_psiclass_terminal_exon_length_modified}/*")  # Delete contents of assembly folder
        os.system(f"rm -rf {options.output_braker}/*")  # Delete contents of BRAKER2 folder
        os.system(f"rm -rf {options.output_assemblies_psiclass_terminal_exon_length_modified}/braker.gtf")
    elif options.checkpoint == 3:  # Find genes with FINDER (entails changepoint detection)
        os.system(f"rm -rf {options.output_star}/*_counts_all_info.pkl")  # Removing genomic read counts file
        os.system(f"rm -rf {options.output_assemblies_psiclass_terminal_exon_length_modified}/combined/combined_*gtf")  # Delete contents of assembly folder
        os.system(f"rm -rf {options.output_braker}/*")  # Delete contents of BRAKER2 folder
        os.system(f"rm -rf {options.output_assemblies_psiclass_terminal_exon_length_modified}/braker.gtf")
    elif options.checkpoint == 4:  # Predict genes using BRAKER2
        os.system(f"rm -rf {options.output_braker}/*")  # Delete contents of BRAKER2 folder
        os.system(f"rm -rf {options.output_assemblies_psiclass_terminal_exon_length_modified}/combined/combined_with_CDS*.gtf")
        os.system(f"rm -rf {options.output_assemblies_psiclass_terminal_exon_length_modified}/braker.gtf")
    elif options.checkpoint == 5:  # Annotate coding regions
        os.system(f"rm -rf {options.output_assemblies_psiclass_terminal_exon_length_modified}/combined/combined_with_CDS*.gtf")
    elif options.checkpoint == 6:  # Merge FINDER annotations with BRAKER2 predictions and protein sequences
        os.system(f"rm -rf {options.output_assemblies_psiclass_terminal_exon_length_modified}/combined/combined_with_CDS_*.gtf")
        os.system(f"rm -rf {options.output_assemblies_psiclass_terminal_exon_length_modified}/combined/FINDER_BRAKER_PROT.gtf")

    with logging_mutex:
        logger_proxy.info(f"Starting FINDER from {options.checkpoint} checkpoint")
129
def _list_data_objects(request, model, serializer):
    """a factory method for querying and receiving database objects"""
    obj = model.objects.all()
    ser = serializer(obj, many=True)
    return Response(ser.data, status=status.HTTP_200_OK)
130
def load_conf(file='./config', section='SYNTH_DATA'):
    """load configuration

    Args:
        file (str, optional): path to conf file. Defaults to './config'.
        section (str, optional): name of section. Defaults to 'SYNTH_DATA'.

    Returns:
        dict: params of configuration
    """
    log_message('Load configuration.')
    config = configparser.ConfigParser()
    resource = config.read(file)
    if 0 == len(resource):
        log_message('Error: cannot read configuration file.')
        exit(1)

    params = {}
    options = config.options(section)
    for opt in options:
        params[opt] = config.get(section, opt)
        log_message(' - %s: %s' % (opt, params[opt]))
    return params
131
def plot_proper_motion(df):
    """Plot proper motion.

    df: DataFrame with `pm_phi1` and `pm_phi2`
    """
    x = df['pm_phi1']
    y = df['pm_phi2']
    plt.plot(x, y, 'ko', markersize=0.3, alpha=0.3)

    plt.xlabel('Proper motion phi1 (mas/yr)')
    plt.ylabel('Proper motion phi2 (mas/yr)')

    plt.xlim(-12, 8)
    plt.ylim(-10, 10)
132
def consume(context, state):
    """
    *musicpd.org, playback section:*

        ``consume {STATE}``

        Sets consume state to ``STATE``, ``STATE`` should be 0 or 1.
        When consume is activated, each song played is removed from playlist.
    """
    if int(state):
        context.core.tracklist.consume = True
    else:
        context.core.tracklist.consume = False
133
def distance(a, b):
    """
    Computes the Euclidean distance between two 2D points.

    :param a: first point as an (x, y) pair
    :param b: second point as an (x, y) pair
    :return: the distance between a and b
    """
    x = a[0] - b[0]
    y = a[1] - b[1]
    return math.sqrt(x ** 2 + y ** 2)
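A minimal usage check (the points are arbitrary examples): a 3-4-5 right triangle gives a distance of exactly 5.

import math  # distance() relies on math.sqrt

assert distance((0, 0), (3, 4)) == 5.0
assert math.isclose(distance((1, 1), (2, 2)), math.sqrt(2))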
134
def test_immunization_3(base_settings):
    """No. 3 tests collection for Immunization.
    Test File: immunization-example-refused.json
    """
    filename = base_settings["unittest_data_dir"] / "immunization-example-refused.json"
    inst = immunization.Immunization.parse_file(
        filename, content_type="application/json", encoding="utf-8"
    )
    assert "Immunization" == inst.resource_type

    impl_immunization_3(inst)

    # testing reverse by generating data from itself and create again.
    data = inst.dict()
    assert "Immunization" == data["resourceType"]

    inst2 = immunization.Immunization(**data)
    impl_immunization_3(inst2)
135
def approve_pipelines_for_publishing(pipeline_ids):  # noqa: E501
    """approve_pipelines_for_publishing

    :param pipeline_ids: Array of pipeline IDs to be approved for publishing.
    :type pipeline_ids: List[str]

    :rtype: None
    """
    pipe_exts: [ApiPipelineExtension] = load_data(ApiPipelineExtension)
    pipe_ext_ids = {p.id for p in pipe_exts}
    missing_pipe_ext_ids = set(pipeline_ids) - pipe_ext_ids

    for id in missing_pipe_ext_ids:
        store_data(ApiPipelineExtension(id=id))

    update_multiple(ApiPipelineExtension, [], "publish_approved", False)

    if pipeline_ids:
        update_multiple(ApiPipelineExtension, pipeline_ids, "publish_approved", True)

    return None, 200
136
def test_k8s_plugin_gets_raw_metrics_empty(postfix, mocked_http):
    """
    K8sCollector returns an empty dict in case of problems.

    GIVEN: K8s stats server won't return a JSON response for some reason
        (Unauthorized, exceptions, internal errors, etc).
    WHEN: K8sCollector method `_get_raw_metrics` is used.
    THEN: It returns an empty dict.
    """
    url = f"https://10.1.18.1:10250/stats/{postfix}"
    cert = ("client.crt", "client.key")

    result = K8sCollector._get_raw_metrics(url, cert)

    assert result == {}
137
def make_tokenizer_module(tokenizer):
    """tokenizer module"""
    tokenizers = {}
    cursors = {}

    @ffi.callback("int(int, const char *const*, sqlite3_tokenizer **)")
    def xcreate(argc, argv, ppTokenizer):
        if hasattr(tokenizer, "__call__"):
            args = [ffi.string(x).decode("utf-8") for x in argv[0:argc]]
            tk = tokenizer(args)
        else:
            tk = tokenizer
        th = ffi.new_handle(tk)
        tkn = ffi.new("sqlite3_tokenizer *")
        tkn.t = th
        tokenizers[tkn] = th
        ppTokenizer[0] = tkn
        return SQLITE_OK

    @ffi.callback("int(sqlite3_tokenizer *)")
    def xdestroy(pTokenizer):
        tkn = pTokenizer
        del tokenizers[tkn]
        return SQLITE_OK

    @ffi.callback(
        "int(sqlite3_tokenizer*, const char *, int, sqlite3_tokenizer_cursor **)"
    )
    def xopen(pTokenizer, pInput, nInput, ppCursor):
        cur = ffi.new("sqlite3_tokenizer_cursor *")
        tokenizer = ffi.from_handle(pTokenizer.t)
        i = ffi.string(pInput).decode("utf-8")
        tokens = [(n.encode("utf-8"), b, e) for n, b, e in tokenizer.tokenize(i) if n]
        tknh = ffi.new_handle(iter(tokens))
        cur.pTokenizer = pTokenizer
        cur.tokens = tknh
        cur.pos = 0
        cur.offset = 0
        cursors[cur] = tknh
        ppCursor[0] = cur
        return SQLITE_OK

    @ffi.callback(
        "int(sqlite3_tokenizer_cursor*, const char **, int *, int *, int *, int *)"
    )
    def xnext(pCursor, ppToken, pnBytes, piStartOffset, piEndOffset, piPosition):
        try:
            cur = pCursor[0]
            tokens = ffi.from_handle(cur.tokens)
            normalized, inputBegin, inputEnd = next(tokens)
            ppToken[0] = ffi.from_buffer(normalized)
            pnBytes[0] = len(normalized)
            piStartOffset[0] = inputBegin
            piEndOffset[0] = inputEnd
            cur.offset = inputEnd
            piPosition[0] = cur.pos
            cur.pos += 1
        except StopIteration:
            return SQLITE_DONE
        return SQLITE_OK

    @ffi.callback("int(sqlite3_tokenizer_cursor *)")
    def xclose(pCursor):
        tk = ffi.from_handle(pCursor.pTokenizer.t)
        on_close = getattr(tk, "on_close", None)
        if on_close and hasattr(on_close, "__call__"):
            on_close()
        del cursors[pCursor]
        return SQLITE_OK

    tokenizer_module = ffi.new(
        "sqlite3_tokenizer_module*", [0, xcreate, xdestroy, xopen, xclose, xnext]
    )
    tokenizer_modules[tokenizer] = (
        tokenizer_module,
        xcreate,
        xdestroy,
        xopen,
        xclose,
        xnext,
    )
    return tokenizer_module
138
def looping_call(interval, callable):
    """
    Returns a greenlet running your callable in a loop and an Event you can set
    to terminate the loop cleanly.
    """
    ev = Event()

    def loop(interval, callable):
        while not ev.wait(timeout=interval):
            callable()

    return gevent.spawn(loop, interval, callable), ev
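A hypothetical usage sketch (the heartbeat callable and the 2-second interval are made up for illustration): the returned Event stops the loop cleanly, and joining the greenlet waits for it to exit.

import gevent

def heartbeat():
    print("still alive")

glet, stop_event = looping_call(2, heartbeat)  # runs heartbeat roughly every 2 seconds
gevent.sleep(7)
stop_event.set()  # ask the loop to terminate cleanly
glet.join()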
139
def obj2sta(obj, sta_filename, workshop=None):
    """
    Convert an object (json format) to sta file

    :param obj: ready to convert
    :param sta_filename: output sta filename
    :param workshop: means path to read binary file
    :return:
    """
    if workshop is None:
        workshop = ''

    with open(sta_filename, 'wb') as ofile:
        kwargs = {}
        kwargs['workshop'] = workshop
        byte = pack_obj(obj, **kwargs)
        # write header
        ofile.write(struct.pack('i', STA_MARK))
        # write content
        ofile.write(byte)
140
def print_twin_results_vec(vec_dat, labels):
    """Print out comparison results, stored in 1d vector.

    Parameters
    ----------
    vec_dat : 1d array
        Vector of data to print out.
    labels : list of str
        Labels for what data each row corresponds to.
    """
    for ind, label in enumerate(labels):
        print('\t', label, '\t : ', '{:5.4f}'.format(vec_dat[ind]))
141
def rsquared_adj(r, nobs, df_res, has_constant=True):
    """
    Compute the adjusted R^2, coefficient of determination.

    Args:
        r (float): rsquared value
        nobs (int): number of observations the model was fit on
        df_res (int): degrees of freedom of the residuals (nobs - number of model params)
        has_constant (bool): whether the fitted model included a constant (intercept)

    Returns:
        float: adjusted coefficient of determination
    """
    if has_constant:
        return 1.0 - (nobs - 1) / df_res * (1.0 - r)
    else:
        return 1.0 - nobs / df_res * (1.0 - r)
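A quick worked example (numbers are illustrative): with r^2 = 0.80, 50 observations, and 46 residual degrees of freedom, the adjustment gives 1 - (49/46)(0.20) which is approximately 0.787.

adj = rsquared_adj(r=0.80, nobs=50, df_res=46, has_constant=True)
print(round(adj, 3))  # 0.787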
142
def run_tests(test_folder: str, build_folder: str) -> None:
    """ Discover and run tests. """
    sys.path.insert(0, '.')
    # Make sure log messages are not shown on stdout/stderr. We can't simply
    # increase the log level since some unit tests expect logging to happen.
    logging.getLogger().addHandler(logging.StreamHandler(io.StringIO()))
    # Run the unit test with the XML test runner so that the test output
    # can be processed by Sonar.
    my_dir = pathlib.Path(__file__).resolve().parent
    tests_dir = my_dir / test_folder
    results_dir = my_dir.parent / "build" / build_folder
    results_dir.mkdir(parents=True, exist_ok=True)
    unittest.main(module=None,
                  testRunner=xmlrunner.XMLTestRunner(output=str(results_dir)),
                  argv=[sys.argv[0], "discover", "-s", str(tests_dir), "-p", "*_tests.py"])
143
def metadata_factory(repo, json=False, **kwargs):
    """
    This generates a layout you would expect for metadata storage with files.

    :type json: bool
    :param json: if True, will return string instead.
    """
    output = {
        "baseline_filename": None,
        "crontab": "0 0 * * *",
        "exclude_regex": None,
        "plugins": {
            "AWSKeyDetector": {},
            "ArtifactoryDetector": {},
            "Base64HighEntropyString": {
                "base64_limit": 4.5,
            },
            "BasicAuthDetector": {},
            "HexHighEntropyString": {
                "hex_limit": 3,
            },
            "KeywordDetector": {
                'keyword_exclude': None
            },
            "MailchimpDetector": {},
            "PrivateKeyDetector": {},
            "SlackDetector": {},
            "StripeDetector": {},
        },
        "repo": repo,
        "sha": 'sha256-hash',
    }
    output.update(kwargs)

    if json:
        return json_module.dumps(output, indent=2, sort_keys=True)
    return output
144
def saveTextData(file_name, data, fmt):
    """Save data array in text format

    Args:
        file_name (str): output file name
        data (ndarray): numpy ndarray data
        fmt (str): format string, e.g. "%.2e"
    """
    numpy.savetxt(file_name, data, fmt=fmt)
145
def inpand(clip: vs.VideoNode, sw: int, sh: Optional[int] = None,
           mode: XxpandMode = XxpandMode.RECTANGLE,
           thr: Optional[int] = None,
           planes: int | Sequence[int] | None = None) -> vs.VideoNode:
    """
    Calls std.Minimum in order to shrink each pixel with the smallest value in its 3x3 neighbourhood
    from the desired width and height.

    :param clip:        Source clip.
    :param sw:          Shrinking shape width.
    :param sh:          Shrinking shape height. If not specified, default to sw.
    :param mode:        Shape form. Ellipses are combinations of rectangles and lozenges
                        and look more like octagons.
                        Lozenges are truncated (not scaled) when sw and sh are not equal.
    :param thr:         Allows to limit how much pixels are changed.
                        Output pixels will not become less than ``input - threshold``.
                        The default is no limit.
    :param planes:      Specifies which planes will be processed. Any unprocessed planes will be simply copied.
    :return:            Transformed clip
    """
    return morpho_transfo(clip, core.std.Minimum, sw, sh, mode, thr, planes)
146
def pre_update(ctx, ref=settings.UPDATE_REF):
    """Update code to pick up changes to this file."""
    update_code(ref)
    update_info()
147
def test_instantiation(adb_executable):
    """
    Just make sure that we can instantiate the ADB object
    :return: None
    """
    try:
        adb = ADB(adb_executable)  # noqa: W0612
    except:  # noqa
        pytest.fail("No failure should be raised")
148
def _extract_aggregate_functions(before_aggregate):
    """Converts `before_aggregate` to aggregation functions.

    Args:
        before_aggregate: The first result of splitting `after_broadcast` on
            `intrinsic_defs.FEDERATED_AGGREGATE`.

    Returns:
        `zero`, `accumulate`, `merge` and `report` as specified by
        `canonical_form.CanonicalForm`. All are instances of
        `building_blocks.CompiledComputation`.

    Raises:
        transformations.CanonicalFormCompilationError: If we extract an AST of
            the wrong type.
    """
    # See `get_iterative_process_for_canonical_form()` above for the meaning of
    # variable names used in the code below.
    zero_index_in_before_aggregate_result = 1
    zero_tff = transformations.select_output_from_lambda(
        before_aggregate, zero_index_in_before_aggregate_result).result
    accumulate_index_in_before_aggregate_result = 2
    accumulate_tff = transformations.select_output_from_lambda(
        before_aggregate, accumulate_index_in_before_aggregate_result).result
    merge_index_in_before_aggregate_result = 3
    merge_tff = transformations.select_output_from_lambda(
        before_aggregate, merge_index_in_before_aggregate_result).result
    report_index_in_before_aggregate_result = 4
    report_tff = transformations.select_output_from_lambda(
        before_aggregate, report_index_in_before_aggregate_result).result

    zero = transformations.consolidate_and_extract_local_processing(zero_tff)
    accumulate = transformations.consolidate_and_extract_local_processing(
        accumulate_tff)
    merge = transformations.consolidate_and_extract_local_processing(merge_tff)
    report = transformations.consolidate_and_extract_local_processing(report_tff)
    return zero, accumulate, merge, report
149
def process_site_eb(err, product_id, sid, data):
    """Errorback from process_site transaction."""
    msg = f"process_site({product_id}, {sid}, {data}) got {err}"
    common.email_error(err, msg)
150
def _make_system(A, M, x0, b):
    """Make a linear system Ax = b

    Args:
        A (cupy.ndarray or cupyx.scipy.sparse.spmatrix or
            cupyx.scipy.sparse.LinearOperator): sparse or dense matrix.
        M (cupy.ndarray or cupyx.scipy.sparse.spmatrix or
            cupyx.scipy.sparse.LinearOperator): preconditioner.
        x0 (cupy.ndarray): initial guess to iterative method.
        b (cupy.ndarray): right hand side.

    Returns:
        tuple:
            It returns (A, M, x, b).
            A (LinearOperator): matrix of linear system
            M (LinearOperator): preconditioner
            x (cupy.ndarray): initial guess
            b (cupy.ndarray): right hand side.
    """
    fast_matvec = _make_fast_matvec(A)
    A = _interface.aslinearoperator(A)
    if fast_matvec is not None:
        A = _interface.LinearOperator(A.shape, matvec=fast_matvec,
                                      rmatvec=A.rmatvec, dtype=A.dtype)
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape: {})'.format(A.shape))
    if A.dtype.char not in 'fdFD':
        raise TypeError('unsupported dtype (actual: {})'.format(A.dtype))
    n = A.shape[0]
    if not (b.shape == (n,) or b.shape == (n, 1)):
        raise ValueError('b has incompatible dimensions')
    b = b.astype(A.dtype).ravel()
    if x0 is None:
        x = cupy.zeros((n,), dtype=A.dtype)
    else:
        if not (x0.shape == (n,) or x0.shape == (n, 1)):
            raise ValueError('x0 has incompatible dimensions')
        x = x0.astype(A.dtype).ravel()
    if M is None:
        M = _interface.IdentityOperator(shape=A.shape, dtype=A.dtype)
    else:
        fast_matvec = _make_fast_matvec(M)
        M = _interface.aslinearoperator(M)
        if fast_matvec is not None:
            M = _interface.LinearOperator(M.shape, matvec=fast_matvec,
                                          rmatvec=M.rmatvec, dtype=M.dtype)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')
    return A, M, x, b
151
def merge_intersecting_segments(segments: List[Segment]) -> List[Segment]:
    """
    Merges intersecting segments from the list.
    """
    sorted_by_start = sorted(segments, key=lambda segment: segment.start)

    merged = []
    for segment in sorted_by_start:
        if not merged:
            merged.append(Segment(segment.start, segment.end))
            continue

        last_merged = merged[-1]
        if segment.start <= last_merged.end:
            last_merged.end = max(last_merged.end, segment.end)
        else:
            merged.append(Segment(segment.start, segment.end))

    return merged
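A minimal usage sketch, assuming Segment is a simple mutable container with start and end attributes; the dataclass below is a hypothetical stand-in for the project's real Segment type.

from dataclasses import dataclass

@dataclass
class Segment:  # hypothetical stand-in for the real Segment type
    start: int
    end: int

merged = merge_intersecting_segments([Segment(5, 7), Segment(1, 3), Segment(2, 4)])
print(merged)  # [Segment(start=1, end=4), Segment(start=5, end=7)]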
152
def test_set_dids_metadata_bulk_multi(did_factory):
    """ DID (CORE) : Test setting metadata in bulk with multiple key-values on multiple dids"""
    skip_without_json()
    nb_dids = 5
    dids = [did_factory.make_dataset() for _ in range(nb_dids)]

    for did in dids:
        testkeys = list(map(lambda i: 'testkey' + generate_uuid(), range(3)))
        testmeta = {key: key + 'value' for key in testkeys}
        did['meta'] = testmeta
    print(dids)

    set_dids_metadata_bulk(dids=dids, recursive=False)
    for did in dids:
        testmeta = did['meta']
        print('Metadata:', testmeta)
        meta = get_metadata(plugin="ALL", scope=did['scope'], name=did['name'])
        print('Metadata:', meta)
        for testkey in testmeta:
            assert testkey in meta and meta[testkey] == testmeta[testkey]
153
def change_log_root_key():
    """Root key of an entity group with change log."""
    # Bump ID to rebuild the change log from *History entities.
    return ndb.Key('AuthDBLog', 'v1')
154
def load_file(filename):
    """Loads a TESS *spoc* FITS file and returns TIME, PDCSAP_FLUX"""
    hdu = fits.open(filename)
    time = hdu[1].data['TIME']
    flux = hdu[1].data['PDCSAP_FLUX']
    flux[flux == 0] = numpy.nan
    return time, flux
155
def AcProgRanlib(context, selection=None):
    """Corresponds to AC_PROG_RANLIB_ autoconf macro

    :Parameters:
        context
            SCons configuration context.
        selection
            If ``None`` (default), the program will be found automatically,
            otherwise the method will return the value of **selection**.

    .. _AC_PROG_RANLIB: http://www.gnu.org/software/autoconf/manual/autoconf.html#index-AC_005fPROG_005fRANLIB-291
    """
    raise NotImplementedError("not implemented")
156
def blit_anchors(dest, dest_anchor, src, src_anchor):
    """ Blits the source onto the destination such that their anchors align.

    src_anchor and dest_anchor can be strings of one of the point attributes (topleft, center,
    midbottom, etc.) or a position on their respective surfaces (e.g [100, 200]).
    """
    try:
        src_anchor = get_anchor(src, src_anchor)
    except ValueError:
        pass  # Assume src_anchor is already a point. If not, it will fail in the map().

    try:
        dest_anchor = get_anchor(dest, dest_anchor)
    except ValueError:
        pass  # Assume dest_anchor is already a point. If not, it will fail in the map().

    topleft = list(map(lambda a, b, c: a - b + c, src.get_rect().topleft, src_anchor, dest_anchor))
    dest.blit(src, topleft)
157
def updateGraph(Graph, GraphNumber):
    """
    The function calls an appropriate plot-function based on the mode (value)
    of the radio-button

    :param Graph: an instance of VibroP_GraphObject class
    :param GraphNumber: int
    :return:
    """

    # Update the graph ID ( GraphNumber - it's a built-in bokeh variable that
    # belongs to the RadioButton widget )
    Graph.setPlottingGraphNumber(GraphNumber)
    plotEigenfrequenciesPlate(Graph)

    # Depict corresponding lines based on the graph chosen by the user
    if (GraphNumber == 0):
        plotWaveSpeedGraph(Graph)

    if (GraphNumber == 1):
        plotWaveSpeedGraphWithLimits(Graph)

    if (GraphNumber == 2):
        plotModesInBand(Graph)

    if (GraphNumber == 3):
        plotModalDensity(Graph)

    if (GraphNumber == 4):
        plotModalOverlapFactor(Graph)

    if (GraphNumber == 5):
        plotMaximumElementSize(Graph)
158
def create_insight_id_extension(
    insight_id_value: str, insight_system: str
) -> Extension:
    """Creates an extension for an insight-id with a valueIdentifier

    The insight id extension is defined in the IG at:
    https://alvearie.io/alvearie-fhir-ig/StructureDefinition-insight-id.html

    Args:
        insight_id_value  - the value of the insight id
        insight_system    - urn for the system used to create the insight

    Returns:
        The insight id extension

    Example:
        >>> ext = create_insight_id_extension("insight-1", "urn:id:alvearie.io/patterns/QuickUMLS_v1.4.0")
        >>> print(ext.json(indent=2))
        {
          "url": "http://ibm.com/fhir/cdm/StructureDefinition/insight-id",
          "valueIdentifier": {
            "system": "urn:id:alvearie.io/patterns/QuickUMLS_v1.4.0",
            "value": "insight-1"
          }
        }
    """
    insight_id_ext = Extension.construct()
    insight_id_ext.url = alvearie_ext_url.INSIGHT_ID_URL

    insight_id = Identifier.construct()
    insight_id.system = insight_system
    insight_id.value = insight_id_value

    insight_id_ext.valueIdentifier = insight_id
    return insight_id_ext
159
def ReadNotifyResponseHeader(payload_size, data_type, data_count, sid, ioid):
    """
    Construct a ``MessageHeader`` for a ReadNotifyResponse command.

    Read value of a channel. Sent over TCP.

    Parameters
    ----------
    payload_size : integer
        Size of DBR formatted data in payload.
    data_type : integer
        Payload format.
    data_count : integer
        Payload element count.
    sid : integer
        SID of the channel.
    ioid : integer
        IOID of this operation.
    """
    struct_args = (15, payload_size, data_type, data_count, sid, ioid)
    # If payload_size or data_count cannot fit into a 16-bit integer, use the
    # extended header.
    return (ExtendedMessageHeader(*struct_args)
            if any((payload_size > 0xffff, data_count > 0xffff, ))
            else MessageHeader(*struct_args))
160
def substitute_T5_cols(c, cols, nlu_identifier=True):
    """
    Rename cols with base name either <t5> or if not unique <t5_<task>>
    """
    new_cols = {}
    new_base_name = 't5' if nlu_identifier == 'UNIQUE' else f't5_{nlu_identifier}'
    for col in cols:
        if '_results' in col:
            new_cols[col] = new_base_name
        elif '_beginnings' in col:
            new_cols[col] = f'{new_base_name}_begin'
        elif '_endings' in col:
            new_cols[col] = f'{new_base_name}_end'
        elif '_embeddings' in col:
            continue  # Token never stores Embeddings new_cols[col] = f'{new_base_name}_embedding'
        elif '_types' in col:
            continue  # new_cols[col] = f'{new_base_name}_type'
        elif 'meta' in col:
            if '_sentence' in col:
                new_cols[col] = f'{new_base_name}_origin_sentence'  # maps to which sentence token comes from
            else:
                logger.info(f'Dropping unmatched metadata_col={col} for c={c}')
        # new_cols[col] = f"{new_base_name}_confidence"
    return new_cols
161
def _get_trigger_func(name, trigger_name):
    """
    Given a valid vulnerability name, get the trigger function corresponding
    to the vulnerability name and trigger name.
    If the trigger function isn't found, raise NotFound.
    """
    try:
        return get_trigger(name, trigger_name)
    except AttributeError:
        raise NotFound()
162
def _get_rotated_bounding_box(size, quaternion):
    """Calculates the bounding box of a rotated 3D box.

    Args:
        size: An array of length 3 specifying the half-lengths of a box.
        quaternion: A unit quaternion specifying the box's orientation.

    Returns:
        An array of length 3 specifying the half-lengths of the bounding box of
        the rotated box.
    """
    corners = ((size[0], size[1], size[2]),
               (size[0], size[1], -size[2]),
               (size[0], -size[1], size[2]),
               (-size[0], size[1], size[2]))
    rotated_corners = tuple(
        transformations.quat_rotate(quaternion, corner) for corner in corners)
    return np.amax(np.abs(rotated_corners), axis=0)
163
def Cos(a: float = 1.,
        b: float = 1.,
        c: float = 0.) -> InternalLayer:
    """Affine transform of `Cos` nonlinearity, i.e. `a cos(b*x + c)`.

    Args:
        a: output scale.
        b: input scale.
        c: input phase shift.

    Returns:
        `(init_fn, apply_fn, kernel_fn)`.
    """
    return Sin(a=a, b=b, c=c + np.pi / 2)
164
def key_make_model(chip):
    """
    Given a chip, return make and model string.

    Make and model are extracted from chip.misc using the keys "make" and
    "model". If they are missing it returns None for that value. If misc
    missing or not a dictionary, (None, None) is returned.

    Args:
        chip: A chip named tuple

    Returns:
        string: "make_model" from the chip. The string "None" may be returned
        for one of the positions (or both) if it is missing in the chip.
    """
    output = [None, None]

    # Ensure we have a misc dictionary
    if hasattr(chip, "misc"):
        misc = chip.misc
        if hasattr(misc, "get"):
            output[0] = misc.get("make", None)
            output[1] = misc.get("model", None)

    return tuple_to_string(output)
165
def check_concentration(R, D):
    """
    check the concentration of a halo by finding where the power law is most similar to alpha^-2

    return 1./radius, which is the concentration
    (so find the scale radius by taking 1./concentration)
    """
    func = np.log10(R**-2.) - np.log10(D)
    print('Concentration={}'.format(1. / R[np.nanargmin(func)]))
166
def _mergesort_space_optimized(nums: list, start: int, end: int) -> None:
    """Performing merge operation in-place by overwriting associated indexes of input array

    Complexity:
        n = len(nums)
        Space: n = O(n) for (2 * n/2) copies of sorted left/right subarrays

    Examples:
        >>> _mergesort_space_optimized([], 0, 0)
    """
    ## EDGE CASES ##
    if not nums:
        return

    """Algorithm"""
    ## BASE CASE ##
    if start >= end:
        return

    ## INITIALIZE VARS ##
    mid = (start + end + 1) // 2

    ## RECURSIVELY SORT SUBARRAYS ##
    _mergesort_space_optimized(nums, start, mid - 1)
    left = nums[start:mid]  # n/2 copy
    _mergesort_space_optimized(nums, mid, end)
    right = nums[mid : end + 1]  # n/2 copy

    ## MERGE SORTED SUBARRAYS ##
    curr_left_idx = curr_right_idx = 0
    insertion_idx = start

    # Merge until a single subarray is exhausted
    while curr_left_idx < len(left) and curr_right_idx < len(right):
        if left[curr_left_idx] <= right[curr_right_idx]:
            nums[insertion_idx] = left[curr_left_idx]
            curr_left_idx += 1
        else:
            nums[insertion_idx] = right[curr_right_idx]
            curr_right_idx += 1
        insertion_idx += 1

    # Merge remaining subarray
    if curr_left_idx == len(left):
        while curr_right_idx < len(right):
            nums[insertion_idx] = right[curr_right_idx]
            curr_right_idx += 1
            insertion_idx += 1
    else:
        while curr_left_idx < len(left):
            nums[insertion_idx] = left[curr_left_idx]
            curr_left_idx += 1
            insertion_idx += 1
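A quick in-place usage check (the list is arbitrary): the function sorts the inclusive index range and mutates nums directly.

nums = [5, 2, 4, 6, 1, 3]
_mergesort_space_optimized(nums, 0, len(nums) - 1)
print(nums)  # [1, 2, 3, 4, 5, 6]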
167
def test_has_object_destroy_permission_owner(api_rf, km_user_accessor_factory):
    """
    The Know Me user the accessor grants access on should be able to
    destroy the accessor.
    """
    accessor = km_user_accessor_factory()
    api_rf.user = accessor.km_user.user

    request = api_rf.get("/")

    assert accessor.has_object_destroy_permission(request)
168
def alternating_epsilons_actor_core(
    policy_network: EpsilonPolicy, epsilons: Sequence[float],
) -> actor_core_lib.ActorCore[EpsilonActorState, None]:
    """Returns actor components for alternating epsilon exploration.

    Args:
        policy_network: A feedforward action selecting function.
        epsilons: epsilons to alternate per-episode for epsilon-greedy exploration.

    Returns:
        A feedforward policy.
    """
    epsilons = jnp.array(epsilons)

    def apply_and_sample(params: networks_lib.Params,
                         observation: networks_lib.Observation,
                         state: EpsilonActorState):
        random_key, key = jax.random.split(state.rng)
        actions = policy_network(params, key, observation, state.epsilon)
        return (actions.astype(jnp.int32),
                EpsilonActorState(rng=random_key, epsilon=state.epsilon))

    def policy_init(random_key: networks_lib.PRNGKey):
        random_key, key = jax.random.split(random_key)
        epsilon = jax.random.choice(key, epsilons)
        return EpsilonActorState(rng=random_key, epsilon=epsilon)

    return actor_core_lib.ActorCore(
        init=policy_init, select_action=apply_and_sample,
        get_extras=lambda _: None)
169
def get_keywords(
    current_user: models.User = Depends(deps.get_current_active_user),
    controller_client: ControllerClient = Depends(deps.get_controller_client),
    labels: List = Depends(deps.get_personal_labels),
    q: Optional[str] = Query(None, description="query keywords"),
    offset: int = Query(0),
    limit: Optional[int] = Query(None),
) -> Dict:
    """
    Get keywords and aliases
    """
    filter_f = partial(filter_keyword, q) if q else None
    items = list(labels_to_keywords(labels, filter_f))
    if settings.REVERSE_KEYWORDS_OUTPUT:
        items.reverse()
    res = {"total": len(items), "items": paginate(items, offset, limit)}
    return {"result": res}
170
def _compile_unit(i):
    """Append gas to unit and update CO2e for pint/iam-unit compatibility"""
    if " equivalent" in i["unit"]:
        return i["unit"].replace("CO2 equivalent", "CO2e")
    if i["unit"] in ["kt", "t"]:
        return " ".join([i["unit"], i["gas"]])
    else:
        return i["unit"]
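A brief illustration of the three branches (the input dicts are hypothetical examples):

print(_compile_unit({"unit": "kt CO2 equivalent", "gas": "CO2"}))  # 'kt CO2e'
print(_compile_unit({"unit": "kt", "gas": "CH4"}))                 # 'kt CH4'
print(_compile_unit({"unit": "Mt CO2/yr", "gas": "CO2"}))          # 'Mt CO2/yr'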
171
def get_fdfs_url(file):
    """
    Upload a file or image to FastDFS.
    :param file: file or image object, either binary data or a local file
    :return: the URL of the file or image in FastDFS
    """
    # Create the FastDFS connection object
    fdfs_client = Fdfs_client(settings.FASTDFS_CONF_PATH)

    """
    client.upload_by_filename(filename),
    client.upload_by_buffer(file bytes data)
    """
    # Upload the file or image to FastDFS
    if isinstance(file, InMemoryUploadedFile):
        result = fdfs_client.upload_by_buffer(file.read())
    else:
        result = fdfs_client.upload_by_filename(file)

    """
    result = {
        'Group name': 'group1',  # FastDFS Storage group name on the server
        'Remote file_id': 'group1/M00/00/00/wKgThF0LMsmATQGSAAExf6lt6Ck10.jpeg',  # storage location (index), usable for download
        'Status': 'Upload successed.',  # upload result message
        'Local file name': '/home/python/Desktop/upload_Images/02.jpeg',  # real path of the uploaded file
        'Uploaded size': '76.00KB',  # file size
        'Storage IP': '192.168.19.132'}  # IP of the FastDFS Storage server
    """
    # Check whether the upload succeeded; result is a dict
    if result['Status'] != 'Upload successed.':
        return Response(status=403)

    # Get the path of the uploaded file or image
    file_url = result['Remote file_id']
    return file_url
172
def test_download(mocker: MockFixture): """Test happy Path""" # Mocking cmd line arguments testArgs = Namespace(output="file.out", slot="Test") mocker.patch("argparse.ArgumentParser.parse_args", return_value=testArgs) mocker.patch("requests.request", side_effect=fakeQnAMakerAPI) fileOut = mock_open() mocker.patch.object(builtins, "open", fileOut) # run as module (start from if __name__ == "__main__") runpy.run_module("kb.scripts.download-kb", run_name="__main__") fileOut.assert_called_once_with("file.out", "w", encoding="utf-8") fileOut().write.assert_called_with( json.dumps(jsonKB["qnaDocuments"], sort_keys=True, indent=4) )
173
def updateDb(dbObj, resultTupleList):
    """
    Write the results to the dbObj.
    """
    for (benchmarkInfo, benchmarkResults) in resultTupleList:
        dbObj.addResults(benchmarkInfo, benchmarkResults)
174
def get_all_edge_detects(clip: vs.VideoNode, **kwargs: Any) -> List[vs.VideoNode]:
    """Allows you to get all masks inheriting from EdgeDetect.

    Args:
        clip (vs.VideoNode): Source clip.

        kwargs: Arguments passed to EdgeDetect().get_mask

    Returns:
        List[vs.VideoNode]: List of masks.

    Example:
        from vardefunc.mask import get_all_edge_detects

        clip.set_output(0)

        for i, mask in enumerate(get_all_edge_detects(get_y(clip)), start=1):
            mask.set_output(i)
    """
    masks = [
        edge_detect().get_mask(clip, **kwargs).text.Text(edge_detect.__name__)  # type: ignore
        for edge_detect in EdgeDetect.__subclasses__()
    ]
    return masks
175
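# Standalone sketch (assumed classes, not vardefunc's EdgeDetect hierarchy) of the
# pattern used above: enumerating direct subclasses via __subclasses__().
class EdgeDetectBase:
    pass

class Sobel(EdgeDetectBase):
    pass

class Prewitt(EdgeDetectBase):
    pass

print([cls.__name__ for cls in EdgeDetectBase.__subclasses__()])  # ['Sobel', 'Prewitt']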
def add_overwrite_arg(parser): """ Add overwrite option to the parser. Parameters ---------- parser: argparse.ArgumentParser object """ parser.add_argument( '-f', dest='overwrite', action='store_true', help='Force overwriting of the output files.')
176
def get_ustz(localdt, timezone):
    """
    Returns the timezone associated with a local datetime and an IANA timezone.

    There are two common timezone conventions. One is the Olson/IANA and
    the other is the Microsoft convention. For example, the closest IANA
    timezone for Boston, MA is America/New_York. More commonly, this is
    known as Eastern time zone. The goal of this function is to return the
    common name for a timezone in the contiguous US.

    Note that Arizona has its own IANA timezone and does not observe daylight
    savings. So depending on the time of year, the offset for Arizona will
    correspond to either Pacific or Mountain time.

    Parameters
    ----------
    localdt : datetime
        The local datetime instance. The time of day is replaced with noon
        internally before the comparison.
    timezone : str
        The IANA timezone associated with `localdt`. This should be a
        timezone for the contiguous US.

    Returns
    -------
    tz : str
        The common name for the timezone. This will be one of Pacific,
        Mountain, Central, or Eastern.

    """
    # Use noon to guarantee that we have the same day in each timezone.
    # This is desirable in the sense that we don't want someone's tweet jumping
    # from Eastern to Central, for example, at the end of daylight savings time.
    localdt = datetime.datetime(localdt.year, localdt.month, localdt.day, 12)
    timezone = pytz.timezone(timezone)

    dt = timezone.localize(localdt)
    for tz, tz_ref in TIMEZONES:
        dt_new = dt.astimezone(tz_ref)
        if dt_new.utcoffset() == dt.utcoffset():
            return tz
177
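# Minimal standalone sketch (assumed zones and date, independent of the module's
# TIMEZONES table) of the offset comparison behind get_ustz: localize noon and
# compare UTC offsets against candidate US zones. In January, America/Phoenix
# lines up with US/Mountain; in July it would line up with US/Pacific.
import datetime
import pytz

local_noon = pytz.timezone("America/Phoenix").localize(datetime.datetime(2021, 1, 15, 12))
for name in ("US/Pacific", "US/Mountain", "US/Central", "US/Eastern"):
    if local_noon.astimezone(pytz.timezone(name)).utcoffset() == local_noon.utcoffset():
        print(name)  # US/Mountain
        break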
def plot_rolling_sharpe(returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6, **kwargs):
    """
    Computes the rolling Sharpe ratio versus date.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in tears.create_full_tear_sheet.
    rolling_window : int, optional
        The days window over which to compute the sharpe ratio.
    **kwargs, optional
        Currently unused; accepted for compatibility with plotting callers.

    Returns
    -------
    rolling_sharpe_ts : pd.Series
        The rolling Sharpe ratio time series.
    """

    rolling_sharpe_ts = timeseries.rolling_sharpe(
        returns, rolling_window)

    return rolling_sharpe_ts
178
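# Standalone approximation (an assumption, not the timeseries.rolling_sharpe
# implementation used above) of a rolling Sharpe ratio on daily returns,
# annualized with 252 trading days.
import numpy as np
import pandas as pd

returns = pd.Series(np.random.default_rng(0).normal(0.0005, 0.01, 500),
                    index=pd.bdate_range("2020-01-01", periods=500))
window = 126
rolling_sharpe_ts = (returns.rolling(window).mean()
                     / returns.rolling(window).std()) * np.sqrt(252)
print(rolling_sharpe_ts.dropna().head())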
def test_data_format_avro(sdc_builder, sdc_executor, gcp):
    """
    Write data to Google Cloud Storage using Avro format.
    The pipeline looks like:
        dev_raw_data_source >> google_cloud_storage
    """
    DATA = {'name': 'boss', 'age': 60, 'emails': ['boss@company.com', 'boss2@company.com'], 'boss': None}
    SCHEMA = {'namespace': 'example.avro',
              'type': 'record',
              'name': 'Employee',
              'fields': [{'name': 'name', 'type': 'string'},
                         {'name': 'age', 'type': 'int'},
                         {'name': 'emails', 'type': {'type': 'array', 'items': 'string'}},
                         {'name': 'boss', 'type': ['Employee', 'null']}]}

    pipeline_builder = sdc_builder.get_pipeline_builder()

    storage_client = gcp.storage_client

    bucket_name = "stf_std_" + get_random_string(string.ascii_lowercase, 20)

    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', stop_after_first_batch=True, raw_data=json.dumps(DATA))

    google_cloud_storage = pipeline_builder.add_stage('Google Cloud Storage', type='destination')

    google_cloud_storage.set_attributes(bucket=bucket_name,
                                        common_prefix='gcs-test',
                                        partition_prefix='test',
                                        data_format='AVRO',
                                        avro_schema=json.dumps(SCHEMA),
                                        avro_schema_location='INLINE')

    dev_raw_data_source >> google_cloud_storage

    pipeline = pipeline_builder.build(
        title=f'Google Cloud Storage Destination Data Format Avro').configure_for_environment(gcp)
    sdc_executor.add_pipeline(pipeline)

    try:
        created_bucket = gcp.retry_429(storage_client.create_bucket)(bucket_name)
        logger.info('Starting GCS Destination pipeline and waiting until the information is written ...')
        sdc_executor.start_pipeline(pipeline).wait_for_finished()

        # To verify that the Avro format has been successfully stored, we read the data from GCS using an auxiliary
        # pipeline. This pipeline is set to read data in Avro format.
        result = read_avro_data(bucket_name, sdc_builder, sdc_executor, gcp)

        # Compare the records read back from GCS and check that they equal the original data stored.
        assert [record.field for record in result] == [DATA]
    finally:
        logger.info('Deleting bucket %s ...', created_bucket.name)
        gcp.retry_429(created_bucket.delete)(force=True)
179
def saveSolution(name, C):
    """save set C to file name as a VertexCover solution"""
    with open(name, "w") as f:
        f.write(",".join([str(c) for c in C]))
180
def ResolveNamespace(namespace): """Validate app namespace, providing a default. If the argument is None, namespace_manager.get_namespace() is substituted. Args: namespace: The namespace argument value to be validated. Returns: The value of namespace, or the substituted default. Always a non-empty string or None. Raises: BadArgumentError if the value is not a string. """ if namespace is None: namespace = namespace_manager.get_namespace() else: namespace_manager.validate_namespace( namespace, datastore_errors.BadArgumentError) return namespace
181
def Calculo_por_etapas(Diccionario):
    """Stage-by-stage calculation of the hornilla (panela furnace)"""
    Lista_Contenido=[]
    Lista_columnas=[]
    #Normalization of the furnace capacity
    #Mem_dias=float(Diccionario['¿Cada cuantos días quiere moler? (días)'])
    #Mem_Temp=Normalizar_Capacidad(float(Diccionario['Capacidad estimada de la hornilla']),Mem_dias)
    #print(float(Diccionario['Capacidad estimada de la hornilla']))
    #print(Mem_Temp)
    Etapas=int(float(Diccionario['Etapas']))#Mem_Temp[1]
    #Etapas=12
    #Saturator: "a minimum of two stages"
    if (Etapas>2):
        Factor_Division=Etapas-2
    else:
        Factor_Division=2
        Etapas=2
    #Characteristics of the cells in each column (Lista_columnas)
    #Row 0: initial solids concentration
    #Row 1: final solids concentration
    #Row 2: average concentration
    #Row 3: incoming juice mass
    #Row 4: specific heat at constant pressure of the juice
    #Row 5: juice density
    #Row 6: juice volume per kg
    #Row 7: juice volume in L
    #Row 8: inlet temperature
    #Row 9: outlet temperature
    #Row 10: enthalpy of vaporization
    #Row 11: mass of water to evaporate
    #Row 12: required heat calculated per stage
    for i in range(13):
        for j in range (Etapas):
            Lista_columnas.append(float(i+j))
        Lista_Contenido.append(Lista_columnas)
        Lista_columnas=[]
    Lista_Contenido[0][0]=float(Diccionario['CSS del jugo pos-evaporación']) #Initial solids concentration (CSS02)
    Lista_Contenido[1][0]=float(Diccionario['CSS panela']) #Final solids concentration (CSSF1)
    Lista_Contenido[0][Etapas-1]=float(Diccionario['CSS del jugo de Caña']) #Initial solids concentration (CSS01)
    Lista_Contenido[1][Etapas-1]=float(Diccionario['CSS del jugo clarificado']) #Final solids concentration (CSSF1)
    if(Etapas>2):
        ite=0
        for i in range(Etapas-2,0,-1):
            Lista_Contenido[0][i]=Lista_Contenido[1][i+1]
            if(ite==0):
                Lista_Contenido[1][i]=((Lista_Contenido[0][0]-Lista_Contenido[0][i])/Factor_Division)+Lista_Contenido[0][i]
                ite=ite+1
            else:
                Lista_Contenido[1][i]=((Lista_Contenido[0][0]-Lista_Contenido[0][Etapas-2])/Factor_Division)+Lista_Contenido[0][i]
    for i in range(Etapas-1,-1,-1):
        #Average concentration = (initial solids concentration + final solids concentration) / 2
        Lista_Contenido[2][i]=(Lista_Contenido[0][i]+Lista_Contenido[1][i])/2
        if(i==Etapas-1):
            #Incoming juice mass
            Lista_Contenido[3][i]=float(Diccionario['A clarificación'])
        else:
            #Incoming juice mass = (previous stage juice mass * previous stage initial CSS) / previous stage final CSS
            Lista_Contenido[3][i]=Lista_Contenido[3][i+1]*Lista_Contenido[0][i+1]/Lista_Contenido[1][i+1]
        #Calor_Especifico_P_Cte_jugo=4.18*(1-(0.006*Concetracion_promedio))
        Lista_Contenido[4][i]=4.18*(1-(0.006*Lista_Contenido[2][i]))
        #Densidad_del_Jugo=997.39+(4.46*Concetracion_promedio)
        Lista_Contenido[5][i]=997.39+(4.46*Lista_Contenido[2][i])
        #Volumen_jugo=Masa_jugo_de_entrada/Densidad_del_Jugo
        Lista_Contenido[6][i]=Lista_Contenido[3][i]/Lista_Contenido[5][i]
        #Volumen_jugo_L=Volumen_jugo*1000
        Lista_Contenido[7][i]=Lista_Contenido[6][i]*1000.0
        if(i==Etapas-1):
            #Inlet temperature = ambient temperature
            Lista_Contenido[8][i]=float(Diccionario['Temperatura del ambiente'])
        else:
            #Temperatura_Entrada=Temperatura_ebullición_agua+0.2209*math.exp(0.0557*Concentracion_solidos_inicial)
            Lista_Contenido[8][i]=Lista_Contenido[9][i+1]
        #Temperatura_Salida=G37+0.2209*math.exp(0.0557*Concentracion_solidos_final)
        Lista_Contenido[9][i]=float(Diccionario['Temperatura de ebullición del agua'])+0.2209*math.exp(0.0557*Lista_Contenido[1][i])
        #Entalpia_Vaporizacion=(2492.9-(2.0523*Temperatura_Entrada))-(0.0030752*(Temperatura_Entrada**2))
        Lista_Contenido[10][i]=(2492.9-(2.0523*Lista_Contenido[8][i]))-(0.0030752*(Lista_Contenido[8][i]**2))
        #Masa_Agua_Evaporar=Masa_jugo_de_entrada-(Masa_jugo_de_entrada*Concentracion_solidos_inicial/Concentracion_solidos_final)
        Lista_Contenido[11][i]=Lista_Contenido[3][i]-(Lista_Contenido[3][i]*Lista_Contenido[0][i]/Lista_Contenido[1][i])
        #Calor_por_Etapa=(Masa_jugo_de_entrada*Calor_Especifico_P_Cte_jugo*(Temperatura_Salida-Temperatura_Entrada)+Masa_Agua_Evaporar*Entalpia_Vaporizacion)/3600
        Lista_Contenido[12][i]=(Lista_Contenido[3][i]*Lista_Contenido[4][i]*(Lista_Contenido[9][i]-Lista_Contenido[8][i])+Lista_Contenido[11][i]*Lista_Contenido[10][i])/3600.0
    #Round to 3 decimal places
    for j in range (13):
        for i in range (Etapas):
            Lista_Contenido[j][i]=round(Lista_Contenido[j][i],3)
    #Move the output/position of the "punteo" pan to pan 3 or 4
    Lista_contenido_2=[]
    L_aux=[]
    for i in Lista_Contenido:
        inio=3
        if (Etapas!=7):
            L_aux.append(i[2])
            L_aux.append(i[1])
            L_aux.append(i[0])
            inio=3
        else:
            L_aux.append(i[3])
            L_aux.append(i[2])
            L_aux.append(i[1])
            L_aux.append(i[0])
            inio=4
        for t in range(inio,len(i)):
            L_aux.append(i[t])
        Lista_contenido_2.append(L_aux)
        L_aux=[]
    Lista_Contenido=Lista_contenido_2
    Etiquetas=[
                'Concentracion de Solidos Inicial [ºBrix]',
                'Concentracion de Solidos Final [ºBrix]',
                'Concentracion de Solidos Promedio [ºBrix]',
                'Masa de Jugo Entrada [Kg]',
                'Calor Especifico P Cte jugo [kJ/Kg °C]',
                'Densidad del Jugo [kg/m3]',
                'Volumen de jugo [m^3/kg]',
                'Volumen de jugo [L]',
                'Temperatura de Entrada [ºC]',
                'Temperatura de Salida [ºC]',
                'Entalpia de Vaporización [kJ/kg]',
                'Masa de Agua a Evaporar [kg]',
                'Calor Nece Calc por Etapa [kW]'
              ]
    Dict_aux=dict(zip(Etiquetas,Lista_Contenido))
    Dict_aux_2=dict(zip(['Etapas'],[Etapas]))
    Dict_aux.update(Dict_aux_2)
    return Dict_aux
182
def venus_equ_proportional_minute(x, y, e, R): """ Venus equation proportional minute :param x: true argument (av) in degree :param y: mean center (cm) in degree :param e: eccentricity :param R: radius of the epicycle :return: minutes proportional in degree """ return utils.minuta_proportionalia(x, R, e, y)
183
def e2_cond(p, m, sigma, alpha, mu):
    """
    This function depends on the gamma function. It gives the conditional mean
    of the square of the normal distribution.
    See the article for more information.

    Parameters
    ----------
    p : float
        Proportion of persistent species.
    m : float
        Mean of the persistent species.
    sigma : float
        Root mean square of the persistent species.
    alpha : float
        Parameter of the model - Interaction strength.
    mu : float
        Parameter of the model - Interaction drift.

    Returns
    -------
    float
        Conditional mean associated with the system.

    """
    # The value delta matches the definition used in the article.
    delta = alpha/(sigma*np.sqrt(p))*(1+mu*p*m)
    p_1 = np.exp(-delta**2/2)
    p_2 = 1-stats.norm.cdf(-delta)
    return (1/np.sqrt(2*np.pi))*-delta*p_1/p_2+1
184
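# Worked call (assumed parameter values) of e2_cond above, assuming the entry's
# own imports (numpy as np, scipy.stats as stats) are already in scope.
print(e2_cond(p=0.5, m=1.0, sigma=0.8, alpha=0.3, mu=-0.2))  # conditional second moment for these parameters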
def concline_generator(matches, idxs, df, metadata, add_meta, category, fname, preserve_case=False): """ Get all conclines :param matches: a list of formatted matches :param idxs: their (sent, word) idx """ conc_res = [] # potential speedup: turn idxs into dict from collections import defaultdict mdict = defaultdict(list) # if remaking idxs here, don't need to do it earlier idxs = list(matches.index) for mid, (s, i) in zip(matches, idxs): #for s, i in matches: mdict[s].append((i, mid)) # shorten df to just relevant sents to save lookup time df = df.loc[list(mdict.keys())] # don't look up the same sentence multiple times for s, tup in sorted(mdict.items()): sent = df.loc[s] if not preserve_case: sent = sent.str.lower() meta = metadata[s] sname = meta.get('speaker', 'none') for i, mid in tup: if not preserve_case: mid = mid.lower() ix = '%d,%d' % (s, i) start = ' '.join(sent.loc[:i-1].values) end = ' '.join(sent.loc[i+1:].values) lin = [ix, category, fname, sname, start, mid, end] if add_meta: for k, v in sorted(meta.items()): if k in ['speaker', 'parse', 'sent_id']: continue if isinstance(add_meta, list): if k in add_meta: lin.append(v) elif add_meta is True: lin.append(v) conc_res.append(lin) return conc_res
185
def main(): """Create a flow that uses the 'say_hello' task and run it. This creates a Prefect flow and runs it a couple times with different inputs. """ with Flow("My First Flow") as flow: name = Parameter('name') say_hello(name) flow.run(name='World') flow.run(name='NH Python Meetup Group')
186
def match_detections(predicted_data, gt_data, min_iou): """Carry out matching between detected and ground truth bboxes. :param predicted_data: List of predicted bboxes :param gt_data: List of ground truth bboxes :param min_iou: Min IoU value to match bboxes :return: List of matches """ all_matches = {} total_gt_bbox_num = 0 matched_gt_bbox_num = 0 frame_ids = gt_data.keys() for frame_id in tqdm(frame_ids, desc='Matching detections'): if frame_id not in predicted_data.keys(): all_matches[frame_id] = [] continue gt_bboxes = gt_data[frame_id] predicted_bboxes = predicted_data[frame_id] total_gt_bbox_num += len(gt_bboxes) similarity_matrix = calculate_similarity_matrix(gt_bboxes, predicted_bboxes) matches = [] for _ in xrange(len(gt_bboxes)): best_match_pos = np.unravel_index(similarity_matrix.argmax(), similarity_matrix.shape) best_match_value = similarity_matrix[best_match_pos] if best_match_value <= min_iou: break gt_id = best_match_pos[0] predicted_id = best_match_pos[1] similarity_matrix[gt_id, :] = 0.0 similarity_matrix[:, predicted_id] = 0.0 matches.append((gt_id, predicted_id)) matched_gt_bbox_num += 1 all_matches[frame_id] = matches print('Matched gt bbox: {} / {} ({:.2f}%)' .format(matched_gt_bbox_num, total_gt_bbox_num, 100. * float(matched_gt_bbox_num) / float(max(1, total_gt_bbox_num)))) return all_matches
187
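# Standalone sketch (toy IoU values, independent of calculate_similarity_matrix) of
# the greedy matching loop above: repeatedly take the best cell, then zero its row
# and column so each ground-truth box and detection is used at most once.
import numpy as np

iou = np.array([[0.9, 0.1],
                [0.2, 0.6],
                [0.0, 0.3]])  # rows: ground-truth boxes, columns: detections
min_iou = 0.5
matches = []
for _ in range(iou.shape[0]):
    gt_id, det_id = np.unravel_index(iou.argmax(), iou.shape)
    if iou[gt_id, det_id] <= min_iou:
        break
    iou[gt_id, :] = 0.0
    iou[:, det_id] = 0.0
    matches.append((int(gt_id), int(det_id)))
print(matches)  # [(0, 0), (1, 1)]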
def map_string(affix_string: str, punctuation: str, whitespace_only: bool = False) -> str: """Turn affix string into type char representation. Types are 'w' for non-whitespace char, and 's' for whitespace char. :param affix_string: a string :type: str :param punctuation: the set of characters to treat as punctuation :type punctuation: str :param whitespace_only: whether to treat only whitespace as word boundary or also include (some) punctuation :type whitespace_only: bool :return: the type char representation :rtype: str """ if whitespace_only: return "".join(["s" if char == " " else "w" for char in affix_string]) else: return "".join(["s" if char == " " or char in punctuation else "w" for char in affix_string])
188
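# Usage sketch for map_string above (assumed inputs): punctuation counts as a
# boundary ('s') unless whitespace_only is set.
print(map_string("co-op day", punctuation="-"))                        # 'wwswwswww'
print(map_string("co-op day", punctuation="-", whitespace_only=True))  # 'wwwwwswww'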
def info(input, verbose, pyformat, **kwargs): """ Provides info about the input. Requires valid input. """ if not input: input = '-' with click.open_file(input, mode='rb') as f: data = json_utils.load_ordered(f) d = { 'length': len(data), 'keys': sorted(data.keys()) } if verbose: d['_object'] = { 'type': type(data), # 'repr': repr(data), # 'vars': sorted(vars(data)), # 'dir': sorted(dir(data)), 'members': sorted(varsdict(data).keys()) } # click.echo(d) # click.echo(sorted(d.items())) if pyformat: s = pformat(d) else: s = json.dumps(d, indent=2, sort_keys=True) click.echo(s)
189
def config_loads(cfg_text, from_cfg=None, whitelist_keys=None):
    """Same as config_load but loads from a string
    """
    try:
        cfg = AttrDict(yaml.load(cfg_text))
    except TypeError:
        # empty string
        cfg = AttrDict()

    if from_cfg:
        if not whitelist_keys:
            whitelist_keys = []

        _validate_config(cfg, from_cfg, whitelist_keys)
        return from_cfg + cfg
    return cfg
190
def align_column ( table , index , align = 'left') :
    """Align the given column of the table
    >>> aligned = align_column ( table , 1 , 'left' )
    """
    nrows = [ list ( row ) for row in table ]
    lmax = 0
    for row in nrows :
        if index < len ( row ) :
            item = decolorize ( row [ index ] )
            lmax = max ( lmax , len ( item ) )

    if not lmax : return table

    aleft  = align.lower() in left
    aright = not aleft and align.lower() in right

    new_table = []
    for row in nrows :
        if index < len ( row ) :
            item   = decolorize ( row [ index ] )
            nspace = lmax - len ( item )
            if   aleft :
                item = row [ index ] + nspace * ' '
            elif aright:
                item = nspace * ' ' + row [ index ]
            else :
                sl = nspace // 2
                sr = nspace - sl
                item = sl * ' ' + row [ index ] + sr * ' '
            row[ index ] = item
        new_table.append ( row )

    return [ tuple ( row ) for row in new_table ]
191
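# Standalone sketch (assumed table, independent of the decolorize/left/right helpers
# used above) of the same idea: pad one column of a table of tuples to a shared width.
table = [("alpha", "1"), ("beta", "22"), ("gamma", "333")]
index = 1
width = max(len(row[index]) for row in table)
left_aligned = [tuple(cell.ljust(width) if i == index else cell
                      for i, cell in enumerate(row))
                for row in table]
print(left_aligned)  # [('alpha', '1  '), ('beta', '22 '), ('gamma', '333')]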
def calculate_shared_private_key(partner): """ Calculate a shared private key :param partner: Name of partner """ print('generating {}'.format(get_private_filename(partner))) private_key = get_key(private_filename) public_key = get_key(public_filename) shared_modified_key = get_key(get_modified_filename(partner)) shared_private_key = pow(shared_modified_key, private_key, public_key) save_key(shared_private_key, get_private_filename(partner))
192
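# Toy numbers (assumptions) illustrating the modular exponentiation the helper above
# relies on: both parties derive the same shared value from each other's modified keys.
modulus = 23                      # plays the role of the value read from public_filename
generator = 5
alice_private, bob_private = 6, 15
alice_modified = pow(generator, alice_private, modulus)
bob_modified = pow(generator, bob_private, modulus)
assert pow(bob_modified, alice_private, modulus) == pow(alice_modified, bob_private, modulus)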
def make_form(domain, parent, data, existing=None): """simulate a POST payload from the location create/edit page""" location = existing or Location(domain=domain, parent=parent) def make_payload(k, v): if hasattr(k, '__iter__'): prefix, propname = k prefix = 'props_%s' % prefix else: prefix, propname = 'main', k return ('%s-%s' % (prefix, propname), v) payload = dict(make_payload(k, v) for k, v in data.iteritems()) return LocationForm(location, payload)
193
def score_auroc(y_true: Iterable[int], y_prob: Iterable[Iterable[float]]) -> float:
    """
    Computes the Area Under ROC curve (AUROC).

    Parameters
    ----------
    y_true : Iterable[int]
        True class labels, one per sample.
    y_prob : Iterable[Iterable[float]]
        Predicted class probabilities for each sample.

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
    .. [2] `Analyzing a portion of the ROC curve. McClish, 1989
            <https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_
    """
    assert len(y_true) == len(y_prob)

    tpr, fpr = roc_curve_multiclass(y_true, y_prob)

    fpr_diffs = [fpr[i] - fpr[i - 1] for i in range(1, len(fpr))]
    tpr_means = [(tpr[i] + tpr[i - 1]) / 2.0 for i in range(1, len(tpr))]

    return sum([tpr_i * fpr_i for tpr_i, fpr_i in zip(tpr_means, fpr_diffs)])
194
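# Standalone sketch (assumed ROC points, independent of roc_curve_multiclass) of the
# midpoint accumulation used above: the area under a piecewise-linear ROC curve.
fpr = [0.0, 0.5, 1.0]
tpr = [0.0, 0.8, 1.0]
fpr_diffs = [fpr[i] - fpr[i - 1] for i in range(1, len(fpr))]
tpr_means = [(tpr[i] + tpr[i - 1]) / 2.0 for i in range(1, len(tpr))]
print(sum(t * f for t, f in zip(tpr_means, fpr_diffs)))  # 0.65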
def set_group(pathname,group_name):
    """Change the group for the given file or directory"""
    #group=grp.getgrnam(group_name)
    #gid = group.gr_gid
    #os.lchown(pathname, -1, gid)#To leave one of the ids unchanged, set it to -1. This function will not follow symbolic links.
    shutil.chown(pathname, group=group_name)
195
def test_add_item_db(session):
    """Test that an item can be added through the session

    Args:
        session (SQLAlchemy Session object): The session object from the SQLAlchemy instance
    """
    add_item = Items("Conjured Mana Cake", 5, 8)
    session.add(add_item)
    session.commit()
    assert add_item.name == "Conjured Mana Cake"
    assert add_item.sell_in == 5
    assert add_item.quality == 8
196
def abs_densitye_seed(model, inputs, args, tokenizer, **kwargs):
    """Maximum density sampling: scores each example by its information density when passed through [model]"""
    # print('getting embedding_a')
    X_a = load_and_embed_examples(args, model, tokenizer, evaluate=True, text = 'text_a')
    # print('getting embedding_b')
    X_b = load_and_embed_examples(args, model, tokenizer, evaluate=True, text = 'text_b')
    X = np.absolute(X_a - X_b)
    similarity_mtx = 1 / (1 + pairwise_distances(X, X, metric='euclidean'))
    scores = torch.tensor(similarity_mtx.mean(axis=1))
    return scores
197
def get_marketplace_image_output(instance_type: Optional[pulumi.Input[Optional[str]]] = None, label: Optional[pulumi.Input[str]] = None, zone: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMarketplaceImageResult]: """ Gets local image ID of an image from its label name. ## Example Usage ```python import pulumi import pulumi_scaleway as scaleway my_image = scaleway.get_marketplace_image(label="ubuntu_focal") ``` :param str instance_type: The instance type the image is compatible with. You find all the available types on the [pricing page](https://www.scaleway.com/en/pricing/). :param str label: Exact label of the desired image. You can use [this endpoint](https://api-marketplace.scaleway.com/images?page=1&per_page=100) to find the right `label`. :param str zone: `zone`) The zone in which the image exists. """ ...
198
def read_capacity_from_file(experiment_name): """ Read and return the min capacity, max capacity, interpolation, gamma as a tuple if the capacity is variable. Otherwise return the constant capacity as is. TODO: This is a bit brittle at the moment - We should take a look at fixing this for static beta later. Parameters ---------- experiment_name : str The name of the experiment, which is the name of the folder that the model is expected to be in. """ meta_data = load_metadata(os.path.join(RES_DIR, experiment_name)) min_capacity = meta_data['betaB_initC'] max_capacity = meta_data['betaB_finC'] interp_capacity = meta_data['betaB_stepsC'] gamma = meta_data['betaB_G'] return (min_capacity, max_capacity, interp_capacity, gamma)
199