def get_msids_for_add_msids(opt, logger):
    """
    Parse MSIDs spec file (opt.add_msids) and return corresponding list of MSIDs.

    This implements support for a MSID spec file like::

      # MSIDs that match the name or pattern are included, where * matches
      # anything (0 or more characters) while ? matches exactly one character:
      #
      aopcadm?
      aacccd*

      # MSIDs with the same subsystem and sampling rate as given MSIDs are included.
      # Example: */1wrat gives all acis4eng engineering telemetry.
      */1wrat

      # MSIDs with the same subsystem regardless of sampling rate.
      # Example: **/3tscpos gives all engineering SIM telemetry
      **/3tscpos

    :param opt: options
    :param logger: logger
    :return: msids_out, msids_content (mapping of MSID to content type)
    """
    logger.info(f'Reading available cheta archive MSIDs from {opt.sync_root}')
    with get_readable(opt.sync_root, opt.is_url, sync_files['msid_contents']) as (tmpfile, uri):
        if tmpfile is None:
            # If index_file is not found then get_readable returns None
            logger.info(f'No cheta MSIDs list file found at {uri}')
            return None

        logger.info(f'Reading cheta MSIDs list file {uri}')
        msids_content = pickle.load(gzip.open(tmpfile, 'rb'))

    content_msids = collections.defaultdict(list)
    for msid, content in msids_content.items():
        content_msids[content].append(msid)

    logger.info(f'Reading MSID specs from {opt.add_msids}')
    with open(opt.add_msids) as fh:
        lines = [line.strip() for line in fh.readlines()]
    msid_specs = [line.upper() for line in lines if (line and not line.startswith('#'))]

    logger.info('Assembling list of MSIDs that match MSID specs')
    msids_out = []

    for msid_spec in msid_specs:
        if msid_spec.startswith('**/'):
            msid_spec = msid_spec[3:]
            content = msids_content[msid_spec]
            subsys = re.match(r'([^\d]+)', content).group(1)
            for content, msids in content_msids.items():
                if content.startswith(subsys):
                    logger.info(f'  Found {len(msids)} MSIDs for **/{msid_spec} with '
                                f'content = {content}')
                    msids_out.extend(msids)

        elif msid_spec.startswith('*/'):
            msid_spec = msid_spec[2:]
            content = msids_content[msid_spec]
            msids = content_msids[content]
            logger.info(f'  Found {len(msids)} MSIDs for */{msid_spec} with '
                        f'content = {content}')
            msids_out.extend(msids)

        else:
            msids = [msid for msid in msids_content if fnmatch(msid, msid_spec)]
            if not msids:
                raise ValueError(f'no MSID matching {msid_spec} (remember derived params like '
                                 'PITCH must be written as "dp_<MSID>")')
            logger.info(f'  Found {len(msids)} MSIDs for {msid_spec}')
            msids_out.extend(msids)

    logger.info(f'  Found {len(msids_out)} matching MSIDs total')

    return msids_out, msids_content

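A minimal sketch of how this might be driven, assuming a cheta-style sync archive and a spec file in the format documented above; SimpleNamespace stands in for the real parsed-options object, and both paths are hypothetical:

import logging
from types import SimpleNamespace

opt = SimpleNamespace(
    sync_root='/data/cheta/sync',  # hypothetical archive location
    is_url=False,
    add_msids='msid_specs.txt',    # hypothetical spec file
)
logger = logging.getLogger('add_msids')
result = get_msids_for_add_msids(opt, logger)
if result is not None:  # None means no MSIDs list file was found
    msids_out, msids_content = result
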
def gaussian_similarity(stimulus_representation, i, j, w, c, r):
    """
    Calculates and returns the Gaussian similarity of stimuli i and j
    (equation 4b in [Noso86]_)

    Parameters
    ----------
    stimulus_representation : np.array
        The stimuli are given to this function in the form of a n x N matrix,
        where n is the number of stimuli and N is the number of dimensions of
        each stimulus in the psychological space
    i : int
        Stimulus i
    j : int
        Stimulus j
    w : list
        The list of weights corresponding to each dimension of the stimulus
        in the psychological space
    c : int
        The scale parameter used in the distance calculation
    r : int
        Minkowski's distance metric. A value of 1 corresponds to the
        city-block metric (generally used when the stimuli have separable
        dimensions); a value of 2 corresponds to the Euclidean distance
        metric (generally used when the stimuli have integral dimensions)

    Returns
    -------
    np.float64
        The Gaussian similarity between the two stimuli
    """
    def distance():
        """
        Calculates the distance between two stimuli (equation 6 in [Noso86]_)

        Returns
        -------
        np.float64
            Distance scaled by the scale parameter 'c'
        """
        total = 0.0
        N = np.shape(stimulus_representation)[1]
        for idx in range(N):
            # The Minkowski metric uses the absolute coordinate difference
            total += (w[idx] * abs(stimulus_representation[i, idx] -
                                   stimulus_representation[j, idx]) ** r)
        total = total ** (1 / r)
        return c * total

    return np.exp(-(distance()) ** 2)

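A quick numeric check of the formula, as a minimal sketch: two stimuli in a two-dimensional psychological space with equal attention weights, scale c=1 and the city-block metric (r=1).

import numpy as np

stimuli = np.array([[0.0, 0.0],
                    [1.0, 2.0]])
# distance = 0.5*|0-1| + 0.5*|0-2| = 1.5, so similarity = exp(-1.5**2)
sim = gaussian_similarity(stimuli, 0, 1, w=[0.5, 0.5], c=1, r=1)
print(sim)  # ~0.105
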
def windShearVector(u, v, top, bottom, unit=None):
    """Calculate the u and v layer difference and return as vector."""
    udiff = layerDiff(u, top, bottom, unit)
    vdiff = layerDiff(v, top, bottom, unit)
    return makeVector(udiff, vdiff)

def test_escaping():
    """test escaping Windows Resource files to Python strings"""
    assert rc.escape_to_python('''First line \
second line''') == "First line second line"
    assert rc.escape_to_python("A newline \\n in a string") == "A newline \n in a string"
    assert rc.escape_to_python("A tab \\t in a string") == "A tab \t in a string"
    assert rc.escape_to_python("A backslash \\\\ in a string") == "A backslash \\ in a string"
    assert rc.escape_to_python(r'''First line " \
"second line''') == "First line second line"

def create_tables(cur, conn) -> None:
    """Create all tables based on create_table_queries and commit the
    results into the database immediately.

    Parameters
    ----------
    cur: psycopg2 cursor
        cursor of the connection
    conn: psycopg2 connection
        connection to the database
    """
    for query in create_table_queries:
        cur.execute(query)
        conn.commit()

def test_validate_owner_inactive_subscription(
    api_rf, subscription_factory, user_factory
):
    """
    If the user initiating the transfer has an inactive subscription,
    validation should fail.
    """
    owner = user_factory()
    subscription_factory(is_active=False, user=owner)
    api_rf.user = owner

    recipient = user_factory()

    serializer = subscription_serializers.SubscriptionTransferSerializer(
        context={"request": api_rf.post("/")}
    )
    serializer.validate_recipient_email(recipient.primary_email.email)

    with pytest.raises(ValidationError):
        serializer.validate({"recipient_email": recipient.primary_email.email})

def withdraw_entry(contest):
    """Withdraws a submitted entry from the contest.

    After this step the submitted entry will be seen as a draft.
    """
    return _update_sketch(contest, code=None, action="withdraw")

def test_invalid_properties(properties, error_message):
    """Check that ValueError is raised if invalid properties are passed.

    1. Try to create an embedded representation with invalid properties.
    2. Check that ValueError is raised.
    3. Check the error message.
    """
    with pytest.raises(ValueError) as error_info:
        EmbeddedRepresentation(relations=DEFAULT_RELATIONS, properties=properties)

    assert error_info.value.args[0] == error_message, "Wrong error message"

def scan():
    """Update the database of transactions (amount in each address)."""
    controller.scan_utxos()

def download_file(url, output_path):
    """Downloads a file given its URL and the output path to be saved.

    Args:
        url (str): URL to download the file.
        output_path (str): Path to save the downloaded file.
    """
    file_exists = os.path.exists(output_path)
    if not file_exists:
        folder_exists = os.path.exists(c.DATA_FOLDER)
        if not folder_exists:
            os.mkdir(c.DATA_FOLDER)
        urllib.request.urlretrieve(url, output_path)

def _conditional_field(if_, condition, colon, comment, eol, indent, body, dedent):
    """Formats an `if` construct."""
    del indent, dedent  # Unused
    # The body of an 'if' should be columnized with the surrounding blocks, so
    # much like an inline 'bits', its body is treated as an inline list of
    # blocks.
    header_row = _Row('if', ['{} {}{} {}'.format(if_, condition, colon, comment)])
    indented_body = _indent_blocks(body)
    assert indented_body, 'Expected body of if condition.'
    return [_Block([header_row] + eol + indented_body[0].prefix,
                   indented_body[0].header,
                   indented_body[0].body)] + indented_body[1:]

def check_sum_cases(nation='England'):
    """check total data"""
    ck = LocalLatest()
    fail = False
    data = ck.data.get('data')
    latest = {}
    data = clean_cases(data)  # repair glitches

    # check latest data matches stored data for nation
    for i in data:
        _code = i['areaCode']
        latest[_code] = i
        try:
            _nation = ons_week.nation[_code]
        except Exception as e:
            log.error(e)
            log.error(i['areaName'])
            continue
        if _nation == nation:
            if _code in ons_week.stored_names:
                place = ons_week.stored_names[_code]
                _total = DailyCases.objects.filter(areaname=place).aggregate(
                    Max('totalLabConfirmedCases')).get('totalLabConfirmedCases__max')
                _latest = i['cumCasesByPublishDate']
                if _total != _latest:
                    print(f'Mismatch: {place} latest total {_latest} != stored {_total}')
                    fail = True
                else:
                    # print(f'{place} up to date')
                    pass
            else:
                place = i['areaName']
                print(f'{place} not counted / not in TR tally')

    sumtotal = 0
    for _code in ons_week.stored_names:
        if ons_week.nation[_code] == nation:
            i = latest.get(_code)
            if i:
                _latest = i['cumCasesByPublishDate']
                _total = DailyCases.objects.filter(areacode=_code).aggregate(
                    Max('totalLabConfirmedCases')).get('totalLabConfirmedCases__max')
                if _latest != _total:
                    print(f'Mismatch: {_code} latest total {_latest} != stored {_total}')
                else:
                    if _latest:
                        sumtotal += _latest
            else:
                print(f'Missing place {_code} in PHE published cases')
    print(f'Sum total of stored names for {nation} is {sumtotal}')
    return fail

def get_module_name() -> str:
    """Gets the name of the module that called a function.

    Is meant to be used within a function.

    :returns: The name of the module that called your function
    """
    return getmodulename(stack()[2][1])

def hand_points(work_hand):
    """Returns the point value of a given hand."""
    debug_level = 1
    work_points = 0
    for card in work_hand:
        work_points += card_point_value(card)
    return work_points

def elements_counter(arr, count=0):
    """Recursively count the number of elements in a list.

    Arguments:
        arr {list} -- the list to count

    Keyword Arguments:
        count {int} -- number of elements counted so far (default: {0})

    Returns:
        [int] -- the number of elements in the list
    """
    if len(arr):
        arr.pop(0)
        count += 1
        return elements_counter(arr, count)
    return count

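A quick usage sketch; note that the recursion pops elements off the front, so the input list is consumed in place and you should pass a copy if you want to keep it:

nums = [3, 1, 4, 1, 5]
print(elements_counter(list(nums)))  # 5 -- counting a copy
print(nums)                          # [3, 1, 4, 1, 5] -- original untouched
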
def dataset_is_open_data(dataset: Dict) -> bool:
    """Check if dataset is tagged as open data."""
    is_open_data = dataset.get("isOpenData")
    if is_open_data:
        return is_open_data["value"] == "true"
    return False

def destroy_shared_memory_region(shm_handle):
    """Unlink a shared memory region with the specified handle.

    Parameters
    ----------
    shm_handle : c_void_p
        The handle for the shared memory region.

    Raises
    ------
    SharedMemoryException
        If unable to unlink the shared memory region.
    """
    _raise_if_error(c_int(_cshm_shared_memory_region_destroy(shm_handle)))
    return

def _remove_suffix_apple(path):
    """
    Strip off .so or .dylib.

    >>> _remove_suffix_apple("libpython.so")
    'libpython'
    >>> _remove_suffix_apple("libpython.dylib")
    'libpython'
    >>> _remove_suffix_apple("libpython3.7")
    'libpython3.7'
    """
    if path.endswith(".dylib"):
        return path[:-len(".dylib")]
    if path.endswith(".so"):
        return path[:-len(".so")]
    return path

def sparsenet201(**kwargs):
    """
    SparseNet-201 model from 'Sparsely Aggregated Convolutional Networks,'
    https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sparsenet(num_layers=201, model_name="sparsenet201", **kwargs)

async def process_token(message: types.Message, state: FSMContext):
    """Process user token."""
    logger.info('Processing VK token input.')
    vk_token = message.text
    async with state.proxy() as data:
        data['vk_token'] = vk_token
    test_result, test_message = await vk.test_token(vk_token)
    await bot.send_message(message.chat.id, test_message)
    if test_result:
        await Form.group_id.set()
        await bot.send_message(message.chat.id, 'Enter the group ID:')
    else:
        # Authorization failed for some reason
        await bot.send_message(
            message.chat.id,
            'Authorization failed somehow, please try again')

def _run_diagnostic_(diff, diff_var, rratio=None, rratio_var=None,
                     oratio=None, oratio_var=None, color="gray"):
    """Background function to run all diagnostics

    Returns
    -------
    Plot to console
    """
    # Continuous outcomes have fewer plots to generate
    if rratio is None:
        # Point estimates
        plt.subplot(121)
        _estimate_density_plot_(diff, bw_method='scott', fill=True, color=color)
        plt.title("ACE")
        # Variance estimates
        plt.subplot(122)
        _estimate_density_plot_(diff_var, bw_method='scott', fill=True, color=color)
        plt.title("Var(ACE)")

    # Binary outcomes have plots for all measures
    else:
        if oratio is None:
            # Risk Difference estimates
            plt.subplot(221)
            _estimate_density_plot_(diff, bw_method='scott', fill=True, color=color)
            plt.title("Risk Difference")
            # Var(RD) estimates
            plt.subplot(223)
            _estimate_density_plot_(diff_var, bw_method='scott', fill=True,
                                    color=color, variance=True)
            plt.title("Var(RD)")
            # Risk Ratio estimates
            plt.subplot(222)
            _estimate_density_plot_(rratio, bw_method='scott', fill=True, color=color)
            plt.title("Risk Ratio")
            # Var(RR) estimates
            plt.subplot(224)
            _estimate_density_plot_(rratio_var, bw_method='scott', fill=True,
                                    color=color, variance=True)
            plt.title("Var(ln(RR))")
        else:
            # Risk Difference estimates
            plt.subplot(231)
            _estimate_density_plot_(diff, bw_method='scott', fill=True, color=color)
            plt.title("Risk Difference")
            # Var(RD) estimates
            plt.subplot(234)
            _estimate_density_plot_(diff_var, bw_method='scott', fill=True,
                                    color=color, variance=True)
            plt.title("Var(RD)")
            # Risk Ratio estimates
            plt.subplot(232)
            _estimate_density_plot_(rratio, bw_method='scott', fill=True, color=color)
            plt.title("Risk Ratio")
            # Var(RR) estimates
            plt.subplot(235)
            _estimate_density_plot_(rratio_var, bw_method='scott', fill=True,
                                    color=color, variance=True)
            plt.title("Var(ln(RR))")
            # Odds Ratio estimates
            plt.subplot(233)
            _estimate_density_plot_(oratio, bw_method='scott', fill=True, color=color)
            plt.title("Odds Ratio")
            # Var(OR) estimates
            plt.subplot(236)
            _estimate_density_plot_(oratio_var, bw_method='scott', fill=True,
                                    color=color, variance=True)
            plt.title("Var(ln(OR))")

    plt.tight_layout()
    plt.show()

def alt_text_to_curly_bracket(text):
    """
    Converts the text that appears in the alt attribute of image tags from
    gatherer to a curly-bracket mana notation.

    ex: 'Green' -> {G}, 'Blue or Red' -> {U/R}
        'Variable Colorless' -> {XC}
        'Colorless' -> {C}
        'N colorless' -> {N}, where N is some number
    """
    def convert_color_to_letter(color):
        if color.lower() not in ('red', 'white', 'blue', 'green', 'black',
                                 'colorless', 'tap', 'energy'):
            # Some cards have weird split mana costs where you can pay N
            # colorless or one of a specific color. Since we're ending up
            # here, and what we're given isn't a color, let's assume it's N.
            return color
        else:
            if color.lower() == 'blue':
                return 'U'
            else:
                return color[0].upper()

    try:
        val = int(text, 10)
    except Exception:
        pass
    else:
        # This is just a number. Easy enough.
        return f"{{{text}}}"

    if ' or ' in text:
        # This is a compound color, not as easy to deal with.
        text = text.replace('or', '')
        text = '/'.join([convert_color_to_letter(x) for x in text.split()])
    else:
        if 'Variable' in text:
            text = 'X'
        else:
            # Hopefully all that's left is just simple color symbols.
            text = convert_color_to_letter(text)

    # At this point we've hopefully reduced the text to a symbol string.
    return f"{{{text}}}"

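A few illustrative conversions, assuming the behavior above:

print(alt_text_to_curly_bracket('Green'))        # {G}
print(alt_text_to_curly_bracket('Blue or Red'))  # {U/R}
print(alt_text_to_curly_bracket('2'))            # {2}
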
def massage_primary(repo_primary, src_cache, cdt):
    """
    Massages the result of dictify() into a less cumbersome form.
    In particular:
    1. There are many lists that can only be of length one that
       don't need to be lists at all.
    2. The '_text' entries need to go away.
    3. The real information starts at ['metadata']['package']
    4. We want the top-level key to be the package name and under that,
       an entry for each arch for which the package exists.
    """

    new_dict = dict({})
    for package in repo_primary['metadata']['package']:
        name = package['name'][0]['_text']
        arch = package['arch'][0]['_text']
        if arch == 'src':
            continue
        checksum = package['checksum'][0]['_text']
        source = package['format'][0]['{rpm}sourcerpm'][0]['_text']
        # If you need to check if the sources exist (perhaps you've got the
        # source URL wrong or the distro has forgotten to copy them?):
        # import requests
        # sbase_url = cdt['sbase_url']
        # surl = sbase_url + source
        # print("{} {}".format(requests.head(surl).status_code, surl))
        location = package['location'][0]['href']
        version = package['version'][0]
        summary = package['summary'][0]['_text']
        try:
            description = package['description'][0]['_text']
        except:
            description = "NA"
        if '_text' in package['url'][0]:
            url = package['url'][0]['_text']
        else:
            url = ''
        license = package['format'][0]['{rpm}license'][0]['_text']
        try:
            provides = package['format'][0]['{rpm}provides'][0]['{rpm}entry']
            provides = massage_primary_requires(provides, cdt)
        except:
            provides = []
        try:
            requires = package['format'][0]['{rpm}requires'][0]['{rpm}entry']
            requires = massage_primary_requires(requires, cdt)
        except:
            requires = []
        new_package = dict({'checksum': checksum,
                            'location': location,
                            'home': url,
                            'source': source,
                            'version': version,
                            'summary': yaml_quote_string(summary),
                            'description': description,
                            'license': license,
                            'provides': provides,
                            'requires': requires})
        if name in new_dict:
            if arch in new_dict[name]:
                print("WARNING: Duplicate packages exist for {} for arch {}".format(name, arch))
            new_dict[name][arch] = new_package
        else:
            new_dict[name] = dict({arch: new_package})
    return new_dict

def ansi_color_name_to_escape_code(name, style="default", cmap=None):
    """Converts a color name to the inner part of an ANSI escape code"""
    cmap = _ensure_color_map(style=style, cmap=cmap)
    if name in cmap:
        return cmap[name]
    m = RE_XONSH_COLOR.match(name)
    if m is None:
        raise ValueError("{!r} is not a color!".format(name))
    parts = m.groupdict()
    # convert regex match into actual ANSI colors
    if parts["reset"] is not None:
        if parts["reset"] == "NO_COLOR":
            warn_deprecated_no_color()
        res = "0"
    elif parts["bghex"] is not None:
        res = "48;5;" + rgb_to_256(parts["bghex"][3:])[0]
    elif parts["background"] is not None:
        color = parts["color"]
        if "#" in color:
            res = "48;5;" + rgb_to_256(color[1:])[0]
        else:
            fgcolor = cmap[color]
            if fgcolor.isdecimal():
                res = str(int(fgcolor) + 10)
            elif fgcolor.startswith("38;"):
                res = "4" + fgcolor[1:]
            elif fgcolor == "DEFAULT":
                res = "39"
            else:
                msg = (
                    "when converting {!r}, did not recognize {!r} within "
                    "the following color map as a valid color:\n\n{!r}"
                )
                raise ValueError(msg.format(name, fgcolor, cmap))
    else:
        # have regular, non-background color
        mods = parts["modifiers"]
        if mods is None:
            mods = []
        else:
            mods = mods.strip("_").split("_")
            mods = [ANSI_ESCAPE_MODIFIERS[mod] for mod in mods]
        color = parts["color"]
        if "#" in color:
            mods.append("38;5;" + rgb_to_256(color[1:])[0])
        elif color == "DEFAULT":
            res = "39"
        else:
            mods.append(cmap[color])
        res = ";".join(mods)
    cmap[name] = res
    return res

def twitter(bot, message):
    """#twitter [-p days]

    -p : how many days ago
    """
    try:
        cmd, *args = shlex.split(message.text)
    except ValueError:
        return False
    if not cmd[0] in config['trigger']:
        return False
    if not cmd[1:] == 'twitter':
        return False
    try:
        options, args = getopt.gnu_getopt(args, 'hp:')
    except getopt.GetoptError:
        # malformed options
        reply(bot, message, twitter.__doc__)
        return True
    days = 0
    for o, a in options:
        if o == '-p':
            # how many days ago
            try:
                days = int(a)
                if days < 0:
                    raise ValueError
            except ValueError:
                reply(bot, message, twitter.__doc__)
                return True
        elif o == '-h':
            # help
            reply(bot, message, twitter.__doc__)
            return True
    tweets = Twitter.objects(
        Q(date__gte=datetime.now().date() + timedelta(days=-days)) &
        Q(date__lte=datetime.now().date() + timedelta(days=-days + 1)))
    if tweets:
        reply(bot, message, '\n---------\n'.join([str(tweet) for tweet in tweets]))
        return True
    else:
        reply(bot, message, "Anna hasn't said anything...")
        return True

def checkdir(*args: str) -> bool:
    """
    Guard for checking directories

    Returns:
        bool -- True if all non-empty arguments are existing absolute directories
    """
    for a in args:
        if a and not os.path.isdir(a):
            return False
        if a and a[0] != '/':
            return False
    return True

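A quick sketch of the guard's behavior (results assume a typical Unix layout):

print(checkdir('/tmp', '/usr'))   # True on most Unix systems
print(checkdir('relative/path'))  # False -- not an absolute path
print(checkdir(''))               # True -- empty arguments are skipped
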
def get_zones(request):
    """Returns preprocessed thermal data for a given request or None."""
    logging.info("received zone request: %s", request.building)
    zones, err = _get_zones(request.building)
    if err is not None:
        return None, err
    grpc_zones = []
    for zone in zones:
        grpc_zones.append(
            building_zone_names_pb2.NamePoint(name=zone))
    return building_zone_names_pb2.Reply(names=grpc_zones), None

def proper_classification(sp):
    """
    Uses splat.classifyByStandard to classify spectra using spex standards
    """
    # sp.slitpixelwidth = 1
    # sp.slitwidth = 1
    # sp.toInstrument('WFC3-G141')
    wsp = wisps.Spectrum(wave=sp.wave.value, flux=sp.flux.value,
                         noise=sp.noise.value,
                         contam=np.ones_like(sp.noise.value))
    val = wisps.classify(wsp, stripunits=True)
    return val

def sum_last_4_layers(sequence_outputs: Tuple[torch.Tensor]) -> torch.Tensor:
    """Sums the last 4 hidden representations of a sequence output of BERT.

    Args:
    -----
    sequence_outputs: Tuple of tensors of shape (batch, seq_length, hidden_size).
        For BERT base, the tuple has length 13.

    Returns:
    --------
    summed_layers: Tensor of shape (batch, seq_length, hidden_size)
    """
    last_layers = sequence_outputs[-4:]
    return torch.stack(last_layers, dim=0).sum(dim=0)

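A minimal shape check with fake hidden states (random tensors standing in for real BERT-base outputs):

import torch

# 13 "layers" of (batch=2, seq=8, hidden=768), as BERT base would return
hidden_states = tuple(torch.randn(2, 8, 768) for _ in range(13))
summed = sum_last_4_layers(hidden_states)
print(summed.shape)  # torch.Size([2, 8, 768])
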
def TotalCust():
    """(read-only) Total Number of customers served from this line section."""
    return lib.Lines_Get_TotalCust()

def extra_normalize(text_orig: str):
    """
    This function applies a simple normalization to the original text to make
    the aligning process possible. The replacement_patterns were obtained
    during experimentation with real text; it is possible to add more, or to
    get some errors without new rules.

    :Note: very important, every rule in replacement_patterns must not change
    the length of the original text; it only replaces patterns with strings
    of the same length. This process is different to preProcessFlow.
    """
    replacement_patterns = [
        (r'[:](?=\s*?\n)', '##1'),
        (r'\xc2|\xa0', ' '),
        (r'(\w\s*?):(?=\s+?[A-Z]+?)|(\w\s*?):(?=\s*?"+?[A-Z]+?)', '\g<1>##2'),
        (r'[?!]', '##3'),
        # any alphanumeric char followed by \n followed by any number of
        # punctuation signs followed by a capital letter or digit;
        # replace by alphanumeric char + mark.
        (r'(\w+?)(\n)(?=["$%()*+&,-/;:¿¡<=>@[\\]^`{|}~\t\s]*(?=.*[A-Z0-9]))', '\g<1>##4'),
        # any alphanumeric char followed by \n followed by any number of
        # punctuation signs followed by a letter or digit;
        # replace by alphanumeric char + mark.
        (r'(\w+?)(\n)(?=["$%()*+&,-/;:¿¡<=>@[\\]^`{|}~\t\s\n]*(?=[a-zA-Z0-9]))', '\g<1>##5'),
        (r'[:](?=\s*?)(?=["$%()*+&,-/;:¿¡<=>@[\\]^`{|}~\t\s]*[A-Z]+?)', '##6'),
        (r'(\w+?\s*?)\|', '\g<1>##7'),
        (r'\n(?=\s*?[A-Z]+?)', '##8'),
        (r'##\d', 'apdbx'),
    ]
    for (pattern, repl) in replacement_patterns:
        (text_orig, count) = re.subn(pattern, repl, text_orig)
    text_orig = replace_dot_sequence(text_orig)
    text_orig = multipart_words(text_orig)
    text_orig = abbreviations(text_orig)
    text_orig = re.sub(r'apdbx+', '.', text_orig)
    # Append a final '.' if the last character has no period; this avoids an
    # infinite loop at the end.
    text_orig = add_doc_ending_point(text_orig)
    return text_orig

def test_serial_bad_configA():
    """Test if bad_configurationA causes a RuntimeError on trying to create
    the population."""
    # Load configuration.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'bad_configurationA')
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_path)
    try:
        # Create the population, which is the top-level object for a NEAT run.
        p = neat.Population(config)
    except RuntimeError:
        pass
    else:
        raise Exception(
            "Should have had a RuntimeError with bad_configurationA")

def _print_version():
    """
    Print the version of the currently running collector.

    Gets the collector name from inspecting `__main__`.
    Gets version information from environment variables set by an S2I build.
    """
    import __main__

    # name of dir above app.py
    exporter_name = pathlib.PurePath(__main__.__file__).parent.name
    repo, ref, commit = (
        os.environ.get(f"OPENSHIFT_BUILD_{var.upper()}")
        for var in "source reference commit".split()
    )
    if repo and ref and commit:
        print(
            f"Running {exporter_name} exporter from {repo}, ref {ref} (commit {commit})"
        )
    else:
        print(f"Running {exporter_name} exporter. No version information found.")

def test_vbm_installed(mock_path_exists):
    """Test installed."""
    vbm = mech.vbm.VBoxManage(executable='/bin/VBoxManage')
    assert vbm.installed()
    mock_path_exists.assert_called()
    assert vbm.get_executable() == '/bin/VBoxManage'

def list2str(lst: list) -> str:
    """
    Convert the elements of a list to a string that prints one element per
    line, each prefixed with its index (starting from 1).

    e.g.
    In:  lst = [a, b, c]
         s = list2str(lst)
         print(s)
    Out: 1. a
         2. b
         3. c
    """
    i = 1
    res_list = []
    for x in lst:
        res_list.append(str(i) + '. ' + str(x))
        i += 1
    res_str = '\n'.join(res_list)
    return res_str

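The same behavior can be written more idiomatically with enumerate; a minimal equivalent sketch (list2str_enum is a hypothetical name, not part of the original code):

def list2str_enum(lst: list) -> str:
    # enumerate(lst, 1) yields (index, element) pairs starting at 1
    return '\n'.join(f'{i}. {x}' for i, x in enumerate(lst, 1))

print(list2str_enum(['a', 'b', 'c']))
# 1. a
# 2. b
# 3. c
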
def _prompt_save():  # pragma: no cover
    """Show a prompt asking the user whether he wants to save or not.

    Output is 'save', 'cancel', or 'close'
    """
    b = prompt(
        "Do you want to save your changes before quitting?",
        buttons=['save', 'cancel', 'close'],
        title='Save')
    return show_box(b)

def plot_dataset_samples_1d(
    dataset,
    n_samples=10,
    title="Dataset",
    figsize=DFLT_FIGSIZE,
    ax=None,
    plot_config_kwargs={},
    seed=123,
):
    """Plot `n_samples` samples of the dataset."""
    np.random.seed(seed)

    with plot_config(plot_config_kwargs):
        if ax is None:
            fig, ax = plt.subplots(1, 1, figsize=figsize)

        alpha = 0.5 + 1 / (n_samples ** 0.5 + 1)

        for i in range(n_samples):
            x, y = dataset[np.random.randint(len(dataset))]
            x = rescale_range(x, (-1, 1), dataset.min_max)
            ax.plot(x.numpy(), y.numpy(), alpha=alpha)
            ax.set_xlim(*dataset.min_max)

    if title is not None:
        ax.set_title(title, fontsize=14)

    return ax

def test_str_conversion_of_command_object():
    """
    String conversion shows embedded command string, class of command object
    and id to allow for differentiating between multiple instances of same
    command
    """

    class PingCmd(Command):
        def __init__(self, host='localhost', connection=None):
            super(PingCmd, self).__init__(connection=connection)
            self.command_string = 'ping {}'.format(host)

        def data_received(self, data, recv_time):
            pass  # not important now

    ping = PingCmd()
    assert 'PingCmd("ping localhost", id:{})'.format(instance_id(ping)) == str(ping)

    ping = PingCmd(host='127.0.0.1')
    assert 'PingCmd("ping 127.0.0.1", id:{})'.format(instance_id(ping)) == str(ping)

    ping.command_string = ''
    assert 'PingCmd("<EMPTY COMMAND STRING>", id:{})'.format(instance_id(ping)) == str(ping)

def list_versions(namespace, name, provider):
    """List versions for a module.

    Args:
        namespace (str): namespace for the version
        name (str): name of the module
        provider (str): provider for the module

    Returns:
        response: JSON formatted response
    """
    try:
        return make_response(backend.get_versions(namespace, name, provider), 200)
    except ModuleNotFoundException as module_not_found:
        return make_response(module_not_found.message, 404)

def script_cbor(self, script_hash: str, **kwargs):
    """
    CBOR representation of a plutus script

    https://docs.blockfrost.io/#tag/Cardano-Scripts/paths/~1scripts~1{script_hash}~1cbor/get

    :param script_hash: Hash of the script.
    :type script_hash: str
    :param return_type: Optional. "object", "json" or "pandas". Default: "object".
    :type return_type: str
    :returns: A list of ScriptCborResponse objects.
    :rtype: [ScriptCborResponse]
    :raises ApiError: If API fails
    :raises Exception: If the API response is somehow malformed.
    """
    return requests.get(
        url=f"{self.url}/scripts/{script_hash}/cbor",
        headers=self.default_headers
    )

import math

def ithOfNPointsOnCircleY(i, n, r):
    """
    Return the y coordinate of the ith of n points on a circle of radius r.

    Points are numbered from 0 through n-1, spread counterclockwise around
    the circle. Point 0 is at angle 0, as on a unit circle, i.e. at (r, 0).
    """
    # Per the original hint ("use r sin(theta)"): theta is the fraction
    # i/n of a full turn.
    theta = 2 * math.pi * i / n
    return r * math.sin(theta)

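Spot checks, assuming the implementation above (4 points on a circle of radius 2):

print(ithOfNPointsOnCircleY(0, 4, 2))  # 0.0   -- point at (2, 0)
print(ithOfNPointsOnCircleY(1, 4, 2))  # 2.0   -- point at (0, 2)
print(ithOfNPointsOnCircleY(2, 4, 2))  # ~0.0  -- point at (-2, 0)
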
def fieldset_experiment_results(objparent):
    """
    :param objparent:
    """
    objparent.id()
    objparent.created_at()
    objparent.occured_at()
    objparent.result()
    objparent.updated_at()
    objparent.object_class_id()
    fieldset_research_plan_metrics(objparent.research_plan_metric())

def get_tags(ec2id, ec2type, region):
    """
    get tags
    return tags (json)
    """
    mytags = []
    ec2 = connect('ec2', region)
    if ec2type == 'volume':
        response = ec2.describe_volumes(VolumeIds=[ec2id])
        if 'Tags' in response['Volumes'][0]:
            mytags = response['Volumes'][0]['Tags']
    elif ec2type == 'snapshot':
        response = ec2.describe_snapshots(SnapshotIds=[ec2id])
        if 'Tags' in response['Snapshots'][0]:
            mytags = response['Snapshots'][0]['Tags']
    return mytags

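A hypothetical call, assuming `connect` wraps boto3.client for the given region and that the volume id below exists (it is made up for illustration); EC2 tags come back as a list of {'Key': ..., 'Value': ...} dicts:

tags = get_tags('vol-0abc123def456', 'volume', 'us-east-1')  # hypothetical id
for tag in tags:
    print(tag['Key'], '=', tag['Value'])
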
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) unprocessed output if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        Dictionary. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    raw_output = {}

    if jc.utils.has_data(data):
        for line in filter(None, data.splitlines()):
            linedata = line.split(':', maxsplit=1)
            key = linedata[0].strip().lower().replace(' ', '_').replace('.', '_')
            value = linedata[1].strip()
            raw_output[key] = value

    if raw:
        return raw_output
    else:
        return _process(raw_output)

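A sketch of the key/value extraction this parser performs, assuming "Key: Value"-style input and the module-level helpers (jc.utils, info, _process) are available; with raw=True the post-processing step is skipped:

sample = "Serial Number: ABC123\nModel Name. Full: Widget Pro"
# Each line is split on the first ':'; keys are lower-cased with spaces and
# periods replaced by underscores:
# {'serial_number': 'ABC123', 'model_name__full': 'Widget Pro'}
print(parse(sample, raw=True, quiet=True))
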
def download_pretrained_model(model: str, target_path: str = None) -> str:
    """Downloads the pretrained model to the given target path; if the target
    path is None, the model cache path is used. If the model already exists
    at the given target path then nothing is done.

    Args:
        model (str): pretrained model name to download
        target_path (str, optional): target directory to download model. Defaults to None.

    Returns:
        str: file path of the model
    """
    if target_path is None:
        target_path = get_model_cache_dir()

    registry = get_registry()

    assert model in registry, f"given model: {model} is not in the registry"
    assert os.path.exists(target_path), f"given target path: {target_path} does not exist"
    assert os.path.isdir(target_path), "given target path must be directory not a file"

    adapter = registry[model]["adapter"]
    file_name = registry[model]["adapter"]["kwargs"]["file_name"]
    model_path = os.path.join(target_path, file_name)

    if not os.path.isfile(model_path):
        # download if the model does not exist
        download_object(adapter['type'], dest_path=target_path, **adapter['kwargs'])

    return model_path

def callNasaApi(date='empty'):
    """Calls NASA APIs.

    Args:
        date (str, optional): date for NASA APOD API. Defaults to 'empty'.

    Returns:
        Dict: custom API response
    """
    print('calling nasa APOD API...')
    url = nasaInfo['nasa_apod_api_uri']
    if date != 'empty':
        params = getApodEndpointParams('True', date)
    else:
        params = getApodEndpointParams('True')
    response = makeApiCall(url, params, HttpMethods.get.value)
    return response

def train_reduced_model(x_values: np.ndarray, y_values: np.ndarray,
                        n_components: int, seed: int,
                        max_iter: int = 10000) -> sklearn.base.BaseEstimator:
    """
    Train a reduced-quality model by putting a Gaussian random projection in
    front of the multinomial logistic regression stage of the pipeline.

    :param x_values: input embeddings for training set
    :param y_values: integer labels corresponding to embeddings
    :param n_components: Number of dimensions to reduce the embeddings to
    :param seed: Random seed to drive Gaussian random projection
    :param max_iter: Maximum number of iterations of L-BFGS to run. The
     default value of 10000 will achieve a tight fit but takes a while.
    :returns: A model (Python object with a `predict()` method) fit on the
     input training data with the specified level of dimension reduction
     by random projection.
    """
    reduce_pipeline = sklearn.pipeline.Pipeline([
        ("dimred", sklearn.random_projection.GaussianRandomProjection(
            n_components=n_components, random_state=seed
        )),
        ("mlogreg", sklearn.linear_model.LogisticRegression(
            multi_class="multinomial", max_iter=max_iter
        ))
    ])
    print(f"Training model with n_components={n_components} and seed={seed}.")
    return reduce_pipeline.fit(x_values, y_values)

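A minimal end-to-end sketch on synthetic data (random "embeddings" standing in for real ones), showing the projection-then-classify pipeline in action:

import numpy as np
import sklearn.base
import sklearn.linear_model
import sklearn.pipeline
import sklearn.random_projection

# 200 fake samples of 128-dim embeddings with 3 integer classes
rng = np.random.default_rng(0)
x = rng.normal(size=(200, 128))
y = rng.integers(0, 3, size=200)

# Reduce to 16 dimensions before fitting the logistic regression
model = train_reduced_model(x, y, n_components=16, seed=42, max_iter=500)
print(model.predict(x[:5]))
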
def pv(array):
    """Return the PV value of the valid elements of an array.

    Parameters
    ----------
    array : `numpy.ndarray`
        array of values

    Returns
    -------
    `float`
        PV of the array
    """
    non_nan = np.isfinite(array)
    return array[non_nan].max() - array[non_nan].min()

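A quick check that NaNs and infinities are masked out before the peak-to-valley difference is taken:

import numpy as np

a = np.array([1.0, np.nan, 3.5, -2.0, np.inf])
print(pv(a))  # 5.5, i.e. 3.5 - (-2.0)
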
def test_main_nothing_to_do(capfd: Any) -> None:
    """Do nothing if nothing to do"""
    args = main.cli_parser([])
    with patch.object(main, "cli_parser", MagicMock(return_value=args)):
        main.main()
    out, _ = capfd.readouterr()
    assert out == main.NOTHING_TO_DO + "\n"

def format_bad_frames(bad_frames):
    """Create an array of bad frame indices from a string loaded from a yml file."""
    if bad_frames == "":
        bads = []
    else:
        try:
            bads = [x.split("-") for x in bad_frames.split(",")]
            bads = [[int(x) for x in y] for y in bads]
            bads = np.concatenate(
                [
                    np.array(x) if len(x) == 1 else np.arange(x[0], x[1] + 1)
                    for x in bads
                ]
            )
        except:
            bads = []

    bads = list(bads)
    bads = [x.item() for x in bads]

    return bads

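An example of the expansion: single indices pass through and "a-b" spans become inclusive ranges, while an unparseable string falls back to an empty list:

print(format_bad_frames("1,4-6,10"))  # [1, 4, 5, 6, 10]
print(format_bad_frames(""))          # []
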
def test_multivoxel():
    """Test fitting with multivoxel data.

    We generate a multivoxel signal to test the fitting for multivoxel data.
    This is to ensure that the fitting routine takes care of signals packed
    as 1D, 2D or 3D arrays.
    """
    ivim_fit_multi = ivim_model.fit(data_multi)

    est_signal = ivim_fit_multi.predict(gtab, S0=1.)
    assert_array_equal(est_signal.shape, data_multi.shape)
    assert_array_almost_equal(ivim_fit_multi.model_params, ivim_params)
    assert_array_almost_equal(est_signal, data_multi)