Columns: content (string, 35 to 416k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
def spg_line_search_step_length(current_step_length, delta, f_old, f_new,
                                sigma_one=0.1, sigma_two=0.9):
    """Return next step length for line search."""
    step_length_tmp = (-0.5 * current_step_length ** 2 * delta /
                       (f_new - f_old - current_step_length * delta))
    if sigma_one <= step_length_tmp <= sigma_two * current_step_length:
        next_step_length = step_length_tmp
    else:
        next_step_length = 0.5 * current_step_length
    return next_step_length
844cccdfe1ec3f9c2c287384284ceb2ac3530e8e
709,570
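A quick sanity check of the safeguarded interpolation step above (hypothetical numbers; delta is the directional derivative, negative for a descent direction):

alpha, delta, f_old, f_new = 1.0, -2.0, 10.0, 9.5
# interpolated: -0.5 * 1.0**2 * (-2.0) / (9.5 - 10.0 - 1.0 * (-2.0)) = 1.0 / 1.5
print(spg_line_search_step_length(alpha, delta, f_old, f_new))  # 0.666..., accepted since 0.1 <= 0.666 <= 0.9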
def samps2ms(samples: float, sr: int) -> float:
    """samples to milliseconds given a sampling rate"""
    return (samples / sr) * 1000.0
49e07ee02984bf0e9a0a54715ef6b6e5a3c87798
709,572
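A quick usage sketch: at a 44.1 kHz sampling rate, 441 samples span exactly 10 ms.

print(samps2ms(441, 44100))  # 10.0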
import os


def get_folder_name(path, prefix=''):
    """
    Look at the current path and change the name of the experiment
    if it is repeated

    Args:
        path (string): folder path
        prefix (string): prefix to add

    Returns:
        string: unique path to save the experiment
    """
    if prefix == '':
        prefix = path.split('/')[-1]
        path = '/'.join(path.split('/')[:-1])
    folders = [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))]
    if prefix not in folders:
        path = os.path.join(path, prefix)
    elif not os.path.isdir(os.path.join(path, '{}_0'.format(prefix))):
        path = os.path.join(path, '{}_0'.format(prefix))
    else:
        n = sorted([int(f.split('_')[-1]) for f in folders if '_' in f[-2:]])[-1]
        path = os.path.join(path, '{}_{}'.format(prefix, n + 1))
    return path
126648fbe460581272eedafc8599a3af1ded07e4
709,573
import argparse


def get_args():
    """! Command line parser for Utterance level classification Leave
    one speaker out schema pipeline -- Find Best Models"""
    parser = argparse.ArgumentParser(
        description='Utterance level classification Leave one '
                    'speaker out schema pipeline -- Find Best Models')
    parser.add_argument('-i', '--input_features_paths', nargs='+',
                        help='File paths of the features you want to '
                             'concatenate and then classify')
    args = parser.parse_args()
    return args
4a349da4fe3b570dae359937ed80494075bf26ea
709,574
def esOperador(o):
    """Return True if 'o' is an operator."""
    return o == "+" or o == "-" or o == "/" or o == "*"
7e1088b641dee7cad2594159c4a34cf979362458
709,575
def valid_identity(identity):
    """Determines whether or not the provided identity is a valid value."""
    valid = (identity == "homer") or (identity == "sherlock")
    return valid
9865d19802b596d1d5fdce6ff8d236678da29ee6
709,576
import argparse


def parse_arguments():
    """parse_arguments"""
    parser = argparse.ArgumentParser(description="MindSpore Tensorflow weight transfer")
    parser.add_argument("--pretrained", default=None, type=str)
    parser.add_argument("--name", default="imagenet22k", choices=["imagenet22k"])
    args = parser.parse_args()
    return args
08b57fffe7f95a96f19e35839ca137f9382573ba
709,577
import argparse


def build_arg_parser():
    """
    Builds an argparse object to handle command-line arguments passed in.
    """
    parser = argparse.ArgumentParser(description="Loads an ontology file in "
                                     "OBO file format into a Neo4j graph database.")
    parser.add_argument('-i', '--input_obo_file', required=True,
                        help="The input OBO file")
    parser.add_argument('-s', '--neo4j_server_address', required=True,
                        help="The address to the Neo4j server. Must include port number")
    parser.add_argument('-t', '--typedefs', default="is_a",
                        help="Typedefs that are present in this ontology. These will be "
                             "used to define the types of relationships supported in the "
                             "input ontology")
    parser.add_argument('-r', '--root_node', required=True, action="append",
                        default=[], help="DOID's for any root nodes in this ontology")
    args = parser.parse_args()
    return args
8b9da4a060138b5487f346452c9e3ca85e55b801
709,580
def get_username() -> str:
    """
    Prompts the user to enter a username and then returns it

    :return: The username entered by the user
    """
    while True:
        print("Please enter your username (without spaces)")
        username = input().strip()
        if ' ' not in username:
            return username
1a18a229908b86c32a0822c068b5b9081cc9fdc3
709,581
def get_pip_package_name(provider_package_id: str) -> str:
    """
    Returns PIP package name for the package id.

    :param provider_package_id: id of the package
    :return: the name of pip package
    """
    return "apache-airflow-providers-" + provider_package_id.replace(".", "-")
e7aafbdfb0e296e60fedfcf7e4970d750e4f3ffa
709,582
def first(iterable, default=None):
    """
    Returns the first item or a default value

    >>> first(x for x in [1, 2, 3] if x % 2 == 0)
    2
    >>> first((x for x in [1, 2, 3] if x > 42), -1)
    -1
    """
    return next(iter(iterable), default)
6907e63934967c332eea9cedb5e0ee767a88fe8f
709,583
import random


def random_small_number():
    """
    Generate a random float.

    :return: a random float in [0.0, 1.0)
    """
    return random.random()
45143c2c78dc72e21cbbe0a9c10babd00100be77
709,585
def float_to_16(value):
    """
    Convert a float value into a fixed-point number with 8 fractional bits;
    returns a 16-bit integer, as value * 256.
    """
    value = int(round(value * 0x100, 0))
    return value & 0xffff
0a587e4505c9c19b0cbdd2f94c8a964f2a5a3ccd
709,587
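A short sketch of the 8.8 fixed-point encoding above; negative values wrap into their 16-bit two's-complement form.

print(hex(float_to_16(1.5)))   # 0x180  (1.5 * 256 = 384)
print(hex(float_to_16(-1.0)))  # 0xff00 (two's complement of -256)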
import os


def get_parent_dir():
    """Return the parent of the current working directory (the project root)."""
    return os.path.abspath(os.path.join(os.getcwd(), os.pardir))
f6c0a43cf2a38f507736f09429b7ca7012739559
709,588
def context():
    """Return an instance of the JIRA tool context."""
    return dict()
e24e859add22eef279b650f28dce4f6732c346b8
709,589
def f(OPL, R):
    """
    Restoration function calculated from the optical path length (OPL)
    and from the rational function parameters (R). The rational function
    is multiplied along the whole optical path.
    """
    x = 1
    for ii in range(len(OPL)):
        x = x * (OPL[ii] + R[ii][2]) / (R[ii][0] * OPL[ii] + R[ii][1])
    return x
5b64b232646768d2068b114d112a8da749c84706
709,590
def _str_conv(number, rounded=False):
    """
    Convenience tool to convert a number, either float or int, into a string.
    If the int or float is None, returns empty string.

    >>> print(_str_conv(12.3))
    12.3
    >>> print(_str_conv(12.34546, rounded=1))
    12.3
    >>> print(_str_conv(None))
    <BLANKLINE>
    >>> print(_str_conv(1123040))
    11.2e5
    """
    if not number:
        return str(' ')
    if not rounded and isinstance(number, (float, int)):
        if number < 100000:
            string = str(number)
        else:
            exponent = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
            divisor = 10 ** exponent
            string = '{0:.1f}'.format(number / divisor) + 'e' + str(exponent)
    elif rounded == 2 and isinstance(number, (float, int)):
        if number < 100000:
            string = '{0:.2f}'.format(number)
        else:
            exponent = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
            divisor = 10 ** exponent
            string = '{0:.2f}'.format(number / divisor) + 'e' + str(exponent)
    elif rounded == 1 and isinstance(number, (float, int)):
        if number < 100000:
            string = '{0:.1f}'.format(number)
        else:
            exponent = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
            divisor = 10 ** exponent
            string = '{0:.1f}'.format(number / divisor) + 'e' + str(exponent)
    else:
        return str(number)
    return string
d352e8f0956b821a25513bf4a4eecfae5a6a7dcd
709,591
def label_smoothed_nll_loss(lprobs, target, epsilon: float = 1e-8, ignore_index=None):
    """Adapted from fairseq

    Parameters
    ----------
    lprobs
        Log probabilities of amino acids per position
    target
        Target amino acids encoded as integer indices
    epsilon
        Smoothing factor between 0 and 1, by default 1e-8
    ignore_index, optional
        Amino acid (encoded as integer) to ignore, by default None

    Returns
    -------
    Negative log-likelihood loss
    """
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    else:
        nll_loss = nll_loss.squeeze(-1)
        smooth_loss = smooth_loss.squeeze(-1)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss
eb09b7dd5c800b01b723f33cd0f7a84ae93b3489
709,592
import re


def parse_date(regexen, date_str):
    """
    Parse a messy string into a granular date

    `regexen` is of the form [ (regex, (granularity, groups -> datetime)) ]
    """
    if date_str:
        for reg, (gran, dater) in regexen:
            m = re.match(reg, date_str)
            if m:
                try:
                    return gran, dater(m.groups())
                except ValueError:
                    return 0, None
    return 0, None
a141cad6762556115699ca0327b801537bab1c7e
709,593
import requests


def is_referenced(url, id, catalog_info):
    """Given the url of a resource from the catalog, return True if the
    resource is referenced by data.gouv.fr, and raise an exception otherwise.

    :param url: url of a resource in the catalog
    :type url: string"""
    dgf_page = catalog_info['url_dgf']
    headers = requests.head(url).headers
    downloadable = 'attachment' in headers.get('Content-Disposition', '')
    if not downloadable:
        raise Exception(f'This id is associated to a dataset not referenced by data.gouv.fr. \n '
                        f'Please download the dataset from here: {dgf_page}\n'
                        f'Then manually upload it in the corresponding folder and name it: {id}.csv')
    return downloadable
15cfa64979f2765d29d7c4bb60a7a017feb27d43
709,594
from typing import Dict


def get_entity_contents(entity: Dict) -> Dict:
    """
    :param entity: Entity is a dictionary
    :return: A dict representation of the contents of entity
    """
    return {
        'ID': entity.get('id'),
        'Name': entity.get('name'),
        'EmailAddress': entity.get('email_address'),
        'Organization': entity.get('organization'),
        'Tags': entity.get('labels'),
        'StrictNameMatching': entity.get('strict_name_matching'),
        'PolicyID': entity.get('policy_id'),
        'Profile': entity.get('profile'),
        'EntityGroupID': entity.get('entity_group', {}).get('id') if entity.get('entity_group') else None,
        'EntityGroupName': entity.get('entity_group', {}).get('name') if entity.get('entity_group') else None,
        'TypeID': entity.get('type', {}).get('id') if entity.get('type') else None,
        'TypeName': entity.get('type', {}).get('name') if entity.get('type') else None
    }
3c9e133bf80bc4d59c6f663503b5083401acc4e0
709,595
def t68tot90(t68):
    """Convert from IPTS-68 to ITS-90 temperature scales, as specified in
    the CF Standard Name information for sea_water_temperature.

    http://cfconventions.org/Data/cf-standard-names/27/build/cf-standard-name-table.html

    Temperatures are in degrees C.
    """
    t90 = 0.99976 * t68
    return t90
87ff55a196f01b8f1afd78381e7d012eafa079fa
709,596
def is_insertion(ref, alt):
    """Is alt an insertion w.r.t. ref?

    Args:
        ref: A string of the reference allele.
        alt: A string of the alternative allele.

    Returns:
        True if alt is an insertion w.r.t. ref.
    """
    return len(ref) < len(alt)
17d7d6b8dfdf387e6dd491a6f782e8c9bde22aff
709,597
import os


def get_testfile_paths():
    """
    return the necessary paths for the testfile tests

    Returns
    -------
    str
        absolute file path to the test file
    str
        absolute folder path to the expected output folder
    """
    testfile = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'test_data',
                            '0009_20170523_181119_FA2806.all')
    expected_output = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'test_data',
                                   'converted')
    return testfile, expected_output
f1cb8d29c70c686fbca43175637f44b7c5342180
709,598
def singleton(cls):
    """Decorator that provides singleton functionality.

    >>> @singleton
    ... class Foo(object):
    ...     pass
    ...
    >>> a = Foo()
    >>> b = Foo()
    >>> a is b
    True
    """
    _inst = [None]

    def decorated(*args, **kwargs):
        if _inst[0] is None:
            _inst[0] = cls(*args, **kwargs)
        return _inst[0]

    return decorated
4ae64aeaaba1b838232e4d7700d692dcc109be6d
709,599
def get_choice(options):
    """Return the option selected by the user as an integer;
    returns 0 on non-numeric input."""
    print(options)
    try:
        return int(input("Por favor, escoja una opción: "))
    except ValueError:
        return 0
32e95e0113650d0b94449e5e31e7d8156ae85981
709,600
def _listminus(list1, list2):
    """Return the elements of list1 that are not in list2."""
    return [a for a in list1 if a not in list2]
3f05d8bfd4169d92bb51c4617536b54779b387c9
709,601
def get_station_freqs(df, method='median'):
    """
    apply to df after applying group_by_days and group_by_station
    """
    # df['DATE'] = df.index.get_level_values('DATE')
    df['DAY'] = [d.dayofweek for d in df.index.get_level_values('DATE')]
    df['DAYNAME'] = [d.day_name() for d in df.index.get_level_values('DATE')]
    return df.groupby(['STATION', 'DAY', 'DAYNAME']).agg({'INS': method, 'OUTS': method})
aebc1a2486c48ff2d829fc70f1f2c4b38bd3017b
709,603
import numpy


def quantile(data, num_breaks):
    """
    Calculate quantile breaks.

    Arguments:
    data -- Array of values to classify.
    num_breaks -- Number of breaks to perform.
    """
    def scipy_mquantiles(a, prob=list([.25, .5, .75]), alphap=.4, betap=.4, axis=None, limit=()):
        """ function copied from scipy 0.13.3::scipy.stats.mstats.mquantiles """
        def _quantiles1D(data, m, p):
            x = numpy.sort(data.compressed())
            n = len(x)
            if n == 0:
                return numpy.ma.array(numpy.empty(len(p), dtype=float), mask=True)
            elif n == 1:
                return numpy.ma.array(numpy.resize(x, p.shape), mask=numpy.ma.nomask)
            aleph = (n * p + m)
            k = numpy.floor(aleph.clip(1, n - 1)).astype(int)
            gamma = (aleph - k).clip(0, 1)
            return (1. - gamma) * x[(k - 1).tolist()] + gamma * x[k.tolist()]

        # Initialization & checks ---------
        data = numpy.ma.array(a, copy=False)
        if data.ndim > 2:
            raise TypeError("Array should be 2D at most !")
        #
        if limit:
            condition = (limit[0] < data) & (data < limit[1])
            data[~condition.filled(True)] = numpy.ma.masked
        #
        p = numpy.array(prob, copy=False, ndmin=1)
        m = alphap + p * (1. - alphap - betap)
        # Computes quantiles along axis (or globally)
        if (axis is None):
            return _quantiles1D(data, m, p)
        return numpy.ma.apply_along_axis(_quantiles1D, axis, data, m, p)

    return scipy_mquantiles(data, numpy.linspace(1.0 / num_breaks, 1, num_breaks))
24486e39fcefb9e6cf969067836d1793b9f4a7c8
709,604
def get_valid_fields(val: int, cs: dict) -> set:
    """
    A value is valid if there's at least one field's interval
    which contains it.
    """
    return {
        field
        for field, intervals in cs.items()
        if any(map(lambda i: i[0] <= val <= i[1], intervals))
    }
3016e78637374eadf7d0e2029d060538fea86377
709,605
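A small usage sketch with a hypothetical field-to-intervals mapping:

fields = {"row": [(1, 5), (10, 12)], "seat": [(4, 8)]}
print(get_valid_fields(5, fields))  # {'row', 'seat'}
print(get_valid_fields(9, fields))  # set()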
def get_successors(graph):
    """Returns a dict of all successors of each node."""
    d = {}
    for e in graph.get_edge_list():
        src = e.get_source()
        dst = e.get_destination()
        if src in d:
            d[src].add(dst)
        else:
            d[src] = set([dst])
    return d
1ec7b0ab8772dc738758bb14fe4abd5dd4b9074e
709,606
def route53_scan(assets, record_value, record):
    """
    Scan Route53
    """
    for i, asset in enumerate(assets):
        asset_type = asset.get_type()
        if asset_type == 'EC2' and record_value in (asset.public_ip, asset.private_ip):
            assets[i].dns_record = record['Name'].replace('\\052', '*')
        elif asset_type == 'ELBV2' and record_value == f'{asset.name}.':
            assets[i].dns_record = record['Name'].replace('\\052', '*')
    return assets
eccbb2d716ef7b5dd713e7fbbd210c246c97347d
709,607
def to_int(text):
    """Text to integer."""
    try:
        return int(text)
    except ValueError:
        return ''
d870ee05c3117111adcf85c91038b19beaf9585b
709,608
def flooding(loss, b):
    """flooding loss"""
    return (loss - b).abs() + b
c34eedf0421b60e27bd813381ff7dfe96a3912eb
709,609
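A minimal sketch of the flooding trick above, assuming `loss` is a torch tensor (anything with an `.abs()` method works): once the raw loss drops below the flood level b, the flooded loss rises again.

import torch
loss = torch.tensor(0.02)
print(flooding(loss, b=0.05))  # tensor(0.0800): |0.02 - 0.05| + 0.05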
def nir_mean(msarr, nir_band=7):
    """
    Calculate the mean of the (unmasked) values of the NIR (near infrared)
    band of an image array. The default `nir_band` value of 7 selects the
    NIR2 band in WorldView-2 imagery. If you're working with a different
    type of imagery, you will need to figure out the appropriate value to
    use instead.

    Parameters
    ----------
    msarr : numpy array (RxCxBands shape)
        The multispectral image array. See `OpticalRS.RasterDS` for more info.
    nir_band : int (Default value = 7)
        The default `nir_band` value of 7 selects the NIR2 band in
        WorldView-2 imagery. If you're working with a different type of
        imagery, you will need to figure out the appropriate value to use
        instead. This is a zero indexed number (the first band is 0, not 1).

    Returns
    -------
    float
        The mean radiance in the NIR band.
    """
    return msarr[..., nir_band].mean()
7ba6ea8b7d51b8942a0597f2f89a05ecbee9f46e
709,610
def sizeFromString(sizeStr, relativeSize):
    """
    Converts from a size string to a float size.
    sizeStr: The string representation of the size.
    relativeSize: The size to use in case of percentages.
    """
    if not sizeStr:
        raise Exception("Size not specified")
    dpi = 96.0
    cm = 2.54
    if len(sizeStr) > 2 and sizeStr[-2:] == 'cm':
        return float(sizeStr[:-2]) * dpi / cm
    elif len(sizeStr) > 2 and sizeStr[-2:] == 'mm':
        return float(sizeStr[:-2]) * dpi / (cm * 10.0)
    elif len(sizeStr) > 1 and sizeStr[-1:] == 'Q':
        return float(sizeStr[:-1]) * dpi / (cm * 40.0)
    elif len(sizeStr) > 2 and sizeStr[-2:] == 'in':
        return float(sizeStr[:-2]) * dpi
    elif len(sizeStr) > 2 and sizeStr[-2:] == 'pc':
        return float(sizeStr[:-2]) * dpi / 6.0
    elif len(sizeStr) > 2 and sizeStr[-2:] == 'pt':
        return float(sizeStr[:-2]) * dpi / 72.0
    elif len(sizeStr) > 2 and sizeStr[-2:] == 'em':
        return float(sizeStr[:-2]) * 16.0
    elif len(sizeStr) > 2 and sizeStr[-2:] == 'px':
        return float(sizeStr[:-2])
    elif len(sizeStr) > 1 and sizeStr[-1:] == '%':
        return float(sizeStr[:-1]) / 100.0 * relativeSize
    return float(sizeStr)
5f53d7d1ea86d4c54beb3aaebca228f7706e5a9b
709,611
import re


def verify_time_format(time_str):
    """
    Verify that a time string is in the format 'hour:minute', where both
    parts may be one or two characters. Hour must be greater than or equal
    to 0 and smaller than 24; minute must be greater than or equal to 0 and
    smaller than 60.

    :param time_str: time str
    :return: True if the format is valid, False otherwise
    """
    if not isinstance(time_str, str):
        return False
    time_format = r'^(\d{1,2}):(\d{1,2})$'
    matched = re.match(time_format, time_str)
    if matched:
        if 0 <= int(matched.group(1)) < 24 and 0 <= int(matched.group(2)) < 60:
            return True
        else:
            print('Hour should be within [0, 24); Minute should be within [0, 60)')
            return False
    else:
        return False
fee469248d4d1d792c1ed858cf9043e5695c9f5d
709,612
def has_space_element(source):
    """
    Check the elements of an object: return True if any element is None, an
    empty string, or a whitespace-only string, otherwise return False.
    Supports dicts, lists and tuples.

    :param:
        * source: (list, set, dict) object to check
    :return:
        * result: (bool) True if a None, empty or whitespace-only element
          exists, False otherwise

    Example::

        print('--- has_space_element demo---')
        print(has_space_element([1, 2, 'test_str']))
        print(has_space_element([0, 2]))
        print(has_space_element([1, 2, None]))
        print(has_space_element((1, [1, 2], 3, '')))
        print(has_space_element({'a': 1, 'b': 0}))
        print(has_space_element({'a': 1, 'b': []}))
        print('---')

    Output::

        --- has_space_element demo---
        False
        False
        True
        True
        False
        True
        ---
    """
    if isinstance(source, dict):
        check_list = list(source.values())
    elif isinstance(source, list) or isinstance(source, tuple):
        check_list = list(source)
    else:
        raise TypeError('source expects list, tuple or dict, but got {}'.format(type(source)))
    for i in check_list:
        # numeric zero is not "empty"; the original `i is 0` relied on
        # CPython small-int interning and warns on Python 3.8+
        if type(i) is int and i == 0:
            continue
        if not (i and str(i).strip()):
            return True
    return False
ab8a968fb807654af73d9017145c0af2259ae41e
709,613
def return_latest_psm_is(df, id_col, file_col, instr_col, psm_col):
    """
    Extracts info on PSM number, search ID and Instrument
    from the last row in DB
    """
    last_row = df.iloc[-1]
    search_id = last_row[id_col]
    instr = last_row[instr_col]
    psm = last_row[psm_col]
    psm_string = str(psm) + ' PSMs in file ' + str(last_row[file_col])
    print('String to put on the graph', psm_string)
    return (search_id, instr, psm, psm_string)
73c5acc945b9a6ef40aa1ce102351152b948a4b6
709,614
def add_parser_arguments_misc(parser):
    """
    Adds the options that the command line parser will search for, some
    miscellaneous parameters, like use of gpu, timing, etc.

    :param parser: the argument parser
    :return: the same parser, but with the added options.
    """
    parser.add_argument('--use_gpu', action='store_true',
                        help='use GPU (CUDA). For loading data on Windows OS, if you get an '
                             'Access Denied or Operation Not Supported for cuda, you must set '
                             '--loader_num_workers to 0 '
                             '(you can\'t share CUDA tensors among Windows processes).')
    parser.add_argument('--gpu_num', default="0", type=str)
    parser.add_argument('--map_gpu_beginning', action='store_true',
                        help='Will map all tensors (including FULL dataset) to GPU at the start '
                             'of the instance, if --use_gpu flag is supplied and CUDA is '
                             'available. This option is NOT recommended if you have low GPU '
                             'memory or if your dataset is very large, since you may quickly '
                             'run out of memory.')
    parser.add_argument('--timing', action='store_true',
                        help='if specified, will display times for several parts of training')
    parser.add_argument('--load_args_from_json', type=str, default=None,
                        help='Path to json file containing args to pass. Should be an object '
                             'containing the keys of the attributes you want to change (keys '
                             'that you don\'t supply will be left unchanged) and their values '
                             'according to their type (int, str, bool, list, etc.)')
    return parser
706ec64dfd6393fd1bd4741568e5e1af1d22a4d0
709,615
import os
import json


def get_jobs(job_filename):
    """Reads jobs from a known job file location"""
    jobs = list()
    if job_filename and os.path.isfile(job_filename):
        with open(job_filename, 'r') as input_fd:
            data = input_fd.read()
        job_dict = json.loads(data)
        del data
        for job in job_dict['jobs']:
            jobs.append(job)
        os.unlink(job_filename)
    return jobs
eaa091131a026c8a4c5f4e788406e185e1bbffde
709,617
from typing import List
from typing import Set


def grouping_is_valid(
    proposed_grouping: List[Set[str]],
    past_groups: List[Set[str]],
    max_intersection_size: int,
) -> bool:
    """Returns true if no group in the proposed grouping intersects with any
    past group with intersection size strictly greater than
    `max_intersection_size`.
    """
    for group in proposed_grouping:
        for past_group in past_groups:
            if len(group & past_group) > max_intersection_size:
                return False
    return True
caeb7568a2e8fddea9058ccc512dc9c06070ece9
709,618
def next_wire_in_dimension(wire1, tile1, wire2, tile2, tiles, x_wires,
                           y_wires, wire_map, wires_in_node):
    """ next_wire_in_dimension returns true if tile1 and tile2 are in the same
    row and column, and must be adjacent.
    """
    tile1_info = tiles[tile1]
    tile2_info = tiles[tile2]
    tile1_x = tile1_info['grid_x']
    tile2_x = tile2_info['grid_x']
    tile1_y = tile1_info['grid_y']
    tile2_y = tile2_info['grid_y']
    # All wires are in the same row or column, or each wire lies in its own
    # row or column.
    if len(y_wires) == 1 or len(x_wires) == len(wires_in_node) or abs(
            tile1_y - tile2_y) == 0:
        ordered_wires = sorted(x_wires.keys())
        idx1 = ordered_wires.index(tile1_x)
        idx2 = ordered_wires.index(tile2_x)
        if len(x_wires[tile1_x]) == 1 and len(x_wires[tile2_x]) == 1:
            return abs(idx1 - idx2) == 1
    if len(x_wires) == 1 or len(y_wires) == len(wires_in_node) or abs(
            tile1_x - tile2_x) == 0:
        ordered_wires = sorted(y_wires.keys())
        idx1 = ordered_wires.index(tile1_y)
        idx2 = ordered_wires.index(tile2_y)
        if len(y_wires[tile1_y]) == 1 and len(y_wires[tile2_y]) == 1:
            return abs(idx1 - idx2) == 1
    return None
2c2b6a2cb4d117f2435568437d38f05311b7dd13
709,619
from math import log

import numpy as np
import numpy.random as rand
from scipy.special import gammaln

# NOTE: `stirling` is an external lookup table of Stirling numbers used by
# the original project; it is not defined in this snippet.


def sampleM(a0, bk, njk, m_cap=20):
    """produces sample from distribution over M using normalized log
    probabilities parameterizing a categorical dist."""
    raise DeprecationWarning()
    wts = np.empty((m_cap,))
    sum = 0
    for m in range(m_cap):
        wts[m] = (gammaln(a0 * bk) - gammaln(a0 * bk + njk)
                  + log(stirling.get(njk, m) + 1e-9) + m * (a0 + bk))
        sum += wts[m]  # the original accumulated wts[-1], i.e. the last slot
    wts = np.array(wts) / sum
    print(wts, np.sum(wts))
    return rand.multinomial(1, wts)
76cc9e0bd6a0594bd8b6350053957073ccf9caf9
709,620
def or_default(none_or_value, default):
    """
    inputs:
        none_or_value: variable to test
        default: value to return if none_or_value is None
    """
    return none_or_value if none_or_value is not None else default
43200fe3bd1308eed87de0ad905873fd3c629067
709,621
import re


def keyclean(key):
    """
    Default way to clean table headers so they make good dictionary keys.
    """
    clean = re.sub(r'\s+', '_', key.strip())
    clean = re.sub(r'[^\w]', '', clean)
    return clean
0f28f0e92e2817a98a31396949690a46e7538ace
709,622
import collections


def get_rfactors_for_each(lpin):
    """
    Parses blocks like the following from an XDS LP file:

     R-FACTORS FOR INTENSITIES OF DATA SET /isilon/users/target/target/Iwata/_proc_ox2r/150415-hirata/1010/06/DS/multi011_1-5/XDS_ASCII_fullres.HKL

     RESOLUTION   R-FACTOR   R-FACTOR  COMPARED
        LIMIT     observed   expected

         5.84        60.4%     50.1%       174
         4.13        58.1%     51.5%       310
         3.38        60.0%     54.6%       410
         2.92        90.3%     76.1%       483
         2.62       130.4%    100.3%       523
         2.39       241.1%    180.5%       612
         2.21       353.9%    277.9%       634
         2.07       541.1%    444.0%       673
         1.95       -99.9%    -99.9%       535
        total        84.5%     71.2%      4354
    """
    read_flag = False
    filename = None
    ret = collections.OrderedDict()  # {filename: list of [dmin, Robs, Rexpt, Compared]}
    for l in open(lpin):
        if "R-FACTORS FOR INTENSITIES OF DATA SET" in l:
            filename = l.strip().split()[-1]
        elif "LIMIT observed expected" in l:
            read_flag = True
        elif read_flag:
            sp = l.strip().replace("%", "").split()
            if len(sp) == 4:
                dmin, robs, rexp, compared = sp
                if dmin != "total":
                    dmin = float(dmin)
                else:
                    dmin, read_flag = None, False
                robs, rexp = map(float, (robs, rexp))
                compared = int(compared)
                ret.setdefault(filename, []).append([dmin, robs, rexp, compared])
    return ret
937ad8e2cf01fa6ab92838d235a385f9bbfb1b63
709,623
def value_left(self, right):
    """
    Returns the value of the right type instance to use in an
    operator method, namely when the method's instance is on the
    left side of the expression.
    """
    return right.value if isinstance(right, self.__class__) else right
f28c2f0548d3e004e3dd37601dda6c1ea5ab36f6
709,624
def get_mid_surface(in_surfaces):
    """get_mid_surface gives the mid surface when dealing with the 7 different surfaces

    Args:
        (list of strings) in_surfaces : List of path to the 7 different
            surfaces generated by mris_expand

    Returns:
        (string) Path to the mid surface
    """
    return in_surfaces[3]
718ab8fa7a3b716241ae05a4e507f40ab6cb0efd
709,625
def parse_type(msg_type):
    """
    Parse ROS message field type
    :param msg_type: ROS field type, ``str``
    :returns: base_type, is_array, array_length, ``(str, bool, int)``
    :raises: :exc:`ValueError` If *msg_type* cannot be parsed
    """
    if not msg_type:
        raise ValueError("Invalid empty type")
    if '[' in msg_type:
        var_length = msg_type.endswith('[]')
        splits = msg_type.split('[')
        if len(splits) > 2:
            raise ValueError("Currently only support 1-dimensional array types: %s" % msg_type)
        if var_length:
            return msg_type[:-2], True, None
        else:
            try:
                length = int(splits[1][:-1])
                return splits[0], True, length
            except ValueError:
                raise ValueError("Invalid array dimension: [%s]" % splits[1][:-1])
    else:
        return msg_type, False, None
1dfe4f3abb7b69bed17b60ee2666279081666dc6
709,626
def echo(text):
    """Return the given text unchanged."""
    return text
c128bc86bc63006a1ac5b209c10b21f787b7100a
709,627
def zernike_name(index, framework='Noll'):
    """
    Get the name of the Zernike with input index in input framework (Noll or WSS).

    :param index: int, Zernike index
    :param framework: str, 'Noll' or 'WSS' for Zernike ordering framework
    :return zern_name: str, name of the Zernike in the chosen framework
    """
    noll_names = {1: 'piston', 2: 'tip', 3: 'tilt', 4: 'defocus', 5: 'astig45',
                  6: 'astig0', 7: 'ycoma', 8: 'xcoma', 9: 'ytrefoil',
                  10: 'xtrefoil', 11: 'spherical'}
    wss_names = {1: 'piston', 2: 'tip', 3: 'tilt', 5: 'defocus', 4: 'astig45',
                 6: 'astig0', 8: 'ycoma', 7: 'xcoma', 10: 'ytrefoil',
                 11: 'xtrefoil', 9: 'spherical'}
    if framework == 'Noll':
        zern_name = noll_names[index]
    elif framework == 'WSS':
        zern_name = wss_names[index]
    else:
        raise ValueError('No known Zernike convention passed.')
    return zern_name
33e73739c11bc2340a47162e161ba7d87e26d279
709,628
def xml_string(line, tag, namespace, default=None):
    """ Get string value from etree element """
    try:
        val = line.find(namespace + tag).text
    except AttributeError:  # find() returned None: the tag is absent
        val = default
    return val
77745d463cf6604ed787e220fdabf6ff998f770e
709,629
def probabilities(X) -> dict:
    """
    This function maps the set of outcomes found in the sequence of events,
    'X', to their respective probability of occurring in 'X'. The return
    value is a python dictionary where the keys are the set of outcomes and
    the values are their associated probabilities.
    """
    # The set of outcomes, denoted as 'C', and the total events, denoted as 'T'.
    C, T = set(X), len(X)
    return {c: X.count(c) / T for c in C}
c908a1186feea270be71bb1f03485c901bc82733
709,630
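A quick usage sketch; any sequence with a `.count()` method works:

print(probabilities("aab"))      # {'a': 0.666..., 'b': 0.333...} (key order may vary)
print(probabilities([1, 1, 2]))  # {1: 0.666..., 2: 0.333...}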
def select_daily(ds, day_init=15, day_end=21):
    """
    Select lead time days.

    Args:
        ds: xarray dataset.
        day_init (int): first lead day selection. Defaults to 15.
        day_end (int): last lead day selection. Defaults to 21.

    Returns:
        xarray dataset subset based on time selection.

    ::Lead time indices for reference::

        Week 1:  1,  2,  3,  4,  5,  6,  7
        Week 2:  8,  9, 10, 11, 12, 13, 14
        Week 3: 15, 16, 17, 18, 19, 20, 21
        Week 4: 22, 23, 24, 25, 26, 27, 28
        Week 5: 29, 30, 31, 32, 33, 34, 35
        Week 6: 36, 37, 38, 39, 40, 41, 42
    """
    return ds.isel(lead=slice(day_init, day_end + 1))
9948ecba5acc3c1ca2fe28526585d0bfa81fb862
709,631
def bulk_lookup(license_dict, pkg_list):
    """Lookup package licenses"""
    pkg_licenses = {}
    for pkg in pkg_list:
        # Failsafe in case the bom file contains incorrect entries
        if not pkg.get("name") or not pkg.get("version"):
            continue
        pkg_key = pkg["name"] + "@" + pkg["version"]
        if pkg.get("vendor"):
            pkg_key = pkg.get("vendor") + ":" + pkg["name"] + "@" + pkg["version"]
        for lic in pkg.get("licenses"):
            if lic == "X11":
                lic = "MIT"
            elif "MIT" in lic:
                lic = "MIT"
            curr_list = pkg_licenses.get(pkg_key, [])
            match_lic = license_dict.get(lic)
            if match_lic:
                curr_list.append(match_lic)
            pkg_licenses[pkg_key] = curr_list
    return pkg_licenses
aa06b02fdfaa079dbfc4e1210ccccc995393dc52
709,632
def pack_bits(bools):
    """Pack sequence of bools into bits"""
    if len(bools) % 8 != 0:
        raise ValueError("list length must be multiple of 8")
    bytes_ = []
    b = 0
    for j, v in enumerate(reversed(bools)):
        b <<= 1
        b |= v
        if j % 8 == 7:
            bytes_.append(b)
            b = 0
    return bytes_
fadfb5e6abdb80691473262fac57f22384827c50
709,633
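A quick sanity check: the list is consumed in reverse, so the first input bool lands in the least-significant bit of the final byte.

print(pack_bits([True] + [False] * 7))  # [1]
print(pack_bits([True, False] * 4))     # [85] == 0b01010101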
import json


def LoadJSON(json_string):
    """Loads json object from string, or None.

    Args:
        json_string: A string to get object from.

    Returns:
        JSON object if the string represents a JSON object, None otherwise.
    """
    try:
        data = json.loads(json_string)
    except ValueError:
        data = None
    return data
598c9b4d5e358a7a4672b25541c9db7743fcd587
709,634
def map_aemo_facility_status(facility_status: str) -> str:
    """
    Maps an AEMO facility status to an Opennem facility status
    """
    unit_status = facility_status.lower().strip()
    if unit_status.startswith("in service"):
        return "operating"
    if unit_status.startswith("in commissioning"):
        return "commissioning"
    if unit_status.startswith("committed"):
        return "committed"
    if unit_status.startswith("maturing"):
        return "maturing"
    if unit_status.startswith("emerging"):
        return "emerging"
    raise Exception(
        "Could not find AEMO status for facility status: {}".format(unit_status)
    )
43e1d5e5ea984d36260604cf25f4c7b90d5e56f1
709,635
def demand_monthly_ba(tfr_dfs):
    """A stub transform function."""
    return tfr_dfs
74bbb3d732b64a30f0529f76deedd646cc7d4171
709,636
def _SortableApprovalStatusValues(art, fd_list):
    """Return a list of approval statuses relevant to one UI table column."""
    sortable_value_list = []
    for fd in fd_list:
        for av in art.approval_values:
            if av.approval_id == fd.field_id:
                # Order approval statuses by life cycle.
                # NOT_SET == 8 but should be before all other statuses.
                sortable_value_list.append(
                    0 if av.status.number == 8 else av.status.number)
    return sortable_value_list
15ce3c6191495957674ab38c2f990d34f10ecdf6
709,637
def load_config_file(config_file):
    """
    Loads the given file into a list of lines

    :param config_file: file name of the config file
    :type config_file: str
    :return: config file as a list (one item per line) as returned by open().readlines()
    """
    with open(config_file, 'r') as f:
        config_document = f.readlines()
    return config_document
6a6e0199566e9ea27db309b2164f323cd5f57fdc
709,638
import re

import requests


def get(url: str) -> dict:
    """Return author, audioName and audios for a kuwo.cn song URL."""
    data = {}
    headers = {
        "Accept": "application/json, text/plain, */*",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Host": "www.kuwo.cn",
        "Referer": "http://www.kuwo.cn/",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
    }
    song_info_url_format = "http://m.kuwo.cn/newh5/singles/songinfoandlrc?musicId={id}"
    mp3_url_format = ("http://www.kuwo.cn/url?format=mp3&rid={id}&response=url"
                      "&type=convert_url3&br={quality}&from=web")
    # http://www.kuwo.cn/play_detail/*********
    id = re.findall(r"/(\d{1,})", url)
    if id:
        id = id[0]
    else:
        data["msg"] = "不支持输入的链接形式"  # unsupported URL format
        return data
    session = requests.session()
    # Fetch the best available quality and the song metadata
    with session.get(song_info_url_format.format(id=id), headers=headers, timeout=10) as rep:
        if rep.status_code == 200 and rep.json().get("status") == 200:
            best_quality = rep.json().get("data").get("songinfo").get("coopFormats")[0]
            author = rep.json().get("data").get("songinfo").get("artist")
            song_name = rep.json().get("data").get("songinfo").get("songName")
            pic = rep.json().get("data").get("songinfo").get("pic")
            data["author"] = author
            data["audioName"] = song_name
            data["imgs"] = [pic]
        else:
            data["msg"] = "获取失败"  # fetch failed
            return data
    if not best_quality:
        best_quality = "128kmp3"
    # Fetch the playable audio URL
    with session.get(mp3_url_format.format(id=id, quality=best_quality),
                     headers=headers, timeout=10) as rep:
        if rep.status_code == 200 and rep.json().get("code") == 200:
            play_url = rep.json().get("url")
            data["audios"] = [play_url]
        else:
            data["msg"] = "获取音频链接失败"  # failed to get audio URL
    return data
5dd97f4974b1fdc0a89ad36bbb14ad5c26e1582d
709,639
from pathlib import Path


def check_overwrite(path: str, overwrite: bool = False) -> str:
    """
    Check if a path exists, if so raising a RuntimeError if overwriting is disabled.

    :param path: Path
    :param overwrite: Whether to overwrite
    :return: Path
    """
    if Path(path).is_file() and not overwrite:
        raise RuntimeError(
            f"Requested existing {path!r} as output, but overwriting is disabled."
        )
    return path
961affdcc87b055cdd5acb9a28547ef87ae426b9
709,640
import struct


def bytes_to_text(input):
    """Converts given bytes (latin-1 char + padding)*length to text"""
    content = struct.unpack((int(len(input) / 2)) * "sx", input)
    return "".join([x.decode("latin-1") for x in content]).rstrip("\x00")
f058847886fc3a488c54b8e01c3d7506f6d76510
709,641
def update_game(game_obj, size, center1, center2):
    """ Update game state """
    new_game_obj = game_obj.copy()
    if center1 is not None:
        new_game_obj['rudder1_pos'] = center1
    if center2 is not None:
        new_game_obj['rudder2_pos'] = center2
    # Bounce off the top and bottom walls
    init_vel = new_game_obj['velocity']
    if new_game_obj['pos'][1] >= 480 - 15 or new_game_obj['pos'][1] <= 15:
        new_game_obj['velocity'] = (init_vel[0], -1 * init_vel[1])
    # Right wall: player 1 scores; left wall: player 2 scores; otherwise
    # check for paddle (rudder) collisions
    if new_game_obj['pos'][0] >= 640 - 15:
        new_game_obj['pos'] = (size[1] / 2, size[0] / 2)
        new_game_obj['velocity'] = (-1.05 * abs(new_game_obj['velocity'][0]),
                                    1.05 * abs(new_game_obj['velocity'][1]))
        new_game_obj['score1'] += 1
    elif new_game_obj['pos'][0] <= 15:
        new_game_obj['pos'] = (size[1] / 2, size[0] / 2)
        new_game_obj['score2'] += 1
        new_game_obj['velocity'] = (1.05 * abs(new_game_obj['velocity'][0]),
                                    -1.05 * abs(new_game_obj['velocity'][1]))
    elif (0 <= new_game_obj['pos'][0] - new_game_obj['rudder1_pos'][0] <= 17
          and new_game_obj['rudder1_pos'][1] - (50 + 15) < new_game_obj['pos'][1]
          < new_game_obj['rudder1_pos'][1] + 50 + 15):
        new_game_obj['velocity'] = (-1 * init_vel[0], init_vel[1])
    elif (0 <= new_game_obj['rudder2_pos'][0] - new_game_obj['pos'][0] <= 17
          and new_game_obj['rudder2_pos'][1] - (50 + 15) < new_game_obj['pos'][1]
          < new_game_obj['rudder2_pos'][1] + (50 + 15)):
        init_vel = new_game_obj['velocity']
        new_game_obj['velocity'] = (-1 * init_vel[0], init_vel[1])
    new_game_obj['pos'] = (new_game_obj['pos'][0] + new_game_obj['velocity'][0],
                           new_game_obj['pos'][1] + new_game_obj['velocity'][1])
    # print(new_game_obj)
    return new_game_obj
33646593e6743d11174f72be6f4b825633fe8782
709,642
def mulaw_to_value(mudata):
    """Convert a mu-law encoded value to linear."""
    position = ((mudata & 0xF0) >> 4) + 5
    return ((1 << position) | ((mudata & 0xF) << (position - 4)) |
            (1 << (position - 5))) - 33
2ccca7f13861c7a212ac3a1dd2afc439839b19a7
709,643
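A quick sanity check of the decoder above: the zero code maps to 0, and magnitudes grow roughly exponentially with the segment stored in the high nibble.

print(mulaw_to_value(0x00))  # 0
print(mulaw_to_value(0x10))  # 33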
def validate_watch(value):
    """Validate "watch" parameter."""
    if not value:
        return None
    if isinstance(value, str):
        value = [_ for _ in value.split("\n") if _]
    return value
203b77f376a747cbd10f0c674897f912bb75618f
709,644
import re


def test_invalid_patterns(list, pattern):
    """
    Function to facilitate the tests in MyRegExTest class

    :param list: list with strings of invalid cases
    :param pattern: a regular expression
    :return: list with the result of all matches which should be a list of None
    """
    newList = []
    for item in list:
        matched = re.match(pattern, item)
        if matched is None:
            newList.append(None)
        else:
            raise ValueError(item + ' matched to ' + pattern + ' while it should not have matched')
    return newList
94a8232d66ff4c705e7a587aedc9d1cbe0b4f072
709,645
import argparse


def args_parse():
    """Parse the input args."""
    parser = argparse.ArgumentParser(description='Certificate import')
    parser.add_argument("--cert", default="./kmc/config/crt/sever.cert", type=str,
                        help="The path of certificate file")
    parser.add_argument("--key", default='./kmc/config/crt/sever.key', type=str,
                        help="The path of private Key file.")
    parser.add_argument("--key_component_1", default='./kmc/config/ksf/ksmaster.dat',
                        type=str, help="key material 1.")
    parser.add_argument("--key_component_2", default='./kmc/config/ksf/ksstandby.dat',
                        type=str, help="key material 2.")
    args = parser.parse_args()
    return args
88b114ac63afe32bac097a26bb15fe704fc2e8c1
709,646
def remove_constant_features(sfm):
    """
    Remove features that are constant across all samples
    """
    # boolean matrix of whether each entry differs from the first row
    x_not_equal_to_1st_row = sfm._x != sfm._x[0]
    non_const_f_bool_ind = x_not_equal_to_1st_row.sum(axis=0) >= 1
    return sfm.ind_x(selected_f_inds=non_const_f_bool_ind)
ae8c6e1d14b7260c8d2491b2f8a00ba352d7375a
709,647
def _find_ntc_family(guide_id):
    """Return a String of the NTC family"""
    guide_id_list = guide_id.split('_')
    return '_'.join(guide_id_list[0:2])
2b340694c2379682b232e49c9b0f1f0a91c778cf
709,648
def _chr_ord(x):
    """
    This is a private utility function for getBytesIOString
    to return chr(ord(x))
    """
    return chr(ord(x))
8529686bf3a40cd1f2c32f458ebdba17a9b35a05
709,649
def load_glove_vectors(glove_file="/home/yaguang/pretrained_models/glove.6B.50d.txt"):
    """Load the glove word vectors"""
    word_vectors = {}
    with open(glove_file) as f:
        for line in f:
            split = line.split()
            word_vectors[split[0]] = [float(x) for x in split[1:]]
    return word_vectors
a7bb1650885e12f436273b012d0c1c381e1be311
709,650
import math


def compute_recommended_batch_size_for_trustworthy_experiments(C: int, H: int, W: int,
                                                               safety_val: float) -> int:
    """
    Based on the inequality with safety_val=s:

        N' >= s * D'

    the recommended batch size, assuming N' = B*H*W and D' = C (so considering
    neurons as filters and patches as data), is any batch size B that satisfies:

        B*H*W >= s*C  =>  B >= (s*C) / (H*W)

    for the current layer and model. So C, H, W are for the current model at
    that layer.

    note:
        - the recommended way to compute this is to get the largest B after
          plugging in the C, H, W for all the layers of your model --
          essentially computing the "worst-case" B needed for the model.

    :return:
    """
    recommended_batch_size: int = int(math.ceil(safety_val * C / (H * W)))
    assert recommended_batch_size > 0, \
        'Batch size that was recommended was negative, check the input you are using.'
    return recommended_batch_size
80f11adb87b252a31aba590c38e60350535025ae
709,651
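A worked instance of the inequality above with hypothetical layer dimensions: C=512 channels on a 7x7 feature map and safety factor s=1 gives B >= 512/49, so the ceiling recommends 11.

print(compute_recommended_batch_size_for_trustworthy_experiments(C=512, H=7, W=7, safety_val=1.0))  # 11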
def load_document(filepath):
    """
    Description: Opens and loads the file specified by filepath as a raw txt
    string; assumes valid text file format.

    Input: String -> filepath of file from current directory
    Output: Entire contents of text file as a string
    """
    # assert(filepath.endswith(".txt")), "Function: Load Document -> File specified by filepath is not of type .txt"
    file = open(filepath, 'r')
    file_string = file.read()
    file.close()
    return file_string
b44a3af09ec7c776a1d3bd1a90efe3deb90da821
709,653
def readByte(file):
    """ Read a byte from file. """
    return ord(file.read(1))
4e82d1b688d7742fd1dd1025cd7ac1ccb13bbca0
709,655
def _column_sel_dispatch(columns_to_select, df):  # noqa: F811
    """
    Base function for column selection.
    Applies only to slices.
    The start slice value must be a string or None;
    same goes for the stop slice value.
    The step slice value should be an integer or None.
    A slice, if passed correctly in a MultiIndex column,
    returns a list of tuples across all levels of the column.
    A list of column names is returned.
    """
    df_columns = df.columns
    filtered_columns = None
    start_check = None
    stop_check = None
    step_check = None
    if not df_columns.is_unique:
        raise ValueError(
            """
            The column labels are not unique.
            Kindly ensure the labels are unique to ensure the correct output.
            """
        )
    start, stop, step = (
        columns_to_select.start,
        columns_to_select.stop,
        columns_to_select.step,
    )
    start_check = any((start is None, isinstance(start, str)))
    stop_check = any((stop is None, isinstance(stop, str)))
    step_check = any((step is None, isinstance(step, int)))
    if not start_check:
        raise ValueError(
            """
            The start value for the slice must either be a string or `None`.
            """
        )
    if not stop_check:
        raise ValueError(
            """
            The stop value for the slice must either be a string or `None`.
            """
        )
    if not step_check:
        raise ValueError(
            """
            The step value for the slice must either be an integer or `None`.
            """
        )
    start_check = any((start is None, start in df_columns))
    stop_check = any((stop is None, stop in df_columns))
    if not start_check:
        raise ValueError(
            """
            The start value for the slice must either be `None`
            or exist in the dataframe's columns.
            """
        )
    if not stop_check:
        raise ValueError(
            """
            The stop value for the slice must either be `None`
            or exist in the dataframe's columns.
            """
        )
    if start is None:
        start = 0
    else:
        start = df_columns.get_loc(start)
    if stop is None:
        stop = len(df_columns) + 1
    else:
        stop = df_columns.get_loc(stop)
    if start > stop:
        filtered_columns = df_columns[slice(stop, start + 1, step)][::-1]
    else:
        filtered_columns = df_columns[slice(start, stop + 1, step)]
    df_columns = None
    return [*filtered_columns]
177fd2f84884e068b08b509037788c998c026502
709,656
def sdfGetMolBlock(mol):
    """ sdfGetMolBlock() returns the MOL block of the molecule """
    return mol["molblock"]
399874a696f30f492ee878ef661094119bd5f96f
709,657
def createPhysicalAddressDataframe(userDf):
    """
    This method creates the physical address dataframe for CDM

    :param userDf: person dataframe
    :type userDf: object
    """
    addressColumns = [
        "id as personId", "city", "country", "officeLocation",
        "postalCode", "state", "streetAddress"
    ]
    return userDf.selectExpr(addressColumns).where(userDf.country.isNotNull())
4d185175ff6719476ed843680c17d0f267fa15ff
709,659
from typing import Optional
from pathlib import Path
import os


def is_stem(path: Optional[str]) -> bool:
    """Check if the given path is a stem."""
    if path is None:
        return False
    path = path.lower()
    parent = str(Path(path).parent)
    if parent == ".":
        root, ext = os.path.splitext(path)
        if ext == "":
            return True
    return False
c14053cb93997eaea118f9187d96869b82c97539
709,660
import os


def get_resource_path(filename: str = "") -> str:
    """
    get the resource path in the resource in the test dir.
    /path/to/resource/filename
    """
    current = os.path.abspath(__file__)
    current_path = os.path.dirname(current)
    resource_dir = os.path.join(current_path, 'resource')
    return os.path.join(resource_dir, filename)
4e140c7619336a508f3eb833b95513bc7e84bd4e
709,661
def noop_chew_func(_data, _arg):
    """
    No-op chew function.
    """
    return 0
82ef82b350c2a01e5ba22f288c003032bf6e63e0
709,662
def find_middle_snake_less_memory(old_sequence, N, new_sequence, M):
    """
    A variant of the 'find middle snake' function that uses
    O(min(len(a), len(b))) memory instead of O(len(a) + len(b)) memory.
    This does not improve the worst-case memory requirement, but it takes
    the best case memory requirement down to near zero.
    """
    MAX = N + M
    Delta = N - M
    V_SIZE = 2 * min(M, N) + 2
    Vf = [None] * V_SIZE
    Vb = [None] * V_SIZE
    Vf[1] = 0
    Vb[1] = 0
    for D in range(0, (MAX // 2 + (MAX % 2 != 0)) + 1):
        # Forward search
        for k in range(-(D - 2 * max(0, D - M)), D - 2 * max(0, D - N) + 1, 2):
            if k == -D or k != D and Vf[(k - 1) % V_SIZE] < Vf[(k + 1) % V_SIZE]:
                x = Vf[(k + 1) % V_SIZE]
            else:
                x = Vf[(k - 1) % V_SIZE] + 1
            y = x - k
            x_i = x
            y_i = y
            while x < N and y < M and old_sequence[x] == new_sequence[y]:
                x = x + 1
                y = y + 1
            Vf[k % V_SIZE] = x
            inverse_k = (-(k - Delta))
            if (Delta % 2 == 1) and inverse_k >= -(D - 1) and inverse_k <= (D - 1):
                if Vf[k % V_SIZE] + Vb[inverse_k % V_SIZE] >= N:
                    return 2 * D - 1, x_i, y_i, x, y
        # Backward (reverse) search
        for k in range(-(D - 2 * max(0, D - M)), (D - 2 * max(0, D - N)) + 1, 2):
            if k == -D or k != D and Vb[(k - 1) % V_SIZE] < Vb[(k + 1) % V_SIZE]:
                x = Vb[(k + 1) % V_SIZE]
            else:
                x = Vb[(k - 1) % V_SIZE] + 1
            y = x - k
            x_i = x
            y_i = y
            while x < N and y < M and old_sequence[N - x - 1] == new_sequence[M - y - 1]:
                x = x + 1
                y = y + 1
            Vb[k % V_SIZE] = x
            inverse_k = (-(k - Delta))
            if (Delta % 2 == 0) and inverse_k >= -D and inverse_k <= D:
                if Vb[k % V_SIZE] + Vf[inverse_k % V_SIZE] >= N:
                    return 2 * D, N - x, M - y, N - x_i, M - y_i
d320090f975525a620a7fafc479e9eec8b9a4ffa
709,663
from datetime import datetime


def set_clock(child, timestamp=None):
    """Set the device's clock.

    :param pexpect.spawn child: The connection in a child application object.
    :param datetime timestamp: A datetime tuple (year, month, day, hour, minute, second).

    :returns: The updated connection in a child application object.
    :rtype: pexpect.spawn
    """
    if not timestamp:
        timestamp = datetime.utcnow()
    child.sendline("clock set {0}\r".format(timestamp.strftime("%H:%M:%S %d %b %Y")))
    child.expect_exact("{0}, configured from console by console".format(
        timestamp.strftime("%H:%M:%S UTC %a %b %d %Y")))
    return child
b6299ab780ffc9e9d27b0715decf095b3d6a6272
709,664
def index(request):
    """Render site index page."""
    return {}
38c0a1e47cdbe2eed374b6231761698efa1bc166
709,665
def parse_file(filename):
    """Parses the file containing the db schema

    Key Arguments:
    filename - the file to parse"""
    f = open(filename, 'r')
    lines = f.readlines()
    f.close()
    db = {}
    for line in lines:
        s_line = line.split('\t')
        if s_line[0] == 'TABLE_CATALOG':
            continue
        if s_line[1] in db:
            db[s_line[1]].append(s_line[2])
        else:
            db[s_line[1]] = [s_line[2]]
    return db
0b02829505a1b07c8a1ed9cc8a34c651cf4be41c
709,666
import os

import requests


def check_token(token) -> bool:
    """Check ReCaptcha token

    Args:
        token

    Returns:
        bool
    """
    if os.getenv("CI"):
        return True
    url = "https://www.google.com/recaptcha/api/siteverify"
    secret_key = os.getenv("RECAPTCHA_SECRET_KEY")
    payload = {
        "secret": secret_key,
        "response": token,
    }
    response = requests.post(url, data=payload)
    return response.json()["success"] and response.json()["score"] >= 0.5
5c78e55333e4e5484ae30acaea58a988247152dd
709,667
import argparse


def get_commandline_parser():
    """it parses commandline arguments."""
    parser = argparse.ArgumentParser(description='Toolpath generator.')
    parser.add_argument('--stl-filepath', help='filepath of stl file.')
    parser.add_argument('--diameter', help='Diameter of toolbit.')
    parser.add_argument('--step-size', help='Step size of the CNC machine.')
    parser.add_argument('--feed-rate', help='Feed rate of CNC machine.')
    parser.add_argument('--calculate-time', help='Flag to print time.', type=bool)
    return parser
d8727b7a9f40f63e0c322074ae88585e7dd5f0eb
709,668
def uniquify_contacts(contacts):
    """
    Return a sequence of contacts with all duplicates removed.

    If any duplicate names are found without matching numbers, an exception is raised.
    """
    ctd = {}
    for ct in contacts:
        stored_ct = ctd.setdefault(ct.name, ct)
        if stored_ct.dmrid != ct.dmrid:
            raise RuntimeError(
                "Two contacts named {} have different IDs: {} {}".format(
                    ct.name, ct.dmrid, stored_ct.dmrid
                )
            )
    return list(ctd.values())
f4bf001abcccad1307633e6de6ed6228516ba0b2
709,669
def optimize_bank_transaction_list(bank_transactions):
    """Append related objects using select_related and prefetch_related"""
    return bank_transactions.select_related('block')
c7c7242336f9cddf399efc9d813b7650b0f6ce5e
709,670
def determine_if_is_hmmdb(infp):
    """Return True if the given file is an HMM database (generated using
    hmmpress from the HMMer3 software package), and return False otherwise.
    """
    # if open(infp, 'r').read().startswith('HMMER3/f'):
    if open(infp, 'r').readline().startswith('HMMER3/f'):
        return True
    else:
        return False
33b962e24c76e9e25f2cc76d4e7f78565adf8a3e
709,671
def template_footer(in_template):
    """Extracts footer from the notebook template.

    Args:
        in_template (str): Input notebook template file path.

    Returns:
        list: List of lines.
    """
    footer = []
    template_lines = []
    footer_start_index = 0
    with open(in_template) as f:
        template_lines = f.readlines()
    for index, line in enumerate(template_lines):
        if '## Display Earth Engine data layers' in line:
            footer_start_index = index - 3
    footer = ['\n'] + template_lines[footer_start_index:]
    return footer
cb872076b82b2012b2e27fcb1be9b8704cd60d27
709,672
def post_step1(records):
    """Apply whatever extensions we have for GISTEMP step 1, that run
    after the main step 1. None at present."""
    return records
98287f6930db6aa025715356084b3bef8c851774
709,673
def Position(context):
    """Function: <number> position()"""
    return context.position
e5ddf5aa8d5321ce9e7dc14b635cb942fbbbcbf1
709,674
import math


def spatial_shift_crop_list(size, images, spatial_shift_pos, boxes=None):
    """
    Perform left, center, or right crop of the given list of images.

    Args:
        size (int): size to crop.
        images (list): list of images to perform short side scale. Dimension is
            `height` x `width` x `channel` or `channel` x `height` x `width`.
        spatial_shift_pos (int): option includes 0 (left), 1 (middle), and
            2 (right) crop.
        boxes (list): optional. Corresponding boxes to images.
            Dimension is `num boxes` x 4.

    Returns:
        cropped (ndarray): the cropped list of images with dimension of
            `height` x `width` x `channel`.
        boxes (list): optional. Corresponding boxes to images. Dimension is
            `num boxes` x 4.
    """
    assert spatial_shift_pos in [0, 1, 2]
    height = images[0].shape[0]
    width = images[0].shape[1]
    y_offset = int(math.ceil((height - size) / 2))
    x_offset = int(math.ceil((width - size) / 2))
    if height > width:
        if spatial_shift_pos == 0:
            y_offset = 0
        elif spatial_shift_pos == 2:
            y_offset = height - size
    else:
        if spatial_shift_pos == 0:
            x_offset = 0
        elif spatial_shift_pos == 2:
            x_offset = width - size
    cropped = [
        image[y_offset:y_offset + size, x_offset:x_offset + size, :]
        for image in images
    ]
    assert cropped[0].shape[0] == size, "Image height not cropped properly"
    assert cropped[0].shape[1] == size, "Image width not cropped properly"
    if boxes is not None:
        for i in range(len(boxes)):
            boxes[i][:, [0, 2]] -= x_offset
            boxes[i][:, [1, 3]] -= y_offset
    return cropped, boxes
c80d8ab83f072c94887d48c3d1cfe5bb18285dbb
709,675
import argparse


def get_input_args():
    """
    Used to parse the command line arguments in order to predict the flower
    name and the class probability.

    Options:
        Return top K most likely classes:
            python predict.py input checkpoint --top_k 3
        Use a mapping of categories to real names:
            python predict.py input checkpoint --category_names cat_to_name.json
        Use GPU for inference:
            python predict.py input checkpoint --gpu
    """
    # Create Parser using ArgumentParser
    parser = argparse.ArgumentParser(
        description='Process Image Folder, CNN Model Architecture, Set hyper parameters')
    parser.add_argument('single_image', metavar='single_image', type=str, nargs=1,
                        help='a single image for which the flower name and the class '
                             'probability is to be predicted')
    parser.add_argument('checkpoint', metavar='checkpoint', type=str, nargs=1,
                        help='The checkpoint from which the model is re-built for the prediction')
    parser.add_argument('--top_k', type=int, default='3',
                        help='The number of most likely classes with default value \'3\'')
    parser.add_argument('--category_names', type=str, default='cat_to_name.json',
                        help='A file mapping of categories to real names with default value '
                             '\'cat_to_name.json\'')
    parser.add_argument('--gpu', action='store_true',
                        help='If available then the GPU will be used, else not')
    return parser.parse_args()
145e6d2601b37b10f4c8dbac649ab8abec2951a5
709,676
import sys
import os


def IsInteractive(output=False, error=False, heuristic=False):
    """Determines if the current terminal session is interactive.

    sys.stdin must be a terminal input stream.

    Args:
        output: If True then sys.stdout must also be a terminal output stream.
        error: If True then sys.stderr must also be a terminal output stream.
        heuristic: If True then we also do some additional heuristics to check
            if we are in an interactive context. Checking home path for example.

    Returns:
        True if the current terminal session is interactive.
    """
    if not sys.stdin.isatty():
        return False
    if output and not sys.stdout.isatty():
        return False
    if error and not sys.stderr.isatty():
        return False
    if heuristic:
        # Check the home path. Most startup scripts for example are executed by
        # users that don't have a home path set. Home is OS dependent though, so
        # check everything.
        # *NIX OS usually sets the HOME env variable. It is usually '/home/user',
        # but can also be '/root'. If it's just '/' we are most likely in an init
        # script.
        # Windows usually sets HOMEDRIVE and HOMEPATH. If they don't exist we are
        # probably being run from a task scheduler context. HOMEPATH can be '\'
        # when a user has a network mapped home directory.
        # Cygwin has it all! Both Windows and Linux. Checking both is perfect.
        home = os.getenv('HOME')
        homepath = os.getenv('HOMEPATH')
        if not homepath and (not home or home == '/'):
            return False
    return True
8037e2d38dca9fc745b7b0b79cfb83226e59d42d
709,677
import os
import errno
import stat


def ismount(path):
    """
    Test whether a path is a mount point.

    This is code hijacked from C Python 2.6.8, adapted to remove the extra
    lstat() system call.
    """
    try:
        s1 = os.lstat(path)
    except os.error as err:
        if err.errno == errno.ENOENT:
            # It doesn't exist -- so not a mount point :-)
            return False
        raise
    if stat.S_ISLNK(s1.st_mode):
        # A symlink can never be a mount point
        return False
    s2 = os.lstat(os.path.join(path, '..'))
    dev1 = s1.st_dev
    dev2 = s2.st_dev
    if dev1 != dev2:
        # path/.. on a different device as path
        return True
    ino1 = s1.st_ino
    ino2 = s2.st_ino
    if ino1 == ino2:
        # path/.. is the same i-node as path
        return True
    return False
d1d18af449c720ed0b616436d905c28313ed88d1
709,678
def write_results(conn, cursor, mag_dict, position_dict):
    """
    Write star truth results to the truth table

    Parameters
    ----------
    conn is a sqlite3 connection to the database

    cursor is a sqlite3.connection.cursor() object

    mag_dict is a dict of mags. It is keyed on the pid of the
    Process used to process a chunk of magnitudes. Each value
    is a 2-D numpy array of shape (n_obj, n_bandpasses). It is
    produced by calculate_magnitudes.

    position_dict is a dict keyed on pid of the Process used to
    process a chunk of stars. The values are also dicts, these
    keyed on 'healpix', 'ra', 'dec', 'id' with the values being
    arrays of those quantities for the corresponding chunk of
    stars.

    Returns
    -------
    row_ct, the number of rows written to the database
    """
    assert len(mag_dict) == len(position_dict)
    row_ct = 0
    for k in mag_dict.keys():
        mm = mag_dict[k]
        pp = position_dict[k]
        row_ct += len(pp['ra'])
        if len(mm) != len(pp['ra']):
            raise RuntimeError('%d mm %d pp' % (len(mm), len(pp['ra'])))
        values = ((int(pp['healpix'][i_obj]),
                   int(pp['id'][i_obj]),
                   1, 0, 0,
                   pp['ra'][i_obj], pp['dec'][i_obj],
                   0.0,
                   mm[i_obj][0], mm[i_obj][1], mm[i_obj][2],
                   mm[i_obj][3], mm[i_obj][4], mm[i_obj][5])
                  for i_obj in range(len(pp['ra'])))
        cursor.executemany('''INSERT INTO truth
                           VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', values)
    conn.commit()
    return row_ct
0b0c9234a32050277a7e70fee3ab7ba1be5931bb
709,679