Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
def process_data(data_json, key1, renamed1, key2, renamed2, key3, renamed3,
                 key4, renamed4, key5, renamed5, key6="", renamed6=""):
    """Converts our list of dictionaries to a dictionary of lists, while
    processing some of the data points (e.g. converting number strings to
    ints and floats, as well as lowercasing strings). Also uses renamed keys
    instead of the very long default ones.
    """
    val1 = []
    val2 = []
    val3 = []
    val4 = []
    val5 = []
    val6 = []
    for d in data_json:
        val1.append(d[key1])
        val2.append(float(d[key2]))
        val3.append(int(d[key3]))
        val4.append(d[key4].lower())
        val5.append(d[key5].lower())
        if key6 != "":  # the original used 'is not ""', which tests identity, not equality
            val6.append(str(d[key6]) == "true" or str(d[key6]) == "True")
    return {renamed1: val1, renamed2: val2, renamed3: val3,
            renamed4: val4, renamed5: val5, renamed6: val6}
120b13b0d4851ba8c351e7654372f068ed241590
236,517
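A doctest-style usage sketch for the entry above; the row dicts and key names are invented for illustration. Note that when key6 is left at its default, the result still carries an empty list under the default renamed6 key ('' here):
>>> rows = [{"name": "A", "score": "1.5", "count": "2", "city": "NY", "tag": "X"},
...         {"name": "B", "score": "2.5", "count": "3", "city": "LA", "tag": "Y"}]
>>> process_data(rows, "name", "n", "score", "s", "count", "c", "city", "ci", "tag", "t")
{'n': ['A', 'B'], 's': [1.5, 2.5], 'c': [2, 3], 'ci': ['ny', 'la'], 't': ['x', 'y'], '': []}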
def get_run_info_nextseq500(instrument_model, application_version, tree):
    """
    Helper function to get some info about the sequencing runs.

    Args:
        tree: xml tree

    Returns:
        dict: basic statistics about run, like date, instrument, number of
        lanes, flowcell ID, read lengths, etc.
    """
    run_stats = {}

    setup_node = tree.getroot().find("Setup")
    if setup_node is None:
        setup_node = tree.getroot()

    # Get required tree nodes.
    flowcell_node = tree.getroot().find("FlowCellRfidTag")

    # Now actually populate various stats
    run_stats['flow_cell_id'] = flowcell_node.find('SerialNumber').text
    run_stats['date'] = tree.getroot().find('RunStartDate').text
    run_stats['instrument'] = tree.getroot().find('InstrumentID').text
    run_stats['lanes'] = int(setup_node.find('NumLanes').text)
    run_stats['run_id'] = tree.getroot().find('RunID').text
    run_stats['r1_length'] = int(setup_node.find('Read1').text)
    run_stats['p7_index_length'] = int(setup_node.find('Index1Read').text)

    # A 'Read2' node means the run was paired-end. The original checked
    # 'Read1' here, which is always present, so the branch always ran.
    if setup_node.find('Read2') is not None:
        run_stats['r2_length'] = int(setup_node.find('Read2').text)
        run_stats['p5_index_length'] = int(setup_node.find('Index2Read').text)
        run_stats['paired_end'] = True
    else:
        run_stats['paired_end'] = False

    run_stats['instrument_type'] = instrument_model
    run_stats['reverse_complement_i5'] = True

    return run_stats
e2ff69ef20282692c43cc08047c971e1a0106b98
640,845
def asfrozenset(term):
    """Convert to frozenset if it is not already"""
    return term if isinstance(term, frozenset) else frozenset(term)
7f5e946ad245d64bb8979de078bf000df06171ae
470,526
def livetime(match, hits):
    """Calculate the livetime represented by a set of simulation data.
    Required argument for MongoSimsDB. Note that `match.emissionrate` can
    be given a weight when registered by `query`, so don't apply any
    weight here.

    Args:
        match (SimDataMatch): info requested from model and DB response
        hits (list): List of all documents retrieved from the DB. These
            may be partial docs containing only livetime-relevant info

    Returns:
        livetime (float): the total summed livetime represented by the hits
    """
    nprimaries = sum(doc['nprimaries'] for doc in hits)
    # this function shouldn't be called if emissionrate==0, but it might
    return nprimaries / match.emissionrate
a608d4671d6ff1e9b0c9ccd1a8cf465f98852c20
566,237
def translate_error(error, translation_list, format_str=None):
    """Translates error or returns original error if no matches.

    Note, an error will be translated if it is a child class of a value in
    translation_list. Also, translations earlier in the list take priority.

    Args:
        error (Exception): Error to translate.
        translation_list (list): List of (Exception, Exception) tuples.
            Translates errors that are instances of first error type to
            second. If there is a hierarchy, error types earlier in list
            are translated first.
        format_str (str|None): An api_lib.util.exceptions.FormattableErrorPayload
            format string. Note that any properties that are accessed here
            are on the FormattableErrorPayload object, not the object
            returned from the server.

    Returns:
        Error (Exception). Translated if match. Else, original error.
    """
    for untranslated_error, translated_error in translation_list:
        if isinstance(error, untranslated_error):
            return translated_error(error, format_str)
    return error
9e717ac8978f11d120fd78aaff86dfc84bb1f56f
118,558
def get_blue_green_from_app(app):
    """
    Returns the blue_green object if it exists, and its color field if that exists.

    >>> get_blue_green_from_app({})
    (None, None)
    >>> get_blue_green_from_app({'blue_green': None})
    (None, None)
    >>> get_blue_green_from_app({'blue_green': {}})
    (None, None)
    >>> get_blue_green_from_app({'blue_green': {'color': None}})
    ({'color': None}, None)
    >>> get_blue_green_from_app({'blue_green': {'color': ''}})
    ({'color': ''}, '')
    >>> get_blue_green_from_app({'blue_green': {'color': 'blue'}})
    ({'color': 'blue'}, 'blue')
    >>> get_blue_green_from_app({'blue_green': {'color': 'green'}})
    ({'color': 'green'}, 'green')
    """
    if app.get('blue_green'):
        return app['blue_green'], app['blue_green'].get('color', None)
    return None, None
c24c297f300fd4978aa1fd28245d835ef01ff387
683,763
def generate_label_asm(label, address):
    """
    Return label definition text at a given address.
    Format: '{label}: ; {address}'
    """
    label_text = '%s: ; %x' % (label, address)
    return (address, label_text, address)
249653b5a37f83601c49c8876ea10027d3118aa1
463,551
import re


def has_numbers(string):
    """
    Check if user's message has a number

    :param string: User's message
    :return: True/False (boolean)
    """
    return bool(re.search(r'\d', string))
3ee518d8278513214334709839a106b958ff6797
167,208
def get_domain(entity_id):
    """Get domain portion of entity id."""
    return entity_id.split('.')[0]
8f94dc85cc6379dd2cd6807d6382d39551549207
415,095
def nearest_neighbor_interpolation(points):
    """
    Input: list of points (x, y).
    Returns a function f(x) which returns the y value of the nearest
    neighbor for a given x. At the midpoints (when distances to the 2
    closest points are the same) the value of the point with the lower
    x value is returned.
    """
    points.sort(key=lambda x: x[0])

    def f(x):
        if x < points[0][0]:
            return points[0][1]
        for i in range(1, len(points)):
            if points[i][0] < x:
                continue
            if abs(points[i][0] - x) < abs(points[i - 1][0] - x):
                return points[i][1]
            return points[i - 1][1]
        # x lies beyond the last point
        return points[-1][1]

    return f
992e385d91a722989aef6c8f962863ddc46d6d25
646,768
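A small illustrative check of the interpolator above (the points are invented). f(1.0) sits exactly midway between x=0 and x=2, so the lower-x value wins, as documented:
>>> f = nearest_neighbor_interpolation([(0, 10), (2, 20), (4, 30)])
>>> f(-1), f(0.9), f(1.0), f(3.1)
(10, 10, 10, 30)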
def safe_union_two_by_name(df1, df2):
    """Combine common fields from two dataframes rowwise.

    Note we do not use the ``pyspark.sql.DataFrame.unionByName`` function
    here because we explicitly reorder columns to the order of ``take``.

    Parameters
    ----------
    df1 : pyspark.sql.DataFrame
        first dataframe to combine
    df2 : pyspark.sql.DataFrame
        second dataframe to combine

    Returns
    -------
    out : pyspark.sql.DataFrame
    """
    take = [c for c in df1.columns if c in df2.columns]
    df1 = df1.select(take)
    df2 = df2.select(take)
    out = df1.union(df2)
    return out
82a341c995c9158010d1576b2e2c4c6faadfe606
665,324
import re


def lowercase(s, _sub=re.compile('[A-Z]').sub,
              _repl=(lambda m: '.' + m.group(0).lower()), _cache={}):
    """Convert to lowercase with dots.

    >>> lowercase('ResCompany')
    'res.company'
    """
    try:
        return _cache[s]
    except KeyError:
        _cache[s] = s = _sub(_repl, s).lstrip('.')
        return s
9a3c5f5ce49f95fa131313a2e4d06ba7e38105ad
433,545
def fill_big_gaps(array, gap_size):
    """
    Insert values into the given sorted list if there is a gap of more than
    ``gap_size``. All values in the given array are preserved, even if they
    are within the ``gap_size`` of one another.

    >>> fill_big_gaps([1, 2, 4], gap_size=0.75)
    [1, 1.75, 2, 2.75, 3.5, 4]
    """
    result = []
    if len(array) == 0:
        raise ValueError("Input array must be len > 0")

    last_value = array[0]
    for value in array:
        while value - last_value > gap_size + 1e-15:
            last_value = last_value + gap_size
            result.append(last_value)
        result.append(value)
        last_value = value

    return result
11ecb164b9e54c75db249ca27cbbdd582ed47945
696,201
def has_afg_license(instr):
    """Returns True if the first license includes an AFG license"""
    return "AFG" in instr.query("LIC:ITEM? 0").strip().split('"')[3].split(",")
0b9b2d65b7f910d3a4e412f67c76c5333d4f7d7b
50,403
def limit(self, start_or_stop=None, stop=None, step=None):
    """
    Create a new table with fewer rows.

    See also: Python's builtin :func:`slice`.

    :param start_or_stop:
        If the only argument, then how many rows to include, otherwise,
        the index of the first row to include.
    :param stop:
        The index of the last row to include.
    :param step:
        The size of the jump between rows to include. (`step=2` will
        return every other row.)
    :returns:
        A new :class:`.Table`.
    """
    if stop or step:
        s = slice(start_or_stop, stop, step)
    else:
        s = slice(start_or_stop)
    rows = self._rows[s]

    if self._row_names is not None:
        row_names = self._row_names[s]
    else:
        row_names = None

    return self._fork(rows, row_names=row_names)
b101ed9eba1b5771b7acbd555ae41c4365cea1d3
18,471
from typing import Union
from typing import Any


def lastindexof(x: Union[str, list], value: Any) -> int:
    """
    For string input, returns the last index of substring in the input
    string. For array input, returns the last index of value in the input
    array.
    """
    if isinstance(x, str):
        return x.rfind(str(value))
    else:
        x = list(x)
        try:
            return len(x) - 1 - x[::-1].index(value)
        except ValueError:
            return -1
0b8b4e1f4f7a795dff9b07db98afe721d049f298
447,790
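A quick sketch of both branches of the function above (inputs invented), including the -1 returned when the value is absent from a list:
>>> lastindexof("banana", "a")
5
>>> lastindexof([1, 2, 3, 2], 2)
3
>>> lastindexof([1, 2, 3], 9)
-1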
import re


def parse_config_mirror_session_no_destination_interface(raw_result):
    """
    Parse the 'no destination interface' command raw output.

    :param str raw_result: vtysh raw result string.
    :rtype: str
    :return: the raw string, no parsing
    """
    show_re = (
        r'Destination interface removed, mirror session \S+ shutdown'
    )
    re_result = re.match(show_re, raw_result)
    assert re_result
    return raw_result
6c900b36a3ac054ed4f265e4985af84e7fe5c6e1
255,661
import math


def extract_blob(blob, image):
    """
    Extract the pixels that make up the blob's neighbourhood

    :param blob: the blob to extract
    :param image: the image to extract the blob from
    :returns: extracted square neighbourhood
    """
    y, x, r = blob
    hs, he = y - math.floor(r), y + math.floor(r)
    ws, we = x - math.floor(r), x + math.floor(r)
    image_section = image[hs:he, ws:we]
    return image_section
6de6b8a959c81e56dc2bbf7d70460f0852a1622d
329,155
def opinion(simulation):
    """
    Returns the opinion vector of a simulation.
    """
    return simulation.S
9bc3e155427bff98a2fb2f8082b7c6540099f6cf
166,483
def create_property_array(df_column, property_mapping, current_value):
    """
    Create a query JSON 'properties' array

    Creates the properties array necessary for when the property_mapping
    is defined.

    Args:
        df_column (Series): A pandas Series to reconcile.
        property_mapping (dict): The property-column mapping dictionary.
        current_value (str): Current iteration through the input_keys

    Returns:
        list: A list of dictionaries corresponding to the properties.
    """
    prop_mapping_list = []
    for key, value in property_mapping.items():
        prop_value = (
            value.loc[df_column == current_value].to_string(index=False).strip()
        )
        prop_mapping_list.append({"pid": key, "v": prop_value})
    return prop_mapping_list
2acb56529b5de6d066be406b99958fcd515d9150
259,637
from typing import Tuple


def parse_chunk(chunk: str) -> Tuple[str, int]:
    """Parse a chunk for rule name and error count.

    :param chunk: The chunk of logs to process.
    :returns: The rule name and count of errors found.
    :raises ValueError: if a rule was not found.
    """
    lines = chunk.splitlines()
    if not chunk.startswith("Rule"):
        raise ValueError('Chunk must start with "Rule"')
    chunk_name = lines.pop(0)
    count = 0
    for line in lines:
        if line.startswith("ERROR:"):
            count += 1
    return chunk_name, count
c37668effa08117a0a0239eff5af335f5319776c
301,655
from datetime import datetime


def alexa_datetime(date, time):
    """Return Alexa date and time strings as a datetime object."""
    return datetime.strptime(date + " " + time, "%Y-%m-%d %H:%M")
92a8cdc51f2058f1656cbae040f5401b049f8490
172,397
def nest_dict(flat_dict, sep='-'):
    """Return nested dict by splitting the keys on a delimiter.

    Flask-wtf returns embedded document fields as a flattened dict, with
    embedded document names embedded in the key. Any keys with empty
    values will be removed.

    For example, a document User may have an embedded document Comment.
    Flask-wtf will return this in a form as "user-comment". This function
    returns a nested dictionary like d["user"]["comment"].

    Args:
        flat_dict (dict): Flattened dict of embedded document fields.
        sep (str): Separator between nested keys.

    Returns:
        dict: Nested dictionary which better represents the embedded
        documents.
    """
    # Start a new dict to hold top level keys and take values for these
    # top level keys
    new_dict = {}
    hyphen_dict = {}
    eds = set()
    for k, v in flat_dict.items():
        if not v:
            pass
        elif sep not in k:  # the original hardcoded '-' here, ignoring `sep`
            new_dict[k] = v
        else:
            hyphen_dict[k] = v
            eds.add(k.split(sep)[0])

    # Create a new nested dict for each embedded document
    # and add these dicts to the correct top level key
    for ed in eds:
        ed_dict = {}
        for k, v in hyphen_dict.items():
            if ed == k.split(sep)[0]:
                ed_dict[k.split(sep)[1]] = v
        new_dict[ed] = ed_dict
    return new_dict
e8a9a38c06db49e50c1b0a1f0a5216c46bf0df9c
393,010
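A usage sketch for the entry above with an invented flat dict, showing one top-level key and one embedded document being nested:
>>> nest_dict({'name': 'Ann', 'user-comment': 'hi', 'user-rating': '5'})
{'name': 'Ann', 'user': {'comment': 'hi', 'rating': '5'}}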
def is_white_key(note):
    """True if note is represented by a white key"""
    key_pattern = [
        True, False, True, True, False, True,
        False, True, True, False, True, False,
    ]
    return key_pattern[(note - 21) % len(key_pattern)]
515ba17c6f6234802c6ccb162e362d00dde62557
136,135
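The offset 21 in the entry above lines the pattern up with MIDI note numbers (21 is A0, so the 12-entry pattern starts on A). An illustrative check with middle C (60) and the C#/Db above it (61):
>>> is_white_key(60)
True
>>> is_white_key(61)
False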
def get_item_properties(item, fields):
    """Return a tuple containing the item properties.

    :param item: a single item resource (e.g. Server, Project, etc)
    :param fields: tuple of strings with the desired field names
    """
    return tuple([item.get(field, '') for field in fields])
23b24f51c5bcc0d5d26d497c967fd4c02c6aa7c1
91,054
def getByName(list, name):
    """
    Return element by a given name.
    """
    if list is None or name is None:
        return None
    for element in list:
        if element.get('name') is None:
            continue
        if element['name'] == name:
            return element
    return None
5dae082da8e6620b6ab44eed58f0fb012dce84eb
101,844
def listminus(c1, c2):
    """Return a list of all elements of C1 that are not in C2, but in order."""
    s2 = set(c2)
    return [entry for entry in c1 if entry not in s2]
fce854870dfee595c89b576d3f6ab8957205d974
484,002
def duration(duration):
    """Filter that converts a duration in seconds to something like 01:54:01"""
    if duration is None:
        return ''
    duration = int(duration)
    seconds = duration % 60
    minutes = (duration // 60) % 60
    hours = (duration // 60) // 60
    s = '%02d' % (seconds)
    m = '%02d' % (minutes)
    h = '%02d' % (hours)
    output = []
    if hours > 0:
        output.append(h)
    output.append(m)
    output.append(s)
    return ':'.join(output)
7cd89654a84c2e3e41d96cb1b13688833ee54387
10,122
def open_file(filename, mode='r'):
    """
    Common file helper that works the same across Python 2 and Python 3.
    mode: 'r' or 'w' for read or write
    """
    return open(filename, mode, encoding='utf-8', errors='ignore')
100179e22f140c4e8d25a1ccab94f9ca3831b5f3
691,138
def passwordExists(user):
    """Checks if the user has created a password for himself; passwords
    created by PSA are unusable"""
    return user.has_usable_password()
2b6dfbe31f3e9073ced5ebb634537cd101aaa37d
220,372
import torch


def _float_from_bool(a):
    """
    Since pytorch only supports matrix multiplication on float, IoU
    computations are done using floating point types. This function
    binarizes the input (positive to True and nonpositive to False) and
    converts from bool to float. If the data is already a floating-point
    type, it keeps the same type; otherwise it uses float.
    """
    if a.dtype == torch.bool:
        return a.float()
    if a.dtype.is_floating_point:
        return a.sign().clamp_(0)
    return (a > 0).float()
3a16532903a44976a9dc036615c4887268096206
114,454
import math


def convert_state_to_hex(state: str) -> str:
    """
    This assumes that state only has "x"s and Us or Ls or Fs or Rs or Bs or Ds

    >>> convert_state_to_hex("xxxU")
    '1'
    >>> convert_state_to_hex("UxUx")
    'a'
    >>> convert_state_to_hex("UUxUx")
    '1a'
    """
    state = (
        state.replace("x", "0")
        .replace("-", "0")
        .replace("U", "1")
        .replace("L", "1")
        .replace("F", "1")
        .replace("R", "1")
        .replace("B", "1")
        .replace("D", "1")
    )
    hex_width = int(math.ceil(len(state) / 4.0))
    hex_state = hex(int(state, 2))[2:]
    # Python 2 long-literal leftover; hex() never appends "L" on Python 3
    if hex_state.endswith("L"):
        hex_state = hex_state[:-1]
    return hex_state.zfill(hex_width)
d722e07ab69c6b46f834eca04c7a8ba75520b145
679,457
def extract_dims(array, ndim=1):
    """Decrease the dimensionality of ``array`` by extracting ``ndim``
    leading singleton dimensions."""
    for _ in range(ndim):
        assert len(array) == 1, len(array)
        array = array[0]
    return array
e51661bdce5029ecc58db892b761820c5f6de7e8
614,364
def _is_optional_field(field) -> bool:
    """Check if the input field is optional.

    Args:
        field (Field): input Field to check.

    Returns:
        bool: True if the input field is optional.
    """
    # return isinstance(field.type, _GenericAlias) and type(None) in getattr(field.type, "__args__")
    return type(None) in getattr(field.type, "__args__")
a0e93747ab0044c5a8456f33e4a223bb2454dc3b
427,441
def real(x):
    """
    Takes the real part of a 4D tensor x, where the last axis is
    interpreted as the real and imaginary parts.

    Parameters
    ----------
    x : tensor_like

    Returns
    -------
    x[..., 0], which is interpreted as the real part of x
    """
    return x[..., 0]
364b4e3ab1f02dd92a5140244e6db5f3a4d80d64
363,919
def indent(t, indent=0, sep='\n'):
    # type: (str, int, str) -> str
    """Indent text."""
    return sep.join(' ' * indent + p for p in t.split(sep))
95263f43173a6ebc1cc1270f7ac7606484f99fd8
546,802
import random


def population(distribution, count=2048):
    """
    Creates a list of numerical values (no uncertainty) of the specified
    length that are representative of the distribution for use in less
    robust statistical operations.

    :param distribution:
        The distribution instance on which to create a population
    :param count:
        The number of numerical values to include in the returned
        population list.
    :type: int
    :return:
        A list of numerical values that approximate the measurement
        probability distribution
    :rtype: list
    """
    out = []
    x_min = distribution.minimum_boundary(10)
    x_max = distribution.maximum_boundary(10)
    x = x_min
    delta = (x_max - x_min) / 512.0
    total = float(len(distribution.measurements))

    while x <= x_max:
        n = int(round(count * delta * distribution.probability_at(x)))
        for i in range(n):
            out.append(random.uniform(
                x - 0.5 * delta,
                x + 0.5 * delta
            ))
        if x == x_max:
            break
        x = min(x_max, x + delta)
    return out
70d74463b13fb4b9c5eaee03ac3010e5744972a4
278,729
from typing import Dict


def universal_detection_loss_weights(
        loss_segmentation_word: float = 1e0,
        loss_inst_dist: float = 1e0,
        loss_mask_id: float = 1e-4,
        loss_pq: float = 3e0,
        loss_para: float = 1e0) -> Dict[str, float]:
    """A function that returns a dict for the weights of loss terms."""
    return {
        "loss_segmentation_word": loss_segmentation_word,
        "loss_inst_dist": loss_inst_dist,
        "loss_mask_id": loss_mask_id,
        "loss_pq": loss_pq,
        "loss_para": loss_para,
    }
89996ea9be93748608ef84e5715b32d90c243d65
70,627
def averages_video_udea(dates, dlist, axes, names=['Exp1', 'Exp2'],
                        colors=['black', 'DodgerBlue'], xticks=None,
                        xlim=None, ylim=[-3, 3], title='', xlabel=r'Year',
                        ylabel=''):
    """Plot area average time series of variable for UdeA video.

    In the video there will be axes with time series of some variable for
    two different data sets averaged spatially. This function will take
    care of it.

    Parameters
    ----------
    dates: pandas.DatetimeIndex
        These are the x axis values. Matplotlib will interpret them as
        dates and format them as such.
    dlist: list of numpy.ndarrays
        Only two arrays are supported. These should be time series of
        area averages for some variable.
    axes: matplotlib.axes.Axes
        Generally created using `figure.add_subplot()`. Since this plot
        is to be appended to a larger picture, the axes must be created
        outside this function and used as input.
    names: list of str, optional
        Names to be shown in the legend. They must have the same order as
        the data in `dlist`. Default is ['Exp1', 'Exp2']. They will
        always be converted to upper case.
    colors: list of named colors, optional
        Colors for each line. They must have the same order as the data
        in `dlist`. Default is ['black', 'DodgerBlue'].
    xticks: list or numpy.ndarray, optional
        This controls the tick marks in the x axis. Default is to put a
        tick from the second year until the end every 2 years.
    xlim: list of datetime objects, optional
        Limits in the x axis. The user can choose the limit dates in this
        axis. Default is to use the first and last items in `dates`.
    ylim: list of float, optional
        Limits in the y axis. Default is [-3, 3].
    title: str, optional
        Centered title. Default is empty.
    xlabel: str, optional
        Title in the x axis. Default is 'Year'.
    ylabel: str, optional
        Title in the y axis. Default is empty.

    Returns
    -------
    matplotlib.axes.Axes with plot attached.
    """  # noqa
    # get ticks
    if xticks is None:
        xticks = dates[12::48]

    # get xlim
    if xlim is None:
        xlim = [dates[0], dates[-1]]

    # unpack data
    av1, av2 = dlist

    # points
    point1 = av1[-1]
    point2 = av2[-1]

    # line plot for land
    axes.plot(dates, av1, linewidth=1, color=colors[0],
              label=names[0].upper())
    axes.plot(dates, av2, linewidth=1, color=colors[1],
              label=names[1].upper())
    axes.plot(dates[-1], point1, 'o', color=colors[0], ms=2)
    axes.plot(dates[-1], point2, 'o', color=colors[1], ms=2)

    # set lims
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    axes.set_xticks(xticks)

    # horizontal lines
    axes.axhline(y=0, linestyle='--', alpha=0.5, linewidth=1, color='black')

    # titling
    axes.set_title(title)
    axes.set_ylabel(ylabel)
    axes.set_xlabel(xlabel)
    axes.legend(ncol=2)
    return axes
4a47890ff8dfd08294bf41b3cc949e31c4339c6f
530,034
def transpose(list_in):
    """
    Shuffle/transpose a given 16 element list from
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    to
    [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15]
    """
    list_out = []
    for i in range(4):
        for j in range(4):
            list_out.append(list_in[i + 4 * j])
    return list_out
27237a93397bc18edd58ab0eb4689060fec1418e
234,803
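A one-line sanity check of the entry above, reproducing the mapping its docstring describes:
>>> transpose(list(range(16)))
[0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15]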
def fit_params_txt(fit_params, bp_list, out_dir):
    """Generates a text file in the same folder as the detrending plots
    that lists applied linear fit equations"""
    # Create .txt file and copy breakpoint list
    text_dir = out_dir + '\\detrending_fit_eqs.txt'
    text_file = open(text_dir, 'w+')
    bps_form = [i for i in bp_list]

    # Write to and save .txt file
    for count, params in enumerate(fit_params):
        if len(bp_list) != 0:
            text_file.write('From %s to %s: %.4f * dist_downstream + %.4f\n'
                            % (bps_form[count], bps_form[count + 1],
                               params[0], params[1]))
        else:
            text_file.write('For full reach: %.4f * dist_downstream + %.4f\n'
                            % (params[0], params[1]))
    text_file.close()
    return text_dir
5b0075fd90f25d446bbb1060da23b62221be6260
562,586
from datetime import datetime


def format_date(date):
    """Function takes a datetime object and stringifies it down to
    MM/DD/YYYY format"""
    try:
        start_date = datetime.strftime(date, '%m/%d/%Y')
    except (TypeError, ValueError):
        # Fall back to the raw input if it is not a datetime
        start_date = date
    return start_date
52cc6a1535789b010170444436c9e6122f9b29aa
562,296
def _isInt(argstr):
    """
    Returns True if and only if the given string represents an integer.
    """
    try:
        int(argstr, 0)  # hex values must have the "0x" prefix for this to work
        return True
    except (ValueError, TypeError):
        return False
6ec4bfd37b59b28317433fedcd68967193f47b3c
601,956
def coerce_row_to_dict(schema, row):
    """
    >>> from datashape import dshape
    >>> schema = dshape('{x: int, y: int}')
    >>> coerce_row_to_dict(schema, (1, 2))  # doctest: +SKIP
    {'x': 1, 'y': 2}

    Idempotent

    >>> coerce_row_to_dict(schema, {'x': 1, 'y': 2})  # doctest: +SKIP
    {'x': 1, 'y': 2}
    """
    if isinstance(row, dict):
        return row
    return dict((name, item) for name, item in zip(schema[0].names, row))
89ea04b6b73a8b7218a4cf2a1b584d2db583379c
64,020
def point_window_unitxy(x, y, affine):
    """Given an x, y and a geotransform
    Returns
        - rasterio window representing 2x2 window whose center points
          encompass point
        - the cartesian x, y coordinates of the point on the unit square
          defined by the array center points.

    ((row1, row2), (col1, col2)), (unitx, unity)
    """
    fcol, frow = ~affine * (x, y)
    r, c = int(round(frow)), int(round(fcol))

    # The new source window for our 2x2 array
    new_win = ((r - 1, r + 1), (c - 1, c + 1))

    # the new x, y coords on the unit square
    unitxy = (0.5 - (c - fcol),
              0.5 + (r - frow))

    return new_win, unitxy
b7c2122569cf609c508d27ae74d035390c0d1744
282,755
import requests


def sendRequest(url, type="POST", params=None, headers=None):
    """
    Send a request to a URL

    ### Input:
    - `url` (str) | the url to send the request to
    - `type` (str) | the type of request (GET or POST)
    - `params` (dict) | parameters to be sent with the request
    - `headers` (dict) | headers to be sent with the request

    ### Output:
    - `response` (dict) | the JSON response of the request
    """
    ## Perform a GET request
    if type == "GET":
        rawResponse = requests.get(url, params=params, headers=headers)
    ## Perform a POST request
    else:
        rawResponse = requests.post(url, params=params, headers=headers)

    ## Convert the response to a json object, if possible
    if hasattr(rawResponse, "json"):
        response = rawResponse.json()
    ## Otherwise, get the text response
    else:
        response = rawResponse.text

    return response
7f450b8eedf6405b237730b9f7d6da5277c41e7b
14,080
def grandchildren_with_tag(child, tagnames):
    """Return children of child that have tag names in the given set of
    tag names"""
    ret = []
    for grandchild in child.iterchildren():
        if grandchild.tag in tagnames:
            ret.append(grandchild)
    return ret
1121345abab69f67abde0ee2b6e4629e71034c61
691,469
def lookup_fxn(x, vals):
    """
    Builds a simple function that acts as a lookup table. Useful for
    constructing bandwidth and weight functions from existing values.

    Parameters
    ----------
    x : iterable
        Values to input for the function
    vals : iterable
        Output values for the function. Must be of the same length as x.

    Returns
    -------
    lf : function
        A function that, when input a value in x, outputs the
        corresponding value in vals.
    """
    # Build dictionary
    lookup = {}
    for i in range(len(x)):
        lookup[str(x[i])] = vals[i]

    # Define and return lookup function
    def lf(xi):
        return lookup[str(xi)]

    return lf
0fbe32d19b84cf80db0f0621a9792f7a1234536d
336,603
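A usage sketch for the entry above (values invented); note that keys are stringified internally, so f(2) and f("2") hit the same entry:
>>> f = lookup_fxn([1, 2, 3], [10, 20, 30])
>>> f(2)
20
>>> f("2")
20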
def search_ancestor(node, node_type_or_types):
    """
    Recursively looks at the parents of a node and checks if the type
    names match.

    :param node: The node that is looked at.
    :param node_type_or_types: A tuple or a string of type names that are
        searched for.
    """
    if not isinstance(node_type_or_types, (list, tuple)):
        node_type_or_types = (node_type_or_types,)

    while True:
        node = node.parent
        if node is None or node.type in node_type_or_types:
            return node
5a40baad702cdb9bf8119b4f0b65fc3b484bc3e6
588,667
import re


def get_instances_from_report(report_file_path):
    """
    Parse StegExpose report and return a list of (class, score).
    E.g. [('p', 0.10), ('n', 0.05)]
    """
    instances = []
    with open(report_file_path, 'r') as report_file:
        for line in report_file:
            # Filter the lines without images.
            if re.match(r'.*\.(png|jpg),', line):
                # Get the important data.
                pieces = line.split(sep=',')
                image_name = pieces[0]
                real_class = 'p' if re.match(r'.*_\d+p\.(png|jpg),', line) else 'n'
                fusion_score = float(pieces[-1])
                # print(real_class, fusion_score, image_name)
                instances.append((real_class, fusion_score))
    return instances
dc5bbf747aa642456ec3080e2ca93a0f6a3086e1
572,160
def add_unique(list, elt):
    """Add an element uniquely to a list"""
    # Since we can't use set(), which uses memory addresses as hashes,
    # for any creation or order-sensitive operation in models, we use
    # lists instead: rather than foo = set(); foo.add(x)
    # we use foo = []; add_unique(foo, x)
    if elt not in list:
        list.append(elt)
    return list
c4353523900aaa00e0bde73335983b1f07b6c302
410,304
def pick_from_list(items, suffix):
    """
    Pick an element from list ending with suffix.
    If no match is found, return None.

    :param items: list of items to be searched.
    :param suffix: String suffix defining the match.
    """
    match = None
    for item in items:
        if item.endswith(suffix):
            match = item
    return match
b081cdba7de72c7864ab0ad3a7b358da267733e8
138,305
def build_streets_vertices(edges, shapes):
    """
    Returns vertices and edges based on the subset of edges.

    @param edges   indexes
    @param shapes  streets
    @return        vertices, edges

    *vertices* is a list of points. *edges* is a list of `tuple(a, b)`
    where `a`, `b` are indices referring to the array of vertices.
    """
    points = []
    for i in edges:
        p = shapes[i].points
        a, b = (p[0][0], p[0][1]), (p[-1][0], p[-1][1])
        points.append(a)
        points.append(b)

    vertices = list(sorted(set(points)))
    positions = {p: i for i, p in enumerate(vertices)}

    new_edges = []
    for i in edges:
        points = shapes[i].points
        a, b = (points[0][0], points[0][1]), (points[-1][0], points[-1][1])
        new_edges.append((positions[a], positions[b]))

    return vertices, new_edges
6a1b5e0365a8e5dd89470476b41502470e490176
447,685
def isvideotype(x):
    """Is an object a vipy.video class Video, VideoCategory, Scene?"""
    return (str(type(x)) in ["<class 'vipy.video.Video'>",
                             "<class 'vipy.video.VideoCategory'>",
                             "<class 'vipy.video.Scene'>"])
c628a8671c8dfa4c03d56a8f2cd00c6f92065741
358,177
import random


def get_greeting(person):
    """
    Get a random greeting, greeting *person*
    """
    greetings = ["hello!", "hi", "namaste", "privet", "konichiwa", "nihao!"]
    return f"{random.choice(greetings).title()}, {person}."
b917af2dbe5f81c27631125e2e765fa76c5a030c
575,217
def eventCoordinates(event):
    """
    Get the absolute coordinates of a mouse event.
    http://www.quirksmode.org/js/events_properties.html#position
    """
    return event.pageX, event.pageY
7fb5439c221b9a617360c07ead9fe2f4ca3b4c10
521,516
def kernel_matrix(x, y):
    """
    Returns kernel matrix (quadratic kernel, in this case) for input
    arrays, x and y:
        K(x, y) = phi(x).phi(y), which for a quadratic kernel is (x.y)^2
    """
    K = (x.dot(y.T)) ** 2
    return K
a1e5f666e6af3cde7588c3603a0cf56665643983
430,074
def recaman(length):
    """
    Creates a Recamán's sequence

    :param length: The length of the wanted sequence
    :return: array containing the sequence

    For more information about this sequence:
    https://en.wikipedia.org/wiki/Recam%C3%A1n%27s_sequence
    """
    a = [0]
    for n in range(1, length):
        candidate = a[-1] - n
        if candidate > 0 and candidate not in a:
            a.append(candidate)
        else:
            a.append(a[-1] + n)
    return a
8b9f03a09f18343a40e4466c726be2ff85ee83ea
464,800
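The first ten terms produced by the entry above, as a doctest-style check (these match the sequence's standard opening values):
>>> recaman(10)
[0, 1, 3, 6, 2, 7, 13, 20, 12, 21]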
def process_hex(hex):
    """
    Processes hex data into a more usable format.
    """
    # turn a hex section id into a hex category
    category = hex['category'].split('_')[2]
    hex['category'] = {
        'DataListTypes': 'common',
        'DataList1': 'major',
        'DataList2': 'grand',
    }[category]
    return hex
bcb2bb6bce8578ace58b05b474954ba66c9e2cc1
491,302
def verbaUtilizada(modelo):
    """
    Given the problem parameters (stored in 'modelo'), computes the budget
    used to distribute 'totalCont + totalForm' students across
    'totalTurmAbertas' classes. Returns the amount of budget used.
    """
    usoVerba = (modelo.custoAluno * (modelo.xSoma + modelo.ySoma)
                + modelo.custoProf
                * (modelo.qtdProfPedag + modelo.qtdProfAcd)
                * modelo.pSoma)
    return usoVerba
39bf1691df146d663a79490f46463cbe09c4052f
349,555
import inspect
import functools


def loggedmethod(method):
    """Decorator for LoggedObject methods, ensuring exceptions logging.

    Whenever an exception is raised by a method this function decorates,
    its details are logged through the object's `log` method at 'error'
    level before it is raised again. This is useful when unexpected
    exceptions may be raised in a context where they will not interrupt
    execution but need to be notified.

    Usage:
    >>> class Foo(LoggedObject):
    ...     @loggedmethod
    ...     def bar(self, x):
    ...         if not isinstance(x, str):
    ...             raise TypeError('Expected "x" to be a str.')
    ...         self.log(x)
    ...
    >>> foo = Foo()
    >>> foo.bar('Some string.')
    <asctime> : Some string.
    >>> foo.bar(42)
    <asctime> : TypeError at `Foo.bar`: Expected "x" to be a str.
    TypeError: Expected "x" to be a str.
    """
    if 'self' not in inspect.signature(method).parameters.keys():
        raise RuntimeError(
            "Attempt at decorating a function with no 'self' argument "
            + "using '@logged_method'."
        )

    @functools.wraps(method)
    def logged_method(self, *args, **kwargs):
        """Wrapped method ensuring exceptions logging before raising."""
        try:
            return method(self, *args, **kwargs)
        except Exception as exception:
            method_name = getattr(method, '__qualname__', method.__name__)
            msg = "%s at '%s': %s" % (
                type(exception).__name__, method_name,
                ';'.join(map(str, exception.args))
            )
            self.log(msg=msg, level='error')
            raise exception

    return logged_method
1b1cf7bde0e8567d185c2a2db1c0b63daa85904a
229,594
from typing import Callable
from typing import Tuple


def parse_line(line: str, distance_metric: Callable) -> Tuple[str, str, float]:
    """Parse a line of BLAST+6 output.

    Parameters
    ----------
    line : str
        A blast line in format `-outfmt "6 qacc sacc length qlen slen ident"`
    distance_metric : Callable
        A function that computes a distance metric from the info in `line`.

    Returns
    -------
    query_accession : str
        The query sequence accession.
    subject_accession : str
        The subject sequence accession.
    distance : float
        The distance between the sequences.
    """
    qacc, sacc, length, qlen, slen, ident = line.split()
    return qacc, sacc, distance_metric(int(ident), int(qlen), int(slen))
362a9ab44a32197c90e3d7064c11db7e7e0af2b6
600,766
def lstrip_namespace(s, namespaces):
    """
    Remove starting namespace

    :param s: input string
    :type s: ```AnyStr```

    :param namespaces: namespaces to strip
    :type namespaces: ```Union[List[str], Tuple[str], Generator[str], Iterator[str]]```

    :returns: stripped input (potentially just the original!)
    :rtype: ```AnyStr```
    """
    for namespace in namespaces:
        # The original used s.lstrip(namespace), which strips any of the
        # namespace's characters rather than the prefix as a whole.
        if s.startswith(namespace):
            s = s[len(namespace):]
    return s
ddb4ef24a1eec4a67772ad7fed29b69c0cfe83ca
333,095
def join_labels(labels, join_symbol="|", threshold=1.e-6):
    """
    Join labels with a joining symbol when they are very close

    :param labels: a list of length-2 tuples, in the format (position, label)
    :param join_symbol: the string to use to join different paths.
        By default, a pipe
    :param threshold: the threshold to decide if two float values are the
        same and should be joined
    :return: the same list as labels, but with the second value possibly
        replaced with strings joined when close enough
    """
    if labels:
        new_labels = [list(labels[0])]
        # modify labels when in overlapping position
        j = 0
        for i in range(1, len(labels)):
            if abs(labels[i][0] - labels[i - 1][0]) < threshold:
                new_labels[j][1] += join_symbol + labels[i][1]
            else:
                new_labels.append(list(labels[i]))
                j += 1
    else:
        new_labels = []
    return new_labels
b52b69a23dddec2edb81f022571c9e2f5573a229
596,952
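A usage sketch for the entry above (positions and labels invented): two labels closer than the threshold are merged with the join symbol, the rest pass through unchanged:
>>> join_labels([(0.0, 'X'), (0.5, 'M'), (0.5 + 1e-9, 'K'), (1.0, 'G')])
[[0.0, 'X'], [0.5, 'M|K'], [1.0, 'G']]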
def _uri(helper):
    """Returns the URL of the kvstore."""
    return '/'.join((
        helper.context_meta['server_uri'],
        'servicesNS',
        'nobody',
        'Splunk_TA_paloalto',
        'storage',
        'collections',
        'data',
        'minemeldfeeds'))
d272afa2e9305a480609c215e653f3e80b1990b7
567,651
from typing import Optional
import json


def _try_parse_json(json_string: str, ref_val=None) -> Optional[dict]:
    """
    Try to parse the string as JSON.

    :param json_string: str, string to check for json
    :param ref_val: any, not used, interface design requirement
    :return: None if not parseable, otherwise the parsed json object
    """
    parsed = None
    try:
        parsed = json.loads(json_string)
    except (ValueError, TypeError):
        pass
    return parsed
a609eeefb32d88970ecf039578e8eb8a65ad8108
692,545
import shutil


def get_archive_name_and_format_for_shutil(path):
    """Returns archive name and format to shutil.make_archive() for the |path|.

    e.g., returns ('/path/to/boot-img', 'gztar') if |path| is
    '/path/to/boot-img.tar.gz'.
    """
    for format_name, format_extensions, _ in shutil.get_unpack_formats():
        for extension in format_extensions:
            if path.endswith(extension):
                return path[:-len(extension)], format_name

    raise ValueError(f"Unsupported archive format: '{path}'")
152d68ea9613d7253f78c37ce85758a2c8bc67f9
700,162
def absolute_round(number: float) -> int:
    """
    Rounds the value of number and then produces the absolute value of
    that result

    >>> absolute_round(-2.1)
    2
    >>> absolute_round(3.4)
    3
    >>> absolute_round(3.7)
    4
    >>> absolute_round(-2.9)
    3
    """
    return abs(round(number))
446786ad83dfb42e8643917d1e656b22e750654b
118,388
import random


def roll_die(sides: int) -> int:
    """Rolls a die with given number of sides."""
    return random.randint(1, sides)
35cb7445f46a84e3232b7a24951139c635d912b3
245,029
from datetime import datetime


def _get_timestamp() -> str:
    """Create a timestamp in the right format."""
    return datetime.utcnow().strftime("_%H:%M:%S,%m-%d-%Y")
ab12e853d21b854f8868164c1522b513d271456a
256,142
def parse_title(line):
    """If this is a title, return Tuple[level, content].

    @type line: str
    @return: Optional[Tuple[level, content]]
    """
    line = line.strip()
    if not line.startswith('#'):
        return None
    sharp_count = 0
    for c in line:
        if c == '#':
            sharp_count += 1
        else:
            break
    if sharp_count == len(line):
        return None
    title = line[sharp_count:].strip()
    return sharp_count, title
7c170f417755c878d225b780b8475a379501c19f
707,815
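A usage sketch for the entry above (lines invented), covering a heading, a plain line, and the all-hashes edge case:
>>> parse_title('## Section')
(2, 'Section')
>>> parse_title('plain text') is None
True
>>> parse_title('####') is None
True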
from typing import Tuple


def get_default_optimisation_params(config: dict) -> Tuple[float, int]:
    """Get the default coverage distance (theta) and number of sensors to
    use when optimising networks and generating figures.

    Parameters
    ----------
    config : dict
        Parameters as loaded by utils.get_config

    Returns
    -------
    Tuple[float, int]
        Coverage distance (theta) and number of sensors.
    """
    theta = config["optimisation"]["theta"]["default"]
    n_sensors = config["optimisation"]["n_sensors"]["default"]
    return theta, n_sensors
c6dfe41d3d6be1eee2fec2c78413fa03c2033416
435,011
import hashlib


def _hash_file(fpath, algorithm="sha256", chunk_size=65535):
    """Calculates a file sha256 or md5 hash.

    # Example
    ```python
    >>> from keras.data_utils import _hash_file
    >>> _hash_file('/path/to/file.zip')
    'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
    ```

    # Arguments
        fpath: path to the file being validated
        algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
            'auto' and 'sha256' both use SHA-256; anything else uses MD5.
        chunk_size: Bytes to read at a time, important for large files.

    # Returns
        The file hash
    """
    if algorithm in ("sha256", "auto"):
        hasher = hashlib.sha256()
    else:
        hasher = hashlib.md5()

    with open(fpath, "rb") as fpath_file:
        for chunk in iter(lambda: fpath_file.read(chunk_size), b""):
            hasher.update(chunk)

    return hasher.hexdigest()
abc4874c9284a0e00392cc7668ce9d94e64e94ee
674,086
import torch


def cosine_similarity(x, y=None, eps=1e-8):
    """Calculate cosine similarity between two matrices.

    Args:
        x: N*p tensor
        y: M*p tensor or None; if None, set y = x

    This function does not broadcast.

    Returns:
        N*M tensor
    """
    w1 = torch.norm(x, p=2, dim=1, keepdim=True)
    if y is None:
        w2 = w1.squeeze(dim=1)
        y = x
    else:
        w2 = torch.norm(y, p=2, dim=1)
    w12 = torch.mm(x, y.t())
    return w12 / (w1 * w2).clamp(min=eps)
68faf837293556409899487b47de072d013a1f42
674,093
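A minimal sanity check of the entry above: for orthonormal rows, the self-similarity matrix is the identity (the input is invented):
>>> import torch
>>> x = torch.tensor([[1., 0.], [0., 1.]])
>>> cosine_similarity(x)
tensor([[1., 0.],
        [0., 1.]])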
def parse_header(field):
    """Parses a single header key and converts it to the way it appears
    in an HTTP classified_request. This function is necessary because the
    headers' keys are provided in the environment variables not as they
    appear in an HTTP classified_request. For example, it converts
    HTTP_USER_AGENT to User-Agent."""
    header_key = field[5:]  # Gets rid of the "HTTP_" prefix of the field.
    header_list = header_key.split("_")
    header_list = [string.capitalize() for string in header_list]
    return "-".join(header_list)
42007140e1eb2922485785e1213eeac859b50348
578,196
import shutil


def process_java_resources(target, source, env):
    """Copy resource file into .resources dir."""
    shutil.copy2(str(source[0]), str(target[0]))
    return None
3ee5194703956d43187a0c4f802c3ee4c132c18a
40,440
def encode_sequence(sequence: str, encoding_scheme: dict):
    """
    Encodes a peptide sequence with values provided by the encoding
    table/scheme.
    """
    encoded_sequence = []
    for aa in sequence:
        try:
            # Indexing (rather than .get, which silently returns None, so
            # the original except clause could never fire) makes a missing
            # residue actually raise.
            value = encoding_scheme[aa]
        except KeyError as e:
            raise KeyError(f'{e}')
        encoded_sequence.append(value)
    return encoded_sequence
ee4bd552608be084523606ee14a41cbbe29483bf
417,782
def is_component_enabled(env, cls):
    """
    Determine whether a trac component is enabled.
    """
    # We would like to use env.is_enabled(cls) to do this,
    # however, trac 0.11 does not have ComponentManager.is_enabled().
    # So instead, rely on ComponentManager.__getitem__(), which does
    # have the same logic in it.
    return env[cls] is not None
0f946d94706f03d86980f0a05a74d486cd77d4a9
533,018
import yaml


def read_yaml(file_path):
    """Read a YAML file by the given path.

    Args:
        file_path (str): The absolute path of the YAML file.

    Returns:
        dict: The data read from the given YAML file.
    """
    with open(file_path, "r") as file_object:
        return yaml.safe_load(file_object)
808bb6c31bd62e3a3be587f500e83e923b967fec
227,970
def _non_string_elements(x):
    """
    Simple helper to check that all values of x are strings. Returns all
    non string elements as (position, element).

    :param x: Iterable
    :return: [(int, !String), ...]
    """
    problems = []
    for i in range(0, len(x)):
        if not isinstance(x[i], str):
            problems.append((i, x[i]))
    return problems
974715622949157693084823a52a88973b51d100
1,095
def change_path_for_metric(path):
    """
    Replace the '/' in the metric path by '_' so grafana can correctly use it.

    :param path: path of the metric (example: runs/search)
    :return: path with '_' instead of '/'
    """
    if 'mlflow/' in path:
        path = path.split('mlflow/')[-1]
    return path.replace('/', '_')
72c887ddde6f34d30a00c21fa869ea919e554a61
510,170
def filter_matchable_fields(cite_keys, bib_dbs, desired_fields=["eprint", "doi"]):
    """Select bibtex entries which have certain desired fields.

    To look up an entry in a different database, we need a well-known
    identifier like a DOI or arXiv identifier. This function will select
    those entries which have enough info (by having desired fields) that
    we can search for them in another DB. The return is a mapping from
    bibkeys to their bib entries, where the entries have been stripped
    down to only the desired well-known identifiers.

    Parameters
    ----------
    cite_keys: array of string
        Bibtex keys to filter
    bib_dbs: array of `bibtexparser.bibdatabase.BibDatabase`
    desired_fields: array of string, optional [default: `['eprint', 'doi']`]
        Fields whose presence lets us search in another DB.

    Returns
    -------
    key_mapping: dict
        For a key `ads_key`, the value is a dict which is a filtered bib
        entry. This resulting dict has keys coming from `desired_fields`.
        For example, you can access `key_mapping[ads_key]['doi']`.
    """
    key_mapping = {}
    for ads_key in cite_keys:
        for bib_db in bib_dbs:
            if ads_key in bib_db.entries_dict:
                entry = bib_db.entries_dict[ads_key]
                filtered_entry = {
                    field: val
                    for field, val in entry.items()
                    if field in desired_fields
                }
                if len(filtered_entry) > 0:
                    key_mapping[ads_key] = filtered_entry
    return key_mapping
b76ae5131200cf532abf95205dbe888385209d26
562,128
def csv_to_list(s: str):
    """Parse comma-separated list (and make lowercase)"""
    return [x.lower().strip() for x in s.split(',')]
bae07932fa373ce0935131f2c27ab794d880e188
516,384
def parse_int(sin):
    """A version of int but fail-safe"""
    return int(sin) if sin.isdigit() else -99
204cbcb01b6df1bbdd09af318fdfe90feb5fe68f
698,570
def FormatDatetime(date, day_only=False):
    """Returns a string representing the given UTC datetime."""
    if not date:
        return None
    if day_only:
        return date.strftime('%Y-%m-%d')
    return date.strftime('%Y-%m-%d %H:%M:%S UTC')
c278c1d8cb5503bd384e2345da759009eef125a5
485,895
from typing import Dict
import json


def load_params_file(params_file: str) -> Dict:
    """
    Load a JSON file of training parameters.

    :param params_file: The input file.
    :return: A dictionary of training parameters.
    """
    with open(params_file, 'r') as fin:
        return json.load(fin)
0538c795c706f6a4edf1c523b643dc349d1e033e
95,632
from typing import Dict
from typing import Optional


def make_env_shell_str(env: Dict[str, str]) -> Optional[str]:
    """
    Transforms env dict to string suitable for use in shell.
    Returns None for empty dict.
    """
    env = env or {}
    result = []
    for k, v in env.items():
        value = v
        if "$" in v or " " in v:
            q = '"'
            if q in v:
                q = "'"
            value = f"{q}{v}{q}"
        result.append(f"{k}={value}")
    return " ".join(result) or None
1937f89bef2738d72dd426b687438d25f487771c
367,212
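A usage sketch for the entry above (keys and values invented): plain values stay bare, values with spaces or '$' get quoted, and an empty dict yields None:
>>> make_env_shell_str({"A": "1", "B": "x y", "C": "$HOME"})
'A=1 B="x y" C="$HOME"'
>>> make_env_shell_str({}) is None
True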
def N_ref_macs(*, theta, mu):
    """
    Calculate N0 reference size for a macs command.

    Macs has a different meaning for the '-t theta' argument. Theta is the
    'mutation rate per site per 4N generations', i.e. theta = 4 * N * mu,
    so N = theta / (4 * mu).
    """
    return theta / (4 * mu)
93049546c95dde13cd60da75a99d39891b6cf0ca
383,432
def parse_file_to_bucket_and_filename(file_path):
    """Divides file path to bucket name and file name"""
    path_parts = file_path.split("//")
    if len(path_parts) >= 2:
        main_part = path_parts[1]
        if "/" in main_part:
            divide_index = main_part.index("/")
            bucket_name = main_part[:divide_index]
            # The original sliced with a negative offset
            # (main_part[divide_index + 1 - len(main_part):]), which
            # returns the whole string when the path ends in '/';
            # a plain slice avoids that edge case.
            file_name = main_part[divide_index + 1:]
            return bucket_name, file_name
    return "", ""
ff252fd051e3236da45b58e58a0b5bb57106def5
264,850
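A usage sketch for the entry above; the bucket URL is invented for illustration:
>>> parse_file_to_bucket_and_filename('gs://my-bucket/path/to/file.csv')
('my-bucket', 'path/to/file.csv')
>>> parse_file_to_bucket_and_filename('no-scheme')
('', '')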
def _index_spec_params(spec_params):
    """
    Makes an index of the spec parameters. It dict-ifies the list of spec
    params provided by the SpecManager, and also returns the set of param
    ids that are used in groups. This gets returned as a tuple
    (indexed params, group param ids)
    """
    spec_params_dict = dict()
    grouped_parents = dict()
    for p in spec_params:
        spec_params_dict[p["id"]] = p
        # groupify the parameters - identify params that are part of
        # groups, and don't include them in the list separately.
        children = p.get("parameter_ids")
        if children:
            for child in children:
                grouped_parents[child] = p["id"]
    return (spec_params_dict, grouped_parents)
19aa93b2d34fb448476a2ebe0a2666494eebb70b
89,913
import torch


def cross_entropy_loss(stu_logits, tea_logits, temp=1.):
    """
    The same as nn.CrossEntropyLoss, but more flexible: computes the
    cross entropy against soft labels.

    Args:
        stu_logits: tensor of shape [N, class]
        tea_logits: tensor of shape [N, class]
        temp: the distillation temperature

    Return:
        kd_loss: the cross entropy on soft labels
    """
    pred = torch.nn.functional.softmax(stu_logits, dim=1)
    labels = torch.nn.functional.softmax(tea_logits / temp, dim=1)
    kd_loss = (-labels * torch.log(pred)).sum(dim=1).mean()
    return kd_loss
a819855533900dcd91c4bfc5278962e506ecaf7f
218,748
import pickle


def load(filename):
    """
    Loads in a saved instance of the SunXspex class.

    Parameters
    ----------
    filename : str
        Filename for the pickled fitting class.

    Returns
    -------
    Loaded in fitting class.
    """
    with open(filename, 'rb') as f:
        loaded = pickle.load(f)
    return loaded
6cc32b68ccb588d66bb072d0a3587dd82bcbe489
602,903
def get_range(distribution):
    """
    Returns the range of a distribution
    """
    return max(distribution) - min(distribution)
56c5ecb14c18a45f123368908d597ef429210dab
336,556
def getPhenotype(chromosome, items):
    """
    Given a chromosome, returns a list of items in the bag

    :param chromosome:
    :param items:
    :return: list
    """
    return [v for i, v in enumerate(items) if chromosome[i] == 1]
19b7bc47cba3fdf652dd84d4c5c1932afde6cbde
108,302
def talk_type(talk):
    """Return the pretty name for the type of the talk."""
    if talk.admin_type:
        typ = talk.get_admin_type_display()
    else:
        typ = talk.get_type_display()
    return typ
67374d7ffaa9c8298812df4a504845c790042502
380,257
import string


def get_gis_field(csv_field, gis_field_lookup):
    """Return a (max) 10 character representation of csv_field that is
    unique to the list of analysis fields"""
    if csv_field in gis_field_lookup:
        return gis_field_lookup[csv_field]
    gis_field_set = set(gis_field_lookup.values())
    gis_field = csv_field[:10]
    chars = iter(string.ascii_uppercase)
    while gis_field in gis_field_set:
        letters = list(gis_field)
        try:
            letters[-1] = next(chars)
        except StopIteration:
            raise Exception('Too many fields for analysis')
        gis_field = ''.join(letters)
    return gis_field
8adfc88babc4514c715f63ec8c258268862226b2
600,794
import uuid


def create_marconi_headers(conf):
    """Returns headers to be used for all Marconi requests."""
    headers = {
        "User-Agent": conf.headers.user_agent,
        "Accept": "application/json",
        "X-Project-ID": conf.headers.project_id,
        "Client-ID": str(uuid.uuid1()),
    }
    return headers
73a4a348b36651f1431865c2b9f197ad78765a61
588,494
import itertools


def take(iterable, n):
    """Return first n items of the iterable as a list."""
    return list(itertools.islice(iterable, n))
dd8b9681707b6abb0b2974e383d053ec9011af88
585,582
def build_sub_O2(Graph):
    """
    For each white node in Graph, we create a copy of Graph and perform
    O2 in that node. Return a list of the graphs obtained.
    """
    White = Graph.white()
    sub_list = []
    for index, n in enumerate(White):
        temp_g = Graph.O2(n)
        temp_g.labeling(index + 1)
        sub_list.append(temp_g)
    return sub_list
099ce3a5ac3b2dcdf1b1a05421ae8cb0e9a0dc07
519,848
from typing import List
from typing import Counter


def remove_rare(sentences: List[List[str]]) -> List[List[str]]:
    """
    Remove rare words (those that appear at most once) from sentences.

    Parameters
    ----------
    sentences:
        List of tokenized sentences.
    """
    counts: Counter = Counter()
    for sentence in sentences:
        counts.update(sentence)
    return [[word for word in sentence if counts[word] > 1]
            for sentence in sentences]
1af60b7bb0393abf99db02abf6f4fea9d9529c15
47,572
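A usage sketch for the entry above (sentences invented): 'the' appears twice and survives, while the singletons are dropped:
>>> remove_rare([["the", "cat"], ["the", "dog"]])
[['the'], ['the']]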