Dataset columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
def deco_inside_ctx_method_self(target):
    """decorator: wrap a class method inside a `with self: ...` context"""
    def tgt(self, *args, **kwargs):
        with self:
            return target(self, *args, **kwargs)
    return tgt
6a29ad468840229c026e6abf87556018a3e16718
2,475
def get_added_after(
    fetch_full_feed, initial_interval, last_fetch_time=None, filter_args=None
):
    """
    Creates the added_after param, or extracts it from the filter_args

    :param fetch_full_feed: when set to true, will limit added_after to the initial interval
    :param initial_interval: fallback added_after value when no last fetch time is available
    :param last_fetch_time: last fetch time value (str)
    :param filter_args: set of filter_args defined by the user to be merged with added_after
    :return: added_after
    """
    if fetch_full_feed:
        return initial_interval
    if not filter_args or "added_after" not in filter_args:
        return last_fetch_time or initial_interval
    return filter_args["added_after"]
281cb7d7429071bf8dca0d04eedee9130a29b28d
2,479
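A quick sketch of how the three branches of get_added_after above behave; the timestamp strings are made up for illustration:

# Illustrative calls with hypothetical timestamp strings:
assert get_added_after(True, "2020-01-01") == "2020-01-01"                  # full feed: pinned to initial interval
assert get_added_after(False, "2020-01-01", "2021-06-01") == "2021-06-01"   # no filter: last fetch time wins
assert get_added_after(False, "2020-01-01", "2021-06-01",
                       {"added_after": "2021-07-01"}) == "2021-07-01"       # user filter takes precedence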
def _a_in_b(first, second):
    """Check if interval `first` is inside interval `second`."""
    return first.start >= second.start and first.stop <= second.stop
e4ca21e1861b691510252eb3be53eed16c8bc8cf
2,482
async def ping_server():
    """
    Ping Server
    ===========

    Returns the message "The Optuna-server is alive!" if the server is running.

    Parameters
    ----------
    None

    Returns
    -------
    msg : str
        A message confirming that the server is running.
    """
    msg = 'The Optuna-server is alive!'
    return msg
2098f2167a14f08105824490824d62dd34b4c49e
2,487
def test_from_rsid(rsids, start_rsid):
    """Continue collecting publications for rsids in list, beginning with start_rsid

    Args:
        rsids (list): list of rsids to collect publications on
        start_rsid (str): rsid identifier to resume collecting publications on

    Returns:
        runtime_rsids (list): [start_rsid, onward...]
        start_rsid (str): starting rsid
        start_idx (int): starting rsid index
        rsids (list): [original list of ALL rsids]
    """
    start_idx = rsids.index(start_rsid)  # start_rsid index
    print(f"STARTING POINT SET TO: | INDEX: {start_idx} / {len(rsids)} | RSID: {rsids[start_idx]}")
    runtime_rsids = rsids[start_idx:]  # runtime rsids
    return runtime_rsids, start_rsid, start_idx, rsids
bf2be86f28645addc08737e64f08695cd6b3a6d3
2,489
def get_base_url(host_name, customer_id):
    """
    :arg host_name: the host name of the IDNow gateway server
    :arg customer_id: your customer id
    :returns: the base url of the IDNow API and the selected customer
    """
    return 'https://{0}/api/v1/{1}'.format(host_name, customer_id)
5a24a87f597cf01c61ab6a01202b2e01e3b00bf8
2,491
import re


def cigar_segment_bounds(cigar, start):
    """
    Determine the start and end positions on a chromosome of the matching parts of an
    RNA-seq read based on the read's cigar string.

    cigar string meaning: http://bioinformatics.cvr.ac.uk/blog/tag/cigar-string/

    Example: '50M25N50M' with start = 100 -> [100, 149, 175, 224]. Note that start and
    end integers are inclusive, i.e. all positions at or between 100 and 149 and at or
    between 175 and 224 are covered by reads.

    :param cigar: str a read's cigar string, e.g. "49M165N51M"
    :param start: int a read's start position on a chromosome
    :return: list of integers representing cigar match start, end points, in order of matching subsequences
    """
    # if CIGAR string is a single full match (i.e. "<positive integer>M")
    # extract length of the match, return match segment.
    full_match = re.match(r'(\d+)M$', cigar)
    if full_match is not None:
        extension = int(cigar[:(full_match.span()[-1] - 1)]) - 1
        return [start, start + extension]

    # break up cigar string into list of 2-tuples (letter indicative of match/no match, run length integer).
    cigar_split = [(v, int(k)) for k, v in re.findall(r'(\d+)([A-Z]?)', cigar)]

    # initialize parse params.
    # Allow for "hard clipping" where aligned read can start with non-matching region (https://bit.ly/2K6TJ5Y)
    augment = False
    any_match = False

    # output storage.
    match_idx_list = list()

    for idx in range(len(cigar_split)):
        segment = cigar_split[idx]

        if segment[0] == 'M':
            any_match = True
            extension = segment[1] - 1  # end of a match run is inclusive.
            augment = True
            match_idx_list += [start, start + extension]  # append a match run to output.
        else:
            if augment:
                extension = segment[1] + 1
                augment = False
            else:
                extension = segment[1]

        start += extension

    # if no matching regions found, throw error.
    if not any_match:
        raise ValueError('CIGAR string {0} has no matching region.'.format(cigar))

    return match_idx_list
c870dfb9b11e2fd1df9fb347528252f114b8d70f
2,496
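The docstring example for cigar_segment_bounds above can be checked directly; both asserts use the coordinates from the docstring, not real data:

# '50M25N50M' starting at 100: two match runs with a 25-base skip between them.
assert cigar_segment_bounds('50M25N50M', start=100) == [100, 149, 175, 224]
# A single full match takes the early-return path.
assert cigar_segment_bounds('101M', start=100) == [100, 200]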
import functools
import asyncio


def no_block(func):
    """Turns a blocking function into a non-blocking coroutine function."""
    @functools.wraps(func)
    async def no_blocking_handler(*args, **kwargs):
        partial = functools.partial(func, *args, **kwargs)
        return await asyncio.get_event_loop().run_in_executor(None, partial)
    return no_blocking_handler
5681fe7275a89c522384b28f9473fded8bba846b
2,497
from typing import List


def load_numbers_sorted(txt: str) -> List[int]:
    """Read numbers from a file, sort them, and return them as a list.

    Args:
        txt (str): path to the file

    Returns:
        List[int]: sorted list of numbers
    """
    numbers = []
    with open(txt) as f:
        numbers = sorted(map(lambda e: int(e), f))
    return numbers
6f10badd417a2ceefefa9f28a5c40583ea077d43
2,501
def translate_pt(p, offset):
    """Translates point p=(x,y) by offset=(x,y)"""
    return (p[0] + offset[0], p[1] + offset[1])
9fdc578d461219e9e5d1b557b9fde3d7a0946815
2,502
import torch


def hsic(k_x: torch.Tensor, k_y: torch.Tensor, centered: bool = False, unbiased: bool = True) -> torch.Tensor:
    """Compute Hilbert-Schmidt Independence Criterion (HSIC)

    :param k_x: n by n values of kernel applied to all pairs of x data
    :param k_y: n by n values of kernel on y data
    :param centered: whether or not at least one kernel is already centered
    :param unbiased: if True, use unbiased HSIC estimator of Song et al (2007), else use original
        estimator of Gretton et al (2005)
    :return: scalar score in [0*, inf) measuring dependence of x and y

    * note that if unbiased=True, it is possible to get small values below 0.
    """
    if k_x.size() != k_y.size():
        raise ValueError("RDMs must have the same size!")
    n = k_x.size()[0]

    if not centered:
        h = torch.eye(n, device=k_y.device, dtype=k_y.dtype) - 1/n
        k_y = h @ k_y @ h

    if unbiased:
        # Remove the diagonal
        k_x = k_x * (1 - torch.eye(n, device=k_x.device, dtype=k_x.dtype))
        k_y = k_y * (1 - torch.eye(n, device=k_y.device, dtype=k_y.dtype))
        # Equation (4) from Song et al (2007)
        return ((k_x * k_y).sum() - 2 * (k_x.sum(dim=0) * k_y.sum(dim=0)).sum() / (n - 2)
                + k_x.sum() * k_y.sum() / ((n - 1) * (n - 2))) / (n * (n - 3))
    else:
        # The original estimator from Gretton et al (2005)
        return torch.sum(k_x * k_y) / (n - 1)**2
7c91aa5991b90f396abbf835111a456208cbc50a
2,509
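A small sanity check for hsic above; the linear kernels and random data are assumptions made for illustration:

import torch

torch.manual_seed(0)
x = torch.randn(100, 5)
y = torch.randn(100, 5)
k_x = x @ x.T  # linear kernel on x
k_y = y @ y.T  # linear kernel on y

print(hsic(k_x, k_x).item())  # large: x is maximally dependent on itself
print(hsic(k_x, k_y).item())  # near zero (up to sampling noise): x and y are independent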
def ratingRange(app):
    """
    Get the rating range of an app.
    """
    rating = 'Unknown'
    r = app['rating']
    if r >= 0 and r <= 1:
        rating = '0-1'
    elif r > 1 and r <= 2:
        rating = '1-2'
    elif r > 2 and r <= 3:
        rating = '2-3'
    elif r > 3 and r <= 4:
        rating = '3-4'
    elif r > 4 and r <= 5:
        rating = '4-5'
    return rating
69056c367a87e331cd3b606423540250b20f6485
2,517
import io


def generate_table_definition(schema_and_table, column_info,
                              primary_key=None, foreign_keys=None,
                              diststyle=None, distkey=None, sortkey=None):
    """Return a CREATE TABLE statement as a string."""
    if not column_info:
        raise Exception('No columns specified for {}'.format(schema_and_table))

    out = io.StringIO()
    out.write('CREATE TABLE {} (\n'.format(schema_and_table))

    columns_count = len(column_info)
    for i, (column, type_) in enumerate(column_info):
        out.write(' "{}" {}'.format(column, type_))
        if (i < columns_count - 1) or primary_key or foreign_keys:
            out.write(',')
        out.write('\n')

    if primary_key:
        out.write(' PRIMARY KEY({})'.format(primary_key))
        if foreign_keys:
            out.write(',')
        out.write('\n')

    foreign_keys = foreign_keys or []
    foreign_keys_count = len(foreign_keys)
    for i, (key, reftable, refcolumn) in enumerate(foreign_keys):
        out.write(' FOREIGN KEY({}) REFERENCES {}({})'.format(
            key, reftable, refcolumn
        ))
        if i < foreign_keys_count - 1:
            out.write(',')
        out.write('\n')

    out.write(')\n')

    if diststyle:
        out.write('DISTSTYLE {}\n'.format(diststyle))

    if distkey:
        out.write('DISTKEY({})\n'.format(distkey))

    if sortkey:
        if isinstance(sortkey, str):
            out.write('SORTKEY({})\n'.format(sortkey))
        elif len(sortkey) == 1:
            out.write('SORTKEY({})\n'.format(sortkey[0]))
        else:
            out.write('COMPOUND SORTKEY({})\n'.format(', '.join(sortkey)))

    return out.getvalue()
383cdc8ed13fbaa45adadec26f31ad0f5ac52fbc
2,519
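A usage sketch for generate_table_definition above; the schema, table, and column names are hypothetical:

ddl = generate_table_definition(
    'analytics.users',
    [('id', 'INTEGER'), ('name', 'VARCHAR(64)')],
    primary_key='id',
    sortkey='id',
)
print(ddl)
# CREATE TABLE analytics.users (
#  "id" INTEGER,
#  "name" VARCHAR(64),
#  PRIMARY KEY(id)
# )
# SORTKEY(id)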
def gradient_descent_update(x, gradx, learning_rate):
    """
    Performs a gradient descent update.
    """
    # Return the new value for x
    return x - learning_rate * gradx
db5ec512883352f473990eca124c8ad302ec3564
2,520
def next_line(grd_file):
    """
    next_line

    Function returns the next line in the file that is not a blank line,
    unless the line is '', which is a typical EOF marker.
    """
    while True:
        line = grd_file.readline()
        if line == '':
            return line, False
        elif line.strip():
            return line, True
337f188930a03142bae59cdb378b09f1ac5e2ecb
2,522
from pathlib import Path
import hashlib


def file_md5_is_valid(fasta_file: Path, checksum: str) -> bool:
    """
    Checks if the FASTA file matches the MD5 checksum argument.
    Returns True if it matches and False otherwise.

    :param fasta_file: Path object for the FASTA file.
    :param checksum: MD5 checksum string.
    :return: boolean indicating if the file validates.
    """
    md5_hash = hashlib.md5()
    with fasta_file.open(mode="rb") as fh:
        # Read in small chunks to avoid memory overflow with large files.
        while chunk := fh.read(8192):
            md5_hash.update(chunk)
    return md5_hash.hexdigest() == checksum
ec400afbe29d940d0638a581da7f2ee001b9e985
2,523
def combine_to_int(values):
    """Combine several byte values to an integer"""
    multibyte_value = 0
    for byte_id, byte in enumerate(values):
        multibyte_value += 2**(4 * byte_id) * byte
    return multibyte_value
58ff7cbee356cdcbe5b26e973de16c5b1cc40afc
2,524
import requests
from bs4 import BeautifulSoup


def get_soup(page_url):
    """Returns BeautifulSoup object of the url provided"""
    try:
        req = requests.get(page_url)
    except Exception:
        print('Failed to establish a connection with the website')
        return
    if req.status_code == 404:
        print('Page not found')
        return
    content = req.content
    soup = BeautifulSoup(content, 'html.parser')
    return soup
d837e3b6aa6184285857428b2c796172379f3a1f
2,527
def foreign_key_constraint_sql(table):
    """Return the SQL to add foreign key constraints to a given table"""
    sql = ''
    fk_names = list(table.foreign_keys.keys())
    for fk_name in sorted(fk_names):
        foreign_key = table.foreign_keys[fk_name]
        sql += "FOREIGN KEY({fn}) REFERENCES {tn}({kc}), ".format(
            fn=foreign_key.from_col,
            tn=foreign_key.to_table.name,
            kc=foreign_key.to_col,
        )
    return sql
0883050d2b9d302ab9099ef27abd400e4d4fe69e
2,528
from pathlib import Path


def get_world_paths() -> list:
    """
    Returns a list of paths to the worlds on the server.
    """
    server_dir = Path(__file__).resolve().parents[1]
    world_paths = []
    for p in server_dir.iterdir():
        # is_dir() must be called; the bare method reference is always truthy.
        if p.is_dir() and (p / "level.dat").is_file():
            world_paths.append(p.absolute())
    return world_paths
bf1c23c6a1c928dc66470db2e11b49ad2fc9e5d9
2,529
import hmac
import hashlib


def is_valid_webhook_request(webhook_token: str, request_body: str,
                             webhook_signature_header: str) -> bool:
    """This method verifies that requests to your Webhook URL are genuine and from Buycoins.

    Args:
        webhook_token: your webhook token
        request_body: the body of the request
        webhook_signature_header: the X-Webhook-Signature header from BuyCoins

    Returns:
        a Boolean stating whether the request is valid or not
    """
    hmac_request_body = hmac.new(webhook_token.encode(), request_body.encode(), hashlib.sha1)
    return hmac.compare_digest(hmac_request_body.hexdigest(), webhook_signature_header)
1ce1ef0a9e1386ebbea7773d8cd9d40df2544792
2,530
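Round-trip check for is_valid_webhook_request above, signing a made-up body with a made-up token (a real signature would arrive in the X-Webhook-Signature header):

import hmac
import hashlib

token = "my-webhook-token"            # hypothetical
body = '{"event": "coins.incoming"}'  # hypothetical
signature = hmac.new(token.encode(), body.encode(), hashlib.sha1).hexdigest()
assert is_valid_webhook_request(token, body, signature)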
def _preprocess_stored_query(query_text, config):
    """Inject some default code into each stored query."""
    ws_id_text = " LET ws_ids = @ws_ids " if 'ws_ids' in query_text else ""
    return '\n'.join([
        config.get('query_prefix', ''),
        ws_id_text,
        query_text,
    ])
bc63391724773cd4a60f3dc9686d243d6d733b40
2,532
def print_scale(skill, points):
    """Return TeX lines for a skill scale."""
    lines = ['\\cvskill{']
    lines[0] += skill
    lines[0] += '}{'
    lines[0] += str(points)
    lines[0] += '}\n'
    return lines
c88de0c6db9e7b92dbcee025f42f56817a4aa033
2,536
def _ontology_value(curie):
    """Get the id component of the curie, 0000001 from CL:0000001 for example."""
    return curie.split(":")[1]
7ef1f0874e698c498ccef16294c0469f67cd5233
2,538
def alias(*alias):
    """Select a (list of) alias(es)."""
    valias = [t for t in alias]
    return {"alias": valias}
b2ff51f33b601468b1ba4d371bd5abd6d013a188
2,549
import json


def read_json_info(fname):
    """
    Parse info from the video information file.

    Returns: Dictionary containing information on podcast episode.
    """
    with open(fname) as fin:
        return json.load(fin)
1eed945ce2917cbca1fb807a807ab57229622374
2,550
def add_ending_slash(directory: str) -> str:
    """add_ending_slash function

    Args:
        directory (str): directory that you want to add ending slash

    Returns:
        str: directory name with slash at the end

    Examples:
        >>> add_ending_slash("./data")
        "./data/"
    """
    if directory[-1] != "/":
        directory = directory + "/"
    return directory
2062a55b59707dd48e5ae56d8d094c806d8a2c1d
2,563
import re


def extractCompositeFigureStrings(latexString):
    """
    Returns a list of latex figures as strings stripping out captions.
    """
    # extract figures
    figureStrings = re.findall(r"\\begin{figure}.*?\\end{figure}", latexString, re.S)
    # filter composite figures only and remove captions (preserving captions in subfigures)
    figureStrings = [
        re.findall(r"\\begin{figure}.*(?=\n.*\\caption)", figureString, re.S)[0] + "\n\\end{figure}"
        for figureString in figureStrings
        if "\\begin{subfigure}" in figureString
    ]
    return figureStrings
83a80c91890d13a6a0247745835e1ffb97d579f7
2,565
import re


def BCA_formula_from_str(BCA_str):
    """
    Get chemical formula string from BCA string

    Args:
        BCA_str: BCA ratio string (e.g. 'B3C1A1')
    """
    if len(BCA_str) == 6 and BCA_str[:3] == 'BCA':
        # format: BCAxyz. suitable for single-digit integer x,y,z
        funits = BCA_str[-3:]
    else:
        # format: BxCyAz. suitable for multi-digit or non-integer x,y,z
        funits = re.split('[BCA]', BCA_str)
        funits = [u for u in funits if len(u) > 0]

    components = ['BaO', 'CaO', 'Al2O3']
    formula = ''.join([f'({c}){n}' for c, n in zip(components, funits)])
    return formula
36375e62d70995628e253ba68ba8b777eb88d728
2,570
def first_item(iterable, default=None):
    """
    Returns the first item of given iterable.

    Parameters
    ----------
    iterable : iterable
        Iterable
    default : object
        Default value if the iterable is empty.

    Returns
    -------
    object
        First iterable item.
    """
    if not iterable:
        return default
    for item in iterable:
        return item
f5ebbaea7cf4152382fb4b2854f68a3320d21fdc
2,577
def rank(value_to_be_ranked, value_providing_rank):
    """
    Returns the rank of ``value_to_be_ranked`` in the set of values
    ``value_providing_rank``. Works even if ``value_providing_rank`` is a
    non-orderable collection (e.g., a set). A binary search would be an
    optimized way of doing this if we can constrain ``value_providing_rank``
    to be an ordered collection.
    """
    num_lesser = [v for v in value_providing_rank if v < value_to_be_ranked]
    return len(num_lesser)
18c2009eb59b62a2a3c63c69d55f84a6f51e5953
2,579
def get_specific_pos_value(img, pos):
    """
    Parameters
    ----------
    img : ndarray
        image data.
    pos : list
        pos[0] is the horizontal coordinate, pos[1] is the vertical coordinate.
    """
    return img[pos[1], pos[0]]
3929b29fa307a7e8b5282783c16639cacb2ab805
2,583
import re


def mrefresh_to_relurl(content):
    """Get a relative url from the contents of a metarefresh tag"""
    urlstart = re.compile('.*URL=')
    _, url = content.split(';')
    url = urlstart.sub('', url)
    return url
90cc3dbace5d4b001698612f9263309fa95aac8b
2,584
import logging


def get_previous_version(versions: dict, app: str) -> str:
    """Looks in the app's .version_history to retrieve the prior version"""
    try:
        with open(f"{app}/.version_history", "r") as fh:
            lines = [line.strip() for line in fh]
    except FileNotFoundError:
        logging.warning(f"No .version_history for {app}")
        return ""

    if versions[app] != lines[-1]:
        logging.warning(
            f"Mismatch in data:\n\tCurrent version is {versions[app]}"
            f" but most recent line in .version_history is {lines[-1]}"
        )
        return ""
    elif len(lines) < 2:
        logging.warning("No prior version recorded")
        return ""
    return lines[-2]
d3a4aec5c3bc842181aa3901971774761866c3e5
2,585
import requests


def getSBMLFromBiomodelsURN(urn):
    """Get SBML string from given BioModels URN.

    Searches for a BioModels identifier in the given urn and retrieves the SBML from biomodels.
    For example:
        urn:miriam:biomodels.db:BIOMD0000000003.xml

    Handles redirects of the download page.

    :param urn:
    :return: SBML string for given model urn
    """
    if ":" not in urn:
        raise ValueError("The URN", urn, "is not in the correct format: it must be divided by"
                         " colons in a format such as 'urn:miriam:biomodels.db:BIOMD0000000003.xml'.")
    core = urn.split(":")[-1].split(".")[0]
    url = "https://www.ebi.ac.uk/biomodels/model/download/" + core + "?filename=" + core + "_url.xml"
    response = requests.get(url, allow_redirects=True)
    response.raise_for_status()
    sbml = response.content  # bytes array in py3
    try:
        sbml_str = str(sbml.decode("utf-8"))
    except UnicodeDecodeError:
        sbml_str = str(sbml)
    return sbml_str
9a28f4a0619ebed6f9e272d84331482442ae9fb8
2,588
import string


def list_zero_alphabet() -> list:
    """Build a list: 0, a, b, c etc."""
    score_dirs = ['0']
    for char in string.ascii_lowercase:
        score_dirs.append(char)
    return score_dirs
6cd9fc9e93257dcc7729235ac3cffa01dbd80c95
2,598
def dim_axis_label(dimensions, separator=', '):
    """
    Returns an axis label for one or more dimensions.
    """
    if not isinstance(dimensions, list):
        dimensions = [dimensions]
    return separator.join([d.pprint_label for d in dimensions])
f03e4eb02fc57890421bdcdaa0aea7d6541b8678
2,599
def _is_camel_case_ab(s, index):
    """Determine if the index is at 'aB', which is the start of a camel token.
    For example, with 'workAt', this function detects 'kA'."""
    return index >= 1 and s[index - 1].islower() and s[index].isupper()
c21ec7d8aa7e786d1ea523106af6f9426fea01d8
2,600
def rgb2hex(rgb: tuple) -> str:
    """
    Converts RGB tuple format to HEX string

    :param rgb:
    :return: hex string
    """
    return '#%02x%02x%02x' % rgb
1ecb1ca68fa3dbe7b58f74c2e50f76175e9a0c5a
2,601
def unix_to_windows_path(path_to_convert, drive_letter='C'):
    """
    For a string representing a POSIX compatible path (usually starting with
    either '~' or '/'), returns a string representing an equivalent Windows
    compatible path together with a drive letter.

    Parameters
    ----------
    path_to_convert : string
        A string representing a POSIX path
    drive_letter : string (Default : 'C')
        A single character string representing the desired drive letter

    Returns
    -------
    string
        A string representing a Windows compatible path.
    """
    if path_to_convert.startswith('~'):
        path_to_convert = path_to_convert[1:]
    if path_to_convert.startswith('/'):
        path_to_convert = path_to_convert[1:]
    path_to_convert = '{}{}{}'.format(drive_letter, ':\\', path_to_convert).replace('/', '\\')
    return path_to_convert
d3c23e2c19be4b81be135ae84760430be852da41
2,603
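Illustrative conversions for unix_to_windows_path above:

assert unix_to_windows_path('~/data/file.txt') == 'C:\\data\\file.txt'
assert unix_to_windows_path('/opt/tool', drive_letter='D') == 'D:\\opt\\tool'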
def flatten(iterable):
    """
    Unpacks nested iterables into the root `iterable`.

    Examples:
        ```python
        from flashback.iterating import flatten

        for item in flatten(["a", ["b", ["c", "d"]], "e"]):
            print(item)
        #=> "a"
        #=> "b"
        #=> "c"
        #=> "d"
        #=> "e"

        assert flatten([1, {2, 3}, (4,), range(5, 6)]) == (1, 2, 3, 4, 5)
        ```

    Params:
        iterable (Iterable<Any>): the iterable to flatten

    Returns:
        tuple<Any>: the flattened iterable
    """
    items = []
    for item in iterable:
        if isinstance(item, (list, tuple, set, frozenset, range)):
            for nested_item in flatten(item):
                items.append(nested_item)
        else:
            items.append(item)
    return tuple(items)
8c47de3255906fb114a13ecfec4bf4a1204a0dfd
2,604
from pathlib import Path


def _path_to_str(var):
    """Make sure var is a string or Path, return string representation."""
    if not isinstance(var, (Path, str)):
        raise ValueError("All path parameters must be either strings or "
                         "pathlib.Path objects. Found type %s." % type(var))
    else:
        return str(var)
c5ae3ed06be31de3220b5400966866ccda29b9fc
2,607
def has_1080p(manifest):
    """Return True if any of the video tracks in manifest have a 1080p profile
    available, else False"""
    return any(video['width'] >= 1920 for video in manifest['videoTracks'][0]['downloadables'])
f187ff7fd8f304c0cfe600c4bed8e809c4c5e105
2,612
def ms(val):
    """
    Turn a float value into milliseconds as an integer.
    """
    return int(val * 1000)
97f7d736ead998014a2026a430bf3f0c54042010
2,619
import torch


def compute_rays_length(rays_d):
    """Compute ray length.

    Args:
        rays_d: [R, 3] float tensor. Ray directions.

    Returns:
        rays_length: [R, 1] float tensor. Ray lengths.
    """
    rays_length = torch.norm(rays_d, dim=-1, keepdim=True)  # [N_rays, 1]
    return rays_length
9b43f9ea79708a690282a04eec65dbabf4a7ae36
2,623
import itertools


def _repeat_elements(arr, n):
    """
    Repeats the elements in the input array, e.g. [1, 2, 3] -> [1, 1, 1, 2, 2, 2, 3, 3, 3]
    """
    ret = list(itertools.chain(*[list(itertools.repeat(elem, n)) for elem in arr]))
    return ret
95cf8ebb75505d2704cf957cdd709b8fa735973a
2,624
def atlas_slice(atlas, slice_number):
    """
    A function that pulls the data for a specific atlas slice.

    Parameters
    ----------
    atlas: nrrd
        Atlas segmentation file that has a stack of slices.
    slice_number: int
        The number in the slice that corresponds to the fixed image for registration.

    Returns
    -------
    sagittal: array
        Sagittal view being pulled from the atlas.
    coronal: array
        Coronal view being pulled from the atlas.
    horizontal: array
        Horizontal view being pulled from the atlas.
    """
    epi_img_data2 = atlas.get_fdata()
    sagittal = epi_img_data2[140, :, :]
    coronal = epi_img_data2[:, slice_number, :]
    horizontal = epi_img_data2[:, :, 100]
    return sagittal, coronal, horizontal
bafe5d886568203792b0f6178302f3ca5d536e5b
2,627
from typing import Dict

import aiohttp


async def head(url: str) -> Dict:
    """Fetch headers returned by an HTTP HEAD request.

    :param str url: The URL to perform the HEAD request for.
    :rtype: dict
    :returns: dictionary of lowercase headers
    """
    async with aiohttp.request("HEAD", url) as res:
        response_headers = res.headers
        return {k.lower(): v for k, v in response_headers.items()}
b4decbfb4e92863c07c5202e2c884c02e590943f
2,629
def determine_if_pb_should_be_filtered(row, min_junc_after_stop_codon):
    """PB should be filtered if NMD, a truncation, or protein classification
    is not likely protein coding (intergenic, antisense, fusion,...)

    Args:
        row (pandas Series): protein classification row
        min_junc_after_stop_codon (int): minimum number of junctions after stop
            codon a protein can have. used in NMD determination

    Returns:
        int: 1 if should be filtered, 0 if should not be filtered
    """
    # filter out pbs that are artifacts or noncoding
    pclass = str(row['protein_classification'])
    num_junc_after_stop_codon = int(row['num_junc_after_stop_codon'])
    pclass_base_to_keep = ['pFSM', 'pNIC']
    pclass_base = str(row['protein_classification_base'])
    if pclass_base not in pclass_base_to_keep and num_junc_after_stop_codon > min_junc_after_stop_codon:
        return 1
    elif 'trunc' in pclass:
        return 1
    elif 'intergenic' in pclass:
        return 1
    elif 'antisense' in pclass:
        return 1
    elif 'fusion' in pclass:
        return 1
    elif 'orphan' in pclass:
        return 1
    elif 'genic' in pclass:
        return 1
    return 0
29ab7ce53ac7569c4d8a29e8e8564eab33b3f545
2,631
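A sketch of determine_if_pb_should_be_filtered above on hypothetical rows (column values invented for illustration):

import pandas as pd

keep = pd.Series({'protein_classification': 'pNIC,known',
                  'protein_classification_base': 'pNIC',
                  'num_junc_after_stop_codon': 0})
drop = pd.Series({'protein_classification': 'pNIC,trunc',
                  'protein_classification_base': 'pNIC',
                  'num_junc_after_stop_codon': 0})
assert determine_if_pb_should_be_filtered(keep, min_junc_after_stop_codon=2) == 0
assert determine_if_pb_should_be_filtered(drop, min_junc_after_stop_codon=2) == 1  # truncation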
def project_to_2D(xyz):
    """Projection to the (0, X, Z) plane."""
    return xyz[0], xyz[2]
c6cdb8bd6dce65f6ce39b14b9e56622832f35752
2,634
def transform_to_dict(closest_list: list) -> dict:
    """
    Returns dict {(latitude, longitude): {film1, film2, ...}, ...} from
    closest_list [[film1, (latitude, longitude)], ...], where film1, film2 are
    titles of films and (latitude, longitude) are the coordinates of the place
    where those films were shot.

    >>> transform_to_dict([["film1", (49, 24)]])
    {(49, 24): {'film1'}}
    """
    closest_dict = {}
    for film, coord in closest_list:
        if coord in closest_dict:
            closest_dict[coord].add(film)
        else:
            closest_dict[coord] = {film}
    return closest_dict
e7c6fae73792a828d85db03e794bfb69c7b1fe87
2,641
import signal


def _signal_exit_code(signum: signal.Signals) -> int:
    """
    Return the exit code corresponding to a received signal.

    Conventionally, when a program exits due to a signal its exit code is 128
    plus the signal number.
    """
    return 128 + int(signum)
050eee98632216fddcbd71e4eb6b0c973f6d4144
2,645
def is_contained(target, keys):
    """Check if the target json object contains the specified keys

    :param target: target json object
    :param keys: keys
    :return: True if all of the keys are contained, False if any one of them is not.
        Invalid parameters always return False.
    """
    if not target or not keys:
        return False
    # if keys is just a string convert it to a list
    if type(keys) == str:
        keys = [keys]
    # traverse the list to check the json object
    # if a key does not exist or its value is None then return False
    try:
        for key in keys:
            if target[key] is None:
                return False
    except KeyError:
        return False
    # All seems to be going well
    return True
948196d4b470788199506bd7768e03554fa67b40
2,646
def map(x, in_min, in_max, out_min, out_max):
    """
    Map a value from one range to another

    :param in_min: minimum of input range
    :param in_max: maximum of input range
    :param out_min: minimum of output range
    :param out_max: maximum of output range
    :return: The value scaled to the new range
    :rtype: int
    """
    return int((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min)
4117af35b0061df1fd271306accf198692442dac
2,647
import itertools


def node_extractor(dataframe, *columns):
    """
    Extracts the set of nodes from a given dataframe.

    :param dataframe: dataframe from which to extract the node list
    :param columns: list of column names that contain nodes
    :return: list of all unique nodes that appear in the provided dataset
    """
    data_list = [dataframe[column].unique().tolist() for column in columns]
    return list(set(itertools.chain.from_iterable(data_list)))
7a4ab889257a0f2c5ddfe18e65d0a7f5f35d8d98
2,651
def _prepare_memoization_key(args, kwargs):
    """
    Make a tuple of arguments which can be used as a key for a memoized
    function's lookup_table. If some object can't be hashed then use its
    __repr__ instead.
    """
    key_list = []
    for arg in args:
        try:
            hash(arg)
            key_list.append(arg)
        except TypeError:
            key_list.append(repr(arg))
    for (k, v) in kwargs.items():
        try:
            hash(k)
            hash(v)
            key_list.append((k, v))
        except TypeError:
            key_list.append((repr(k), repr(v)))
    return tuple(key_list)
c83e08c42886ba0e7f6e4defe5bc8f53f5682657
2,655
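A minimal memoize sketch built on _prepare_memoization_key above; the decorator itself is an assumption for illustration, not part of the source:

def memoize(func):
    lookup_table = {}

    def wrapper(*args, **kwargs):
        key = _prepare_memoization_key(args, kwargs)
        if key not in lookup_table:
            lookup_table[key] = func(*args, **kwargs)
        return lookup_table[key]
    return wrapper

@memoize
def double(x):
    return x * 2

assert double([1, 2]) == [2, 4]  # unhashable list argument is keyed by its repr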
def by_tag(articles_by_tag, tag):
    """Filter a list of (tag, articles) to list of articles by tag"""
    for a in articles_by_tag:
        if a[0].slug == tag:
            return a[1]
642472a89cb624ed02a6e8ec488b72856ac231a9
2,658
def dp_port_id(switch: str, port: str) -> str:
    """
    Return a unique id of a DP switch port based on switch name and port name

    :param switch:
    :param port:
    :return:
    """
    return 'port+' + switch + ':' + port
479891e41b51114744dcbb2b177180c19cd1bfd5
2,659
def tuple_list_to_lua(tuple_list):
    """Given a list of tuples, return a lua table of tables"""
    def table(it):
        return "{" + ",".join(map(str, it)) + "}"
    return table(table(t) for t in tuple_list)
71ec1a29f5e23b8bf82867617fe157fbba4a2332
2,664
def fancy_vector(v):
    """
    Returns a given 3-vector or array in a cute way on the shell, if you
    use 'print' on the return value.
    """
    return "\n / %5.2F \\\n" % (v[0]) + \
           " | %5.2F |\n" % (v[1]) + \
           " \\ %5.2F /\n" % (v[2])
2340f22aa87da00abad30b9946c374f34b38496d
2,665
def any_of(elements):
    """
    Check to see if the argument is contained in a list of possible elements.

    :param elements: The elements to check the argument against in the predicate.
    :return: A predicate to check if the argument is a constituent element.
    """
    def predicate(argument):
        return argument in elements
    return predicate
adacf8fd632d25452d22dab0a8a439021083ec83
2,666
def find_year(films_lst: list, year: int):
    """
    Filter list of films by given year
    """
    filtered_films_lst = [line for line in films_lst if line[1] == str(year)]
    return filtered_films_lst
f4c11e09e76831afcf49154234dd57044536bce1
2,667
def cal_occurence(correspoding_text_number_list):
    """
    Calculate the occurrence of each number in a list
    """
    di = dict()
    for i in correspoding_text_number_list:
        i = str(i)
        s = di.get(i, 0)
        if s == 0:
            di[i] = 1
        else:
            di[i] = di[i] + 1
    return di
aafabc6abdf4bf1df1b8d9e23a4af375df3ac75b
2,669
def ConvertVolumeSizeString(volume_size_gb):
    """Converts the volume size defined in the schema to an int."""
    volume_sizes = {
        "500 GB (128 GB PD SSD x 4)": 500,
        "1000 GB (256 GB PD SSD x 4)": 1000,
    }
    return volume_sizes[volume_size_gb]
b1f90e5ded4d543d88c4f129ea6ac03aeda0c04d
2,671
def get_snps(x: str) -> tuple:
    """Parse a SNP line and return name, chromosome, position."""
    snp, loc = x.split(' ')
    chrom, position = loc.strip('()').split(':')
    return snp, chrom, int(position)
52672c550c914d70033ab45fd582fb9e0f97f023
2,672
def get_upper_parentwidget(widget, parent_position: int):
    """This function replaces this:
        self.parentWidget().parentWidget().parentWidget()
    with this:
        get_upper_parentwidget(self, 3)

    :param widget: QWidget
    :param parent_position: Which parent
    :return: Wanted parent widget
    """
    while parent_position > 0:
        widget = widget.parentWidget()
        parent_position -= 1
    return widget
ff010f3d9e000cfa3c58160e150c858490f2412d
2,676
def add(n):
    """Add 1."""
    return n + 1
c62cee4660540ae62b5b73369bdeb56ccb0088d6
2,679
def sortkey(d):
    """Split d on "_", reverse and return as a tuple."""
    parts = d.split("_")
    parts.reverse()
    return tuple(parts)
1d8f8864a3d0bfd7dae8711bca183317e0f3fc0e
2,683
def first_n(m: dict, n: int):
    """Return first n items of dict"""
    return {k: m[k] for k in list(m.keys())[:n]}
57ccc9f8913c60c592b38211900fe8d28feffb4c
2,684
import pickle


def save_calib(filename, calib_params):
    """
    Saves calibration parameters as '.pkl' file.

    Parameters
    ----------
    filename : str
        Path to save file, must be '.pkl' extension
    calib_params : dict
        Calibration parameters to save

    Returns
    -------
    saved : bool
        Saved successfully.
    """
    if type(calib_params) != dict:
        raise TypeError("calib_params must be 'dict'")
    output = open(filename, 'wb')
    try:
        pickle.dump(calib_params, output)
    except Exception:
        raise IOError("filename must be '.pkl' extension")
    output.close()
    saved = True
    return saved
6735c8a6e96158b9fc580b6e61609b5ae7733fe0
2,685
def create_P(P_δ, P_ζ, P_ι):
    """
    Combine `P_δ`, `P_ζ` and `P_ι` into a single matrix.

    Parameters
    ----------
    P_δ : ndarray(float, ndim=1)
        Probability distribution over the values of δ.
    P_ζ : ndarray(float, ndim=2)
        Markov transition matrix for ζ.
    P_ι : ndarray(float, ndim=1)
        Probability distribution over the values of ι.

    Returns
    -------
    P : ndarray(float, ndim=3)
        Joint probability distribution over the values of δ, ζ and ι.
        Probabilities vary by δ on the first axis, by ζ on the second axis,
        and by ι on the third axis.
    """
    P = P_δ[:, None, None, None] * P_ζ[None, :, :, None] * \
        P_ι[None, None, None, :]
    return P
0afdef50c50563421bb7c6f3f928fa6b3e5f4733
2,687
import typing


def median(vals: typing.List[float]) -> float:
    """Calculate median value of `vals`

    Arguments:
        vals {typing.List[float]} -- list of values

    Returns:
        float -- median value
    """
    ordered = sorted(vals)
    mid = len(ordered) // 2
    # Odd-length input: the middle element; even-length: mean of the two middle elements.
    if len(ordered) % 2 == 1:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2
9f840d11409a570a718fdfe56d7a282af43bc798
2,688
import networkx


def nx_find_connected_limited(graph, start_set, end_set, max_depth=3):
    """Return the neurons in end_set reachable from start_set with limited depth."""
    reverse_graph = graph.reverse()
    reachable = []
    for e in end_set:
        preorder_nodes = list(
            networkx.algorithms.traversal.depth_first_search.dfs_preorder_nodes(
                reverse_graph, source=e, depth_limit=max_depth
            )
        )
        for s in start_set:
            if s in preorder_nodes:
                reachable.append(e)
                break
    return reachable
4322f4231be73b575d05442f09608c71c3b9f605
2,701
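A tiny directed chain shows the depth limit of nx_find_connected_limited above in action:

import networkx as nx

g = nx.DiGraph([("a", "b"), ("b", "c"), ("c", "d")])
# "c" is two hops from "a", "d" is three; with max_depth=2 only "c" qualifies.
assert nx_find_connected_limited(g, ["a"], ["c", "d"], max_depth=2) == ["c"]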
def hexbyte_2integer_normalizer(first_int_byte, second_int_byte):
    """Function to normalize integer bytes to a single byte

    Transform two integer bytes to their hex byte values and normalize
    their values to a single integer

    Parameters
    ----------
    first_int_byte, second_int_byte : int
        integer values to normalize (0 to 255)

    Returns
    -------
    integer : int
        Single normalized integer
    """
    # Format each byte as exactly two hex digits so zero bytes keep their width;
    # stripping '0x' character-wise would also eat significant leading zeros.
    first_hex = f'{first_int_byte:02x}'
    second_hex = f'{second_int_byte:02x}'
    hex_string = f'{first_hex}{second_hex}'
    integer = int(hex_string, 16)
    return integer
a3bbe75014b6e08607314b615440039bab245f04
2,702
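Spot check for hexbyte_2integer_normalizer above, including a zero byte:

assert hexbyte_2integer_normalizer(0x12, 0x34) == 0x1234  # 4660
assert hexbyte_2integer_normalizer(0x10, 0x00) == 0x1000  # zero byte keeps its width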
def get_correct_line(df_decisions):
    """
    The passed df has repeated lines for the same file (same chemin_source).
    We take the most recent one.

    :param df_decisions: Dataframe of decisions
    :return: Dataframe without repeated lines (according to the chemin_source column)
    """
    return df_decisions.sort_values('timestamp_modification').drop_duplicates('chemin_source', keep='last')
989f1aba1c5e0c61f8b7ca1c883baf4dd181ebbc
2,704
def get_service(vm, port):
    """Return the service for a given port."""
    for service in vm.get('suppliedServices', []):
        if service['portRange'] == port:
            return service
d617771c25c69ee874b0bc64adcc735aa876f929
2,707
def _project(doc, projection):
    """Return new doc with items filtered according to projection."""
    def _include_key(key, projection):
        for k, v in projection.items():
            if key == k:
                if v == 0:
                    return False
                elif v == 1:
                    return True
                else:
                    raise ValueError('Projection value must be 0 or 1.')
        if projection and key != '_id':
            return False
        return True
    return {k: v for k, v in doc.items() if _include_key(k, projection)}
0f2cd190e73b39ceeec0f850054baab1dd357587
2,708
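Behavior sketch for _project above with an include-style projection; the document contents are invented:

doc = {'_id': 1, 'name': 'ada', 'age': 36}
assert _project(doc, {'name': 1}) == {'_id': 1, 'name': 'ada'}  # _id is always kept
assert _project(doc, {}) == doc  # empty projection keeps everything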
import requests
import json


def folder0_content(folder0_id, host, token):
    """
    List the contents of a level-0 OneData folder.

    Parameters
    ----------
    folder0_id : OneData folder level 0 id containing the data to publish.
    host : OneData provider (e.g., ceta-ciemat-02.datahub.egi.eu).
    token : OneData personal access token.

    Returns
    -------
    all_level0 : "name" and "id" of the folders contained in the folder
        defined by "folder0_id"
    """
    OneData_urlchildren = "https://" + host + '/api/v3/oneprovider/data/' + folder0_id + "/children"
    request_param = {'X-Auth-Token': token}
    r_level0 = requests.get(OneData_urlchildren, headers=request_param)
    all_level0 = json.loads(r_level0.text)
    return all_level0
8ce6ae617666f936643b9599ae115e140b30bd2b
2,713
import requests
import logging


def odata_getone(url, headers):
    """
    Get a single object from Odata
    """
    r = requests.get(url, headers=headers)
    if not r.ok:
        logging.warning(f"Fetch url {url} hit {r.status_code}")
        return None
    rjson = r.json()
    if 'error' in rjson:
        logging.warning(f"Fetching of {url} returned error {r.text}")
        return None
    return rjson
5d6c668845132d821f175a2e8c1a924492a9eb2f
2,727
from datetime import datetime

import pytz


def isotime(timestamp):
    """ISO 8601 formatted date in UTC from unix timestamp"""
    return datetime.fromtimestamp(timestamp, pytz.utc).isoformat()
f6a922d75a186e26f158edc585691e31bf430b01
2,738
def _get_index_sort_str(env, name):
    """
    Returns a string by which an object with the given name shall be sorted in
    indices.
    """
    ignored_prefixes = env.config.cmake_index_common_prefix
    for prefix in ignored_prefixes:
        if name.startswith(prefix) and name != prefix:
            return name[len(prefix):]
    return name
cdf7a509ef8f49ff15cac779e37f0bc5ab98c613
2,740
import requests


def tmdb_find_movie(movie: str, tmdb_api_token: str):
    """
    Search the tmdb api for movies by title

    Args:
        movie (str): the title of a movie
        tmdb_api_token (str): your tmdb v3 api token

    Returns:
        dict
    """
    url = 'https://api.themoviedb.org/3/search/movie?'
    params = {
        'query': movie,
        'language': 'en-US',
        'api_key': tmdb_api_token,
    }
    return requests.get(url, params).json()
ea676fbb91f451b20ce4cd2f7258240ace3925b3
2,742
def errorString(node, error):
    """
    Format error messages for node errors returned by checkLinkoStructure.

    inputs:
        node - the node for the error.
        error - a (backset, foreset) tuple, where backset is the set of missing
            backlinks and foreset is the set of missing forelinks.

    returns: string
        string - the error string message.
    """
    back, fore = error[0], error[1]
    if len(back) == 0:
        back = 'None'
    if len(fore) == 0:
        fore = 'None'
    return ('Node {0}: missing backlinks {1},'
            ' missing forelinks {2}').format(node, back, fore)
df87b7838ed84fe4e6b95002357f616c96d04ad0
2,745
def _Backward3a_T_Ps(P, s):
    """Backward equation for region 3a, T=f(P,s)

    Parameters
    ----------
    P : float
        Pressure [MPa]
    s : float
        Specific entropy [kJ/kgK]

    Returns
    -------
    T : float
        Temperature [K]

    References
    ----------
    IAPWS, Revised Supplementary Release on Backward Equations for the
    Functions T(p,h), v(p,h) and T(p,s), v(p,s) for Region 3 of the IAPWS
    Industrial Formulation 1997 for the Thermodynamic Properties of Water and
    Steam, http://www.iapws.org/relguide/Supp-Tv%28ph,ps%293-2014.pdf, Eq 6

    Examples
    --------
    >>> _Backward3a_T_Ps(20,3.8)
    628.2959869
    >>> _Backward3a_T_Ps(100,4)
    705.6880237
    """
    I = [-12, -12, -10, -10, -10, -10, -8, -8, -8, -8, -6, -6, -6, -5, -5, -5,
         -4, -4, -4, -2, -2, -1, -1, 0, 0, 0, 1, 2, 2, 3, 8, 8, 10]
    J = [28, 32, 4, 10, 12, 14, 5, 7, 8, 28, 2, 6, 32, 0, 14, 32, 6, 10, 36,
         1, 4, 1, 6, 0, 1, 4, 0, 0, 3, 2, 0, 1, 2]
    n = [0.150042008263875e10, -0.159397258480424e12, 0.502181140217975e-3,
         -0.672057767855466e2, 0.145058545404456e4, -0.823889534888890e4,
         -0.154852214233853, 0.112305046746695e2, -0.297000213482822e2,
         0.438565132635495e11, 0.137837838635464e-2, -0.297478527157462e1,
         0.971777947349413e13, -0.571527767052398e-4, 0.288307949778420e5,
         -0.744428289262703e14, 0.128017324848921e2, -0.368275545889071e3,
         0.664768904779177e16, 0.449359251958880e-1, -0.422897836099655e1,
         -0.240614376434179, -0.474341365254924e1, 0.724093999126110,
         0.923874349695897, 0.399043655281015e1, 0.384066651868009e-1,
         -0.359344365571848e-2, -0.735196448821653, 0.188367048396131,
         0.141064266818704e-3, -0.257418501496337e-2, 0.123220024851555e-2]

    Pr = P/100
    sigma = s/4.4
    suma = 0
    for i, j, ni in zip(I, J, n):
        suma += ni * (Pr+0.240)**i * (sigma-0.703)**j
    return 760*suma
cb0b9b55106cf771e95505c00043e5772faaef40
2,748
def format_dB(num):
    """
    Returns a human readable string of dB. The value is divided by 10
    to get the first decimal digit
    """
    num /= 10
    return f'{num:3.1f} dB'
13d6313834333ee2ea432cf08470b6ce1efe1ad6
2,749
def get_fourier_col_name(k, col_name, function_name="sin", seas_name=None):
    """Returns column name corresponding to a particular fourier term, as returned by fourier_series_fcn

    :param k: int
        fourier term
    :param col_name: str
        column in the dataframe used to generate fourier series
    :param function_name: str
        sin or cos
    :param seas_name: str
        appended to new column names added for fourier terms
    :return: str
        column name in DataFrame returned by fourier_series_fcn
    """
    # patsy doesn't allow "." in formula terms. Replace "." with "_" rather than
    # quoting all fourier terms with "Q()".
    name = f"{function_name}{k:.0f}_{col_name}"
    if seas_name is not None:
        name = f"{name}_{seas_name}"
    return name
5c15b52728d0333c9c7df59030d6ead66473c823
2,758
def build_binary_value(char_str, bits, alphabet) -> str:
    """
    This method converts a string char_str into binary, using n bits per
    character and decoding from the supplied alphabet or from ASCII when bits=7

    This is almost the inverse method to build_string in the decompress module.

    :param char_str: string.
    :param bits: number of bits per character.
    :param alphabet: Alphabet.
    :return: binary value.
    """
    if bits == 7:
        indices = [ord(char_) for char_ in char_str]
    else:
        indices = [alphabet.index(char_) for char_ in char_str]
    binary_char_list = ["{0:b}".format(index).zfill(bits) for index in indices]
    return ''.join(binary_char_list)
50830dd5cfa3f5428b0946e7382220f9b5ff1915
2,761
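Two quick checks for build_binary_value above, one per branch; the two-character alphabet call is a made-up illustration:

assert build_binary_value('AB', 7, None) == '10000011000010'  # 7-bit ASCII path
assert build_binary_value('ba', 2, 'abc') == '0100'           # alphabet lookup path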
def irange(start, end):
    """Inclusive range from start to end (vs. Python insanity.)
    irange(1,5) -> 1, 2, 3, 4, 5"""
    return range(start, end + 1)
91d4c270b1d9304b4ee82c0cb16aee5d518db3d5
2,763
import unicodedata


def sanitize_str(value: str) -> str:
    """Removes Unicode control (Cc) characters EXCEPT for tabs (\t),
    newlines (\n only), line separators (U+2028) and paragraph separators (U+2029)."""
    # Keep a character when it is not a control character, or when it is one of
    # the whitelisted exceptions (U+2028/U+2029 are categories Zl/Zp, not Cc,
    # so the category test alone already keeps them).
    return "".join(
        ch for ch in value
        if unicodedata.category(ch) != 'Cc' or ch in {'\t', '\n', '\u2028', '\u2029'}
    )
5b5eae2b377a834e377a8bf7bcd7cefc2278c2f7
2,771
from typing import Optional


def clean_pin_cite(pin_cite: Optional[str]) -> Optional[str]:
    """Strip spaces and commas from pin_cite, if it is not None."""
    if pin_cite is None:
        return pin_cite
    return pin_cite.strip(", ")
9c495fcc4f1cf192c1358f50fef569c4d6b36290
2,773
import json


def get_json_dump(json_object, indent=4, sort_keys=False):
    """
    Short handle to get a pretty printed str from a JSON object.
    """
    return json.dumps(json_object, indent=indent, sort_keys=sort_keys)
505548cdf972ef891b7bcc3bcd7be3347769faec
2,774
def heap_sort(arr: list):
    """
    Heap sorting a list. Big-O: O(n log n).
    @see https://www.geeksforgeeks.org/heap-sort/
    """
    def heapify(sub: list, rdx: int, siz: int):
        """
        Heapifying range between rdx and size ([rdx:siz]).
        @param sub: a slice of list.
        @param rdx: root/parent index to start.
        @param siz: size of heap.
        """
        largest = ndx = rdx  # assuming the root is the largest
        while ndx < siz:
            l_index = 2 * ndx + 1  # child index at left = 2*i + 1
            r_index = 2 * ndx + 2  # child index at right = 2*i + 2
            # reset largest index if left child exists and is greater than root.
            if l_index < siz and sub[ndx] < sub[l_index]:
                largest = l_index
            # check if right child is greater than the value at the largest index.
            if r_index < siz and sub[largest] < sub[r_index]:
                largest = r_index
            # change root, if needed, and continue heapifying the subtree.
            if largest != ndx:
                sub[ndx], sub[largest] = sub[largest], sub[ndx]  # swap
                ndx = largest
                continue
            return

    n = len(arr)
    # build a max heap.
    parent = n // 2 - 1  # the last parent (that can have children)
    for i in range(parent, -1, -1):
        heapify(arr, i, n)
    # extract elements one by one.
    for i in range(n - 1, 0, -1):
        arr[i], arr[0] = arr[0], arr[i]  # swap
        heapify(arr, 0, i)
    return arr
9b53f3027804cab16c9850d4858377f49afe7bbf
2,775
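heap_sort above sorts in place and also returns the list:

data = [12, 11, 13, 5, 6, 7]
assert heap_sort(data) == [5, 6, 7, 11, 12, 13]
assert data == [5, 6, 7, 11, 12, 13]  # sorted in place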
def prompt_for_password(prompt=None):
    """Fake prompt function that just returns a constant string"""
    return 'promptpass'
49499970c7698b08f38078c557637907edef3223
2,777
def get_frame_list(video, jump_size=6, **kwargs):
    """
    Returns list of frame numbers including first and last frame.
    """
    frame_numbers = [
        frame_number for frame_number in range(0, video.frame_count, jump_size)
    ]
    last_frame_number = video.frame_count - 1
    if frame_numbers[-1] != last_frame_number:
        frame_numbers.append(last_frame_number)
    return frame_numbers
786de04b4edf224045216de226ac61fdd42b0d7b
2,778
def obter_forca(unidade):
    """This function returns the attack strength of the given unit."""
    return unidade[2]
34fe4acac8e0e3f1964faf8e4b26fa31148cf2a6
2,783
import itertools


def strip_translations_header(translations: str) -> str:
    """
    Strip header from translations generated by ``xgettext``.

    Header consists of multiple lines separated from the body by an empty line.
    """
    return "\n".join(itertools.dropwhile(len, translations.splitlines()))
b96c964502724008306d627d785224be08bddb86
2,789
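Behavior check for strip_translations_header above on a minimal made-up .po snippet; note that the blank separator line itself survives as a leading newline:

raw = 'msgid ""\nmsgstr ""\n"Language: en\\n"\n\nmsgid "Hello"\nmsgstr "Hallo"'
assert strip_translations_header(raw) == '\nmsgid "Hello"\nmsgstr "Hallo"'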
def find_attachments(pattern, cursor):
    """Return a list of attachments that match the specified pattern.

    Args:
        pattern: The path to the attachment, as a SQLite pattern (to be passed
            to a LIKE clause).
        cursor: The Cursor object through which the SQLite queries are sent to
            the Zotero database.

    Returns:
        A list of (parentItemID, path) pairs that match the specified pattern.
        The returned list is empty if no matches are found.
    """
    query = 'SELECT parentItemID, path FROM itemAttachments WHERE path LIKE ?'
    cursor.execute(query, (pattern,))
    return list(cursor)
614649f6fd5972b026b191bb1a272e270dedffe5
2,795
def should_parse(config, file):
    """Check if file extension is in list of supported file types (can be
    configured from cli)"""
    return file.suffix and file.suffix.lower() in config.filetypes
1c2258d405ef715574b557d99cdf87e461627ffd
2,799
def flatten(x):
    """Flattens nested list"""
    if isinstance(x, list):
        return [a for i in x for a in flatten(i)]
    else:
        return [x]
7d348f8287dfccfbb77a52a84a5642c265381eb1
2,804
def identity(obj):
    """Returns the ``obj`` parameter itself

    :param obj: The parameter to be returned
    :return: ``obj`` itself

    >>> identity(5)
    5
    >>> foo = 2
    >>> identity(foo) is foo
    True
    """
    return obj
a3271a831d2e91fe6eebed7e80c18e7c81996da6
2,806