Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
def float_one(vals):
    """Return a constant floating point value of 1.0."""
    return 1.0
31ce58c2629fb36fd67d84ff440adae05a823c2e
42,442
def get_public_key_location(private_key_location: str) -> str:
    """Returns the location of the public key based on the location of the private key.

    :param private_key_location: Private key location.
    """
    return private_key_location + ".pub"
e415e3807db648a42cd40f2ade31a08979e58172
440,475
def FindActiveBundle(config):
    """Find the name of the first active bundle in a config."""
    bundle = next(b for b in config['bundles'] if b['active'])
    return bundle['id']
be0fb1e2cff864562390df84c07c85b6923a83fd
435,683
import re


def clean_string(string):
    """
    Clean a given string from umlauts and unwanted characters to ensure
    compatibility with the file system.

    :param string: String that needs cleaning
    :type string: string
    :return: Cleaned string
    """
    string = re.sub('Ä', 'Ae', string)
    string = re.sub('ä', 'ae', string)
    string = re.sub('Ö', 'Oe', string)
    string = re.sub('ö', 'oe', string)
    string = re.sub('Ü', 'Ue', string)
    string = re.sub('ü', 'ue', string)  # the original repeated 'Ü' here, leaving 'ü' unhandled
    string = re.sub('ß', 'ss', string)
    # Avoid slashes in names because they represent sub-dirs
    string = re.sub('/', ' ', string)
    return string
af92f79744e1d1497b468a2dd198b6afb9fd8685
460,349
import random


def parent_idxs_choice(sorted_idxs, n_total, **optional_args):
    """Returns the indexes of the new parents for the next iteration.

    Used in Scheduler.loop()

    Args:
        sorted_idxs (list): Indexes of the sorted loss for the previous iteration.
        n_total (int): Length of the returned array.

    Returns:
        list: List containing the sorted indexes of the new parents for the
            next iteration.
    """
    acceptance_probability = optional_args.get("accept_prob", 0.9)
    new_idxs = [None] * n_total  # placeholder slots, all overwritten below
    i = 0
    j = 0
    # Walk the sorted indexes, accepting each with the given probability,
    # while leaving enough candidates to fill the remaining slots.
    while i < n_total and j < len(sorted_idxs) - (n_total - i):
        if random.uniform(0, 1) < acceptance_probability:
            new_idxs[i] = sorted_idxs[j]
            i += 1
        j += 1
    # Fill any remaining slots with the next candidates unconditionally.
    while i < n_total:
        new_idxs[i] = sorted_idxs[j]
        i += 1
        j += 1
    return new_idxs
2224772d9586db9972fdf7dc21346e7ce370166d
498,619
import torch


def gaussian_sample(mu, logvar):
    """Sample from N(mu, Sigma): z ~ mu + Cholesky(Sigma(x)) * eps, eps ~ N(0, I_n).

    The variance is restricted to be diagonal, so Cholesky(...) -> sqrt(...).

    Parameters
    ----------
    mu : torch.Tensor
        Location parameter of Gaussian. (B, D)
    logvar : torch.Tensor
        Log of variance parameter of Gaussian. (B, D)
    """
    sigma = torch.exp(0.5 * logvar)
    epsilon = torch.randn_like(sigma)
    return mu + sigma * epsilon
c810ef61005c639ffa39c766251f4a4f5c266bb7
189,303
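A quick sanity check of the reparameterization trick above (my own illustration, not part of the source row): with a large batch, the empirical moments of the samples should approach mu and exp(0.5 * logvar).

import torch

mu = torch.zeros(100000, 2)
logvar = torch.log(torch.full((100000, 2), 4.0))  # variance 4 -> sigma 2
z = gaussian_sample(mu, logvar)
print(round(z.mean().item(), 2))  # ~0.0
print(round(z.std().item(), 2))   # ~2.0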
import cProfile


def run_profiler(func, *args, **kwargs):
    """Runs the cProfiler.

    Args:
        func (callable): The function to profile.
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.

    Returns:
        cProfile.Profile: The profiler results.
    """
    profile = cProfile.Profile()
    profile.enable()
    func(*args, **kwargs)
    profile.disable()
    return profile
845cc60b37233f43adb49fecdcf405b2c3de530b
339,333
def get_spark_df(spark, data, schema):
    """
    This function returns a Spark data frame.

    :param spark: Spark session
    :param data: data fields
    :param schema: schema of the data fields
    :return: Spark data frame
    """
    df = spark.createDataFrame(data=data, schema=schema)
    return df
ee044f060f299b14dfa0242dc20f270a2223304d
388,760
import importlib


def load_from_path(path):
    """Load class or function from string path."""
    module, attr = path.rsplit('.', 1)
    mod = importlib.import_module(module)
    return getattr(mod, attr)
6404bbb1dd77bd611dff9dae4c967c99fd5fa22a
251,578
import itertools


def take_line(iterable):
    """
    Advances the iterable to the next newline and returns the passed
    characters as a string. The newline character is not included.

    :param iterable: any iterable
    :return: a string of the next line.
    """
    return ''.join(itertools.takewhile(lambda c: c != '\n', iterable))
0b89ebf5b8bbf1e87c9cd99c7d7db2e808c2970f
406,167
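Illustrative usage (mine): because takewhile also consumes the terminating '\n' from the iterator, repeated calls walk through the text line by line.

it = iter('first\nsecond\nthird')
print(take_line(it))  # 'first'
print(take_line(it))  # 'second'
print(take_line(it))  # 'third'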
import pytz


def dt_to_ms(dt):
    """Converts a datetime to a POSIX timestamp in milliseconds."""
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=pytz.UTC)
    return int(dt.timestamp() * 1000)
aa3050ce15e09b9c1ddeb1edbda8c6e4275f3ce6
18,693
def replace_segment(seq, start, end, replacement):
    """Return the sequence with ``seq[start:end]`` replaced by ``replacement``."""
    return seq[:start] + replacement + seq[end:]
a19b052924f2dbef3fc44a109ab14b4cc07048d8
376,569
def np_to_xyz(
    geom,
    xyz2,
):
    """
    Convert from xyz array to xyz file format in order to write xyz.

    Params:
        geom ((natoms,4) np.ndarray) - system reference geometry
            (atom symbol, x, y, z) from xyz file
        xyz2 ((natoms,3) np.ndarray) - system geometry (x, y, z)

    Returns:
        geom2 (list of (symbol, x, y, z) tuples) - new system geometry
    """
    geom2 = []
    for A, atom in enumerate(geom):
        geom2.append((
            atom[0],
            xyz2[A, 0],
            xyz2[A, 1],
            xyz2[A, 2],
        ))
    return geom2
253e31df4042f4957dbec1e23387261dfb7876b3
517,470
import torch


def expand_as_one_hot(input_, C, ignore_label=None):
    """
    Converts NxSPATIAL label image to NxCxSPATIAL, where each label gets
    converted to its corresponding one-hot vector.

    NOTE: make sure that the input_ contains consecutive numbers starting
    from 0, otherwise the scatter_ function won't work.

    SPATIAL = DxHxW in case of 3D or SPATIAL = HxW in case of 2D

    :param input_: 3D or 4D label image (NxSPATIAL)
    :param C: number of channels/labels
    :param ignore_label: ignore index to be kept during the expansion
    :return: 4D or 5D output image (NxCxSPATIAL)
    """
    assert input_.dim() in (3, 4), f"Unsupported input shape {input_.shape}"

    # expand the input_ tensor to Nx1xSPATIAL before scattering
    input_ = input_.unsqueeze(1)
    # create result tensor shape (NxCxSPATIAL)
    output_shape = list(input_.size())
    output_shape[1] = C

    if ignore_label is not None:
        # create ignore_label mask for the result
        mask = input_.expand(output_shape) == ignore_label
        # clone the src tensor and zero out ignore_label in the input_
        input_ = input_.clone()
        input_[input_ == ignore_label] = 0
        # scatter to get the one-hot tensor
        result = torch.zeros(output_shape).to(input_.device).scatter_(1, input_, 1)
        # bring back the ignore_label in the result
        result[mask] = ignore_label
        return result
    else:
        # scatter to get the one-hot tensor
        return torch.zeros(output_shape).to(input_.device).scatter_(1, input_, 1)
5ced4e82c37f1d803a4b92eb9e089a0d90e7226e
119,129
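A minimal sketch of how the one-hot expansion behaves (my own example): a 1x2x2 label image with C=3 becomes a 1x3x2x2 volume.

import torch

labels = torch.tensor([[[0, 1], [2, 1]]])   # (N=1, H=2, W=2)
one_hot = expand_as_one_hot(labels, C=3)
print(one_hot.shape)         # torch.Size([1, 3, 2, 2])
print(one_hot[0, :, 0, 1])   # tensor([0., 1., 0.]) -- the pixel labelled 1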
def is_triangle(side_a, side_b, side_c):
    """Returns True if the inputs satisfy the triangle inequality for all three sides."""
    return (side_a + side_b > side_c
            and side_a + side_c > side_b
            and side_b + side_c > side_a)
1eb22261edb13c734430e3ef8207bd256436d1c5
628,916
def sipi(b3, b4, b8):
    """
    Structure Intensive Pigment Index
    (Peñuelas, Baret and Filella, 1995).

    .. math:: SIPI = b3/b8 - b4

    :param b3: Green.
    :type b3: numpy.ndarray or float
    :param b4: Red.
    :type b4: numpy.ndarray or float
    :param b8: NIR.
    :type b8: numpy.ndarray or float

    :returns SIPI: Index value

    .. Tip::
        Peñuelas, J., Baret, F., Filella, I. 1995. Semi-empirical
        indices to assess carotenoids/chlorophyll-a ratio from leaf
        spectral reflectance. Photosynthetica 31, 221-230.
    """
    SIPI = b3 / b8 - b4
    return SIPI
b0a7181970e9165d0e75ab7319646fd6bd1c6bbd
695,986
def partition(lis, predicate):
    """
    Splits a list into two lists based on a predicate. The first list will
    contain all elements of the provided list where predicate is true, and
    the second list will contain the rest.
    """
    as_list = list(lis)
    true_list = []
    false_list = []
    for l in as_list:
        pred_value = predicate(l)
        if pred_value is True:
            true_list.append(l)
        elif pred_value is False:
            false_list.append(l)
        else:
            raise Exception("Invalid predicate")
    return true_list, false_list
0d1cdb7e410ccce46c02209df83d16d4f9823a2d
48,468
def plot_line(m, line, colour='b', lw=1, alpha=1):
    """
    Plots a line given a line with lon,lat coordinates.

    Note:
        This means you probably have to call shapely ``transform`` on your
        line before passing it to this function. There is a helper partial
        function in utils called ``utm2lola`` which makes this easy.

    Args:
        m (Basemap): A matplotlib Basemap.
        line (shape): A shapely geometry.
        colour (str): A colour from the matplotlib dictionary.

    Returns:
        list: A list of matplotlib lines.
    """
    lo, la = line.xy
    x, y = m(lo, la)
    return m.plot(x, y,
                  color=colour,
                  linewidth=lw,
                  alpha=alpha,
                  solid_capstyle='round')
4cf04142b7205116ff52c93ab32825a4874a28db
682,877
import logging
import re


def get_landsat_angles(productdir):
    """
    Get Landsat angle bands file path.

    Parameters:
        productdir (str): path to directory containing angle bands.

    Returns:
        sz_path, sa_path, vz_path, va_path: file paths to solar zenith,
        solar azimuth, view (sensor) zenith and view (sensor) azimuth.
    """
    img_list = list(productdir.glob('**/*.tif'))
    logging.info('Load Landsat Angles')

    pattern = re.compile('.*_solar_zenith_.*')
    sz_path = list(item for item in img_list if pattern.match(str(item)))[0]
    pattern = re.compile('.*_solar_azimuth_.*')
    sa_path = list(item for item in img_list if pattern.match(str(item)))[0]
    pattern = re.compile('.*_sensor_zenith_.*')
    vz_path = list(item for item in img_list if pattern.match(str(item)))[0]
    pattern = re.compile('.*_sensor_azimuth_.*')
    va_path = list(item for item in img_list if pattern.match(str(item)))[0]
    return sz_path, sa_path, vz_path, va_path
8b22cb6fc1ea8ae3f3869664aff2f165c418f20c
24,364
def _get_subword_units(token, gram):
    """Return subword-units presentation, given a word/token."""
    if token == '</s>':  # special token for padding purpose.
        return [token]
    t = '#' + token + '#'
    return [t[i:i + gram] for i in range(0, len(t) - gram + 1)]
6790aeb5fdadc082bc472ef4d8dd149a61723a9c
290,126
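Example output (mine): the '#' markers make word boundaries part of the subword units, fastText-style.

print(_get_subword_units('cat', 3))   # ['#ca', 'cat', 'at#']
print(_get_subword_units('</s>', 3))  # ['</s>'] -- padding token passes through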
from typing import Any, Dict, Iterable, Tuple


def provider_dict(items: Iterable[Tuple[str, Any]]) -> Dict[str, Any]:
    """Prepare provider data for JSON dump."""
    output = {}
    for key, value in items:
        if key == "ranges":
            output[key] = {
                str(target): [str(net) for net in nets]
                for target, nets in value.items()
            }
        else:
            output[key] = value
    return output
0ec3170d6f198b5cc61341fb97a4baa24391a13b
281,387
def qw(s):
    """
    Split a whitespace-separated string of passes into a tuple.

    Examples:
        >>> qw(' -correlated-propagation -scalarrepl -lowerinvoke')
        ('-correlated-propagation', '-scalarrepl', '-lowerinvoke')

    Args:
        s (str): s is a list of all the possible passes that can be used
            (the passes should be separated by whitespace).

    Returns:
        Returns a tuple of strings where each element is a pass (used for
        optimization) from s.
    """
    return tuple(s.split())
9e0ec379da947e6210115cc4be4265414929e5d6
115,172
def admin_display(short_description):
    """
    A decorator that will make a method a display method for the Django
    admin, basically adding a name to it.
    """
    def inner(func):
        func.short_description = short_description
        return func
    return inner
e609f2530a1c9e736a2216a99d9142027d955a97
266,318
import itertools


def sumlist(x):
    """
    sumlist(x)

    Returns the running sum of a list of numbers, x.

    For example: sumlist([1, 2, 3]) would return [1, 3, 6].
    """
    try:
        return list(itertools.accumulate(x))
    except TypeError:
        raise TypeError("unsupported argument type for 'sumlist'")
11250fff0b1794accd2fccf53887480b2879cfd9
506,925
def get_best_response_actions_as_string(best_response_actions):
    """Turns a dict<bytes, int> into a bytestring compatible with C++.

    i.e. the bytestring can be copy-pasted as the brace initialization for a
    {std::unordered_,std::,absl::flat_hash_}map<std::string, int>.

    Args:
        best_response_actions: A dict mapping bytes to ints.

    Returns:
        A bytestring that can be copy-pasted to brace-initialize a C++
        std::map<std::string, T>.
    """
    best_response_keys = sorted(best_response_actions.keys())
    best_response_strings = [
        "%s: %i" % (k, best_response_actions[k]) for k in best_response_keys
    ]
    return "{%s}" % (", ".join(best_response_strings))
cf2b475d6bb76d262c17dc7753f1624e38cc69f4
178
def csv_append(csv_string, item):
    """
    Appends an item to a comma-separated string.

    If the comma-separated string is empty/None, just returns item.
    """
    if csv_string:
        return ",".join((csv_string, item))
    else:
        return item
38d1bde31225a8bf42156f3472a8bad99d3d9dfb
61,650
def _check_n(fields, name, dim=0):
    """
    Check the field consistency.

    Args:
        fields (list): a list of fields
        name (str): name to use when raising error
        dim (int): dimension for comparing

    Returns:
        int or None
    """
    n_candidates = []
    for i in fields:
        if i is not None:
            n_candidates.append(i.shape[dim])
    if len(n_candidates) > 0:
        if len(list(set(n_candidates))) > 1:
            raise ValueError(f"{name} inconsistent")
        return n_candidates[0]
    return None
0b8086f9e88363db414be8cf2fcab6d893c2978a
333,000
import torch


def _compare_fn(preds, target) -> dict:
    """Comparison function for map implementation.

    Official pycocotools results calculated from a subset of
    https://github.com/cocodataset/cocoapi/tree/master/results

    All classes
     Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.706
     Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.901
     Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.846
     Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.689
     Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.800
     Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.701
     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.592
     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.716
     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.716
     Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.767
     Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.800
     Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.700

    Class 0
     Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.725
     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.780

    Class 1
     Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.800
     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.800

    Class 2
     Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.454
     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.450

    Class 3
     Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = -1.000
     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = -1.000

    Class 4
     Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.650
     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.650

    Class 5
     Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.900
     Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.900
    """
    return {
        "map": torch.Tensor([0.706]),
        "map_50": torch.Tensor([0.901]),
        "map_75": torch.Tensor([0.846]),
        "map_small": torch.Tensor([0.689]),
        "map_medium": torch.Tensor([0.800]),
        "map_large": torch.Tensor([0.701]),
        "mar_1": torch.Tensor([0.592]),
        "mar_10": torch.Tensor([0.716]),
        "mar_100": torch.Tensor([0.716]),
        "mar_small": torch.Tensor([0.767]),
        "mar_medium": torch.Tensor([0.800]),
        "mar_large": torch.Tensor([0.700]),
        "map_per_class": torch.Tensor([0.725, 0.800, 0.454, -1.000, 0.650, 0.900]),
        "mar_100_per_class": torch.Tensor([0.780, 0.800, 0.450, -1.000, 0.650, 0.900]),
    }
aa69c14e57eb3ba08765e06a4c888f2e62eadb90
406,788
import re


def escape_ansi(line):
    """Remove colors from error message."""
    ansi_escape = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]")
    return ansi_escape.sub("", line)
829c69d83776b1e0a80b5a6a2fcd76b2e054b9fa
378,456
def tests_generator(slug, names):
    """
    Returns a string containing repetitive tests.

    :param str slug: Slug containing the prototype test.
    :param list names: Names to be substituted into the slug.
    """
    tests = ""
    slug = slug.strip()
    for name in names:
        tests += slug.replace("%s", name)
        tests += "\n\n"
    return tests
273fdc4c0cdc9d9ec6628d86a870c9b5d852e72f
268,806
def matching_subset(main, sub):
    """Check that all the keys in sub are in main and agree with main's values.

    Example:
        main = {"a": 3, "b": 4, "c": 10}
        sub1 = {"a": 3, "c": 10}
        sub2 = {"a": 3, "foo": 5}
        sub3 = {"a": 0, "b": 1}

    sub1 is a "matching subset" of main since all its keys match with main.
    sub2 is not a "matching subset" of main since the field "foo" is not in main.
    sub3 is not a "matching subset" since sub3["a"] == 0 but main["a"] == 3.
    """
    # check that all keys of sub are in main
    main_keys = set(main)
    sub_keys = set(sub)
    if not sub_keys.issubset(main_keys):
        return False

    # check that all values of sub match with main
    for key in sub_keys:
        if main[key] != sub[key]:
            return False
    return True
a1b541b93897866a8ffeb79bd963d136ca042f5e
509,405
def format_value(text):
    """
    Format text.

    Replaces newlines and carriage returns with spaces so that each line of
    the output is one document.
    """
    # Replace on the str first: calling .replace with str arguments on the
    # bytes returned by encode() would raise a TypeError on Python 3.
    return text.replace('\n', ' ').replace('\r', ' ').encode('utf8')
475dc34a1ad3588bf8ef103a53fe09582a25b002
178,832
def rna_id(entry):
    """Get the UPI for the entry, or fail if there is none."""
    if entry["DB"] == "RNAcentral":
        return entry["DB_Object_ID"]
    raise ValueError("All entries are expected to come from RNAcentral")
2990b6eb29f7d5f3e8c8e3cf8ef50d5f7ccd81f5
147,789
def build_xaxis(num, radius_increment=0.05):
    """Calculate the radius/diameter for the x-axis in plots."""
    x_axis = {
        'radius': [],
        'diameter': []
    }
    radius = 0
    for _ in range(num):
        radius = round(radius, 2)
        diameter = round(radius * 2, 1)
        x_axis['radius'].append(radius)
        x_axis['diameter'].append(diameter)
        radius += radius_increment
    return x_axis
f3de8bb1abb6190350f2939ea12148b39ffe4ed8
477,504
from typing import List, Sequence, Tuple


def get_metadata_indices(license_numbers: List[int],
                         start: int = 0) -> Sequence[Tuple[int, int]]:
    """Returns the metadata indices range in the license numbers array.

    Args:
        license_numbers (List[int]): [description]
        start (int, optional): [description]. Defaults to 0.

    Returns:
        Sequence[Tuple[int, int]]: [description]
    """
    # Handling corner cases. These cases won't be hit on recursive calls
    # unless the data is corrupted.
    if license_numbers == [] or len(license_numbers) < (start + 1):
        return []

    children_count = license_numbers[start]
    metadata_count = license_numbers[start + 1]
    # print(children_count, metadata_count)
    start += 2

    children_metadata_indices: List[Tuple[int, int]] = []
    for _ in range(children_count):
        metadata_span = get_metadata_indices(license_numbers, start)
        children_metadata_indices.extend(metadata_span)
        start = metadata_span[-1][1]
    children_metadata_indices.append((start, start + metadata_count))
    return children_metadata_indices
1ad51d1d75df939732f6f7e92f77df615b31f822
562,273
import random


def generate_random_sequence(length, prob_dist=[0.25, 0.25, 0.25, 0.25]):
    """
    Generates a DNA sequence of the given length according to the given
    nucleotide distribution.
    """
    seq = ''
    for p in range(length):
        n = random.random()
        if n < prob_dist[0]:
            seq += 'A'
        elif n < sum(prob_dist[0:2]):
            seq += 'C'
        elif n < sum(prob_dist[0:3]):
            seq += 'G'
        else:
            seq += 'T'
    return seq
35d3eb9e92f2b4cf2ac23d018c44c94c8b1629ce
598,174
def is_intel_email(email):
    """Checks that email is a valid Intel email."""
    return (email
            and len(email) > 10
            and " " not in email
            and email.lower().endswith("@intel.com"))
2eb72e4cbe61fbf9c2b4b28d39c0e4bb62d6a324
148,963
def missing_integers(l):
    """Find the gaps in a sequence of integers."""
    return sorted(set(range(min(l), max(l) + 1)) - set(l))
5322f2d618c44e6765c64092e67420859fe61f0e
192,237
def user_key2(application2):
    """User key for the second application."""
    key = application2["user_key"]
    return key
05b241dc708344dbe8a3aa3015e7b8774e6f6cb0
497,855
def getQaMask(img):
    """Gets the BQA quality band as a boolean layer.

    1 indicates a good pixel and 0 indicates a bad pixel.

    Args:
        img: An ee.Image originating from msslib.getCol().

    Returns:
        An image with one boolean band named BQA_mask.
    """
    return img.select('BQA').eq(32).rename('BQA_mask')
fd8a54d4a26dfd58b3faffaa3561cca6d0e933c5
300,117
def get_comm_cost(tensor_size, coef, intercept):
    """Returns communication cost (in microsecs) for the given tensor size."""
    return int(round(coef * tensor_size + intercept))
47e52cff6c6f22a126e1e4f67f7919903583c6eb
210,210
def encode_address(address: int) -> bytes:
    """Encodes an HDLC address as a one-terminated LSB varint."""
    result = bytearray()
    while True:
        # Low 7 bits of the address, shifted left to leave bit 0 free as the
        # termination flag.
        result += bytes([(address & 0x7f) << 1])
        address >>= 7
        if address == 0:
            break
    result[-1] |= 0x1  # set the termination bit on the final byte
    return result
ee68f5f73737146550d19c540fa1f3123d2acd45
353,337
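For illustration, a hypothetical inverse (mine, not part of the source) that undoes the encoding: each byte carries seven address bits shifted left by one, and bit 0 flags the final byte.

def decode_address(data: bytes) -> int:
    # Hypothetical inverse of encode_address, for illustration only.
    address = 0
    for shift, byte in enumerate(data):
        address |= (byte >> 1) << (7 * shift)
        if byte & 0x1:  # termination bit set on the last byte
            break
    return address

assert decode_address(encode_address(0)) == 0
assert decode_address(encode_address(128)) == 128
assert decode_address(encode_address(0xABCDEF)) == 0xABCDEF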
def noneorcomplex(s):
    """Turn empty or 'none' to None, else evaluate to complex."""
    if s.lower() in ('', 'none'):
        return None
    else:
        return complex(eval(s, {}, {}))
81154ef925fbcd695e90c33f3620ba8ba75b2832
475,574
def load_filepaths(filename):
    """Read in a list of file paths.

    Args:
        filename: A text file containing a list of file paths. Assume that
            each line has one file path.

    Returns:
        filepaths: A list of strings where each is a file path.
    """
    with open(filename) as f:
        filepaths = [line.strip() for line in f]
    return filepaths
2475a28b2afaf8380502270d79bc2f756dd5a25e
628,679
import random


def stat_check(stat1, stat2):
    """
    Checks if stat1 wins over stat2 in a competitive stat check.
    """
    roll1 = random.randrange(stat1)
    roll2 = random.randrange(stat2)
    return roll1 >= roll2
64b96fad051d20c182dfbf719403590951f1b13c
681,616
from pathlib import Path
from typing import Optional


def secret(
    name: str,
    secrets_dir: Path = Path("/run/secrets")
) -> Optional[str]:
    """Load the named secret from /run/secrets and return None otherwise."""
    path = secrets_dir / name
    if path.exists():
        return path.read_text().strip()
    return None
47ad7381157fa469b963c7fc769611c39713044d
592,065
def _join_lines(lines):
    """
    Simple join, except we want empty lines to still provide a newline.
    """
    result = []
    for line in lines:
        if not line:
            if result and result[-1] != '\n':
                result.append('\n')
        else:
            result.append(line + '\n')
    return ''.join(result).strip()
83735124391dde628e4eb651dc663057a0b54eea
390,859
def get_list_inverse_index(unique_ids):
    """Get value to position index from a list of unique ids.

    Args:
        unique_ids: A list of unique integers or strings.

    Returns:
        result: a dict from value to position

    Raises:
        TypeError: If unique_ids is not a list.
    """
    if not isinstance(unique_ids, list):
        raise TypeError('unique_ids must be a list')
    result = dict()
    for i, unique_id in enumerate(unique_ids):
        result[unique_id] = i
    return result
d2ff690106271b18784208f953e4e98d7f74840f
576,201
def get_dense_network_shapes(n_layers, hidden_size, n_features, n_outputs):
    """
    Helper function to generate the input/output shapes for the layers of a
    densely connected network.

    :param n_layers: Number of hidden layers in the network
    :param hidden_size: How many hidden neurons to use
    :param n_features: Number of features in the original input
    :param n_outputs: Output size/number of target variables
    :return:
    """
    shapes = {'input': (n_features, hidden_size),
              'hidden': [],
              'output': (hidden_size * (n_layers + 1) + n_features, n_outputs)}
    for i in range(n_layers):
        shapes['hidden'].append((hidden_size * (i + 1) + n_features, hidden_size))
    return shapes
ea5e74fcdc3fe0b923f1377e202284f0576bff87
32,112
def split_filtStr(filtStr):
    """
    ...doctest:
        >>> split_filtStr('a>b;c<=d; e == f; bc=[0,1]')
        ['a>b', 'c<=d', 'e == f', 'bc=[0,1]']
        >>> split_filtStr('a>b AND c<=d AND e == f')
        ['a>b', 'c<=d', 'e == f']
        >>> split_filtStr('bc=[0,1]')
        ['bc=[0,1]']
        >>> split_filtStr('zm = [3,4,5] AND length >= 1000')
        ['zm = [3,4,5]', 'length >= 1000']
    """
    if ';' in filtStr:
        return [s.strip() for s in filtStr.split(';')]
    elif ' AND ' in filtStr:
        return [s.strip() for s in filtStr.split(' AND ')]
    if ',' in filtStr and '[' not in filtStr:
        msg = ("You're doing it wrong! You have ',' in the filter-string "
               "'{}', but not '['. That means you are probably trying to use "
               "a comma to separate conditions, which we do not support. "
               "Please use ' AND ' to separate conditions.").format(filtStr)
        raise ValueError(msg)
    return [filtStr]
2f2221afeb53ac4bfa626dfb4b7ad3082fc2c7c1
636,951
def error_response(status: str, message: str):
    """
    This function returns a dictionary of status and message.

    :param status: The status code of the response
    :type status: str
    :param message: The message you want to display to the end user
    :type message: str
    :return: A dictionary of status and message
    """
    payload = {
        "status": status,
        "message": message
    }
    return payload
574ffb9644793633348201d78e9aa2870067e994
341,569
def barxor(a, b):  # xor two strings of different lengths
    """XOR the given byte strings.

    If they are not of equal length, the result is truncated to the length
    of the shorter string (zip stops at the shorter input, making the
    explicit length check unnecessary).
    """
    return [ord(x) ^ ord(y) for (x, y) in zip(a, b)]
cbe2a490eb1ee901160511665f948b6699b20015
221,009
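Quick example (mine): the result is a list of ints, truncated to the shorter input.

print(barxor('abc', 'ab'))  # [0, 0]  ('a'^'a', 'b'^'b')
print(barxor('ab', 'acc'))  # [0, 1]  ('a'^'a', 'b'^'c')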
def _object_lookup_to_pk(model, lookup):
    """Performs a lookup if `lookup` is not simply a primary key, which is
    otherwise returned directly."""
    try:
        pk = int(lookup)
    except (TypeError, ValueError):
        # otherwise, attempt a lookup
        try:
            pk = model._default_manager.get(**lookup).pk
        except model.DoesNotExist:
            pk = None
    return pk
20d282dc9c85313c71d16b878584cb155f538ecd
468,812
def compute_Rn(albedo_c, albedo_s, T_air, T_c, T_s, e_atm, Rs_c, Rs_s, F):
    """Compute Soil and Canopy Net Radiation.

    Parameters
    ----------
    albedo_c : ee.Image
    albedo_s : ee.Image
    T_air : ee.Image
        Air temperature (Kelvin).
    T_c : ee.Image
        Canopy temperature (Kelvin).
    T_s : ee.Image
        Soil temperature (Kelvin).
    e_atm : ee.Image
    Rs_c : ee.Image
    Rs_s : ee.Image
    F : ee.Image

    Returns
    -------
    Rn_s : ee.Image
        Soil net radiation (W m-2)
    Rn_c : ee.Image
        Canopy net radiation (W m-2)
    Rn : ee.Image
        Net radiation (W m-2)
    """
    # Long-wave extinction coefficient [-]
    kL = 0.95
    # Soil emissivity [-]
    eps_s = 0.94
    # Canopy emissivity [-]
    eps_c = 0.99

    L_c = T_c.expression(
        'eps_c * 0.0000000567 * (T_c ** 4)',
        {'eps_c': eps_c, 'T_c': T_c})
    L_s = T_s.expression(
        'eps_s * 0.0000000567 * (T_s ** 4)',
        {'eps_s': eps_s, 'T_s': T_s})
    Rle = T_air.expression(
        'e_atm * 0.0000000567 * (T_air ** 4)',
        {'e_atm': e_atm, 'T_air': T_air})

    # The variable names used in each expression must match the keys of the
    # accompanying dict, so L_c/L_s are spelled consistently in both.
    Rn_c = albedo_c.expression(
        '((1 - albedo_c) * Rs_c) + '
        '((1 - exp(-kL * F)) * (Rle + L_s - 2 * L_c))',
        {'albedo_c': albedo_c, 'F': F, 'kL': kL, 'L_c': L_c, 'L_s': L_s,
         'Rle': Rle, 'Rs_c': Rs_c})
    Rn_s = albedo_s.expression(
        '((1 - albedo_s) * Rs_s) + '
        '((exp(-kL * F)) * Rle) + ((1 - exp(-kL * F)) * L_c) - L_s',
        {'albedo_s': albedo_s, 'F': F, 'kL': kL, 'L_c': L_c, 'L_s': L_s,
         'Rle': Rle, 'Rs_s': Rs_s})
    Rn = Rn_s.expression(
        'Rn_s + Rn_c', {'Rn_s': Rn_s, 'Rn_c': Rn_c})
    return Rn_s, Rn_c, Rn
bde06911937a44d479999854d224d07bc89b15a9
342,921
def a_send(text, ctx):
    """Send text line to the controller."""
    ctx.ctrl.send(text)
    return True
7a5b8412f5099138afedc892c893f413ab4eba21
35,766
def get_duplicate_indices(words):
    """Given a list of words, loop through the words and check for each word
    if it occurs more than once.

    If so, return the index of its first occurrence. For example, in the
    following list 'is' and 'it' occur more than once, and they are at
    indices 0 and 1, so you would return [0, 1]:

    ['is', 'it', 'true', 'or', 'is', 'it', 'not?'] => [0, 1]

    Make sure the returned list is unique and sorted in ascending order.
    """
    # one way
    # dups_indices_lst = []
    # sort_set_lst = sorted(set(words), key=lambda x: words.index(x))
    # for word in sort_set_lst:
    #     if words.count(word) > 1:
    #         r = words.index(word)
    #         dups_indices_lst.append(r)
    # return dups_indices_lst

    # another way
    duplicate_set = {word for word in words if words.count(word) > 1}
    result = sorted([words.index(word) for word in duplicate_set])
    return result
0ea94957d5ff8ae9cd7488ea9b4adcf33020ec2c
663,941
def _to_time(integ, frac, n=32):
    """Return a timestamp from an integral and fractional part.

    Parameters:
        integ -- integral part
        frac -- fractional part
        n -- number of bits of the fractional part

    Returns:
        timestamp
    """
    return integ + float(frac) / 2**n
b558806f174f5305bf1c1b0c20c2a5fab8331037
666,071
def t_pulse(t_2, f_2, f, DM):
    """
    Function for generating the time values of a pulse given the frequency
    values. Eq. 1 in the FDMT paper.

    t_pulse, t_2 in s
    f_2, f in MHz
    DM in pc cm^-3
    """
    return t_2 + 4.148808 * DM * ((f / 1000.)**(-2) - (f_2 / 1000.)**(-2)) / 1000.
12604a6ea9eb7a808eacb4fbeff972d5180c0429
607,236
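A worked example (mine): with DM = 100 pc cm^-3, a pulse at 1400 MHz arrives roughly 27 ms after the same pulse at 1500 MHz.

dt = t_pulse(0.0, 1500.0, 1400.0, 100.0)
print(round(dt * 1000, 1))  # 27.3 (milliseconds)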
def merge_dict(*args):
    """
    Merges any number of dictionaries into a single dictionary.

    # Notes

    In Python 3.5+, you can just do this:

    ```python
    r = {**x, **y}
    ```

    But if you want a single expression in Python 3.4 and below:

    ```python
    r = merge_dict(x, y)
    ```
    """
    result = {}
    for x in args:
        result.update(x)
    return result
9211ad5c859609194199e02960af497373085eb5
568,717
def _check_level_to_classname(check):
    """Return the appropriate css classname for the check level."""
    if check.has_failed:
        if check.level == check.LEVEL_ERROR:
            return 'danger'
        elif check.level == check.LEVEL_WARNING:
            return 'warning'
        else:
            return 'info'
    return 'default'
3c2e92eaae71fc04e356a0953d2efd86b5494e46
523,822
def rgb2hex(rgb):
    """Converts RGB colours to HEX.

    :param rgb: RGB colour
    :return: HEX colour as a string with a '#'
    """
    return '#' + '%02x%02x%02x' % (rgb.r, rgb.g, rgb.b)
7ddd7fab7840a42b2c090d107ffbf60d0ac8b630
52,856
def _parse_outputs(outputs_data):
    """
    Parses outputs from a .tfstate file.

    :param outputs_data: dict
        "output": {
            "value": string,
            "type": string
        }
    :return: dict, with the following structure:
        {
            "{name}": string,
        }
    """
    res_outputs = {}
    for name, value_data in outputs_data.items():
        extracted_value = value_data["value"]
        if isinstance(extracted_value, list):
            extracted_value = ", ".join(extracted_value)
        res_outputs.update({name: extracted_value})
    return res_outputs
5a732a9d23e6e2d2480ce623e30e6cdecaad80bb
599,859
import re


def to_snake_case(text: str):
    """
    Convert string to snake_case.

    Example: 'Hi there!' -> 'hi_there_'
    """
    # Replace runs of non-word characters with '_' to match the example
    # above; a '-' here would produce kebab-case instead.
    return re.sub(r"\W+", "_", str(text)).lower()
c6d6962d4b5fa34f1bbe3cd762c9871cf4a5e3bd
128,480
import functools
import warnings
from typing import Any, Callable, Optional


def deprecated(*, deadline: str, fix: str,
               name: Optional[str] = None) -> Callable[[Callable], Callable]:
    """Marks a function as deprecated.

    Args:
        deadline: The version where the function will be deleted (e.g. "v0.7").
        fix: A complete sentence describing what the user should be using
            instead of this particular function (e.g. "Use cos instead.")
        name: How to refer to the function. Defaults to `func.__qualname__`.

    Returns:
        A decorator that decorates functions with a deprecation warning.
    """
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def decorated_func(*args, **kwargs) -> Any:
            qualname = (func.__qualname__ if name is None else name)
            warnings.warn(
                f'{qualname} was used but is deprecated.\n'
                f'It will be removed in cirq {deadline}.\n'
                f'{fix}\n',
                DeprecationWarning,
                stacklevel=2)
            return func(*args, **kwargs)

        decorated_func.__doc__ = (
            f'THIS FUNCTION IS DEPRECATED.\n\n'
            f'IT WILL BE REMOVED IN `cirq {deadline}`.\n\n'
            f'{fix}\n\n'
            f'{decorated_func.__doc__ or ""}')
        return decorated_func

    return decorator
2a7f27ed60bc10d6dcfb2ddf68d17230206e8a0b
604,733
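Usage sketch (mine; the decorator follows cirq's deprecation helper, so the warning text mentions cirq versions):

@deprecated(deadline='v0.7', fix='Use new_op instead.')
def old_op(x):
    return x

old_op(1)  # emits a DeprecationWarning naming old_op, v0.7, and the fix text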
def _get_qpoint_pos(pos):
    """Return the coordinates of a QPointF object."""
    return pos.x(), pos.y()
5f7c850599bacc335dbcba73cb23922abccc0107
58,567
def has_no_numbers(value):
    """Checks that the string does not contain numbers."""
    if isinstance(value, str):
        return not any(char.isdigit() for char in value)
    return False
97428385a68bd461d5cad3528e38ecc1861b2828
100,638
def get_circle_points(xy, radius):
    """
    Returns tuples of (x0, y0), (x1, y1) for a circle centered at x, y with
    the given radius.

    Arguments:
        xy: tuple of x, y coordinates
        radius: radius of circle to draw

    Returns:
        [(x0, y0), (x1, y1)] for the bounding box of the circle centered at x, y
    """
    x, y = xy
    x0, y0 = x - radius, y - radius
    x1, y1 = x + radius, y + radius
    return [(x0, y0), (x1, y1)]
d89145d1ec5ede042003fc9f952e78f6c136424c
124,069
def listify(value):
    """
    Convert an option specified as a string to a list. Allow both comma and
    space as delimiters. Passes lists transparently.
    """
    if isinstance(value, (list, tuple)):
        # Already a sequence. Return as a list.
        return list(value)
    # assume `value` is a string; treat commas as whitespace so both
    # delimiters work, as the docstring promises
    return value.replace(',', ' ').split()
8b8636884ecaddf9f558ca8cb4d80583c01b59bf
653,041
def base_cli(parser):
    """Generate CLI with arguments shared among all interfaces."""
    parser.add_argument('out_dir', type=str,
                        help='The path to the output directory. Created if it '
                             'does not already exist')
    parser.add_argument('--regressor_files', nargs='+', type=str,
                        help='One or more tab-separated files with regressors '
                             'in each column. The number of files must match '
                             'the number of input files and must be in the '
                             'same order. The number of rows in each file must '
                             'match the number of timepoints in their '
                             'respective input files. Can also be a single '
                             'string with a wildcard (*) to specify all '
                             'files matching the file pattern. If so, these '
                             'files are naturally sorted by file name prior to '
                             'extraction. Double check to make sure these are '
                             'correctly aligned with the input files (see the '
                             'parameters.json in the output)')
    parser.add_argument('--regressors', nargs='+', type=str,
                        help='Regressor names or strategy to use for confound '
                             'regression. Must be a) list of specified column '
                             'names in all of the regressor_files, b) a '
                             'predefined strategy by load_confounds, or c) a '
                             'list compatible with load_confounds flexible '
                             'denoising strategy options. See the documentation '
                             'https://github.com/SIMEXP/load_confounds. If no '
                             'regressor information provided but regressor '
                             'files are provided, then all regressors in '
                             'regressor files are used')
    parser.add_argument('--load_confounds_kwargs', type=str,
                        help='Keyword arguments for load confound either '
                             'predefined or flexible strategies. Input must be '
                             'a Python dictionary wrapped in double quotes. '
                             'Refer to documentation for the available '
                             'arguments for each load confound strategy: '
                             'https://github.com/SIMEXP/load_confounds')
    parser.add_argument('--standardize', action='store_true', default=False,
                        help='Whether to standardize (z-score) each timeseries. '
                             'Default: False')
    parser.add_argument('--t_r', type=int,
                        help='The TR of the functional files, specified in '
                             'seconds. Required if temporal '
                             'filtering/detrending is specified')
    parser.add_argument('--high_pass', type=float,
                        help='High pass filter cut off in Hertz. Do not use if '
                             'high pass cosine regressors are specified in '
                             '`regressors`')
    parser.add_argument('--low_pass', type=float,
                        help='Low pass filter cut off in Hertz. Do not use if '
                             'low pass cosine regressors are specified in '
                             '`regressors`')
    parser.add_argument('--detrend', action='store_true', default=False,
                        help='Temporally detrend the data. Default: False')
    parser.add_argument('--discard_scans', type=int,
                        help='Discard the first N scans of each functional '
                             'image')
    parser.add_argument('--n_jobs', type=int, default=1,
                        help='The number of CPUs to use if parallelization is '
                             'desired. Default: 1 (serial processing)')
    parser.add_argument('--n_decimals', type=int,
                        help='Specify the number of decimals for output '
                             'timeseries files. Fewer decimals are recommended '
                             'for reducing disk-space, particularly for large '
                             'extractions')
    parser.add_argument('-c', '--config', type=str,
                        help='A configuration .json file to pass parameters. '
                             'This will overwrite command-line arguments if '
                             'the same parameter is specified in both. See '
                             'online documentation for formatting and what '
                             'keys to include')
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='Print out extraction progress')
    return parser
d6c4249e3cb6ae6859563d282b9a4911ea36dfc0
375,961
def _host_is_direct_connect(host: str) -> bool:
    """Check if a host is a unifi direct connect domain."""
    return host.endswith(".ui.direct")
343019500fce8a4b8d977a6a224252603c332471
588,194
def prime_sieve(maximum) -> list:
    """Return a list of primes < maximum."""
    maximum = int(maximum)
    prime_list = []
    primalities = [1] * maximum
    for i in range(2, maximum):
        if primalities[i]:
            prime_list.append(i)
            for j in range(i, len(primalities), i):
                primalities[j] = 0
    return prime_list
74779acb8aba278fd80e15130231c1304d8a4cba
534,655
def reverter_1(frase: str) -> str:
    """
    >>> reverter_1('the sky is blue')
    'blue is sky the'
    >>> reverter_1('uma baita frase')
    'frase baita uma'
    """
    lista_de_palavras = frase.split()  # linear time and memory
    palavras_reordenadas = reversed(lista_de_palavras)
    return ' '.join(palavras_reordenadas)
bff89726903c0d50cc7e6d61a888bad041518ae9
94,134
def validate_provider_name(name, supported_majors):
    """
    Validates the provider name against the format in the specification:
    <protocol-version>.dnscrypt-cert.<zone>
    """
    try:
        version, cert, _ = name.split('.', 2)
        return version in supported_majors and cert == 'dnscrypt-cert'
    except (AttributeError, TypeError, ValueError):
        return False
83af6362dce2b28b83f904fd635c0988722acddb
377,972
def respond(input_string, answers):
    """Find the input string in the dictionary and return the value.

    Parameters
    ----------
    input_string : string
        String whose words are looked up as keys in the dictionary of answers.
    answers : dictionary
        dictionary of answers

    Returns
    -------
    output : string
        String containing the value of the keys in the dictionary of answers.
    """
    output = None
    # Check whether the input string contains any of the keys in the
    # dictionary of answers; iterate over words rather than characters.
    for word in input_string.split():
        if word in answers.keys():
            output = answers[word].get_info()
    return output
4a49cc3fef326da54769bb94b40847e4df2096f4
160,628
from typing import List


def gen_table_str(class_list: List) -> str:
    """
    Given a post-clustering class list, return a head-expanded generalized
    list for the Java backend to parse.

    :param class_list: class list after clustering, e.g. [1 0 1 1 1 0]
    :return: generalized list in head-expanded form, e.g. ((0,2,3,4),1,5)
    """
    # Clusters with more components are nested deeper, so first collect the
    # component indices per class id, then sort the sets by size before
    # building the result.

    # class id -> set of component indices
    set_dict = dict()
    for idx, class_id in enumerate(class_list):
        if class_id not in set_dict:
            set_dict[class_id] = set()
        set_dict[class_id].add(idx)

    # list of component-index sets
    set_list = [st for st in set_dict.values()]
    # sort by the number of elements in each set, largest first
    set_list.sort(key=lambda x: len(x), reverse=True)

    # build the nested-list representation of the result
    res = []
    for st in set_list:
        if len(res) > 0:
            res = [res]
        res.extend(st)

    # stringify, turn square brackets into parentheses, drop extra spaces
    return str(res).replace('[', '(').replace(']', ')').replace(' ', '')
ea2a8c897f28655b74ebbc9ff32c9bb342ceaff3
553,503
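Tracing the docstring example (mine; CPython's ordering of small-integer sets makes this output deterministic here):

print(gen_table_str([1, 0, 1, 1, 1, 0]))  # ((0,2,3,4),1,5)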
def moira_user_emails(member_list):
    """
    Transform a list of moira list members to emails.

    Assumes kerberos id => <kerberos_id>@mit.edu

    Args:
        member_list (list of str): List of members returned by Moira

    Returns:
        list of str: Member emails in list
    """
    return list(
        map(
            lambda member: member if "@" in member else f"{member}@mit.edu",
            filter(None, member_list),
        )
    )
0fea9c6140a4ee6e79960ee97cbce721cda931fe
475,282
def is_verb(string):
    """
    Check if a predstring is for a verb or a noun.
    """
    return string.split('_')[-2] == 'v'
a25210710a82d031ff49a2b9d7fc5e86f0f8d2a6
199,864
def rescale_list_to_range(original, limits):
    """
    Linearly rescale values in original list to limits (minimum and maximum).

    :example:

    >>> rescale_list_to_range([1, 2, 3], (0, 10))
    [0.0, 5.0, 10.0]
    >>> rescale_list_to_range([1, 2, 3], (-10, 0))
    [-10.0, -5.0, 0.0]
    >>> rescale_list_to_range([1, 2, 3], (0j, 10j))
    [0j, 5j, 10j]

    :param original: Original list or list-like to be rescaled.
    :type original: list
    :param limits: Tuple of two floats, min and max, to constrain the new list
    :type limits: tuple
    :return: Original list rescaled to fit between min and max
    :rtype: list
    """
    new_min, new_max = limits[0:2]
    old_min, old_max = min(original), max(original)
    if old_min == old_max:
        # Degenerate input (all values equal): map every element to the
        # midpoint of the target range; the original expression applied
        # scalar arithmetic to the whole list and would raise a TypeError.
        return [(new_max + new_min) / 2 for _ in original]
    return [new_max * (v - old_min) / (old_max - old_min)
            + new_min * (old_max - v) / (old_max - old_min)
            for v in original]
bdd38bb24b597648e4ca9045ed133dfe93ad4bd8
706,047
def all_phases_positive(pha):
    """
    Converts an array of phases to be positive definite.

    :param pha: 1-dimensional ndarray
        The phases to be modified.
    :return: pha: 1-dimensional ndarray
        Positive definite version of pha.
    """
    while not (pha >= 0).all():  # make sure that all elements are >= 0
        pha[pha < 0] = pha[pha < 0] + 1.0
    return pha
fcb67d95f5516758d1d28cccbdded543d0885c06
252,859
from typing import List


def permutar(arr: List[int]) -> List[List[int]]:
    """Return all permutations of the array.

    :param arr: List of unique integers.
    :arr type: List[int]
    :return: List of permutations.
    :rtype: List[List[int]]
    """
    if len(arr) == 0:
        return []
    if len(arr) == 1:
        return [arr]
    t = []
    for i in range(len(arr)):
        # permute the remaining elements around the element at index i
        r = arr[:i] + arr[i+1:]
        for p in permutar(r):
            t.append([arr[i]] + p)
    return t
98af485e0834d5f108312fa188d3e21fc23bec8e
693,217
def parse_metrics(metrics: tuple) -> dict:
    """
    Parse and convert the metrics captured by get_metrics() into a dictionary.

    The main role of this is to parse the enormous dict returned by
    docker.container.stats and just retrieve the memory usage as a list.

    NOTE: memory_usage_mb is total usage in megabytes, while the
    `docker stats` command line command reports total - cache.

    Parameters
    ----------
    metrics: tuple
        Tuple of metrics output by get_metrics().

    Returns
    -------
    Dictionary containing the final rows in the covidcast table, rows loaded
    to the covidcast table during the operation, final database size, change
    in database size during the operation, runtime, and the maximum memory
    usage queried during the function call.
    """
    output = {"final_table_rows": metrics[3],
              "rows_loaded": metrics[3] - metrics[2],
              "db_size_mb": metrics[1],
              "size_loaded_mb": metrics[1] - metrics[0],
              "runtime": metrics[4],
              "peak_memory_mb": max(i["memory_stats"]["usage"] / 1024 / 1024
                                    for i in metrics[5])}
    return output
0dd726c39662c680f6c5d3e18585bce20391d2a6
582,984
import zipfile


def scan_zip(filepath):
    """
    Scan the files inside of a .zip archive.
    """
    return zipfile.ZipFile(filepath).namelist()
47a2bec01e9c4c2d544c42cbbeb442d677a2e378
516,814
from datetime import datetime


def parse_date(date_string):
    """Parse a date string with the format '%Y-%m-%dT%H:%M:%S.%fZ'.

    Args:
        date_string (str): string with the format '%Y-%m-%dT%H:%M:%S.%fZ'

    Returns:
        datetime:
    """
    return datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S.%fZ')
ee5ef83cad13259640c750b05c9f046df2827b1a
175,904
def pca_dim_reduction(x, basis, xmean=None):
    """(x, basis, xmean) --> xnew

    Dimensionality reduction with PCA.

    x: N*D data matrix
    basis: D*K basis matrix
    xmean: 1-D vector, mean vector used in PCA; if not set, use the mean
        of x instead
    xnew: N*K new data matrix
    """
    if xmean is None:
        xmean = x.mean(axis=0)
    xnew = (x - xmean).dot(basis)
    return xnew
63ce86d46c38d9d8cfda646890ae45c237e97c79
451,865
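A short end-to-end sketch (mine, assuming a NumPy workflow): build a 2-component basis from the covariance eigenvectors and project the data onto it.

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 5))
eigvals, eigvecs = np.linalg.eigh(np.cov(x, rowvar=False))
basis = eigvecs[:, -2:]                   # top-2 principal directions (D x K)
print(pca_dim_reduction(x, basis).shape)  # (100, 2)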
def validate_sql_select(value: str) -> str | None:
    """Validate that value is a SQL SELECT query."""
    if not value.lstrip().lower().startswith("select"):
        raise ValueError("Incorrect Query")
    return value
1cd221373adee7d0a06d30c39b82d6610273463f
126,899
def delegate(session_attributes, slots):
    """
    Defines a delegate slot type response.
    """
    return {
        "sessionAttributes": session_attributes,
        "dialogAction": {"type": "Delegate", "slots": slots},
    }
6b1bc9d72b2e83a35516ef461d6d2517d458e36e
173,919
import torch


def decimate(tensor, m):
    """
    Decimate a tensor by a factor of 'm', i.e. downsample by keeping every
    'm'th value.
    """
    assert tensor.dim() == len(m)
    for d in range(tensor.dim()):
        if m[d] is not None:
            tensor = tensor.index_select(
                dim=d,
                index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long()
            )
    return tensor
f646aebf6e2a3f5998f3be0cd622874c7cd93df7
441,980
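Example (mine): keep every 2nd row and every 3rd column of a 4x6 tensor; a None entry in m would leave that dimension untouched.

import torch

t = torch.arange(24).view(4, 6)
print(decimate(t, [2, 3]))
# tensor([[ 0,  3],
#         [12, 15]])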
def combine_first_last_name(df):
    """
    Combine first and last name if df has those columns.
    """
    if 'first_name' in df.columns and 'last_name' in df.columns:
        df['full_name'] = df['first_name'] + ' ' + df['last_name']
        df.drop(['first_name', 'last_name'], axis=1, inplace=True)
    return df
ae10f70b86c485e0ed65cbdb44036f03b485250a
655,644
from typing import List, Tuple


def iter_together(path_1: str, path_2: str) -> List[Tuple[str, str]]:
    """Open two files and iterate over them together.

    :param path_1: The file path of the left file
    :param path_2: The file path of the right file
    :returns: Pairs of lines for the two files
    """
    with open(path_1) as file_1, open(path_2) as file_2:
        results = []
        for line_1, line_2 in zip(file_1, file_2):
            results.append((line_1.strip(), line_2.strip()))
        return results
dfa770408cc39fdfb0fd8f2b0ae6601fdecd4341
192,526
def import_from_string(import_string):
    """
    Import a class from a string.

    import_string - string path to module to import using dot notation (foo.bar)
    """
    import_split = import_string.split(".")
    import_class = import_split[-1]
    module_path = ".".join(import_split[:-1])
    mod = __import__(module_path, fromlist=[import_class])
    klass = getattr(mod, import_class)
    return klass
9176fa1716a2c09b20dfe5fcb6743aebccf94823
163,091
def _read_headers(entry_file, header_size):
    """Read the HTTP header (stream 0 data) from a cache entry file."""
    # Read header_size bytes to get the raw bytes of the HTTP headers, then
    # parse them into an HttpHeader structure: a series of null-terminated
    # strings, where the first is the status line (e.g. "HTTP/1.1 200") and
    # the rest are name:value pairs used to populate the headers dict.
    strings = entry_file.read(header_size).decode('utf-8').split('\0')
    headers = dict(s.split(':', 1) for s in strings[1:] if ':' in s)
    return headers
60fb71a292f20e0609848987c8e6677636f22de8
132,218
def lcs_analysis(Nb, Ne, Mb, Me, lcs, identical, equivalent, different):
    """This routine is derived from lcs_to_diff. Instead of writing out the
    diff based on the snake list, it analyses the snake list and establishes
    whether the reference file and the data file are identical, equivalent,
    or different. In this case the three results mean:

    - identical:  there are no differences between the two files at all.
    - equivalent: there are only tolerated differences.
    - different:  there are some non-tolerated differences.

    The values for the results are taken from the argument list.
    """
    analysis = identical
    xi1 = Mb - 1
    yj1 = Nb - 1
    mxtype = 0
    Nsnake = len(lcs)
    Isnake = 0
    itype = 0
    if Nsnake == 0:
        if Nb <= Ne:
            analysis = different
        else:
            if Mb <= Me:
                analysis = different
            else:
                pass
    else:
        # First pass: find the highest snake type present.
        while Isnake < Nsnake:
            (xi2, yj2, xi3, yj3, itype) = lcs[Isnake]
            Isnake = Isnake + 1
            if itype > mxtype:
                mxtype = itype
        if mxtype == 1:
            # there are only exact matches so identical still is the best possible
            pass
        elif mxtype == 2:
            # there are tolerated differences so equivalent is the best possible
            analysis = equivalent
        elif mxtype == 3:
            # there are non-tolerated differences so different is the best
            # possible; no need to inspect the gaps
            analysis = different
            return analysis
        # Second pass: any gap between consecutive snakes (or before the end
        # sentinel) is an insertion or deletion, which makes the files
        # different.
        Isnake = -1
        while Isnake < Nsnake:
            Isnake = Isnake + 1
            if Isnake < Nsnake:
                (xi2, yj2, xi3, yj3, itype) = lcs[Isnake]
            else:
                xi2 = Me + 1
                yj2 = Ne + 1
                xi3 = Me + 1
                yj3 = Ne + 1
            if xi1 + 1 <= xi2 and yj1 + 1 <= yj2:
                if xi1 + 1 == xi2:
                    if yj1 + 1 == yj2:
                        # This is a continuation of the previous snake
                        # (of a different type)
                        pass
                    else:
                        analysis = different
                else:
                    analysis = different
            xi1 = xi3
            yj1 = yj3
    return analysis
e85f06d3733acd5e20db40c44c8d479e2ecd68b9
90,442
def shorten(string, length):
    """Shorten a string to a specific length, cropping in the middle."""
    len2 = length // 2
    len3 = length - len2 - 1
    lens = len(string) + 2
    if lens > length:
        return ("[\033[32m{}…{}\033[0m]"
                .format(string[:len2], string[lens - len3:]))
    return ("[\033[32m{}\033[0m]{}"
            .format(string, " " * (length - lens)))
86d9e6f990c0f2a98d349804360da1338005fe94
252,498
def _query_item(item, query_id, query_namespace):
    """
    Check if the given cobra collection item matches the query arguments.

    Parameters
    ----------
    item: cobra.Reaction or cobra.Metabolite
    query_id: str
        The identifier to compare. The comparison is made case insensitively.
    query_namespace: str
        The miriam namespace identifier in which the given metabolite is
        registered. See https://www.ebi.ac.uk/miriam/main/collections
        The comparison is made case insensitively.

    Returns
    -------
    bool
        True if the given id exists in the default namespace, or in the model
        annotations by the queried namespace, otherwise False.
    """
    # Try the default identifiers (without confirming the namespace)
    if query_id.lower() == item.id.lower():
        return True

    # Otherwise, try to find a case insensitive match for the namespace key
    for namespace in item.annotation:
        if query_namespace.lower() == namespace.lower():
            annotation = item.annotation[namespace]
            # Compare the identifier case insensitively as well.
            # Annotations may contain a single id or a list of ids.
            if isinstance(annotation, list):
                if query_id.lower() in [i.lower() for i in annotation]:
                    return True
            else:
                if query_id.lower() == annotation.lower():
                    return True
    return False
f3e418ab5cf2830d2c1dd6b4e83275e14dc8f4c8
683,020
import torch


def _get_corners_3d(dim, o=[0] * 6):
    """Get eight corners of 3D image volume.

    Parameters
    ----------
    dim : (3,), list or tuple
        Image dimensions.
    o : (6,), default=[0] * 6
        Offsets.

    Returns
    -------
    c : (8, 4), tensor_like[torch.float64]
        Corners of volume.
    """
    # Get corners
    c = torch.tensor(
        [[     1,      1,      1, 1],
         [     1,      1, dim[2], 1],
         [     1, dim[1],      1, 1],
         [     1, dim[1], dim[2], 1],
         [dim[0],      1,      1, 1],
         [dim[0],      1, dim[2], 1],
         [dim[0], dim[1],      1, 1],
         [dim[0], dim[1], dim[2], 1]])
    # Include offset
    # Plane 1
    c[0, 0] = c[0, 0] + o[0]
    c[1, 0] = c[1, 0] + o[0]
    c[2, 0] = c[2, 0] + o[0]
    c[3, 0] = c[3, 0] + o[0]
    # Plane 2
    c[4, 0] = c[4, 0] - o[1]
    c[5, 0] = c[5, 0] - o[1]
    c[6, 0] = c[6, 0] - o[1]
    c[7, 0] = c[7, 0] - o[1]
    # Plane 3
    c[0, 1] = c[0, 1] + o[2]
    c[1, 1] = c[1, 1] + o[2]
    c[4, 1] = c[4, 1] + o[2]
    c[5, 1] = c[5, 1] + o[2]
    # Plane 4
    c[2, 1] = c[2, 1] - o[3]
    c[3, 1] = c[3, 1] - o[3]
    c[6, 1] = c[6, 1] - o[3]
    c[7, 1] = c[7, 1] - o[3]
    # Plane 5
    c[0, 2] = c[0, 2] + o[4]
    c[2, 2] = c[2, 2] + o[4]
    c[4, 2] = c[4, 2] + o[4]
    c[6, 2] = c[6, 2] + o[4]
    # Plane 6
    c[1, 2] = c[1, 2] - o[5]
    c[3, 2] = c[3, 2] - o[5]
    c[5, 2] = c[5, 2] - o[5]
    c[7, 2] = c[7, 2] - o[5]
    return c
de528426e853c020455f85643bdb3b714f5bb79c
604,762
import tqdm


def apply_groups(groups, client, func, *args, return_futures=False,
                 progress_bar=True, **kwargs):
    """Call `func` on each group in `groups`.

    Additionally, `args` and `kwargs` are passed to the function call.

    Parameters
    ----------
    groups: pandas.DataFrameGroupBy
        The result of a call to `groupby` on a data frame
    client: distributed.client.Client
        A dask client
    func: function pointer
        The function to apply to each row in `data_frame`
    args, kwargs
        The other arguments to pass to `func`
    return_futures: bool
        Whether to wait for the results (`False`, the default) or return a
        list of dask futures (when `True`). If a list of futures is
        returned, the `result` method should be called on each of them at
        some point before attempting to use the results.
    progress_bar: bool
        Whether to show a progress bar when waiting for results. The
        parameter is only relevant when `return_futures` is `False`.

    Returns
    -------
    results: list
        Either the result of each function call or a future which will give
        the result, depending on the value of `return_futures`
    """
    if len(groups) == 0:
        return []

    it = groups
    if progress_bar:
        it = tqdm.tqdm(it)

    ret_list = [
        client.submit(func, *(group, *args), **kwargs)
        for name, group in it
    ]

    if return_futures:
        return ret_list

    # add a progress bar if we asked for one
    if progress_bar:
        ret_list = tqdm.tqdm(ret_list)

    ret_list = [r.result() for r in ret_list]
    return ret_list
5bca1f2eb0945fa72c517134b0ea4deaf61ecd23
631,014
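Usage sketch (mine, assuming an in-process dask cluster and a toy frame):

import pandas as pd
from dask.distributed import Client

def total(group, column):
    return group[column].sum()

client = Client(processes=False)  # local scheduler, no separate worker processes
df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 3]})
print(apply_groups(df.groupby('key'), client, total, 'val', progress_bar=False))
# [3, 3]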
def cropToROI(input_image, SIDE_MARGIN=(20.0, 80.0), TOP_BOTTOM_MARGIN=(20.0, 80.0)):
    """
    Method: Crops image to include only the Region of Interest (ROI).

    Parameters:
        input_image - Image to crop.
        SIDE_MARGIN - Tuple of margins on left and right sides (in percent).
        TOP_BOTTOM_MARGIN - Tuple of margins on top and bottom sides (in percent).

    Returns:
        A cropped SimpleCV.Image
    """
    width, height = input_image.width, input_image.height
    top_left = width * SIDE_MARGIN[0] / 100, height * TOP_BOTTOM_MARGIN[0] / 100
    bottom_right = width * SIDE_MARGIN[1] / 100, height * TOP_BOTTOM_MARGIN[1] / 100
    cropped_img = input_image.crop(top_left, bottom_right)
    return cropped_img
6663b5be21001cc95d83b15456dd0469ef1a43d2
229,403
from pathlib import Path


def get_project_dir() -> Path:
    """
    Returns the project directory Path object.

    :return: Path
    """
    return Path(__file__).parent.parent.absolute()
82e07dab73d1cfee18ec77663d22caa992d10f6c
583,198
def rescale(value, max_range=0.6, min_range=0.3):
    """
    Rescale deltaG values by random distribution.

    :param value: actual value
    :param max_range: max range
    :param min_range: min range
    :return: rescaled value
    """
    scaled = value
    if value > min_range:
        scaled = value * (max_range - min_range) + min_range
    return scaled
f92850d428ce3af836b8f18c22dd98a62808c195
263,340
def read_lines(filename="", nb_lines=0):
    """Read and print the first nb_lines lines of a file (all lines if 0)."""
    i = 0
    with open(filename, "r") as my_file:
        for line in my_file:
            i += 1
            print(line, end="")
            if i == nb_lines:
                break
    return i
88daa680ff27f4053788ab9073c6be30942ade1f
333,943