Columns: content (string, 39 to 14.9k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
import datetime


def Write(Variable, f):
    """Function to Convert None Strings to Strings and Format to write to file with ,"""
    if isinstance(Variable, str) == False:
        if isinstance(Variable, datetime.datetime) == True:
            return f.write(f"{Variable.strftime('%Y-%m-%d')},")
        else:
            Variable = round(Variable, 2)
            return f.write(f"{str(Variable)},")
    elif isinstance(Variable, str) == True:
        return f.write(f"{(Variable)},")
9963c4117c7cc3f19d91331ed6c36e5733cffb56
709,406
def clone_model(model, **new_values):
    """Clones the entity, adding or overriding constructor attributes.

    The cloned entity will have exactly the same property values as the
    original entity, except where overridden. By default, it will have no
    parent entity or key name, unless supplied.

    Args:
        model: datastore_services.Model. Model to clone.
        **new_values: dict(str: *). Keyword arguments to override when
            invoking the cloned entity's constructor.

    Returns:
        datastore_services.Model. A cloned, and possibly modified, copy of
        self. Subclasses of BaseModel will return a clone with the same type.
    """
    # Reference implementation: https://stackoverflow.com/a/2712401/4859885.
    cls = model.__class__
    model_id = new_values.pop('id', model.id)
    props = {k: v.__get__(model, cls) for k, v in cls._properties.items()}  # pylint: disable=protected-access
    props.update(new_values)
    return cls(id=model_id, **props)
ed668632c8917ad685b86fb5c71146be7c9b3b96
709,408
def changenonetoNone(s):
    """Convert str 'None' to Nonetype"""
    if s == 'None':
        return None
    else:
        return s
9f6af1580d8b47d2a7852e433f7ba8bbd5c7044d
709,413
def identify_word_classes(tokens, word_classes):
    """
    Match word classes to the token list

    :param list tokens: List of tokens
    :param dict word_classes: Dictionary of word lists to find and tag with the
        respective dictionary key
    :return: Matched word classes
    :rtype: list
    """
    if word_classes is None:
        word_classes = []
    classes = set()
    for key in word_classes:
        for token in tokens:
            if token.lower() in word_classes[key]:
                classes.add(key)
    return classes
ca7aa602d19ac196321af19c42a60df415c7d115
709,414
def is_interested_source_code_file(afile):
    """
    If a file is the source code file that we are interested.
    """
    tokens = afile.split(".")
    if len(tokens) > 1 and tokens[-1] in ("c", "cpp", "pl", "tmpl", "py", "s", "S"):
        # we care about C/C++/perl/template/python/assembly source code files
        return True
    return False
9bd77dc3b530262cc2bf8a32c0d050ea30077030
709,415
def recursively_extract(node, exfun, maxdepth=2):
    """
    Transform a html ul/ol tree into a python list tree.

    Converts a html node containing ordered and unordered lists and list items
    into an object of lists with tree-like structure. Leaves are retrieved by
    applying `exfun` function to the html nodes not containing any ul/ol list.

    Args:
        node: BeautifulSoup HTML node to traverse
        exfun: function to apply to every string node found
        maxdepth: maximal depth of lists to go in the node

    Returns:
        A tree-like python object composed of lists.

    Examples:
        >>> node_content = \
        '''
        <ol>
            <li>Hase</li>
            <li>Nase<ol><li>Eins</li><li>Zwei</li></ol></li>
        </ol>'''
        >>> node = BeautifulSoup(node_content, "lxml")
        >>> recursively_extract(node, lambda x: x)
        [<li>Hase</li>, [<li>Eins</li>, <li>Zwei</li>]]
        >>> recursively_extract(node, lambda x: x.get_text())
        ['Hase', ['Eins', 'Zwei']]
    """
    if node.name in ['ol', 'ul']:
        lilist = node
    else:
        lilist = node.ol or node.ul
    if lilist and maxdepth:
        # apply 'recursively_extract' to every 'li' node found under this node
        return [recursively_extract(li, exfun, maxdepth=(maxdepth - 1))
                for li in lilist.find_all('li', recursive=False)]
    # if this node doesn't contain 'ol' or 'ul' node, return the transformed
    # leaf (using the 'exfun' function)
    return exfun(node)
cc5732a786579172dda31958ad2bd468a4feef81
709,416
def density(mass, volume): """ Calculate density. """ return mass / volume * 1
53b1f76ba66695a9cd72be9186bcc374ee11f53b
709,418
def has_field_warning(meta, field_id):
    """Warn if dataset has existing field with same id."""
    if meta.has_field(field_id):
        print(
            "WARN: Field '%s' is already present in dataset, not overwriting."
            % field_id
        )
        print("WARN: Use '--replace' flag to overwrite existing field.")
        return 1
    return 0
1cc5016f8ffcce698bcb53dcf6f307b760d7df55
709,424
def get_tagset(sentences, with_prefix):
    """
    Returns the set of entity types appearing in the list of sentences.

    If with_prefix is True, it returns both the B- and I- versions for each
    entity found. If False, it merges them (i.e., removes the prefix and only
    returns the entity type).
    """
    iobs = [iob for sent in sentences for (x, iob) in sent]
    tagset = set(iobs)
    if not with_prefix:
        tagset = set([t[2:] for t in list(tagset) if t != 'O'])
    return tagset
c0b00f7c5546bfc7fe10b2d4b35998b5dedeba21
709,426
def _proxies_dict(proxy):
    """Makes a proxy dict appropriate to pass to requests."""
    if not proxy:
        return None
    return {'http': proxy, 'https': proxy}
ce51015dc652c494dc89bb11e21f18803ba34c85
709,429
def gen_run_entry_str(query_id, doc_id, rank, score, run_id): """A simple function to generate one run entry. :param query_id: query id :param doc_id: document id :param rank: entry rank :param score: entry score :param run_id: run id """ return f'{query_id} Q0 {doc_id} {rank} {score} {run_id}'
657c59fea34e4aed2159337360c973dc99b53082
709,430
def is_variant(title) -> bool: """ Check if an issue is variant cover. """ return "variant" in title.lower()
5e0bab3030c069d7726bbc8c9909f561ed139cb8
709,433
def default_marker_size(fmt):
    """
    Find a default matplotlib marker size such that different marker types
    look roughly the same size.
    """
    temp = fmt.replace('.-', '')
    if '.' in temp:
        ms = 10
    elif 'D' in temp:
        ms = 7
    elif set(temp).intersection('<>^vd'):
        ms = 9
    else:
        ms = 8
    return ms
feebe9bdda47a2e041636f15c9b9595e5cd6b2cc
709,435
from typing import List
from typing import Dict


def seq_hist(seq_lens: List[int]) -> Dict[int, int]:
    """Returns a dict of sequence_length/count key/val pairs.

    For each entry in the list of sequence lengths, tabulates
    the frequency of appearance in the list and returns the data
    as a dict. Useful for histogram operations on sequence length.
    """
    seq_count = {}
    for slen in seq_lens:
        if slen in seq_count:
            seq_count[slen] += 1
        else:
            seq_count[slen] = 1
    return seq_count
5778b7566d1b64e8db0e2dce6bbf53e06cdb196d
709,439
def is_repo_in_config(config, repo, rev, hook_id):
    """Get if a repository is defined in a pre-commit configuration.

    Parameters
    ----------
    config : dict
        Pre-commit configuration dictionary.
    repo : str
        Repository to search.
    rev : str
        Repository tag revision.
    hook_id :
        Hook identifier.

    Returns
    -------
    dict :
        Information about if the repository and the hook have been found.
    """
    response = {"repo_found": False, "hook_found": False, "same_rev": False}
    for repo_ in config["repos"]:
        if repo_["repo"] == repo:
            response["repo_found"] = True
            response["hook_found"] = hook_id in [hook["id"] for hook in repo_["hooks"]]
            response["same_rev"] = repo_["rev"] == rev
            break
    return response
855315c50f4bfe53a4f9b7a5d392bb539e364617
709,442
def split_dataframe(df, size=10*1024*1024):
    """Splits huge dataframes(CSVs) into smaller segments of given size in bytes"""
    # size of each row
    row_size = df.memory_usage().sum() / len(df)
    # maximum number of rows in each segment
    row_limit = int(size // row_size)
    # number of segments
    seg_num = (len(df)+row_limit-1)//row_limit
    # split df into segments
    segments = [df.iloc[i*row_limit : (i+1)*row_limit] for i in range(seg_num)]
    return segments
46f34d388e6f596bfcf803b4569eb3015344bafb
709,443
def calculate_line_number(text): """Calculate line numbers in the text""" return len([line for line in text.split("\n") if line.strip() != ""])
f35533945203ec2f47a89e7072ddd9b172f5554b
709,446
def lammps_created_gsd(job): """Check if the mdtraj has converted the production to a gsd trajectory for the job.""" return job.isfile("trajectory-npt.gsd")
a66c899a20e9602098150f46067d5505572232c2
709,449
def deslugify_province(prov):
    """
    Province slug to name, i.e. dashes to spaces and title case.

    KZN is a special case.
    """
    if prov == 'kwazulu-natal':
        return 'KwaZulu-Natal'
    return prov.replace('-', ' ').title()
8e88ea7325c3b911495780b4437bc02784fbad82
709,450
import re


def parse_vectors(vectors):
    """ Basic cleanup of vector or vectors

    Strip out V from V#s. Similar to parse tables, this by no means guarantees
    a valid entry, just helps with some standard input formats

    Parameters
    ----------
    vectors : list of str or str
        A string or list of strings of vector names to be parsed

    Returns
    -------
    list of str
        vectors with unnecessary characters removed
    """
    def parse_vector(vector):
        """Strip string to numeric elements only"""
        if isinstance(vector, int):
            # Already parsed earlier
            return vector
        return int(re.sub(r'\D', '', vector))

    if isinstance(vectors, str):
        return [parse_vector(vectors)]
    return [parse_vector(v) for v in vectors]
d2161e45bae51db21d7668ea6008ddb9ada16c4e
709,451
import torch


def choice(x, a):
    """Generate a random sample from an array of given size."""
    if torch.is_tensor(x):
        return x[torch.randint(len(x), (a,))]
    return x
af21321bcd12fe5f1a5eb59b8f0db14096899b5d
709,455
def get_identifier(positioner_id, command_id, uid=0, response_code=0):
    """Returns a 29 bits identifier with the correct format.

    The CAN identifier format for the positioners uses an extended frame with
    29-bit encoding so that the 11 higher bits correspond to the positioner
    ID, the 8 middle bits are the command number, the following 6 bits are the
    unique identifier, and the 4 lower bits are the response code.

    Parameters
    ----------
    positioner_id : int
        The Id of the positioner to command, or zero for broadcast.
    command_id : int
        The ID of the command to send.
    uid : int
        The unique identifier
    response_code : int
        The response code.

    Returns
    -------
    identifier : `int`
        The decimal integer corresponding to the 29-bit identifier.

    Examples
    --------
    ::

        >>> get_identifier(5, 17, uid=5)
        1328128
        >>> bin(1328128)
        '0b101000100010000000000'

    """
    posid_bin = format(positioner_id, "011b")
    cid_bin = format(command_id, "08b")
    cuid_bin = format(uid, "06b")
    response_bin = format(int(response_code), "04b")

    identifier = posid_bin + cid_bin + cuid_bin + response_bin

    assert len(identifier) == 29

    return int(identifier, 2)
57a1ce7004186e8c1c88c06665311e71010705c4
709,462
def standardized(array): """Normalize the values in an array. Arguments: array (np.ndarray): Array of values to normalize. Returns: array with zero mean and unit standard deviation. """ return (array - array.mean()) / max(1e-4, array.std())
1764dfd1e4e173d2ca081edeb8b7165a79d63b7d
709,463
def tree_unflatten(flat, tree, copy_from_tree=None):
    """Unflatten a list into a tree given the tree shape as second argument.

    Args:
      flat: a flat list of elements to be assembled into a tree.
      tree: a tree with the structure we want to have in the new tree.
      copy_from_tree: optional list of elements that we just copy from tree.
        This argument is used when the flat version does not contain all
        elements of the expected tree but just a subset, while the rest are
        filled from the tree itself. It allows to omit "unnecessary" elements.
        For example, consider trees (A, (B, X), X) and (X, (A, X), B) where X
        is some element we do not care about. Flattening the first tree and
        removing X will yield a flat list [A, B] and the second tree can then
        be reconstructed from this list and the tree (X, (E, X), E) with
        copy_from_tree=[X]. One example where this is used is the weights-tree
        of a model, where layers with no weights have () in the tree and we use
        copy_from_tree=[()] to restore a model from a file that only has a list
        of trainable weights.

    Returns:
      A pair (new_tree, rest_of_flat) where the new tree that has the structure
      of tree but with leaves from flat, and the remaining elements of flat if
      more were provided than the number of leaves of tree (useful for recursion).
    """
    if copy_from_tree is not None and tree in copy_from_tree:
        return tree, flat
    if isinstance(tree, (list, tuple)):
        new_tree, rest = [], flat
        for t in tree:
            new_t, rest = tree_unflatten(rest, t, copy_from_tree=copy_from_tree)
            new_tree.append(new_t)
        new_tree = tuple(new_tree) if isinstance(tree, tuple) else new_tree
        return new_tree, rest
    if isinstance(tree, dict):
        new_tree, rest = {}, flat
        for k in tree:
            new_v, rest = tree_unflatten(rest, tree[k], copy_from_tree=copy_from_tree)
            new_tree[k] = new_v
        return new_tree, rest
    return flat[0], flat[1:]
711bc67a20835091360d0fbc64e0a8842eec53ba
709,470
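A quick usage sketch for the tree_unflatten entry above (not part of the dataset row; it assumes the definition above is in scope). It shows how the flat leaves are poured back into the shape of the template tree and how leftover leaves are returned:

# Illustrative only: assumes tree_unflatten as defined above is in scope.
leaves = [1, 2, 3]
structure = ('a', ('b', 'c'))          # only the shape matters, not the leaf values
rebuilt, rest = tree_unflatten(leaves, structure)
print(rebuilt)  # (1, (2, 3))
print(rest)     # [] -- extra leaves, if any, come back here for recursive callers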
def soma_radius(morph): """Get the radius of a morphology's soma.""" return morph.soma.radius
2f9991a2f9240965bdb69a1a14814ed99bf60f86
709,471
import json


def get_repo_info(main_path):
    """ Get the info of repo.

    Args:
        main_path: the file store location.

    Return:
        A json object.
    """
    with open(main_path + '/repo_info.json') as read_file:
        repo_info = json.load(read_file)
    return repo_info
f4a538819add0a102f6cbe50be70f2c9a0f969b6
709,475
import yaml


def parse_settings(settings_file: str) -> dict:
    """
    The function parses settings file into dict

    Parameters
    ----------
    settings_file : str
        File with the model settings, must be in yaml.

    Returns
    -------
    ydict : dict
        Parsed settings used for modeling.
    """
    with open(settings_file, 'r') as fstream:
        ydict = yaml.safe_load(fstream)
    return ydict
1aec2a8be51376209db81d60115814ddefca7ea6
709,476
def get_mac_address(path):
    """
    input: path to the file with the location of the mac address
    output: A string containing a mac address

    Possible exceptions:
        FileNotFoundError - when the file is not found
        PermissionError - in the absence of access rights to the file
        TypeError - If the function argument is not a string.
    """
    if type(path) is not str:
        raise TypeError("The path must be a string value")
    try:
        file = open(path)
    except FileNotFoundError as e:
        raise e
    except PermissionError as e:
        raise e
    return file.readline().strip().upper()
814a530b63896103adcb8fbc84d17939644b9bbe
709,477
def parse_vars(vars):
    """
    Transform a list of NAME=value environment variables into a dict
    """
    retval = {}
    for var in vars:
        key, value = var.split("=", 1)
        retval[key] = value
    return retval
e2c6ae05cdf0151caaf8589eb7d7df90dcdd99a1
709,480
from typing import List
import collections


def find_dup_items(values: List) -> List:
    """Find duplicate items in a list

    Arguments:
        values {List} -- A list of items

    Returns:
        List -- A list of duplicated items
    """
    dup = [t for t, c in collections.Counter(values).items() if c > 1]
    return dup
3a84c2f3b723bed9b7a82dc5f0cfd81d99c2bf48
709,481
import inspect


def form_of(state):
    """Return the form of the given state."""
    if hasattr(state, "__form__"):
        if callable(state.__form__) and not inspect.isclass(state.__form__):
            return state.__form__()
        else:
            return state.__form__
    else:
        raise ValueError(f"{state} has no form")
e39aa7db7b324ab38b65232b34b987b862812c54
709,484
import torch


def _switch_component(
    x: torch.Tensor, ones: torch.Tensor, zeros: torch.Tensor
) -> torch.Tensor:
    """
    Basic component of switching functions.

    Args:
        x (torch.Tensor): Switch functions.
        ones (torch.Tensor): Tensor with ones.
        zeros (torch.Tensor): Zero tensor

    Returns:
        torch.Tensor: Output tensor.
    """
    x_ = torch.where(x <= 0, ones, x)
    return torch.where(x <= 0, zeros, torch.exp(-ones / x_))
8d60c09428440be704e8ced9b8ac19219a0d0b04
709,487
def opts2dict(opts):
    """Converts options returned from an OptionParser into a dict"""
    ret = {}
    for k in dir(opts):
        if callable(getattr(opts, k)):
            continue
        if k.startswith('_'):
            continue
        ret[k] = getattr(opts, k)
    return ret
cfa828f0248ff7565aabbb5c37a7bc6fa38c6450
709,490
import math


def smaller2k(n):
    """
    Returns power of 2 which is smaller than n. Handles negative numbers.
    """
    if n == 0:
        return 0
    if n < 0:
        return -2**math.ceil(math.log2(-n))
    else:
        return 2**math.floor(math.log2(n))
0d0bbbf95cb22bf1b9ffb29012075534bcc9646d
709,493
def unwind(g, num): """Return <num> first elements from iterator <g> as array.""" return [next(g) for _ in range(num)]
59b724ca27729b4fc20d19a40f95d590025307c4
709,494
def any(array, mapFunc):
    """
    Checks if any of the elements of array returns true, when applied on a function that returns a boolean.

    :param array: The array that will be checked, for if any of the elements returns true, when applied on the function. \t
    :type array: [mixed] \n
    :param mapFunc: The function that gives a boolean value, when applied on the element of the array. \t
    :type mapFunc: function \n
    :returns: Whether any of the elements of the array, returned true or not. \t
    :rtype: : bool \n
    """
    for elem in array:
        if mapFunc(elem):
            return True
    return False
1e635da691fd1c2fc9d99e15fd7fa0461a7bdf0e
709,498
def get_sample_activity_from_batch(activity_batch, idx=0): """Return layer activity for sample ``idx`` of an ``activity_batch``. """ return [(layer_act[0][idx], layer_act[1]) for layer_act in activity_batch]
0302fdf215e63d6cbcd5dafc1bd36ae3d27712f2
709,500
def _extract_bike_location(bike, lon_abbrev='lon'):
    """
    Standardize the bike location data from GBFS. Some have extra fields,
    and some are missing fields.

    Arguments:
        bike (dict[str, str]): A GBFS bike object as it appears in free_bike_status.json
        lon_abbrev (str): The abbreviation used for `longitude`

    Returns:
        dict[str, str]: A normalized GBFS bike object
    """
    output = {key: bike.get(key) for key in ['bike_id', 'lat', 'is_reserved', 'is_disabled']}
    output['lon'] = bike.get(lon_abbrev)
    return output
a20929a85c993a59b82b552fcfee81b1f818648d
709,502
def clean_word(word): """Return word in lowercase stripped of whitespace""" return word.strip().lower()
ce57fa95ec111ee18c8a00c2076c686bc0abfe5c
709,503
def WrapWithQuotes(text, quote='"'):
    """ Wrap the supplied text with quotes

    Args:
        text: Input text to wrap
        quote: Quote character to use for wrapping (default = "")

    Returns:
        Supplied text wrapped in quote char
    """
    if not text.startswith(quote):
        text = quote + text
    if not text.endswith(quote):
        text = text + quote
    return text
f4f7b83d60e3ea928e3502b9d19ca4c8d52914b9
709,506
from typing import Callable


def find_function_in_object(o: object, function_name: str) -> Callable:
    """Finds a callable object matching given function name in given object.

    Args:
        o: Any object.
        function_name: Name of attribute within o.

    Returns:
        Callable object with name <function_name> in object <o>.

    Raises:
        LookupError: if <function_Name> is not a callable object in <o>.
    """
    try:
        function_handle = getattr(o, function_name)
        if not hasattr(function_handle, "__call__"):
            raise LookupError(
                f"Resolved object {function_name} in object {o} is not a function."
            )
        else:
            return function_handle
    except AttributeError:
        raise LookupError(f"Cannot find function {function_name} in object {o}.")
c3b6ad12f42d005f643bc8a657f728613bd0e93c
709,508
from typing import Any


def increment_occurance_dict(d: dict, k: Any) -> None:
    """
    Increment occurance dict, updates in-place so nothing is returned.
    """
    try:
        d[k] += 1
    except KeyError:
        d[k] = 1
    return None
725b437494f4c647848c54a3d13b4e974fa7f0e8
709,511
def is_bool(space, w_obj): """ Finds out whether a variable is a boolean""" return space.wrap(w_obj.tp == space.tp_bool)
39b62ec08ebbdd4d7505e558ad4901ca67afc12d
709,512
def _strip_after_new_lines(s):
    """Removes leading and trailing whitespaces in all but first line."""
    lines = s.splitlines()
    if len(lines) > 1:
        lines = [lines[0]] + [l.lstrip() for l in lines[1:]]
    return '\n'.join(lines)
247cee0f34ab1e742069e05c8c00095cd24d80bc
709,515
from typing import Tuple


def arm_name_to_sort_key(arm_name: str) -> Tuple[str, int, int]:
    """Parses arm name into tuple suitable for reverse sorting by key

    Example:
        arm_names = ["0_0", "1_10", "1_2", "10_0", "control"]
        sorted(arm_names, key=arm_name_to_sort_key, reverse=True)
        ["control", "0_0", "1_2", "1_10", "10_0"]
    """
    try:
        trial_index, arm_index = arm_name.split("_")
        return ("", -int(trial_index), -int(arm_index))
    except (ValueError, IndexError):
        return (arm_name, 0, 0)
c29958bb541a9754e7b4defc6ad953030a364d2f
709,519
from typing import Dict
from typing import Any


def replace_module_prefix(
    state_dict: Dict[str, Any], prefix: str, replace_with: str = "", ignore_prefix: str = ""
):
    """
    Remove prefixes in a state_dict needed when loading models that are not VISSL
    trained models.

    Specify the prefix in the keys that should be removed.

    Added by DLM contributors: ignore_prefix is used to ignore certain keys in the state dict
    """
    state_dict = {
        (key.replace(prefix, replace_with, 1) if key.startswith(prefix) else key): val
        for (key, val) in state_dict.items()
        if ((not key.startswith(ignore_prefix)) or ignore_prefix == "")
    }
    return state_dict
b8499c818053e7798e9549fbe546bab7d5fbfa84
709,520
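A minimal usage sketch for the replace_module_prefix entry above (illustrative, assuming the definition above is in scope); the hypothetical keys show both the prefix rewrite and the ignore_prefix filter:

# Illustrative only: assumes replace_module_prefix as defined above is in scope.
sd = {"module.encoder.weight": 1, "head.weight": 2}
print(replace_module_prefix(sd, "module."))
# {'encoder.weight': 1, 'head.weight': 2}
print(replace_module_prefix(sd, "module.", ignore_prefix="head"))
# {'encoder.weight': 1} -- keys starting with the ignored prefix are dropped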
def crop(img, left, top, right, bottom): """ Crop rectangle from image. Inputs: img - The image to crop. left - The leftmost index to crop the image. top - The topmost index. right - The rightmost index. bottom - The bottommost index. Outputs: img - The cropped image. """ return img[left:right, top:bottom]
1507a55bba07dc656f51f873d2328b69f70682c9
709,521
import ipaddress


def get_hosts(network):
    """get_hosts() will return all the hosts within a provided network, range"""
    network = ipaddress.IPv4Network(network, strict=False)
    hosts_obj = network.hosts()
    hosts = []
    for i in hosts_obj:
        hosts.append(str(i))
    return hosts
097fa3abbf1cda1c3c0ddc0c2fec4a06d1d44fa9
709,522
import random


def select(weights):
    """ select a node with probability proportional to its "weight" """
    r = random.random() * sum(weights)
    s = 0.0
    for k, w in enumerate(weights):
        s += w
        if r <= s:
            return k
    raise RuntimeError("select WTF from %s" % weights)
fed92de65cfae6f3532754215f5b88a564365ac7
709,526
import time


def execution_duration(fun):
    """
    Calculates the duration the function 'fun' takes to execute.

    execution_duration returns a wrapper function to which you pass your arguments.
    Example: execution_duration(my_function)(my_first_param, my_second_param)

    The result of the wrapper function will be a tuple, where the fist value is the
    return value of your function and the second is the execution time in seconds
    expressed as a float.
    """
    def wrapper(*args, **kwargs):
        t1 = time.time()
        result = fun(*args, **kwargs)
        exec_dur = time.time() - t1
        return result, exec_dur

    return wrapper
b824ce8e1448a65bd932ec8344b1976d2a86dd09
709,531
from typing import Union


def chunks_lists_to_tuples(level: Union[list, int, float]) -> Union[tuple, int, float]:
    """Convert a recursive list of lists of ints into a tuple of tuples of ints. This is
    a helper function needed because MongoDB automatically converts tuples to lists, but
    the dask constructor wants the chunks defined strictly as tuples.

    e.g.
    - input: ``[[1, 2], [3, 4]]``
    - output: ``((1, 2), (3, 4))``

    .. note::
       float data type is supported to allow for NaN-sized dask chunks
    """
    if isinstance(level, list):
        return tuple(chunks_lists_to_tuples(i) for i in level)
    if isinstance(level, (int, float)):
        return level
    raise TypeError(level)
49cc7923211d50fdf6a386016af12b80a2f821df
709,532
def apply_inverse_rot_to_vec(rot, vec):
    """Multiply the inverse of a rotation matrix by a vector."""
    # Inverse rotation is just transpose
    return [rot[0][0] * vec[0] + rot[1][0] * vec[1] + rot[2][0] * vec[2],
            rot[0][1] * vec[0] + rot[1][1] * vec[1] + rot[2][1] * vec[2],
            rot[0][2] * vec[0] + rot[1][2] * vec[1] + rot[2][2] * vec[2]]
1108ac6caa30b3562a2af1bcc83e1c1a1bfd8d4d
709,533
def binstringToBitList(binstring): """Converts a string of '0's and '1's to a list of 0's and 1's""" bitList = [] for bit in binstring: bitList.append(int(bit)) return bitList
d8ff10651d9fc2d02aba3b4a57a0a768032783b7
709,534
def int_to_bitstr(int_value: int) -> str: """ A function which returns its bit representation as a string. Arguments: int_value (int) - The int value we want to get the bit representation for. Return: str - The string representation of the bits required to form the int. """ return bin(int_value)[2:]
cafbf151ce0404081a0a8e1327d85e61ea7ddc52
709,535
def get_polynomial_coefficients(degree=5):
    """
    Return a list with coefficient names,
    [1 x y x^2 xy y^2 x^3 ...]
    """
    names = ["1"]
    for exp in range(1, degree + 1):  # 0, ..., degree
        for x_exp in range(exp, -1, -1):
            y_exp = exp - x_exp
            if x_exp == 0:
                x_str = ""
            elif x_exp == 1:
                x_str = r"$x$"
            else:
                x_str = rf"$x^{x_exp}$"
            if y_exp == 0:
                y_str = ""
            elif y_exp == 1:
                y_str = r"$y$"
            else:
                y_str = rf"$y^{y_exp}$"
            names.append(x_str + y_str)
    return names
9369841215045e925a3453b83be9dc49c9be7b92
709,537
def user_enabled(inst, opt): """ Check whether the option is enabled. :param inst: instance from content object init :param url: Option to be checked :return: True if enabled, False if disabled or non present """ return opt in inst.settings and inst.settings[opt]
3b2a5a1534ff779178eb4bd6b839b66c0b07864f
709,538
def erase_not_displayed(client): """Erase all non-displayed models from memory. Args: client (obj): creopyson Client. Returns: None """ return client._creoson_post("file", "erase_not_displayed")
c3981fcce00b5d5440fcbdbe8781e9e6229a8fa7
709,540
def normalize(*args): """Scale a sequence of occurrences into probabilities that sum up to 1.""" total = sum(args) return [arg / total for arg in args]
49b0f998fe58b2c85da5a993e542d91bb5dd5382
709,543
from typing import Any
from typing import Dict


def _adjust_estimator_options(estimator: Any, est_options: Dict[str, Any], **kwargs) -> Dict[str, Any]:
    """
    Adds specific required classifier options to the `clf_options` dictionary.

    Parameters
    ----------
    classifier : Any
        The classifier object for which the options have to be added
    clf_options : Dict[str, Any]
        Dictionary, where the additional classifier options should be added to
    kwargs :
        Additional classifier options as keyword arguments

    Returns
    -------
    Dict[str, Any]
        The input `clf_options` dictionary containing the additional classifier options
    """
    if estimator.__name__ == 'XGBClassifier':
        est_options['num_class'] = kwargs['n_categories']
    elif estimator.__name__ == 'DNNClassifier':
        est_options['n_classes'] = kwargs['n_categories']
        est_options['n_features'] = kwargs['n_features']
        est_options['random_state'] = kwargs['random_seed']
    return est_options
4ff98d8a3b3e647e129fb0ffbc9bc549caa60440
709,551
def get_fields(fields):
    """
    From the last column of a GTF, return a dictionary mapping each value.

    Parameters:
        fields (str): The last column of a GTF

    Returns:
        attributes (dict): Dictionary created from fields.
    """
    attributes = {}

    description = fields.strip()
    description = [x.strip() for x in description.split(";")]
    for pair in description:
        if pair == "":
            continue
        pair = pair.replace('"', '')
        key, val = pair.split()
        attributes[key] = val

    # put in placeholders for important attributes (such as gene_id) if they
    # are absent
    if 'gene_id' not in attributes:
        attributes['gene_id'] = 'NULL'

    return attributes
30777838934b18a0046017f3da6b3a111a911a9c
709,555
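A usage sketch for the get_fields entry above (illustrative, with made-up GTF attribute strings, assuming the definition above is in scope):

# Illustrative only: assumes get_fields as defined above is in scope.
line_attrs = 'gene_id "ENSG00000223972"; gene_name "DDX11L1";'
print(get_fields(line_attrs))
# {'gene_id': 'ENSG00000223972', 'gene_name': 'DDX11L1'}
print(get_fields('transcript_id "T1";'))
# {'transcript_id': 'T1', 'gene_id': 'NULL'} -- missing gene_id gets a placeholder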
def add_log_group_name_params(log_group_name, configs): """Add a "log_group_name": log_group_name to every config.""" for config in configs: config.update({"log_group_name": log_group_name}) return configs
a5fce8143c3404257789c1720bbfefc49c8ea3f5
709,556
import time


def time_remaining(event_time):
    """
    Args:
        event_time (time.struct_time): Time of the event.

    Returns:
        float: Time remaining between now and the event, in
            seconds since epoch.
    """
    now = time.localtime()
    time_remaining = time.mktime(event_time) - time.mktime(now)
    return time_remaining
cb3dfcf916cffc3b45f215f7642aeac8a1d6fef7
709,558
def hasf(e): """ Returns a function which if applied with `x` tests whether `x` has `e`. Examples -------- >>> filter(hasf("."), ['statement', 'A sentence.']) ['A sentence.'] """ return lambda x: e in x
ac9ce7cf2ed2ee8a050acf24a8d0a3b95b7f2d50
709,560
def indent_multiline(s: str, indentation: str = " ", add_newlines: bool = True) -> str:
    """Indent the given string if it contains more than one line.

    Args:
        s: String to indent
        indentation: Indentation to prepend to each line.
        add_newlines: Whether to add newlines surrounding the result
            if indentation was added.
    """
    lines = s.splitlines()
    if len(lines) <= 1:
        return s
    lines_str = "\n".join(f"{indentation}{line}" for line in lines)
    if add_newlines:
        return f"\n{lines_str}\n"
    else:
        return lines_str
62eb2fc7c3f3b493a6edc009692f472e50e960f7
709,563
from typing import Optional


def _get_property(self, key: str, *, offset: int = 0) -> Optional[int]:
    """Get a property from the location details.

    :param key: The key for the property
    :param offset: Any offset to apply to the value (if found)
    :returns: The property as an int value if found, None otherwise
    """
    value = self.location_details.get(key)
    if value is None:
        return None
    return int(value[0]) + offset
8d2c35a88810db5255cfb0ca9d7bfa6345ff3276
709,564
def is_iterable(value): """Return True if the object is an iterable type.""" return hasattr(value, '__iter__')
55e1ecc9b264d39aaf5cfcbe89fdc01264191d95
709,567
def remove_scope_from_name(name, scope):
    """
    Args:
        name (str): full name of the tf variable with all the scopes

    Returns:
        (str): full name of the variable with the scope removed
    """
    result = name.split(scope)[1]
    result = result[1:] if result[0] == '/' else result
    return result.split(":")[0]
aa70042a2f57185a0f5e401d182a02e5654eb2b0
709,568
def spg_line_search_step_length(current_step_length, delta, f_old, f_new,
                                sigma_one=0.1, sigma_two=0.9):
    """Return next step length for line search."""
    step_length_tmp = (-0.5 * current_step_length ** 2 * delta /
                       (f_new - f_old - current_step_length * delta))
    next_step_length = 0
    if sigma_one <= step_length_tmp <= sigma_two * current_step_length:
        next_step_length = step_length_tmp
    else:
        next_step_length = 0.5 * current_step_length
    return next_step_length
844cccdfe1ec3f9c2c287384284ceb2ac3530e8e
709,570
def samps2ms(samples: float, sr: int) -> float: """samples to milliseconds given a sampling rate""" return (samples / sr) * 1000.0
49e07ee02984bf0e9a0a54715ef6b6e5a3c87798
709,572
def valid_identity(identity): """Determines whether or not the provided identity is a valid value.""" valid = (identity == "homer") or (identity == "sherlock") return valid
9865d19802b596d1d5fdce6ff8d236678da29ee6
709,576
def get_username() -> str:
    """
    Prompts the user to enter a username and then returns it

    :return: The username entered by the user
    """
    while True:
        print("Please enter your username (without spaces)")
        username = input().strip()
        if ' ' not in username:
            return username
1a18a229908b86c32a0822c068b5b9081cc9fdc3
709,581
def get_pip_package_name(provider_package_id: str) -> str: """ Returns PIP package name for the package id. :param provider_package_id: id of the package :return: the name of pip package """ return "apache-airflow-providers-" + provider_package_id.replace(".", "-")
e7aafbdfb0e296e60fedfcf7e4970d750e4f3ffa
709,582
def first(iterable, default=None): """ Returns the first item or a default value >>> first(x for x in [1, 2, 3] if x % 2 == 0) 2 >>> first((x for x in [1, 2, 3] if x > 42), -1) -1 """ return next(iter(iterable), default)
6907e63934967c332eea9cedb5e0ee767a88fe8f
709,583
def _str_conv(number, rounded=False):
    """
    Convenience tool to convert a number, either float or int into a string.

    If the int or float is None, returns empty string.

    >>> print(_str_conv(12.3))
    12.3
    >>> print(_str_conv(12.34546, rounded=1))
    12.3
    >>> print(_str_conv(None))
    <BLANKLINE>
    >>> print(_str_conv(1123040))
    11.2e5
    """
    if not number:
        return str(' ')
    if not rounded and isinstance(number, (float, int)):
        if number < 100000:
            string = str(number)
        else:
            exponant = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
            divisor = 10 ** exponant
            string = '{0:.1f}'.format(number / divisor) + 'e' + str(exponant)
    elif rounded == 2 and isinstance(number, (float, int)):
        if number < 100000:
            string = '{0:.2f}'.format(number)
        else:
            exponant = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
            divisor = 10 ** exponant
            string = '{0:.2f}'.format(number / divisor) + 'e' + str(exponant)
    elif rounded == 1 and isinstance(number, (float, int)):
        if number < 100000:
            string = '{0:.1f}'.format(number)
        else:
            exponant = int('{0:.2E}'.format(number).split('E+')[-1]) - 1
            divisor = 10 ** exponant
            string = '{0:.1f}'.format(number / divisor) + 'e' + str(exponant)
    else:
        return str(number)
    return string
d352e8f0956b821a25513bf4a4eecfae5a6a7dcd
709,591
def label_smoothed_nll_loss(lprobs, target, epsilon: float = 1e-8, ignore_index=None):
    """Adapted from fairseq

    Parameters
    ----------
    lprobs
        Log probabilities of amino acids per position
    target
        Target amino acids encoded as integer indices
    epsilon
        Smoothing factor between 0 and 1, by default 1e-8
    ignore_index, optional
        Amino acid (encoded as integer) to ignore, by default None

    Returns
    -------
    Negative log-likelihood loss
    """
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    else:
        nll_loss = nll_loss.squeeze(-1)
        smooth_loss = smooth_loss.squeeze(-1)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss
eb09b7dd5c800b01b723f33cd0f7a84ae93b3489
709,592
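A minimal smoke test for the label_smoothed_nll_loss entry above (illustrative; assumes the definition above is in scope and PyTorch is installed). The shapes follow the gather(dim=-1, index=target) call, so target carries a trailing dimension of 1:

# Illustrative only: assumes label_smoothed_nll_loss as defined above is in scope.
import torch

torch.manual_seed(0)
lprobs = torch.log_softmax(torch.randn(4, 20), dim=-1)  # 4 positions, 20-symbol vocabulary
target = torch.randint(0, 20, (4, 1))                   # integer indices, trailing dim of 1
loss = label_smoothed_nll_loss(lprobs, target, epsilon=0.1)
print(loss)  # scalar tensor: 0.9 * summed NLL plus the smoothing term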
import re


def parse_date(regexen, date_str):
    """
    Parse a messy string into a granular date

    `regexen` is of the form [ (regex, (granularity, groups -> datetime)) ]
    """
    if date_str:
        for reg, (gran, dater) in regexen:
            m = re.match(reg, date_str)
            if m:
                try:
                    return gran, dater(m.groups())
                except ValueError:
                    return 0, None
    return 0, None
a141cad6762556115699ca0327b801537bab1c7e
709,593
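The `regexen` shape described in the parse_date docstring is easiest to see with a concrete entry (illustrative; the regexes, granularity codes, and callbacks below are made up, and parse_date as defined above is assumed to be in scope):

# Illustrative only: assumes parse_date as defined above is in scope.
import datetime

regexen = [
    # (regex, (granularity, groups -> datetime)); granularity codes are caller-defined
    (r'(\d{4})-(\d{2})-(\d{2})', (3, lambda g: datetime.date(*map(int, g)))),
    (r'(\d{4})', (1, lambda g: datetime.date(int(g[0]), 1, 1))),
]
print(parse_date(regexen, '2021-05-04'))  # (3, datetime.date(2021, 5, 4))
print(parse_date(regexen, '2021'))        # (1, datetime.date(2021, 1, 1))
print(parse_date(regexen, 'garbage'))     # (0, None)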
from typing import Dict


def get_entity_contents(entity: Dict) -> Dict:
    """
    :param entity: Entity is a dictionary
    :return: A dict representation of the contents of entity
    """
    return {
        'ID': entity.get('id'),
        'Name': entity.get('name'),
        'EmailAddress': entity.get('email_address'),
        'Organization': entity.get('organization'),
        'Tags': entity.get('labels'),
        'StrictNameMatching': entity.get('strict_name_matching'),
        'PolicyID': entity.get('policy_id'),
        'Profile': entity.get('profile'),
        'EntityGroupID': entity.get('entity_group', {}).get('id') if entity.get('entity_group') else None,
        'EntityGroupName': entity.get('entity_group', {}).get('name') if entity.get('entity_group') else None,
        'TypeID': entity.get('type', {}).get('id') if entity.get('type') else None,
        'TypeName': entity.get('type', {}).get('name') if entity.get('type') else None
    }
3c9e133bf80bc4d59c6f663503b5083401acc4e0
709,595
def t68tot90(t68): """Convert from IPTS-68 to ITS-90 temperature scales, as specified in the CF Standard Name information for sea_water_temperature http://cfconventions.org/Data/cf-standard-names/27/build/cf-standard-name-table.html temperatures are in degrees C""" t90 = 0.99976 * t68 return t90
87ff55a196f01b8f1afd78381e7d012eafa079fa
709,596
def is_insertion(ref, alt): """Is alt an insertion w.r.t. ref? Args: ref: A string of the reference allele. alt: A string of the alternative allele. Returns: True if alt is an insertion w.r.t. ref. """ return len(ref) < len(alt)
17d7d6b8dfdf387e6dd491a6f782e8c9bde22aff
709,597
def get_valid_fields(val: int, cs: dict) -> set: """ A value is valid if there's at least one field's interval which contains it. """ return { field for field, intervals in cs.items() if any(map(lambda i: i[0] <= val <= i[1], intervals)) }
3016e78637374eadf7d0e2029d060538fea86377
709,605
def get_successors(graph):
    """Returns a dict of all successors of each node."""
    d = {}
    for e in graph.get_edge_list():
        src = e.get_source()
        dst = e.get_destination()
        if src in d.keys():
            d[src].add(dst)
        else:
            d[src] = set([dst])
    return d
1ec7b0ab8772dc738758bb14fe4abd5dd4b9074e
709,606
def to_int(text): """Text to integer.""" try: return int(text) except ValueError: return ''
d870ee05c3117111adcf85c91038b19beaf9585b
709,608
def nir_mean(msarr, nir_band=7):
    """
    Calculate the mean of the (unmasked) values of the NIR (near infrared)
    band of an image array. The default `nir_band` value of 7 selects the
    NIR2 band in WorldView-2 imagery. If you're working with a different type
    of imagery, you will need figure out the appropriate value to use instead.

    Parameters
    ----------
    msarr : numpy array (RxCxBands shape)
        The multispectral image array. See `OpticalRS.RasterDS` for more info.
    nir_band : int (Default value = 7)
        The default `nir_band` value of 7 selects the NIR2 band in WorldView-2
        imagery. If you're working with a different type of imagery, you will
        need figure out the appropriate value to use instead. This is a zero
        indexed number (the first band is 0, not 1).

    Returns
    -------
    float
        The mean radiance in the NIR band.
    """
    return msarr[..., nir_band].mean()
7ba6ea8b7d51b8942a0597f2f89a05ecbee9f46e
709,610
def add_parser_arguments_misc(parser):
    """
    Adds the options that the command line parser will search for, some miscellaneous parameters, like use of gpu,
    timing, etc.

    :param parser: the argument parser
    :return: the same parser, but with the added options.
    """
    parser.add_argument('--use_gpu', action='store_true',
                        help='use GPU (CUDA). For loading data on Windows OS, if you get an Access Denied or Operation '
                             'Not Supported for cuda, you must set --loader_num_workers to 0 '
                             '(you can\'t share CUDA tensors among Windows processes).')
    parser.add_argument('--gpu_num', default="0", type=str)
    parser.add_argument('--map_gpu_beginning', action='store_true',
                        help='Will map all tensors (including FULL dataset) to GPU at the start of the instance, if '
                             '--use_gpu flag is supplied and CUDA is available. This option is NOT recommended if you '
                             'have low GPU memory or if you dataset is very large, since you may quickly run out of '
                             'memory.')
    parser.add_argument('--timing', action='store_true',
                        help='if specified, will display times for several parts of training')
    parser.add_argument('--load_args_from_json', type=str, default=None,
                        help='Path to json file containing args to pass. Should be an object containing the keys of '
                             'the attributes you want to change (keys that you don\'t supply will be left unchanged) '
                             'and their values according to their type (int, str, bool, list, etc.)')
    return parser
706ec64dfd6393fd1bd4741568e5e1af1d22a4d0
709,615
from typing import List
from typing import Set


def grouping_is_valid(
    proposed_grouping: List[Set[str]],
    past_groups: List[Set[str]],
    max_intersection_size: int,
) -> bool:
    """Returns true if no group in the proposed grouping intersects with any
    past group with intersection size strictly greater than
    `max_intersection_size`.
    """
    for group in proposed_grouping:
        for past_group in past_groups:
            if len(group & past_group) > max_intersection_size:
                return False
    return True
caeb7568a2e8fddea9058ccc512dc9c06070ece9
709,618
def or_default(none_or_value, default): """ inputs: none_or_value: variable to test default: value to return if none_or_value is None """ return none_or_value if none_or_value is not None else default
43200fe3bd1308eed87de0ad905873fd3c629067
709,621
import re def keyclean(key): """ Default way to clean table headers so they make good dictionary keys. """ clean = re.sub(r'\s+', '_', key.strip()) clean = re.sub(r'[^\w]', '', clean) return clean
0f28f0e92e2817a98a31396949690a46e7538ace
709,622
def value_left(self, right): """ Returns the value of the right type instance to use in an operator method, namely when the method's instance is on the left side of the expression. """ return right.value if isinstance(right, self.__class__) else right
f28c2f0548d3e004e3dd37601dda6c1ea5ab36f6
709,624
def get_mid_surface(in_surfaces): """get_mid_surface gives the mid surface when dealing with the 7 different surfaces Args: (list of strings) in_surfaces : List of path to the 7 different surfaces generated by mris_expand Returns: (string) Path to the mid surface """ return in_surfaces[3]
718ab8fa7a3b716241ae05a4e507f40ab6cb0efd
709,625
def parse_type(msg_type):
    """
    Parse ROS message field type

    :param msg_type: ROS field type, ``str``
    :returns: base_type, is_array, array_length, ``(str, bool, int)``
    :raises: :exc:`ValueError` If *msg_type* cannot be parsed
    """
    if not msg_type:
        raise ValueError("Invalid empty type")
    if '[' in msg_type:
        var_length = msg_type.endswith('[]')
        splits = msg_type.split('[')
        if len(splits) > 2:
            raise ValueError("Currently only support 1-dimensional array types: %s"%msg_type)
        if var_length:
            return msg_type[:-2], True, None
        else:
            try:
                length = int(splits[1][:-1])
                return splits[0], True, length
            except ValueError:
                raise ValueError("Invalid array dimension: [%s]"%splits[1][:-1])
    else:
        return msg_type, False, None
1dfe4f3abb7b69bed17b60ee2666279081666dc6
709,626
def zernike_name(index, framework='Noll'):
    """
    Get the name of the Zernike with input index in input framework (Noll or WSS).

    :param index: int, Zernike index
    :param framework: str, 'Noll' or 'WSS' for Zernike ordering framework
    :return zern_name: str, name of the Zernike in the chosen framework
    """
    noll_names = {1: 'piston', 2: 'tip', 3: 'tilt', 4: 'defocus', 5: 'astig45',
                  6: 'astig0', 7: 'ycoma', 8: 'xcoma', 9: 'ytrefoil', 10: 'xtrefoil',
                  11: 'spherical'}
    wss_names = {1: 'piston', 2: 'tip', 3: 'tilt', 5: 'defocus', 4: 'astig45',
                 6: 'astig0', 8: 'ycoma', 7: 'xcoma', 10: 'ytrefoil', 11: 'xtrefoil',
                 9: 'spherical'}

    if framework == 'Noll':
        zern_name = noll_names[index]
    elif framework == 'WSS':
        zern_name = wss_names[index]
    else:
        raise ValueError('No known Zernike convention passed.')

    return zern_name
33e73739c11bc2340a47162e161ba7d87e26d279
709,628
def xml_string(line, tag, namespace, default=None): """ Get string value from etree element """ try: val = (line.find(namespace + tag).text) except: val = default return val
77745d463cf6604ed787e220fdabf6ff998f770e
709,629
def probabilities (X) -> dict:
    """
    This function maps the set of outcomes found in the sequence of events, 'X',
    to their respective probabilty of occuring in 'X'. The return value is a
    python dictionary where the keys are the set of outcomes and the values are
    their associated probabilities.
    """
    # The set of outcomes, denoted as 'C', and the total events, denoted as 'T'.
    C, T = set(X), len(X)
    return {c: X.count(c) / T for c in C}
c908a1186feea270be71bb1f03485c901bc82733
709,630
def select_daily(ds, day_init=15, day_end=21):
    """
    Select lead time days.

    Args:
        ds: xarray dataset.
        day_init (int): first lead day selection. Defaults to 15.
        day_end (int): last lead day selection. Defaults to 21.

    Returns:
        xarray dataset subset based on time selection.

    ::Lead time indices for reference::

        Week 1:  1,  2,  3,  4,  5,  6,  7
        Week 2:  8,  9, 10, 11, 12, 13, 14
        Week 3: 15, 16, 17, 18, 19, 20, 21
        Week 4: 22, 23, 24, 25, 26, 27, 28
        Week 5: 29, 30, 31, 32, 33, 34, 35
        Week 6: 36, 37, 38, 39, 40, 41, 42
    """
    return ds.isel(lead=slice(day_init, day_end + 1))
9948ecba5acc3c1ca2fe28526585d0bfa81fb862
709,631
import json


def LoadJSON(json_string):
    """Loads json object from string, or None.

    Args:
        json_string: A string to get object from.

    Returns:
        JSON object if the string represents a JSON object, None otherwise.
    """
    try:
        data = json.loads(json_string)
    except ValueError:
        data = None
    return data
598c9b4d5e358a7a4672b25541c9db7743fcd587
709,634
def map_aemo_facility_status(facility_status: str) -> str:
    """
    Maps an AEMO facility status to an Opennem facility status
    """
    unit_status = facility_status.lower().strip()

    if unit_status.startswith("in service"):
        return "operating"
    if unit_status.startswith("in commissioning"):
        return "commissioning"
    if unit_status.startswith("committed"):
        return "committed"
    if unit_status.startswith("maturing"):
        return "maturing"
    if unit_status.startswith("emerging"):
        return "emerging"

    raise Exception(
        "Could not find AEMO status for facility status: {}".format(
            unit_status
        )
    )
43e1d5e5ea984d36260604cf25f4c7b90d5e56f1
709,635
def load_config_file(config_file):
    """
    Loads the given file into a list of lines

    :param config_file: file name of the config file
    :type config_file: str
    :return: config file as a list (one item per line) as returned by open().readlines()
    """
    with open(config_file, 'r') as f:
        config_document = f.readlines()
    return config_document
6a6e0199566e9ea27db309b2164f323cd5f57fdc
709,638
from pathlib import Path


def check_overwrite(path: str, overwrite: bool = False) -> str:
    """
    Check if a path exists, if so raising a RuntimeError if overwriting is disabled.

    :param path: Path
    :param overwrite: Whether to overwrite
    :return: Path
    """
    if Path(path).is_file() and not overwrite:
        raise RuntimeError(
            f"Requested existing {path!r} as output, but overwriting is disabled."
        )
    return path
961affdcc87b055cdd5acb9a28547ef87ae426b9
709,640
def mulaw_to_value(mudata): """Convert a mu-law encoded value to linear.""" position = ((mudata & 0xF0) >> 4) + 5 return ((1 << position) | ((mudata & 0xF) << (position - 4)) | (1 << (position - 5))) - 33
2ccca7f13861c7a212ac3a1dd2afc439839b19a7
709,643
import re


def test_invalid_patterns(list, pattern):
    """
    Function to facilitate the tests in MyRegExTest class

    :param list: list with strings of invalid cases
    :param pattern: a regular expression
    :return: list with the result of all matches which should be a list of None
    """
    newList = []
    for item in list:
        matched = re.match(pattern, item)
        if matched is None:
            newList.append(None)
        else:
            raise ValueError(item + ' matched to ' + pattern + ' while it should not have matched')
    return newList
94a8232d66ff4c705e7a587aedc9d1cbe0b4f072
709,645