content: string (length 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
def _generate_tags(encoding_type, number_labels=4):
    """
    :param encoding_type: e.g. BIOES, BMES, BIO, etc.
    :param number_labels: how many labels, must be greater than 1
    :return: tag vocabulary mapping each tag string to an index
    """
    vocab = {}
    for i in range(number_labels):
        label = str(i)
        for tag in encoding_type:
            if tag == 'O':
                if tag not in vocab:
                    vocab['O'] = len(vocab) + 1
                continue
            vocab['{}-{}'.format(tag, label)] = len(vocab) + 1  # in effect a 1-based running count of the tags seen so far
    return vocab
36125c684be9dc1d0abc522d536276be7e3d7328
136
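A quick usage sketch for the _generate_tags record above (hypothetical call; the output simply traces the 1-based indexing logic):

vocab = _generate_tags("BIO", number_labels=2)
# -> {'B-0': 1, 'I-0': 2, 'O': 3, 'B-1': 4, 'I-1': 5}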
import re


def _name_xform(o):
    """Transform names to lowercase, without symbols (except underscore).

    Any chars other than alphanumeric are converted to an underscore.
    """
    return re.sub(r"\W", "_", o.lower())
8ea563f805493d8885c143d9c2e2e54447ef19e8
138
def runner(app):
    """Create a test runner for invoking the Click commands registered on the application."""
    return app.test_cli_runner()
f9ffb3040045e0789a5686eb9a80f3fdef126a9d
139
def extrapolate_coverage(lines_w_status):
    """
    Given the following input:

    >>> lines_w_status = [
        (1, True),
        (4, True),
        (7, False),
        (9, False),
    ]

    Return expanded lines with their extrapolated line status.

    >>> extrapolate_coverage(lines_w_status) == [
        (1, True),
        (2, True),
        (3, True),
        (4, True),
        (5, None),
        (6, None),
        (7, False),
        (8, False),
        (9, False),
    ]
    """
    lines = []

    prev_lineno = 0
    prev_status = True
    for lineno, status in lines_w_status:
        while (lineno - prev_lineno) > 1:
            prev_lineno += 1
            if prev_status is status:
                lines.append((prev_lineno, status))
            else:
                lines.append((prev_lineno, None))
        lines.append((lineno, status))
        prev_lineno = lineno
        prev_status = status

    return lines
e7685359f570ae979f2421c3a64513409b9df352
140
def extract_mesh_descriptor_id(descriptor_id_str: str) -> int: """ Converts descriptor ID strings (e.g. 'D000016') into a number ID (e.g. 16). """ if len(descriptor_id_str) == 0: raise Exception("Empty descriptor ID") if descriptor_id_str[0] != "D": raise Exception("Expected descriptor ID to start with 'D', {}".format(descriptor_id_str)) return int(descriptor_id_str[1:])
9f013eadee9a149b9617e4a1c058bbe67c6dd8ba
141
def lerp(x0, x1, t): """ Linear interpolation """ return (1.0 - t) * x0 + t * x1
82d9ce36dd5879c7aab64dc5615a2fb298471383
143
def backend_is_up(backend): """Returns whether a server is receiving traffic in HAProxy. :param backend: backend dict, like one of those returned by smartstack_tools.get_multiple_backends. :returns is_up: Whether the backend is in a state that receives traffic. """ return str(backend['status']).startswith('UP')
9cb729bc14821b97d21d3d864c3ca7a1d6d46085
145
def compare_elements(prev_hash_dict, current_hash_dict): """Compare elements that have changed between prev_hash_dict and current_hash_dict. Check if any elements have been added, removed or modified. """ changed = {} for key in prev_hash_dict: elem = current_hash_dict.get(key, '') if elem == '': changed[key] = 'deleted' elif elem != prev_hash_dict[key]: changed[key] = 'changed' for key in current_hash_dict: elem = prev_hash_dict.get(key, '') if elem == '': changed[key] = 'added' return changed
2f24863a16aca86ccd3a82a4148b34282349e640
146
from typing import List, Optional


def _check_str_input(var, input_name: str, valid_options: Optional[List[str]] = None) -> str:
    """
    _check_str_input

    Convenience function to check if an input is a string. If argument valid_options is given, this
    function will also check that var is a valid option from the valid_options specified.

    Parameters
    ----------
    var
        the input variable to check
    input_name : str
        the name of the variable to include if an error is raised
    valid_options: List[str], optional
        a list of valid options for var

    Returns
    -------
    str
        the input var after lowering and stripping the string
    """
    if not isinstance(var, str):
        raise ValueError("Invalid input {0} for {1}. Input {1} must be a string.".format(
            var, input_name))

    var = var.strip().lower()

    if valid_options is not None:
        valid_options = [option.strip().lower() for option in valid_options]
        if var not in valid_options:
            raise ValueError("Invalid input {0} for {1}. Input {1} must be one of the following "
                             "options: {2}.".format(var, input_name, valid_options))

    return var
357a8516fe65dddb35b7799ddc68b892da75ea02
147
def removeDuplicates(bookmarks, newBookmarks): """Creates and returns a new list of bookmarks without any duplicates""" nodup = [] for bmNew in newBookmarks: foundDup = False for bm in bookmarks: if (bm.linkURL == bmNew.linkURL): foundDup = True break if (not foundDup): nodup.append(bmNew) return nodup
12280e827796b95be30f645c5ca0e495379d6a55
148
def test_get_batch(source): """ Creates an input/target pair for evaluation """ seq_len = len(source) - 1 data = source[:seq_len] target = source[1:1+seq_len].view(-1) return data, target
0c26f9f957063bb136f9fe77ed1a8bbdedc38a15
149
import os def hash_bower_component(hash_obj, path): """Hash the contents of a bower component directory. This is a stable hash of a directory downloaded with `bower install`, minus the .bower.json file, which is autogenerated each time by bower. Used in lieu of hashing a zipfile of the contents, since zipfiles are difficult to hash in a stable manner. Args: hash_obj: an open hash object, e.g. hashlib.sha1(). path: path to the directory to hash. Returns: The passed-in hash_obj. """ if not os.path.isdir(path): raise ValueError('Not a directory: %s' % path) path = os.path.abspath(path) for root, dirs, files in os.walk(path): dirs.sort() for f in sorted(files): if f == '.bower.json': continue p = os.path.join(root, f) hash_obj.update(p[len(path)+1:].encode("utf-8")) hash_obj.update(open(p, "rb").read()) return hash_obj
2c80251b43477df295469989575f8cf47d5c3397
150
def _check_sample(sample_pair: dict):
    """
    Validates a sample.

    Parameters
    ----------
    sample_pair : dict
        Sample that must contain an image and a mask: {'image': image, 'mask': mask}

    Returns
    -------
    sample : dict
        The validated sample: {'image': image, 'mask': mask}
    """
    if isinstance(sample_pair, dict):
        if len(sample_pair) != 2:
            raise ValueError(
                "Sample must contain image and mask: {'image': image, 'mask': mask}"
            )
    else:
        raise TypeError("Sample must be a dict like: {'image': image, 'mask': mask}")

    return sample_pair
112e9e46a753f8754d25ddbfe2505535c2f9ac96
153
def ps(s): """Process String: convert a string into a list of lowercased words.""" return s.lower().split()
9bf25b31d00544d96f96564ce67ff5def9a16348
156
import pandas def to_float(dataframe, column): """General Function to return floats""" dataframe[column] = dataframe[column].dropna().astype(float) dataframe[column] = dataframe[column].where(pandas.notnull(dataframe[column]), None) return dataframe[column]
2fdae992ec88e40c1e8c67711373d28390569166
157
from typing import Optional


def binary_search(pool: list, target) -> Optional[int]:
    """Search for a target in a list, using binary search.

    Args:
        pool (list): a pool of all elements being searched.
        target: the target being searched.

    Returns:
        int: the index of the target in the sorted copy of the pool, or None if not found.
    """
    sorted_pool = sorted(pool)
    low = 0
    high = len(sorted_pool) - 1
    # Classic inclusive-bounds binary search; the original loop condition
    # (low + 1 != high) could loop forever or miss elements at the ends.
    while low <= high:
        mid = (low + high) // 2
        if sorted_pool[mid] == target:
            return mid
        if sorted_pool[mid] < target:
            low = mid + 1
        else:
            high = mid - 1
    return None
7e7ef70126e02b3dc706b3b88bd950aa6322904e
158
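A minimal usage sketch for the binary_search record above (assuming the corrected loop shown there); note that the returned index refers to the sorted copy of the pool, not the original ordering:

binary_search([30, 10, 20], 20)   # -> 1, i.e. the index in sorted([30, 10, 20]) == [10, 20, 30]
binary_search([30, 10, 20], 25)   # -> None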
def is_rotation(first, second): """Given two strings, is one a rotation of the other.""" if len(first) != len(second): return False double_second = second + second return first in double_second
f02576761014e1dc395f88f937dfdd0de15508d2
159
def bin_entities(uri_set, delimiter="/", splitpos=-1):
    """ Takes an iterable of elements and splits each one at the position
    (splitpos) of the delimiter. The first part is used as a key, whereas the
    second is appended to a list stored under that key.

    return: dict {key1: [id11, id12, id13, ...], key2: [...]}
    """
    ent_dict = dict()
    for res in uri_set:
        # split entity up to splitpos using delimiter
        entity = delimiter.join(res.split(delimiter)[:splitpos])
        # id_ is the remainder
        id_ = delimiter.join(res.split(delimiter)[splitpos:])
        if entity in ent_dict:
            ent_dict[entity].append(id_)
        else:
            ent_dict[entity] = [id_]
    return ent_dict
fcbcddbff909d74fe14fe7cb3a21560c8ca9549a
160
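A small illustration of bin_entities above, with hypothetical URIs:

uris = ["http://ex.org/person/1", "http://ex.org/person/2", "http://ex.org/place/7"]
bin_entities(uris)
# -> {'http://ex.org/person': ['1', '2'], 'http://ex.org/place': ['7']}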
def display_timestamp(num_seconds): """get a string to conveniently display a timestamp""" seconds = num_seconds % 60 minutes = int(num_seconds / 60) % 60 hrs = int(num_seconds / 3600) return "{}:{}:{}".format(hrs, minutes, seconds)
bdcc34ade38855df910d5005f6dac9b5e826f543
161
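A worked example of display_timestamp above; note that the components are not zero-padded:

display_timestamp(3725)   # -> '1:2:5'  (1 hour, 2 minutes, 5 seconds)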
def filename(config, key, ext='.h5', set=''):
    """
    Get the real file name by looking up the key in the config and suffixing.

    :param key: key to use in the config
    :type key: str
    :param ext: extension to use
    :type ext: str
    :param set: set name
    :type set: str
    :return: filepath
    :rtype: str
    """
    name = config[key] + '_'
    if set:
        name += set + '_'
    name += str(config['multiplier']) + '_' + str(config['height']) + 'x' + str(config['width']) + 'x' + str(config['depth'])
    if ext:
        name += ext
    return name
f389a48e7e06a31722423857814149f474e46316
162
def isUp(): """ Whether this docker container is up """ return 'True'
e99c32dee79c4df516193c1a9d3fb8d34f8b0abc
163
def rand_perm_(img, x, y, x_max, y_max, kernel, flatten): """ Applies INPLACE the random permutation defined in `kernel` to the image `img` on the zone defined by `x`, `y`, `x_max`, `y_max` :param img: Input image of dimension (B*C*W*H) :param x: offset on x axis :param y: offset on y axis :param x_max: end of the zone to permute on the x axis :param y_max: end of the zone to permute on the y axis :param kernel: LongTensor of dim 1 containing one value for each point in the zone to permute :return: the permuted image. """ assert img.dim() == 4 if img.size(1) != 1: raise NotImplementedError('Not Implemented for multi-channel images') zone = img[:, :, x:x_max, y:y_max].contiguous() img[:, :, x:x_max, y:y_max] = zone.view(zone.size(0), -1)\ .index_select(1, kernel).view(zone.size()) return img.view(img.size(0), -1) if flatten else img
c838840c2428320825486c0cdacf23f5fb40a9a6
164
def get_urls(page_links): """Insert page links, return list of url addresses of the json""" urls = [] for link in page_links: link1 = link.replace('v3', 'VV') game_id = ''.join([char for char in link1 if char in list(map(str, list(range(10))))]) json_url = f'http://www.afa.com.ar/deposito/html/v3/htmlCenter/data/deportes/futbol/primeraa/events/{game_id}.json' urls.append(json_url) return urls
68c6796ad5a77676674252a0060776eabc4fb8e0
166
def Weekday(datetime): """Returns a weekday for display e.g. Mon.""" return datetime.strftime('%a')
bae413f0fa86f9e27bd6d7f6ee4480a6ddd564e7
168
def _process_null(_): """ Placeholder for an efficient replacement for when no columns of a `WaveformReducer` are activated. """ return dict()
377b355104a01d93916e8a5e91934f0be79d1b13
169
def FlowBalance_rule(model, node): """Ensures that flows into and out of a node are equal """ return model.Supply[node] \ + sum(model.Flow[i, node] for i in model.NodesIn[node]) \ - model.Demand[node] \ - sum(model.Flow[node, j] for j in model.NodesOut[node]) \ == 0
628e8e2bb6967c9114dfcb8ea449d760180ab206
170
def create_subword_vocab(input_data, subword_size): """create subword vocab from input data""" def generate_subword(word, subword_size): """generate subword for word""" subwords = [] chars = list(word) char_length = len(chars) for i in range(char_length-subword_size+1): subword = ''.join(chars[i:i+subword_size]) subwords.append(subword) return subwords subword_vocab_lookup = {} for sentence in input_data: words = sentence.strip().split(' ') for word in words: word_vocabs = [word, word.lower(), word.capitalize(), word.upper()] for word_vocab in word_vocabs: subword_vocabs = generate_subword(word_vocab, subword_size) for subword_vocab in subword_vocabs: if subword_vocab not in subword_vocab_lookup: subword_vocab_lookup[subword_vocab] = 1 else: subword_vocab_lookup[subword_vocab] += 1 return subword_vocab_lookup
651f9da1e1df0f8b78168870bbdec6b4aff65425
171
def rrange(x, y = 0): """ Creates a reversed range (from x - 1 down to y). Example: >>> rrange(10, 0) # => [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] """ return range(x - 1, y - 1, -1)
37c41673dab3fca797f4f6f0ab2f8160e7650248
172
def get_key(my_dict: dict, val):
    """Get a key from a dictionary by its value.

    Args:
        my_dict: dict: the dictionary to search
        val: the value to look up

    Returns:
        The first key whose value equals val, or the string "key doesn't exist".
    """
    for key, value in my_dict.items():
        if val == value:
            return key

    return "key doesn't exist"
99bb74468b4dd5bb02c6f642a86c345365d8d616
173
import math


def col_round(x):
    """
    Python 3 rounds a 0.5 fraction to the closest even number, so floor and
    ceil are used here to round 0.5 up to the next digit and 0.4 down to the
    previous one.
    """
    frac = x - math.floor(x)
    if frac < 0.5:
        return math.floor(x)
    return math.ceil(x)
3f21a6dcc525daebf78c9adfd6afee9ba865399b
175
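Two hypothetical calls tracing col_round above:

col_round(2.5)   # -> 3  (0.5 fractions are rounded up)
col_round(2.4)   # -> 2  (smaller fractions are rounded down)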
import os
import pwd
import socket


def get_default_sender():
    """
    Determines the sender / to address for outgoing emails.
    """
    try:
        return os.environ["EMAIL"]
    except KeyError:
        # Guess.
        # Not sure if euid is the right one to use here.
        user = pwd.getpwuid(os.geteuid()).pw_name
        host = socket.getfqdn()
        return f"{user}@{host}"
987e85327aea314c11122a926804b6d090b2d1a1
177
def get_best_response_actions_as_string(best_response_actions): """Turns a dict<bytes, int> into a bytestring compatible with C++. i.e. the bytestring can be copy-pasted as the brace initialization for a {std::unordered_,std::,absl::flat_hash_}map<std::string, int>. Args: best_response_actions: A dict mapping bytes to ints. Returns: A bytestring that can be copy-pasted to brace-initialize a C++ std::map<std::string, T>. """ best_response_keys = sorted(best_response_actions.keys()) best_response_strings = [ "%s: %i" % (k, best_response_actions[k]) for k in best_response_keys ] return "{%s}" % (", ".join(best_response_strings))
cf2b475d6bb76d262c17dc7753f1624e38cc69f4
178
def build_genome(tree, genome): """ Goes through a tree and builds a genome from all codons in the subtree. :param tree: An individual's derivation tree. :param genome: The list of all codons in a subtree. :return: The fully built genome of a subtree. """ if tree.codon: # If the current node has a codon, append it to the genome. genome.append(tree.codon) for child in tree.children: # Recurse on all children. genome = child.build_genome(genome) return genome
67fd7a23a9ca812717bde5d3e35affc5cc7474f4
179
def conv_bboxinfo_bboxXYHW_to_centerscale(bbox_xyhw, bLooseBox = False): """ from (bbox_xyhw) -> (center, scale) Args: bbox_xyhw: [minX,minY,W,H] bLooseBox: if true, draw less tight box with sufficient margin (SPIN's default setting) Output: center: bbox center scale: scaling images before cropping. reference size is 200 pix (why??). >1.0 means size up, <1.0 means size down. See get_transform() h = 200 * scale t = np.zeros((3, 3)) t[0, 0] = float(res[1]) / h t[1, 1] = float(res[0]) / h t[0, 2] = res[1] * (-float(center[0]) / h + .5) t[1, 2] = res[0] * (-float(center[1]) / h + .5) t[2, 2] = 1 """ center = [bbox_xyhw[0] + bbox_xyhw[2]/2, bbox_xyhw[1] + bbox_xyhw[3]/2] if bLooseBox: scaleFactor =1.2 scale = scaleFactor*max(bbox_xyhw[2], bbox_xyhw[3])/200 #This is the one used in SPIN's pre-processing. See preprocessdb/coco.py else: scale = max(bbox_xyhw[2], bbox_xyhw[3])/200 return center, scale
1d85f9ee0ee6db00877eeb091729d2748fec08cf
180
def GuessSlugFromPath(path): """Returns the slug.""" if path.endswith('index.md'): # If it ends with index, get the second last path component. return path.split('/')[-2] else: # Otherwise, just get the filename. return path.split('/')[-1].split('.')[0]
ff0c8f4f12fdc1ddf684393408a725a0d4c3ce0e
182
import argparse def args(): """Setup argument Parsing.""" parser = argparse.ArgumentParser( usage='%(prog)s', description='OpenStack Inventory Generator', epilog='Inventory Generator Licensed "Apache 2.0"') parser.add_argument( '-f', '--file', help='Inventory file.', required=False, default='openstack_inventory.json' ) parser.add_argument( '-s', '--sort', help='Sort items based on given key i.e. physical_host', required=False, default='component' ) exclusive_action = parser.add_mutually_exclusive_group(required=True) exclusive_action.add_argument( '-r', '--remove-item', help='host name to remove from inventory, this can be used multiple' ' times.', action='append', default=[] ) exclusive_action.add_argument( '-d', '--remove-group', help='group name to remove from inventory, this can be used multiple' ' times.', action='append', default=[] ) exclusive_action.add_argument( '-l', '--list-host', help='', action='store_true', default=False ) exclusive_action.add_argument( '-g', '--list-groups', help='List groups and containers in each group', action='store_true', default=False ) exclusive_action.add_argument( '-G', '--list-containers', help='List containers and their groups', action='store_true', default=False ) exclusive_action.add_argument( '-e', '--export', help='Export group and variable information per host in JSON.', action='store_true', default=False ) exclusive_action.add_argument( '--clear-ips', help='''Clears IPs from the existing inventory, but leaves all other information intact. LXC interface files and load balancers will *not* be modified.''', action='store_true', default=False ) return vars(parser.parse_args())
9a92d138d6b73f423b3a98d5fa046be061fe06ff
185
def diff_pf_potential(phi): """ Derivative of the phase field potential. """ return phi**3-phi
c22af096d27cf817ffee683453ecafb4e5c61cdc
186
def resolve_alias(term: str) -> str: """ Resolves search term aliases (e.g., 'loc' for 'locations'). """ if term in ("loc", "location"): return "locations" elif term == "kw": return "keywords" elif term == "setting": return "setting" elif term == "character": return "characters" else: return term
8080d6ffb73457fd61aeca610b30b18695ec01bd
188
import re def normalize_middle_high_german( text: str, to_lower_all: bool = True, to_lower_beginning: bool = False, alpha_conv: bool = True, punct: bool = True, ): """Normalize input string. to_lower_all: convert whole text to lowercase alpha_conv: convert alphabet to canonical form punct: remove punctuation >>> from cltk.alphabet import gmh >>> from cltk.languages.example_texts import get_example_text >>> gmh.normalize_middle_high_german(get_example_text("gmh"))[:50] 'ik gihorta ðat seggen\\nðat sih urhettun ænon muotin' """ if to_lower_all: text = text.lower() if to_lower_beginning: text = text[0].lower() + text[1:] text = re.sub(r"(?<=[\.\?\!]\s)(\w)", lambda x: x.group(1).lower(), text) if alpha_conv: text = ( text.replace("ē", "ê") .replace("ī", "î") .replace("ā", "â") .replace("ō", "ô") .replace("ū", "û") ) text = text.replace("ae", "æ").replace("oe", "œ") if punct: text = re.sub(r"[\.\";\,\:\[\]\(\)!&?‘]", "", text) return text
543a69175cd78bf2678bd0b173e1112e96d75fd8
189
def dict_has_key_and_value_include_str(the_dict, key, str):
    """Check that the dict contains the given key and that the key's value contains the given string fragment."""
    if key in the_dict:
        if str in the_dict[key]:
            return True
    return False
56058581914233c9520986db7f80c4b879443e97
190
from typing import List from typing import Any from functools import reduce import sys def get_list_size(ls:List[Any]) -> float: """Return size in memory of a list and all its elements""" return reduce(lambda x, y: x + y, (sys.getsizeof(v) for v in ls), 0) + sys.getsizeof(ls)
d930f3ef4ca9c5728153591d15e9b55211225d9a
191
def cmyk_to_rgb(c, m, y, k): """ """ r = (1.0 - c) * (1.0 - k) g = (1.0 - m) * (1.0 - k) b = (1.0 - y) * (1.0 - k) return r, g, b
03ece22efe6f88ff6e9f2825c72bcb4b18a238ef
193
def get_wildcard_values(config): """Get user-supplied wildcard values.""" return dict(wc.split("=") for wc in config.get("wildcards", []))
0ca15b82ebed47dec9d46991cb4db45ee72eb3af
195
def add_standard_attention_hparams(hparams): """Adds the hparams used by get_standadized_layers.""" # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. # hparams used and which should have been defined outside (in # common_hparams): # Global flags # hparams.mode # hparams.hidden_size # Pre-post processing flags # hparams.layer_preprocess_sequence # hparams.layer_postprocess_sequence # hparams.layer_prepostprocess_dropout # hparams.norm_type # hparams.norm_epsilon # Mixture-of-Expert flags # hparams.moe_hidden_sizes # hparams.moe_num_experts # hparams.moe_k # hparams.moe_loss_coef # Attention layers flags hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("attention_dropout", 0.0) # Attention: Local hparams.add_hparam("attention_loc_block_length", 256) # Attention: Local (unmasked only): How much to look left. hparams.add_hparam("attention_loc_block_width", 128) # Attention: Memory-compressed hparams.add_hparam("attention_red_factor", 3) hparams.add_hparam("attention_red_type", "conv") hparams.add_hparam("attention_red_nonlinearity", "none") # Fully connected layers flags # To be more consistent, should use filter_size to also control the MOE # size if moe_hidden_sizes not set. hparams.add_hparam("filter_size", 2048) hparams.add_hparam("relu_dropout", 0.0) return hparams
de9f1a3b30a105a89d3400ca0b36e4c747f1ab46
198
def Storeligandnames(csv_file):
    """It identifies the names of the ligands in the csv file

    PARAMETERS
    ----------
    csv_file : filename of the csv file with the ligands

    RETURNS
    -------
    lig_list : list of ligand names (list of strings)
    """
    lig_list = []
    with open(csv_file, "rt") as Lig:
        for ligand in Lig:
            lig_list.append(ligand.replace(" ", "_").replace("\n", "").lower())
    return lig_list
dc4510a4ea946eaf00152cb445acdc7535ce0379
199
import requests import logging def upload(filename, url, token=None): """ Upload a file to a URL """ headers = {} if token: headers['X-Auth-Token'] = token try: with open(filename, 'rb') as file_obj: response = requests.put(url, data=file_obj, timeout=120, headers=headers, verify=False) except requests.exceptions.RequestException as err: logging.warning('RequestException when trying to upload file %s: %s', filename, err) return None except IOError as err: logging.warning('IOError when trying to upload file %s: %s', filename, err) return None if response.status_code == 200 or response.status_code == 201: return True return None
eb8a8060294322bd9df187c8076d8f66b4dc775c
202
def flatmap(fn, seq): """ Map the fn to each element of seq and append the results of the sublists to a resulting list. """ result = [] for lst in map(fn, seq): for elt in lst: result.append(elt) return result
c42d07f712a29ece76cd2d4cec4f91ec2562a1c0
203
def process_row(row, fiscal_fields): """Add and remove appropriate columns. """ surplus_keys = set(row) - set(fiscal_fields) missing_keys = set(fiscal_fields) - set(row) for key in missing_keys: row[key] = None for key in surplus_keys: del row[key] assert set(row) == set(fiscal_fields) return row
1c55fe628b53be72633d2fcae7cc1fbac91d04ae
206
def DefaultTo(default_value, msg=None): """Sets a value to default_value if none provided. >>> s = Schema(DefaultTo(42)) >>> s(None) 42 """ def f(v): if v is None: v = default_value return v return f
10401d7214d15c2b0bf28f52430ef71b5df0a116
207
import re from typing import Literal def extract_text( pattern: re.Pattern[str] | str, source_text: str, ) -> str | Literal[False]: """Match the given pattern and extract the matched text as a string.""" match = re.search(pattern, source_text) if not match: return False match_text = match.groups()[0] if match.groups() else match.group() return match_text
a6f762cfd26dd1231db4b6e88247e2566d186212
208
import torch


def rotate_tensor(l: torch.Tensor, n: int = 1) -> torch.Tensor:
    """Rotate a tensor by n positions to the left (the first n elements wrap around to the end).

    Args:
        l (torch.Tensor): input tensor
        n (int, optional): positions to rotate. Defaults to 1.

    Returns:
        torch.Tensor: rotated tensor
    """
    return torch.cat((l[n:], l[:n]))
9cdaa7be718f0676ad85e05b01ee918459697c60
210
def mark_as_widget(view): """ Marks @view as a widget so we can later inspect that attribute, for example, when hiding panels in _vi_enter_normal_mode. Used prominently by '/', '?' and ':'. XXX: This doesn't always work as we expect. For example, changing settings to a panel created instants before does not make those settings visible when the panel is activated. Investigate. We still need this so that contexts will ignore widgets, though. However, the fact that they are widgets should suffice to disable Vim keys for them... """ view.settings().set('is_vintageous_widget', True) return view
965555660b82f834e09ba3ffc985755d4fd7fa66
211
def get_label_for_line(line, leg): """ Can't remember what I was using this for but seems useful to keep """ # leg = line.figure.legends[0] # leg = line.axes.get_legend() for h, t in zip(leg.legendHandles, leg.texts): if h.get_label() == line.get_label(): return t.get_text()
4180ae7fd7fe5b98ebafa20fbdf2528205e4ec31
212
def _node_parent_listener(target, value, oldvalue, initiator): """Listen for Node.parent being modified and update path""" if value != oldvalue: if value is not None: if target._root != (value._root or value): target._update_root(value._root or value) target._update_path(newparent=value) else: # This node just got orphaned. It's a new root target._update_root(target) target._update_path(newparent=target) return value
06c06b144c777f33673e2051f1d4173204720f65
213
def grelha_nr_colunas(g):
    """
    grelha_nr_colunas: grid --> positive integer
    grelha_nr_colunas(g) returns the number of columns of the grid g.
    """
    return len(g[0])
740b06c186ad1455aecadfaf112f253fb434d5ff
214
def readFile(sFile, sMode='rb'):
    """ Reads the entire file. """
    oFile = open(sFile, sMode)
    sRet = oFile.read()
    oFile.close()
    return sRet
d44e8217ae7dcab1c826ccbbe80e066d76db31b5
215
import re def clean_text_from_multiple_consecutive_whitespaces(text): """Cleans the text from multiple consecutive whitespaces, by replacing these with a single whitespace.""" multi_space_regex = re.compile(r"\s+", re.IGNORECASE) return re.sub(multi_space_regex, ' ', text)
f25b27da070d6a984012a4cb5b1ae4a477713033
220
import re def run(filename): """ MUST HAVE FUNCTION! Begins the plugin processing Returns a list of endpoints """ run_results = set() r_rule = re.compile(r"(Route\(\"[^,)]+)", flags=re.IGNORECASE) for line in filename: try: route_match = r_rule.search(line) if route_match: run_results.add(route_match.group(1)[7:-1]) except Exception: # Print the offending line the BurpSuite's extension Output tab print("Error! Couldn't parse: %s" % line) return list(run_results)
e5ad233e3c3e07769b2f8f61657fa712b1f151c4
221
def dict_comparator(first_dict, second_dict):
    """
    Checks whether the two dictionaries contain the same set of key-value pairs.
    Returns True if they match, otherwise False.
    """
    if set(first_dict.keys()) != set(second_dict.keys()):
        return False
    for key, value in first_dict.items():
        if value != second_dict[key]:
            return False
    return True
47f28e8810b8437cc0e3bfca6ccba6734c988890
222
def word_check(seq1, seq2, word):
    """Returns the position in seq2 of the first substring of seq1 of length
    word that seq2 contains, or -1 if there is no such shared substring."""
    for i in range(len(seq1) - word + 1):
        if seq2.find(seq1[i:i + word]) > -1:
            return seq2.find(seq1[i:i + word])
    return -1
86b4cad571fdbf55073f30f9c5fd9a5e25da46d7
223
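A hypothetical call tracing word_check above:

word_check("ABCDEF", "XXCDEXX", 3)   # -> 2, the position of the shared substring "CDE" in seq2
word_check("ABCDEF", "XYZ", 3)       # -> -1, no shared substring of length 3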
def plasma_parameter(N_particles, N_grid, dx): """ Estimates the plasma parameter as the number of particles per step. Parameters ---------- N_particles : int, float Number of physical particles N_grid : int Number of grid cells dx : float grid step size """ return (N_particles / N_grid) * dx
51d3b96ccba2689db461fd6117cb5c2961dc3812
224
import re


def validate_name_dynamotable(table_name):
    """Validate if table name matches DynamoDB naming standards."""
    if not isinstance(table_name, str):
        raise ValueError('Input argument "table_name" must be a string')

    if len(table_name) < 3 or len(table_name) > (255 - 5):
        # note: deduct 5 chars to allow postfix space (e.g. for .lock)
        return (False, 'TableName should be of length: [3-255]')
    if not re.match(r'^[a-zA-Z0-9]', table_name):
        return (False, 'TableName should start with a letter or number')
    if re.search(r'[-\._]{2}', table_name):
        return (False, 'TableName can\'t contain two special characters [-, ., _] in a row')
    if not re.match(r'^[-a-zA-Z0-9\._]*$', table_name):
        return (False, re.sub(' +', ' ', 'TableName contains invalid character. \
                Allowed characters: [a-z, A-Z, 0-9, \'.\', \'-\', \'_\']'))
    return (True, 'Success')
139391e3ece6cacae24d5bd72fd0fd77b65ecc41
226
def single_model_embeddings_specify(single_model_embeddings): """Returns an instance of MultiTaskLSTMCRF initialized with the default configuration file, loaded embeddings and single specified model.""" single_model_embeddings.specify() return single_model_embeddings
fe23c571ca29dbbf87cbccdbfc1e11aaaf784c01
227
import bz2 import gzip import json def load_json(filename): """ Load a JSON file that may be .bz2 or .gz compressed """ if '.bz2' in filename: with bz2.open(filename, 'rt') as infile: return json.load(infile) elif '.gz' in filename: with gzip.open(filename, 'rt') as infile: return json.load(infile) else: with open(filename, 'rt') as infile: return json.load(infile)
1b985db386e85c3b8e87911d89a7652133bfee7b
228
import sys def getopt(clf, ret_val, isbool=False): """ Command Line Option input parser""" found = [] def getCLO(flag): iindx = sys.argv.index(flag) sys.argv.pop(iindx) return sys.argv.pop(iindx) if isbool: return (clf in sys.argv) while clf in sys.argv: found.append(getCLO(clf)) if found: ret_val = [found, found[0]][int(len(found) == 1)] return ret_val
9f7738bb308d2875f2c229dd7e33f7aae6981733
229
def check_score(encoding, min_qual, qual_str): """Return True if the average quality score is at least min_qual """ qscores = [encoding[q] for q in qual_str] return sum(qscores) >= min_qual * len(qscores)
427dd8617d5ab425e3b7989923a271599fc7371a
231
import functools import warnings def add_unsafe_warning(func, fig): """ Generate warning if not supported by Paxplot """ @functools.wraps(func) def wrapper(*args, **kwargs): if fig._show_unsafe_warning: warnings.warn( f'The function you have called ({func.__name__}) is not ' 'officially supported by Paxplot, but it may still work. ' 'Report issues to ' 'https://github.com/kravitsjacob/paxplot/issues', Warning ) return func(*args, **kwargs) return wrapper
8bca3fbc514315cd4c761b2e8f7f1168e01af7a9
232
def is_meeting_approved(meeting): """Returns True if the meeting is approved""" if meeting.session_set.first().status.slug == 'apprw': return False else: return True
0dca106890d195f613477334d2bb6187c1587e15
233
def reversebits5(max_bits, num): """ Like reversebits4, plus optimizations regarding leading zeros in original value. """ rev_num = 0 shifts = 0 while num != 0 and shifts < max_bits: rev_num |= num & 1 num >>= 1 rev_num <<= 1 shifts += 1 rev_num >>= 1 rev_num <<= (max_bits - shifts) return rev_num
ada43721780d512cda73c30d0279216b709501fc
234
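A worked example of reversebits5 above, reversing the low 8 bits of 13:

reversebits5(8, 0b00001101)   # -> 0b10110000 == 176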
def rescale(img, thresholds): """ Linear stretch of image between two threshold values. """ return img.subtract(thresholds[0]).divide(thresholds[1] - thresholds[0])
76d5f56384f408e57161848ded85142e68296258
235
import string def upper_to_title(text, force_title=False): """Inconsistently, NiH has fields as all upper case. Convert to titlecase""" if text == text.upper() or force_title: text = string.capwords(text.lower()) return text
939515204b841c5443c5767da20712dff684d286
236
def historico(historia="", sintomas="", medicamentos=""):
    """History: record the patient's previous illnesses, including past
    symptoms and family medical history.

    Symptoms: describe the patient's current symptoms.

    Medications: drugs and treatments used during the patient's overall care."""
    historia = str(input("Digite o histórico de vida do paciente: "))
    sintomas = str(input("Digite os sintomas do paciente: "))
    medicamentos = str(input("Digite o medicamento a ser usado e a dosagem: "))
    return historia, sintomas, medicamentos
a5bdb6cc6d13c73845650ec8fcd1d18fc1e4feb2
237
def transform(nodes, fxn, *args, **kwargs): """ Apply an arbitrary function to an array of node coordinates. Parameters ---------- nodes : numpy.ndarray An N x M array of individual node coordinates (i.e., the x-coords or the y-coords only) fxn : callable The transformation to be applied to the whole ``nodes`` array args, kwargs Additional positional and keyword arguments that are passed to ``fxn``. The final call will be ``fxn(nodes, *args, **kwargs)``. Returns ------- transformed : numpy.ndarray The transformed array. """ return fxn(nodes, *args, **kwargs)
edc487b7f1b83f750f868ee446ecf2676365a214
238
import os


def GetPostgreSQLLoginInfo():
    """
    * Get database login information from pem file
    """
    passfile = '/mnt/data/other/pem/sinnud_pg.dat'
    # Check that the file exists before trying to open it.
    if not os.path.isfile(passfile):
        return (False, None)
    with open(passfile, 'r') as f:
        passinfo = f.read().strip()
    (host, user, dbname, password, port) = passinfo.split()
    return (True, (host, user, dbname, password, port))
8a37306f05443b6b92b07cf104a6564ad3a71625
239
from typing import Dict from typing import Any import yaml def as_yaml(config: Dict[str, Any], **yaml_args: Any) -> str: """Use PyYAML library to write YAML file""" return yaml.dump(config, **yaml_args)
28c792504d7a6ccd7dbf040d516343e44e072b16
240
def stack_exists(client, stack_name): """ Checks that stack was specified is existing """ cfn_stacks = client.list_stacks() for cfn_stack in cfn_stacks["StackSummaries"]: if cfn_stack['StackName'] == stack_name and "COMPLETE" in cfn_stack['StackStatus'] and "DELETE" not in cfn_stack['StackStatus']: return True return False
8e9476b57300cb030ba5292f83060bb5ae652d19
243
def select(arrays, index): """ Index each array in a tuple of arrays. If the arrays tuple contains a ``None``, the entire tuple will be returned as is. Parameters ---------- arrays : tuple of arrays index : array An array of indices to select from arrays. Returns ------- indexed_arrays : tuple of arrays Examples -------- >>> import numpy as np >>> select((np.arange(5), np.arange(-3, 2, 1)), [1, 3]) (array([1, 3]), array([-2, 0])) >>> select((None, None, None, None), [1, 2]) (None, None, None, None) """ if arrays is None or any(i is None for i in arrays): return arrays return tuple(i.ravel()[index] for i in arrays)
70109fbda58055d9712295dff261a95d99caac03
244
def get_bucket(self):
    """
    Documentation:

        ---
        Description:
            Use bucket name to return a single S3 bucket object.

        ---
        Returns:
            bucket : S3 bucket
                S3 bucket object
    """
    # dictionary mapping bucket names to S3 bucket objects
    buckets = self.get_buckets()

    # check that there is a bucket with that name
    assert self.bucket_name in self.get_bucket_names(), "\nNo S3 bucket with that name.\n"

    # filter buckets by bucket_name
    bucket = buckets[self.bucket_name]

    return bucket
0d8ed3c8557e57fb8094524bc4cb4dcae09fe384
245
def prepend_with_baseurl(files, base_url): """prepend url to beginning of each file Parameters ------ files (list): list of files base_url (str): base url Returns ------ list: a list of files with base url pre-pended """ return [base_url + file for file in files]
4c29b3e9230239c1ff8856c707253608ce2503cd
247
def get_bounding_box(dataframe, dataIdentifier): """Returns the rectangle in a format (min_lat, max_lat, min_lon, max_lon) which bounds all the points of the ´dataframe´. Parameters ---------- dataframe : pandas.DataFrame the dataframe with the data dataIdentifier : DataIdentifier the identifier of the dataframe to be used """ b_box = (getattr(dataframe, dataIdentifier.latitude).min(), getattr(dataframe, dataIdentifier.latitude).max(), getattr(dataframe, dataIdentifier.longitude).min(), getattr(dataframe, dataIdentifier.longitude).max()) return b_box
6989118af8db36cc38fd670f5cd7506859d2150e
249
def stat_cleaner(stat: str) -> int: """Cleans and converts single stat. Used for the tweets, followers, following, and likes count sections. Args: stat: Stat to be cleaned. Returns: A stat with commas removed and converted to int. """ return int(stat.replace(",", ""))
cb6b6035ab21871ca5c00d5d39d9efe87e0acc89
250
import os import json def load_image_ids(img_root, split_dir): """images in the same directory are in the same split""" pathXid = [] img_root = os.path.join(img_root, split_dir) for name in os.listdir(img_root): idx = name.split(".")[0] pathXid.append( ( os.path.join(img_root, name), idx)) if split_dir == 'val2014': print("Place the features of minival in the front of val2014 tsv.") # Put the features of 5000 minival images in front. minival_img_ids = set(json.load(open('data/mscoco_imgfeat/coco_minival_img_ids.json'))) a, b = [], [] for item in pathXid: img_id = item[1] if img_id in minival_img_ids: a.append(item) else: b.append(item) assert len(a) == 5000 assert len(a) + len(b) == len(pathXid) pathXid = a + b assert len(pathXid) == 40504 return pathXid
485675489805a0e3f4ac4dcab2eca8a40992c044
251
def do(ARGV): """Allow to check whether the exception handlers are all in place. """ if len(ARGV) != 3: return False elif ARGV[1] != "<<TEST:Exceptions/function>>" \ and ARGV[1] != "<<TEST:Exceptions/on-import>>": return False if len(ARGV) < 3: return False exception = ARGV[2] if exception == "KeyboardInterrupt": raise KeyboardInterrupt() elif exception == "AssertionError": raise AssertionError() elif exception == "Exception": raise Exception() # If we did not raise an exception here, we didn't do anything print("No exception was triggered.") return False
56b83d119f74a00f1b557c370d75fb9ff633d691
252
import logging import sys def _get_simconffile(args): """ Get experiment config file name from command line """ logger = logging.getLogger('fms') try: simconffile = args[1] except IndexError: logger.critical("Missing simulation config file name.") sys.exit(2) return simconffile
c71e0c58fa5929051f66836ad8fc45c361f94791
253
import subprocess def prime_gen(): """Returns prime based on 172 bit range. Results is 44 char""" x = subprocess.run( ['openssl', 'prime', '-generate', '-bits', '172', '-hex'], stdout=subprocess.PIPE) return x.stdout[:-1]
24c38f0c183367bac9e1a2e04f11d0d58dd503ab
254
def module_for_category( category ): """Return the OpenGL.GL.x module for the given category name""" if category.startswith( 'VERSION_' ): name = 'OpenGL.GL' else: owner,name = category.split( '_',1) if owner.startswith( '3' ): owner = owner[1:] name = 'OpenGL.GL.%s.%s'%( owner,name ) return __import__( name, {}, {}, name.split( '.' ))
0e88467a1dd7f5b132d46a9bdc99765c274f69f3
255
import shutil def cp_dir(src_dir, dest_dir): """Function: cp_dir Description: Copies a directory from source to destination. Arguments: (input) src_dir -> Source directory. (input) dest_dir -> Destination directory. (output) status -> True|False - True if copy was successful. (output) err_msg -> Error message from copytree exception or None. """ status = True err_msg = None try: shutil.copytree(src_dir, dest_dir) # Directory permission error. except shutil.Error as err: err_msg = "Directory not copied. Perms Error Message: %s" % (err) status = False # Directory does not exist. except OSError as err: err_msg = "Directory not copied. Exist Error Message: %s" % (err) status = False return status, err_msg
13f82a485fb46e102780c2462f0ab092f0d62df1
256
import re


def remove_comment(to_remove, infile):
    """Removes trailing block comments from the end of a string.

    Parameters:
        to_remove: The string to remove the comment from.
        infile: The file being read from.

    Returns:
        The parameter string with the block comment removed (if comment was
        present in string).
    """
    start_comment = re.search(r'\s*(\/\*|//)', to_remove)

    # Remove comments if they are in the matched group.
    if start_comment:
        end_comment = re.search(r'.*\*\/', to_remove)
        if end_comment or ('//' in to_remove and not '/*' in to_remove):
            removed = to_remove[:start_comment.start(0)] + '\n'
            return removed

        while not end_comment:
            to_remove = next(infile)
            end_comment = re.search(r'.*\*\/', to_remove)
        return ''
    else:
        removed = to_remove

    return removed
0172b295c9a023eb96fbad7a6c3a388874e106bc
264
def sexag_to_dec(sexag_unit): """ Converts Latitude and Longitude Coordinates from the Sexagesimal Notation to the Decimal/Degree Notation""" add_to_degree = (sexag_unit[1] + (sexag_unit[2]/60))/60 return sexag_unit[0]+add_to_degree
c9c4394920d2b483332eb4a81c0f0d9010179339
265
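A worked example of sexag_to_dec above, with a hypothetical coordinate of 40 degrees, 26 minutes, 46 seconds:

sexag_to_dec((40, 26, 46))   # -> 40.44611...  (40 + (26 + 46/60) / 60)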
import random def read_motifs(fmotif): """ create a random pool of motifs to choose from for the monte-carlo simulations """ motif_pool = [] for line in open(fmotif): if not line.strip(): continue if line[0] == "#": continue motif, count = line.rstrip().split() motif_pool.extend(motif * int(count)) random.shuffle(motif_pool) return motif_pool
168a7f82727917aa5ca1a30b9aa9df1699261585
266
async def update_result(user: dict, form: dict) -> str: """Extract form data and update one result and corresponding start event.""" informasjon = await create_finish_time_events(user, "finish_bib", form) # type: ignore return informasjon
b9b97f3b08f08dc35a0744f38323d76ecb0c3fba
267
def index_wrap(data, index): """ Description: Select an index from an array data :param data: array data :param index: index (e.g. 1,2,3, account_data,..) :return: Data inside the position index """ return data[index]
42b53f1d9edf237b904f822c15ad1f1b930aa69c
268
def do_step_right(pos: int, step: int, width: int) -> int:
    """Takes the current position and moves it `step` places to the right,
    wrapping around when the right edge of the board is reached."""
    new_pos = (pos + step) % width
    return new_pos
530f3760bab00a7b943314ca735c3a11343b87f5
269
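A quick trace of do_step_right above, wrapping past the right edge of an 8-wide board:

do_step_right(pos=6, step=3, width=8)   # -> 1, since (6 + 3) % 8 == 1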
import unicodedata def simplify_name(name): """Converts the `name` to lower-case ASCII for fuzzy comparisons.""" return unicodedata.normalize('NFKD', name.lower()).encode('ascii', 'ignore')
a7c01471245e738fce8ab441e3a23cc0a67c71be
270
def values(df, varname):
    """Values and counts in index order.

    df: DataFrame
    varname: string column name

    returns: Series that maps from value to frequency
    """
    return df[varname].value_counts().sort_index()
ea548afc8e0b030e441baa54abad32318c9c007f
273
def get_or_none(l, n): """Get value or return 'None'""" try: return l[n] except (TypeError, IndexError): return 'None'
c46a0f4c8edc9286b0122f1643e24a04113a5bfc
274
import os def _is_cache_dir_appropriate(cache_dir, cache_file): """ Determine if a directory is acceptable for building. A directory is suitable if any of the following are true: - it doesn't exist - it is empty - it contains an existing build cache """ if os.path.exists(cache_dir): files = os.listdir(cache_dir) if cache_file in files: return True return not bool(files) return True
b7a94540b8e97c4628224c05bfff44b798e449c9
276
def filter_with_prefixes(value, prefixes): """ Returns true if at least one of the prefixes exists in the value. Arguments: value -- string to validate prefixes -- list of string prefixes to validate at the beginning of the value """ for prefix in prefixes: if value.startswith(prefix): return False return True
56b9bacedaa7aa06023e29d45809f6e9661ee483
277
def is_seq(x, step=1):
    """Checks if the elements in a list-like object are increasing by step.

    Parameters
    ----------
    x: list-like
    step

    Returns
    -------
    True if elements increase by step, else False (the index at which the
    condition is first violated is printed).
    """
    for i in range(1, len(x)):
        if not x[i] == (x[i - 1] + step):
            print('Not seq at: ', i)
            return False
    return True
032e12b86aa7e50dfba2ddccd244475f58d70b29
278
def ecio_quality_rating(value, unit): """ ECIO (Ec/Io) - Energy to Interference Ratio (3G, CDMA/UMTS/EV-DO) """ if unit != "dBm": raise ValueError("Unsupported unit '{:}'".format(unit)) rating = 0 if value > -2: rating = 4 elif -2 >= value > -5: rating = 3 elif -5 >= value > -10: rating = 2 elif value <= -10: rating = 1 return rating
4cc21012464b8476d026f9dfbc35b8b1ea3c2d85
279
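Two hypothetical calls tracing ecio_quality_rating above:

ecio_quality_rating(-1, "dBm")   # -> 4 (best band, value > -2)
ecio_quality_rating(-7, "dBm")   # -> 2 (-5 >= value > -10)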