content: string (35 to 416k characters)
sha1: string (40 characters)
id: int64 (0 to 710k)
def template_check(value):
    """Check if a rendered template string equals true.

    If value is not a string, return value as is.
    """
    if isinstance(value, str):
        return value.lower() == "true"
    return value
3733db5c107068e815bac079fdef1a450f7acdc9
280
def return_npc(mcc, mnc):
    """
    Format MCC and MNC into a NPC.

    :param mcc: Country code.
    :type mcc: int
    :param mnc: Network code.
    :type mnc: int
    """
    return "{0}{1}30".format(str(mcc).zfill(3), str(mnc).zfill(3))
0ae5952fd7b026c2c90c72046f63ca4d08dacf06
281
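An illustrative check for return_npc above (my example values, not from the dataset): zero-padding plus the fixed "30" suffix gives an 8-digit string.

assert return_npc(310, 26) == "31002630"  # "310" + "026" + "30"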
def _get_capacity():
    """Return constant values for dam level capacities.

    Storage capacity values are measured in million cubic metres
    i.e. Megalitres or Ml.

    Source: https://en.wikipedia.org/wiki/Western_Cape_Water_Supply_System

    @return capacity: Dict object containing maximum capacities of Western
        Cape dams. Includes aggregate values for small dams, big six dams
        and all dams.
    """
    big_six_capacity = {
        'Theewaterskloof': 480188,
        'Wemmershoek': 58644,
        'Steenbras Lower': 33517,
        'Steenbras Upper': 31757,
        'Voëlvlei': 164095,
        'Berg River': 130010,
    }
    small_capacity = {
        'Hely-Hutchinson': 925,
        'Woodhead': 954,
        'Victoria': 128,
        'De Villiers': 243,
        'Kleinplaats': 1368,
        'Lewis Gay': 182,
        'Land-en-Zeezicht': 451,
    }
    capacity = {**big_six_capacity, **small_capacity}
    capacity['Big Six Dams'] = sum(big_six_capacity.values())
    capacity['Small Dams'] = sum(small_capacity.values())
    capacity['All Dams'] = capacity['Small Dams'] + capacity['Big Six Dams']
    return capacity
01d1a5e7470d578296e285e2e00cd44eaf00d15c
282
def prod_non_zero_diag(x):
    """Compute product of nonzero elements from matrix diagonal.

    input:
    x -- 2-d numpy array
    output:
    product -- integer number

    Not vectorized implementation.
    """
    n = len(x)
    m = len(x[0])
    res = 1
    for i in range(min(n, m)):
        if x[i][i] != 0:
            res *= x[i][i]
    return res
13e9f6cc9ea22e7901d454b23297a2e9c5da3a3a
283
def build_query(dct):
    """Build SQL with '?' and value tuples from clause dictionary"""
    if dct:  # `dct is not {}` was always true; test for a non-empty dict instead
        str_clauses = ''
        tpl_values = ()
        bln_start = True
        for str_field, dct_op_val in dct.items():  # iteritems() is Python 2 only
            if str_field is not None:
                if bln_start:
                    str_open = ' ('
                    bln_start = False
                else:
                    str_open = ' and ('
                str_clauses = ''.join([str_clauses, str_open, str_field, ' ',
                                       dct_op_val['logic'], ' ?)'])
                var_val = dct_op_val['value']
                if str(var_val).lower() == 'null':
                    var_val = None
                tpl_values = tpl_values + (var_val, )
            else:
                # simple 1 or 0 (ALL records or NO records) ...
                # trumps all other clauses, so let's exit the loop
                str_clauses = ' ?'
                tpl_values = (dct_op_val['value'],)
                break
        return (tpl_values, str_clauses)
    else:
        return ((), " 1")
ac49014c8e629d2fdc12472f2b8b345cbee8ce18
286
from typing import Callable

import click


def with_input(func: Callable) -> Callable:
    """
    Attaches a "source" argument to the command.
    """
    return click.argument(
        "source", type=click.Path(exists=True), required=True
    )(func)
3117f183ac4e4d459a718b59fc9a3ba00b36e291
287
def check_loop_validity(inst_list):
    """Given a list of instructions, check whether they can form
    a valid loop. This means, checking for anything that could create
    an infinite loop. We are also disallowing double loops right now."""
    for i, c in enumerate(inst_list):
        if c in [5, 6, 16, 25]:
            return False, i
    return True, -1
a58923e014947d1406165a831a57b73fcb9ab226
288
def calc_high_outlier(values) -> float:
    """Calculates the high outlier from a pandas Series"""
    q1, q3 = [values.quantile(x, interpolation='midpoint') for x in (0.25, 0.75)]
    return q3 + 1.5 * (q3 - q1)
8ee929aec1cb4af9a90d04893f8f94444d00ad22
289
import re


def diff_re(a, b, fromfile='', tofile='',
            fromfiledate='', tofiledate='', n=3, lineterm='\n'):
    """
    A simple "diff" of two sets of lines when the expected lines
    are regular expressions.  This is a really dumb thing that
    just compares each line in turn, so it doesn't look for
    chunks of matching lines and the like--but at least it lets
    you know exactly which line first didn't compare correctly.
    """
    result = []
    diff = len(a) - len(b)
    if diff < 0:
        a = a + [''] * (-diff)
    elif diff > 0:
        b = b + [''] * diff
    i = 0
    for aline, bline in zip(a, b):
        s = "^" + aline + "$"
        try:
            expr = re.compile(s)
        except re.error as e:
            msg = "Regular expression error in %s: %s"
            raise re.error(msg % (repr(s), e.args[0]))
        if not expr.search(bline):
            result.append("%sc%s" % (i + 1, i + 1))
            result.append('< ' + repr(a[i]))
            result.append('---')
            result.append('> ' + repr(b[i]))
        i = i + 1
    return result
802dd3287502c3d3fe85242ba51043e4b5769cd5
290
from typing import Union
from typing import Dict
from typing import Tuple
from typing import Any


def serialize_framework_build_config(dict_: Union[Dict[str, str], str]) -> Tuple[Any, ...]:
    """Serialize a dict to a hashable tuple.

    Parameters
    ----------
    dict_: Dict[str, str]

    Returns
    -------
    hashable_tuple: Tuple[Any, ...]
        A hashable tuple.
    """
    if isinstance(dict_, dict):
        return tuple(sorted(list(dict_.items())))
    return (dict_,)
365b413ff21bf4fb7f5d153dbe74801ee125108f
291
def get_confidence(imgfilename):
    """
    1003_c60.jpg -> c6
    """
    if not imgfilename:
        return ''
    return 'c' + imgfilename.split('/')[-1][0:1]
7c98f2abd2119b41d7e2501823985a894da5a1a1
292
def min_max_median(lst):
    """ a function that takes a simple list of numbers lst as a parameter
    and returns a list with the min, max, and the median of lst. """
    s = sorted(lst)
    n = len(s)
    return [s[0], s[-1], s[n // 2] if n % 2 == 1 else (s[n // 2 - 1] + s[n // 2]) / 2]
59b1ceef5796d77cc039a42593ddb3d1d2244bd7
293
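A worked check for min_max_median above (illustrative only), tracing the even-length median branch:

assert min_max_median([4, 1, 3, 2]) == [1, 4, 2.5]  # sorted: [1, 2, 3, 4]; median = (2 + 3) / 2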
def _enzyme_path_to_sequence(path, graph, enzymes_sites):
    """Converts a path of successive enzymes into a sequence."""
    return "".join(
        [enzymes_sites[path[0]]]
        + [graph[(n1, n2)]["diff"] for n1, n2 in zip(path, path[1:])]
    )
a3de9de5dc37df641e36d09d07b49c402fa17fd1
295
import string


def simple_caesar(txt, rot=7):
    """Caesar cipher through ASCII manipulation, lowercase only."""
    alphabet = string.ascii_lowercase                   # pick alphabet
    shifted_alphabet = alphabet[rot:] + alphabet[:rot]  # shift it
    table = str.maketrans(alphabet, shifted_alphabet)   # create mapping table
    return txt.lower().translate(table)                 # apply
eb8d86d37d8a8902663ff68e095b3b822225859c
296
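A usage sketch for simple_caesar above (illustrative only); with the default rot=7, 'h' maps to 'o':

assert simple_caesar("Hello") == "olssv"  # input is lowercased first
assert simple_caesar("abc", rot=1) == "bcd"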
def _is_url_without_path_query_or_fragment(url_parts):
    """
    Determines if a URL has a blank path, query string and fragment.

    :param url_parts: A URL.
    :type url_parts: :class:`urlparse.ParseResult`
    """
    return url_parts.path.strip('/') in ['', 'search'] \
        and url_parts.query == '' and url_parts.fragment == ''
4bad1f230adfa77df019519db276a181d57682dd
299
import re


def dir_keys(path):
    """A function to take a path, and return a list of all the numbers in the path.
    This is mainly used for sorting by the parameters they contain"""

    regex = r'[-+]?[0-9]+(?:\.[0-9]+)?(?:[eE][-+]?[0-9]+)?'  # matching any floating point
    m = re.findall(regex, path)
    if m:
        val = m
    else:
        raise ValueError('Your path does not contain any numbers')
    val = list(map(float, val))
    return val
c2c32772771c9bae23a1fcc949a509eaaf36d602
300
def get_colours_extend(graph_size, start_set, end_set, source, target, reachable=None):
    """
    Get colours for nodes including source and target nodes.

    Blue nodes are those in the source set.
    Orange nodes are those in the start set, not in the source set.
    Green nodes are those reachable from the source that are in target.
    Red nodes are those in target that are not reachable from the source.
    All other nodes are grey.
    """
    # Setup the colours
    c = []
    if reachable is None:
        reachable = end_set
    for acc_val in range(graph_size):
        if acc_val in start_set:
            if acc_val in source:
                c.append("dodgerblue")
            else:
                c.append("darkorange")
        elif acc_val in target:
            if acc_val in reachable:
                c.append("g")
            else:
                c.append("r")
        else:
            c.append("gray")
    return c
d366ed6c4c387d0b4de4440d34d358d5a142661a
301
def load_file(file_location):
    """
    Opens a given file and returns its contents.

    :param str file_location: The absolute path to the file
    :rtype: str
    :return: The contents of the file
    """
    with open(file_location, 'r') as file_contents:
        contents = file_contents.read()
    return contents
61b78432cffa4c22adc9af31bbad63bf8777737b
302
def travel_time_without_Rebalancing(tnet, i, j, exo=0):
    """
    Evaluate the travel time function for edge i -> j.

    Parameters
    ----------
    tnet: transportation network object
    i: starting node of edge
    j: ending node of edge

    Returns
    -------
    float
    """
    return sum(
        [tnet.fcoeffs[n] * ((tnet.G_supergraph[i][j]['flowNoRebalancing'] + exo)
                            / tnet.G_supergraph[i][j]['capacity']) ** n
         for n in range(len(tnet.fcoeffs))])
00ae58356d1a808d34a559267134cb52fc8b0dc5
305
import numpy
import math


def enhance_with_function(images, labels, ratio, enhance_func):
    """
    :param images:
    :param labels:
    :param ratio: the ratio of max input class. for example, highest sample count
        is 1000, ratio is 3, the result will be around 1000 * 3 * how_many_classes
    :param enhance_func: the func used for enhance f(image, label, how_many_to_generate)
    :return: newly generated features and labels
    """
    inputs_per_class = numpy.bincount(labels)
    max_inputs = numpy.max(inputs_per_class)

    # One Class
    for i in range(len(inputs_per_class)):
        input_ratio = math.ceil((max_inputs * ratio - inputs_per_class[i]) / inputs_per_class[i])
        print("generating class:{} with ratio:{}, max input:{}, current:{}".format(
            i, input_ratio, max_inputs, inputs_per_class[i]))

        if input_ratio <= 1:
            continue

        new_features = []
        new_labels = []
        mask = numpy.where(labels == i)

        for feature in images[mask]:
            generated_images = enhance_func(feature, input_ratio)
            for generated_image in generated_images:
                new_features.append(generated_image)
                new_labels.append(i)

        images = numpy.append(images, new_features, axis=0)
        labels = numpy.append(labels, new_labels, axis=0)

    return images, labels
d16b7d3726902653bce94c11dba808da1ee88d09
306
import uuid


def uuid1_():
    """Generate a GUID."""
    return str(uuid.uuid1())
8b1bf00c2c76429499a4300cc7f75fd075a0bf1c
308
import os


def get_tmp_filepath(_file):
    """Generate a temporary file path for _file."""
    _path = os.path.dirname(_file)
    _tmp_filename = os.path.basename(_file)
    if not _tmp_filename.startswith('.'):
        _tmp_filename = '.' + _tmp_filename
    _tmp_filename += '_tmp'
    _tmp_filepath = os.path.join(_path, _tmp_filename)
    if os.path.exists(_tmp_filepath):
        return get_tmp_filepath(_tmp_filepath + '_1')
    return _tmp_filepath
f566825dec9c3a6330ba5e1578f74c2a171e4296
309
def upperLeftOrigin(largeSize, smallSize):
    """
    The upper left coordinate (tuple) of a small rectangle in a larger
    rectangle (centered).
    """
    origin = tuple(map(lambda x: int((x[0] - x[1]) / 2), zip(largeSize, smallSize)))
    return origin
bda31fc5eb021f40a62b00949ced940ef171005f
310
def is_square_inside(row, col, rows, cols):
    """Check if row and col is square inside grid having rows and cols."""
    return row not in (0, rows - 1) and col not in (0, cols - 1)
f0cdcbc6d9bee6a41fd0cc84b16ffaf0638a522c
311
def clean_repository_clone_url(repository_clone_url):
    """Return a URL that can be used to clone a tool shed repository,
    eliminating the protocol and user if either exists."""
    if repository_clone_url.find('@') > 0:
        # We have a URL that includes an authenticated user, something like:
        # http://test@bx.psu.edu:9009/repos/some_username/column
        items = repository_clone_url.split('@')
        tmp_url = items[1]
    elif repository_clone_url.find('//') > 0:
        # We have a URL that includes only a protocol, something like:
        # http://bx.psu.edu:9009/repos/some_username/column
        items = repository_clone_url.split('//')
        tmp_url = items[1]
    else:
        tmp_url = repository_clone_url
    return tmp_url
c1d274e907d73aceaa5f1e2c52336edf1638cd8a
312
def add(n1, n2):
    """Adds the 2 given numbers"""
    return n1 + n2
ca670819dab8230e355e1b236d9cc74ed0b3b868
314
def kwarg_any(kwarg_functions):
    """Resolve kwarg predicates with short-circuit evaluation.  This
    optimization technique means we do not have to evaluate every predicate
    if one is already true.
    """
    return any(kwarg_function() for kwarg_function in kwarg_functions)
3303e1a871bb41920ba0f41e4928e05b6d876c1e
315
from typing import Tuple
from typing import Optional


def _get_build_to_download(build: str) -> Tuple[str, Optional[str]]:
    """Get the build version to download.

    If the passed value is not an explicit build number (eg. 15.0) then the
    build for the current day of that major/minor will be downloaded.

    :param build: The target build number.
    :return: The target build information.
    """
    components = build.split(".")
    num_components = len(components)

    if num_components == 1:
        components.append("0")

    if num_components == 2:
        return ".".join(components), None

    # Always treat the last component as the 'build'.  Unlike Houdini itself,
    # which would treat a release candidate version as part of the build number,
    # the web api will treat the candidate version as the build number and
    # the 3 main components as the version.
    return ".".join(components[: num_components - 1]), components[-1]
96215d80af60c25877da3eb7ff65147b2652a592
316
import torch


def kl_reverse(logu: torch.Tensor) -> torch.Tensor:
    """
    Log-space Csiszar function for reverse KL-divergence D_f(p,q) = KL(q||p).

    Also known as the exclusive KL-divergence and negative ELBO, minimizing
    results in zero-forcing / mode-seeking behavior.

    Args:
        logu (torch.Tensor): ``p.log_prob``s evaluated at samples from q.
    """
    return -logu
fcc9035de183cb6d5b51e169dd764ff92ab290aa
317
def reload() -> bool:
    """Gracefully reloads uWSGI.

    * http://uwsgi.readthedocs.io/en/latest/Management.html#reloading-the-server
    """
    return False
f020356774d0a500b6755d53d548a804392c39d3
319
import re


def predict_imagen(titulo=None, grados=None, ano_lanzamiento=None,
                   paginas=None, codbarras=None):
    """ Predictor for Imagen from model/5a143f443980b50a74003699

        Created using BigMLer
    """

    tm_tokens = 'tokens_only'
    tm_full_term = 'full_terms_only'
    tm_all = 'all'

    def term_matches(text, field_name, term):
        """ Counts the number of occurrences of term and its variants in text """
        forms_list = term_forms[field_name].get(term, [term])
        options = term_analysis[field_name]
        token_mode = options.get('token_mode', tm_tokens)
        case_sensitive = options.get('case_sensitive', False)
        first_term = forms_list[0]
        if token_mode == tm_full_term:
            return full_term_match(text, first_term, case_sensitive)
        else:
            # In token_mode='all' we will match full terms using equals and
            # tokens using contains
            if token_mode == tm_all and len(forms_list) == 1:
                pattern = re.compile(r'^.+\b.+$', re.U)
                if re.match(pattern, first_term):
                    return full_term_match(text, first_term, case_sensitive)
            return term_matches_tokens(text, forms_list, case_sensitive)

    def full_term_match(text, full_term, case_sensitive):
        """Counts the match for full terms according to the case_sensitive
        option
        """
        if not case_sensitive:
            text = text.lower()
            full_term = full_term.lower()
        return 1 if text == full_term else 0

    def get_tokens_flags(case_sensitive):
        """Returns flags for regular expression matching depending on text
        analysis options
        """
        flags = re.U
        if not case_sensitive:
            flags = (re.I | flags)
        return flags

    def term_matches_tokens(text, forms_list, case_sensitive):
        """ Counts the number of occurrences of the words in forms_list in
        the text
        """
        flags = get_tokens_flags(case_sensitive)
        expression = r'(\b|_)%s(\b|_)' % '(\\b|_)|(\\b|_)'.join(forms_list)
        pattern = re.compile(expression, flags=flags)
        matches = re.findall(pattern, text)
        return len(matches)

    term_analysis = {
        "titulo": {
            "case_sensitive": False,
            "token_mode": 'all',
        },
    }
    term_forms = {
        "titulo": {
            "fantásticos": ['fantásticos', 'fantásticas'],
            "gigante": ['gigante', 'gigantes'],
        },
    }

    if codbarras is None:
        return {"prediction": 1.82, "error": 5.53698}
    if codbarras > 9789872414340:
        if ano_lanzamiento is None:
            return {"prediction": 9, "error": 7.02326}
        if ano_lanzamiento > 2008:
            if paginas is None:
                return {"prediction": 10.5, "error": 5.88884}
            if paginas > 90:
                if titulo is None:
                    return {"prediction": 9, "error": 5.08228}
                if term_matches(titulo, "titulo", u"fantásticos") > 0:
                    return {"prediction": 8, "error": 5.08228}
                if term_matches(titulo, "titulo", u"fantásticos") <= 0:
                    if grados is None:
                        return {"prediction": 9.5, "error": 5.26764}
                    if grados == "Elsa Pizzi":
                        return {"prediction": 9, "error": 5.26764}
                    if grados != "Elsa Pizzi":
                        return {"prediction": 10, "error": 5.26764}
            if paginas <= 90:
                if titulo is None:
                    return {"prediction": 12, "error": 5.08228}
                if term_matches(titulo, "titulo", u"gigante") > 0:
                    return {"prediction": 11, "error": 5.08228}
                if term_matches(titulo, "titulo", u"gigante") <= 0:
                    if grados is None:
                        return {"prediction": 12.5, "error": 5.26764}
                    if grados == "Patricia Roggio":
                        return {"prediction": 13, "error": 5.26764}
                    if grados != "Patricia Roggio":
                        return {"prediction": 12, "error": 5.26764}
        if ano_lanzamiento <= 2008:
            if grados is None:
                return {"prediction": 6, "error": 5.08228}
            if grados == "4°, 5°":
                return {"prediction": 7, "error": 5.08228}
            if grados != "4°, 5°":
                if grados == "5°, 6°":
                    return {"prediction": 5, "error": 5.26764}
                if grados != "5°, 6°":
                    return {"prediction": 6, "error": 5.26764}
    if codbarras <= 9789872414340:
        if codbarras > 9789872414309:
            if paginas is None:
                return {"prediction": 3, "error": 5.08228}
            if paginas > 100:
                if grados is None:
                    return {"prediction": 2.5, "error": 5.26764}
                if grados == "4°, 5°":
                    return {"prediction": 2, "error": 5.26764}
                if grados != "4°, 5°":
                    return {"prediction": 3, "error": 5.26764}
            if paginas <= 100:
                return {"prediction": 4, "error": 5.08228}
        if codbarras <= 9789872414309:
            if codbarras > 9789871989852:
                return {"prediction": 1, "error": 0.26071}
            if codbarras <= 9789871989852:
                return {"prediction": 0, "error": 0.04286}
ecee556bf9eb563cb40bf759bb6c4bfdf74922a0
320
def construct_item(passage: str, labels):
    """
    Build an item from the input passage and labels, where labels look like
    ['B-ASP', 'I-ASP', 'I-ASP', 'I-ASP', ..., 'I-OPI', 'I-OPI', 'O'].
    An example of the constructed result:
        {
            'passage': '使用一段时间才来评价,淡淡的香味,喜欢!',
            'aspect': [['香味', 14, 16]],
            'opinion': [['喜欢', 17, 19]]
        }
    :return:
    """
    assert len(passage) == len(labels)
    aspects, opinions = [], []
    for i, char, label in zip(range(len(passage)), passage, labels):
        if label == "O":
            continue
        elif label.startswith("B"):
            if label.endswith("ASP"):
                aspects.append([char, i])
            elif label.endswith("OPI"):
                opinions.append([char, i])
            else:
                raise Exception("label must be in set {'B-ASP', 'I-ASP', 'B-OPI', 'I-OPI', 'O'}.")
        elif label.endswith("ASP"):
            if i == 0 or not labels[i - 1].endswith("ASP"):
                aspects.append([char, i])
            else:
                aspects[-1][0] += char
        elif label.endswith("OPI"):
            if i == 0 or not labels[i - 1].endswith("OPI"):
                opinions.append([char, i])
            else:
                opinions[-1][0] += char
        else:
            raise Exception("label must be in set {'B-ASP', 'I-ASP', 'B-OPI', 'I-OPI', 'O'}.")
    aspects = [[aspect[0], aspect[1], aspect[1] + len(aspect[0])] for aspect in aspects]
    opinions = [[opinion[0], opinion[1], opinion[1] + len(opinion[0])] for opinion in opinions]
    result = {
        "passage": passage,
        "aspects": aspects,
        "opinions": opinions
    }
    return result
b4a31b67df7c82b56e0eb388e964422f257a9293
321
def getStartingAddress(packet):
    """Get the address of a modbus request"""
    return (ord(packet[8]) << 8) + ord(packet[9])
83dc55585d67169b0716cc3e98008574c434213b
322
import numbers


def is_number(item):
    """Check if the item is a number."""
    return isinstance(item, numbers.Number)
6c3fb6817a0eda2b27fcedd22763461dceef6bc1
323
def from_list(commands):
    """
    Given a list of dicts (each with "level", "description" and "id" keys)
    that represents a DFS traversal of a command tree, returns a list of
    nested dicts representing the command tree.
    """
    def subtrees(commands, level):
        if not commands:
            return
        acc = []
        parent, *commands = commands
        for command in commands:
            if command["level"] > level:
                acc.append(command)
            else:
                yield (parent, acc)
                parent = command
                acc.clear()
        yield (parent, acc)

    def walk(commands, level=0):
        return [
            {
                "description": key["description"],
                "children": walk(subtree, level + 1),
                "id": key["id"],
            }
            for key, subtree in subtrees(commands, level)
        ]

    return walk(commands)
39dad022bf81712e074f6e8bb26813302da9ef9f
324
def buildGeneMap(identifiers, separator="|"):
    """build map of predictions to genes.

    Use an identifier syntax of species|transcript|gene. If none is given,
    all transcripts are assumed to be from their own gene.
    """
    map_id2gene, map_gene2ids = {}, {}

    for id in identifiers:
        f = id.split(separator)
        if len(f) < 3:
            gene = id
        else:
            gene = f[0] + separator + f[2]
        map_id2gene[id] = gene
        if gene not in map_gene2ids:
            map_gene2ids[gene] = []
        map_gene2ids[gene].append(id)

    return map_id2gene, map_gene2ids
e854639142bd600338563ffc1160b43359876cdd
325
def checkdnsrr():
    """Check DNS records corresponding to a given Internet host name or IP address"""
    raise NotImplementedError()  # was `return NotImplementedError()`, which silently returned the instance
7fda596230cc5f61e946e8a0949c67f365cf5563
327
def xml_attr_or_element(xml_node, name):
    """ Attempt to get the value of name from the xml_node. This could be an
        attribute or a child element.
    """
    attr_val = xml_node.get(name, None)
    if attr_val is not None:
        return attr_val.encode('utf-8').strip()
    for child in xml_node:  # getchildren() is deprecated; iterate the node directly
        if child.tag == name:
            return child.text.encode('utf-8').strip()
    return None
4ec061a9a865291d8d26d8de474141175d5aab28
328
import numpy


def kmeans_init_centroids(x_array, num_centroids_K):
    """
    This function initializes K centroids that are to be used in K-means
    on the dataset x_array.

    Parameters
    ----------
    x_array : array_like
        The dataset of size (m x n).
    num_centroids_K : int
        The number of clusters.

    Returns
    -------
    rand_init_centroids : array_like
        Centroids of the clusters. This is a matrix of size (K x n).

    Instructions
    ------------
    You should set centroids to randomly chosen examples from the dataset x_array.
    """
    numpy.random.seed(seed=42)
    num_examples, num_features = x_array.shape
    rand_init_centroids = numpy.zeros((num_centroids_K, num_features))
    randidx = numpy.random.permutation(num_examples)
    # Take the first K examples as centroids
    rand_init_centroids = x_array[randidx[:num_centroids_K], :]
    return rand_init_centroids
2e310dd3fe9eb6dd32999e32f583fc4a7fd0bbf0
329
def get_coinbase_candle_url(url, timestamp_from, pagination_id):
    """Get Coinbase candle URL."""
    start = timestamp_from.replace(tzinfo=None).isoformat()
    url += f"&start={start}"
    if pagination_id:
        url += f"&end={pagination_id}"
    return url
a1bb4e975060ba5e3438b717d1c2281349cd51f1
330
def part2():
    """This view will be at the path ``/part2``"""
    return "Part 2"
92a8789b669a66989a74be2c2126e8958e4beece
331
def subplot_index(nrow, ncol, k, kmin=1):
    """Return the i, j index for the k-th subplot."""
    i = 1 + (k - kmin) // ncol
    j = 1 + (k - kmin) % ncol
    if i > nrow:
        raise ValueError('k = %d exceeds number of rows' % k)
    return i, j
2d2b7ef9bf9bc82d06637157949ca9cb3cc01105
333
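A quick arithmetic trace for subplot_index above (illustrative only), on a 2x3 grid:

assert subplot_index(2, 3, 5) == (2, 2)  # i = 1 + 4 // 3, j = 1 + 4 % 3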
def _split_keys(keypath, separator):
    """
    Splits keys using the given separator:
    eg. 'item.subitem[1]' -> ['item', 'subitem[1]'].
    """
    if separator:
        return keypath.split(separator)
    return [keypath]
2f67a35a2e08efce863d5d9e64d8a28f8aa47765
334
def spacify(string, spaces=2):
    """Add spaces to the beginning of each line in a multi-line string."""
    return spaces * " " + (spaces * " ").join(string.splitlines(True))
7ab698d8b38a6d940ad0935b5a4ee8365e35f5da
336
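A small check for spacify above (illustrative only); splitlines(True) keeps the newlines, so each line gains the prefix:

assert spacify("a\nb") == "  a\n  b"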
import math


def calc_dif_mod_cn(x, y):
    """ Check if the difference between the moduli of consecutive numbers is a prime number """
    modx = math.sqrt(x.real ** 2 + x.imag ** 2)  # modulus of the first complex number
    mody = math.sqrt(y.real ** 2 + y.imag ** 2)  # modulus of the second complex number
    dif = modx - mody
    d = 0  # the number of the divisors of dif
    if dif == int(dif) and dif > 1:  # first, check that dif is an integer greater than 1
        for i in range(2, int(int(dif) / 2 + 1)):  # then, check if it's a prime number
            if dif % i == 0:
                d = d + 1
        # if d == 0, then dif is a prime number
        if d == 0 or dif == 2:
            return dif
        else:
            return 0
    else:
        return 0
88e353e4a948c3b6adc65b91265a5f6d2e68a1c1
339
def frohner_cor_3rd_order(sig1, sig2, sig3, n1, n2, n3):
    """
    Takes cross-sections [barns] and atom densities [atoms/barn] for
    three thicknesses of the same sample, and returns extrapolated
    cross section according to Frohner.

    Parameters
    ----------
    sig1 : array_like
        Cross section of the thinnest of the three samples.
    sig2 : array_like
        Cross section of the mid-thickness of the three samples.
    sig3 : array_like
        Cross section of the thickest of the three samples.
    n1 : float
        Atom density of the thinnest sample
    n2 : float
        Atom density of the mid-thickness sample
    n3 : float
        Atom density of the thickest sample

    Returns
    -------
    sig0 : array_like
        The extrapolated cross section from sig1, sig2, and sig3
    """
    # two terms in the numerator
    numer1 = (n1 * sig2 - n2 * sig1) * (n3**2 - n1**2 - (n1 - n3) / (n1 - n2) * (n2**2 - n1**2))
    numer2 = (n1 * n2**2 - n1**2 * n2) * (sig3 - sig2 - (n1 - n3) / (n1 - n2) * (sig2 - sig1))
    denom = (n1 - n2) * (n3**2 - n1**2) - (n1 - n3) * (n2**2 - n1**2)
    return (numer1 - numer2) / denom
d6f0b39368c19aeda899265eb187190bb4beb944
343
def nodeInTree(root, k):
    """ Checks if the node exists in the tree or not """
    if root is None:
        return False
    if root.data == k or nodeInTree(root.left, k) or nodeInTree(root.right, k):
        return True
    return False
14db01c8d2370bfaa01220d3608798165ea1a096
344
def split_and_filter(intermediate_str, splitter):
    """
    Split string with given splitter - practically either one of "," or "'".
    Then filter ones that include "https" in the split pickles.

    :param intermediate_str: string that is in the middle of parsing
    :param splitter:
    :return: chunk of string(s) as a list
    """
    intermediate_split = intermediate_str.split(splitter)
    intermediate_filter = [elem for elem in intermediate_split
                           if 'https' in elem]
    return intermediate_filter[0]
a4b800df1aca89ca1e8eedfc65a5016a995acd48
345
import os


def verify_image(filename):
    """Verifies whether the file exists"""
    image_extensions = ['tif', 'jpg', 'gif', 'png', 'jpeg']
    if isinstance(filename, str):
        extension = filename.split('.')
        if len(extension) == 2:
            if extension[1].lower() in image_extensions:
                return os.path.isfile(filename)
    return False
84e9845ab3e146d94f2ba3e0a2fb3ecd458b822f
346
import copy
import random


def entries():
    """
    Basic data for a test case
    """
    return copy.deepcopy(
        {"arb_key": "text", "randn": random.randint(0, 10),
         "nested": {"ntop": 0, "nmid": {"list": ["a", "b"]},
                    "lowest": {"x": {"a": -1, "b": 1}}},
         "collection": {1, 2, 3}})
5d6cde325b69e43598f9d0158ae5989a4d70b54c
348
def count_ref_alleles(variant, *traits):
    """Count reference alleles for a variant

    Parameters
    ----------
    variant : a Variant as from funcgenom
        the variant for which alleles should be counted
    *traits : str
        the traits for which alleles should be counted

    Returns
    -------
    int
        the reference allele count
    """
    return (
        ''.join(variant.traits[trait]['alleles'] for trait in traits)
        .replace(',', '.')
        .count('.')
    )
10ea3468f5de8f2b77bb97b27b888af808c541b7
349
import random


def random_in_range(a: int, b: int) -> int:
    """ Return a random number r with a <= r <= b. """
    return random.randint(a, b)
611c2754ace92eac4951f42e1e31af2f441ed0c2
351
def km_miles(kilometers):
    """Usage: Convert kilometers to miles"""
    return kilometers / 1.609
5480c065f904dfc1959691e158653fd0e6bb67e6
353
def strategy(history, memory):
    """
    Tit-for-tat, except we punish them N times in a row if this is the Nth
    time they've initiated a defection.

    memory: (initiatedDefections, remainingPunitiveDefections)
    """
    if memory is not None and memory[1] > 0:
        choice = 0
        memory = (memory[0], memory[1] - 1)
        return choice, memory

    num_rounds = history.shape[1]
    opponents_last_move = history[1, -1] if num_rounds >= 1 else 1
    our_last_move = history[0, -1] if num_rounds >= 1 else 1
    our_second_last_move = history[0, -2] if num_rounds >= 2 else 1
    opponent_initiated_defection = (
        opponents_last_move == 0 and our_last_move == 1 and our_second_last_move == 1
    )
    choice = 0 if opponent_initiated_defection else 1
    if choice == 0:
        memory = (1, 0) if memory is None else (memory[0] + 1, memory[0])

    return choice, memory
bf8d09417c246f9f88a721dfcc4408f49195fd1a
354
import torch


def get_cali_samples(train_data_loader, num_samples, no_label=True):
    """Generate sub-dataset for calibration.

    Args:
        train_data_loader (torch.utils.data.DataLoader):
        num_samples (int):
        no_label (bool, optional): If the dataloader has no labels. Defaults to True.

    Returns:
        torch.Tensor: Concatenated data matrix.
    """
    cali_data_list = []
    if no_label:
        for batch_data in train_data_loader:
            cali_data_list.append(batch_data["image"])
            if len(cali_data_list) >= num_samples:
                break
    else:
        for batch_data, _ in train_data_loader:
            cali_data_list.append(batch_data)
            if len(cali_data_list) >= num_samples:
                break
    return torch.cat(cali_data_list, dim=0)[:num_samples].cpu()
297ea0384b1e7f0a6ea51fc37325e57eb1cb8afa
355
def _swap_endian(val, length):
    """
    Swap the endianness of a number
    """
    if length <= 8:
        return val
    if length <= 16:
        return (val & 0xFF00) >> 8 | (val & 0xFF) << 8
    if length <= 32:
        return ((val & 0xFF000000) >> 24
                | (val & 0x00FF0000) >> 8
                | (val & 0x0000FF00) << 8
                | (val & 0x000000FF) << 24)
    raise Exception('Cannot swap endianness for length %d' % length)  # was `+ length`: str/int concat raises TypeError
4b3b879ad04e43e9454b904ba65420a8d477b629
358
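Byte-level checks for _swap_endian above (illustrative only):

assert _swap_endian(0x1234, 16) == 0x3412
assert _swap_endian(0x12345678, 32) == 0x78563412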
def about(template):
    """
    Attach a template to a step which can be used to generate
    documentation about the step.
    """
    def decorator(step_function):
        step_function._about_template = template
        return step_function
    return decorator
7c00256e39481247857b34dcd5b7783a39b0a8bd
359
import torch


def _extend_batch_dim(t: torch.Tensor, new_batch_dim: int) -> torch.Tensor:
    """
    Given a tensor `t` of shape [B x D1 x D2 x ...] we output the same tensor
    repeated along the batch dimension ([new_batch_dim x D1 x D2 x ...]).
    """
    num_non_batch_dims = len(t.shape[1:])
    repeat_shape = (new_batch_dim, *(1 for _ in range(num_non_batch_dims)))
    return t.repeat(repeat_shape)
7ee1d0930f843a9d31bcc4934d675109f3b2df9b
360
def split_component_chars(address_parts):
    """
    :param address_parts: list of the form [(<address_part_1>, <address_part_1_label>), .... ]

    returns [(<char_0>, <address_comp_for_char_0>), (<char_1>, <address_comp_for_char_1>),
             ..., (<char_n-1>, <address_comp_for_char_n-1>)]
    """
    char_arr = []
    for address_part, address_part_label in address_parts:
        # The address part of the tuple (address_part, address_part_label)
        for c in address_part:
            char_arr.append((c, address_part_label))
    return char_arr
f4f3dd59378a689e9048cee96b8d6f12e9d8fe21
361
import getpass


def get_ssh_user():
    """Returns ssh username for connecting to cluster workers."""
    return getpass.getuser()
166048aa258bd0b2c926d03478e8492a405b0f7e
364
def adjacency_matrix(edges):
    """
    Convert a directed graph to an adjacency matrix.

    Note: The distance from a node to itself is 0, and distance from a node
    to an unconnected node is defined to be infinite.

    Parameters
    ----------
    edges : list of tuples
        list of dependencies between nodes in the graph
        [(source node, destination node, weight), ...]

    Returns
    -------
    out : tuple
        (names, adjacency matrix)
        names - list of unique nodes in the graph
        adjacency matrix represented as list of lists
    """
    # determine the set of unique nodes
    names = set()
    for src, dest, _ in edges:
        # add source and destination nodes
        names.add(src)
        names.add(dest)
    # convert set of names to sorted list
    names = sorted(names)
    # determine initial adjacency matrix with infinity weights
    matrix = [[float('Inf')] * len(names) for _ in names]
    for src, dest, weight in edges:
        # update weight in adjacency matrix
        matrix[names.index(src)][names.index(dest)] = weight
    for src in names:
        matrix[names.index(src)][names.index(src)] = 0
    # return list of names and adjacency matrix
    return names, matrix
b8743a6fa549b39d5cb24ae1f276e911b954ee5a
365
def estimate_Cn(P=1013, T=273.15, Ct=1e-4):
    """Use Weng et al to estimate Cn from meteorological data.

    Parameters
    ----------
    P : `float`
        atmospheric pressure in hPa
    T : `float`
        temperature in Kelvin
    Ct : `float`
        atmospheric structure constant of temperature,
        typically 10^-5 - 10^-2 near the surface

    Returns
    -------
    `float`
        Cn
    """
    return (79 * P / (T ** 2)) * Ct ** 2 * 1e-12
b74dd0c91197c24f880521a06d6bcd205d749448
366
def connection_type_validator(type):
    """
    Property: ConnectionInput.ConnectionType
    """
    valid_types = [
        "CUSTOM",
        "JDBC",
        "KAFKA",
        "MARKETPLACE",
        "MONGODB",
        "NETWORK",
        "SFTP",
    ]
    if type not in valid_types:
        raise ValueError("%s is not a valid value for ConnectionType" % type)  # was "%", which never interpolated
    return type
cc2ed6096097c719b505356e69a5bb5cdc109495
368
def add_sibling(data, node_path, new_key, new_data, _i=0):
    """
    Traversal-safe method to add a sibling data node.

    :param data: The data object you're traversing.
    :param node_path: List of path segments pointing to the node you're
        creating a sibling of. Same as node_path of traverse()
    :param new_key: The sibling key to create.
    :param new_data: The new data to be stored at the key.
    """
    if _i < len(node_path) - 1:
        return add_sibling(data[node_path[_i]], node_path, new_key, new_data, _i + 1)
    else:
        data[new_key] = new_data
4bc11315eab686659edc9f7eb8479508d3ca37fb
371
import re


def strip_characters(text):
    """Strip characters in text."""
    t = re.sub(r'\(|\)|:|,|;|\.|’|”|“|\?|%|>|<', '', text)
    t = re.sub(r'/', ' ', t)
    t = t.replace("'", '')
    return t
763ddc837ef9be19aa067e362c312ebd88632ed7
372
def publications_classification_terms_get(search=None):  # noqa: E501
    """List of Classification Terms

    List of Classification Terms # noqa: E501

    :param search: search term applied
    :type search: str

    :rtype: ApiOptions
    """
    return 'do some magic!'
6633c91d59a5df7805979bd85a01f8eb1c269946
374
def _card(item):
    """Handle card entries

    Returns: title (append " - Card" to the name),
        username (Card brand),
        password (card number),
        url (none),
        notes (including all card info)
    """
    notes = item.get('notes', "") or ""
    # Add card info to the notes; default to {} so .items() is safe when no card exists
    notes = notes + ("\n".join([f"{i}: {j}" for i, j in item.get('card', {}).items()]))

    return f"{item['name']} - Card", \
        item.get('card', {}).get('brand', '') or "", \
        item.get('card', {}).get('number', "") or "", \
        "", \
        notes
fc7d5e4b960019b05ffe7ca02fd3d1a94d69b303
375
def get_width_and_height_from_size(x):
    """ Obtains width and height from an int or tuple """
    if isinstance(x, int):
        return x, x
    if isinstance(x, (list, tuple)):
        return x
    else:
        raise TypeError()
581c9f332613dab5de9b786ce2bac3387ee1bd3b
377
def remove_stopwords(lista, stopwords):
    """Function to remove stopwords

    Args:
        lista ([list]): list of texts
        stopwords ([list]): [description]

    Returns:
        [list]: List of texts without stopwords
    """
    lista_out = list()
    for text in lista:
        text = ' '.join([word for word in text.split() if word not in stopwords])
        text = text.strip()
        lista_out.append(text)
    # print("Len original: {} - Len processed stopwords: {}".format(len(lista), len(lista_out)))
    return lista_out
edca74bb3a041a65a628fcd3f0c71be5ad4858df
378
def get_users_report(valid_users, ibmcloud_account_users):
    """get_users_report()"""
    users_report = []
    valid_account_users = []
    invalid_account_users = []

    # use case 1: find users in account not in valid_users
    for account_user in ibmcloud_account_users:
        # check if account user is in valid_users
        is_valid_user = False
        for valid_user in valid_users:
            if account_user["email"] == valid_user["email"]:
                account_user["name"] = valid_user["name"]
                account_user["identities"] = valid_user["identities"]
                if "resourceGroups" in valid_user:
                    account_user["resourceGroups"] = valid_user["resourceGroups"]
                account_user["manager"] = valid_user["manager"]
                account_user["association"] = valid_user["association"]
                is_valid_user = True
        if is_valid_user:
            valid_account_users.append(account_user)
        else:
            invalid_account_users.append(account_user)

    users_report = {
        "valid_account_users": valid_account_users,
        "invalid_account_users": invalid_account_users
    }

    return users_report
a96f8835496f82d8b6f8cd4f248ed8a03676795b
379
def get_natural_num(msg):
    """
    Get a valid natural number from the user!

    :param msg: message asking for a natural number
    :return: a positive integer converted from the user's input
    """
    valid_enter = False
    while not valid_enter:
        given_number = input(msg).strip()
        if given_number.isdigit():
            num = int(given_number)
            valid_enter = True
    return num
77bed94bf6d3e5ceb56d58eaf37e3e687e3c94ba
381
import types


def copy_function(old_func, updated_module):
    """Copies a function, updating its globals to point to updated_module."""
    new_func = types.FunctionType(old_func.__code__, updated_module.__dict__,
                                  name=old_func.__name__,
                                  argdefs=old_func.__defaults__,
                                  closure=old_func.__closure__)
    new_func.__dict__.update(old_func.__dict__)
    new_func.__module__ = updated_module.__name__
    return new_func
e09022f734faa1774a3ac592c0e12b0b007ae8e3
382
import random


def get_random_color():
    """
    Return a random Bootstrap color string identifier.

    :return: a Bootstrap color string
    """
    color_str = [
        'primary',
        'secondary',
        'success',
        'danger',
        'warning',
        'info',
        'dark',
    ]
    return random.choice(color_str)
898814996aa5ada8f4000244887af382b8b9e1bc
383
def neighborhood(index, npoints, maxdist=1):
    """
    Returns the neighbourhood of the current index,
    = all points of the grid separated by up to
    *maxdist* from current point.

    @type index: int
    @type npoints: int
    @type maxdist: int
    @rtype: list of int
    """
    return [index + i for i in range(-maxdist, maxdist + 1)
            if i != 0 and 0 <= index + i <= npoints - 1]
98166d810daa6b99862a4c9f6d1629fdfa571bd0
384
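Boundary checks for neighborhood above (illustrative only); offsets that fall outside the grid are dropped:

assert neighborhood(0, 5) == [1]  # left edge: only the right neighbour survives
assert neighborhood(2, 5, maxdist=2) == [0, 1, 3, 4]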
def data_check(data):
    """Check the data in [0,1]."""
    return 0 <= float(data) <= 1
b292ef07a024e53d82e706f0d88d50d6318d6593
385
def decentralized_training_strategy(communication_rounds, epoch_samples, batch_size, total_epochs):
    """
    Split one epoch into r rounds and perform model aggregation

    :param communication_rounds: the communication rounds in training process
    :param epoch_samples: the samples for each epoch
    :param batch_size: the batch_size for each epoch
    :param total_epochs: the total epochs for training
    :return: batch_per_epoch, total_epochs with communication rounds r
    """
    if communication_rounds >= 1:
        epoch_samples = round(epoch_samples / communication_rounds)
        total_epochs = round(total_epochs * communication_rounds)
        batch_per_epoch = round(epoch_samples / batch_size)
    elif communication_rounds in [0.2, 0.5]:
        total_epochs = round(total_epochs * communication_rounds)
        batch_per_epoch = round(epoch_samples / batch_size)
    else:
        raise NotImplementedError(
            "The communication round {} is illegal, should be 0.2 or 0.5".format(communication_rounds))
    return batch_per_epoch, total_epochs
3a743208af50d7c7865d5d5f86a4f58b0ba98a4d
387
def create_config_file_lines():
    """Wrapper for creating the initial config file content as lines."""
    lines = [
        "[default]\n",
        "config_folder = ~/.zettelkasten.d\n",
        "\n",
        "def_author = Ammon, Mathias\n",
        "def_title = Config Parsed Test Title\n",
        "def_location_specifier = None\n",
        "\n",
        "location = ~/zettelkasten\n",
        "\n",
        "initial_folder_structure = \n",
        " lobby,\n",
        " %(sources_directory)s,\n",
        " _sources/audios,\n",
        " _sources/images,\n",
        " _sources/pdfs,\n",
        " _sources/videos\n",
        "\n",
        "name_sep = /\n",
        "\n",
        "required_attributes = \n",
        " uid,\n",
        " category,\n",
        " subcategory\n",
        "\n",
        "sources_directory = _sources\n",
        "\n",
        "styles_file = styles.cfg\n",
        "\n",
        "reserved_folder_names = \n",
        " lobby,\n",
        " %(sources_directory)s,\n",
        " pytest_dir,\n",
        " doctest_dir,\n",
        " .zettelkasten.d\n",
        "\n",
        "zettelkasten_bib_file = zettelkasten.bib\n",
        "\n",
        "[source_file_formats]\n",
        "audios = \n",
        " mp3,\n",
        " wav\n",
        "images = \n",
        " webp,\n",
        " jpg,\n",
        " jpeg,\n",
        " png\n",
        "pdfs =\n",
        " pdf,\n",
        " odt\n",
        "videos =\n",
        " mkv,\n",
        " webm,\n",
        " mp4\n",
        "\n",
        "[zettel_meta_attribute_defaults]\n",
        "# required for zettel adding to work \n",
        "category= None\n",
        "subcategory= None\n",
        "# optional\n",
        "author = Mathias Ammon\n",
        "topics =\n",
        "tags =\n",
        "doc = today\n",
        "\n",
        "[zettel_meta_attribute_labels]\n",
        "# required for zettel adding to work\n",
        "uid = #+Title:\n",
        "category = #+Category:\n",
        "subcategory = #+Subcategory:\n",
        "# optional\n",
        "author = #+Author:\n",
        "doc = #+DOC:\n",
        "dole = #+DOLE:\n",
        "topics = #+Topics:\n",
        "tags = #+Tags:\n",
    ]
    return lines
d0d1057c3f450636279a8df9d4a39977f1eeef42
388
def get_recipes_from_dict(input_dict: dict) -> dict:
    """Get recipes from dict

    Attributes:
        input_dict (dict): ISO_639_1 language code

    Returns:
        recipes (dict): collection of recipes for input language
    """
    if not isinstance(input_dict, dict):
        raise TypeError("Input is not type dict")

    recipes = input_dict
    return recipes
e710d9629d10897d4aae7bf3d5de5dbbe18196c5
389
def split(text):
    """Turns the mobypron.unc file into a dictionary"""
    map_word_moby = {}
    try:
        lines = text.split("\n")
        for line in lines:
            (word, moby) = line.split(" ", 1)
            map_word_moby[word] = moby
    except IOError as error:
        print(f"Failed due to IOError: {error}")
    return map_word_moby
ba051724f0399e918949c3e8b7fb010e2d87c9f9
391
def report(key_name=None, priority=-1, **formatters):
    """
    Use this decorator to indicate what returns to include in the report and
    how to format it
    """
    def tag_with_report_meta_data(cls):
        # guard: prevent bad coding by catching bad return key
        if key_name and key_name not in cls.return_keys:
            raise Exception("Task %s does not specify %s using the @returns decorator. "
                            "It cannot be used in @report" % (cls.name, key_name))
        report_entry = {
            "key_name": key_name,
            'priority': priority,
            'formatters': formatters,
        }
        if not hasattr(cls, 'report_meta'):
            cls.report_meta = []
        cls.report_meta.append(report_entry)
        return cls
    return tag_with_report_meta_data
3830135de40bdc2a25bd3c6b6cecc194c6dbebac
392
def lerp(a, b, t):
    """
    Linear interpolation from @a to @b as @t goes between 0 and 1.
    """
    return (1 - t) * a + t * b
12cb8690ba5e5f2a4c08c1cd29d3497513b63438
394
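Two quick checks for lerp above (illustrative only):

assert lerp(0, 10, 0) == 0       # t = 0 returns a
assert lerp(0, 10, 0.25) == 2.5  # a quarter of the way to b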
def generate_annotation_dict(annotation_file):
    """ Creates a dictionary where the key is a file name
    and the value is a list containing the
        - start time
        - end time
        - bird class
    for each annotation in that file.
    """
    annotation_dict = dict()
    for line in open(annotation_file):
        file_name, start_time, end_time, bird_class = line.strip().split('\t')
        if file_name not in annotation_dict:
            annotation_dict[file_name] = list()
        annotation_dict[file_name].append([start_time, end_time, bird_class])
    return annotation_dict
f40f210075e65f3dbe68bb8a594deb060a23ad8b
395
def extract_jasmine_summary(line):
    """
    Example SUCCESS karma summary line:
    PhantomJS 2.1.1 (Linux 0.0.0): Executed 1 of 1 SUCCESS (0.205 secs / 0.001 secs)

    Example FAIL karma summary line:
    PhantomJS 2.1.1 (Linux 0.0.0): Executed 1 of 1 (1 FAILED) ERROR (0.21 secs / 0.001 secs)
    """
    # get totals
    totals = line.split(' Executed ')[1].split(' ')
    executed_tests, total_tests = int(totals[0]), int(totals[2])
    # get failed
    if 'SUCCESS' in line:
        failed_tests = 0
    else:
        failed_tests = int(totals[3][1:])
    return {
        'total_tests': total_tests,
        'executed_tests': executed_tests,
        'failed_tests': failed_tests,
        'passed_tests': executed_tests - failed_tests
    }
f795ff015555cc3a2bd2d27527ae505a6dde9231
396
def degrees_of_freedom(s1, s2, n1, n2):
    """
    Compute the number of degrees of freedom using the Satterthwaite Formula

    @param s1 The unbiased sample variance of the first sample
    @param s2 The unbiased sample variance of the second sample
    @param n1 The number of observations in the first sample
    @param n2 The number of observations in the second sample
    """
    numerator = (s1**2 / n1 + s2**2 / n2)**2
    denominator = ((s1**2 / n1)**2) / (n1 - 1) + ((s2**2 / n2)**2) / (n2 - 1)
    return numerator / denominator
5f076e33584c61dca4410b7ed47feb0043ec97cb
397
import os


def get_requires_file(dist):
    """Get the path to the egg-info requires.txt file for a given dist."""
    return os.path.join(
        os.path.join(dist.location, dist.project_name + ".egg-info"),
        "requires.txt",
    )
f0fc66abc15fcba133240cc1783059d5694a08f6
398
def get_range_to_list(range_str):
    """
    Takes a range string (e.g. 123-125) and returns the list
    """
    start = int(range_str.split('-')[0])
    end = int(range_str.split('-')[1])
    if start > end:
        print("Your range string is wrong, the start is larger than the end!", range_str)
    return range(start, end + 1)
a88d9780ac2eba1d85ae70c1861f6a3c74991e5c
399
import io
import traceback


def _format_exception(e: BaseException):
    """
    Shamelessly stolen from stdlib's logging module.
    """
    with io.StringIO() as sio:
        traceback.print_exception(e.__class__, e, e.__traceback__, None, sio)
        return sio.getvalue().strip()
d80f60634a9862ca282b1c7ccf63ae8e945ffdc9
400
def annealing_epsilon(episode: int, min_e: float, max_e: float, target_episode: int) -> float:
    """Return a linearly annealed epsilon

    Epsilon will decrease over time until it reaches `target_episode`

    (epsilon)
         |
   max_e |\
         | \
         |  \
         |   \
   min_e |____\_______________ (episode)
              |
              target_episode

    slope = (min_e - max_e) / (target_episode)
    intercept = max_e

    e = slope * episode + intercept

    Args:
        episode (int): Current episode
        min_e (float): Minimum epsilon
        max_e (float): Maximum epsilon
        target_episode (int): epsilon becomes the `min_e` at `target_episode`

    Returns:
        float: epsilon between `min_e` and `max_e`
    """
    slope = (min_e - max_e) / (target_episode)
    intercept = max_e

    return max(min_e, slope * episode + intercept)
fab650085f271f1271025e23f260eb18e645a9ba
402
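A numeric trace for annealing_epsilon above (illustrative only; values chosen to stay exact in floating point):

assert annealing_epsilon(50, 0.0, 1.0, 100) == 0.5   # halfway down the slope
assert annealing_epsilon(200, 0.0, 1.0, 100) == 0.0  # past target_episode: clamped at min_e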
def pad_seq(seq, max_length, PAD=0):
    """
    :param seq: list of int,
    :param max_length: int,
    :return seq: list of int,
    """
    seq += [PAD for i in range(max_length - len(seq))]
    return seq
bb61677bc658e22b317e3d5fb10f7c85a84200d0
403
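A quick check for pad_seq above (illustrative only); note that it mutates and returns the same list:

assert pad_seq([1, 2], 5) == [1, 2, 0, 0, 0]
assert pad_seq([1, 2, 3], 2) == [1, 2, 3]  # already long enough: unchanged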
def pytest_funcarg__testname(request):
    """
    The testname as string, or ``None``, if no testname is known.

    This is the parameter added by the test generation hook, or ``None``, if
    no parameter was set, because test generation didn't add a call for this
    test.
    """
    return getattr(request, 'param', None)
87444cda36635b21c27d260835f96670d6b2d215
404
def find_opposite_reader(card_reader_list, find):
    """Returns the card reader on the opposite side of the door for the card reader in find"""
    for c in card_reader_list:
        if c.room_a == find.room_b and c.room_b == find.room_a:
            return c
    raise Exception("No reader on opposite side found")
8a70b9b35174be62f3ca816f385b4c29a6ebebe8
405
def extractYoloInfo(yolo_output_format_data):
    """ Extract box, objectness, class from yolo output format data """
    box = yolo_output_format_data[..., :6]
    conf = yolo_output_format_data[..., 6:7]
    category = yolo_output_format_data[..., 7:]
    return box, conf, category
ff28a5ce5490c61722ca06b0e09b9bd85ee7e111
408
def replace_umlauts(s: str) -> str:
    """
    Replace special symbols with the letters with umlauts (ä, ö and ü)

    :param s: string with the special symbols (::)
    :return: edited string
    """
    out = s.replace('A::', 'Ä').replace('O::', 'Ö').replace('U::', 'Ü') \
        .replace('a::', 'ä').replace('o::', 'ö').replace('u::', 'ü')
    return out
8fad1f1017a3fd860d7e32fd191dd060b75a7bb8
410
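A usage sketch for replace_umlauts above (illustrative only):

assert replace_umlauts("Gru::n and scho::n") == "Grün and schön"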
import json
import base64


def read_amuselabs_data(s):
    """
    Read in an amuselabs string, return a dictionary of data
    """
    # Data might be base64'd or not
    try:
        data = json.loads(s)
    except json.JSONDecodeError:
        s1 = base64.b64decode(s)
        data = json.loads(s1)

    ret = {}

    # metadata
    # technically these can be codewords but I've never seen one
    kind = "crossword"
    width, height = data['w'], data['h']
    ret['metadata'] = {
        'width': width,
        'height': height,
        'kind': kind,
        'author': data.get('author'),
        'title': data.get('title'),
        'copyright': data.get('copyright'),
        'noClueCells': True,  # no notepad?
    }

    # grid
    grid = []
    box = data['box']
    cellInfos = data.get('cellInfos', [])
    # Reshape cellInfos to make lookup easier
    markup = {}
    for c in cellInfos:
        markup[(c['x'], c['y'])] = c

    for y in range(height):
        for x in range(width):
            cell = {'x': x, 'y': y, 'value': None}
            if box[x][y] == '\x00':
                cell['isBlock'] = True
            else:
                cell['solution'] = box[x][y]
            style = {}
            if markup.get((x, y)):
                thisMarkup = markup[(x, y)]
                if thisMarkup.get('isCircled'):
                    style['shapebg'] = 'circle'
                if thisMarkup.get('isVoid'):
                    cell['isBlock'] = False
                    cell['isVoid'] = True
                bar_string = ''
                for letter, side in {'B': 'bottom', 'R': 'right'}.items():
                    if thisMarkup.get(f'{side}Wall'):
                        bar_string += letter
                if bar_string:
                    style['barred'] = bar_string
            cell['style'] = style
            grid.append(cell)
    ret['grid'] = grid

    # clues
    placed_words = data['placedWords']
    across_words = [word for word in placed_words if word['acrossNotDown']]
    down_words = [word for word in placed_words if not word['acrossNotDown']]
    # sorting is probably unnecessary
    across_words = sorted(across_words, key=lambda x: (x['y'], x['x']))
    down_words = sorted(down_words, key=lambda x: (x['y'], x['x']))
    across_clues = [{'number': str(x['clueNum']), 'clue': x['clue']['clue']} for x in across_words]
    down_clues = [{'number': str(x['clueNum']), 'clue': x['clue']['clue']} for x in down_words]

    ret['clues'] = [{'title': 'Across', 'clues': across_clues},
                    {'title': 'Down', 'clues': down_clues}]

    return ret
f9c2fb2807d1003261bec7b58e4ba025aac65a6a
411
import os
import sys
import configparser


def update_site_config(site_name, parameters):
    """Update the site config to establish the database settings"""
    site_directory = os.path.join('web', 'sites', site_name)
    if not os.path.isdir(site_directory):
        print('site directory {} missing'.format(site_directory))
        sys.exit(-1)

    config_filename = os.path.join(site_directory, 'site.ini')
    if os.path.exists(config_filename):
        existing_config = configparser.ConfigParser()
        existing_config.read(config_filename)
        if existing_config.has_section('database'):
            print('database settings already exist in {}'.format(
                config_filename
            ))
            print(existing_config.options('database'))
            sys.exit(-1)

    new_config = configparser.RawConfigParser()
    new_config.add_section('database')
    for key, value in parameters.items():
        if key == 'database':
            key = 'name'
        new_config.set('database', key, value)
    with open(config_filename, 'a') as configfile:
        new_config.write(configfile)

    return new_config
8dce45257189cb5c4830f18fc1bcad388a193252
412
import torch
import math


def sample_random_lightdirs(num_rays, num_samples, upper_only=False):
    """Randomly sample directions in the unit sphere.

    Args:
        num_rays: int or tensor shape dimension. Number of rays.
        num_samples: int or tensor shape dimension. Number of samples per ray.
        upper_only: bool. Whether to sample only on the upper hemisphere.

    Returns:
        lightdirs: [R, S, 3] float tensor. Random light directions sampled from
            the unit sphere for each sampled point.
    """
    if upper_only:
        min_z = 0
    else:
        min_z = -1

    phi = torch.rand(num_rays, num_samples) * (2 * math.pi)  # [R, S]
    cos_theta = torch.rand(num_rays, num_samples) * (1 - min_z) + min_z  # [R, S]
    theta = torch.acos(cos_theta)  # [R, S]

    x = torch.sin(theta) * torch.cos(phi)
    y = torch.sin(theta) * torch.sin(phi)
    z = torch.cos(theta)

    lightdirs = torch.cat((x[..., None], y[..., None], z[..., None]), dim=-1)  # [R, S, 3]
    return lightdirs
7f7657ff66d0cffea6892dffdf49ba6b52b9def9
414
def date_handler(obj):
    """make datetime object json serializable.

    Notes
    -----
    Taken from here: https://tinyurl.com/yd84fqlw
    """
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    else:
        raise TypeError
741867e05e1b5f3e9d0e042b3b1576fb61ab0219
415
def has_type(typestmt, names):
    """Return type with name if `type` has name as one of its base types,
    and name is in the `names` list.  otherwise, return None."""
    if typestmt.arg in names:
        return typestmt
    for t in typestmt.search('type'):  # check all union's member types
        r = has_type(t, names)
        if r is not None:
            return r
    typedef = getattr(typestmt, 'i_typedef', None)
    if typedef is not None and getattr(typedef, 'i_is_circular', None) is False:
        t = typedef.search_one('type')
        if t is not None:
            return has_type(t, names)
    return None
d534331df62f76efdcbb93be52eb57ee600a7783
416
import base64
import struct


def tiny_id(page_id):
    """Return *tiny link* ID for the given page ID."""
    return (base64.b64encode(struct.pack('<L', int(page_id)).rstrip(b'\0'),
                             altchars=b'_-')
            .rstrip(b'=').decode('ascii'))
1a37b814ff9845949c3999999b61f79b26dacfdc
417
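A concrete trace for tiny_id above (illustrative only): 12345 packs little-endian to b'90\x00\x00\x00', strips trailing NULs to b'90', and base64-encodes to 'OTA':

assert tiny_id(12345) == 'OTA'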
def getSpectra(dataframe, indices):
    """
    Returns the files for training and testing

    Inputs
    -----------
    dataframe: pd.DataFrame object from which we need to get spectra
    indices: row values for which we need the spectra

    Returns
    -----------
    spec_vals: pd.DataFrame object containing spectra values for given indices
    """
    colList = dataframe.columns
    spec_inds = [index for index in range(len(colList))
                 if colList[index].startswith('Spectrum_')]
    spec_cols = colList[spec_inds]
    spec_vals = dataframe[spec_cols].iloc[indices]
    return spec_vals
606757ffdde39c0847dd0402342441931d66a081
418