content: string (length 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
import sys import traceback def _on_process(*args, **kwargs): """Process the given function in the current subprocess""" try: func = kwargs['__func__'] del kwargs['__func__'] return func(*args, **kwargs) except KeyboardInterrupt: sys.exit() except Exception as e: raise type(e)(traceback.format_exc())
cc6b90daa3aba127f7c9ea596b0718bdefc5688b
419
def bpm_to_mspt(bpm, res=480): """ Converts an integer value of beats per minute to milliseconds per tick, where res is the resolution in ticks per quarter note """ return 60000 / res / bpm
6b962b8253eac29f52c48ca89a6dce0417adb11b
420
import re def matchPP(a_string): """assumes a_string is a string returns re match object if it finds two consecutive words that start with P, else returns None""" pattern = r"[Pp]\w+\s[Pp]\w+" result = re.search(pattern, a_string) return result
c46eb4e0380a54cc36db0dc8969d17d65a546bf3
422
def get_qbert_v3_url(qbert_url, project_id): """Keystone only hands out a v1 url I need v3.""" qbert_v3_url = "{0}/v3/{1}".format(qbert_url[0:-3], project_id) return qbert_v3_url
423e1f7a601f4ecafbc7d52d1f95fd59195f193e
423
def gen_all_holds(hand): """ Generate all possible choices of dice from hand to hold. hand: sorted full yahtzee hand Returns a set of tuples, where each tuple is sorted dice to hold """ # start off with the original hand in set set_holds = set([(hand)]) # now iterate with all sub hands with one element removed for item in hand: list_hand = list(hand) list_hand.remove(item) # add to set_holds this sub hand set_holds.add(tuple(list_hand)) # also add to set_holds the recursion of this sub hand # set functionality also takes care of repeated sub hands set_holds.update(gen_all_holds(tuple(list_hand))) return set_holds
5c8af5040f619fabef56918d399b5a1cab8893a4
424
def langstring(value: str, language: str = "x-none") -> dict: """Langstring.""" return { "langstring": { "lang": language, "#text": value, } }
dca23a329cfc87d8cfa52cd2b009ce723b7d2270
425
def chinese_half2full(): """Convert all halfwidth Chinese characters to fullwidth . Returns: """ def string_op(input_str:str): rstring = "" for uchar in input_str: u_code = ord(uchar) if u_code == 32: u_code = 12288 elif 33 <= u_code <= 126: u_code += 65248 rstring += chr(u_code) return rstring return string_op
e89a6314a57192e62b32e1f7e044a09700b5bb73
426
import argparse def get_arguments(): """Parse command line arguments""" parser = argparse.ArgumentParser(description="""A simple popup calendar""") parser.add_argument( "-p", "--print", help="print date to stdout instead of opening a note", action="store_true", ) parser.add_argument( "-f", "--format", help="""option '-p' output format (datetime.strftime format, default='%%Y-{%%m}-%%d')""", dest="format", default="%Y-%m-%d", ) parser.add_argument( "-e", "--editor", help="""editor command to open notes""", dest="editor", default="xdg-open", ) parser.add_argument( "-l", "--locale", help="""force system locale, for example '-l es_ES.utf8'""", dest="locale", default="", ) parser.add_argument( "-c", "--read-cache", dest="is_force_read_cache", action="store_true", help="""force calendar to read old date from cache""" ) parser.add_argument( "-t", "--theme", help="""set calendar theme, default=classic_dark (theme file name without extension)""", dest="theme" ) args, unknown = parser.parse_known_args() unknown = unknown if len(unknown) == 0 else "".join(unknown).strip(' ') return args, unknown
7e7940001679e05f137798d127f54c9ab7512a63
427
def absModuleToDist(magApp, magAbs): """ Convert apparent and absolute magnitude into distance. Parameters ---------- magApp : float Apparent magnitude of object. magAbs : float Absolute magnitude of object. Returns ------- Distance : float The distance resulting from the difference in apparent and absolute magnitude [pc]. """ d = 10.0**(-(magAbs - magApp) / 5.0 + 1.0) return d
a7d98ff479114f08e47afefc97a1119f5e8ff174
428
import base64 def decoded_anycli(**kwargs): """ Return the decoded return from AnyCLI request - Do not print anything :param kwargs: keyword value: value to display :return: return the result of AnyCLI in UTF-8 :Example: result = cli(url=base_url, auth=s, command="show vlan") decoded_anycli(result) """ value = kwargs.get('value', None) return base64.b64decode(value['result_base64_encoded']).decode('utf-8')
223c4f9aabfef530896729205071e7fb8f9c8301
429
import tqdm def generate_formula_dict(materials_store, query=None): """ Function that generates a nested dictionary of structures keyed first by formula and then by task_id using mongo aggregation pipelines Args: materials_store (Store): store of materials Returns: Nested dictionary keyed by formula-mp_id with structure values. """ props = ["pretty_formula", "structure", "task_id", "magnetic_type"] results = list(materials_store.groupby("pretty_formula", properties=props, criteria=query)) formula_dict = {} for result in tqdm.tqdm(results): formula = result['_id']['pretty_formula'] task_ids = [d['task_id'] for d in result['docs']] structures = [d['structure'] for d in result['docs']] formula_dict[formula] = dict(zip(task_ids, structures)) return formula_dict
ae232c806972262029966307e489df0b12d646f5
430
import os def default_config(): """Provides a default configuration file location.""" return os.path.expanduser('~/.config/discogstagger/discogs_tagger.conf')
2b86700484916ea2f6c47935ec8a43aa0d920184
431
def shape_extent_to_header(shape, extent, nan_value=-9999): """ Create a header dict with shape and extent of an array """ ncols = shape[1] nrows = shape[0] xllcorner = extent[0] yllcorner = extent[2] cellsize_x = (extent[1]-extent[0])/ncols cellsize_y = (extent[3]-extent[2])/nrows if cellsize_x != cellsize_y: raise ValueError('extent produces different cellsize in x and y') cellsize = cellsize_x header = {'ncols':ncols, 'nrows':nrows, 'xllcorner':xllcorner, 'yllcorner':yllcorner, 'cellsize':cellsize, 'NODATA_value':nan_value} return header
957b59e7f464901a5430fd20ab52f28507b55887
433
import logging def logged(class_): """Class-level decorator to insert logging. This assures that a class has a ``.log`` member. :: @logged class Something: def __init__(self, args): self.log(f"init with {args}") """ class_.log= logging.getLogger(class_.__qualname__) return class_
cd58e355151ab99aa1694cbd9fb6b710970dfa19
434
def simple_scan_network(): """ Do a simple network scan, which only works if your network configuration is 192.168.1.x """ base_ip = "192.168.1." addresses = ['127.0.0.1'] for index in range(1, 255): addresses.extend([base_ip + str(index)]) return addresses
b0f19ae1c98678e87d270b308b5359df9a6a4d30
435
import math def _generate_resolution_shells(low, high): """Generate 9 evenly spaced in reciprocal space resolution shells from low to high resolution, e.g. in 1/d^2.""" dmin = (1.0 / high) * (1.0 / high) dmax = (1.0 / low) * (1.0 / low) diff = (dmin - dmax) / 8.0 shells = [1.0 / math.sqrt(dmax)] for j in range(8): shells.append(1.0 / math.sqrt(dmax + diff * (j + 1))) return shells
52fa4309f2f34a39a07d8524dd7f226e3d1bae6a
436
def get_page_url(skin_name, page_mappings, page_id): """ Returns the page_url for the given page_id and skin_name """ fallback = '/' if page_id is not None: return page_mappings[page_id].get('path', '/') return fallback
6ead4824833f1a7a002f54f83606542645f53dd6
437
def update_t_new_docker_image_names(main, file): """ Updates the names of the docker images from lasote to conanio """ docker_mappings = { "lasote/conangcc49": "conanio/gcc49", "lasote/conangcc5": "conanio/gcc5", "lasote/conangcc6": "conanio/gcc6", "lasote/conangcc7": "conanio/gcc7", "lasote/conangcc8": "conanio/gcc8", "lasote/conanclang39": "conanio/clang39", "lasote/conanclang40": "conanio/clang40", "lasote/conanclang50": "conanio/clang50", "lasote/conanclang60": "conanio/clang60", } found_old_name = False for old, new in docker_mappings.items(): if main.file_contains(file, old): main.replace_in_file(file, old, new) found_old_name = True if found_old_name: main.output_result_update(title="Travis: Update Docker image names from lasote/ to conanio/") return True return False
6d1a1dd0f254252cf73d7a89c926dc2476fc89e8
439
import json def unpack_nwchem_basis_block(data): """Unserialize a NWChem basis data block and extract components @param data: a JSON of basis set data, perhaps containing many types @type data : str @return: unpacked data @rtype : dict """ unpacked = json.loads(data) return unpacked
dfa920f80ae8f0caf15441c354802410c8add690
440
def pymodbus_mocked(mocker): """Patch pymodbus to deliver results.""" class ResponseContent: """Fake a response.""" registers = [0] class WriteStatus: """Mock a successful response.""" @staticmethod def isError(): # pylint: disable=invalid-name,missing-function-docstring return False # Patch connection function mocker.patch("pymodbus.client.sync.ModbusTcpClient.connect") mocker.patch( "pymodbus.client.sync.ModbusTcpClient.read_holding_registers", return_value=ResponseContent, ) mocker.patch( "pymodbus.client.sync.ModbusTcpClient.write_registers", return_value=WriteStatus )
fdee663d9a8a80496ab6678aacb0b820251c83e1
441
import zipfile def unzip_file(zip_src, dst_dir): """ Extract a zip archive :param zip_src: full path to the zip file :param dst_dir: destination directory to extract into :return: """ r = zipfile.is_zipfile(zip_src) if r: fz = zipfile.ZipFile(zip_src, "r") for file in fz.namelist(): fz.extract(file, dst_dir) else: return "Please upload a zip archive"
8b89f41f38cc688f6e0473a77215ae72b163654a
442
def abort_multipart_upload(resource, bucket_name, object_name, upload_id): """Abort in-progress multipart upload""" mpupload = resource.MultipartUpload(bucket_name, object_name, upload_id) return mpupload.abort()
93535c2404db98e30bd29b2abbda1444ae4d0e8a
443
def double(n): """ Takes a number n and doubles it """ return n * 2
8efeee1aa09c27d679fa8c5cca18d4849ca7e205
444
import subprocess def get_current_git_branch(): """Get current git branch name. Returns: str: Branch name """ branch_name = "unknown" try: branch_name = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('ascii').strip() except subprocess.CalledProcessError: pass return branch_name
6d677d0f4e15532c20774e479b49a23093dad09a
446
from collections import OrderedDict def get_setindices(header, setnames): """From header like ---ID, coverage, set1_q-value set2_q-value--- this returns indices for different sets {'q-value': {'set1': 2, 'set2': 3}} """ setindices = OrderedDict() for index, field in enumerate(header): for setname in setnames: if field.startswith('{}_'.format(setname)): fieldname = field[len(setname) + 1:] try: setindices[fieldname][setname] = index except KeyError: setindices[fieldname] = {setname: index} return setindices
1bdbda0528098a55438b4cb24ca22358fae7e682
447
import argparse def get_args(): """ Get arguments to the tool with argparse :return: The arguments """ parser = argparse.ArgumentParser() parser.add_argument("filename", action='store', help='.xyz file(s) with optimised geometries from which to make .top and .gro files', nargs="+") parser.add_argument('-id', type=str, default='AAA', help="Three letter name of the residue/molecule e.g LYS") parser.add_argument('-c', type=int, default=0, help="Charge of the molecule Default: %(default)s") parser.add_argument('-m', type=int, default=1, help="Multiplicity of the molecule. Default: %(default)s") parser.add_argument('-notrash', action='store_true', default=False, help="Don't trash all the output files. Only .gro and .top will be left by default") return parser.parse_args()
0085922ad21776521bda2b2ce204983dd7181b89
449
def gen_mode(): """Ask the player which exam mode they want""" while True: mode = input("How do you want to be quizzed?\nEnter 1 for sequential order\nEnter 2 for random order\n>>") if mode in ("1", "2"): return mode else: print() print("Invalid input, please enter \"1\" or \"2\"") print("You do not need to type the quotation marks") print("--------------------------------")
eb3ff4a0812fe088f3acb1302730f5f48c6fbcda
451
import random def random_sources(xSize, ySize, zSize, number): """ returns a list of random positions in the grid where the sources of nutrients (blood vessels) will be """ src = [] for _ in range(number): x = random.randint(0, xSize-1) y = random.randint(0, ySize-1) z = random.randint(0, zSize-1) if (x, y, z) not in src: src.append((x,y,z)) return src
17dab43ea2468a11e3720ff0f7eb33b605371496
452
def rho_err(coeffs, rho, z, density_func): """ Returns the difference between the estimated and actual data """ soln = density_func(z, coeffs) return rho - soln
4a2d7c7243cad062d8568ab72599b4d8be26f874
453
def sort_terms(node, parent_children, hierarchy): """Recursively create a list of nodes grouped by category.""" for c in parent_children.get(node, []): hierarchy.append(c) sort_terms(c, parent_children, hierarchy) return hierarchy
5ae737206f3859c01da6b8e9475db688e53a8d13
454
async def root(): """ :return: welcoming page returning Made by @woosal1337 """ try: return {f"Made by @woosal1337"} except Exception as e: return {f"{e} has happened!"}
3d4e9acf038f60a9d91755eafcfb7e9dcfaa7a71
456
def sequence_accuracy_score(y_true, y_pred): """ Return sequence accuracy score. Match is counted only when two sequences are equal. """ total = len(y_true) if not total: return 0 matches = sum(1 for yseq_true, yseq_pred in zip(y_true, y_pred) if yseq_true == yseq_pred) return matches / total
b1345aaa6fd0161f648a1ca5b15c921c2ed635ad
457
def load_content(sentence_file): """Load input file with sentences to build LSH. Args: sentence_file (str): Path to input with txt file with sentences to Build LSH. Returns: dict: Dict with strings and version of string in lower case and without comma. """ sentences = {} with open(sentence_file) as content: for line in content: line = line.strip() line_clean = line.replace(",", "") line_clean = line_clean.lower() sentences[line_clean] = line return sentences
31c3104179e995d59cffbea92caf2d32decc572c
458
def rare_last_digit(first): """Given a leading digit, first, return all possible last digits of a rare number""" if first == 2: return (2,) elif first == 4: return (0,) elif first == 6: return (0,5) elif first == 8: return (2,3,7,8) else: raise ValueError(f"Invalid first digit of rare number: {first}")
2b15d35a6281d679dce2dedd7c1944d2a93e8756
459
import webbrowser def _open_public_images(username): """ :param username: username of a given person :return: """ try: new_url = "https://www.facebook.com/" + username + "/photos_all" webbrowser.open_new_tab(new_url) return 1 except Exception as e: print(e) return -1
bd488bae2182bd2d529734f94fb6fc2b11ca88d0
460
def fermat_number(n: int) -> int: """ https://en.wikipedia.org/wiki/Fermat_number https://oeis.org/A000215 >>> [fermat_number(i) for i in range(5)] [3, 5, 17, 257, 65537] """ return 3 if n == 0 else (2 << ((2 << (n - 1)) - 1)) + 1
4427ab7171fd86b8e476241bc94ff098e0683363
461
def get_id_ctx(node): """Gets the id and attribute of a node, or returns a default.""" nid = getattr(node, "id", None) if nid is None: return (None, None) return (nid, node.ctx)
cbca8573b4246d0378297e0680ab05286cfc4fce
462
def printImproperDihedral(dihedral, alchemical = False): """Generate improper dihedral line Parameters ---------- dihedral : dihedral Object dihedral Object Returns ------- dihedralLine : str Improper dihedral line data """ V2 = dihedral.V2*0.5 V2_B = dihedral.V2_B*0.5 label = 'imptors %7s %5s %5s %5s %8.3f %4.1f %2d\n' % \ (dihedral.atomA.typeA, dihedral.atomB.typeA, dihedral.atomC.typeA, dihedral.atomD.typeA, V2, 180.0, 2) if alchemical: label = 'imptors %7s %5s %5s %5s %8.3f %4.1f %2d\n' % \ (dihedral.atomA.typeB, dihedral.atomB.typeB, dihedral.atomC.typeB, dihedral.atomD.typeB, V2_B, 180.0, 2) return label
bcfece212ac6cc0eb476cb96c44e6af910185bc7
464
from typing import Any import importlib def relative_subpackage_import(path: str, package: str) -> Any: """Import a module or subpackage relative to the given package. Args: path (str): Relative module path; a leading '.' is prepended if missing. package (str): Anchor package for the relative import. Returns: Any: The imported module. """ if not path.startswith('.'): path = '.' + path return importlib.import_module(path, package=package)
2345267b60947f57098b0678dce845d858f2d2a8
468
def _is_arraylike(arr): """Check if object is an array.""" return ( hasattr(arr, "shape") and hasattr(arr, "dtype") and hasattr(arr, "__array__") and hasattr(arr, "ndim") )
71bfbb7f93116879ee63bb4fc1ad8b3a3d8807c3
469
import os def source_ccp4(): """Function to return bash command to source CCP4""" if os.name == "nt": return return "source {}".format(os.path.join(os.environ["CCP4"], "bin", "ccp4.setup-sh"))
7b3f2920906ff4e6b680e4696a66a67e56c72d03
470
import torch def get_meshgrid_samples(lower, upper, mesh_size: tuple, dtype) ->\ torch.Tensor: """ Often we want to get the mesh samples in a box lower <= x <= upper. This returns a torch tensor of size (prod(mesh_size), sample_dim), where each row is a sample in the meshgrid. """ sample_dim = len(mesh_size) assert (len(upper) == sample_dim) assert (len(lower) == sample_dim) assert (len(mesh_size) == sample_dim) meshes = [] for i in range(sample_dim): meshes.append( torch.linspace(lower[i], upper[i], mesh_size[i], dtype=dtype)) mesh_tensors = torch.meshgrid(*meshes) return torch.cat( [mesh_tensors[i].reshape((-1, 1)) for i in range(sample_dim)], dim=1)
98a2c7b064d7b23824b547d0fc0a16eb37cb0923
471
def lengthOfLongestSubstring(s): """ :type s: str :rtype: int """ res = "" n = 0 for i in s: if i not in res: res = res + i else: indexofi = res.find(i) res = res[indexofi+1::] + i k = len(res) if k > n: n = k print(res) return n
951366d46a47030c5d37026bd6712eeb73c34af9
472
from functools import reduce def getattrs(o, *attrs, **kwargs): """ >>> getattrs((), '__iter__', '__name__', 'strip')('_') 'iter' >>> getattrs((), 'foo', 'bar', default=0) 0 """ if 'default' in kwargs: default = kwargs['default'] c = o for attr in attrs: try: c = getattr(c, attr) except AttributeError: return default return c else: return reduce(getattr, attrs, o)
64d55154d2399c7097476a8335eae81749588286
473
def me_length_filter(me_iv_pairs, min_length=100): """Returns list of (InsertionVertices, InsertionVertices) tuples with those containing paths going backwards through the ME sequence filtered out """ filtered = [] for iv_pair in me_iv_pairs: enter_iv, exit_iv = iv_pair me_seq_len = exit_iv.exit_ref.pos - enter_iv.enter_ref.pos if me_seq_len > min_length: filtered.append(iv_pair) return filtered
9c344ee913f60aace3b8d94d04500d95166e67d6
474
import functools def compose(fns): """Creates a function composition.""" def composition(*args, fns_): res = fns_[0](*args) for f in fns_[1:]: res = f(*res) return res return functools.partial(composition, fns_=fns)
5c791f52f70707078e941fe169679ddc80a32782
475
def set_index_da_ct(da): """Stacks all coordinates into one multindex and automatically generates a long_name""" coordnames = list(da.coords) da_stacked = da.set_index(ct=coordnames) if len(coordnames) == 1: #only one coordinate just rename ct to the coordinate name da_unstacked = da_stacked.rename(ct=coordnames[0]) else: #generate multindex long_name_string = 'Test Case (' for coord in da.coords: if 'long_name' in da.coords[coord].attrs: long_name_string = long_name_string + da.coords[coord].attrs['long_name'] + ', ' else: long_name_string = long_name_string + coord + ', ' #remove last comma and close parentheses long_name_string = long_name_string[0:-2] + ')' da_stacked.coords['ct'].attrs = dict(long_name=long_name_string) da_unstacked = da_stacked.unstack() for coord in da.coords: da_unstacked.coords[coord].attrs = da.coords[coord].attrs return da_unstacked, da_stacked
396b1c629352c3843617588071295684e1f2bf79
476
def cal_max_len(ids, curdepth, maxdepth): """calculate max sequence length""" assert curdepth <= maxdepth if isinstance(ids[0], list): res = max([cal_max_len(k, curdepth + 1, maxdepth) for k in ids]) else: res = len(ids) return res
0a6c4c96d7518b98d69141711272a97426a623b2
477
def hasPathSum(self, root, sum): """ :type root: TreeNode :type sum: int :rtype: bool """ if root is None: return False if sum - root.val == 0 and root.left is None and root.right is None: return True else: return self.hasPathSum(root.left, sum - root.val) or self.hasPathSum(root.right, sum - root.val)
ffab5b8205aa9785c86ac365bd6e854319138627
480
import sqlite3 def does_column_exist_in_db(db, table_name, col_name): """Checks if a specific col exists""" col_name = col_name.lower() query = f"pragma table_info('{table_name}');" all_rows = [] try: db.row_factory = sqlite3.Row # For fetching columns by name cursor = db.cursor() cursor.execute(query) all_rows = cursor.fetchall() except sqlite3.Error as ex: print(f'Query error, query={query} Error={ex}') for row in all_rows: if row['name'].lower() == col_name: return True return False
90abc20c9643e93641e37c0e94fd504cbcf09928
481
def calc_mean_score(movies): """Helper method to calculate mean of list of Movie namedtuples, round the mean to 1 decimal place""" ratings = [m.score for m in movies] mean = sum(ratings) / max(1, len(ratings)) return round(mean, 1)
6f837ff251e6221227ba4fa7da752312437da90f
483
def word_after(line, word): """'a black sheep', 'black' -> 'sheep'""" return line.split(word, 1)[-1].split(' ', 1)[0]
cfa16244d00af8556d7955b7edeb90bac0a213ba
484
def domain_in_domain(subdomain, domain): """Returns True if subdomain is a sub-domain of domain. subdomain A *reversed* list of strings returned by :func:`split_domain` domain A *reversed* list of strings as returned by :func:`split_domain` For example:: >>> domain_in_domain(['com', 'example'], ... ['com', 'example', 'www']) True""" if len(subdomain) <= len(domain): i = 0 for d in subdomain: if d != domain[i]: return False i += 1 return True else: return False
cb1b3a3f899f13c13d4168c88ca5b9d4ee345e47
485
import re import argparse def parse_list_or_range(arg): """ Parses a string that represents either an integer or a range in the notation ``<start>:<step>:<stop>``. Parameters ---------- arg : :obj:`str` Integer or range string. Returns ------- int or :obj:`list` of int Raises ------ ArgumentTypeError If the input can be interpreted as neither an integer nor a valid range. """ if re.match(r'^\d+:\d+:\d+$', arg) or re.match(r'^\d+:\d+$', arg): rng_params = list(map(int, arg.split(':'))) step = 1 if len(rng_params) == 2: # start, stop start, stop = rng_params else: # start, step, stop start, step, stop = rng_params rng = list(range(start, stop + 1, step)) # include last stop-element in range if len(rng) == 0: raise argparse.ArgumentTypeError('{0} is an empty range'.format(arg)) return rng elif re.match(r'^\d+$', arg): return int(arg) raise argparse.ArgumentTypeError( '{0} is neither an integer nor a valid range in the form <start>:[<step>:]<stop>'.format( arg ) )
0d487bd80fc14b763a16bc8a167983a1f7959e3e
486
import re def is_regex(regex, invert=False): """Test that value matches the given regex. The regular expression is searched against the value, so a match in the middle of the value will succeed. To specifically match the beginning or the whole regex, use anchor characters. If invert is true, then matching the regex will cause the test to fail. """ # pylint: disable=unused-argument # args defined by test definition rex = re.compile(regex) def is_regex_test(conf, path, value): match = rex.search(value) if invert and match: return u'"{0}" matches /{1}/'.format(value, regex) if not invert and not match: return u'"{0}" does not match /{1}/'.format(value, regex) return None return is_regex_test
0db71b3dae2b2013650b65ecacfe6aed0cd8366b
488
from typing import List def list_to_decimal(nums: List[int]) -> int: """Accept a list of positive integers in the range(0, 10) and return an integer where each int of the given list represents decimal place values from first element to last. E.g [1,7,5] => 175 [0,3,1,2] => 312 Place values are 10**n where n represents the digit position Eg to calculate 1345, we have 5 1's, 4 10's, 3 100's and 1 1000's 1, 3 , 4 , 5 1000's, 100's, 10's, 1's """ for num in nums: if isinstance(num, bool) or not isinstance(num, int): raise TypeError elif num not in range(0, 10): raise ValueError return int("".join(map(str, nums)))
7727ce610987fc9da03a5e23ec8674d1deb7c7f0
489
def str_to_bool(v): """ :type v: str """ return v.lower() in ("true", "1")
3eb7ae9e1fe040504ea57c65ed1cbd48be9269cf
490
def build_varint(val): """Build a protobuf varint for the given value""" data = [] while val > 127: data.append((val & 127) | 128) val >>= 7 data.append(val) return bytes(data)
46f7cd98b6858c003cd66d87ba9ec13041fcf9db
493
def analyze(osi, num_inc=1, dt=None, dt_min=None, dt_max=None, jd=None): """ Performs an analysis step. Returns 0 if successful, and <0 if fail Parameters ---------- osi num_inc dt dt_min dt_max jd Returns ------- """ op_type = 'analyze' if dt is None: parameters = [int(num_inc)] elif dt_min is None: parameters = [int(num_inc), float(dt)] else: parameters = [int(num_inc), float(dt), dt_min, dt_max, jd] return osi.to_process(op_type, parameters)
6c748a49c5e54cf88a04002d98995f4fd90d5130
494
def MAKEFOURCC(ch0: str, ch1: str, ch2: str, ch3: str) -> int: """Implementation of Window's `MAKEFOURCC`. This is simply just returning the bytes of the joined characters. `MAKEFOURCC(*"DX10")` can also be implemented by `Bytes(b"DX10")`. Args: ch0 (str): First char ch1 (str): Second char ch2 (str): Third char ch3 (str): Fourth char Returns: int: The integer representation of given characters. **Reference**: `Microsoft <https://goo.gl/bjtMFA>`__ """ return (ord(ch0) << 0) | (ord(ch1) << 8) | (ord(ch2) << 16) | (ord(ch3) << 24)
91afd9dcc8f1cd8c5ef167bdb560c8bf2d89b228
496
def sort_configs(configs): # pylint: disable=R0912 """Sort configs by global/package/node, then by package name, then by node name Attributes: configs (list): List of config dicts """ result = [] # Find all unique keys and sort alphabetically _keys = [] for config in configs: if config["key"] not in _keys: _keys.append(config["key"]) _keys = sorted(_keys, key=str.lower) # For each key find globals, then packages, then nodes for key in _keys: _packages = [] _nodes = [] for config in configs: if config["key"] == key: if config["type"] == "global": result.append(config) elif config["type"] == "package": _packages.append(config) elif config["type"] == "node": _nodes.append(config) # Sort the package and node elements alphabetically _package_ids = sorted([_package["id"] for _package in _packages], key=str.lower) for package in _package_ids: for config in configs: if config["key"] == key and config["type"] == "package" and config["id"] == package: result.append(config) break _node_ids = sorted([_node["id"] for _node in _nodes], key=str.lower) for node in _node_ids: for config in configs: if config["key"] == key and config["type"] == "node" and config["id"] == node: result.append(config) break return result
5c05214af42a81b35986f3fc0d8670fbef2e2845
497
def get_present_types(robots): """Get unique set of types present in given list""" return {type_char for robot in robots for type_char in robot.type_chars}
75c33e0bf5f97afe93829c51086100f8e2ba13af
498
import re def ruru_old_log_checker(s): """ Returns True if the log is in the old format, False otherwise :param s: :return: """ time_data_regex = r'[0-9]{4}\/[0-9]{2}\/[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}' # In the new ruru server log format the timestamp can be read from the first div with class d12150; in the old format it cannot time_data = re.search(time_data_regex, str(s.find('div', class_='d12150'))) return False if time_data else True
54f6a94dab98ef6947496b8e1f95401d99424ee2
499
def symmetric_product(tensor): """ Symmetric outer product of tensor """ shape = tensor.size() idx = list(range(len(shape))) idx[-1], idx[-2] = idx[-2], idx[-1] return 0.5 * (tensor + tensor.permute(*idx))
4f96ab5f0bd41080352b1e5e806b6a73b3506950
500
def SFRfromLFIR(LFIR): """ Kennicutt 1998 To get Star formation rate from LFIR (8-1000um) LFIR in erg s-1 SFR in Msun /year """ SFR = 4.5E-44 * LFIR return SFR
4adf401bbf2c6547cea817b52eb881531db8c798
502
import math def meanStdDev( valueList, scale ): """Compute the mean and standard deviation of a *non-empty* list of numbers.""" numElements = len(valueList) if numElements == 0: return(None, 0.0) mean = float(sum(valueList)) / numElements variance = 0 for value in valueList: variance += math.pow( value - mean, 2 ) variance = variance / float(numElements) return (scale * mean, scale * math.sqrt(variance))
2970ae1e4382092eb67219373aa26b9ca75226a3
504
def points_from_x0y0x1y1(xyxy): """ Constructs a polygon representation from a rectangle described as a list [x0, y0, x1, y1] """ [x0, y0, x1, y1] = xyxy return "%s,%s %s,%s %s,%s %s,%s" % ( x0, y0, x1, y0, x1, y1, x0, y1 )
8a7d766145dc31e6619b290b8d96a95983f9cc01
505
def get_basic_track_info(track): """ Given a track object, return a dictionary of track name, artist name, album name, track uri, and track id. """ # Remember that artist and album artist have different entries in the # spotify track object. name = track["name"] artist = track['artists'][0]['name'] album = track['album']['name'] uri = track["uri"] track_id = track['id'] output = {"name": name, "artist": artist, "album": album, "uri": uri, "id": track_id} return output
925f7bb00482e946ad7a6853bac8b243d24145c7
506
from datetime import datetime def temporal_filter(record_date_time, time_or_period, op): """ Helper function to perform temporal filters on feature set :param record_date_time: datetime field value of a feature :type record_date_time: :class:`datetime.datetime` :param time_or_period: the time instant or time span to use as a filter :type time_or_period: :class:`datetime.datetime` or a tuple of two datetimes or a tuple of one datetime and one :class:`datetime.timedelta` :param op: the comparison operation :type op: str :return: a comparison expression result :rtype: bool """ d = datetime.strptime(record_date_time, "%Y-%m-%dT%H:%M:%SZ") result = None # perform before and after operations if op in ['BEFORE', 'AFTER']: query_date_time = datetime.strptime( time_or_period.value, "%Y-%m-%dT%H:%M:%SZ") if op == 'BEFORE': return d <= query_date_time elif op == 'AFTER': return d >= query_date_time # perform during operation elif 'DURING' in op: low, high = time_or_period low = datetime.strptime(low.value, "%Y-%m-%dT%H:%M:%SZ") high = datetime.strptime(high.value, "%Y-%m-%dT%H:%M:%SZ") result = d >= low and d <= high if 'BEFORE' in op: result = d <= high elif 'AFTER' in op: result = d >= low return result
9f76d6a6eb96da9359c4bbb80f6cfb1dfdcb4159
507
import contextlib import os def supress_stdout(func): """Wrapper, makes a function non-verbose. Args: func: function to be silenced """ def wrapper(*a, **ka): with open(os.devnull, "w") as devnull: with contextlib.redirect_stdout(devnull): func(*a, **ka) return wrapper
a617f776df873086c3033416f6ce7bc783fd640b
509
def sum_and_count(x, y): """A function used for calculating the mean of a list from a reduce. >>> from operator import truediv >>> l = [15, 18, 2, 36, 12, 78, 5, 6, 9] >>> truediv(*reduce(sum_and_count, l)) == 20.11111111111111 True >>> truediv(*fpartial(sum_and_count)(l)) == 20.11111111111111 True """ try: return (x[0] + y, x[1] + 1) except TypeError: return ((x or 0) + (y or 0), len([i for i in [x, y] if i is not None]))
d43cc8dc39fb35afae4f6a4e32d34221d525f5d3
510
def calculate_pnl_per_equity(df_list): """Method that calculate the P&L of the strategy per equity and returns a list of P&L""" pnl_per_equity = [] # initialize the list of P&L per equity for df in df_list: # iterates over the dataframes of equities pnl = df['Strategy Equity'].iloc[-1] - df['Buy and Hold Equity'].iloc[-1] # calculating the difference at the last point pnl_per_equity.append(pnl) return pnl_per_equity
4f6ac1b9f6a949215c6b805f05a65897393f3288
513
def fpAbs(x): """ Returns the absolute value of the floating point `x`. So: a = FPV(-3.2, FSORT_DOUBLE) b = fpAbs(a) b is FPV(3.2, FSORT_DOUBLE) """ return abs(x)
d69f5f07b651ed4466ff768601c77f90232b8827
514
def perform_variants_query(job, **kwargs): """Query for variants. :param job: API to interact with the owner of the variants. :type job: :class:`cibyl.sources.zuul.transactions.JobResponse` :param kwargs: See :func:`handle_query`. :return: List of retrieved variants. :rtype: list[:class:`cibyl.sources.zuul.transactions.VariantResponse`] """ return job.variants().get()
c779080e2ef8c1900c293f70996e17bae932b142
516
from typing import Dict from typing import Any from typing import cast def _key_match(d1: Dict[str, Any], d2: Dict[str, Any], key: str) -> bool: """ >>> _key_match({"a": 1}, {"a": 2}, "a") False >>> _key_match({"a": 1}, {"a": 2}, "b") True >>> _key_match({"a": 2}, {"a": 1}, "a") False >>> _key_match({"a": 1}, {"a": 1}, "a") True >>> _key_match({"a": 2}, {"b": 1}, "a") False >>> _key_match({"b": 2}, {"a": 1}, "a") False """ try: return (key not in d1 and key not in d2) or cast(bool, d1[key] == d2[key]) except KeyError: return False
8e76ee70c6209b357b13890a9fcf2b0b7d770c1b
517
import logging def get_xml_string(stream_pointer): """ This function checks for valid xml in a stream and skips bytes until it hits something that looks like xml. In general, this 'skipping' should never be used, as we expect to see well-formed XML from the server. stream_pointer: input stream returns: string of xml """ # This function avoids stream_pointer.seek() for the vast majority # of cases (when xml is formatted correctly) just because I don't # like using 'seek' (never know when you're getting non-rewindable # streams) c = stream_pointer.read(1) count = 0 while c != '<' and c != '': count = count + 1 c = stream_pointer.read(1) if c == '': stream_pointer.seek(0) logging.error("Poorly formatted schema - no '<' found", \ extra={'xml':stream_pointer.read()}) return xml_string = "<" + stream_pointer.read() if count > 0: stream_pointer.seek(0) logging.error("Poorly formatted schema", \ extra={'xml':stream_pointer.read()}) return xml_string
3fa2e3d05bfc66cee592c4c40cc1e9349e512c3a
518
def _gen_new_aux_page(label: str, is_title: bool) -> str: """Generate LaTeX for auxiliary pages""" page = [] if is_title: page.append("\\thispagestyle{empty}") page.append("\\begin{center}") page.append("\t\\vfil") page.append("\t\\vspace*{0.4\\textheight}\n") page.append("\t\\Huge") page.append(f"\t\\bf{{{label}}}\n") page.append("\t\\normalsize") page.append("\\end{center}") return "\n".join(page)
3ff31ae80f007fd5da2dd6153ea605978421c086
519
from unittest.mock import patch def method_mock(cls, method_name, request): """ Return a mock for method *method_name* on *cls* where the patch is reversed after pytest uses it. """ _patch = patch.object(cls, method_name) request.addfinalizer(_patch.stop) return _patch.start()
b14d991c42e0c05a51d9c193c3769b1e1e71dd1f
520
def _return_xarray_system_ids(xarrs: dict): """ Return the system ids for the given xarray object Parameters ---------- xarrs Dataset or DataArray that we want the sectors from Returns ------- list system identifiers as string within a list """ return list(xarrs.keys())
8380d1c2ae9db48eb4b97138dcd910d58085073e
521
def sub(a, b): """Subtracts b from a and stores the result in a.""" return "{b} {a} ?+1\n".format(a=a, b=b)
dcc0ddfc9dbefe05d79dea441b362f0ddfe82627
522
def factory(name, Base, Deriveds): """Find the base or derived class by registered name. Parameters ---------- Base: class Start the lookup here. Deriveds: iterable of (name, class) A list of derived classes with their names. Returns ------- class """ Derived = Base for (nm, NmCl) in Deriveds: if nm == name: Derived = NmCl break return Derived
1bce29651004cf1f04740fd95a4f62c6c2277a72
523
def find_expired(bucket_items, now): """ If there are no expired items in the bucket returns empty list >>> bucket_items = [('k1', 1), ('k2', 2), ('k3', 3)] >>> find_expired(bucket_items, 0) [] >>> bucket_items [('k1', 1), ('k2', 2), ('k3', 3)] Expired items are returned in the list and deleted from the bucket >>> find_expired(bucket_items, 2) ['k1'] >>> bucket_items [('k2', 2), ('k3', 3)] """ expired_keys = [] for i in range(len(bucket_items) - 1, -1, -1): key, expires = bucket_items[i] if expires < now: expired_keys.append(key) del bucket_items[i] return expired_keys
476fd079616e9f5c9ed56ee8c85171fcb0ddb172
524
import typing def empty_iterable() -> typing.Iterable: """ Return an empty iterable, i.e., an empty list. :return: an iterable :Example: >>> from flpy.iterators import empty_iterable >>> empty_iterable() [] """ return list()
904fe365abf94f790f962c9a49f275a6068be4f0
525
from typing import List from pathlib import Path import os def list_dir_files(path: str, suffix: str = "") -> List[str]: """ Lists all files (and only files) in a directory, or return [path] if path is a file itself. :param path: Directory or a file :param suffix: Optional suffix to match (case insensitive). Default is none. :return: list of absolute paths to files """ if suffix: suffix = suffix.lower() if Path(path).is_file(): files = [os.path.abspath(path)] else: files = [] for f in os.listdir(path): file_path = os.path.join(path, f) if Path(file_path).is_file(): if not suffix or f.lower().endswith(suffix): files.append(os.path.abspath(file_path)) return list(sorted(files))
aaba7de5d5f67c5addc054010c5a2bd811475a3e
526
def feature_selection(data, features): """ Choose which features to use for training. :param data: preprocessed dataset :param features: list of features to use :return: data with selected features """ return data[features]
6303e52a9c64acfbb5dcfd115b07b3bef2942821
527
from typing import Optional import yaml def get_repo_version(filename: str, repo: str) -> Optional[str]: """Return the version (i.e., rev) of a repo Args: filename (str): .pre-commit-config.yaml repo (str): repo URL Returns: Optional[str]: the version of the repo """ with open(filename, "r") as stream: pre_commit_data = yaml.safe_load(stream) pre_config_repo = next( (item for item in pre_commit_data["repos"] if item["repo"] == repo), None ) if pre_config_repo: return pre_config_repo["rev"] return None
821653bdeb60a86fce83fb3a05609996231ec5d4
531
def f(x): """ Try and have the NN approximate the xor function. """ if x[0] == x[1]: return 0. else: return 1.
8111e53f0ff0dfdd75f08d845e5176bc287a65e1
532
def decimal_to_binary(integer,nbits=8,grouped=0): """Converts integer to binary string of length nbits, sign bit and then m.s.b. on the left. Negative numbers are twos-complements, i.e., bitwise complement + 1.""" # Just remember the minus sign and ignore it if integer < 0: negative = True integer = abs(integer+1) else: negative = False # build up the string result = '' # part of number left to process remaining_integer = integer while (remaining_integer > 0) & (nbits > 0): lsb = remaining_integer % 2 if negative: lsb = 1-lsb result = ''.join((str(lsb),result)) remaining_integer = remaining_integer >> 1 nbits -= 1 while nbits > 0: if negative: result = ''.join(('1',result)) else: result = ''.join(('0',result)) nbits -= 1 if grouped: temp = result result = "" for bit in range(len(temp)): if bit and (bit % grouped) == 0: result += ' ' result += temp[bit] return result
89cef0feaad6d1c25dd67b97a0caf2212ea4a55d
533
def recast_to_supercell(z, z_min, z_max): """Gets the position of the particle at ``z`` within the simulation supercell with boundaries ``z_min`` and ``z_max``. If the particle is outside the supercell, it returns the position of its closest image. :param z: :param z_min: :param z_max: :return: """ sc_size = (z_max - z_min) return z_min + (z - z_min) % sc_size
2d144a656a92eaf3a4d259cf5ad2eadb6cfdf970
534
import unittest def extra(): """Tests faint.extra. That is, less central faint code, possibly requiring extensions (e.g. tesseract or GraphViz dot). """ return unittest.defaultTestLoader.discover("py_tests/test_extra", top_level_dir="py_tests/")
c6fc2694144e852edaef219d95bc384b5b394d7d
537
import numpy as np def cca(x,y): """ canonical correlation analysis cca wx, wy, r = cca(x,y) returns wx, wy two matrices which columns [:,i] correspond to the canonical weights (normalized eigenvectors) and a vector r containing the canonical correlations, all sorted in decreasing order. cca assumes as input matrices x,y of size l*m (time*nvar), and l*n, that are centered (no mean along 1st axis) within the function. cca returns an error if either x,y are not full rank.""" mx = x.shape[1] my = y.shape[1] l = x.shape[0] #needs to be the same for y if l != y.shape[0]: raise ValueError('Time dimension is not same length for x,y') xrank = np.linalg.matrix_rank(x) yrank = np.linalg.matrix_rank(y) if mx > xrank: raise ValueError('Matrix x is not full rank.') if my > yrank: raise ValueError("Matrix y is not full rank.") #no mean x = x - np.outer(x.mean(axis=0),np.ones(l)).transpose() y = y - np.outer(y.mean(axis=0),np.ones(l)).transpose() #covariance estimators Sxy = np.dot(x.transpose(),y) / l Sxx = np.dot(x.transpose(),x) / l Syy = np.dot(y.transpose(),y) / l B1 = np.dot(np.linalg.inv(Sxx),Sxy) B2 = np.dot(np.linalg.inv(Syy),Sxy.transpose()) evalx, eigvx = np.linalg.eig(np.dot(B1,B2)) evaly, eigvy = np.linalg.eig(np.dot(B2,B1)) #normalize eigenvectors eigvx = eigvx / np.outer(np.ones((mx,1)),np.sqrt((eigvx**2).sum(axis=0))) eigvy = eigvy / np.outer(np.ones((my,1)),np.sqrt((eigvy**2).sum(axis=0))) # eigenvalues should be the same in evalx and evaly rx = np.sqrt(abs(evalx)) #correlation ry = np.sqrt(abs(evaly)) #sort ordargx = np.argsort(rx)[-1:-mx-1:-1] #decreasing order ordargy = np.argsort(ry)[-1:-mx-1:-1] rx = rx[ordargx] ry = ry[ordargy] eigvx = eigvx[:,ordargx] eigvy = eigvy[:,ordargy] if mx >= my: r = rx else: r = ry return eigvx, eigvy, r
f0d734fc927789d6ecca0685a85f727e48b334df
538
def b2str(data): """Convert bytes into string type.""" try: return data.decode("utf-8") except UnicodeDecodeError: pass try: return data.decode("utf-8-sig") except UnicodeDecodeError: pass try: return data.decode("ascii") except UnicodeDecodeError: return data.decode("latin-1")
05cbe6c8072e1bf24cc9ba7f8c8447d0fa7cbf7f
539
def get_cookie_date(date): """ Return a date string in a format suitable for cookies (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Date) :param date: datetime object :return: date string in cookie format """ return date.strftime("%a, %d %b %Y %H:%M:%S GMT")
f2b4d6decab72cf1f25754bc7e290f62eae92156
540
def valuedict(keys, value, default): """ Build value dictionary from a list of keys and a value. Parameters ---------- keys: list The list of keys value: {dict, int, float, str, None} A value or the already formed dictionary default: {int, float, str} A default value to set if no value Returns ------- dict A dictionary Notes ----- This standalone and generic function is only required by plotters. """ if isinstance(value, dict): return {key: value.get(key, default) for key in keys} else: return dict.fromkeys(keys, value or default)
44283bac3be75c3569e87a890f507f7cff4161b6
542
import subprocess def get_available_gpus(): """Return a list of available GPUs with their names""" cmd = 'nvidia-smi --query-gpu=name --format=csv,noheader' process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, _ = process.communicate() if process.returncode == 0: return stdout.decode().splitlines() return []
9ef81b08aad25e0109604cdd538b5c9e6902c4d1
543
def dep_graph_parser_parenthesis(edge_str): """Given a string representing a dependency edge in the 'parenthesis' format, return a tuple of (parent_index, edge_label, child_index). Args: edge_str: a string representation of an edge in the dependency tree, in the format edge_label(parent_word-parent_index, child_word-child_index) Returns: tuple of (parent_index, edge_label, child_index) """ tokens = edge_str.split("(") label = tokens[0] tokens = tokens[1].split(", ") parent = int(tokens[0].split("-")[-1]) - 1 child = int(",".join(tokens[1:]).split("-")[-1][:-1]) - 1 return (parent, label, child)
a3f96ebec6fdcb00f3f64ea02e91147df16df196
544
import math def intersection_angle(m1, m2): """ Computes intersection angle between two slopes. """ return math.degrees(math.atan((m2-m1) / (1+m1*m2)))
244192d3d1fe74130d64350606e765d8f2d4831b
545
def generate_json_with_incorrect_prediction_value(features_definition: dict): """ Generates a list of dictionaries with keys from the given features_definition, where one key in each dictionary has a corresponding value not allowed by the given definition """ mock_requests = [] def_keys = list(features_definition.keys()) for def_key in def_keys: mock_request = {key: list(value.keys())[0] for key, value in features_definition.items()} # Replace given keys, based on enumeration step, value with invalid prediction value mock_request[def_key] = 'q' mock_requests.append(mock_request) return mock_requests
a0019822fbc701e8cdda61192bf564d1f72af9dd
546
import html def formatTitle(title): """ The formatTitle function formats titles extracted from the scraped HTML code. """ title = html.unescape(title) if(len(title) > 40): return title[:40] + "..." return title
0a47e88ac024561dce18be140895dfd0825a9c37
548
def isPalindrome(x): """ :type x: int :rtype: bool """ def sub_judge(start, end, string): if start >= end: return True if string[start] == string[end]: return sub_judge(start + 1, end - 1, string) else: return False return sub_judge(0, len(str(x)) - 1, str(x))
c7ecea3934e1cceb6574630eb06703f18f02832a
549