Columns: content (string, 39 – 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 – 710k)
def state_from_scratch(args: tuple, kwargs: dict) -> dict:
    """Create pipeline state from (args, kwargs)."""
    state = {i: v for i, v in enumerate(args)}
    state.update(kwargs)
    return state
26b84c95fcdeae55e903cdfa763bfc59597caedb
658,469
def count_fish(fish): """ Count how many fish are alive. :param fish: dictionary of fish :return: number of living fish """ counter = 0 for f in fish.values(): counter += f return counter
760ec10d2303a18b27d12f27fbeaf785260d60b8
164,045
def test_size(STDictonary_root, count=0):
    """Test function for the size counter of the STDict-class,
    STDictonary_root = root of STDict to test, O(n)"""
    if STDictonary_root is None:
        return count
    else:
        count += 1
        count = test_size(STDictonary_root._left, count)
        count = test_size(STDictonary_root._right, count)
        return count
bf992f36bc0c337c36cf49e15e16636055b4ce1d
619,106
def norm(x1,x2=None): """ Normalizes x1. If also given as input x2, then normalizes x1 to x2. :param x1: input array :param x2: optional :returns: normalized x1 """ if x2 is None: return x1/x1.max() else: return x1*x2.max()/x1.max()
26a76b485b81dcc34d67a0b9879deda92d8f4035
610,955
def or_(a=False, b=False, c=False, d=False, e=False, f=False, g=False, h=False): """Checks if any value is True. `๐Ÿ“˜`_ - a: 1st boolean - b: 2nd boolean - ... Example: >>> or_(True, False) == True >>> or_(False, False) == False >>> or_(False, True, False, True) == True >>> or_(False, False, False, False) == False .. _๐Ÿ“˜: https://github.com/python3f/extra-boolean/wiki/or """ return a or b or c or d or e or f or g or h
99c3ac23ae06a5ff08ca6e90fd40873e17fd5540
477,415
def _year_number_to_string(year): """Converts year from number to string. :param year: Integer year. :return: year_string: String in format "yyyy" (with leading zeros if necessary). """ return '{0:04d}'.format(int(year))
8e1fad0e80ddbdc451c4503f479284351c5375e2
541,538
def _mutl_by_int(expr, val):
    """ Create an expression that multiplies an expression by an integer

    Args:
        expr: Expression to constrain
        val: Integer value to multiply the expression by

    Returns:
        New expression
    """
    if val == 1:
        return expr
    if val == -1:
        return -expr
    if val == 0:
        return 0
    return val * expr
465261491bd3bcbd3f3a946bd03f42eda51e4223
344,518
import jinja2 def create_run_hoc(template_path, main_protocol): """Returns a string containing run.hoc. Args: template_path (str): path to the template to fill in main_protocol (bool): whether the Main Protocol is used or not Returns: str: hoc script to run the simulation """ # load template with open(template_path, "r", encoding="utf-8") as template_file: template = template_file.read() template = jinja2.Template(template) # edit template return template.render( main_protocol=main_protocol, )
d334e1adcd85a3023c32977aa6f380a593a07c85
366,014
from datetime import datetime


def validate_date_format(date, format):
    """
    Return None if a date format does not parse.

    Parameters
    ----------
    date : str
        Input date string.
    format : str
        Input accepted format to try.

    Returns
    -------
    datetime or None
        Returns the parsed date on successful parse or None on parsing failure.
    """
    try:
        return datetime.strptime(date, format)
    except Exception:
        return None
0c37610f9b9f59582ff43a6f6d3c95bcfe46b4ad
341,060
import email


def fetch_emails(connection, search_string='(UNSEEN)'):
    """Fetch emails on a connection, specified by a search

    Usage: fetch_emails(connection[, search_string])

    connection -- an IMAP connection, which should be logged in and pointed
                  at a particular mailbox
    search_string -- an IMAP search string that specifies the messages to be
                     fetched from the connection. Defaults to "(UNSEEN)",
                     which fetches unread emails.

    Returns a list of email.message objects
    """
    matching_emails = connection.search(None, search_string)[1]
    matching_emails = matching_emails[0].decode().split()
    messages = [connection.fetch(email_id, '(RFC822)') for email_id in matching_emails]
    messages = [email.message_from_bytes(msg[1][0][1]) for msg in messages]
    return messages
0ddd96be31d754fc21f4261f86e2854f8ccda000
544,118
def bit_reversed(x, n):
    """
    Bit-reversal operation.

    Parameters
    ----------
    x: ndarray<int>, int
        a vector of indices
    n: int
        number of bits per index in ``x``

    Returns
    ----------
    ndarray<int>, int
        bit-reversed version of x
    """
    result = 0
    for i in range(n):                     # for each bit number
        if (x & (1 << i)):                 # if it matches that bit
            result |= (1 << (n - 1 - i))   # set the "opposite" bit in result
    return result
5fe23c87becfe78b56d891d9732807522e2b9d57
492,312
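A quick sanity check for bit_reversed above (a hypothetical usage sketch, not part of the dataset row): with n = 3 bits, index 0b011 maps to 0b110.

# Hypothetical usage of bit_reversed, assuming the definition above.
assert bit_reversed(3, 3) == 6   # 0b011 -> 0b110
assert bit_reversed(1, 3) == 4   # 0b001 -> 0b100
assert bit_reversed(6, 3) == 3   # 0b110 -> 0b011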
import zlib def _adler32(fname): """Compute the adler32 checksum on a file. :param fname: File path to the file to checksum :type fname: str """ with open(fname, 'rb') as f: checksum = 1 while True: buf = f.read(1024*1024*8) if not buf: break checksum = zlib.adler32(buf, checksum) return checksum
7e7b37d39cdd7dbd1795aa25b48460350e121dae
30,319
def intersection_n (l1, * ls) : """Compute intersection of `l1` and all elements of `ls`.""" result = set (l1) for l in ls : result.intersection_update (l) return result
f3a081099afc5cb8563883992f8fc04d1462f85f
272,809
def fix_backspace(word):
    """
    Replace ``\\`` with ``\\\\`` so that it will be printed properly in the
    documentation.
    """
    return word.replace('\\', '\\\\')
b3a98db39b8f85a43e8224288313a10a6eeaab94
584,129
def make_plucker(fields): """ Factory method that will create a filtering function. The returned function will accept a dict and return only the fields specified in the fields parameter. """ def filter(record): result = {} for field in fields: if field in record: result[field] = record[field] return result return filter
61192fe5cb94c1a3e56a0442b59979c1e41cba1c
287,412
def is_valid_index(idx, in_list, start_idx=0):
    """
    param: idx (str) - a string that is expected to contain an integer
           index to validate
    param: in_list - a list that the idx indexes
    param: start_idx (int) - an expected starting value for idx (default is 0);
           gets subtracted from idx for 0-based indexing

    The function checks if the input string contains only digits and verifies
    that the provided index idx is a valid positive index that can retrieve
    an element from in_list.

    returns:
    - True, if idx is a positive numeric index that can retrieve an element
      from in_list.
    - False if idx is not an integer value, is negative or exceeds the size
      of in_list.
    """
    # `idx` is a string, so check the digits first and convert before doing
    # any arithmetic (the original subtracted start_idx from the raw string,
    # which raises TypeError).
    if not idx.isdigit():
        return False
    offset = int(idx) - start_idx
    return 0 <= offset < len(in_list)
c52e93a059d7f0e20a910bb4831cc4779a82f6ac
584,880
def parent(heap, i):
    """
    Returns the index of the parent of a given node.
    """
    if i == 0:
        return 0
    # Return the parent's index, matching the docstring and the i == 0 base
    # case (the original returned heap.array[(i - 1) // 2], i.e. the value).
    return (i - 1) // 2
df1e7fa83a6376e394f53bdc465d77d3dd3c6b2f
54,635
import requests def get_launches() -> list: """ Return the list of all past launches. :return: """ url = 'https://api.spacexdata.com/v2/launches/' response = requests.get(url) # status code 200 means everything is ok # see https://httpstatuses.com/200 if response.status_code == 200: # JSON is a special format that most APIs use return response.json() else: # print an error message and exit the programme print("SpaceX API is unavailable") exit()
134a377a7278966a2fe4c4e253fef1995ddf2e21
382,776
def parser_shortname(parser_argument): """Return short name of the parser with dashes and no -- prefix""" return parser_argument[2:]
9827697680cab831f0c0a435e743955a3ede1859
657,091
import math def euclidean_distance(data1, data2): """ Calculates the euclidean distance between two data objects representing coordinates. The data objects must be same-sized sequences of numbers (or act like it). See http://en.wikipedia.org/wiki/Euclidean_distance """ distance = 0 for v1, v2 in zip(data1, data2): diff = v1 - v2 distance += diff * diff distance = math.sqrt(distance) return distance
1dff552716b354a63615159c66bd13aac45d409d
202,814
def check_if_given_importance(test_post, user): """Checks whether a user has given importance to a post or not.""" return any(post.id == test_post.id for post in user.given_importance_to)
bdafd76351e4095b14030855005f8d260e4b348c
484,421
def GetInstance(compute, instance, zone, project): """Get the data for a Google Compute Engine instance.""" cmd = compute.instances().get(instance=instance, project=project, zone=zone) return cmd.execute()
1cc4815f0bf6ddf64cbe90bc7464d6afb92d1d41
503,245
def normalise_number(a): """ Split the power part of a number and its float value normalised to 1 Example ------- >>> normalise_number(1.433364345e9) (0.1433364345, 10) >>> normalise_number(14e-6) (0.13999999999999999, -4) >>> normalise_number(-14e-6) (-0.13999999999999999, -4)""" i = 0 # power value v = a # normalised value if v == 0.: v = 0. i = 0. elif abs(v) < 1: while abs(v) < 0.1: v *= 10. i -= 1 elif abs(v) > 1: while abs(v) > 1: v /= 10. i += 1 return v, i
b34373f56ebb110f6a86572cbfc968a8b3094d7d
47,854
import sqlite3


def connect(db_file):
    """ Connect to an existing sqlite database file.
    :param db_file: string - path to database file
    :return: (connection, cursor) tuple, or None on error
    """
    try:
        conn = sqlite3.connect(db_file)
        c = conn.cursor()
        return conn, c
    except sqlite3.Error as err:
        print(err)
861debe016f000df919df08b7e529dce62e235f0
346,259
def get_grants(df):
    """Get list of grant numbers from dataframe.

    Assumptions:
        Dataframe has column called 'grantNumber'

    Returns:
        list: sorted valid grant numbers, e.g. non-empty strings
    """
    print("Querying for grant numbers...", end="")
    grants = set(df.grantNumber.dropna())
    print(f"{len(grants)} found\n")
    return list(sorted(grants))
0e3c5ab5444e07360310d2f9baf2de2dc60a8dcd
686,153
def sort_by_timestamp(messages): """ Sort a group of messages by their timestamp. """ return sorted(messages, key=lambda x: x["timestamp"])
ff90c07c9db35d76d7da49debdc438dcb5158846
188,256
import re


def get_version(release_id):
    """Get version from release id.

    :param release_id: str, release id, example: f33
    :return: str, version ("f33" -> "33")
    """
    m = re.match(r"^f(\d+)$", release_id)
    if not m:
        raise ValueError('Invalid release id: %s' % release_id)
    return m.group(1)
6517f8a4f7d9593b2dee8a17cea4dd614154df06
562,294
def getClusterInstance(clusterName, serviceInstance): """ Get the cluster MO given the cluster name """ content = serviceInstance.RetrieveContent() searchIndex = content.searchIndex datacenters = content.rootFolder.childEntity for datacenter in datacenters: cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) if cluster is not None: return cluster return None
954957befab6c4f28cc968341d8ea00baced5757
403,209
import json def json_to_dict(text): """ convert a string received from an api query to a classical dict """ text = str(text).replace('Content-Type: application/json', '').strip() return json.loads(text)
6fd3cfd63511d745df6268ffb1dbfeac0024785d
255,624
import torch def batch_img_vec(data, vocab): """Batch a sequence of image vectors.""" imgs = torch.stack(data, dim=1) # [K, B, dim] return imgs
1bc6f5b53f1604db73d2924aa3e2791896a78199
212,353
import hashlib


def sha1(text):
    """Generates SHA1 hash code from a text string"""
    # hashlib requires bytes; encode the string first (passing a str directly,
    # as the original did, raises TypeError).
    return hashlib.sha1(text.encode('utf-8')).hexdigest()
7a74e7804f0ef7ec250027b8f396a4449e506de7
440,942
import io def readReviews(path): """ Function to store reviews in a list buffer. Arguments --------- path: location of reviews Return --------- list of reviews """ # Create an empty buffer to hold all the reviews reviewBuf = [] # Open the file with io.open(path,'r') as raw: for line in raw: reviewBuf.append(line) return reviewBuf
e349cd18c63d5215e79f68cdfbc89a6a48371bd9
641,821
def split_column_to_dict(run_manifest, column="countries", sep=";"):
    """Splits a column in csv format into a dictionary"""
    for index, row in run_manifest.iterrows():
        countries = {
            x.strip('" '): True
            for x in row[column].strip().split(sep)  # use the `sep` argument, not a hard-coded ";"
        }
        countries.pop('', None)
        # Listed as data frame tries to unpack otherwise
        run_manifest.loc[index, column] = [countries]
    return run_manifest
26aa9b4ebd419002d8f1a6443fc77abc12ba1589
467,000
def decode_char_to_bool(value='N'): """Decode SIP2 char representation to boolean.""" return value == 'Y'
19a68f0af947ac50753080aa0611f1cfa4c5aca0
349,191
def alter_projects_to_metax(projects):
    """Convert project objects from frontend to comply with the Metax schema.

    Arguments:
        projects (list<dict>): List of project objects, containing details and organizations

    Returns:
        list<dict>: List of project objects in Metax schema
    """
    output = []
    for project in projects:
        details = project.get("details", {})
        metax_project = {
            "name": details.get("title"),
            "identifier": details.get("identifier"),
            "has_funder_identifier": details.get("fundingIdentifier"),
            "funder_type": details.get("funderType"),
            "source_organization": project.get("organizations", []),
            "has_funding_agency": project.get("fundingAgencies", []),
        }
        output.append(metax_project)
    return output
45a38be5f66c78ee96feebfd0d3ba8e2efa04777
195,096
def size(heap): """The number of items in the heap.""" return len(heap)
98cb88e27be9f099fdaac6c826f65974fabcbdd2
517,279
import json def import_users_from_file(file): """Import our user data file and return the results.""" with open(file, newline='', encoding="utf-8") as load_file: users = json.load(load_file) return users
8357d6d284cb308be4f9645fc46f8deda817fc56
527,669
def filter_content(soup, content_filter):
    """
    Filter page content and return a list of words visible on the web page.

    NOTE: The assignment specifies 'content' but doesn't explicitly state if this
    refers to visible content or all source content. I have assumed visible
    content for this exercise. This can be altered by specifying an alternative
    filter callback.

    :param soup: object
        BeautifulSoup object
    :param content_filter: function
        filter callback
    :return: list
        List of words found in page
    """
    page_text = soup.findAll(text=True)
    visible_text = filter(content_filter, page_text)

    word_list = []
    for row in visible_text:
        if row.strip():
            words = row.strip().split(' ')
            word_list += [w.strip().replace(',', '').replace('.', '').lower() for w in words]

    return word_list
cfe4735029e24c0f2cbc9856c7f3b5f98ab3df3b
253,399
def extract_row(key: str, clientresult) -> dict: """ Extract one row from the client result, and return result as dict """ data = clientresult[key] return dict(data)
9ec1a20538931f782864f5962a03ad0ba14e0caf
346,038
def find_start(file_data):
    """Search the image for the start of raw data 'BRCM' marker"""
    # Search the file for the start of the raw data
    raw_start = file_data.find(b'BRCM')
    # check if flag was found
    if raw_start < 0:
        raise LookupError('Flag "BRCM" not found in jpeg data. File contains no raw data')
    return raw_start
647604fbd3cc16f5a9f653a62972076bea701314
466,750
def rrd_esc(string): """Escape : so rrd arguments parse properly""" return string.replace(':', r'\:')
abff3aeb8b421a52a4084cbffe78fc713c63f62f
558,989
from typing import Dict
from typing import List


def get_in(d: Dict, keys: List, default=None):
    """Returns the value in a nested dict, where keys is a list of keys.

    >>> get_in({"a": {"b": 1}}, ["a", "b"])
    1
    >>> get_in({"a": [0, 1, 2]}, ["a", 1])
    1
    >>> get_in({"a": {"b": 1}}, ["a", "x"], "not found")
    'not found'
    """
    if not keys:
        return d
    try:
        return get_in(d[keys[0]], keys[1:], default)
    except (KeyError, IndexError):
        return default
bd60966ee65c196c5554bd915b6c274a40d266ae
434,338
def frames2beats(n_frames, framerate, tempo): """Converts a number of frames to duration in beats, given a framerate and tempo.""" return (n_frames / float(framerate)) * (tempo / 60.)
c268c9b880203c0ade34b859cdf127a05d358c65
239,233
import itertools def cartesian_params(*paramList): """Make a test function or method parameterized by cartesian product of parameters .. code-block :: python import unittest from nose2.tools import cartesian_params @cartesian_params((1, 2, 3), ('a', 'b')) def test_nums(num, char): assert num < ord(char) class Test(unittest.TestCase): @cartesian_params((1, 2, 3), ('a', 'b')) def test_less_than(self, num, char): self.assertLess(num, ord(char)) Parameters in the list must be defined as iterable objects such as tuple or list. """ def decorator(func): func.paramList = itertools.product(*paramList) return func return decorator
61e9b4ef4bc93b90a4a4bb59e0a96146e1d644ba
62,350
def square_of_sum(number):
    """ Return the square of the sum of the first [number] natural numbers """
    return sum(range(1, number + 1)) ** 2
5b009b50d09efba576d404ca45236dbfc9a8efd0
62,621
import unicodedata def _normalize(unistr): """Represent Unicode string with canonical composed form. Unicode has alternative representations called normalization forms, which means that e.g. "\\u017c" is the same character as "z\\u0307". Normalization ensures portability since e.g. filenames on Mac OS X are encoded with UTF-8 in NFD (decomposed), while Linux typically uses NFC for that purpose. """ return unicodedata.normalize('NFC', unistr)
058992ba0f531c8b7a0231587f23160aebeb5d0b
316,790
def unwrap_if_scalar(obj): """ Unwraps obj if it is a sequence with a single item. Returns obj[0] if len(obj) == 1 and obj otherwise. """ if len(obj) == 1: return obj[0] else: return obj
749fd6efd77030ff32ccc32056eb95f0bb717cd3
274,342
import copy


def recode_ids(cropobjects, document_namespace, dataset_namespace):
    """Recode all IDs of the given CropObjects, so that they are (hopefully)
    globally unique. That is, from e.g. ``611``, we get
    ``MUSCIMA++_1.0::CVC-MUSCIMA_W-35_N-08_D-ideal::611``.

    Creates new CropObjects, does *not* modify the input in-place.

    :param cropobjects: A list of CropObject instances.

    :param document_namespace: An identifier of the given CropObjectList.
        It should be unique for each dataset, i.e. ``absolute_dataset_namespace``.

    :param dataset_namespace: An identifier of the given dataset. It should
        be globally unique (which is impossible to guarantee, but at least
        within further versions of MUSCIMA++, it should hold).
    """
    output_cropobjects = []
    for c in cropobjects:
        c_out = copy.deepcopy(c)
        uid = c.UID_DELIMITER.join([dataset_namespace, document_namespace, str(c.objid)])
        c_out.set_uid(uid)
        output_cropobjects.append(c_out)

    return output_cropobjects
2b8ace086c74938e36d4d30305f5b8040f1c1825
385,749
import torch def pois_llik(x, mean): """Return the log likelihood of x distributed as Poisson""" return x * torch.log(mean) - mean - torch.lgamma(x + 1)
f0ba834a41f8d108683ca93fd82ad479b520bfc5
478,904
def get_cmp_sign(a, b): """Convert comparison result to single character representation.""" if a < b: return '<' elif a > b: return '>' return '=='
0ea842b3694ef7193749471167b974426b73bfd3
93,184
def find_moves(board): """returns list of valid moves""" moves = [] for col in range(7): if board[0][col] == " ": moves.append(col) return moves
391cf536c7966a9906c5a13da37076326b11c109
377,840
import requests def get_remote_version(url: str) -> str: """Gets the remote file and returns it as a long string.""" response = requests.get(url) if response: #print("Getting remote version") s = response.text return s else: return "Url Not Found."
5d5ef45c5b74b326f9386214229529d9b71aca3d
700,934
import urllib3 from bs4 import BeautifulSoup import re def soup_extract_links(url, linkclass): """Process URL using BeautifulSoup and find all elements of linkclass.""" http = urllib3.PoolManager() r = http.request('GET', url) soup = BeautifulSoup(r.data, 'html.parser') return soup.find_all(class_=linkclass, attrs={'href': re.compile("^http://")})
badb69a2342c3f842f5338c05225f0cd2e312926
141,366
def add(value, arg): """ Sum values """ return value + arg
31a24b392591ea416c3d9717f640051f811a9f22
406,732
def findSelectedFields(fieldSearchList, fieldNames):
    """
    fieldSearchList is a list of fields, potentially with wild cards.
    fieldNames is the real list of field names.
    Returns a list of all fields that match the SearchList.
    """
    prefixes = []
    exactMatches = []
    for f in fieldSearchList:
        if f.endswith("*"):
            prefixes.append(f.rstrip("*"))
        else:
            exactMatches.append(f)

    fieldsShown = []
    for f in fieldNames:
        if f in exactMatches:
            fieldsShown.append(f)
            continue
        for pf in prefixes:
            if f.startswith(pf):
                fieldsShown.append(f)
                break
    return fieldsShown
31f124f4f942a957707f3d17fd6456716fc7ece3
191,457
def get_values_map_keys(records, keyidx=0):
    """ Given a dict of str->2-tuples, e.g.:
         {'anat': [('modality', 'anat'), ('image_file', 'anat_hc.nii.gz')],
          'pet':  [('modality', 'pet'),  ('image_file', 'pet_fdg.nii.gz')]},

        or given a list of lists of 2-tuples of str, e.g.:
         [[('modality', 'anat'), ('image_file', 'anat_hc.nii.gz')],
          [('modality', 'pet'),  ('image_file', 'pet_fdg.nii.gz')]],

        will return the unique values of each record value, in this case:
         {'modality', 'image_file'}.

    Parameters
    ----------
    records: Dict[str, 2-tuple] or List[List[2-tuple]]

    keyidx: int
        Index within each tuple to collect (0 collects the keys).

    Returns
    -------
    keys: set[str]
    """
    if not records or records is None:
        return []

    if isinstance(records, dict):
        itemset = records.values()
    elif isinstance(records, list):
        itemset = records
    else:
        raise NotImplementedError('Expected a `dict` or a `list of list` as `records`, '
                                  'got {}.'.format(type(records)))

    crumb_args = set()
    for items in itemset:
        crumb_args = crumb_args.union(set([t[keyidx] for t in items]))

    return crumb_args
cfecb13857e72014c9ba0472404bffdf5af076d1
51,020
import re def is_dynamic_reference(input): """ Checks if the given input is a dynamic reference. Dynamic references follow the pattern '{{resolve:service-name:reference-key}}' This method does not validate if the dynamic reference is valid or not, only if it follows the valid pattern: {{resolve:service-name:reference-key}} :param input: Input value to check if it is a dynamic reference :return: True, if yes """ pattern = re.compile("^{{resolve:([a-z-]+):(.+)}}$") if input is not None and isinstance(input, str): if pattern.match(input): return True return False
0f14bf2a24cd9cba2805c23a27c00ed3136c94a3
151,325
def encryptMessage(key, message):
    """
    Function to encrypt with transposition algorithm.
    Two arguments:
    - key : key of transposition
    - message : text to encrypt
    """
    # Each string in ciphertext represents a column in the grid.
    ciphertext = [''] * key

    # Loop through each column in ciphertext.
    for column in range(key):
        currentIndex = column

        # Keep looping until currentIndex goes past the message length.
        while currentIndex < len(message):
            # Place the character at currentIndex in message at the
            # end of the current column in the ciphertext list.
            ciphertext[column] += message[currentIndex]

            # move currentIndex over
            currentIndex += key

    # Convert the ciphertext list into a single string value and return it.
    return ''.join(ciphertext)
74f8dae884ce8015ea802ab5ce16127f2c6ffc42
328,930
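For reference, a short worked example of the columnar transposition that encryptMessage above performs (a hypothetical usage sketch, assuming the definition in that row):

# key=3 reads "HELLOWORLD" off in three columns:
# column 0: H L O D, column 1: E O R, column 2: L W L
assert encryptMessage(3, 'HELLOWORLD') == 'HLODEORLWL'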
def calc_fixed_bn(func, in_data, **kwargs): """[FixedBatchNormalization](https://docs.chainer.org/en/v4.3.0/reference/generated/chainer.functions.fixed_batch_normalization.html) Test-mode batch normalization. It consists of normalization part (using $\mu$ and $\sigma$) and bias part ($\\gamma$ and $\\beta$), both are composed of elementwise scale and shift. However this can actually be fused into single scale and shift operation. Therefore, regardless of existence of bias ($\\gamma$ and $\\beta$), computational cost is always $2 \|x\|$ FLOPs. Since scale-and-shift operation can be done by FMA, it becomes $\|x\|$ FLOPs if `fma_1flop` is set to `True`. Due to the same reason as explained above, reading learned scale and shift parameter is required only once (not twice) regardless of bias existence. Both are 1-dimensional array with $c_{\mathrm{in}}$ elements. | Item | Value | |:--------------|:------| | FLOPs(FMA) | $$ \| x \| $$ | | FLOPs(no-FMA) | $$ 2 \| x \| $$ | | mread | $$ \|x\| + 2 c_{\mathrm{in}} $$ | | mwrite | $$ \| x \| $$ | | params | `eps`: epsilon for BN | """ x, _, _, mean, var = in_data x = in_data[0] n_elements = len(x.flatten()) if kwargs.get('fma_1flop'): flops = n_elements else: flops = n_elements * 2 # *2 <- scale and shift mread = n_elements + len(mean) + len(var) mwrite = n_elements return (flops, mread, mwrite, {'eps': func.eps})
8f20d0210effb07989a35b6b439d3144b0fe6790
22,492
def normalized(z): """Returns the complex number with the same argument/phase but with a magnitude of 1.""" try: return z/abs(z) except ZeroDivisionError: raise ZeroDivisionError("Cannot normalize 0.")
441cbe83fbd88319830231d62822b85fcffc6ce3
35,089
def _lisp_deps_manifest(ctx, lisp_info): """Creates a file that lists all Lisp files needed by the target in order.""" out = ctx.actions.declare_file(ctx.label.name + ".deps") content = ctx.actions.args() content.set_param_file_format("multiline") content.add_joined( lisp_info.features, join_with = "\n", format_each = "feature: %s", ) content.add_joined( lisp_info.srcs, join_with = "\n", format_each = "src: %s", ) ctx.actions.write( output = out, content = content, ) return out
55bafb7567127cca257d3ce5c0434a192be16d93
436,477
def ingest_dns_record(neo4j_session, name, value, type, update_tag, points_to_record):
    """
    Ingest a new DNS record

    :param neo4j_session: Neo4j session object
    :param name: record name
    :param value: record value
    :param type: record type
    :param update_tag: Update tag to set on the node and its child nodes
    :param points_to_record: parent record to set DNS_POINTS_TO relationship to. Can be None
    :return: the intel graph node id for the new/merged record
    """
    ingest = """
    MERGE (record:DNSRecord{id: {Id}})
    ON CREATE SET record.firstseen = timestamp(), record.name = {Name}, record.type = {Type}
    SET record.lastupdated = {update_tag}, record.value = {Value}
    WITH record
    MATCH (n{id: {PointsToId}})
    MERGE (record)-[r:DNS_POINTS_TO]->(n)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = {update_tag}
    """

    record_id = f"{name}+{type}"

    neo4j_session.run(
        ingest,
        Id=record_id,
        Name=name,
        Type=type,
        Value=value,
        PointsToId=points_to_record,
        update_tag=update_tag,
    )

    return record_id
d87ed351395b9ca0dd726914fcddd22f92721af9
619,996
from typing import Any def _contains_str_like(pat: Any) -> bool: """Determine if the object is a str-like or array of str-like.""" if isinstance(pat, (str, bytes)): return True if not hasattr(pat, "dtype"): return False return pat.dtype.kind in ["U", "S"]
4b2cf9d2f300acf3c8435f3ddcd8e69c12b0e954
307,284
def dict_from_items_with_values(*dictionaries, **items): """Creates a dict with the inputted items; pruning any that are `None`. Args: *dictionaries(dict): Dictionaries of items to be pruned and included. **items: Items to be pruned and included. Returns: dict: A dictionary containing all of the items with a 'non-None' value. """ dict_list = list(dictionaries) dict_list.append(items) result = {} for d in dict_list: for key, value in d.items(): if value is not None: result[key] = value return result
bd33872b6c50761e702353ad41afed25313d7af6
431,484
def information_gain(data, split_feature_name, label_feature_name, metric):
    """
    Calculate the information gain using the given metric.

    :param data: the data to analyze
    :param split_feature_name: the name of the split feature
    :param label_feature_name: the name of the label feature
    :param metric: the metric to use
    :return: the information gain given by splitting on the split feature
    """
    feature_values = data[split_feature_name]
    possible_values = feature_values.unique()

    expected_metric_value = 0
    for possible_value in possible_values:
        subset = data.loc[data[split_feature_name] == possible_value]
        weight = len(subset) / len(data)
        expected_metric_value += weight * metric(subset, label_feature_name)

    return metric(data, label_feature_name) - expected_metric_value
297fd372f455377154024480a829665f3037606b
424,106
def resolve_none_string(val: str): """ To avoid 'none' or 'NONE' as strings, we need to resolve this to the NoneType Args: val(str): The potential none value as string Returns: None if the string is resolvable to None or the input parameter itself """ val_u = val.upper() if val_u == "NONE": return None return val
807bc205be383687151dcb7fc04bf8f0c755dd0b
435,225
def to_big(index): """Returns lambda to translate small to big move.""" return lambda action: (index, action)
b01ed8a0661f5c2f057d3ae6cbf93a41b64fbc9d
407,521
import random def create_word_sequence(words: list, length=10) -> str: """ Create string of random words (useful for testing). :param words: list of words :param length: amount of words :return: string of random words """ return ' '.join(random.sample(words, length))
5f72af7a459277036b5a047f2abfde97c6b7a8ba
373,403
import string def get_tilt_series_label(tiltnumber): """Generates a tilt series label using the alphabet as a base 26 number system For example 0 would = a 25 = z 26 = ba 27 = bb (there is no aa cause a=0) :returns: tiltseries label by converting tiltnumber to base 26 """ base = 26 convertedval = '' num = int(tiltnumber) while num >= base: nextchar = num % base convertedval = string.ascii_lowercase[nextchar] + convertedval num = int(num / base) convertedval = string.ascii_lowercase[num] + convertedval return convertedval
2684955f339344c336f89027e277fbb4c8d2fefa
181,511
import re


def process_clinical_significance(clin_sig):
    """Processes ClinVar clinical significance string into a format suitable for OT JSON schema.
    Namely, splits multiple clinical significance levels into an array and normalises names (to
    lowercase, using only spaces for delimiters). Multiple levels of clinical significance are
    separated using two delimiters: ('/', ', '). See /clinvar-variant-types/README.md for further
    explanation. The output array is sorted alphabetically.

    Example: 'Benign/Likely benign, risk_factor' → ['benign', 'likely benign', 'risk factor']."""
    return sorted(re.split('/|, ', clin_sig.lower().replace('_', ' ')))
d266ca548455a50dba3c9ec0aa08a11a52eab53b
674,217
import itertools def subsequences(iterable, seq=2): """Return subsequences of an iterable Each element in the generator will be a tuple of `seq` elements that were ordered in the iterable. """ iters = itertools.tee(iterable, seq) for i, itr in enumerate(iters): for _ in range(i): next(itr, None) return zip(*iters)
da436028d37f74729a2b5c2dfb74716da61efb2d
18,382
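A brief usage sketch for subsequences above (hypothetical, for illustration): consecutive windows of length `seq` come from zipping staggered copies of the iterable.

# Pairs (default seq=2) and triples over a short list.
assert list(subsequences([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]
assert list(subsequences([1, 2, 3, 4], seq=3)) == [(1, 2, 3), (2, 3, 4)]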
def add_numbers(a, b): """Return the sum of the arguments 'a' and 'b'.""" return a + b
158f79706ff064cb16eb4bd32178720410ab832e
282,227
def say_hello(name='World'):
    """Say hello to someone

    Parameters
    ----------
    name : string
        A string containing the name of the person who is to be greeted.

    Returns
    -------
    string : string
        The greetings string.
    """
    if not isinstance(name, str):
        raise ValueError("I need a string.")
    string = 'Hello {}!'.format(name)
    return string
5d9728462b11b7f73e5d4b67dc38531aa9ceceb7
518,251
def chi_resonant(x, amplitude, pos, width):
    """Lorentzian chi resonance.

    Parameters
    ----------
    x : np.array
        The x axis, wavenumbers or frequencies
    amplitude:
        The amplitude of the resonance
    pos:
        The position of the resonance
    width:
        The FWHM of the resonance
    """
    A = amplitude
    delta = pos - x
    gamma = width / 2
    ChiR_i = A * gamma / (delta**2 + gamma**2)
    ChiR_r = A * delta / (delta**2 + gamma**2)
    ChiR = ChiR_r + 1j * ChiR_i
    return ChiR
8ab2268e0e5af65936af212f0c776336d23c2605
57,688
from io import StringIO def svg(plot, close=True): """ Creates SVG code and closes plot. Args: plot: matplotlib.pyplot Plot from which the SVG should be made. Returns: str SVG code. """ # make SVG svg_file = StringIO() plot.savefig(svg_file, format='svg') svg = svg_file.getvalue() svg_file.close() # close plot if close: plot.close() return svg
4acdd6f346af2de672e538415795e0e1181ee4e4
18,272
def extractNameComponents(value):
    """This function tries to extract a family name and a given name from the
    input and returns them as a tuple.

    >>> extractNameComponents('Lieber, Sven')
    ('Lieber', 'Sven')
    >>> extractNameComponents('van Gogh, Vincent')
    ('van Gogh', 'Vincent')

    Empty strings are returned if it did not work. If there is only one value,
    we assume the family name
    >>> extractNameComponents('')
    ('', '')
    >>> extractNameComponents('van Gogh')
    ('van Gogh', '')
    >>> extractNameComponents('Hermann')
    ('Hermann', '')
    """
    familyName = ''
    givenName = ''
    if value != '':
        components = value.split(',')
        if len(components) == 0:
            familyName = value
        elif len(components) == 1:
            familyName = components[0].strip()
        elif len(components) > 1:
            familyName = components[0].strip()
            givenName = components[1].strip()

    return (familyName, givenName)
701c0c82cf459236fc4761c0a85b81871257c050
513,210
from typing import List from typing import Iterator import itertools def flatten(x: List[List]) -> Iterator: """ Flatten a list of list. Args: x: List of list of elements Returns: Iterator of flattened array. """ return itertools.chain.from_iterable(x)
4f1b87bfacf7b63b4256475fa30b21e948fc7c9c
98,660
def is_list_of_ints( intlist ): """ Return True if list is a list of ints. """ if not isinstance(intlist,list): return False for i in intlist: if not isinstance(i,int): return False return True
9b3e72d97b0cf62c6987620f8970561a3bc45182
133,176
from typing import List def get_corpora_for_lang(parallel_corpora: List[str], lang: str) -> List[str]: """ Fetches list of corpora that belong to given lang parallel_corpora: List[str] where each element is a str with the format "src_lang-tgt_lang:src_corpus,tgt_corpus" Returns [] if corpora for lang is not found """ corpora = [] for parallel_corpus_config in parallel_corpora: lang_pair, parallel_corpus = parallel_corpus_config.split(":") src_lang, tgt_lang = lang_pair.split("-") if src_lang == lang: corpora.append(parallel_corpus.split(",")[0]) if tgt_lang == lang: corpora.append(parallel_corpus.split(",")[1]) return corpora
b7e50ad0eaf17a39ae1e069f0c2a5c60c6ada9b3
329,590
from typing import List import logging def get_mc_definitions(resource: str) -> List[str]: """ Load Minecraft values from the assets folder. :param resource: The resource to load from disk. :return: The lines of the file assets/minecraft_definitions/<resource>, without the line breaks. """ logging.info(f"Loading definitions for '{resource}'") with open(f"assets/minecraft_definitions/{resource}.txt", "r") as f: return [line.rstrip("\n") for line in f.readlines() if line]
3366dda6ddd880387ee4824e7806346b855d8af7
236,751
def MOTP_frame(pairs_num, distance, frame_id, gt_num, hp_num): """calculate MOTP of a frame params pairs_num: mapping pairs num for one frame distance: frame_id: id of frame which is processing gt_num: object num of ground truth hp_num: object num of hypothesis ----------- return MOTP_frame: MOTP for a frame """ MOTP_frame = distance / pairs_num return MOTP_frame
f702a62c54c9722ff6c77820d0eed817973888c2
521,855
def get_direction(source, destination): """Find the direction drone needs to move to get from src to dest.""" lat_diff = abs(source[0] - destination[0]) long_diff = abs(source[1] - destination[1]) if lat_diff > long_diff: if source[0] > destination[0]: return "S" else: return "N" else: if source[1] > destination[1]: return "W" else: return "E"
224a8df79cbafbcf1eed8df522ab7f58cc93598d
703,447
import hashlib import itertools def md5_with_prefix(input, prefix, start_with=0): """Determine first number that generates a hash with the given prefix.""" md5_input = hashlib.md5(input) for number in itertools.count(start_with): md5 = md5_input.copy() md5.update(str(number).encode('ascii')) if md5.hexdigest().startswith(prefix): return number
8cc402384c91fa0674ee3e451eff8b7a9c4b359c
251,543
def dot(s, t): """Returns the mod-2 dot product of two n-bit strings s and t.""" return sum([s[i] * t[i] for i in range(len(s))]) % 2
ce877632253b8c3843bbabe40ba9736ca4239066
359,292
import random def t06_ManyGetPuts(C, pks, crypto, server): """Many clients upload many files and their contents are checked.""" clients = [C("c" + str(n)) for n in range(10)] kvs = [{} for _ in range(10)] for _ in range(200): i = random.randint(0, 9) uuid1 = "%08x" % random.randint(0, 100) uuid2 = "%08x" % random.randint(0, 100) clients[i].upload(str(uuid1), str(uuid2)) kvs[i][str(uuid1)] = str(uuid2) good = total = 0 # verify integrity for i, (c, kv) in enumerate(zip(clients, kvs)): for k, v in kv.items(): vv = c.download(k) if vv == v: good += 1 total += 1 return float(good) / total
384aa2b03169da613b25d2da60cdd1ec007aeed5
4,002
def is_prime(n): """ Return True if the given integer is prime, False otherwise. >>> is_prime(1) False >>> is_prime(2) True >>> is_prime(3) True >>> is_prime(4) False >>> is_prime(9) False >>> is_prime(10) False """ if n < 2: return False if n == 2: return True if n % 2 == 0: return False for i in range(3, n): if n % i == 0: return False return True
cc9ff9eedc00db258c28fe7262d6206c7c306e61
206,001
def getMaxLength(dic): """get the maximum list length of the values of a dictionary mapping lemmas to lists of lexical types """ m=1 for k in dic.keys(): c=len(dic[k]) if c>m: m=c return m
baa2c0bd065f7a231c7c336984f2ca5dc8f83ef5
196,365
def merge_two_dicts(dict_1, dict_2): """Given two dicts, merge them into a new dict as a shallow copy. :param dict_1: dictionary 1 :param dict_2: dictionary 2 :return: merged dictionary """ if not dict_1: return dict_2 if not dict_2: return dict_1 out_dict = dict_1.copy() out_dict.update(dict_2) return out_dict
37e7d5f8edcca27704afa00fa1c41ef4f55c5dc0
185,190
def drop_unnecessary_metrics(submission_scores: dict, list_of_metrics: list): """Return submission_scores with every metric not in list_of_metrics removed.""" for data_name, data in submission_scores.items(): if data_name in ["param_count", "submission_name"]: continue filtered_scores = {k: v for k, v in data.items() if k in list_of_metrics} submission_scores[data_name] = filtered_scores return submission_scores
e406d02fcfa7ae3cf9a3ddb460034c499fa4ee24
544,674
import difflib


def get_diff(new_source, original_source, file_path):
    """
    Get diff from two strings.

    :param new_source:
    :param original_source:
    :param file_path:
    :return: string with diff
    :rtype: str
    """
    diff = difflib.unified_diff(
        original_source.splitlines(),
        new_source.splitlines(),
        file_path,
        file_path,
        "(original)",
        "(refactored)",
        lineterm="",
    )
    diff_text = "\n".join(list(diff))
    return diff_text
b431837b1e7b9ed50a8932de91d9bbe2a0973c6d
87,495
def prettify_delta(delta): """ Returns a human-readable string representing the given timedelta. The biggest unit is the hour, because a working day is too ambiguous. """ d = {} d['minutes'], d['seconds'] = divmod(int(delta.total_seconds()), 60) d['hours'], d['minutes'] = divmod(d['minutes'], 60) li = [] for unit in ('hours', 'minutes', 'seconds'): if d[unit]: s = str(d[unit])+' '+unit if d[unit] == 1: s = s[:-1] li.append(s) s = ', '.join(li) if not s: s = '-' return s
f91614c5bbf668bc9d27886ba79b187ae5410cba
396,890
import socket def get_src(dest): """ Attempts to learn the source IP from the outbound interface used to reach the provided destination :param dest: destination address/ip :return: local ip """ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) s.connect((dest, 1)) source_ip = s.getsockname()[0] s.close() return source_ip
2dcf711a3bc14a5d99490c85da94d09a8b099588
605,718
def sdiv(n): """ Safe division clamp, ensure n is not less than 1. """ return max(1, n)
8ccccef8e095501a519501847d2cb58d9523b8b1
599,709
def prepare_prediction_column(prediction): """Return the prediction directly.""" return prediction
42303330ee40cb37b6f5e1dbfd91056493050020
557,188
import re def _find_doi_in_extra(item: dict) -> str: """Finds DOI in 'extra' field of `item` or returns an empty string. Args: item (dict): Zotero item. Returns: str: DOI """ doi_regex = r'10\.\d+/[-;()\w.]+' if 'extra' in item['data'].keys(): doi_id = re.findall(doi_regex, item['data']['extra']) if len(doi_id) > 0: return doi_id[0] else: return '' else: return ''
c51c9e57040ac8ec1411c72f9a13894658a7773f
555,268
def get_contained_functions(f): """ Given a function object, return a tuple of function names called by f """ return f.__code__.co_names
16111f5f96c712472e6fd5778eb1d93e72678556
523,688
def subset_adata(adata, subset, verbose=True):
    """
    Subsets AnnData object on one or more .obs columns

    Columns should contain 0/False for cells to throw out, and 1/True for cells
    to keep. Keeps union of all labels provided in subset.

    Parameters
    ----------
    adata : anndata.AnnData
        the data
    subset : str or list of str
        adata.obs labels to use for subsetting. Labels must be binary (0, "0",
        False, "False" to toss - 1, "1", True, "True" to keep). Multiple labels
        will keep the union of all matching cells.
    verbose : bool, optional (default=True)
        print updates to console

    Returns
    -------
    adata : anndata.AnnData
        new anndata object as subset of `adata`
    """
    if verbose:
        print("Subsetting AnnData on {}".format(subset), end="")
    if isinstance(subset, str):
        subset = [subset]
    # initialize .obs column for choosing cells
    adata.obs["adata_subset_combined"] = 0
    # create label as union of given subset args
    for i in range(len(subset)):
        adata.obs.loc[
            adata.obs[subset[i]].isin(["True", True, 1.0, 1]), "adata_subset_combined"
        ] = 1
    adata = adata[adata.obs["adata_subset_combined"] == 1, :].copy()
    adata.obs.drop(columns="adata_subset_combined", inplace=True)
    if verbose:
        print(" - now {} cells and {} genes".format(adata.n_obs, adata.n_vars))
    return adata
f33b51659d35ed62b12367893007ef2293611c9d
556,232
import random


def random_sample_lst(lst):
    """ Argument Order: lst

    Extracts a random 25% of a given list
    """
    return random.sample(lst, len(lst) // 4)
6c977a80d838623729a1957a27fde53e45e3e982
223,213
def _parse_csv_item_opts(entry): """Parse the _opts field in a SB Extended CSV item.""" # Accepting even slightly weirdly formatted entries: entry = entry.strip() if len(entry) == 0: return {} opts = {} for opt in entry.split(" "): opt_name, opt_val = opt.split(":") opts[opt_name] = opt_val return opts
086ef26f5b6e69bf556bfe0a0f6b2e87a6f2b684
291,935
def conv_out_shape(in_shape, layers):
    """
    Calculates output shape of input_shape going through a list of pytorch convolutional layers

    in_shape: (H, W)
    layers: list of convolution layers
    """
    shape = in_shape
    for layer in layers:
        h_out = ((shape[0] + 2 * layer.padding[0] - layer.dilation[0] * (layer.kernel_size[0] - 1) - 1)
                 / layer.stride[0]) + 1
        w_out = ((shape[1] + 2 * layer.padding[1] - layer.dilation[1] * (layer.kernel_size[1] - 1) - 1)
                 / layer.stride[1]) + 1
        shape = (int(h_out), int(w_out))
    return shape
b75fb479f47304be03aef20a36583ad8a2edc0de
21,870