content: string (lengths 39–14.9k) | sha1: string (length 40) | id: int64 (0–710k)
import pickle

def load_pickle(filename):
    """Load a pickle file."""
    with open(filename, 'rb') as filehandler:
        return pickle.load(filehandler)
f93b13616f94c31bc2673232de14b834a8163c5f
970
def is_str_str_dict(x):
    """Tests if something is a str:str dictionary"""
    return isinstance(x, dict) and all(
        isinstance(k, str) and isinstance(v, str) for k, v in x.items()
    )
ce6230714c0526764f2cc67e4dedf598acd28169
982
def _ensureListLike(item):
    """
    Return the item if it is a list or tuple, otherwise add it to a list
    and return that.
    """
    return item if isinstance(item, (list, tuple)) else [item]
1c602a1fcf8dd6a5b4583264e63e38747f5b0d50
983
import io

def get_file_from_gitlab(gitpkg, path, ref="master"):
    """Retrieves a file from a Gitlab repository, returns a (StringIO) file."""
    return io.StringIO(gitpkg.files.get(file_path=path, ref=ref).decode())
7eccad01a538bdd99651b0792aff150f73e82cdd
984
def count_disordered(arr, size):
    """Counts the number of items that are out of the expected order
    (monotonic increase) in the given list."""
    counter = 0
    state = {
        "expected": next(item for item in range(size) if item in arr),
        "checked": []
    }

    def advance_state():
        state["expected"] += 1
        while True:
            in_arr = state["expected"] in arr
            is_overflow = state["expected"] > size
            not_checked = state["expected"] not in state["checked"]
            if not_checked and (in_arr or is_overflow):
                return
            state["expected"] += 1

    for val in arr:
        if val == state["expected"]:
            advance_state()
        else:
            counter += 1
        state["checked"].append(val)

    return counter
bb708e7d862ea55e81207cd7ee85e634675b3992
986
def every(n_steps):
    """Returns True every n_steps, for use as *_at functions in various places."""
    return lambda step: step % n_steps == 0
02fc6bc59fa6f223b681539baeae32c40bd9577e
991
def calc_batch_size(num_examples, batches_per_loop, batch_size):
    """Reduce the batch size if needed to cover all examples without a remainder."""
    assert batch_size > 0
    assert num_examples % batches_per_loop == 0
    while num_examples % (batch_size * batches_per_loop) != 0:
        batch_size -= 1
    return batch_size
3c394813a98a8414645f633a519001937247e8b0
992
def has_admin_access(user):
    """Check if a user has admin access."""
    return user == 'admin'
d178861bee504f6f3026c9e495d56cc8d2d7c3d3
993
def flatten_mock_calls(mock):
    """
    Flatten the calls performed on a particular mock object,
    into a list of calls with arguments.
    """
    result = []
    for call in mock.mock_calls:
        call = list(call)
        call_name = call[0]
        if '.' in str(call_name):
            call_name = str(call_name).split('.')[-1]
        result.append([call_name] + call[1:])
    return result
7c41025382f4ca25db1ccd328e9eb17e1d72a01a
995
def update_not_existing_kwargs(to_update, update_from):
    """
    This function updates the keyword arguments from update_from in to_update,
    only if the keys are not set in to_update. This is used for updating
    kwargs from the default dicts.
    """
    if to_update is None:
        to_update = {}
    to_update.update({k: v for k, v in update_from.items() if k not in to_update})
    return to_update
a66de151e6bc6d8f5b2f1b0ff32e30d2c8cb5277
996
from typing import List

def add_multiple_package(package_list: List[str]) -> str:
    """
    Generate LaTeX code to add multiple packages to the preamble

    :param package_list: List of packages to add to the preamble
    """
    usepackage_command_list = []
    for package in package_list:
        usepackage_command_list.append(rf"""\usepackage{{{package}}}""")
    return "\n".join(usepackage_command_list)
90bdd0a521c094d92c35ef92e62d6b43f6b135b4
1,002
def commit_veto(environ, status, headers):
    """Veto a commit.

    This hook is called by repoze.tm in case we want to veto a commit
    for some reason. Return True to force a rollback.

    By default we veto if the response's status code is an error code.
    Override this method, or monkey patch the instancemethod, to fine
    tune this behaviour.
    """
    return not 200 <= int(status.split(None, 1)[0]) < 400
9fc96fe8cdbedde20cb325e189b71d9df94cf176
1,004
import torch

def one_hot(y, num_dim=10):
    """
    One-hot encoding, similar to `torch.eye(num_dim).index_select(dim=0, index=y)`
    :param y: N-dim tensor of integer class labels
    :param num_dim: do one-hot labeling from `0` to `num_dim-1`
    :return: shape = (batch_size, num_dim)
    """
    one_hot_y = torch.zeros(y.size(0), num_dim)
    if y.is_cuda:
        one_hot_y = one_hot_y.cuda()
    return one_hot_y.scatter_(1, y.view(-1, 1), 1.)
694bfea18ecbb5c5737e0d38c0aa0f5f52a82a55
1,007
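A minimal usage sketch for the `one_hot` helper above (assumes PyTorch is installed and every label in `y` is less than `num_dim`):

    import torch
    y = torch.tensor([0, 2, 1])
    one_hot(y, num_dim=3)
    # tensor([[1., 0., 0.],
    #         [0., 0., 1.],
    #         [0., 1., 0.]])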
def extend_dict(x, *y):
    """Similar to Object.assign() / _.extend() in Javascript, using
    'dict.update()'

    Args:
        x (dict): the base dict to merge into with 'update()'
        *y (dict, iter): any number of dictionary or iterable key/value
            pairs to be sequentially merged into 'x'. Skipped if None.
    """
    z = x.copy()
    for d in [d for d in y if d is not None]:
        z.update(d)
    return z
f10a5bc7d5ed3646e6a9f8f9535a16bd800c7fcd
1,012
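A quick illustration of `extend_dict` (hypothetical values; later dicts win on key collisions, and the base dict is left untouched):

    base = {'a': 1, 'b': 2}
    extend_dict(base, {'b': 3}, None, {'c': 4})
    # {'a': 1, 'b': 3, 'c': 4}; base is unchanged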
def make_general_csv_rows(general_csv_dict):
    """
    Method for making a list of metrics from the general metrics dict.
    Rows are used in the general metrics writer.
    :param general_csv_dict: dict with all metrics
    :type general_csv_dict: dict
    :return: all metrics as rows
    :rtype: list
    """
    rows = []
    for key, value in general_csv_dict.items():
        row = [key[0], key[1]]
        row.extend(value)
        rows.append(row)
    return rows
45ca165d312b39cd0b7088e0bcbfb402a92e7e2b
1,015
def get_speakable_timestamp(timestamp):
    """Return a 'speakable' timestamp, e.g. 8am, noon, 9pm, etc."""
    speakable = f"{timestamp.strftime('%I').lstrip('0')} {timestamp.strftime('%p')}"
    if speakable == '12 PM':
        return 'noon'
    elif speakable == '12 AM':
        return 'midnight'
    return speakable
0b724686ebd5d3152d9017dc456d2945c78be0ee
1,016
import torch

def _featurize(inputs, model):
    """
    Helper function used to featurize exemplars before feeding into buffer.
    """
    with torch.no_grad():
        # Forward pass to featurize the raw exemplars
        outputs = model(*inputs).detach()
    return outputs
191fd1b362f38309a35618284fcf3f1910a06bd6
1,017
def make_file_prefix(run, component_name):
    """
    Compose the run number and component name into a string prefix
    to use with filenames.
    """
    return "{}_{}".format(component_name, run)
73ef37d75d9e187ee49ee058958c3b8701185585
1,022
import base64

def multibase_b64decode(data):
    """
    Follow forge's base64 urlsafe encode convention to decode string

    Args:
        data(string): encoded string

    Returns:
        bytes

    Examples:
        >>> multibase_b64decode('aGVsbG8')
        b'hello'
    """
    if isinstance(data, str):
        data = data.encode()
    return base64.urlsafe_b64decode(
        (data + b'=' * (-len(data) % 4)))
fdbc0f937e33d7994737a3a515973598cac3debd
1,025
import hashlib

def sha1_file(filename):
    """
    Return the hex string representation of the SHA1 checksum of the filename
    """
    s = hashlib.sha1()
    with open(filename, "rb") as f:
        for line in f:
            s.update(line)
    return s.hexdigest()
b993ac9f025d69124962905f87b1968617bb33f5
1,032
def read_from_file(file_path):
    """
    Read a file and return a list with all the lines in the file
    """
    file_in_list = []
    with open(file_path, 'r') as f:
        for line in f.readlines():
            file_in_list.append(line)
    return file_in_list
5fef3a3f50528c1a9786451666ae7e43be282bf9
1,033
import sqlite3

def cn(DB):
    """Return the cursor and connection object."""
    conn = sqlite3.connect(DB)
    c = conn.cursor()
    return (c, conn)
76abbec283d45732213f8b94031242146cdb4ee0
1,043
import copy

def partially_matched_crossover(random, mom, dad, args):
    """Return the offspring of partially matched crossover on the candidates.

    This function performs partially matched crossover (PMX). This type of
    crossover assumes that candidates are composed of discrete values that
    are permutations of a given set (typically integers). It produces
    offspring that are themselves permutations of the set.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    """
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    if random.random() < crossover_rate:
        size = len(mom)
        points = random.sample(range(size), 2)
        x, y = min(points), max(points)
        bro = copy.copy(dad)
        bro[x:y+1] = mom[x:y+1]
        sis = copy.copy(mom)
        sis[x:y+1] = dad[x:y+1]
        for parent, child in zip([dad, mom], [bro, sis]):
            for i in range(x, y+1):
                if parent[i] not in child[x:y+1]:
                    spot = i
                    while x <= spot <= y:
                        spot = parent.index(child[spot])
                    child[spot] = parent[i]
        return [bro, sis]
    else:
        return [mom, dad]
b0d5132cf4ca14095f3d7c637cb50db3fe37d244
1,044
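A usage sketch for `partially_matched_crossover` (assumes both parents are permutations of the same set; the `random` argument is a `random.Random`-like object, and `args` may be an empty dict since `crossover_rate` defaults to 1.0):

    import random
    rng = random.Random(42)
    mom, dad = [0, 1, 2, 3, 4], [4, 3, 2, 1, 0]
    bro, sis = partially_matched_crossover(rng, mom, dad, {})
    # bro and sis are again permutations of {0, 1, 2, 3, 4}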
import re

def regex_trim(input, regex, replace=''):
    """
    Trims or replaces the regex match in an input string.

    input (string): the input string to search for matches
    regex (string): regex to match
    replace (string - optional): a string to replace any matches with.
        Defaults to trimming the match.
    """
    return re.sub(regex, replace, input)
169bfaa0d2bfd7a1f32c1e05a63b41993f82bf4b
1,045
def prepare_definitions(defs, prefix=None):
    """
    prepares definitions from a dictionary

    With a provided dictionary of definitions in key-value pairs, builds
    them into a definition list. For example, if a dictionary contains a
    key ``foo`` with a value ``bar``, the returned definitions will be a
    list with the value ``['foo=bar']``. If a key contains a value of
    ``None``, the key will be ignored and will not be part of the final
    definition list. If a ``prefix`` value is provided, each definition
    entry will be prefixed with the provided value.

    Args:
        defs: the arguments to process
        prefix (optional): prefix value to prefix each definition

    Returns:
        list of arguments
    """
    final = []
    if defs:
        for key, val in defs.items():
            if val is None:
                continue
            if prefix:
                key = prefix + key
            if val:
                final.append('{}={}'.format(key, val))
            else:
                final.append(key)
    return final
ddc6d14cc18f8afba766efee65ab365df1d226c2
1,048
def extract_job_url(job):
    """
    Parse the job data and extract the str for the URL of the job posted

    params:
        job str: html str representation from bs4
    returns:
        url str: relative URL path of the job ad
    """
    return job.a["href"]
7517badcc2814e641c04a8f880353d897d434b7f
1,049
import random

def check_random_state(seed):
    """Turn seed into a random.Random instance

    If seed is None, return the Random singleton used by random.
    If seed is an int, return a new Random instance seeded with seed.
    If seed is already a Random instance, return it.
    Otherwise raise ValueError.
    """
    # Code slightly adjusted from scikit-learn utils/validation.py
    if seed is None or isinstance(seed, int):
        rng = random.Random(seed)
    elif isinstance(seed, random.Random):
        rng = seed
    else:
        raise ValueError(
            "### error: '{}' cannot be used to seed random.Random instance.".format(
                seed
            )
        )
    return rng
347481de01f4a3bba59bc9a2c484c10d4857e1e2
1,055
def get_user_playlists(spotipy_obj, username):
    """Gets and returns all Spotify playlists owned by the username specified.

    Parameters:
        spotipy_obj: Spotipy object
        username: Spotify username

    Returns:
        List of dictionaries, each dictionary a Spotify playlist object.
    """
    # Grab all user playlists, including private ones
    initial_playlists = spotipy_obj.user_playlists(username)
    final_playlists = []
    while initial_playlists:
        for playlist in initial_playlists["items"]:
            if playlist["owner"]["id"] == username:
                final_playlists.append(playlist)
        if initial_playlists["next"]:
            initial_playlists = spotipy_obj.next(initial_playlists)
        else:
            initial_playlists = None
    return final_playlists
90c06e0ddd91a7a84f4d905dd9334f9b4c27f890
1,060
def compute_median_survival_time(times, surv_function):
    """
    Computes a median survival time estimate by looking for where the
    survival function crosses 1/2.

    Parameters
    ----------
    times : 1D numpy array
        Sorted list of unique times (in ascending order).

    surv_function : 1D numpy array
        A survival function evaluated at each of the times in `times`,
        in the same order.

    Returns
    -------
    output : float
        Median survival time estimate.
    """
    t_left = times[0]
    t_right = times[-1]
    if surv_function[-1] > 1/2:
        # survival function never crosses 1/2; just output this last time point
        return t_right
    for t, s in zip(times, surv_function):
        if s >= 0.5:
            t_left = t
    for t, s in zip(reversed(times), reversed(surv_function)):
        if s <= 0.5:
            t_right = t
    return (t_left + t_right) / 2.
22103bc705acb791c0937a403aa9c34e9145e1c2
1,063
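A small check of `compute_median_survival_time` (assumes numpy arrays; here the survival function crosses 1/2 between t=2 and t=3, so the estimate is their midpoint):

    import numpy as np
    times = np.array([1., 2., 3., 4.])
    surv = np.array([0.9, 0.6, 0.4, 0.1])
    compute_median_survival_time(times, surv)
    # 2.5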
def aggregate_ant(data, sub_num, response_type="full"):
    """
    Aggregate data from the ANT task.

    Calculates various summary statistics for the ANT task for a given
    subject.

    Parameters
    ----------
    data : dataframe
        Pandas dataframe containing a single subject's trial data for the task.
    sub_num : str
        Subject number to which the data file belongs.
    response_type : {'full', 'correct', 'incorrect'}, optional
        Should the summary data be calculated using all trials? Only correct
        trials? Or only incorrect trials? This is not supported in all tasks.

    Returns
    -------
    stats : list
        List containing the calculated data for the subject.
    """
    # Calculate times following errors and correct responses
    df = data
    follow_error_rt = df.loc[df.correct.shift() == 0, "RT"].mean()
    follow_correct_rt = df.loc[df.correct.shift() == 1, "RT"].mean()

    if response_type == "correct":
        df = data[data["correct"] == 1]
    elif response_type == "incorrect":
        df = data[data["correct"] == 0]
    elif response_type == "full":
        df = data

    # Aggregated descriptives

    ## congruency conditions
    grouped_congruency = df.groupby("congruency")
    neutral_rt = grouped_congruency.mean().loc["neutral", "RT"]
    congruent_rt = grouped_congruency.mean().loc["congruent", "RT"]
    incongruent_rt = grouped_congruency.mean().loc["incongruent", "RT"]
    neutral_rtsd = grouped_congruency.std().loc["neutral", "RT"]
    congruent_rtsd = grouped_congruency.std().loc["congruent", "RT"]
    incongruent_rtsd = grouped_congruency.std().loc["incongruent", "RT"]
    neutral_rtcov = neutral_rtsd / neutral_rt
    congruent_rtcov = congruent_rtsd / congruent_rt
    incongruent_rtcov = incongruent_rtsd / incongruent_rt
    neutral_correct = grouped_congruency.sum().loc["neutral", "correct"]
    congruent_correct = grouped_congruency.sum().loc["congruent", "correct"]
    incongruent_correct = grouped_congruency.sum().loc["incongruent", "correct"]

    ## cue conditions
    grouped_cue = df.groupby("cue")
    nocue_rt = grouped_cue.mean().loc["nocue", "RT"]
    center_rt = grouped_cue.mean().loc["center", "RT"]
    spatial_rt = grouped_cue.mean().loc["spatial", "RT"]
    double_rt = grouped_cue.mean().loc["double", "RT"]
    nocue_rtsd = grouped_cue.std().loc["nocue", "RT"]
    center_rtsd = grouped_cue.std().loc["center", "RT"]
    spatial_rtsd = grouped_cue.std().loc["spatial", "RT"]
    double_rtsd = grouped_cue.std().loc["double", "RT"]
    nocue_rtcov = nocue_rtsd / nocue_rt
    center_rtcov = center_rtsd / center_rt
    spatial_rtcov = spatial_rtsd / spatial_rt
    double_rtcov = double_rtsd / double_rt
    nocue_correct = grouped_cue.sum().loc["nocue", "correct"]
    center_correct = grouped_cue.sum().loc["center", "correct"]
    spatial_correct = grouped_cue.sum().loc["spatial", "correct"]
    double_correct = grouped_cue.sum().loc["double", "correct"]

    # Attention network scores as intercepts and slopes
    conflict_intercept, conflict_slope = congruent_rt, incongruent_rt - congruent_rt
    conflict_slope_norm = conflict_slope / congruent_rt
    alerting_intercept, alerting_slope = double_rt, nocue_rt - double_rt
    alerting_slope_norm = alerting_slope / double_rt
    orienting_intercept, orienting_slope = spatial_rt, center_rt - spatial_rt
    orienting_slope_norm = orienting_slope / spatial_rt

    return [
        sub_num,
        follow_error_rt, follow_correct_rt,
        neutral_rt, congruent_rt, incongruent_rt,
        neutral_rtsd, congruent_rtsd, incongruent_rtsd,
        neutral_rtcov, congruent_rtcov, incongruent_rtcov,
        neutral_correct, congruent_correct, incongruent_correct,
        nocue_rt, center_rt, spatial_rt, double_rt,
        nocue_rtsd, center_rtsd, spatial_rtsd, double_rtsd,
        nocue_rtcov, center_rtcov, spatial_rtcov, double_rtcov,
        nocue_correct, center_correct, spatial_correct, double_correct,
        conflict_intercept, conflict_slope, conflict_slope_norm,
        alerting_intercept, alerting_slope, alerting_slope_norm,
        orienting_intercept, orienting_slope, orienting_slope_norm,
    ]
be01651d450560a5c36bc6240025fe59352d6347
1,064
def display_ordinal_value(glyph: str):
    """Displays the integer value of the given glyph

    Examples:
        >>> display_ordinal_value('🐍')
        128013
        >>> display_ordinal_value('G')
        71
        >>> display_ordinal_value('g')
        103
    """
    return ord(glyph)
7daa53180023bfec2968308d463ac615a83a4e55
1,072
import re

def tokens(s):
    """Return a list of strings containing individual words from string s.

    This function splits on whitespace transitions, and captures
    apostrophes (for contractions).

    >>> tokens("I'm fine, how are you?")
    ["I'm", 'fine', 'how', 'are', 'you']
    """
    words = re.findall(r"\b[\w']+\b", s)
    return words
aee0b6fad2f9107c893496f1f3807e80c9d2e44b
1,079
def potatoes(p0, w0, p1):
    """
    - p1/100 = water1 / (water1 + (1 - p0/100) * w0)
      => water1 = w0 * p1/100 * (1 - p0/100) / (1 - p1/100)
    - dry = w0 * (1 - p0/100)
    - w1 = water1 + dry = w0 * (100 - p0) / (100 - p1)

    Example:
        98/100 = water1 / (water1 + (1 - 99/100) * 100)
        water1 = 49
        w1 = 49 + 1 = 50
    """
    w1 = w0 * (100 - p0) / (100 - p1)
    return int(w1)
f2955a58db3a48c64b6acc4980e663f33332aeea
1,084
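The docstring's worked example as a call (potatoes that are 99% water and weigh 100 units, dried down to 98% water):

    potatoes(99, 100, 98)
    # 50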
def relacao(lista):
    """Create a function that receives a list of real numbers and returns
    another list of size 3 in which (i) the first element is the count of
    numbers greater than zero, (ii) the second element is the count of
    numbers less than zero, and (iii) the last element is the count of
    zeros in the initial list.

    Args:
        lista (list): list received to be processed by the function

    Returns:
        list: list of size three, in the order (greater than, less than,
        and equal to zero)
    """
    maior = menor = igual = 0
    for i in lista:
        if i > 0:
            maior += 1
        elif i < 0:
            menor += 1
        else:
            igual += 1
    return [maior, menor, igual]
39e45d8221d5d5b7322ebec5aa3f761d9e2ef413
1,087
def german_weekday_name(date):
    """Return the german weekday name for a given date."""
    days = [u'Montag', u'Dienstag', u'Mittwoch', u'Donnerstag', u'Freitag',
            u'Samstag', u'Sonntag']
    return days[date.weekday()]
7d2919c61438ec913abe38cccd924bb69f866655
1,089
def identity_func(x):
    """The identity (a.k.a. transparent) function that returns its input as is."""
    return x
06e0296c338d68663aa87d08b21f84919be3f85e
1,090
def make_choice_validator(
        choices, default_key=None, normalizer=None):
    """
    Returns a callable that accepts the choices provided.

    Choices should be provided as a list of 2-tuples, where the first
    element is a string that should match user input (the key); the
    second being the value associated with the key.

    The callable by default will match, upon complete match the first
    value associated with the result will be returned. Partial matches
    are supported.

    If a default is provided, that value will be returned if the user
    provided input is empty, i.e. the value that is mapped to the empty
    string.

    Finally, a normalizer function can be passed. This normalizes all
    keys and validation value.
    """

    def normalize_all(_choices):
        # normalize all the keys for easier comparison
        if normalizer:
            _choices = [(normalizer(key), value) for key, value in _choices]
        return _choices

    choices = normalize_all(choices)

    def choice_validator(value):
        if normalizer:
            value = normalizer(value)
        if not value and default_key:
            value = choices[default_key][0]
        results = []
        for choice, mapped in choices:
            if value == choice:
                return mapped
            if choice.startswith(value):
                results.append((choice, mapped))
        if len(results) == 1:
            return results[0][1]
        elif not results:
            raise ValueError('Invalid choice.')
        else:
            raise ValueError(
                'Choice ambiguous between (%s)' % ', '.join(
                    k for k, v in normalize_all(results))
            )

    return choice_validator
65ac672f16a1031a9051bc4f6769c6b1b88db727
1,091
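A usage sketch for `make_choice_validator` (hypothetical choices; exact matches return immediately, and a unique prefix resolves a partial match):

    validate = make_choice_validator(
        [('yes', True), ('no', False)], normalizer=str.lower)
    validate('YES')   # True
    validate('n')     # False (unique prefix match on 'no')
    validate('x')     # raises ValueError('Invalid choice.')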
def gen_event_type_entry_str(event_type_name, event_type, event_config):
    """
    return string like:
    {"cpu-cycles", PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES},
    """
    return '{"%s", %s, %s},\n' % (event_type_name, event_type, event_config)
ca89c19b45f182b8a7ae74ab76f3f42bddf46811
1,092
from pathlib import Path

def get_force_charge() -> str:
    """
    Gets the command object for the force charge command

    Returns:
        The command object as a json string
    """
    force_charge = Path('force_charge.json').read_text()
    return force_charge
c67277c62664419c3b4a19ae57ea6de027c60416
1,093
def _non_string_elements(x):
    """
    Simple helper to check that all values of x are strings.
    Returns all non-string elements as (position, element).

    :param x: Iterable
    :return: [(int, !String), ...]
    """
    problems = []
    for i, element in enumerate(x):
        if not isinstance(element, str):
            problems.append((i, element))
    return problems
974715622949157693084823a52a88973b51d100
1,095
def get_filename_pair(filename):
    """
    Given the name of a VASF data file (*.rsd) or parameter file (*.rsp),
    return a tuple of (parameters_filename, data_filename). It doesn't
    matter if the filename is a fully qualified path or not.

    - assumes extensions are all caps or all lower
    """
    param_filename = data_filename = filename[:-1]
    if filename[-1:].isupper():
        data_filename += 'D'
        param_filename += 'P'
    else:
        data_filename += 'd'
        param_filename += 'p'
    return (param_filename, data_filename)
f6eb5a64cf472f230c5806447d9c2ee8ae43a71d
1,097
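A doctest-style check of `get_filename_pair` (the extension's last letter is simply swapped between 'd'/'p', preserving case):

    >>> get_filename_pair('scan01.rsd')
    ('scan01.rsp', 'scan01.rsd')
    >>> get_filename_pair('SCAN01.RSP')
    ('SCAN01.RSP', 'SCAN01.RSD')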
def addBenchmark(df):
    """Add benchmark to df."""
    # Compute the inverse of the distance
    distance_inv = (1. / df.filter(regex='^distance*', axis=1)).values
    # Extract the value at the nearest station
    values = df.filter(regex='value_*', axis=1)
    # Compute the benchmark
    numer = (distance_inv * values).sum(axis=1)
    denom = (distance_inv * (values != 0)).sum(axis=1)
    benchmark = numer / denom
    df["Benchmark"] = benchmark
    return df
62c63215d622c46bed8200f97ad55b985e2beb20
1,100
def is_file_like(f):
    """Check to see if ```f``` has a ```read()``` method."""
    return hasattr(f, 'read') and callable(f.read)
9eee8c8f4a6966d1db67fb4aa9149e2fbd390fb9
1,101
def check_protocol(protocol):
    """
    Check if a given protocol works by computing the qubit excitation
    probabilities
    """
    qubit_weight = {}
    qubit_weight[protocol[0][0][0]] = 1.0
    for pair_set in protocol:
        for i, j, p in pair_set:
            qubit_weight[j] = qubit_weight[i] * (1.0 - p)
            qubit_weight[i] *= p
    return qubit_weight
8b9d0a8e329a340718d37bc79066be4a05cf2d20
1,102
def choose_first_not_none(*args):
    """Choose first non-None alternative in args.

    :param args: alternative list
    :return: the first non-None alternative.
    """
    for a in args:
        if a is not None:
            return a
    return None
fe3efba85251161cd0a6ecb50583cc443cd04dc0
1,103
def add_dict(dct1, dct2):
    """Returns a new dictionary where the contents of the dictionaries
    `dct1` and `dct2` are merged together."""
    result = dct1.copy()
    result.update(dct2)
    return result
eba785e4d00534e94c1bdde413603d64e18aac05
1,105
def mapdict(itemfunc, dictionary):
    """
    Much like the builtin function 'map', but works on dictionaries.

    *itemfunc* should be a function which takes one parameter, a (key,
    value) pair, and returns a new (or same) (key, value) pair to go in
    the dictionary.
    """
    return dict(map(itemfunc, dictionary.items()))
1f0573410f82acb1f3c06029cf4bfaccd295e1ac
1,110
def get_classpath(obj):
    """
    Return the full module and class path of the obj. For instance,
    kgof.density.IsotropicNormal

    Return a string.
    """
    return obj.__class__.__module__ + "." + obj.__class__.__name__
bf986e2b27dd8a216a2cc2cdb2fb2b8a83b361cc
1,112
def _get_should_cache_fn(conf, group):
    """Build a function that returns a config group's caching status.

    For any given object that has caching capabilities, a boolean config
    option for that object's group should exist and default to ``True``.
    This function will use that value to tell the caching decorator if
    caching for that object is enabled.

    To properly use this with the decorator, pass this function the
    configuration group and assign the result to a variable. Pass the new
    variable to the caching decorator as the named argument
    ``should_cache_fn``.

    :param conf: config object, must have had :func:`configure` called on it.
    :type conf: oslo_config.cfg.ConfigOpts
    :param group: name of the configuration group to examine
    :type group: string
    :returns: function reference
    """
    def should_cache(value):
        if not conf.cache.enabled:
            return False
        conf_group = getattr(conf, group)
        return getattr(conf_group, 'caching', True)

    return should_cache
7a11124c640bfb3ced28e2d9395593b70dc85a0a
1,128
def horizontal_move(t, h_speed=-2/320):
    """Probe moves horizontally at h_speed [cm/s]"""
    return 0.*t, h_speed*t, 2/16 + 0*t
d9cf0e5b968e7d8319b7f63f7d1d7a4666484ad3
1,134
def categories_report(x):
    """Returns value counts report.

    Parameters
    ----------
    x: pd.Series
        The series with the values

    Returns
    -------
    string
        The value counts report.
        str1 = False 22 | True 20 | nan 34
        str2 = False (22) | True (20) | nan (34)
    """
    # Do counting and sorting
    counts = x.value_counts(dropna=False)
    counts.index = counts.index.map(str)
    counts = counts.sort_index()

    # Create different strings
    str1 = ' | '.join(str(counts).split("\n")[:-1])
    str2 = ' | '.join("%s (%s)" % (i, counts[i]) for i in counts.index)

    # Return
    return str2
695ccd73ee73a13e92edbdf0eb242121d136ddbb
1,135
def euler(step, y0):
    """
    Implements Euler's method for the differential equation
    dy/dx = 1/(2(y-1)) on the interval [0, 4]
    """
    x = [0]
    index_x = 0
    while x[index_x] < 4:
        x.append(x[index_x] + step)
        index_x += 1

    index_y = 0
    y = [y0]

    def yprime(y):
        return 1 / (2 * (y - 1))

    while index_y < index_x:
        y.append(y[index_y] + step * yprime(y[index_y]))
        index_y += 1

    return x, y
89c6e6409a1c43ce4766507fba2f401bb01cfbb8
1,142
import logging

def update_softwaretitle_packages(api, jssid, pkgs):
    """
    Update packages of software title

    :param jssid: Patch Software Title ID
    :param pkgs: dict of {version: package, ...}
    :returns: None
    """
    logger = logging.getLogger(__name__)

    data = api.get(f"patchsoftwaretitles/id/{jssid}")
    title = data['patch_software_title']

    title_name = title['name']
    logger.info(f"updating patch software title: {title_name} ({jssid})")

    # single version (dict), multiple versions (list)
    version = title['versions']['version']
    _modified = False
    try:
        # access key of single version and count on TypeError being raised
        v = version['software_version']
        if v in pkgs.keys():
            version['package'] = {'name': pkgs[v]}
            _modified = True
    except TypeError:
        # looks like it was actually a list
        for _version in version:
            v = _version['software_version']
            if v in pkgs.keys():
                _version['package'] = {'name': pkgs[v]}
                _modified = True

    if _modified:
        result = api.put(f"patchsoftwaretitles/id/{jssid}", data)
        logger.info(f"successfully updated: {title_name}")
        return result
    else:
        logger.info("software title was not modified")
0acb3dfbff0e85a2e8a876d5e5d484c4d1e52068
1,143
from typing import Sequence

def _table(*rows: Sequence) -> str:
    """
    >>> _table(['a', 1, 'c', 1.23])
    '|a|1|c|1.23|'
    >>> _table(['foo', 0, None])
    '|foo|||'
    >>> print(_table(['multiple', 'rows', 0], ['each', 'a', 'list']))
    |multiple|rows||
    |each|a|list|
    """
    return '\n'.join([
        '|'.join(['', *[str(cell or '') for cell in row], ''])
        for row in rows
    ])
d566da2ad9240e73b60af00d3e4b4e25607234b4
1,146
import difflib

def lines_diff(lines1, lines2):
    """Show difference between lines."""
    is_diff = False
    diffs = list()
    for line in difflib.ndiff(lines1, lines2):
        if not is_diff and line[0] in ('+', '-'):
            is_diff = True
        diffs.append(line)
    return is_diff, diffs
50916d46871980fadfd854dc698481a4b0f35834
1,150
import re

def countBasesInFasta(fastaFile):
    """
    Given a fasta file, return a dict where the number of records and
    the total number of bases are given by 'records' and 'bases'
    respectively.
    """
    recordRE = re.compile(r'^>')
    whiteSpaceRE = re.compile(r'\s+')
    total_bases = 0
    total_seqs = 0
    with open(fastaFile) as f:
        for line in f:
            if recordRE.match(line):
                total_seqs += 1
                continue
            total_bases += len(whiteSpaceRE.sub('', line))
    return {'records': total_seqs, 'bases': total_bases}
45eaa5b8d36b4bae6b97bb29fdead1efc0aed8c2
1,156
def linear_search(iterable, item):
    """Returns the index of the item in the unsorted iterable.

    Iterates through a collection, comparing each item to the target
    item, and returns the index of the first item that is equal to the
    target item.

    * O(n) time complexity
    * O(1) space complexity

    Args:
        iterable: A collection that is iterable.
        item: An object to search for.

    Returns:
        The index of the item in the iterable, or -1 if not found.

    Raises:
        TypeError: If iterable is not iterable.
    """
    try:
        _ = iter(iterable)
    except TypeError:
        raise TypeError('\'{}\' object is not iterable'.format(
            type(iterable).__name__))

    for index, _item in enumerate(iterable):
        if _item == item:
            return index
    return -1
bdbd7e70cea79deef1375648bde61067df1d2221
1,158
def create_MD_tag(reference_seq, query_seq):
    """Create MD tag

    Args:
        reference_seq (str) : reference sequence of alignment
        query_seq (str) : query bases of alignment

    Returns:
        md_tag(str) : md description of the alignment
    """
    no_change = 0
    md = []
    for ref_base, query_base in zip(reference_seq, query_seq):
        if ref_base.upper() == query_base:
            no_change += 1
        else:
            if no_change > 0:
                md.append(str(no_change))
            md.append(ref_base)
            no_change = 0
    if no_change > 0:
        md.append(str(no_change))
    return ''.join(md)
4b711521d00af132e8e29fe4fc44785b985c2607
1,159
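A small example for `create_MD_tag` (mismatches are recorded with the reference base, flanked by match-run lengths):

    create_MD_tag('ACGT', 'ACCT')
    # '2G1'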
import re

def calc_word_frequency(my_string, my_word):
    """Calculate the number of occurrences of a given word in a given string.

    Args:
        my_string (str): String to search
        my_word (str): The word to search for

    Returns:
        int: The number of occurrences of the given word in the given string.
    """
    # Remove all non-alphanumeric characters from the string
    filtered_string = re.sub(r'[^A-Za-z0-9 ]+', '', my_string)
    # Return the number of occurrences of my_word in the filtered string
    return filtered_string.split().count(my_word)
15ff723dd2ff089fb12cccb38283f1f75e37079d
1,160
import hashlib

def intmd5(source: str, nbytes=4) -> int:
    """
    Generate a deterministic pseudo-random integer of nbytes*8 bits based
    on a source string.

    :param source: seed string to generate the integer.
    :param nbytes: size of the integer.
    """
    hashobj = hashlib.md5(source.encode())
    return int.from_bytes(hashobj.digest()[:nbytes], byteorder="big", signed=False)
c03eb99a67af00a4a081423ecca3a724111514e1
1,161
def _partition_at_level(dendrogram, level):
    """Return the partition of the nodes at the given level

    A dendrogram is a tree and each level is a partition of the graph
    nodes. Level 0 is the first partition, which contains the smallest
    snapshot_affiliations, and the best is len(dendrogram) - 1. The
    higher the level is, the bigger are the snapshot_affiliations
    """
    partition = dendrogram[0].copy()
    for index in range(1, level + 1):
        for node, community in partition.items():
            partition[node] = dendrogram[index][community]
    return partition
b179127076c386480c31a18a0956eb30d5f4ef2a
1,167
def _format_stages_summary(stage_results):
    """
    stage_results (list of (tuples of
        (success:boolean, stage_name:string, status_msg:string)))

    returns a string of a report, one line per stage.
    Something like:
        Stage: <stage x> :: SUCCESS
        Stage: <stage y> :: FAILED
        Stage: <stage z> :: SUCCESS
    """
    # find the longest stage name to pad report lines
    max_name_len = 0
    for entry in stage_results:
        x, stage_name, y = entry
        name_len = len(stage_name)
        if name_len > max_name_len:
            max_name_len = name_len

    summary = ""
    for entry in stage_results:
        x, stage_name, status_msg = entry
        summary += 'Stage: ' + stage_name.ljust(max_name_len) + ":: "
        summary += status_msg + '\n'

    return summary
2f5c757342e98ab258bdeaf7ffdc0c5d6d4668ca
1,174
import socket

def canonical_ipv4_address(ip_addr):
    """Return the IPv4 address in a canonical format"""
    return socket.inet_ntoa(socket.inet_aton(ip_addr))
edacc70ccc3eef12030c4c597c257775d3ed5fa4
1,177
def make_chained_transformation(tran_fns, *args, **kwargs):
    """Returns a dataset transformation function that applies a list of
    transformations sequentially.

    Args:
        tran_fns (list): A list of dataset transformation.
        *args: Extra arguments for each of the transformation function.
        **kwargs: Extra keyword arguments for each of the transformation
            function.

    Returns:
        A transformation function to be used in
        :tf_main:`tf.data.Dataset.map <data/Dataset#map>`.
    """
    def _chained_fn(data):
        for tran_fns_i in tran_fns:
            data = tran_fns_i(data, *args, **kwargs)
        return data

    return _chained_fn
5f24e030df74a0617e633ca8f8d4a3954674b001
1,179
def generate_outlier_bounds_iqr(df, column, multiplier=1.5):
    """
    Takes in a dataframe, the column name, and can specify a multiplier
    (default=1.5). Returns the upper and lower bounds for the values in
    that column that signify outliers.
    """
    q1 = df[column].quantile(.25)
    q3 = df[column].quantile(.75)
    iqr = q3 - q1

    upper = q3 + (multiplier * iqr)
    lower = q1 - (multiplier * iqr)

    return upper, lower
7f096d5f5cf2417cbc161713715a39560efd140a
1,182
import math
from PIL import ImageColor

def indexedcolor(i, num, npersat=15, lightness=60):
    """Returns an rgb color triplet for a given index, with a finite max
    'num'. Thus if you need 10 colors and want to get color #5, you would
    call this with (5, 10). The colors are "repeatable".
    """
    nsats = int(math.ceil(num / float(npersat)))
    sat = 100 - int((i // npersat) * (100 / nsats))
    l = lightness
    nhues = int(math.ceil(num / float(nsats)))
    hue = (i % nhues) * (360 // nhues)
    # print >>sys.stderr, 'For i %d, num %d, got %d sats, %d hues -> %d, %d, %d' % (i, num, nsats, nhues, hue, sat, l)
    return ImageColor.getrgb('hsl(%d,%d%%,%d%%)' % (hue, sat, l))
418a875bc8ae50ce21f9667f46718863ba0f55e3
1,185
def dot_to_dict(values):
    """Convert dot notation to a dict. For example: ["token.pos", "token._.xyz"]
    become {"token": {"pos": True, "_": {"xyz": True}}}.

    values (iterable): The values to convert.
    RETURNS (dict): The converted values.
    """
    result = {}
    for value in values:
        path = result
        parts = value.lower().split(".")
        for i, item in enumerate(parts):
            is_last = i == len(parts) - 1
            path = path.setdefault(item, True if is_last else {})
    return result
a2c56a01b179d27eabc728d6ff2ec979885d5feb
1,186
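The docstring example of `dot_to_dict` as a call (note that keys are lowercased):

    dot_to_dict(['token.pos', 'token._.xyz'])
    # {'token': {'pos': True, '_': {'xyz': True}}}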
from pathlib import Path

def delta_path(base_path: Path, item_path: Path, new_base_path: Path) -> Path:
    """
    Removes a base path from an item, and appends result to a new path

    :param base_path: The :py:class:`pathlib.Path` to be removed from `item_path`
    :param item_path: The :py:class:`pathlib.Path` to be delta-ed
    :param new_base_path: The new base :py:class:`pathlib.Path` for `item_path`.
    :raises ValueError: If base_path is not a sub-path of item_path.
    :return: The new combined path.
    """
    path_stub = item_path.relative_to(base_path)
    new_item_path = new_base_path / path_stub
    return new_item_path
ec531a011e36f053a8092525faae2047f5f66ccc
1,189
import random

def describe_current_subtask(subtask, prefix=True):
    """
    Make a 'natural' language description of subtask name
    """
    to_verb = {"AnswerQuestion": "answering a question",
               "ArmGoal": "moving my arm",
               "DemoPresentation": "giving a demo",
               "Find": "finding",
               "Follow": "following",
               "Guide": "guiding",
               "GripperGoal": "moving my gripper",
               "HandOver": "handing something over",
               "Inspect": "inspecting",
               "LookAt": "looking",
               "NavigateTo": "navigating",
               "PickUp": "picking up",
               "Place": "placing",
               "ResetWM": "resetting my world model",
               "Say": "speaking",
               "SendPicture": "sending a picture",
               "TurnTowardSound": "turning towards a sound"}

    description = to_verb.get(subtask, subtask + "ing")

    if prefix:
        description = random.choice(["I'm busy", "I'm"]) + " " + description

    return description
628c699201c26242bd72c6066cba07cce54b14ca
1,197
import re

def parse_date(deadline_date):
    """
    Given a date in the form MM/DD/YY or MM/DD/YYYY, returns
    the integers MM, DD, and YYYY (or YY) in this order.
    """
    deadline_split = re.split('\\/|\\-', deadline_date)
    return int(deadline_split[0]), int(deadline_split[1]), int(deadline_split[2])
0ded6bccce8437aad61cfa5ff121c5ed0595849b
1,199
import re

def get_file_name(part):
    """get file name using regex from fragment ID"""
    return re.findall(r"='(.*\-[a-z]+).*", part)[0]
30c8867d8e14b04c593359f1c16d9bf324711ba0
1,201
def black_color_func(word, font_size, position, orientation,
                     random_state=None, **kwargs):
    """Make word cloud black and white."""
    return "hsl(0,100%, 1%)"
d5e874a4f62d30abcba29476d0ba7fc3a31b0ca6
1,210
def detect_label_column(column_names):
    """Detect the label column - which we display as the label for a
    joined column.

    If a table has two columns, one of which is ID, then label_column
    is the other one.
    """
    if (column_names and len(column_names) == 2 and "id" in column_names):
        return [c for c in column_names if c != "id"][0]
    return None
40524e7ed0878316564ad8fd66a2c09fc892e979
1,211
def table(custom_headings, col_headings_formatted, rows, spec):
    """
    Create a LaTeX table

    Parameters
    ----------
    custom_headings : None, dict
        optional dictionary of custom table headings

    col_headings_formatted : list
        formatted column headings

    rows : list of lists of cell-strings
        Data in the table, pre-formatted

    spec : dict
        options for the formatter

    Returns
    -------
    dict : contains key 'latex', which corresponds to a latex string
        representing the table
    """
    longtables = spec['longtables']
    table = "longtable" if longtables else "tabular"
    if custom_headings is not None \
            and "latex" in custom_headings:
        latex = custom_headings['latex']
    else:
        latex = "\\begin{%s}[l]{%s}\n\\hline\n" % \
            (table, "|c" * len(col_headings_formatted) + "|")
        latex += ("%s \\\\ \\hline\n" % (" & ".join(col_headings_formatted)))

    for formatted_rowData in rows:
        if len(formatted_rowData) > 0:
            formatted_rowData_latex = [
                (formatted_cell['latex'] if isinstance(formatted_cell, dict)
                 else formatted_cell)
                for formatted_cell in formatted_rowData]
            latex += " & ".join(formatted_rowData_latex)

            # MULTI-ROW support for *data* (non-col-header) rows of table. Currently
            # unused (unneeded) - see multirow formatter that is commented out in formatters.py
            #multirows = [ ("multirow" in el) for el in formatted_rowData_latex ]
            #if any(multirows):
            #    latex += " \\\\ "
            #    last = True; lineStart = None; col = 1
            #    for multi, data in zip(multirows, formatted_rowData_latex):
            #        if last == True and multi == False:
            #            lineStart = col  # line start
            #        elif last == False and multi == True:
            #            latex += "\\cline{%d-%d} " % (lineStart, col)  # line end
            #        last = multi
            #        res = _re.search("multicolumn{([0-9])}", data)
            #        if res: col += int(res.group(1))
            #        else: col += 1
            #    if last == False:  # need to end last line
            #        latex += "\\cline{%d-%d} " % (lineStart, col-1)
            #    latex += "\n"
            #else:
            latex += " \\\\ \\hline\n"

    latex += "\\end{%s}\n" % table

    return {'latex': latex}
0ca28fce26fc7476aa5b88a621c5476ae8d381ce
1,213
from typing import List

def split_to_sublists(initial_list: list, n: int, strict: bool = True) -> List[list]:
    """Takes a list and splits it into sublists of size n

    Parameters
    ----------
    initial_list : list
        The initial list to split into sublists

    n : int
        The size of each sublist

    strict : bool
        Whether to force an error if the length of the initial list is not
        divisible by n (split into even groups), default True

    Returns
    -------
    List[list]
        A list of lists of size n (unless strict is False, then the last
        list may be > n)

    Examples
    --------
    ### Split gallery images into sublists of 3

    #### JINJA USAGE
    ```jinja2
    {% if gallery|length % 3 == 0 %}
    {% for sublist in gallery|split_to_sublists(3) %}
    <div class="row">
        <div class="col-md-4">
            <img src="{{ sublist.0[0]['file_path'] }}" alt="{{ sublist.0[0]['file_path'].split()[-1] }}">
        </div>
        <div class="col-md-4">
            <img src="{{ sublist.1[0]['file_path'] }}" alt="{{ sublist.1[0]['file_path'].split()[-1] }}">
        </div>
        <div class="col-md-4">
            <img src="{{ sublist.2[0]['file_path'] }}" alt="{{ sublist.2[0]['file_path'].split()[-1] }}">
        </div>
    </div>
    {% endfor %}
    {% endif %}
    ```

    The above jinja is roughly equivalent to something like this in pure python:

    ```python
    gallery = ["image 1", "image 2", "image 3", "image 4", "image 5", "image 6"]

    if len(gallery) % 3 == 0:
        for sublist in split_to_sublists(gallery, 3):
            # Returns [["image 1", "image 2", "image 3"], ["image 4", "image 5", "image 6"]]
            ...  # Do stuff with each sublist
    ```
    """
    if strict:
        if not len(initial_list) % n == 0:
            raise ValueError(f"Provided list was not of correct size: \n\tList: {initial_list}\n\tSegment size {n}")

    result = []

    for i in range(0, len(initial_list), n):  # Create sublists up to size n
        result.append(initial_list[i:i + n])

    return result
fcca74f9814020c99aaf8b31f092ca3ca9533216
1,215
def sectionize(parts, first_is_heading=False):
    """Join parts of the text after splitting into sections with headings.

    This function assumes that a text was split at section headings,
    so every two list elements after the first one is a heading-section
    pair. This assumption is used to join sections with their
    corresponding headings.

    Parameters
    ----------
    parts : list of str
        List of text parts.
    first_is_heading : bool
        Should first element be treated as heading in lists of length
        greater than 1.
    """
    parts = parts.copy()
    if len(parts) <= 1:
        return parts
    first = []
    if not first_is_heading:
        first.append(parts[0])
        del parts[0]
    sections = first + [
        "\n".join(parts[i:i+2]) for i in range(0, len(parts), 2)
    ]
    return sections
402832d55268dc808888f94b95e3a1c991394041
1,217
def byte_compare(stream_a, stream_b):
    """Byte compare two files (early out on first difference).

    Returns:
        (bool, int): (True, total bytes read) if the streams are equal,
        otherwise (False, offset of the first mismatch).
    """
    bufsize = 16 * 1024
    equal = True
    ofs = 0
    while True:
        b1 = stream_a.read(bufsize)
        b2 = stream_b.read(bufsize)
        if b1 != b2:
            equal = False
            if b1 and b2:
                # we have two different buffers: find first mismatch
                for a, b in zip(b1, b2):
                    if a != b:
                        break
                    ofs += 1
            break
        ofs += len(b1)
        if not b1:  # both buffers empty
            break
    return (equal, ofs)
59adfe50fefdb79edd082a35437018d4b954ec75
1,218
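A usage sketch for `byte_compare` with in-memory streams (hypothetical data; any objects with a compatible read() method work):

    import io
    equal, ofs = byte_compare(io.BytesIO(b'abcdef'), io.BytesIO(b'abXdef'))
    # equal is False, ofs is 2 (index of the first differing byte)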
import re

def is_regex(param):
    """
    Check whether the parameter is a valid regular expression string.

    :param param: {String} the parameter to check
    :return: {Boolean} whether it is a valid regular expression
    """
    try:
        re.compile(param)
        return True
    except re.error:
        return False
6a3ee33e68e33d3557db546beadc005235360080
1,219
def min_count1(lst):
    """
    Get minimal value of list, version 1

    :param lst: Numbers list
    :return: Minimal value and its count on the list
    """
    if len(lst) == 0:
        return []

    count = 0
    min_value = lst[0]
    for num in lst:
        if num == min_value:
            count += 1
        elif num < min_value:
            count = 1
            min_value = num
    return [min_value, count]
b441d0a37534909e9a990b91a953d4022698c04b
1,220
def exactly_one_topping(ketchup, mustard, onion):
    """Return whether the customer wants exactly one of the three
    available toppings on their hot dog.
    """
    return int(ketchup) + int(mustard) + int(onion) == 1
214c95d35c116993dc78740d5d16b874122960ed
1,221
def strip_line_endings(data: list) -> list:
    """Removes line endings ("\\n"). Removes an item if it only contains "\\n"."""
    return [i.rstrip("\n") for i in data if i != "\n"]
5383b1bc3884395459ca63b6f15c0a1091eaaaf0
1,222
def taxon_id(_):
    """
    Always returns 10090, the mouse taxon id.
    """
    return 10090
117fe7f8d56eb9be4ee2b0f4d782b806576faedf
1,225
def getLogisticModelNames(config):
    """
    Get the names of the models present in the configobj

    Args:
        config: configobj object defining the model and its inputs.

    Returns:
        list: list of model names.
    """
    names = []
    lmodel_space = config
    for key, value in lmodel_space.items():
        if isinstance(value, str):
            continue
        else:
            # this is a model
            names.append(key)
    return names
f7f82b12eb50a58c92970b5c2a8f99eb01945523
1,227
def mp0(g0):
    """Return 0th order free energy."""
    return g0.sum()
5aa3580fec1322bd7b4e357ec6bee4d52fae592e
1,228
import socket
import fcntl
import struct

def get_ip_address(dev="eth0"):
    """Retrieves the IP address via SIOCGIFADDR - only tested on Linux."""
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', dev[:15].encode())
        )[20:24])
    except OSError:
        return None
96f59f17937543ed9cd4652af4703eaf975b8069
1,234
def gen_fov_chan_names(num_fovs, num_chans, return_imgs=False, use_delimiter=False):
    """Generate fov and channel names

    Names have the format 'fov0', 'fov1', ..., 'fovN' for fovs and
    'chan0', 'chan1', ..., 'chanM' for channels.

    Args:
        num_fovs (int):
            Number of fov names to create
        num_chans (int):
            Number of channel names to create
        return_imgs (bool):
            Return 'chanK.tiff' as well if True. Default is False
        use_delimiter (bool):
            Appends '_otherinfo' to the first fov. Useful for testing fov
            id extraction from filenames. Default is False

    Returns:
        tuple (list, list) or (list, list, list):
            If return_imgs is False, only fov and channel names are returned
            If return_imgs is True, image names will also be returned
    """
    fovs = [f'fov{i}' for i in range(num_fovs)]
    if use_delimiter:
        fovs[0] = f'{fovs[0]}_otherinfo'
    chans = [f'chan{i}' for i in range(num_chans)]

    if return_imgs:
        imgs = [f'{chan}.tiff' for chan in chans]
        return fovs, chans, imgs
    else:
        return fovs, chans
417490259c42a52c58aab418fbb63185602e6750
1,240
def flatten_list(a_list, parent_list=None):
    """Given a list/tuple as entry point, return a flattened list version.

    EG:
    >>> flatten_list([1, 2, [3, 4]])
    [1, 2, 3, 4]

    NB: The kwargs are only for internal use of the function and should
    not be used by the caller.
    """
    if parent_list is None:
        parent_list = []
    for element in a_list:
        if isinstance(element, (list, tuple)):
            flatten_list(element, parent_list=parent_list)
        else:
            parent_list.append(element)
    return parent_list
dd6c9c66a370e65744ede40dfdc295b0ec63379a
1,243
def unpad_pkcs7(data):
    """
    Strips PKCS#7 padding from data.
    Raises ValueError if padding is invalid.
    """
    if len(data) == 0:
        raise ValueError("Error: Empty input.")

    pad_value = data[-1]
    if pad_value == 0 or pad_value > 16:
        raise ValueError("Error: Invalid padding.")

    for i in range(1, pad_value + 1):
        if data[-i] != pad_value:
            raise ValueError("Error: Invalid padding.")

    unpadded = data[:(len(data) - pad_value)]
    return unpadded
27e59b8a880c130997f19814135c09cb6e94354d
1,247
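A round-trip check for `unpad_pkcs7` (a 5-byte message padded to an 8-byte boundary with three 0x03 bytes):

    unpad_pkcs7(b'hello\x03\x03\x03')
    # b'hello'
    unpad_pkcs7(b'hello\x03\x02\x03')  # raises ValueError: invalid padding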
import re

def remove_multispaces(text):
    """
    Replace multiple spaces with a single space in each string of a list
    """
    return [re.sub(r' +', " ", word) for word in text]
0b87f6a4b0d49931b3f4bec6f9c313be05d476f0
1,252
def num_fixed_points(permutation):
    """
    Compute the number of fixed points (elements mapping to themselves)
    of a permutation.

    :param permutation: Permutation in one-line notation (length n tuple
        of the numbers 0, 1, ..., n-1).
    :return: Number of fixed points in the permutation.

    .. rubric:: Examples

    >>> num_fixed_points((0, 2, 1))
    1
    """
    n = 0
    for i in range(len(permutation)):
        if permutation[i] == i:
            n += 1
    return n
124713cd4c90988c43630a74881e7107ff748682
1,257
def iscode(c):
    """
    Tests if argument type could be lines of code,
    i.e. list of strings
    """
    if isinstance(c, list):
        if c:
            return isinstance(c[0], str)
        else:
            return True
    else:
        return False
e60da6c05922ff1e67db15fa4caa1500a8f470c7
1,259
import re

def split_bucket(s3_key):
    """
    Returns the bucket name and the key from an s3 location string.
    """
    match = re.match(r'(?:s3://)?([^/]+)/(.*)', s3_key, re.IGNORECASE)
    if not match:
        return None, s3_key
    return match.group(1), match.group(2)
6b854bdc9d105643a9fa528e6fefd19672451e63
1,261
def create_dictionary(names, months, years, max_sustained_winds,
                      areas_affected, updated_damages, deaths):
    """Create dictionary of hurricanes with hurricane name as the key and
    a dictionary of hurricane data as the value."""
    hurricanes = dict()
    num_hurricanes = len(names)
    for i in range(num_hurricanes):
        hurricanes[names[i]] = {"Name": names[i],
                                "Month": months[i],
                                "Year": years[i],
                                "Max Sustained Wind": max_sustained_winds[i],
                                "Areas Affected": areas_affected[i],
                                "Damage": updated_damages[i],
                                "Deaths": deaths[i]}
    return hurricanes
5a27d5349113f29d2af55df27a2ee2c2cc524549
1,266
def generate_file_prefix(bin_params):
    """Use the bin params to generate a file prefix."""
    prefix = "bin_"
    for j in range(0, len(bin_params)):
        if (j + 1) % 2 != 0:
            prefix += str(bin_params[j]) + "-"
        else:
            prefix += str(bin_params[j]) + "_"
    return prefix
cc058a64fcab77f6a4794a8bf7edb1e0e86c040c
1,270
def create_scale(tonic, pattern, octave=1):
    """
    Create an octave-repeating scale from a tonic note
    and a pattern of intervals

    Args:
        tonic: root note (midi note number)
        pattern: pattern of intervals (list of numbers representing
            intervals in semitones)
        octave: span of scale (in octaves)

    Returns:
        list of midi notes in the scale
    """
    assert sum(pattern) == 12
    scale = [tonic]
    note = tonic
    for o in range(octave):
        for i in pattern:
            note += i
            if note <= 127:
                scale.append(note)
    return scale
f9337289fda2e1b08cd371d3e91cc5a23c9c9822
1,276
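For instance, `create_scale` with the major-scale interval pattern (whole and half steps summing to 12 semitones) starting on middle C (MIDI note 60):

    create_scale(60, [2, 2, 1, 2, 2, 2, 1])
    # [60, 62, 64, 65, 67, 69, 71, 72]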
def get_subgraph_pos(G, pos):
    """
    Returns the filtered positions for subgraph G. If subgraph = original
    graph then pos will be returned.

    Parameters
    ----------
    G : nx.Graph
        A graph object.
    pos : dict
        A dictionary with nodes as keys and positions as values.

    Example
    -------
    >>> pos = nx.spring_layout(G)
    >>> subgraph_nodes = ['1', '2', '3']
    >>> subgraph = G.subgraph(subgraph_nodes)
    >>> subgraph_positions = get_subgraph_pos(subgraph, pos)

    Returns
    -------
    dict
        Assuming positions were generated earlier for a larger graph with
        some layout algorithm, this function returns the positions
        filtered by the subgraph.
    """
    return {k: v for k, v in pos.items() if k in G.nodes()}
ca7fc389cc51aaace7a751f2107fe5cfbfd22e6c
1,280
def pds3_label_gen_date(file):
    """Returns the creation date of a given PDS3 label.

    :param file: File path
    :type file: str
    :return: Creation date
    :rtype: str
    """
    generation_date = "N/A"
    with open(file, "r") as f:
        for line in f:
            if "PRODUCT_CREATION_TIME" in line:
                generation_date = line.split("=")[1].strip()
    return generation_date
c2877fa9246dd0c12c6ea47635ab248dc038b179
1,283
def harmony(*args):
    """
    Takes an arbitrary number of floats and returns their harmonic mean.
    Calculation is done with the formula:
        number_of_args / (1/item1 + 1/item2 + ...)

    Args:
        *args (tuple): number of arguments with a type: float, integer

    Returns:
        float: harmonic mean value
    """
    result = 0
    if 0 in args:
        return 0.0
    for item in args:
        result += 1 / item
    return len(args) / result
bc66276b3ef27ef0bfd059afa8ca7afd5d9cbb82
1,284
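A quick sanity check for `harmony` (the harmonic mean of 2 and 6 is 3):

    harmony(2.0, 6.0)
    # 3.0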
def pack(pieces=()):
    """
    Join a sequence of strings together.

    :param list pieces: list of strings
    :rtype: bytes
    """
    return b''.join(pieces)
ffd0852a16c6292f921e5cf205301171e3a96fd3
1,286
import inspect

def getsource(obj, is_binary=False):
    """Wrapper around inspect.getsource.

    This can be modified by other projects to provide customized source
    extraction.

    Inputs:

    - obj: an object whose source code we will attempt to extract.

    Optional inputs:

    - is_binary: whether the object is known to come from a binary source.
      This implementation will skip returning any output for binary objects,
      but custom extractors may know how to meaningfully process them.
    """
    if is_binary:
        return None
    else:
        return inspect.getsource(obj)
9e97a030c695b9ea50d27abc5253e47be7d4c06a
1,292