content (string, 35-416k chars) | sha1 (string, 40 chars) | id (int64, 0-710k) |
---|---|---|
def list_statistics_keys():
"""ListStatistics definition"""
return ["list", "counts"] | 39521910b4dbde3fc6c9836460c73945561be731 | 709,126 |
def forecast_handler(req, req_body, res, res_body, zip):
"""Handles forecast requests"""
return True | a2e35eaad472cfd52dead476d18d18ee2bcd3f6f | 709,127 |
from typing import Dict
from typing import Callable
from typing import Any
def override_kwargs(
kwargs: Dict[str, str],
func: Callable[..., Any],
filter: Callable[..., Any] = lambda _: True,
) -> Dict[str, str]:
"""Override the kwargs of a function given a function to apply and an optional filter.
Parameters
----------
kwargs : Dict
The function kwargs input.
func : Callable
A function to apply on the kwargs.
filter : Callable
An optional filter to apply the function only on some kwargs. (Default value = lambda _: True).
Returns
-------
Dict
The changed kwargs as a Dict.
"""
return {
key: func(value) if filter(value) else value for key, value in kwargs.items()
} | 31c689a1e2df1e5168f784011fbac6cf4a86bf13 | 709,128 |
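A minimal usage sketch for the function above (illustrative values, not part of the original snippet):
uppercased = override_kwargs({"a": "1", "b": "x"}, str.upper, filter=str.isalpha)
# only the alphabetic value is transformed: {"a": "1", "b": "X"}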
def get_qe_specific_fp_run_inputs(
configure, code_pw, code_wannier90, code_pw2wannier90,
get_repeated_pw_input, get_metadata_singlecore
):
"""
Creates the InSb inputs for the QE fp_run workflow. For the
higher-level workflows (fp_tb, optimize_*), these are passed
in the 'fp_run' namespace.
"""
def inner():
return {
'scf': get_repeated_pw_input(),
'bands': {
'pw': get_repeated_pw_input()
},
'to_wannier': {
'nscf': get_repeated_pw_input(),
'wannier': {
'code': code_wannier90,
'metadata': get_metadata_singlecore()
},
'pw2wannier': {
'code': code_pw2wannier90,
'metadata': get_metadata_singlecore()
}
}
}
return inner | b0f8fd6536a237ade55139ef0ec6daaad8c0fb08 | 709,129 |
import json
def config_string(cfg_dict):
""" Pretty-print cfg_dict with one-line queries """
upper_level = ["queries", "show_attributes", "priority", "gtf", "bed", "prefix", "outdir", "threads", "output_by_query"]
query_level = ["feature", "feature_anchor", "distance", "strand", "relative_location", "filter_attribute", "attribute_values", "internals", "name"]
upper_lines = []
for upper_key in upper_level:
if upper_key == "queries":
query_lines = "\"queries\":[\n"
#Convert sets to lists
for query in cfg_dict["queries"]:
for key in query:
if type(query[key]) == set:
query[key] = list(query[key])
query_strings = [json.dumps(query, sort_keys=True) for query in cfg_dict["queries"]]
query_lines += " " + ",\n ".join(query_strings) + "\n ]"
upper_lines.append(query_lines)
elif upper_key == "show_attributes" and upper_key in cfg_dict:
upper_lines.append("\"{0}\": {1}".format(upper_key, json.dumps(cfg_dict[upper_key])))
else:
if upper_key in cfg_dict:
upper_lines.append("\"{0}\": \"{1}\"".format(upper_key, cfg_dict[upper_key]))
config_string = "{\n" + ",\n".join(upper_lines) + "\n}\n"
return(config_string) | c6533512b6f87fea1726573c0588bbd3ddd54e41 | 709,130 |
from click.testing import CliRunner
def cli_runner(script_info):
"""Create a CLI runner for testing a CLI command.
Scope: module
.. code-block:: python
def test_cmd(cli_runner):
result = cli_runner(mycmd)
assert result.exit_code == 0
"""
def cli_invoke(command, input=None, *args):
return CliRunner().invoke(command, args, input=input, obj=script_info)
return cli_invoke | 3593354dd190bcc36f2099a92bad247c9f7c7cf1 | 709,131 |
def get_titlebar_text():
"""Return (style, text) tuples for startup."""
return [
("class:title", "Hello World!"),
("class:title", " (Press <Exit> to quit.)"),
] | 947b94f2e85d7a172f5c0ba84db0ec78045a0f6c | 709,132 |
import json
def image_fnames_captions(captions_file, images_dir, partition):
"""
Loads annotations file and return lists with each image's path and caption
Arguments:
partition: string
either 'train' or 'val'
Returns:
all_captions: list of strings
list with each image caption
all_img_paths: list of paths as strings
list with each image's path to file
"""
with open(captions_file, 'r') as f:
annotations = json.load(f)
all_captions = []
all_img_paths = []
for annot in annotations['annotations']:
caption = '<start> ' + annot['caption'] + ' <end>'
image_id = annot['image_id']
full_coco_image_path = images_dir / ('COCO_{}2014_'.format(partition) + \
'{:012d}.jpg'.format(image_id))
all_img_paths.append(full_coco_image_path)
all_captions.append(caption)
return all_captions, all_img_paths | f592decefaded079fca92091ad795d67150b4ca8 | 709,133 |
from typing import Any
def is_empty(value: Any) -> bool:
"""
empty means given value is one of none, zero length string, empty list, empty dict
"""
if value is None:
return True
elif isinstance(value, str):
return len(value) == 0
elif isinstance(value, list):
return len(value) == 0
elif isinstance(value, dict):
return len(value) == 0
else:
return False | fd4c68dd5f0369e0836ab775d73424360bad9219 | 709,134 |
def check(lst: list, search_element: int) -> bool:
"""Check if the list contains the search_element."""
return any([True for i in lst if i == search_element]) | 15f35ceff44e9fde28f577663e79a2216ffce148 | 709,135 |
from pathlib import Path
def data_dir(test_dir: Path) -> Path:
"""
Create a directory for storing the mock data set.
"""
_data_dir = test_dir / 'data'
_data_dir.mkdir(exist_ok=True)
return _data_dir | 3b204816252a2c87698197a416a4e2de218f639d | 709,136 |
import multiprocessing
def get_runtime_brief():
""" A digest version of get_runtime to be used more frequently """
return {"cpu_count": multiprocessing.cpu_count()} | 9dbb54c476d303bae401d52ce76197e094ee5d71 | 709,137 |
def build_graph(defined_routes):
"""
build the graph form route definitions
"""
G = {}
for row in defined_routes:
t_fk_oid = int(row["t_fk_oid"])
t_pk_oid = int(row["t_pk_oid"])
if not t_fk_oid in G:
G[t_fk_oid] = {}
if not t_pk_oid in G:
G[t_pk_oid] = {}
G[t_fk_oid][t_pk_oid] = row["routing_cost"]
G[t_pk_oid][t_fk_oid] = row["routing_cost"]
return G | 16962ee1f4e336a9a1edc7cc05712113461f9a1a | 709,139 |
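A short usage sketch with hypothetical route rows (not from the dataset):
defined_routes = [
{"t_fk_oid": "1", "t_pk_oid": "2", "routing_cost": 5.0},
{"t_fk_oid": "2", "t_pk_oid": "3", "routing_cost": 1.5},
]
G = build_graph(defined_routes)
# G == {1: {2: 5.0}, 2: {1: 5.0, 3: 1.5}, 3: {2: 1.5}}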
def populate_user_flags(conf, args):
"""Populate a dictionary of configuration flag parameters, "conf", from
values supplied on the command line in the structure, "args"."""
if args.cflags:
conf['cflags'] = args.cflags.split(sep=' ')
if args.ldflags:
conf['ldflags'] = args.ldflags.split(sep=' ')
return conf | 3f3fe64e2e352e0685a048747c9c8351575e40fb | 709,140 |
import re
def get_list_from_comma_separated_string(comma_separated_list):
"""
get a python list of resource names from comma separated list
:param str comma_separated_list:
:return:
"""
# remove all extra whitespace after commas and before/after string but NOT in between resource names
removed_whitespace_str = re.sub(r"(,\s+)", ",", comma_separated_list).strip()
resource_names = removed_whitespace_str.split(",")
return resource_names | 73df5fe431aceec0fec42d6019269a247b5587a5 | 709,141 |
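An illustrative call (assumed input, not from the source):
get_list_from_comma_separated_string("alpha, beta,   gamma")
# -> ['alpha', 'beta', 'gamma']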
def fx_ugoira_frames():
"""frames data."""
return {
'000000.jpg': 1000,
'000001.jpg': 2000,
'000002.jpg': 3000,
} | e3517b37bb4c9cd1dfb70b13128d16ef80a9801a | 709,142 |
import re
def _abbreviations_to_word(text: str):
"""
对句子中的压缩次进行扩展成单词
:param text: 单个句子文本
:return: 转换后的句子文本
"""
abbreviations = [
(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort')
]
]
for regex, replacement in abbreviations:
text = re.sub(regex, replacement, text)
return text | 576eb1588c40ab4b9ffa7d368249e520ecf887ba | 709,143 |
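A small usage sketch (illustrative sentence, not from the source):
_abbreviations_to_word("Dr. Smith met Mr. Jones at the fort.")
# -> "doctor Smith met mister Jones at the fort."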
def isstruct(ob):
""" isstruct(ob)
Returns whether the given object is an SSDF struct.
"""
if hasattr(ob, '__is_ssdf_struct__'):
return bool(ob.__is_ssdf_struct__)
else:
return False | 465196af79c9de1f7685e0004e92b68a7f524149 | 709,144 |
def where_between(field_name, start_date, end_date):
"""
Return the bit of query for the dates interval.
"""
str = """ {0} between date_format('{1}', '%%Y-%%c-%%d %%H:%%i:%%S')
and date_format('{2}', '%%Y-%%c-%%d 23:%%i:%%S')
""" .format( field_name,
start_date.strftime("%Y-%m-%d %H:%M:%S"),
end_date.strftime("%Y-%m-%d %H:%M:%S"))
return str | 4801d01ac8743f138e7c558da40518b75ca6daed | 709,145 |
def to_console_formatted_string(data: dict) -> str:
"""..."""
def make_line(key: str) -> str:
if key.startswith('__cauldron_'):
return ''
data_class = getattr(data[key], '__class__', data[key])
data_type = getattr(data_class, '__name__', type(data[key]))
value = '{}'.format(data[key])[:250].replace('\n', '\n ')
if value.find('\n') != -1:
value = '\n{}'.format(value)
return '+ {name} ({type}): {value}'.format(
name=key,
type=data_type,
value=value
)
keys = list(data.keys())
keys.sort()
lines = list(filter(
lambda line: len(line) > 0,
[make_line(key) for key in keys]
))
return '\n'.join(lines) | 05cec50b3eee8199b19024aae32dda2a8ba33115 | 709,146 |
def get_recommendation(anime_name, cosine_sim, clean_anime, anime_index):
"""
Getting pairwise similarity scores for all anime in the data frame.
The function returns the top 10 most similar anime to the given query.
"""
idx = anime_index[anime_name]
sim_scores = list(enumerate(cosine_sim[idx]))
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
sim_scores = sim_scores[0:11]
anime_indices = [i[0] for i in sim_scores]
result = clean_anime[['name']].iloc[anime_indices].drop(idx)
return result | 93bc3e53071200810b34e31674fcaa0a98cdaebb | 709,147 |
import re
def sort_with_num(path):
"""Extract leading numbers in a file name for numerical sorting."""
fname = path.name
nums = re.match('^\d+', fname)
if nums:
return int(nums[0])
else:
return 0 | 2209384720c33b8201c06f7a14b431972712814a | 709,148 |
import csv
import re
def indices(input_file):
"""
Parse the index file or target file and return a list of values.
:return:
"""
index_list = []
line_num = 0
index_file = list(csv.reader(open(input_file), delimiter='\t'))
for line in index_file:
line_num += 1
col_count = len(line)
if col_count > 1 and len(line[0].split("#")[0]) > 1: # Skip any lines that are blank or comments.
tmp_line = []
for i in range(col_count):
try:
line[i] = line[i].split("#")[0] # Strip out end of line comments and white space.
except IndexError:
raise SystemExit(
"There is a syntax error in file {0} on line {1}, column {2} "
.format(input_file, str(line_num), str(i)))
line[i] = re.sub(",", '', line[i]) # Strip out any commas.
tmp_line.append(line[i])
index_list.append(tmp_line)
return index_list | ea07d6f2bc8f3d23cf2ae59cb2df6c19158752fc | 709,150 |
import argparse
def parse_arguments():
"""
Parse the arguments from the user
"""
parser = argparse.ArgumentParser(
description="omeClust visualization script.\n",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"adist",
help="the input file D*N, Rows: D features and columns: N samples OR \n" +
"a distance matrix file D*D (rows and columns should be the same and in the same order) \n ",
)
parser.add_argument(
"clusters",
help="the input file D*N, Rows: D features and columns: N samples OR \n" +
"a distance matrix file D*D (rows and columns should be the same and in the same order) \n ",
)
parser.add_argument(
"--metadata",
help="metadata",
)
parser.add_argument(
"--shapeby",
type=str,
help="the input file D*N, Rows: D features and columns: N samples OR \n" +
"a distance matrix file D*D (rows and columns should be the same and in the same order) \n ",
)
parser.add_argument(
"-o", "--output",
help="the output directory\n",
required=True)
parser.add_argument(
"--size-to-plot",
type=int,
dest='size_to_plot',
default=3,
help="Minimum size of cluster to be plotted")
parser.add_argument("--fig-size", nargs=2,
# type=int,
dest='fig_size',
default=[3, 2.5], help="width and height of plots")
parser.add_argument("--point-size",
type=int,
dest='point_size',
default=3, help="width and height of plots")
parser.add_argument("--show",
help="show ordination plot before save\n",
action="store_true",
default=False,
dest='show')
return parser.parse_args() | aaa649b34cdb6819f9a56e7e0d547ccc88bff139 | 709,151 |
def most_common(l):
""" Helper function.
:l: List of strings.
:returns: most common string.
"""
# another way to get max of list?
#from collections import Counter
#data = Counter(your_list_in_here)
#data.most_common() # Returns all unique items and their counts
#data.most_common(1)
count = 0
answer = ''
for element in l:
if l.count(element) > count:
count = l.count(element)
answer = element
return answer | 5010e4e26b00099c287f8597d8dc5881a67c4034 | 709,152 |
import base64
def urlsafe_b64decode_nopadding(val):
"""Deal with unpadded urlsafe base64."""
# Yes, it accepts extra = characters.
return base64.urlsafe_b64decode(str(val) + '===') | 22ed00b07e16b4b557dc46b5caeb9f7ce9513c0d | 709,153 |
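A quick sanity check of the padding trick (illustrative value; the extra '=' characters are ignored by the decoder):
urlsafe_b64decode_nopadding('aGVsbG8')  # -> b'hello', even though the input has no padding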
def _subimg_bbox(img, subimage, xc, yc):
"""
Find the x/y bounding-box pixel coordinates in ``img`` needed to
add ``subimage``, centered at ``(xc, yc)``, to ``img``. Returns
``None`` if the ``subimage`` would extend past the ``img``
boundary.
"""
ys, xs = subimage.shape
y, x = img.shape
y0 = int(yc - (ys - 1) / 2.0)
y1 = y0 + ys
x0 = int(xc - (xs - 1) / 2.0)
x1 = x0 + xs
if (x0 >= 0) and (y0 >= 0) and (x1 < x) and (y1 < y):
return (x0, x1, y0, y1)
else:
return None | b299a6b3726ced525b538b4fea45b235fc0bd56e | 709,154 |
import math
def fcmp(x, y, precision):
"""fcmp(x, y, precision) -> -1, 0, or 1"""
if math.fabs(x-y) < precision:
return 0
elif x < y:
return -1
return 1 | 905421b36635ab830e2216ab34fee89f75c7f4c4 | 709,156 |
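Example calls showing the tolerance-based comparison (illustrative values):
fcmp(0.1 + 0.2, 0.3, 1e-9)  # -> 0 (treated as equal within the tolerance)
fcmp(1.0, 2.0, 1e-9)        # -> -1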
def fuzzy_lookup_item(name_or_id, lst):
"""Lookup an item by either name or id.
Looking up by id is exact match. Looking up by name is by containment, and
if the term is entirely lowercase then it's also case-insensitive.
Multiple matches will throw an exception, unless one of them was an exact
match.
"""
try:
idd = int(name_or_id)
for val in lst:
if val.id == idd:
return val
raise RuntimeError('Id %d not found!' % idd)
except ValueError:
insensitive = name_or_id.islower()
matches = []
for val in lst:
name = val.name or ''
if name_or_id == name:
return val
if insensitive:
name = name.lower()
if name_or_id in name:
matches.append(val)
if len(matches) == 1:
return matches[0]
if not matches:
raise RuntimeError(f'No name containing {name_or_id!r} found!') from None
raise RuntimeError(
f'Multiple matches for {name_or_id!r}: {[x.name for x in matches]}') from None | 604b3879d0f97822d5a36db6dcf468ef8eefaac9 | 709,157 |
def _flatten_value_to_list(batch_values):
"""Converts an N-D dense or sparse batch to a 1-D list."""
# Ravel for flattening and tolist so that we go to native Python types
# for more efficient followup processing.
#
batch_value, = batch_values
return batch_value.ravel().tolist() | 77bfd9d32cbbf86a16a8da2701417a9ac9b9cc93 | 709,158 |
import torch
def y_gate():
"""
Pauli y
"""
return torch.tensor([[0, -1j], [1j, 0]]) + 0j | c0da0112233773e1c764e103599a591bb7a4a7f5 | 709,159 |
import tarfile
def extract_tarball(tarball, install_dir):
"""Extract tarball to a local path"""
if not tarball.path.is_file():
raise IOError(f"<info>{tarball.path}</info> is not a file!")
try:
with tarfile.open(tarball.path, "r:gz") as f_tarball:
extraction_dir = [
obj.name
for obj in f_tarball.getmembers()
if obj.isdir() and "/" not in obj.name
][0]
f_tarball.extractall(install_dir)
except tarfile.ReadError as exc:
raise IOError(f"<info>{tarball.path}</info> is not a valid tarball!") from exc
return install_dir / extraction_dir | da9deeb71da36c7c01611f3be7965a8c4a22dc41 | 709,160 |
def dict_merge(set1, set2):
"""Joins two dictionaries."""
return dict(list(set1.items()) + list(set2.items())) | d88a68720cb9406c46bdef40f46e461a80e588c0 | 709,161 |
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10 | 4bd9b1c8d362f5e72e97f9f2c8e0d5711065291f | 709,162 |
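A quick check of the varint size boundaries (illustrative values):
print(_VarintSize(127), _VarintSize(128), _VarintSize(16384))  # 1 2 3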
def is_android(builder_cfg):
"""Determine whether the given builder is an Android builder."""
return ('Android' in builder_cfg.get('extra_config', '') or
builder_cfg.get('os') == 'Android') | 74b1620ba2f6fff46495174158f734c5aa8da372 | 709,163 |
def twoSum(self, numbers, target): # ! this approach works
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
numbers_dict = {}
for idn, v in enumerate(numbers):
if target - v in numbers_dict:
return [numbers_dict[target - v] + 1, idn + 1]
numbers_dict[v] = idn | e2b93828b5db7256b9a1e90e7e21adad1ce0b4de | 709,164 |
def celcius_to_farenheit(x):
"""calculate celcius to farenheit"""
farenheit = (9*x/5) + 32
return farenheit | fa0041451c82b20283e4f20b501a6042ab19ec95 | 709,166 |
def sentinel_id(vocabulary, return_value=None):
"""Token ID to use as a sentinel.
By default, we use the last token in the vocabulary.
Args:
vocabulary: a t5.data.vocabularies.Vocabulary
return_value: an optional integer
Returns:
an integer
"""
if return_value is not None:
return return_value
return vocabulary.vocab_size - 1 | 08ad1116b7f41ba7070359675a0133f14b9917bd | 709,168 |
from typing import Type
def is_dict_specifier(value):
# type: (object) -> bool
""" Check if value is a supported dictionary.
Check if a parameter of the task decorator is a dictionary that specifies
at least Type (and therefore can include things like Prefix, see binary
decorator test for some examples).
:param value: Decorator value to check.
:return: True if value is a dictionary that specifies at least the Type of
the key.
"""
return isinstance(value, dict) and Type in value | e18ad83a1b79a8150dfda1c65f4ab7e72cc8c8c8 | 709,169 |
def parse_star_count(stars_str):
"""Parse strings like 40.3k and get the no. of stars as a number"""
stars_str = stars_str.strip()
return int(float(stars_str[:-1]) * 1000) if stars_str[-1] == 'k' else int(stars_str) | d47177f26656e6dc33d708a0c4824ff677f3387a | 709,170 |
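Illustrative calls (assumed inputs, not from the source):
parse_star_count("40.3k")  # -> 40300
parse_star_count("512")    # -> 512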
import shutil
def is_libreoffice_sdk_available() -> bool:
""" do we have idlc somewhere (we suppose it is made available in current path var.) ? """
return shutil.which("idlc") is not None | 83f8b158bcf97aa875280b20e177895432116d21 | 709,171 |
def do_open(user_input):
"""identical to io.open in PY3"""
try:
with open(user_input) as f:
return f.read()
except Exception:
return None | 72037207adecb2758c844c2f0c7233d834060111 | 709,172 |
import os
def getDroppableFilename(mime_data):
"""
Returns the filename of a file dropped into the canvas (if it was
accepted via @see isDroppableMimeType).
"""
if mime_data.hasUrls():
# Return the first locally existing file
for url in mime_data.urls():
fpath = url.toLocalFile()
if os.path.exists(fpath):
return fpath.strip()
if mime_data.hasText():
txt = mime_data.text()
if txt.startswith('file://'):
return txt[7:].strip()
raise ValueError('Unsupported QMimeData for dropped file!') | c49370abf2b56f1cb3ded02c5edfab121a728096 | 709,173 |
def with_color(text, color, bold=False):
"""
Return a ZSH color-formatted string.
Arguments
---------
text: str
text to be colored
color: str
ZSH color code
bold: bool
whether or not to make the text bold
Returns
-------
str
string with ZSH color-coded text
"""
color_fmt = '$fg_bold[{:s}]' if bold else '$fg[{:s}]'
return '%{{{:s}%}}{:s}%{{$reset_color%}}'.format(
color_fmt.format(color), text) | 40c194d9de76ab504a25592cfb13407cb089da0a | 709,174 |
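A brief usage sketch showing the generated ZSH escape sequences (illustrative values):
with_color("ok", "green")             # -> '%{$fg[green]%}ok%{$reset_color%}'
with_color("fail", "red", bold=True)  # -> '%{$fg_bold[red]%}fail%{$reset_color%}'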
def transition_soil_carbon(area_final, carbon_final, depth_final,
transition_rate, year, area_initial,
carbon_initial, depth_initial):
"""This is the formula for calculating the transition of soil carbon
.. math:: (af * cf * df) - \
\\frac{1}{(1 + tr)^y} * \
[(af * cf * df) - \
(ai * ci * di)]
where
* :math:`af` is area_final
* :math:`cf` is carbon_final
* :math:`df` is depth_final
* :math:`tr` is transition_rate
* :math:`y` is year
* :math:`ai` is area_initial
* :math:`ci` is carbon_initial
* :math:`di` is depth_initial
Args:
area_final (float): The final area of the carbon
carbon_final (float): The final amount of carbon per volume
depth_final (float): The final depth of carbon
transition_rate (float): The rate at which the transition occurs
year (float): The amount of time in years over which the transition occurs
area_initial (float): The initial area of the carbon
carbon_initial (float): The initial amount of carbon per volume
depth_initial (float): The initial depth of carbon
Returns:
float: Transition amount of soil carbon
"""
return (area_final * carbon_final * depth_final) - \
(1/((1 + transition_rate) ** year)) * \
((area_final * carbon_final * depth_final) - \
(area_initial * carbon_initial * depth_initial)) | bfbf83f201eb8b8b0be0ec6a8722e850f6084e95 | 709,175 |
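A worked numeric example of the formula above (hypothetical values):
transition_soil_carbon(1.0, 2.0, 1.0, 0.1, 1, 1.0, 1.0, 1.0)
# = (1*2*1) - 1/(1.1**1) * ((1*2*1) - (1*1*1)) ≈ 1.0909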
import sys
def dijkstra(graph, source):
"""Find the shortest path from the source node to every other node in the given graph"""
# Declare and initialize result, unvisited, and path
result = {i: sys.maxsize if i != source else 0 for i in graph.nodes} # placeholder, by default set distance to maxsize
path = dict()
unvisited = set(graph.nodes)
while unvisited: # As long as unvisited is non-empty
min_node = None
# Find the unvisited node having smallest known distance from the source node.
for node in unvisited:
if min_node is None: # base case
min_node = node
elif result[node] < result[min_node]:
min_node = node # switch the nodes, so start with source, then next lowest...
"""tried to be fancy"""
# d = {i[0][1]: i[1] for i in graph.distances.items() if i[0][0] == node}
# min_node = min(d, key=d.get)
# result[min_node] = d[min_node]
current_distance = result[min_node]
# For the current node, find all the unvisited neighbours.
# For this, you have calculate the distance of each unvisited neighbour.
# unvisited_neighbours = unvisited.intersection(graph.neighbours[min_node]) does not work, might not be a path between nodes
for neighbour in graph.neighbours[min_node]:
if neighbour in unvisited:
distance = current_distance + graph.distances[(min_node, neighbour)]
# If the calculated distance of the unvisited neighbour is less than the already known distance in result dictionary,
# update the shortest distance in the result dictionary.
if distance < result[neighbour]:
result[neighbour] = distance
path[neighbour] = min_node
# Remove the current node from the unvisited set.
unvisited.remove(min_node)
# should do an ASSERT to check no values in result dict equal sys.maxsize
return result | 4c3fda4922795b8a47e7b94bf3a09016f5eb2551 | 709,176 |
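A small usage sketch, assuming a graph object that exposes the nodes, neighbours and distances attributes the function expects (hypothetical data):
from types import SimpleNamespace
graph = SimpleNamespace(
nodes={'A', 'B', 'C'},
neighbours={'A': {'B'}, 'B': {'A', 'C'}, 'C': {'B'}},
distances={('A', 'B'): 1, ('B', 'A'): 1, ('B', 'C'): 2, ('C', 'B'): 2},
)
print(dijkstra(graph, 'A'))  # {'A': 0, 'B': 1, 'C': 3}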
import numpy as np
def remove_outliers(column):
"""
:param column: list of numbers
:return:
"""
if len(column) < 1:
return []
clean_column = []
q1 = np.percentile(column, 25)
q3 = np.percentile(column, 75)
#k = 1.5
k = 2
# [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
lower_bound = q1 - k*(q3-q1)
upper_bound = q3 + k*(q3-q1)
for c in column:
if c >= lower_bound and c <= upper_bound:
clean_column.append(c)
return clean_column | 04c1e736e27ffeaef528f25fd303d0f27c3a94ac | 709,177 |
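An illustrative call (assumed input): the value far outside the interquartile fence is dropped.
remove_outliers([1, 2, 3, 4, 100])  # -> [1, 2, 3, 4]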
import os
import platform
def get_os():
"""
if called in powershell returns "powershell"
if called in cygwin returns "cygwin"
if called in darwin/osx returns "osx"
for linux returns "linux"
"""
env = os.environ
p = platform.system().lower()
terminal = p
operating_system = p
if p == 'windows':
terminal = "powershell"
if 'TERM' in env:
terminal = env['TERM']
if p == 'darwin':
terminal = 'osx'
return terminal | 843dc64f40b50e7adc45f1f4c092550c578cddd3 | 709,178 |
def split_data_set(data_set, axis, value):
"""
Split the data set on a given feature, keeping only the rows whose value for that feature equals the given value.
(Because the data has now been split on that feature, the feature is no longer needed for later splits, so it is removed from the new rows.)
:param data_set: the data set to split; each row is a list whose last element is the label and whose other elements are features
:param axis: the feature (column index) to split on
:param value: the feature value to keep (a row is kept when its feature equals this value)
:return:
>>>myDat = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
>>>split_data_set(myDat,0,1)
[[1, 'yes'], [1, 'yes'], [0, 'no']]
>>>split_data_set(myDat,0,0)
[[1, 'no'], [1, 'no']]
"""
# Create a new list object
ret_data_set = []
for feature_vec in data_set:
if feature_vec[axis] == value:
# Extract: remove the given feature from the row, forming a new feature+label list
reduced_feature_vec = feature_vec[:axis]
reduced_feature_vec.extend(feature_vec[axis + 1:])
ret_data_set.append(reduced_feature_vec)
return ret_data_set | f90fdffee3bbee4b4477e371a9ed43094051126a | 709,179 |
import shutil
def rmdir_empty(f):
"""Returns a count of the number of directories it has deleted"""
if not f.is_dir():
return 0
removable = True
result = 0
for i in f.iterdir():
if i.is_dir():
result += rmdir_empty(i)
removable = removable and not i.exists()
else:
removable = removable and (i.name == '.DS_Store')
if removable:
items = list(f.iterdir())
assert not items or items[0].name == '.DS_Store'
print(f)
shutil.rmtree(f)
result += 1
return result | f2dba5bb7e87c395886574ca5f3844a8bab609d9 | 709,180 |
def check_source(module):
"""
Check that module doesn't have any globals.
Example::
def test_no_global(self):
result, line = check_source(self.module)
self.assertTrue(result, "Make sure no code is outside functions.\\nRow: " + line)
"""
try:
source = module.__file__
except Exception:
raise Exception('Make sure the code can be executed')
allowed = [
"import ",
"from ",
"def ",
"class ",
" ",
"\t",
"#",
"if __name__",
"@",
]
with open(source) as file:
for line in file.readlines():
if line.strip() == "":
continue
for prefix in allowed:
if line.startswith(prefix):
break
else:
return (False, line)
return (True, "") | 6bc012892d6ec7bb6788f20a565acac0f6d1c662 | 709,181 |
def return_request(data):
"""
Arguments:
data
Return if called for detection: list[dict1, dict2, ...] where:
dict = {
"feature": feature
}
Return if called for extraction: list[dict1, dict2, ...] where:
dict = {
"confidence_score": predict probability,
"class": face,
"bounding_box": [xmin, ymin, xmax, ymax],
"keypoints": {'left_eye': (x,y), 'right_eye':(x,y), 'nose': (x,y), 'mouth_left': (x,y), 'mouth_right': (x,y)}
}
"""
contents = []
try:
boxs = data['predictions']
print(type(boxs))
print(boxs)
# for box in boxs:
# contents.append({
# "confidence_score": box[4],
# "class": 'face',
# "bounding_box": [box[0], box[1], box[2], box[3]]
# })
except:
pass
try:
features = data['features']
for feature in features:
contents.append({
"feature": feature
})
except:
pass
return contents | 11887921c89a846ee89bc3cbb79fb385382262fa | 709,182 |
def make_batch_keys(args, extras=None):
"""depending on the args, different data are used by the listener."""
batch_keys = ['objects', 'tokens', 'target_pos'] # all models use these
if extras is not None:
batch_keys += extras
if args.obj_cls_alpha > 0:
batch_keys.append('class_labels')
if args.lang_cls_alpha > 0:
batch_keys.append('target_class')
return batch_keys | a86c2a5cff58f811a67cbdd5eed322c86aa3e0e0 | 709,183 |
from typing import Any
def first_fail_second_succeed(_: Any, context: Any) -> str:
""" Simulate Etherscan saying for the first time 'wait', but for the second time 'success'. """
context.status_code = 200
try:
if first_fail_second_succeed.called: # type: ignore
return '{ "status": "1", "result" : "Pass - Verified", "message" : "" }'
except AttributeError: # first time
pass
first_fail_second_succeed.called = True # type: ignore
return '{ "status": "0", "result" : "wait for a moment", "message" : "" }' | 5feb3188bdee2d0d758584709df13dc876c37391 | 709,185 |
from typing import Any
def escape_parameter(value: Any) -> str:
"""
Escape a query parameter.
"""
if value == "*":
return value
if isinstance(value, str):
value = value.replace("'", "''")
return f"'{value}'"
if isinstance(value, bytes):
value = value.decode("utf-8")
return f"'{value}'"
if isinstance(value, bool):
return "TRUE" if value else "FALSE"
if isinstance(value, (int, float)):
return str(value)
return f"'{value}'" | 00b706681b002a3226874f04e74acbb67d54d12e | 709,186 |
def Get_Query(Fq):
""" Get_Query
"""
Q = ""
EoF = False
Ok = False
while True:
l = Fq.readline()
if ("--" in l) :
# skip line
continue
elif l=="":
EoF=True
break
else:
Q += l
if ";" in Q:
Ok = True
break
return EoF, Ok, Q | a1850799f7c35e13a5b61ba8ebbed5d49afc08df | 709,187 |
from pathlib import Path
def file(base_path, other_path):
"""
Returns a single file
"""
return [[Path(base_path), Path(other_path)]] | 3482041757b38929a58d7173731e84a915225809 | 709,188 |
def make_str_lst_unc_val(id, luv):
"""
make_str_lst_unc_val(id, luv)
Make a formatted string from an ID string and a list of uncertain values.
Input
-----
id A number or a string that will be output as a string.
luv A list of DTSA-II UncertainValue2 items. These will be printed
as comma-delimited pairs with 6 digits following the decimal.
Return
------
A string with comma-delimited values with the ID and mean and uncertainty
for each item in the list. This is suitable for writing output to a .csv
file.
Example:
--------
import dtsa2.jmGen as jmg
import gov.nist.microanalysis.Utility as epu
nmZnO1 = 40.1
uvOKa1 = epu.UncertainValue2(0.269157,0.000126)
uvZnLa1 = epu.UncertainValue2(0.259251,9.4e-05)
uvSiKa1 = epu.UncertainValue2(0.654561,8.4e-05)
l_uvals = [uvOKa1, uvZnLa1, uvSiKa1]
out = jmg.make_list_unc_val_string(nmZnO1, l_uvals)
print(out)
1> 40.1, 0.269157, 0.000126, 0.259251, 0.000094, 0.654561, 0.000084
"""
lv = len(luv)
i = 0
rv = "%s, " % (id)
for uv in luv:
rc = round(uv.doubleValue(), 6)
uc = round(uv.uncertainty(), 6)
if i == lv-1:
rv += "%g, %.6f" % (rc, uc)
else:
rv += "%g, %.6f, " % (rc, uc)
i += 1
return(rv) | c65b9bb0c6539e21746a06f7a864acebc2bade03 | 709,189 |
import typing
def translate_null_strings_to_blanks(d: typing.Dict) -> typing.Dict:
"""Map over a dict and translate any null string values into ' '.
Leave everything else as is. This is needed because you cannot add TableCell
objects with only a null string or the client crashes.
:param Dict d: dict of item values.
:rtype Dict:
"""
# Beware: locally defined function.
def translate_nulls(s):
if s == "":
return " "
return s
new_d = {k: translate_nulls(v) for k, v in d.items()}
return new_d | 1a6cfe2f8449d042eb01774054cddde08ba56f8c | 709,190 |
import time
def timer(func):
""" Decorator to measure execution time """
def wrapper(*args, **kwargs):
start_time = time.time()
ret = func(*args, **kwargs)
elapsed = time.time() - start_time
print('{:s}: {:4f} sec'.format(func.__name__, elapsed))
return ret
return wrapper | 0f6a8a4dc8eff1aa49efaf5d26ac46e0cc483b3e | 709,192 |
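A quick usage sketch of the decorator, reusing the time import above (illustrative):
timed_sleep = timer(time.sleep)
timed_sleep(0.1)  # prints something like "sleep: 0.100123 sec" and returns None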
import uuid
def _create_keyword_plan_campaign(client, customer_id, keyword_plan):
"""Adds a keyword plan campaign to the given keyword plan.
Args:
client: An initialized instance of GoogleAdsClient
customer_id: A str of the customer_id to use in requests.
keyword_plan: A str of the keyword plan resource_name this keyword plan
campaign should be attributed to.create_keyword_plan.
Returns:
A str of the resource_name for the newly created keyword plan campaign.
Raises:
GoogleAdsException: If an error is returned from the API.
"""
keyword_plan_campaign_service = client.get_service(
"KeywordPlanCampaignService"
)
operation = client.get_type("KeywordPlanCampaignOperation")
keyword_plan_campaign = operation.create
keyword_plan_campaign.name = f"Keyword plan campaign {uuid.uuid4()}"
keyword_plan_campaign.cpc_bid_micros = 1000000
keyword_plan_campaign.keyword_plan = keyword_plan
network = client.enums.KeywordPlanNetworkEnum.GOOGLE_SEARCH
keyword_plan_campaign.keyword_plan_network = network
geo_target = client.get_type("KeywordPlanGeoTarget")
# Constant for U.S. Other geo target constants can be referenced here:
# https://developers.google.com/google-ads/api/reference/data/geotargets
geo_target.geo_target_constant = "geoTargetConstants/2840"
keyword_plan_campaign.geo_targets.append(geo_target)
# Constant for English
language = "languageConstants/1000"
keyword_plan_campaign.language_constants.append(language)
response = keyword_plan_campaign_service.mutate_keyword_plan_campaigns(
customer_id=customer_id, operations=[operation]
)
resource_name = response.results[0].resource_name
print(f"Created keyword plan campaign with resource name: {resource_name}")
return resource_name | b6ce2ee2ec40e1192461c41941f18fe04f901344 | 709,193 |
def is_hermitian(mx, tol=1e-9):
"""
Test whether mx is a hermitian matrix.
Parameters
----------
mx : numpy array
Matrix to test.
tol : float, optional
Tolerance on absolute magnitude of elements.
Returns
-------
bool
True if mx is hermitian, otherwise False.
"""
(m, n) = mx.shape
for i in range(m):
if abs(mx[i, i].imag) > tol: return False
for j in range(i + 1, n):
if abs(mx[i, j] - mx[j, i].conjugate()) > tol: return False
return True | 31e9a1faff21707b2fc44c7824bb05fc85967f00 | 709,194 |
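Illustrative checks with small NumPy matrices (assumed inputs):
import numpy as np
is_hermitian(np.array([[1, 1j], [-1j, 2]]))  # -> True
is_hermitian(np.array([[1, 1j], [1j, 2]]))   # -> False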
from typing import Any
def get_object_unique_name(obj: Any) -> str:
"""Return a unique string associated with the given object.
That string is constructed as follows: <object class name>_<object_hex_id>
"""
return f"{type(obj).__name__}_{hex(id(obj))}" | f817abf636673f7ef6704cbe0ff5a7a2b897a3f6 | 709,195 |
def filter_dict(regex_dict, request_keys):
"""
filter regular expression dictionary by request_keys
:param regex_dict: a dictionary of regular expressions that
follows the following format:
{
"name": "sigma_aldrich",
"regexes": {
"manufacturer": {
"regex": "[C|c]ompany(?P\u003cdata\u003e.{80})",
"flags": "is"
},
"product_name": {
"regex": "\\s[P|p]roduct\\s(?P\u003cdata\u003e.{80})",
"flags": "is"
},
...
}
returns
{
'sigma_aldrich': {
"manufacturer": {
"regex": "[C|c]ompany(?P\u003cdata\u003e.{80})",
"flags": "is"
},
}
:param request_keys: a list of dictionary keys that correspond to valid
regex lookups i.e. ['manufacturer', 'product_name']
"""
out_dict = dict()
nested_regexes = regex_dict['regexes']
for request_key in request_keys:
if request_key in nested_regexes:
out_dict[request_key] = nested_regexes[request_key]
return {'name': regex_dict['name'], 'regexes': out_dict} | fb503f0d4df0a7965c276907b7a9e43bd14f9cac | 709,196 |
import six
def calculate_partition_movement(prev_assignment, curr_assignment):
"""Calculate the partition movements from initial to current assignment.
Algorithm:
For each partition in initial assignment
# If replica set different in current assignment:
# Get Difference in sets
:rtype: tuple
dict((partition, (from_broker_set, to_broker_set)), total_movements
"""
total_movements = 0
movements = {}
for prev_partition, prev_replicas in six.iteritems(prev_assignment):
curr_replicas = curr_assignment[prev_partition]
diff = len(set(curr_replicas) - set(prev_replicas))
if diff:
total_movements += diff
movements[prev_partition] = (
(set(prev_replicas) - set(curr_replicas)),
(set(curr_replicas) - set(prev_replicas)),
)
return movements, total_movements | 180a47944523f0c814748d1918935e47d9a7ada4 | 709,197 |
def recurse_while(predicate, f, *args):
"""
Accumulate value by executing recursively function `f`.
The function `f` is executed with starting arguments. While the
predicate for the result is true, the result is fed into function `f`.
If predicate is never true then starting arguments are returned.
:param predicate: Predicate function guarding execution.
:param f: Function to execute.
:param *args: Starting arguments.
"""
result = f(*args)
result = result if type(result) == tuple else (result, )
while predicate(*result):
args = result # predicate(args) is always true
result = f(*args)
result = result if type(result) == tuple else (result, )
return args if len(args) > 1 else args[0] | fd3313760c246336519a2e89281cc94a2bee6833 | 709,198 |
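An illustrative call: keep doubling while the predicate holds, returning the last value that still satisfied it.
recurse_while(lambda x: x < 100, lambda x: x * 2, 3)  # -> 96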
def total_allocation_constraint(weight, allocation: float, upper_bound: bool = True):
"""
Used for inequality constraint for the total allocation.
:param weight: np.array
:param allocation: float
:param upper_bound: bool; if true the constraint is from above (sum of weights <= allocation), else from below
(sum of weights >= allocation)
:return: np.array
"""
if upper_bound:
return allocation - weight.sum()
else:
return weight.sum() - allocation | b92c4bd18d1c6246ff202987c957a5098fd66ba1 | 709,199 |
import re
def split_prec_rows(df):
"""Split precincts into two rows.
NOTE: Because this creates a copy of the row values, don't rely on total vote counts, just look at percentage.
"""
for idx in df.index:
# look for rows with precincts that need to be split
if re.search('\d{4}/\d{4}',idx):
row_values = df.loc[idx]
split = idx.split('/')
for p in split:
df.loc[p] = row_values
# delete original row
df = df.drop(idx, axis=0)
return(df) | 72ba424080b0ff3e04ecc5d248bc85b4f409167c | 709,200 |
def mu_model(u, X, U, k):
"""
Returns the utility of the kth player
Parameters
----------
u
X
U
k
Returns
-------
"""
M = X.T @ X
rewards = M @ u
penalties = u.T @ M @ U[:, :k] * U[:, :k]
return rewards - penalties.sum(axis=1) | 59bce1ce8617f0e11340d1c1ab18315fd81e6925 | 709,201 |
def rightOfDeciSeperatorToDeci(a):
"""This function only convert value at the right side of decimal seperator to decimal"""
deciNum = 0
for i in range(len(a)):
deciNum += (int(a[i]))*2**-(i+1)
return deciNum | 14cfd187758836d329ac4778a30167ddece0f2a0 | 709,202 |
import torch
def conv(input, weight):
"""
Returns the convolution of input and weight tensors,
where input contains sequential data.
The convolution is along the sequence axis.
input is of size [batchSize, inputDim, seqLength]
"""
output = torch.nn.functional.conv1d(input=input, weight=weight)
return output | e213be11c423ff63a1ebffda55331298fcf53443 | 709,203 |
from typing import List
from typing import Optional
def label_to_span(labels: List[str],
scheme: Optional[str] = 'BIO') -> dict:
"""
convert labels to spans
:param labels: a list of labels
:param scheme: labeling scheme, in ['BIO', 'BILOU'].
:return: labeled spans, a dict mapping (start_idx, end_idx) tuples to their label
"""
assert scheme in ['BIO', 'BILOU'], ValueError("unknown labeling scheme")
labeled_spans = dict()
i = 0
while i < len(labels):
if labels[i] == 'O' or labels[i] == 'ABS':
i += 1
continue
else:
if scheme == 'BIO':
if labels[i][0] == 'B':
start = i
lb = labels[i][2:]
i += 1
try:
while labels[i][0] == 'I':
i += 1
end = i
labeled_spans[(start, end)] = lb
except IndexError:
end = i
labeled_spans[(start, end)] = lb
i += 1
# this should not happen
elif labels[i][0] == 'I':
i += 1
elif scheme == 'BILOU':
if labels[i][0] == 'U':
start = i
end = i + 1
lb = labels[i][2:]
labeled_spans[(start, end)] = lb
i += 1
elif labels[i][0] == 'B':
start = i
lb = labels[i][2:]
i += 1
try:
while labels[i][0] != 'L':
i += 1
end = i
labeled_spans[(start, end)] = lb
except IndexError:
end = i
labeled_spans[(start, end)] = lb
break
i += 1
else:
i += 1
return labeled_spans | 01e3a1f3d72f8ec0b1cfa2c982fc8095c06c09f8 | 709,204 |
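An illustrative call in BIO mode (assumed labels, not from the source):
label_to_span(['B-PER', 'I-PER', 'O', 'B-LOC'])
# -> {(0, 2): 'PER', (3, 4): 'LOC'}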
def _normalize_handler_method(method):
"""Transforms an HTTP method into a valid Python identifier."""
return method.lower().replace("-", "_") | aad23dba304ba39708e4415de40019479ccf0195 | 709,205 |
def compare_files(og_maxima,new_maxima, compare_file, until=100, divisor=1000):
"""
given input of the maxima of a graph, compare it to the maxima from data100.txt
maxima will be a series of x,y coordinates corresponding to the x,y values of a maximum from a file.
First see if there is a maxima with the same x value as data100.txt, if there is not expand the x value ranges
until a maximum is found. Find out what this dx is for the new file.
Note do it for all the peaks of data100.txt at once, so that if it finds a peak for the 2nd peak of data100.txt,
it doesn't also assign this to the first peak as well.
keyword arguments until and divisor:
for the dx loop the loop will increase dx from 0 until until/divisor in steps of 1/divisor
eg for default values until=100 and divisor=1000,
it will increase dx from 0 until 100/1000 (=0.1) in steps of 1/1000 (=0.001)
changing these arguments will lead to more or less peak matching, which could
affect the results of the calculation significantly.
"""
if compare_file == 'data100.txt':
return None
# Whenever there is a match we will iterate this, so that we can compare
#this at the end?
number_of_matches = 0
# Initiate two lists to contain all the dx and dy values for each peak that
# is matched by the code.
dx_values = []
dy_values = []
# Loop through the original maxima list (supplied as an argument)
# and also loop through the maxima from the file being compared.
for og_idx,og_val in enumerate(og_maxima.T[0]):
for idx,val in enumerate(new_maxima.T[0]):
#this will loop dx from 0 to (until)/divisor in steps of 1/divisor
for x in range(until+1):
dx = x/divisor
# For the current value of dx see if there is a matching
# peak between the data100.txt file and the file being compared.
# There is a match if the val from the compare_file is within the range
# of the original peak x value +/- the dx value.
if og_val - dx <= val <= og_val + dx:
#if there is a match print some logging information to the console.
print(f"Peak Match : index {og_idx} from data100.txt and {idx} from {compare_file}")
print(f"values are {og_val} and {val} respectively")
# iterate the number of peak matches between the two files being compared.
number_of_matches+=1
# append the current dx value to our running list which will keep track
# of the dx values for all the matched peaks
dx_values.append(dx)
# Get the absolute value of the difference in y values (dy)
dy = abs(og_maxima.T[1][og_idx] - new_maxima.T[1][idx])
dy_values.append(dy)
#breaks us out of the "for x in range" loop
break
# If the for loop (for x in range ...) isn't terminated by a break statement
# I.E. we didn't get a match
else:
"move onto next peak in new_maxima"
continue
# If the for loop does get terminated by the break statement
# I.E. we get a match
"""compare next peak in og_maxima, IE break the new_maxima loop and move onto
next in the original maxima list"""
break
# Calculate the absolute value of the difference in number of peaks
# between the two data files
different_no_peaks = abs(len(new_maxima) - len(og_maxima))
return [dx_values, dy_values, number_of_matches, different_no_peaks] | 86fe2ffd02785d41284b8edfef44d0dc0e097c90 | 709,206 |
import argparse
def command_line():
"""Generate an Argument Parser object to control the command line options
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-w", "--webdir", dest="webdir",
help="make page and plots in DIR", metavar="DIR",
default=None)
parser.add_argument("-s", "--samples", dest="samples",
help="Posterior samples hdf5 file", nargs='+',
default=None)
parser.add_argument("--labels", dest="labels",
help="labels used to distinguish runs", nargs='+',
default=None)
parser.add_argument("--prior", dest="prior",
choices=["population", "default", "both"],
default="both",
help=("Prior to use when calculating source "
"classification probabilities"))
parser.add_argument("--plot", dest="plot",
help="name of the plot you wish to make",
default="bar", choices=["bar", "mass_1_mass_2"])
return parser | f1463f291cc99acc66cb1fb46d1be0a7ef60e9ca | 709,207 |
import re
def strip_price(header_list):
"""input a list of tag-type values and return list of strings with surrounding html characters removed"""
match_obs = []
regex = '\$(((\d+).\d+)|(\d+))'
string_list = []#['' for item in range(len(header_list))]
for item in range(len(header_list)):
match_obs.append(re.search(regex, str(header_list[item])))
for i in range(len(match_obs)):
#print(match_obs[i])
string_list.append(match_obs[i].group(1))
#print(string_list)
return string_list | 7b3d90416e44f8aa61ababc0e7b68f82ae754413 | 709,208 |
def minutes_to_restarttime(minutes) :
"""
converts an int meaning Minutes after midnight into a
restartTime string understood by the bos command
"""
if minutes == -1 :
return "never"
pod = "am"
if minutes > 12*60 :
pod = "pm"
minutes -= 12*60
time = "%d:%02d %s" % (minutes / 60, minutes % 60, pod)
return time | 6d7807cebb7a474553dda8eadfd27e5ce7b2a657 | 709,209 |
import re
def Substitute_Percent(sentence):
"""
Substitutes percents with special token
"""
sentence = re.sub(r'''(?<![^\s"'[(])[+-]?[.,;]?(\d+[.,;']?)+%(?![^\s.,;!?'")\]])''',
' @percent@ ', sentence)
return sentence | 61bc6970af09703ef018bfcc9378393241ae21ed | 709,210 |
def replace_cipd_revision(file_path, old_revision, new_revision):
"""Replaces cipd revision strings in file.
Args:
file_path: Path to file.
old_revision: Old cipd revision to be replaced.
new_revision: New cipd revision to use as replacement.
Returns:
Number of replaced occurrences.
Raises:
IOError: If no occurrences were found.
"""
with open(file_path) as f:
contents = f.read()
num = contents.count(old_revision)
if not num:
raise IOError('Did not find old CIPD revision {} in {}'.format(
old_revision, file_path))
newcontents = contents.replace(old_revision, new_revision)
with open(file_path, 'w') as f:
f.write(newcontents)
return num | f429e74f0dd7180ab4bf90d662f8042b958b81f8 | 709,211 |
import getpass
def espa_login() -> str:
"""
Get ESPA password using command-line input
:return:
"""
return getpass.getpass("Enter ESPA password: ") | 3ba61567d23ba3771effd6f0aa1a4ac504467378 | 709,212 |
def escape_cdata(cdata):
"""Escape a string for an XML CDATA section"""
return cdata.replace(']]>', ']]>]]><![CDATA[') | c38b934b4c357e8c15fd1f3942f84ca3aaab4ee1 | 709,213 |
def _strip_unbalanced_punctuation(text, is_open_char, is_close_char):
"""Remove unbalanced punctuation (e.g parentheses or quotes) from text.
Removes each opening punctuation character for which it can't find
corresponding closing character, and vice versa.
It can only handle one type of punctuation
(e.g. it could strip quotes or parentheses but not both).
It takes functions (is_open_char, is_close_char),
instead of the characters themselves,
so that we can determine from nearby characters whether a straight quote is
an opening or closing quote.
Args:
text (string): the text to fix
is_open_char: a function that accepts the text and an index,
and returns true if the character at that index is
an opening punctuation mark.
is_close_char: same as is_open_char for closing punctuation mark.
Returns:
The text with unmatched punctuation removed.
"""
# lists of unmatched opening and closing chararacters
opening_chars = []
unmatched_closing_chars = []
for idx, c in enumerate(text):
if is_open_char(text, idx):
opening_chars.append(idx)
elif is_close_char(text, idx):
if opening_chars:
# this matches a character we found earlier
opening_chars.pop()
else:
# this doesn't match any opening character
unmatched_closing_chars.append(idx)
char_indices = [i for (i, _) in enumerate(text)
if not(i in opening_chars or i in unmatched_closing_chars)]
stripped_text = "".join([text[i] for i in char_indices])
return stripped_text | db4b8f201e7b01922e6c06086594a8b73677e2a2 | 709,214 |
def get_min_max_value(dfg):
"""
Gets min and max value assigned to edges
in DFG graph
Parameters
-----------
dfg
Directly follows graph
Returns
-----------
min_value
Minimum value in directly follows graph
max_value
Maximum value in directly follows graph
"""
min_value = 9999999999
max_value = -1
for edge in dfg:
if dfg[edge] < min_value:
min_value = dfg[edge]
if dfg[edge] > max_value:
max_value = dfg[edge]
return min_value, max_value | 17a98350f4e13ec51e72d4357e142ad661e57f54 | 709,215 |
import time
def fmt_time(timestamp):
"""Return ISO formatted time from seconds from epoch."""
if timestamp:
return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(timestamp))
else:
return '-' | c87f1da7b6a3b1b8d8daf7d85a2b0746be58133b | 709,216 |
from typing import Tuple
def break_word_by_trailing_integer(pname_fid: str) -> Tuple[str, str]:
"""
Splits a word that has a value that is an integer
Parameters
----------
pname_fid : str
the DVPRELx term (e.g., A(11), NSM(5))
Returns
-------
word : str
the value not in parentheses
value : str
the value in parentheses
Examples
--------
>>> break_word_by_trailing_integer('T11')
('T', '11')
>>> break_word_by_trailing_integer('THETA11')
('THETA', '11')
"""
nums = []
i = 0
for i, letter in enumerate(reversed(pname_fid)):
if letter.isdigit():
nums.append(letter)
else:
break
num = ''.join(nums[::-1])
if not num:
msg = ("pname_fid=%r does not follow the form 'T1', 'T11', 'THETA42' "
"(letters and a number)" % pname_fid)
raise SyntaxError(msg)
word = pname_fid[:-i]
assert len(word)+len(num) == len(pname_fid), 'word=%r num=%r pname_fid=%r' % (word, num, pname_fid)
return word, num | e9b9c85b4225269c94918ce1cc2e746d3c74aa5c | 709,217 |
def get_data_shape(X_train, X_test, X_val=None):
"""
Creates, updates and returns data_dict containing metadata of the dataset
"""
# Creates data_dict
data_dict = {}
# Updates data_dict with lenght of training, test, validation sets
train_len = len(X_train)
test_len = len(X_test)
data_dict.update({'train_len': train_len, 'test_len': test_len})
if X_val is not None:
val_len = len(X_val)
data_dict.update({'val_len': val_len})
# else : val_len = None
# Updates number of dimensions of data
no_of_dim = X_train.ndim
data_dict.update({'no_of_dim': no_of_dim})
# Updates number of features(, number of channels, width, height)
if no_of_dim == 2:
no_of_features = X_train.shape[1]
data_dict.update({'no_of_features': no_of_features})
elif no_of_dim == 3:
channels = X_train.shape[1]
features_per_c = X_train.shape[2]
no_of_features = channels * features_per_c
data_dict.update({'no_of_features': no_of_features,
'channels': channels,
'features_per_c': features_per_c})
elif no_of_dim == 4:
channels = X_train.shape[1]
height = X_train.shape[2]
width = X_train.shape[3]
features_per_c = height*width
no_of_features = channels*features_per_c
data_dict.update({'height':height, 'width':width, 'channels':channels,
'features_per_c':features_per_c,
'no_of_features':no_of_features})
return data_dict | 231a334b625d0bfe6aa6e63b79de2b2226b8e684 | 709,218 |
def _get_unique_barcode_ids(pb_index, isoseq_mode=False):
"""
Get a list of sorted, unique fw/rev barcode indices from an index object.
"""
bc_sel = (pb_index.bcForward != -1) & (pb_index.bcReverse != -1)
bcFw = pb_index.bcForward[bc_sel]
bcRev = pb_index.bcReverse[bc_sel]
bc_ids = sorted(list(set(zip(bcFw, bcRev))))
if isoseq_mode:
bc_ids = sorted(list(set([tuple(sorted(bc)) for bc in bc_ids])))
return bc_ids | bdfb386d26415a7b3f9f16661d83a38a63958ad0 | 709,219 |
import time
def local_timezone():
"""
Returns:
(str): Name of current local timezone
"""
try:
return time.tzname[0]
except (IndexError, TypeError):
return "" | c97c11582b27d8aa0205555535616d6ea11775b9 | 709,220 |
import getpass
def ask_credentials():
"""Interactive function asking the user for ASF credentials
:return: tuple of username and password
:rtype: tuple
"""
# SciHub account details (will be asked by execution)
print(
" If you do not have a ASF/NASA Earthdata user account"
" go to: https://search.asf.alaska.edu/ and register"
)
uname = input(" Your ASF/NASA Earthdata Username:")
pword = getpass.getpass(" Your ASF/NASA Earthdata Password:")
return uname, pword | a601a460b3aeddf9939f3acf267e58fdaf9ed7cd | 709,221 |
def rule_valid_histone_target(attr):
""" {
"applies" : ["ChIP-Seq", "experiment_target_histone"],
"description" : "'experiment_target_histone' attributes must be 'NA' only for ChIP-Seq Input"
} """
histone = attr.get('experiment_target_histone', [''])[0]
if attr.get('experiment_type', [""])[0].lower() in ['ChIP-Seq Input'.lower()]:
return histone == 'NA'
else:
return histone != 'NA' | 0a10f09c6b9e50cf01583d0c803e5112629e503b | 709,222 |
def split_exclude_string(people):
"""
Function to split a given text of persons' name who wants to exclude
with comma separated for each name e.g. ``Konrad, Titipat``
"""
people = people.replace('Mentor: ', '').replace('Lab-mates: ', '').replace('\r\n', ',').replace(';', ',')
people_list = people.split(',')
return [p.strip() for p in people_list if p.strip() != '']
def peek_with_kwargs(init, args=[], permissive=False):
"""
Make datatypes passing keyworded arguments to the constructor.
This is a factory function; returns the actual `peek` routine.
Arguments:
init (callable): type constructor.
args (iterable): arguments NOT to be keyworded; order does matter.
permissive (bool): missing positional arguments are set to None (*new in 0.8.5*).
Returns:
callable: deserializer (`peek` routine).
All the peeked attributes that are not referenced in `args` are passed to `init` as
keyworded arguments.
"""
if permissive:
def try_peek(store, attr, container, _stack=None):
try:
return store.peek(attr, container, _stack=_stack)
except KeyError:
return None
def peek(store, container, _stack=None):
return init(\
*[ try_peek(store, attr, container, _stack) for attr in args ], \
**dict([ (attr, store.peek(attr, container, _stack=_stack)) \
for attr in container if attr not in args ]))
else:
def peek(store, container, _stack=None):
return init(\
*[ store.peek(attr, container, _stack=_stack) for attr in args ], \
**dict([ (attr, store.peek(attr, container, _stack=_stack)) \
for attr in container if attr not in args ]))
return peek | d06df21ab439da1cacb52befa6c619f1efa23d1a | 709,224 |
def initialise_halo_params():
"""Initialise the basic parameters needed to simulate a forming Dark matter halo.
Args:
None
Returns:
G: gravitational constant.
epsilon: softening parameter.
limit: width of the simulated universe.
radius: simulated radius of each particle
(for proper handling of boundary conditions).
num_pos_particles: number of positive mass particles.
num_neg_particles: number of negative mass particles.
chunks_value: dask chunks value.
time_steps: number of time steps to simulate.
"""
G = 1.0
epsilon = 0.07
limit = 80000
radius = 4
num_pos_particles = 5000
num_neg_particles = 45000
chunks_value = (num_pos_particles+num_neg_particles)/5.0
time_steps = 1000
return G, epsilon, limit, radius, num_pos_particles, num_neg_particles, chunks_value, time_steps | ee3311fd17a40e8658f11d2ddf98d0ff8eb27a6d | 709,226 |
def argMax(scores):
"""
Returns the key with the highest value.
"""
if len(scores) == 0: return None
items = list(scores.items())  # materialise so it can be indexed (dict views are not subscriptable)
values = [x[1] for x in items]
maxIndex = values.index(max(values))
return items[maxIndex][0] | 9310988a0f8aa1279882d060ade7febdc102b0c5 | 709,227 |
def get_ratio(numerator, denominator):
"""Get ratio from numerator and denominator."""
return (
0 if not denominator else round(float(numerator or 0) / float(denominator), 2)
) | e51a860292d54d2e44909ad878d0b1d8e66c37c2 | 709,228 |
def irrf(valor=0):
"""
-> Function to calculate the IRRF (Brazilian withholding income tax) amount.
:param valor: Base salary amount used to calculate the IRRF.
:return: Returns the IRRF amount and the tax rate applied.
"""
irrf = []
if valor < 1903.99:
irrf.append(0)
irrf.append(0)
elif valor >= 1903.99 and valor <= 2826.65:
irrf.append((valor * 7.5) / 100 - 142.80) # 7.5% rate, minus the deduction amount.
irrf.append('7,5')
elif valor >= 2826.66 and valor <= 3751.05:
irrf.append((valor * 15) / 100 - 354.80) # 15% rate, minus the deduction amount.
irrf.append('15')
elif valor >= 3751.06 and valor <= 4664.68:
irrf.append((valor * 22.5) / 100 - 636.13) # 22.5% rate, minus the deduction amount.
irrf.append('22,5')
elif valor > 4664.68:
irrf.append((valor * 27.5) / 100 - 869.36) # 27.5% rate, minus the deduction amount.
irrf.append('27,5')
return irrf | 53646b770b2c2359e1e8c4f725b27396cc972050 | 709,229 |
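A worked example in the 15% bracket (illustrative value):
irrf(3000.00)  # -> roughly [95.2, '15']  (3000 * 15% minus the 354.80 deduction)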
import os
def add_absname(file):
"""Prefix a file name with the working directory."""
work_dir = os.path.dirname(__file__)
return os.path.join(work_dir, file) | 34d78ff980cbe16ace897cf164563badc9d36d2a | 709,230 |
def dataset_labels(alldata, tag=None):
""" Return label for axis of dataset
Args:
ds (DataSet): dataset
tag (str): can be 'x', 'y' or 'z'
"""
if tag == 'x':
d = alldata.default_parameter_array()
return d.set_arrays[0].label
if tag == 'y':
d = alldata.default_parameter_array()
return d.set_arrays[1].label
if tag is None or tag == 'z':
d = alldata.default_parameter_array()
return d.label
return '?' | 4ccd3af38d3f18e9fbf43e98f8a898426c6c1440 | 709,231 |
def max_sequence(arr):
"""
The maximum sum subarray problem consists in finding the maximum sum of a contiguous subsequence in an array or
list of integers.
:param arr: an array or list of integers.
:return: the maximum value found within the subarray.
"""
best = 0
for x in range(len(arr)):
for y in range(len(arr)):
if sum(arr[x:y+1]) > best:
best = sum(arr[x:y+1])
return best | 3ae6dafb4879476ba6e15610645f26299a4c6719 | 709,232 |
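An illustrative call using a classic test case:
max_sequence([-2, 1, -3, 4, -1, 2, 1, -5, 4])  # -> 6 (the subarray [4, -1, 2, 1])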
def get_reddit_slug(permalink):
"""
Get the reddit slug from a submission permalink, with '_' replaced by '-'
Args:
permalink (str): reddit submission permalink
Returns:
str: the reddit slug for a submission
"""
return list(filter(None, permalink.split("/")))[-1].replace("_", "-") | 587239a0b7bbd88e10d49985dd6ebfd3768038d8 | 709,233 |
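A brief usage sketch with a hypothetical permalink:
get_reddit_slug("/r/python/comments/abc123/my_first_post/")  # -> 'my-first-post'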