content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
import struct
def pack4(v):
    """Encode a 32-bit unsigned integer as 4 little-endian bytes.

    Raises AssertionError when v is outside [0, 0xffffffff].
    """
    assert 0 <= v <= 0xffffffff
    # Format '<I': '<' = little-endian, 'I' = 4-byte unsigned int.
    return struct.pack('<I', v)
def _merge_sse(sum1, sum2):
"""Merge the partial SSE."""
sum_count = sum1 + sum2
return sum_count | 0aae96262cfb56c6052fdbe5bbd92437d37b1f76 | 708,598 |
def fix_units(dims):
    """Fill in missing "units" entries.

    Any dim lacking a "units" key receives the last dim's "units"
    value (None when the last dim has no units either). Mutates and
    returns *dims*.
    """
    fallback = dims[-1].get("units")
    for entry in dims:
        entry["units"] = entry.get("units", fallback)
    return dims
def partCmp(verA: str, verB: str) -> int:
    """Compare parts of a semver; a literal "*" matches anything.

    Args:
        verA (str): lhs part to compare
        verB (str): rhs part to compare
    Returns:
        int: 0 if equal, 1 if verA > verB and -1 if verA < verB
    """
    if "*" in (verA, verB) or verA == verB:
        return 0
    return 1 if int(verA) > int(verB) else -1
def second_order_difference(t, y):
    """ Calculate the second order divided difference of three points.

    Args:
        t: ndarray, the three values of the independent variable
        y: ndarray, the function values at each t
    Returns:
        double: the second order difference of the given points
    """
    # First order differences of the two adjacent intervals.
    slope_left = (y[1] - y[0]) / (t[1] - t[0])
    slope_right = (y[2] - y[1]) / (t[2] - t[1])
    return (slope_right - slope_left) / (t[2] - t[0])
import requests
def download(url, local_filename, chunk_size=1024 * 10):
    """Download `url` into `local_filename'.

    :param url: The URL to download from.
    :type url: str
    :param local_filename: The local filename to save into.
    :type local_filename: str
    :param chunk_size: The size to download chunks in bytes (10Kb by default).
    :type chunk_size: int
    :rtype: str
    :returns: The path saved to.
    """
    # stream=True is required; without it requests buffers the entire
    # response body in memory before iter_content() yields anything,
    # defeating the purpose of chunked writing.
    response = requests.get(url, stream=True)
    with open(local_filename, 'wb') as fp:
        for chunk in response.iter_content(chunk_size=chunk_size):
            if chunk:  # skip keep-alive chunks
                fp.write(chunk)
    return fp.name
def sum_2_level_dict(two_level_dict):
    """Sum all entries in a two level dict

    Parameters
    ----------
    two_level_dict : dict
        Nested dict

    Returns
    -------
    tot_sum : float
        Sum of all leaf entries in the nested dict (0 when empty)
    """
    # The original carried a commented-out earlier implementation;
    # that dead code has been removed.
    return sum(sum(inner.values()) for inner in two_level_dict.values())
import re
def check_ip(ip):
    """
    Check whether the IP is a valid dotted-quad IPv4 address.

    Args:
        ip (str): IP to check (surrounding whitespace is ignored)
    Returns:
        bool: True if valid, else False
    """
    ip = ip.strip()
    # Each octet is 0-255 with no leading zeros. The second fragment
    # was a non-raw string in the original ('\.'), which emits an
    # invalid-escape SyntaxWarning on modern Python; both pieces are
    # raw strings now.
    pattern = (r'^(?:(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])'
               r'(\.(?!$)|$)){4}$')
    return re.match(pattern, ip) is not None
from typing import Iterator
from typing import Optional
def _stream_lines(blob: bytes) -> Iterator[bytes]:
"""
Split bytes into lines (newline (\\n) character) on demand.
>>> iter = _stream_lines(b"foo\\nbar\\n")
>>> next(iter)
b'foo'
>>> next(iter)
b'bar'
>>> next(iter)
Traceback (most recent call last):
...
StopIteration
>>> iter = _stream_lines(b"\\x00")
>>> next(iter)
b'\\x00'
:param blob: the bytes to split.
:return: a generated list of lines.
"""
start = 0
def _index(needle: bytes) -> Optional[int]:
try:
return blob.index(needle, start)
except ValueError:
return None
line_index = _index(b"\n")
while line_index is not None:
yield blob[start:line_index]
start = line_index + 1
line_index = _index(b"\n")
# Deal with blobs that do not end in a newline.
if start < len(blob):
yield blob[start:] | 8a166af1f765ca9eb70728d4c4bb21c00d7ddbf8 | 708,618 |
def chao1_var_no_doubletons(singles, chao1):
    """Calculates chao1 variance in absence of doubletons.

    From EstimateS manual, equation 7.
    chao1 is the estimate of the mean of Chao1 from the same dataset.
    """
    s = float(singles)
    term_a = s * (s - 1) / 2
    term_b = s * (2 * s - 1) ** 2 / 4
    term_c = s ** 4 / (4 * chao1)
    return term_a + term_b - term_c
def inBarrel(chain, index):
    """
    Establish if the outer hit of a muon is in the barrel region.

    Returns True when |z| of the muon's outer position is below 108
    (barrel), False otherwise. The original implementation fell
    through and implicitly returned None in the non-barrel case; an
    explicit boolean is returned now.
    """
    return abs(chain.muon_outerPositionz[index]) < 108
def molefraction_2_pptv(n):
    """Convert mixing ratio units from mole fraction to parts per
    thousand by volume (pptv)

    INPUTS
    n: mole fraction (moles per mole air)

    OUTPUTS
    q: mixing ratio in parts per thousand by volume
    """
    # n mol COS per mol air equals (1000 * n) parts per 1000 molecules
    # of air, so the conversion is a single factor of 1e3.
    # NOTE(review): the original docstring said both "thousand" and
    # "trillion"; the implemented factor of 1e3 is parts per thousand.
    # Confirm the intended unit with the callers.
    return 1e3 * n
def convert_to_number(string):
    """
    Tries to cast input into an integer number, returning the
    number if successful and returning False otherwise.
    """
    try:
        return int(string)
    except (ValueError, TypeError):
        # Only conversion failures are expected here; the original bare
        # except would also have swallowed unrelated errors such as
        # KeyboardInterrupt.
        return False
from typing import List
def mean(nums: List) -> float:
    """
    Find mean of a list of numbers.
    Wiki: https://en.wikipedia.org/wiki/Mean
    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    >>> mean([1, 2, 3, 4, 5, 6, 7, 8])
    4.5
    >>> mean([])
    Traceback (most recent call last):
     ...
    ValueError: List is empty
    """
    if nums:
        return sum(nums) / len(nums)
    raise ValueError("List is empty")
def is_gzip(name):
    """Return True if the name indicates that the file is compressed with
    gzip."""
    gzip_suffix = ".gz"
    return name.endswith(gzip_suffix)
def upcoming_movie_name(soup):
    """
    Extracts the list of movies from BeautifulSoup object.

    :param soup: BeautifulSoup object containing the html content.
    :return: list of movie names
    """
    movie_names = []
    for heading in soup.find_all('h4'):
        anchors = heading.find_all('a')
        try:
            movie_names.append(anchors[0]['title'])
        except (IndexError, KeyError):
            # Skip headings with no <a> tag at all (IndexError, which
            # the original did not catch) or whose anchor lacks a
            # title attribute (KeyError).
            continue
    return movie_names
def learning_rate_decay(alpha, decay_rate, global_step, decay_step):
    """Update the learning rate using inverse time decay.

    Args:
        alpha : the original learning rate
        decay_rate : weight used to determine the rate at which
            alpha will decay
        global_step : number of passes of gradient descent elapsed
        decay_step : number of passes of gradient descent that should
            occur before alpha is decayed further
    Returns:
        the updated value for alpha
    """
    # Number of whole decay intervals completed so far.
    completed_decays = int(global_step / decay_step)
    return alpha / (1 + decay_rate * completed_decays)
import hashlib
def get_hash(x: str):
    """Generate a hash from a string.

    Returns the hex MD5 digest of the UTF-8 encoding of *x*.
    """
    return hashlib.md5(x.encode()).hexdigest()
def is_anagram(s, t):
    """True if strings s and t are anagrams.
    """
    # Two strings are anagrams exactly when their sorted character
    # sequences coincide.
    return sorted(t) == sorted(s)
def _organize_parameter(parameter):
    """
    Convert operation parameter message to its dict format.

    Flattens the typed map fields (mapStr/mapBool/mapInt/mapDouble)
    into one dict, converting the literal string 'None' to None, then
    appends the string-list field mapStrList with '' entries converted
    to None. The `strValue` wrapper level is dropped in the process.

    Args:
        parameter (OperationParameter): Operation parameter message.
            Expected to expose map-like attributes named mapStr,
            mapBool, mapInt, mapDouble and mapStrList (presumably a
            protobuf message -- confirm against the caller).

    Returns:
        dict, operation parameter keyed by the original map keys.
    """
    parameter_result = dict()
    # The four scalar-valued maps are merged directly.
    parameter_keys = [
        'mapStr',
        'mapBool',
        'mapInt',
        'mapDouble',
    ]
    for parameter_key in parameter_keys:
        base_attr = getattr(parameter, parameter_key)
        parameter_value = dict(base_attr)
        # convert str 'None' to None
        for key, value in parameter_value.items():
            if value == 'None':
                parameter_value[key] = None
        parameter_result.update(parameter_value)
    # drop `mapStrList` and `strValue` keys in result parameter:
    # each mapStrList value wraps its items in a `strValue` sequence,
    # which is unwrapped into a plain list here.
    str_list_para = dict(getattr(parameter, 'mapStrList'))
    result_str_list_para = dict()
    for key, value in str_list_para.items():
        str_list_para_list = list()
        for str_ele in getattr(value, 'strValue'):
            str_list_para_list.append(str_ele)
        # Empty strings act as the 'missing' marker and become None.
        str_list_para_list = list(map(lambda x: None if x == '' else x, str_list_para_list))
        result_str_list_para[key] = str_list_para_list
    parameter_result.update(result_str_list_para)
    return parameter_result
def return_intersect(cameraList):
    """
    Calculates the intersection of the Camera objects in *cameraList*.

    Folds the list with each camera's ``intersect`` method: the first
    camera seeds the accumulator and every subsequent camera is
    intersected into it. If there exists no intersection between any
    cameras in the list, an empty Camera is returned (whatever
    ``intersect`` yields in that case); an empty input list yields
    None.

    Parameters:
        cameraList : *list* of *camera.Camera* objects, each
            containing a *poly* and a *coordsList*.

    Returns:
        intersectCam : *camera.Camera* object (or None for an empty
            input list).
    """
    accumulated = None
    for cam in cameraList:
        accumulated = cam if accumulated is None else accumulated.intersect(cam)
    return accumulated
import struct
def readShort(f):
    """Read 2 bytes as BE integer in file f"""
    # '>h' decodes a big-endian signed 16-bit integer.
    (value,) = struct.unpack(">h", f.read(2))
    return value
import csv
def load_data(filename):
    """
    Load shopping data from a CSV file `filename` and convert into a list of
    evidence lists and a list of labels. Return a tuple (evidence, labels).

    evidence should be a list of lists, where each list contains the
    following values, in order:
        - Administrative, an integer
        - Administrative_Duration, a floating point number
        - Informational, an integer
        - Informational_Duration, a floating point number
        - ProductRelated, an integer
        - ProductRelated_Duration, a floating point number
        - BounceRates, a floating point number
        - ExitRates, a floating point number
        - PageValues, a floating point number
        - SpecialDay, a floating point number
        - Month, an index from 0 (January) to 11 (December)
        - OperatingSystems, an integer
        - Browser, an integer
        - Region, an integer
        - TrafficType, an integer
        - VisitorType, an integer 0 (not returning) or 1 (returning)
        - Weekend, an integer 0 (if false) or 1 (if true)

    labels should be the corresponding list of labels, where each label
    is 1 if Revenue is true, and 0 otherwise.
    """
    # Month abbreviations exactly as they appear in the dataset
    # (note "June" rather than "Jun").
    months = ["Jan", "Feb", "Mar", "Apr", "May", "June",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
    evidence = []
    labels = []
    # BUG FIX: the original ignored the `filename` parameter and always
    # opened the hard-coded path "shopping.csv".
    with open(filename) as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        for row in reader:
            evidence.append([
                int(row[0]), float(row[1]), int(row[2]), float(row[3]),
                int(row[4]), float(row[5]), float(row[6]), float(row[7]),
                float(row[8]), float(row[9]), months.index(row[10]),
                int(row[11]), int(row[12]), int(row[13]), int(row[14]),
                0 if row[15] == "New_Visitor" else 1,
                0 if row[16] == "FALSE" else 1,
            ])
            labels.append(0 if row[17] == "FALSE" else 1)
    return (evidence, labels)
import time
def timeit(func):
    """
    Decorator that replaces a function with one returning its runtime.

    Note: the wrapped function's own return value is discarded; the
    wrapper returns only the elapsed wall-clock time in seconds. (The
    original docstring claimed a (func, time_taken) tuple, which the
    code never produced.)

    @param func: function to be timed
    @return: wrapper that runs func and returns the time taken, in seconds
    """
    def wrapper(*args, **kwargs) -> float:
        start = time.time()
        func(*args, **kwargs)  # result intentionally dropped
        total_time = time.time() - start
        return total_time
    return wrapper
def merge_count(data1, data2):
    """Auxiliary method to merge the lengths."""
    total = data1 + data2
    return total
def B(s):
    """string to byte-string in
    Python 2 (including old versions that don't support b"")
    and Python 3"""
    # isinstance is the idiomatic type check and, unlike the original
    # type() equality comparison, also handles subclasses of the text
    # type correctly. type(u"") keeps the Python 2 compatibility.
    if isinstance(s, type(u"")):
        return s.encode('utf-8')
    return s
def get_receiver_type(rinex_fname):
    """
    Return the receiver type (header line REC # / TYPE / VERS) found
    in *rinex_fname*.

    Raises ValueError when the header ends (or the file is exhausted)
    without such a line.
    """
    with open(rinex_fname) as handle:
        for record in handle:
            stripped = record.rstrip()
            if stripped.endswith('END OF HEADER'):
                break
            if stripped.endswith('REC # / TYPE / VERS'):
                # Columns 21-40 hold the receiver type field.
                return record[20:40].strip()
    raise ValueError('receiver type not found in header of RINEX file '
                     '{}'.format(rinex_fname))
def buildHeaderString(keys):
    """
    Use authentication keys to build a literal header string that will be
    passed to the API with every call.
    """
    return {
        # Request headers
        'participant-key': keys["participantKey"],
        'Content-Type': 'application/json',
        'Ocp-Apim-Subscription-Key': keys["subscriptionKey"],
    }
def classname(obj):
    """Returns the name of an objects class"""
    return type(obj).__name__
def getbias(x, bias):
    """Bias in Ken Perlin's bias and gain functions."""
    # The 1e-6 term keeps the denominator away from zero.
    denominator = (1.0 / bias - 2.0) * (1.0 - x) + 1.0 + 1e-6
    return x / denominator
from typing import Dict
from typing import Optional
def get_exif_flash_fired(exif_data: Dict) -> Optional[bool]:
    """
    Parses the "flash" value from exif to determine if it was fired.

    In the EXIF encoding bit 0 of the Flash short is the "fired" flag:
    every odd value (0x1 "Fired", 0x9 "On, Fired", 0x19 "Auto, Fired",
    0x41 "Fired, Red-eye reduction", 0x5f, ...) means the flash fired,
    and every even value (0x0 "No Flash", 0x8 "On, Did not fire",
    0x10 "Off", 0x18 "Auto, Did not fire", 0x20 "No flash function",
    0x58, ...) means it did not.

    :param exif_data: mapping of EXIF tag names to values
    :return: True/False for fired/not fired, or None if the "Flash"
        tag is not present
    """
    if 'Flash' not in exif_data:
        return None
    # Only the least-significant bit matters for "fired".
    return (int(exif_data['Flash']) & 1) == 1
def merge_named_payload(name_to_merge_op):
    """Merging dictionary payload by key.

    name_to_merge_op is a dict mapping from field names to merge_ops.
    Example:
      If name_to_merge_op is
        {
            'f1': mergeop1,
            'f2': mergeop2,
            'f3': mergeop3
        },
      Then two payloads { 'f1': a1, 'f2': b1, 'f3': c1 } and
      { 'f1': a2, 'f2': b2, 'f3': c2 } will be merged into
      {
          'f1': mergeop1(a1, a2),
          'f2': mergeop2(b1, b2),
          'f3': mergeop3(c1, c2)
      }.
    """
    def merge(lhs, rhs):
        return {field: op(lhs[field], rhs[field])
                for field, op in name_to_merge_op.items()}
    return merge
def model(p, x):
    """Evaluate the cubic polynomial model at *x*.

    p holds the coefficients [c0, c1, c2, c3] of
    c0 + c1*x + c2*x**2 + c3*x**3.
    """
    return p[0] + p[1]*x + p[2]*x**2. + p[3]*x**3.
def TourType_LB_rule(M, t):
    """
    Lower bound on tour type

    :param M: Model (presumably a Pyomo model exposing TourType,
        okTourType and tt_lb -- confirm against the model definition)
    :param t: tour type
    :return: Constraint rule
    """
    # Total tours of type t, summed over the windows i whose (i, s)
    # pair in okTourType matches this type, must reach the configured
    # lower bound tt_lb[t].
    return sum(M.TourType[i, t] for (i, s) in M.okTourType if s == t) >= M.tt_lb[t]
def pick_ind(x, minmax):
    """ Return indices between minmax[0] and minmax[1] (inclusive).

    Args:
        x : Input vector
        minmax : Minimum and maximum values
    Returns:
        Boolean mask marking entries of x inside the interval
    """
    lower, upper = minmax[0], minmax[1]
    return (x >= lower) & (x <= upper)
def word_flipper(our_string):
    """
    Flip the individual words in a sentence

    Args:
       our_string(string): Strings to have individual words flip
    Returns:
       string: String with words flipped
    """
    # Reverse each space-separated word and rejoin on single spaces.
    return " ".join(word[::-1] for word in our_string.split(" "))
import six
from typing import Any
def _get_kind_name(item):
    """Returns the kind name in CollectionDef.

    Args:
        item: A data item.
    Returns:
        The string representation of the kind in CollectionDef.

    NOTE(review): `isinstance(item, Any)` with typing.Any raises
    TypeError at runtime, so any item that is not a string, int or
    float will crash here rather than fall through to "node_list".
    The upstream TensorFlow code this resembles checks against the
    protobuf `Any` message type -- the typing import looks like a
    mistaken substitution; confirm and restore the intended type.
    """
    if isinstance(item, (six.string_types, six.binary_type)):
        kind = "bytes_list"
    elif isinstance(item, six.integer_types):
        kind = "int64_list"
    elif isinstance(item, float):
        kind = "float_list"
    elif isinstance(item, Any):
        kind = "any_list"
    else:
        kind = "node_list"
    return kind
def parse_custom_commands(command, separator=";"):
    """Parse run custom command string into the commands list

    :param str command: run custom [config] command(s)
    :param str separator: commands separator in the string
    :rtype: list[str]
    """
    if command:
        # Drop leading/trailing separators before splitting so empty
        # edge segments are not produced.
        return command.strip(separator).split(separator)
    return []
def lower_volatility_band(c, dev_target, band_target, center_target):
    """
    | Calculates the lower volatility band
    | Name: lower\_volatility\_band\_\ **c**\ \_times\_\ **band_target.name**\ &\ **dev_target.name**\ \_over\_\ **center_target.name**

    Returns a closure that, given a pandas DataFrame-like `data`,
    computes (and caches as a new column) center - c * dev * band.

    :param c: Multiplier constant
    :type c: float
    :param dev_target: Used for band displacement. Can be a constant or a function
    :type dev_target: function or float
    :param band_target: Used for band displacement. Can be a constant or a function
    :type band_target: function or float
    :param center_target: Data column for the band center
    :type center_target: str
    """
    def return_function(data):
        # The column name embeds `.name` for function-like targets and
        # the raw value for constants; the three branches cover the
        # combinations (note `&` between the two hasattr() bools acts
        # as logical AND here since both operands are plain bools).
        if hasattr(band_target, "name") & hasattr(dev_target, "name"):
            column_name = f"lower_volatility_band_{c}_times_{band_target.name}&{dev_target.name}_under_{center_target.name}"
        elif hasattr(band_target, "name"):
            column_name = f"lower_volatility_band_{c}_times_{band_target.name}&{dev_target}_under_{center_target.name}"
        else:
            column_name = f"lower_volatility_band_{c}_times_{band_target}&{dev_target}_under_{center_target.name}"
        # Compute once per DataFrame; later calls reuse the cached column.
        if column_name not in data.columns:
            data[column_name] = center_target - c * dev_target * band_target
        return data[column_name].copy()
    return return_function
def is_scalar(a) -> bool:
"""
Tests if a python object is a scalar (instead of an array)
Parameters
----------
a : object
Any object to be checked
Returns
-------
bool
Whether the input object is a scalar
"""
if isinstance(a, (list, tuple)):
return False
if hasattr(a, "__array__") and hasattr(a, "__len__"): # np.array(1) is scalar
return False
return True | 29206a7921da74257e6af66311c0bbfc4b576ac0 | 708,691 |
import json
def payload_from_api_post_event(event):
    """Maps an API event to the expected payload"""
    # The API gateway delivers the JSON payload as a string under the
    # 'body' key, e.g.:
    # event = {'body': '{"timeserie1": [[1, 100], [2, 100]]}'}
    return json.loads(event['body'])
def format_link_header(link_header_data):
    """Return a string ready to be used in a Link: header."""
    parts = []
    for entry in link_header_data:
        parts.append('<{0}>; rel="{1}"'.format(entry['link'], entry['rel']))
    return ', '.join(parts)
import re
def prune_string(string):
    """Prune a string.

    - Replace multiple consecutive spaces with a single space.
    - Remove spaces after open brackets.
    - Remove spaces before close brackets.
    """
    collapsed = re.sub(r" +", " ", string)
    no_space_after_open = re.sub(r"(?<=[\(\[\{]) +", "", collapsed)
    return re.sub(r" +(?=[\)\]\}])", "", no_space_after_open)
def solve_capcha(capcha_str):
    """Function which calculates the solution to part 1

    Arguments
    ---------
    capcha_str : str, a string of numbers

    Returns
    -------
    total : int, the sum of digits that match the previous digit,
        treating the string as circular (the first digit's
        predecessor is the last digit)
    """
    digits = [int(ch) for ch in capcha_str]
    # digits[i - 1] at i == 0 wraps to the final digit, giving the
    # circular comparison the puzzle requires.
    return sum(d for i, d in enumerate(digits) if d == digits[i - 1])
def benchmark_summary(benchmark_snapshot_df):
    """Creates summary table for a benchmark snapshot with columns:
    |fuzzer|time||count|mean|std|min|25%|median|75%|max|

    Rows are ordered by descending median edge coverage.
    """
    summary = (benchmark_snapshot_df
               .groupby(['fuzzer', 'time'])['edges_covered']
               .describe()
               .rename(columns={'50%': 'median'}))
    return summary.sort_values('median', ascending=False)
import re
def normalize_spaces(s: str) -> str:
    """
    Return a new string with runs of whitespace collapsed to single
    spaces and leading/trailing whitespace removed.
    """
    collapsed = re.sub(r'\s+', ' ', s)
    return collapsed.strip()
def create_abstract_insert(table_name, row_json, return_field=None):
    """Create an abstracted raw insert psql statement for inserting a single
    row of data

    :param table_name: String of a table_name
    :param row_json: dictionary of ingestion data
    :param return_field: String of the column name to RETURNING in statement
    :return: String of an insert statement
    """
    # dict keys are already unique, so the original's per-key
    # duplicate check was dead code.
    columns = list(row_json)
    placeholders = ', '.join(':' + column for column in columns)
    column_list = ', '.join(columns)
    statement = ('INSERT INTO ' + str(table_name) + '(' + column_list + ')'
                 + ' VALUES (' + placeholders + ')')
    if return_field is not None:
        statement += ' RETURNING ' + str(return_field)
    return statement
def convert(secs):
    """Takes a time in seconds and converts to min:sec:msec"""
    whole_minutes, remainder = divmod(secs, 60)
    whole_seconds = int(remainder)
    # Round the fractional second to the nearest millisecond.
    milliseconds = int(round((remainder - whole_seconds) * 1000))
    return f'{int(whole_minutes)} mins, {whole_seconds} secs, {milliseconds} msecs'
def import_class(path):
    """
    Import a class from a dot-delimited module path. Accepts both dot and
    colon seperators for the class portion of the path.

    ex::
        import_class('package.module.ClassName')
        or
        import_class('package.module:ClassName')
    """
    # A colon, when present, takes precedence as the class separator.
    if ':' in path:
        module_path, class_name = path.split(':')
    else:
        module_path, class_name = path.rsplit('.', 1)
    # fromlist forces __import__ to return the leaf module rather than
    # the top-level package.
    module = __import__(module_path, fromlist=[class_name], level=0)
    return getattr(module, class_name)
def num_range(num):
    """
    Use in template language to loop through numberic range
    """
    return range(0, num)
def filter_background(bbox, bg_data):
    """
    Takes bounding box and background geojson file assumed to be the US states, and outputs a geojson-like dictionary
    containing only those features with at least one point within the bounding box, or any state that completely
    contains the bounding box.

    This tests if a feature contains the bounding box by drawing the box that contains the feature and checking if that
    box also contains the bounding box. Because features are odd shapes, this may find that more than one feature
    completely contains the bounding box. E.g., if you draw a box around Maryland it will also contain a chunk of West
    Virginia. To deal with this, we are allowed to find that multiple states contain the bounding box.

    :param bbox: The coordinates of the bounding box as [lon, lat, lon, lat]
    :param bg_data: a geojson-like dict describing the background
    :return: the features from bg_filename whose borders intersect bbox OR the feature which completely contains bbox
    """
    box_lon = [bbox[0], bbox[2]]
    box_lat = [bbox[1], bbox[3]]
    features = bg_data['features']
    in_box = []
    for f in features:
        # Remember how many features were kept before this one so we
        # can tell whether the inner loops already added it.
        starting_len = len(in_box)
        # Define points for bounding box around the feature.
        feature_max_lat = -90
        feature_max_lon = -180
        feature_min_lat = 90
        feature_min_lon = 180
        coordinates = f['geometry']['coordinates']
        for group in coordinates:
            if len(in_box) > starting_len:
                # This feature has already been added
                break
            # actual points for MultiPolygons are nested one layer deeper than those for polygons
            if f['geometry']['type'] == 'MultiPolygon':
                geom = group[0]
            else:
                geom = group
            for lon, lat in geom:
                # check if any point along the state's borders falls within the bounding box.
                if min(box_lon) <= lon <= max(box_lon) and min(box_lat) <= lat <= max(box_lat):
                    in_box.append(f)
                    break
                # If any point of a feature falls within the bounding box, then the feature cannot contain the box,
                # so this only needs to be run if the above if statement is not executed
                feature_min_lon = min(feature_min_lon, lon)
                feature_min_lat = min(feature_min_lat, lat)
                feature_max_lon = max(feature_max_lon, lon)
                feature_max_lat = max(feature_max_lat, lat)
        # If the box containing a feature also contains the bounding box, keep this feature
        # Allow adding more than one because otherwise MD contains boxes in WV, and CA would contain most of NV.
        if feature_min_lat < min(box_lat) and feature_max_lat > max(box_lat) and \
                feature_min_lon < min(box_lon) and feature_max_lon > max(box_lon):
            in_box.append(f)
    # Wrap the surviving features back into a FeatureCollection.
    keepers = {
        'type': 'FeatureCollection',
        'features': in_box
    }
    return keepers
def rename_columns(table, mapper):
    """ Renames the table headings to conform with the ketos naming convention.

        Args:
            table: pandas DataFrame
                Annotation table.
            mapper: dict
                Dictionary mapping the headings of the input table to the
                standard ketos headings.

        Returns:
            : pandas DataFrame
                New table with renamed headings (input left unmodified).
    """
    renamed = table.rename(columns=mapper)
    return renamed
from typing import Union
import json
def parse_tuple(s: Union[str, tuple]) -> tuple:
    """Helper for load_detections_csv, to parse string column into column of Tuples.

    Non-string inputs are passed through unchanged; string inputs are
    rewritten from Python tuple syntax into JSON list syntax, parsed,
    sorted and returned as a tuple (empty strings give an empty tuple).
    """
    if not isinstance(s, str):
        return s
    normalized = (s.replace("(", "[").replace(")", "]")
                   .replace("'", '"').strip().replace(",]", "]"))
    if not normalized:
        return tuple()
    return tuple(sorted(json.loads(normalized)))
def pair_sorter(aln):
    """Get the alignment name and attributes for sorting.

    The returned tuple orders alignments by read name, then first-in-
    pair before second (hence the negation), then mapped before
    unmapped, primary before supplementary, and primary before
    secondary (False sorts before True).
    """
    return (
        aln.name,
        not aln.first_in_pair,
        aln.unmapped,
        aln.supplementary_alignment,
        aln.secondary_alignment)
def is_private_bool(script_dict):
    """ Returns is_private boolean value from user dictionary object """
    profile_page = script_dict['entry_data']['ProfilePage'][0]
    return profile_page['graphql']['user']['is_private']
def fromRGB(rgb):
    """Convert tuple or list to red, green and blue values that can be accessed as follows:
    a = fromRGB((255, 255, 255))
    a["red"]
    a["green"]
    a["blue"]
    """
    red, green, blue = rgb[0], rgb[1], rgb[2]
    return {"red": red, "green": green, "blue": blue}
import math
def computeTelescopeTransmission(pars, offAxis):
    """
    Compute tel. transmission (0 < T < 1) for a given set of parameters
    as defined by the MC model and for a given off-axis angle.

    Parameters
    ----------
    pars: list of float
        Parameters of the telescope transmission. Entries pars[0]
        through pars[4] are read, so len(pars) must be at least 5
        (the original docstring said 4, but five values are indexed
        below).
    offAxis: float
        Off-axis angle in deg.

    Returns
    -------
    float
        Telescope transmission.
    """
    _degToRad = math.pi / 180.0
    # pars[1] == 0 selects a flat (angle-independent) transmission.
    if pars[1] == 0:
        return pars[0]
    else:
        t = math.sin(offAxis * _degToRad) / (pars[3] * _degToRad)
        return pars[0] / (1.0 + pars[2] * t ** pars[4])
def guarantee_trailing_slash(directory_name: str) -> str:
    """Adds a trailing slash when missing

    Params:
        :directory_name: str, required
            A directory name to add trailing slash if missing
    Returns:
        A post processed directory name with trailing slash
    """
    has_slash = directory_name.endswith('/')
    return directory_name if has_slash else directory_name + '/'
def pretty_print_large_number(number):
    """Given a large number, it returns a string of the sort: '10.5 Thousand' or '12.3 Billion'. """
    text = str(number).ljust(12)
    # (lower bound, upper bound, label) for each named scale; numbers
    # outside every range (including <= 0 and >= 1e15) get no suffix.
    scales = (
        (1e3, 1e6, "Thousand"),
        (1e6, 1e9, "Million"),
        (1e9, 1e12, "Billion"),
        (1e12, 1e15, "Trillion"),
    )
    for low, high, label in scales:
        if low <= number < high:
            text += " (%3.1f %s)" % (number * 1.0 / low, label)
            break
    return text
def get_defense_type(action: int, game_config) -> int:
    """
    Utility method for getting the defense type of action-id

    :param action: action-id
    :param game_config: game configuration
    :return: action type
    """
    num_types = game_config.num_attack_types + 1  # +1 for detection
    return action % num_types
def plot3dOnFigure(ax, pixels, colors_rgb,axis_labels=list("RGB"), axis_limits=((0, 255), (0, 255), (0, 255))):
    """Plot pixels in 3D.

    Scatters every pixel of a (H, W, 3) image on a 3D Axes, coloring
    each point with the corresponding entry of colors_rgb. Mutates and
    returns the given Axes.

    :param ax: 3D matplotlib Axes to draw on
    :param pixels: array of shape (H, W, 3) with the point coordinates
        (one axis per channel) -- implied by the [:, :, i] indexing
    :param colors_rgb: array reshapeable to (H*W, 3) of per-point colors
    :param axis_labels: labels for the three axes (default "R","G","B")
    :param axis_limits: (min, max) pair per axis
    :return: the same Axes, for chaining
    """
    # Set axis limits
    ax.set_xlim(*axis_limits[0])
    ax.set_ylim(*axis_limits[1])
    ax.set_zlim(*axis_limits[2])
    # Set axis labels and sizes
    ax.tick_params(axis='both', which='major', labelsize=14, pad=8)
    ax.set_xlabel(axis_labels[0], fontsize=16, labelpad=16)
    ax.set_ylabel(axis_labels[1], fontsize=16, labelpad=16)
    ax.set_zlabel(axis_labels[2], fontsize=16, labelpad=16)
    # Plot pixel values with colors given in colors_rgb; ravel()
    # flattens each channel into one coordinate vector per axis.
    ax.scatter(
        pixels[:, :, 0].ravel(),
        pixels[:, :, 1].ravel(),
        pixels[:, :, 2].ravel(),
        c=colors_rgb.reshape((-1, 3)), edgecolors='none')
    return ax
def _get_controller_of(pod):
"""Get a pod's controller's reference.
This uses the pod's metadata, so there is no guarantee that
the controller object reference returned actually corresponds to a
controller object in the Kubernetes API.
Args:
- pod: kubernetes pod object
Returns: the reference to a controller object
"""
if pod["metadata"].get("ownerReferences"):
for owner_ref in pod["metadata"]["ownerReferences"]:
if owner_ref.get("controller"):
return owner_ref
return None | 9c9e58e2fc49729c618af2c5bb9b4d033d90a831 | 708,749 |
def parse_record1(raw_record):
    """Parse raw record and return it as a set of unique symbols without \n"""
    symbols = set(raw_record)
    symbols.discard("\n")
    return symbols
from operator import sub
def sub_fft(f_fft, g_fft):
    """Substraction of two polynomials (FFT representation).

    NOTE(review): operator.sub simply evaluates ``f_fft - g_fft``, so
    this is coefficient-wise only when the operands support elementwise
    ``-`` (e.g. numpy arrays). Plain Python lists would raise TypeError
    here -- confirm the expected operand type with the callers.
    """
    return sub(f_fft, g_fft)
import uuid
def generate_code() -> str:
    """Generates password reset code

    :return: Password reset code
    :rtype: str
    """
    reset_code = uuid.uuid4()
    return str(reset_code)
import hashlib
def calculate_file_sha256(file_path):
    """calculate file sha256 hash code.

    Reads the whole file into memory and returns the hex digest.
    """
    with open(file_path, 'rb') as fp:
        digest = hashlib.sha256(fp.read())
    return digest.hexdigest()
def Maj(x, y, z):
    """Bitwise majority function.

    Each output bit is set exactly when at least two of the corresponding
    input bits are set: Maj(x, y, z) = (x ∧ y) ⊕ (x ∧ z) ⊕ (y ∧ z).
    """
    xy = x & y
    xz = x & z
    yz = y & z
    return xy ^ xz ^ yz
def create_dictionary(timestamp, original_sentence, sequence_switched, err_message, suggestion_list):
    """Create Dictionary Function

    Generates and exports a dictionary object with relevant data for website
    interaction to take place.

    Args:
        timestamp: When the sentence was processed.
        original_sentence: The sentence as originally submitted.
        sequence_switched: The sentence with the suspect token masked.
        err_message: Description of the possible error found.
        suggestion_list: Candidate corrections; empty means no error found.

    Returns:
        dict: The populated result, or {} when there are no suggestions.
    """
    # No candidate corrections -> nothing to report.
    # (The previous version also built an unused "Possible error: ..."
    # string here; that dead assignment has been removed.)
    if not suggestion_list:
        return {}
    return {
        "timestamp": timestamp,
        "original_sentence": original_sentence,
        "masked_sentence": sequence_switched,
        "err_message": err_message,
        "possible_corrections": suggestion_list,
    }
def response_json(status, message, response):
    """Assemble an API response payload.

    :param status: status of the API call, either true or false
    :param message: user-friendly message
    :param response: data returned by the API
    :return: dict ready for JSON serialization
    """
    return {
        "status": status,
        "message": message,
        "response": response,
    }
def NDVI(R, NIR):
    """Compute the Normalized Difference Vegetation Index.

    INPUT : R (np.array) -> the Red band images as a numpy array of float
            NIR (np.array) -> the Near Infrared images as a numpy array of float
    OUTPUT : np.array -> the NDVI, (NIR - R) / (NIR + R); a tiny epsilon in
             the denominator guards against division by zero.
    """
    denominator = NIR + R + 1e-12
    return (NIR - R) / denominator
import re
def convert_check_filter(tok):
    """Convert an input string into a filter function.

    The filter function accepts a qualified python identifier string
    and returns a bool.

    The input can be a regexp or a simple string. A simple string must
    match a component of the qualified name exactly. A regexp is
    matched against the entire qualified name.

    Matches are case-insensitive.

    Examples::

        convert_check_filter('foo')('a.foo.b') == True
        convert_check_filter('foo')('a.foobar') == False
        convert_check_filter('foo.*')('a.foobar') == False
        convert_check_filter('foo.*')('foobar') == True
    """
    tok = tok.lower()
    # Presence of a regex quantifier marks the token as a pattern.
    if '*' in tok or '+' in tok:
        return re.compile(tok, re.I).match
    parts = tok.split('.')
    window = len(parts)
    def func(name):
        # Slide a window over the dotted components; a too-short slice
        # can never equal `parts`, so no explicit length check is needed.
        pieces = name.lower().split('.')
        return any(pieces[i:i + window] == parts for i in range(len(pieces)))
    return func
import torch
def ppg_acoustics_collate(batch):
    """Zero-pad the PPG and acoustic sequences in a mini-batch.

    Also creates the stop token mini-batch.

    Args:
        batch: An array with B elements, each is a tuple (PPG, acoustic).
        Consider this is the return value of [val for val in dataset], where
        dataset is an instance of PPGSpeechLoader.

    Returns:
        ppg_padded: A (batch_size, feature_dim_1, num_frames_1) tensor.
        input_lengths: A batch_size array, each containing the actual length
        of the input sequence.
        acoustic_padded: A (batch_size, feature_dim_2, num_frames_2) tensor.
        gate_padded: A (batch_size, num_frames_2) tensor. If "1" means reaching
        stop token. Currently assign "1" at the last frame and the padding.
        output_lengths: A batch_size array, each containing the actual length
        of the output sequence.
    """
    # Right zero-pad all PPG sequences to max input length.
    # x is (PPG, acoustic), x[0] is PPG, which is an (L(varied), D) tensor.
    # Sort samples by PPG length, longest first — presumably so a packed
    # RNN can consume the batch downstream; confirm with caller.
    input_lengths, ids_sorted_decreasing = torch.sort(
        torch.LongTensor([x[0].shape[0] for x in batch]), dim=0,
        descending=True)
    max_input_len = input_lengths[0]
    ppg_dim = batch[0][0].shape[1]
    # Buffer is allocated at the longest length and zero-filled; shorter
    # sequences keep trailing zeros as padding.
    ppg_padded = torch.FloatTensor(len(batch), max_input_len, ppg_dim)
    ppg_padded.zero_()
    for i in range(len(ids_sorted_decreasing)):
        curr_ppg = batch[ids_sorted_decreasing[i]][0]
        ppg_padded[i, :curr_ppg.shape[0], :] = curr_ppg

    # Right zero-pad acoustic features.
    feat_dim = batch[0][1].shape[1]
    max_target_len = max([x[1].shape[0] for x in batch])
    # Create acoustic padded and gate padded
    acoustic_padded = torch.FloatTensor(len(batch), max_target_len, feat_dim)
    acoustic_padded.zero_()
    gate_padded = torch.FloatTensor(len(batch), max_target_len)
    gate_padded.zero_()
    output_lengths = torch.LongTensor(len(batch))
    for i in range(len(ids_sorted_decreasing)):
        curr_acoustic = batch[ids_sorted_decreasing[i]][1]
        acoustic_padded[i, :curr_acoustic.shape[0], :] = curr_acoustic
        # Stop token is "1" from the last real frame through the padding.
        gate_padded[i, curr_acoustic.shape[0] - 1:] = 1
        output_lengths[i] = curr_acoustic.shape[0]

    # Transpose to channel-first layout: (batch, feature_dim, num_frames).
    ppg_padded = ppg_padded.transpose(1, 2)
    acoustic_padded = acoustic_padded.transpose(1, 2)

    return ppg_padded, input_lengths, acoustic_padded, gate_padded,\
        output_lengths
def is_associative(value):
    """Checks if `value` is an associative object meaning that it can be
    accessed via an index or key

    Args:
        value (mixed): Value to check.

    Returns:
        bool: Whether `value` is associative.

    Example:

        >>> is_associative([])
        True
        >>> is_associative({})
        True
        >>> is_associative(1)
        False
        >>> is_associative(True)
        False

    .. versionadded:: 2.0.0
    """
    try:
        value.__getitem__
    except AttributeError:
        return False
    return True
def csi_from_sr_and_pod(success_ratio_array, pod_array):
    """Computes CSI (critical success index) from success ratio and POD.

    POD = probability of detection. Uses the identity
    CSI = (SR^-1 + POD^-1 - 1)^-1, applied element-wise.

    :param success_ratio_array: np array (any shape) of success ratios.
    :param pod_array: np array (same shape) of POD values.
    :return: np array (same shape) of CSI values.
    """
    reciprocal_sum = success_ratio_array ** -1 + pod_array ** -1 - 1.
    return reciprocal_sum ** -1
def get_account_number(arn):
    """Extract the account number from an ARN.

    :param arn: IAM SSL arn (colon-delimited; the account id is field 5)
    :return: account number associated with ARN
    """
    fields = arn.split(":")
    return fields[4]
def all_equal(values: list):
    """Check that all values in given list are equal.

    An empty list is considered vacuously equal (returns True).
    """
    it = iter(values)
    try:
        first = next(it)
    except StopIteration:
        return True
    return all(item == first for item in it)
def _tester(func, *args):
"""
Tests function ``func`` on arguments and returns first positive.
>>> _tester(lambda x: x%3 == 0, 1, 2, 3, 4, 5, 6)
3
>>> _tester(lambda x: x%3 == 0, 1, 2)
None
:param func: function(arg)->boolean
:param args: other arguments
:return: something or none
"""
for arg in args:
if arg is not None and func(arg):
return arg
return None | 035c8bf68b4ff7e4fbdb7ed1b2601f04110287d8 | 708,787 |
import math
def entropy(data):
    """
    Compute the Shannon entropy (in nats), a measure of uncertainty.

    Args:
        data: sequence of non-negative counts/frequencies.

    Returns:
        The entropy of the normalized distribution, or ``None`` when the
        input is empty or contains no observations (all counts zero).
    """
    if len(data) == 0:
        return None
    n = sum(data)
    if n == 0:
        # No observations: entropy is undefined (previously crashed with
        # ZeroDivisionError).
        return None
    # Skip zero counts: lim f->0 of f*log(f) is 0, and math.log(0) would
    # raise a ValueError.
    return -sum((i / n) * math.log(i / n) for i in data if i)
def indexate(points):
    """
    Create an array of unique points and indexes into this array.

    Arguments:
        points: A sequence of 3-tuples

    Returns:
        A tuple of indices and a tuple of unique 3-tuples, ordered by
        first appearance.
    """
    lookup = {}
    index_list = []
    for point in points:
        key = tuple(point)
        # setdefault assigns the next fresh index on first sight.
        index_list.append(lookup.setdefault(key, len(lookup)))
    unique = tuple(sorted(lookup, key=lookup.get))
    return tuple(index_list), unique
import re
def install_package_family(pkg):
    """
    :param: pkg ie asr900rsp2-universal.03.13.03.S.154-3.S3-ext.bin
    :return: device_type of the installed image ie asr900, or None when
             the name does not contain an "asr<digits>" prefix
    """
    match = re.search(r'(asr\d+)\w*', pkg)
    return match.group(1) if match else None
def recipe_clone_message(recipe):
    """
    Renders the recipe clone message.
    """
    return {"recipe": recipe}
def get_drawdowns(cum_returns):
    """
    Computes the drawdowns of the cumulative returns.

    Drops null entries, then measures each value's shortfall from the
    running maximum (high-water mark).

    Parameters
    ----------
    cum_returns : Series or DataFrame, required
        a Series or DataFrame of cumulative returns

    Returns
    -------
    Series or DataFrame
    """
    cum_returns = cum_returns[cum_returns.notnull()]
    running_peak = cum_returns.expanding().max()
    return cum_returns / running_peak - 1
import torch
def make_coordinate_grid(spatial_size, type):
    """
    Create a meshgrid [-1,1] x [-1,1] of given spatial_size.

    Returns an (h, w, 2) tensor whose last dimension holds (x, y)
    coordinates, each normalized to [-1, 1].
    """
    h, w = spatial_size
    xs = torch.arange(w).type(type)
    ys = torch.arange(h).type(type)
    # Map pixel indices [0, n-1] onto [-1, 1].
    xs = 2 * (xs / (w - 1)) - 1
    ys = 2 * (ys / (h - 1)) - 1
    grid_x = xs.view(1, -1).repeat(h, 1)
    grid_y = ys.view(-1, 1).repeat(1, w)
    return torch.stack((grid_x, grid_y), dim=2)
def drop_duplicates(df):
    """Drop duplicate rows and reindex.

    Args:
        df (pd.DataFrame): Dataframe.

    Returns:
        pd.DataFrame: Dataframe without duplicate rows and with a fresh
        0..n-1 index.

    Examples:
        >>> df = pd.DataFrame({'letters':['b','b','c'], 'numbers':[2,2,3]})
        >>> drop_duplicates(df)
          letters  numbers
        0       b        2
        1       c        3
    """
    deduplicated = df.drop_duplicates()
    return deduplicated.reset_index(drop=True)
def compute_acc_bin(conf_thresh_lower, conf_thresh_upper, conf, pred, true):
    """
    Computes accuracy and average confidence for one confidence bin.

    A sample falls in the bin when its confidence is in
    (conf_thresh_lower, conf_thresh_upper].

    Args:
        conf_thresh_lower (float): Lower Threshold of confidence interval
        conf_thresh_upper (float): Upper Threshold of confidence interval
        conf (numpy.ndarray): list of confidences
        pred (numpy.ndarray): list of predictions
        true (numpy.ndarray): list of true labels

    Returns:
        (accuracy, avg_conf, len_bin): accuracy of bin, confidence of bin
        and number of elements in bin; (0, 0, 0) when the bin is empty.
    """
    in_bin = [(p, t, c) for p, t, c in zip(pred, true, conf)
              if conf_thresh_lower < c <= conf_thresh_upper]
    if not in_bin:
        return 0, 0, 0
    len_bin = len(in_bin)
    correct = sum(1 for p, t, _ in in_bin if p == t)
    avg_conf = sum(c for _, _, c in in_bin) / len_bin
    accuracy = float(correct) / len_bin
    return accuracy, avg_conf, len_bin
def _is_valid_requirement(requirement: str) -> bool:
"""Returns True is the `requirement.txt` line is valid."""
is_invalid = (
not requirement or # Empty line
requirement.startswith('#') or # Comment
requirement.startswith('-r ') # Filter the `-r requirement.txt`
)
return not is_invalid | 73b8ad139329698ad334b230cb04976db4ec05ba | 708,806 |
from typing import Union
from typing import Sequence
def wrap_singleton_string(item: Union[Sequence, str]):
    """Wrap a single string as a one-element list; pass sequences through.

    The isinstance check is necessary because a string is itself an
    iterable (of characters), which is not what callers want.
    """
    return [item] if isinstance(item, str) else item
def remove_multi_whitespace(string_or_list):
    """Collapse runs of whitespace into single spaces in extracted data.

    Args:
        string_or_list: A string, or an iterable of strings.

    Returns:
        The cleaned string, or a list of cleaned strings.
    """
    # isinstance instead of type(...) == str: correct for str subclasses
    # and the idiomatic type check.
    if isinstance(string_or_list, str):
        return ' '.join(string_or_list.split())
    return [' '.join(string.split()) for string in string_or_list]
import json
def writeJSONFile(filename,JSONDocument):
    """ Writes a JSON document to a named file under the ``data/`` directory.

    Parameters
    ----------
    filename : str
        name of the file (will be prefixed with ``data/``)
    JSONDocument : str
        JSON document to write to the file

    Returns
    -------
    True
    """
    target = 'data/' + filename
    with open(target, 'w') as handle:
        json.dump(JSONDocument, handle)
    return True
def get_monotask_from_macrotask(monotask_type, macrotask):
    """ Returns the first Monotask of the specified type from the provided
    Macrotask. Raises StopIteration when no monotask matches. """
    matches = (m for m in macrotask.monotasks if isinstance(m, monotask_type))
    return next(matches)
from typing import List
from typing import Tuple
def choose_page(btn_click_list: List[Tuple[int, str]]) -> str:
    """
    Given a list of tuples of (num_clicks, next_page), choose the next_page
    whose num_clicks is exactly 1.

    This helps decide which page to go to next when clicking on one of many
    buttons on a page: exactly one button is expected to have been clicked,
    giving a deterministic next page.

    :param btn_click_list: List of tuples of (num_clicks, next_page).
    :return: The id of the next page.
    :raises ValueError: when no entry has num_clicks == 1.
    """
    for num_clicks, next_page in btn_click_list:
        if num_clicks == 1:
            return next_page
    raise ValueError(
        "No clicks were detected, or the click list is misconfigured: {}".format(
            btn_click_list
        )
    )
def specific_kinetic_energy(particles):
    """
    Returns the specific kinetic energy (0.5 * |v|^2) of each particle in
    the set.

    >>> from amuse.datamodel import Particles
    >>> particles = Particles(2)
    >>> particles.vx = [1.0, 1.0] | units.ms
    >>> particles.vy = [0.0, 0.0] | units.ms
    >>> particles.vz = [0.0, 0.0] | units.ms
    >>> particles.mass = [1.0, 1.0] | units.kg
    >>> particles.specific_kinetic_energy()
    quantity<[0.5, 0.5] m**2 * s**-2>
    """
    speed_squared = particles.vx**2 + particles.vy**2 + particles.vz**2
    return 0.5 * speed_squared
def parse_loot_percentage(text):
    """Parse a loot percentage string into a fraction.

    E.g. ``"Roubo: 50%"`` becomes ``0.5``.
    """
    _, _, raw = text.partition(':')
    return float(raw.strip("%")) / 100
def major_minor_change(old_version, new_version):
    """Check if a major or minor version change occurred.

    Args:
        old_version: object with ``major`` and ``minor`` attributes.
        new_version: object with ``major`` and ``minor`` attributes.

    Returns:
        bool: True when the major or minor component differs.
    """
    # Return the boolean expression directly instead of an if/return-True
    # ladder.
    return (old_version.major != new_version.major
            or old_version.minor != new_version.minor)
def jsonify(records):
    """
    Parse asyncpg record response into JSON-serializable format
    (a list of plain dicts).
    """
    return [{key: value for key, value in record.items()} for record in records]
import ntpath
def path_leaf(path):
    """
    Extracts the file name from a given path, tolerating a trailing
    separator (``a/b/`` yields ``b``).

    :param str path: Path to extract the file name from
    :return str: File name
    """
    head, tail = ntpath.split(path)
    if tail:
        return tail
    return ntpath.basename(head)
def next_permutation(a):
    """Generate the lexicographically next permutation in place.

    https://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order

    Returns False (after resetting ``a`` to sorted order) if there is no
    next permutation, True otherwise.
    """
    n = len(a)
    # Find the largest pivot i such that a[i] < a[i + 1].
    i = n - 2
    while i >= 0 and a[i] >= a[i + 1]:
        i -= 1
    if i < 0:
        # Already the last permutation: wrap around to the first.
        a.reverse()
        return False
    # Find the largest j > i with a[i] < a[j]; the suffix is descending,
    # so scan from the right.
    j = n - 1
    while a[j] <= a[i]:
        j -= 1
    a[i], a[j] = a[j], a[i]
    # The suffix is still descending; reverse it to make it ascending.
    a[i + 1:] = reversed(a[i + 1:])
    return True
import itertools
import shlex
def combine_arg_list_opts(opt_args):
    """Helper for processing arguments like impalad_args. The input is a list of strings,
    each of which is the string passed into one instance of the argument, e.g. for
    --impalad_args="-foo -bar" --impalad_args="-baz", the input to this function is
    ["-foo -bar", "-baz"]. This function combines the argument lists by tokenising each
    string into separate arguments, if needed, e.g. to produce the output
    ["-foo", "-bar", "-baz"]"""
    tokens = []
    for arg in opt_args:
        tokens.extend(shlex.split(arg))
    return tokens
def path_to_filename(username, path_to_file):
    """Convert a path such as path/to/file.txt to a flat filename,
    i.e. ``<username>_path_to_file.txt``.

    Note: the result is also printed to stdout, matching the original
    behavior.
    """
    flattened = f"{username}_{path_to_file}".replace('/', '_')
    print(flattened)
    return flattened
def parse_hostportstr(hostportstr):
    """ Parse hostportstr like 'xxx.xxx.xxx.xxx:xxx' into (host, port).

    The port portion is converted to int.
    """
    parts = hostportstr.split(':')
    return parts[0], int(parts[1])