content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def count_partitions(n, m):
    """Count the partitions of n using parts up to size m.

    >>> count_partitions(6, 4)
    9
    >>> count_partitions(10, 10)
    42
    """
    # Base cases: an exact fit counts as one partition; overshooting n or
    # running out of part sizes contributes nothing.
    if n == 0:
        return 1
    if n < 0 or m == 0:
        return 0
    # Either use one part of size m, or forbid parts of size m entirely.
    return count_partitions(n - m, m) + count_partitions(n, m - 1)
import unicodedata
def has_alphanum(s):
    """
    Return True if s has at least one alphanumeric character in any language.
    See https://en.wikipedia.org/wiki/Unicode_character_property#General_Category
    """
    # Letters and numbers have Unicode major categories 'L' and 'N';
    # any() short-circuits on the first hit, like the original loop.
    return any(unicodedata.category(ch)[0] in ('L', 'N') for ch in s)
def is_remote(path):
    """Determine whether a file is in a remote location (which can be handled) based on prefix of connection string."""
    # str.startswith accepts a tuple of candidate prefixes.
    return path.startswith(("s3://", "http://", "https://"))
def route_distance(route):
    """Return the total distance traveled for a given closed tour.

    route: sequence of nodes traveled; the start node is NOT repeated at
    the end — the wrap-around leg is added automatically.
    """
    total = 0
    # Start from the last node so the tour closes back on itself.
    previous = route[-1]
    for current in route:
        total += current.euclidean_dist(previous)
        previous = current
    return total
import os
def wdirectory(path):
    """
    Change the work directory for a specific path of the data
    ___
    path: string, data path in the system

    Note: ``os.chdir`` returns ``None``, so this wrapper also returns
    ``None``; it is called for its side effect only.
    """
    return os.chdir(path)
def hello(world):
    """Return the greeting string "Hello, <world>!"."""
    return f"Hello, {world}!"
def rob(nums):
    """House-robber DP: maximum sum of non-adjacent elements.

    :type nums: List[int]
    :rtype: int
    """
    if not nums:
        return 0
    if len(nums) == 1:
        return nums[0]
    # best[i] = maximum loot using houses 0..i
    best = [nums[0], max(nums[0], nums[1])]
    for value in nums[2:]:
        best.append(max(value + best[-2], best[-1]))
    return best[-1]
def update_datapackage(datapackage, mappings):
    """Update the field names and delete the `maps_to` properties."""
    for index, resource in enumerate(datapackage['resources']):
        mapping = mappings[index]
        updated_fields = []
        for field in resource['schema']['fields']:
            fiscal_key = mapping[field['name']]
            # Only rename fields that map to a real fiscal key.
            if fiscal_key not in ('_unknown', '_ignored'):
                field['name'] = fiscal_key
            del field['maps_to']
            if 'translates_to' in field:
                del field['translates_to']
            updated_fields.append(field)
        resource['schema']['fields'] = updated_fields
    return datapackage
def getSourceUrls(db):
    """Fetch source URLs of articles that have not been crawled yet.

    :param db: open DB-API connection (must provide ``cursor()``)
    :return: list of ``url_source`` strings with no matching row in ``source``
    """
    # Anti-join: keep url_source values that have no counterpart in `source`.
    sql = """
    SELECT DISTINCT
        re_article_source.url_source
    FROM
        re_article_source
    LEFT JOIN source ON re_article_source.url_source = source.url
    WHERE
        source.url IS NULL
    """
    # Earlier variant joined on url_article instead of url_source:
    # sql = 'SELECT DISTINCT re_article_source.url_source FROM re_article_source LEFT JOIN source ON re_article_source.url_article=source.url WHERE source.url is NULL'
    curr = db.cursor()
    curr.execute(sql)
    urls = []
    for data in curr.fetchall():
        url = data[0]  # each row is a 1-tuple
        urls.append(url)
    return urls
def function_f1a(x):
    """Function with one argument, returning one value.

    :type x: types.IntType
    :rtype: types.StringType
    """
    return f'{x}'
def _pos_from_before_after(
    before: int, after: int, length: int, base0: bool
) -> int:
    """Get the position to insert from before and after"""
    has_before = before is not None
    has_after = after is not None
    if has_before and has_after:
        raise ValueError("Can't specify both `_before` and `_after`.")
    # Neither given: append at the end.
    if not has_before and not has_after:
        return length
    if has_after:
        return position_after(after, length, base0)
    return position_at(before, length, base0)
def generate_ansible_coverage_config():  # type: () -> str
    """Generate code coverage configuration for Ansible tests.

    Returns the text of a coverage.py "rc" configuration enabling branch
    coverage with multiprocessing support and omitting system/site
    packages plus generated AnsiballZ wrappers.
    """
    coverage_config = '''
[run]
branch = True
concurrency = multiprocessing
parallel = True
omit =
    */python*/dist-packages/*
    */python*/site-packages/*
    */python*/distutils/*
    */pyshared/*
    */pytest
    */AnsiballZ_*.py
    */test/results/*
'''
    return coverage_config
def mel_to_hz(mel):
    """Convert a mel-scale value to Hz.

    Inverse of the hz->mel mapping from Young et al., "The HTK book",
    Chapter 5.4.
    """
    ratio = 10.0 ** (mel / 2595.0)
    return 700.0 * (ratio - 1.0)
def generate_resource_link(pid, resource_path, static=False, title=None):
    """
    Returns a valid html link to a public resource within an autogenerated instance.

    Args:
        pid: the problem id
        resource_path: the resource path
        static: boolean whether or not it is a static resource
        title: the displayed text. Defaults to the path
    Returns:
        The html link to the resource.
    """
    static_flag = "true" if static else "false"
    display_text = title if title else resource_path
    return (
        f'<a target=_blank href="/api/autogen/serve/{resource_path}'
        f'?static={static_flag}&pid={pid}">{display_text}</a>'
    )
def F_to_C(Tf):
    """Convert a temperature from Fahrenheit to Celsius."""
    # C = (F - 32) * 5/9; true division keeps the float result.
    return (Tf - 32) * 5 / 9
import torch
def _check_tensor_info(*tensors, size, dtype, device):
    """Check if sizes, dtypes, and devices of input tensors all match prescribed values.

    Non-tensor entries in ``tensors`` are silently ignored.  Returns the
    agreed ``(size, dtype, device)`` triple, with ``size`` normalised to a
    plain tuple.
    """
    tensors = list(filter(torch.is_tensor, tensors))
    # No tensors and no explicit dtype/device: fall back to torch defaults.
    if dtype is None and len(tensors) == 0:
        dtype = torch.get_default_dtype()
    if device is None and len(tensors) == 0:
        device = torch.device("cpu")
    # Gather every candidate value; each list must be internally consistent.
    sizes = [] if size is None else [size]
    sizes += [t.shape for t in tensors]
    dtypes = [] if dtype is None else [dtype]
    dtypes += [t.dtype for t in tensors]
    devices = [] if device is None else [device]
    devices += [t.device for t in tensors]
    if len(sizes) == 0:
        raise ValueError(f"Must either specify `size` or pass in `W` or `H` to implicitly define the size.")
    if not all(i == sizes[0] for i in sizes):
        raise ValueError(f"Multiple sizes found. Make sure `size` and `W` or `H` are consistent.")
    if not all(i == dtypes[0] for i in dtypes):
        raise ValueError(f"Multiple dtypes found. Make sure `dtype` and `W` or `H` are consistent.")
    if not all(i == devices[0] for i in devices):
        raise ValueError(f"Multiple devices found. Make sure `device` and `W` or `H` are consistent.")
    # Make sure size is a tuple (not a torch.Size) for neat repr-printing purposes.
    return tuple(sizes[0]), dtypes[0], devices[0]
def moderator_name():
    """Return the name of the test game moderator."""
    # Fixed fixture value used by the tests.
    return 'Hannah'
def reversebits2(max_bits, num):
    """ Like reversebits1, plus small optimization regarding bit index
    calculation. """
    result = 0
    # Walk index pairs from both ends toward the middle, swapping bits.
    # For odd widths the middle bit is written twice, which is harmless.
    half = (max_bits + 1) // 2
    for low, high in zip(range(half), range(max_bits - 1, -1, -1)):
        low_bit = (num >> low) & 1
        high_bit = (num >> high) & 1
        result |= low_bit << high
        result |= high_bit << low
    return result
def complex_to_xy(complex_point):
    """Turn a complex point (x+yj) into a cartesian point [x, y]."""
    return [complex_point.real, complex_point.imag]
from typing import Union
def permission_confirm(perm_key_pair: list) -> Union[bool, str, None]:
    """Converts string versions of bool inputs to raw bool values."""
    value = perm_key_pair[1].strip()
    # Unrecognised values deliberately fall back to the *string* 'None'.
    lookup = {'true': True, 'false': False, 'none': None}
    return lookup.get(value, 'None')
from typing import Any
import json
def _type_cast(type_cast: Any, content_to_typecast: bytes, func_dict: dict) -> Any:
    """
    Basis for type casting on the server
    If testing, replace `func_dict` with a dummy one
    Currently NOT guarenteed to return, please remember to change this API

    :param type_cast: target type (bytes, str, int, float, list, dict or None)
    :param content_to_typecast: raw bytes to convert
    :param func_dict: metadata dict; only its 'name' key is used in errors
    """
    # bytes (and None, below) pass through untouched.
    if type_cast == bytes:
        return content_to_typecast
    if type_cast == str:
        try:
            typecasted_content = content_to_typecast.decode()
            return typecasted_content  # Remember to change this, but I"m lazy rn
        except UnicodeDecodeError as e:
            # Re-raise as TypeError so callers get one exception type.
            raise TypeError(
                f"Type casting from bytes to string failed for function "
                f"\"{func_dict['name']}\"\n{str(e)}"
            ) from UnicodeDecodeError
    elif type_cast == int:
        try:
            typecasted_content = int(content_to_typecast)
            return typecasted_content  # Remember to change this, but I"m lazy rn
        except ValueError as e:
            raise TypeError(
                f"Type casting from bytes to int failed for function "
                f"\"{func_dict['name']}\":\n           {e}"
            ) from ValueError
    elif type_cast == float:
        try:
            typecasted_content = float(content_to_typecast)
            return typecasted_content  # Remember to change this, but I"m lazy rn
        except ValueError as e:
            raise TypeError(
                f"Type casting from bytes to float failed for function "
                f"\"{func_dict['name']}\":\n           {e}"
            ) from ValueError
    elif type_cast is None:
        return content_to_typecast
    # list and dict share the JSON-decoding path.
    for _type in [list, dict]:
        if type_cast == _type:
            try:
                typecasted_content = json.loads(content_to_typecast)
                return typecasted_content
            except UnicodeDecodeError:
                raise TypeError(
                    f"Cannot decode message data during "
                    f"bytes->{_type.__name__} type cast"
                    "(current implementation requires string to "
                    "type cast, not bytes)"
                ) from UnicodeDecodeError
            except ValueError:
                # json.JSONDecodeError subclasses ValueError.
                raise TypeError(
                    f"Type casting from bytes to {_type.__name__} "
                    f"failed for function \"{func_dict['name']}\""
                    f":\n           Message is not a {_type.__name__}"
                ) from ValueError
            except Exception as e:
                raise TypeError(
                    f"Type casting from bytes to {_type.__name__} "
                    f"failed for function \"{func_dict['name']}\""
                    f":\n           {e}"
                ) from type(e)
import os
def directory_structure_to_front_matter(file_path: str) -> dict[str, str]:
    """
    Converts the directory structure of a recipe into a front matter.
    """
    # Normalise, then read the last three path components as
    # meal/difficulty/filename (starred unpacking discards any prefix).
    *_, meal, difficulty, recipe_filename = os.path.normpath(file_path).split(os.sep)
    front_matter = {
        "layout": "recipe",
        "difficulties": difficulty,
        "meals": meal,
        "originalfilename": recipe_filename,
        "originalpath": os.path.join(meal, difficulty, recipe_filename),
    }
    return front_matter
def is_gradle_build_python_test(file_name):
    """
    Return True if file_name is one of the python tests run during the
    gradle build, False otherwise.

    :param file_name: file to test
    """
    gradle_python_tests = ("gen_all.py", "test_gbm_prostate.py", "test_rest_api.py")
    return file_name in gradle_python_tests
import requests
def patch_get(monkeypatch, mockresponse):
    """monkeypatch the requests.get function to return response dict for API calls. succesful API responses come from Tradier website.

    :param mockresponse: factory producing a fake response object
    :yield: the PatchGet helper class (pytest fixture generator)
    """
    class PatchGet:
        # Wraps a mocked response and installs the patch on construction.
        def __init__(self, status, response_json_path):
            self.mocked = mockresponse(status, response_json_path)
            self.setter()

        def mock_get(self, url, params, headers):
            # Signature mirrors the requests.get call sites under test.
            return self.mocked

        def setter(self):
            monkeypatch.setattr(requests, "get", self.mock_get)

    # Yielded (not returned) so this works as a pytest fixture generator.
    yield PatchGet
def addr(arr):
    """Get the memory address of a numpy array's data buffer."""
    # __array_interface__['data'] is a (pointer, read_only_flag) pair.
    data_pointer, _read_only = arr.__array_interface__['data']
    return data_pointer
def zipper(sequence):
    """Given a sequence return a list that has the same length as the original
    sequence, but each element is now a tuple pairing an integer index with
    the original element of the sequence.

    Bug fix: in Python 3, ``zip`` returns a lazy iterator, so the old code
    did not return a list with a length as promised; the result is now
    materialised with ``list``.  ``enumerate`` replaces the manual
    ``zip(range(n), sequence)``.
    """
    return list(enumerate(sequence))
import re
import argparse
def soundtone_type(value):
    """
    Parse tone sounds parameters from args.
    value: 'square:90hz,10s,100%'
    returns: {'waveform': 'square', 'frequency': 90, 'duration': 10, 'amplitude': 1.0}
    """
    unit_to_name = {"hz": "frequency", "%": "amplitude", "s": "duration"}
    waveform, generator_raw_params = value.lower().split(":", 1)
    parsed = {"waveform": waveform}
    for token in generator_raw_params.split(","):
        match = re.match(r"(\d+)(\D+)$", token)
        if match is None:
            raise argparse.ArgumentTypeError(
                f"invalid tone parameter, format: '{generator_raw_params}'.")
        name = unit_to_name[match.group(2)]
        amount = int(match.group(1))
        # Amplitude is given as a percentage; store it as a 0..1 fraction.
        parsed[name] = amount / 100 if name == "amplitude" else amount
    return parsed
def stitch_frame(frames, _):
    """
    Stitching for single frame: simply returns the first frame in the
    frames list.  The second argument is accepted (and ignored) so the
    signature matches other stitchers.
    """
    first_frame = frames[0]
    return first_frame
def margin_to_brightness(margin, max_lead=30, pct_pts_base=0):
    """Tweak max_lead and pct_pts_base to get the desired brightness range."""
    scaled = (abs(margin) / max_lead) * 100
    return int(scaled) + pct_pts_base
def bring_contact_bonus_list(pb_client, obj_pb_ids, arm_pb_id, table_pb_id):
    """ For some bring goals, may be useful to also satisfy an object touching table and
    not touching arm condition. """
    results = []
    for obj_id in obj_pb_ids:
        # Both queries are issued unconditionally, matching the original.
        touches_arm = len(pb_client.getContactPoints(obj_id, arm_pb_id)) > 0
        touches_table = len(pb_client.getContactPoints(obj_id, table_pb_id)) > 0
        # Correct contact: on the table but clear of the arm/end-effector.
        results.append(not touches_arm and touches_table)
    return results
import os
def open_expand(file_path, *args, **kwargs):
    """
    Open a file, allowing '~' (home directory) in file_path.
    All other arguments are forwarded to the builtin open().
    """
    expanded = os.path.expanduser(file_path)
    return open(expanded, *args, **kwargs)
def snakify(str_: str) -> str:
    """Convert a string to snake case

    Args:
        str_: The string to convert
    """
    # Lowercasing first and replacing spaces afterwards yields the same
    # result as the reverse order.
    return str_.lower().replace(" ", "_")
def bibtexNoteszotero(bibtex_names):
    """Build a Zotero note item from parsed bibtex data.

    params:
        bibtex_names, {} — must contain a 'notes' string
    return: notes_dict, {} — Zotero-style note item
    """
    return {
        "itemType": "note",
        "relations": {},
        "tags": [],
        # Whitespace around the note text is stripped.
        "note": bibtex_names["notes"].strip(),
    }
import time
import os
def get_result_filename(params, commit=''):
    """Build a timestamped result-file path from run hyper-parameters.

    :param params: dict with keys 'test_save_dir', 'batch_size', 'epochs',
        'max_dec_len' and 'embed_size'
    :param commit: optional suffix (e.g. a commit hash) appended to the name
    :return: full path of the csv file to write results to
    """
    save_result_dir = params['test_save_dir']
    batch_size = params['batch_size']
    epochs = params['epochs']
    # Bug fix: these two previously evaluated to the literal lists
    # ['max_dec_len'] / ['embed_size'] instead of looking up params.
    max_length_inp = params['max_dec_len']
    embedding_dim = params['embed_size']
    now_time = time.strftime('%Y_%m_%d_%H_%M_%S')
    filename = now_time + '_batch_size_{}_epochs_{}_max_length_inp_{}_embedding_dim_{}{}.csv'.format(
        batch_size, epochs, max_length_inp, embedding_dim, commit)
    result_save_path = os.path.join(save_result_dir, filename)
    return result_save_path
import math
def Norm(x, y):
    """Return the Euclidean norm (length) of the 2-D vector (x, y)."""
    squared_length = math.pow(x, 2) + math.pow(y, 2)
    return math.pow(squared_length, 0.5)
def demandNameItem(listDb, phrase2, mot):
    """
    Append the database names of all items to a string, comma-separated.

    listDb: list with database name of all items
    phrase2: string prefix the names are appended to
    mot: scratch variable in the original implementation; its input value
         is never used
    return: phrase2 followed by the names joined with ','
    """
    # Equivalent to the original element-by-element loop: join handles the
    # "no comma after the last element" rule, and an empty list adds nothing.
    return phrase2 + ','.join(str(name) for name in listDb)
def escape_yaml(data: str) -> str:
    """
    Jinja2 filter that escapes strings for yaml by doubling every `$`.
    """
    return "$$".join(data.split("$"))
from datetime import datetime
def parse(s):
    """ Date parsing tool.
    Changing the formats here causes a change in the whole application.

    Returns the first matching datetime, or None when no format matches.
    """
    formats = ['%Y-%m-%dT%H:%M:%S.%fZ', '%d/%m/%Y %H:%M:%S', '%d/%m/%Y%H:%M:%S',
               '%d/%m/%Y', '%H:%M:%S']
    for fmt in formats:
        try:
            return datetime.strptime(s, fmt)
        except ValueError:
            continue
    return None
def get_loader():
    """Returns torch.utils.data.DataLoader for custom Pypipes dataset. """
    # Placeholder: no loader has been implemented yet.
    return None
def replace_text_comment(comments, new_text):
    """Replace "# text = " comment (if any) with one using new_text instead."""
    # Newlines cannot be represented in a single-line comment.
    sanitized = new_text.replace('\n', ' ').strip(' ')
    replacement = '# text = {}'.format(sanitized)
    updated, found = [], False
    for comment in comments:
        if comment.startswith('# text ='):
            updated.append(replacement)
            found = True
        else:
            updated.append(comment)
    # No existing "# text =" comment: append one.
    if not found:
        updated.append(replacement)
    return updated
from typing import Optional
import binascii
def hex_xformat_decode(s: str) -> Optional[bytes]:
    """
    Reverse :func:`hex_xformat_encode`.

    The parameter is a hex-encoded BLOB like

    .. code-block:: none

        "X'CDE7A24B1A9DBA3148BCB7A0B9DA5BB6A424486C'"

    Strings like ``X'01FF'`` denote hex-encoded BLOBs (SQLite notation,
    https://sqlite.org/lang_expr.html).  Strip the ``X'`` prefix and the
    trailing quote, then decode the hex digits; return None for anything
    that is not in this format.
    """
    well_formed = len(s) >= 3 and s.startswith("X'") and s.endswith("'")
    if not well_formed:
        return None
    return binascii.unhexlify(s[2:-1])
import requests
import os
def _download(url, dest, timeout=30):
    """Simple HTTP/HTTPS downloader.

    :param url: source URL
    :param dest: destination file path (made absolute)
    :param timeout: per-request timeout in seconds
    :return: the absolute destination path
    """
    # Optional import: requests is not needed for local big data setup.
    dest = os.path.abspath(dest)
    # Stream in 16 KiB chunks so large bodies never sit fully in memory.
    with requests.get(url, stream=True, timeout=timeout) as r:
        with open(dest, 'w+b') as data:
            for chunk in r.iter_content(chunk_size=0x4000):
                data.write(chunk)
    return dest
def height(tree):
    """Return the height of tree (an empty tree has height 0)."""
    if tree.is_empty():
        return 0
    left_height = height(tree.left_child())
    right_height = height(tree.right_child())
    return 1 + max(left_height, right_height)
def max_power_rule(mod, g, tmp):
    """
    **Constraint Name**: DAC_Max_Power_Constraint
    **Enforced Over**: DAC_OPR_TMPS

    Power consumption cannot exceed capacity.
    """
    # Returns a model constraint expression: consumption at timepoint `tmp`
    # is limited by derated capacity in the period `tmp` belongs to.
    return (
        mod.DAC_Consume_Power_MW[g, tmp]
        <= mod.Capacity_MW[g, mod.period[tmp]] * mod.Availability_Derate[g, tmp]
    )
import requests
def get_mc_uuid(username):
    """Gets the Minecraft UUID for a username.

    :param username: Minecraft username to look up
    :return: the UUID string, or None if the response JSON lacks 'id'
    :raises ValueError: if the username does not exist
    """
    url = f"https://api.mojang.com/users/profiles/minecraft/{username}"
    res = requests.get(url)
    # Mojang returns 204 (no content) for unknown usernames.
    if res.status_code == 204:
        raise ValueError("Users must have a valid MC username")
    else:
        return res.json().get("id")
def api(default=None, api=None, **kwargs):
    """Returns the api instance in which this API function is being ran"""
    # Falsy `api` values (None, 0, '') fall back to `default`, exactly like
    # the original `api or default`.
    return default if not api else api
import hashlib
def file_md5(input_file):
    """
    :param input_file: Path to input file.
    :type input_file: str
    :return: MD5 digest of the file contents, in hexadecimal format.
    """
    digest = hashlib.md5()
    with open(input_file, 'rb') as handle:
        digest.update(handle.read())
    return digest.hexdigest()
from typing import Dict
def message_args() -> Dict[str, str]:
    """A formatted message."""
    subject = "Test message"
    body = "This is a test message"
    return {"subject": subject, "message": body}
def annualize_metric(metric: float, holding_periods: int = 1) -> float:
    """
    Annualize metric of arbitrary periodicity

    :param metric: Metric to analyze
    :param holding_periods: length of one holding period in days
    :return: Annualized metric
    """
    # Compound the per-period rate over (365 / holding_periods) periods.
    periods_per_year = 365 / holding_periods
    return (1 + metric) ** periods_per_year - 1
import argparse
import sys
def parse_args():
    """Command line arguments parser.

    :return: argparse.Namespace with in_chain, reference_2bit, query_2bit,
        output, linearGap and scoreScheme attributes
    """
    app = argparse.ArgumentParser()
    app.add_argument("in_chain", help="Input chain file or stdin")
    app.add_argument("reference_2bit", help="Reference 2bit file")
    app.add_argument("query_2bit", help="Query 2bit file")
    app.add_argument("output", help="Output chain or stdout")
    app.add_argument("-linearGap", choices=['loose', 'medium', 'filename'], help="loose|medium|filename")
    app.add_argument("-scoreScheme", help="Read the scoring matrix from a blastz-format file")
    # Print usage and exit(0) when the four required positionals are missing.
    if len(sys.argv) < 5:
        app.print_help()
        sys.exit(0)
    args = app.parse_args()
    return args
def read_gold_conll2003(gold_file):
    """
    Reads in the gold annotation from a file in CoNLL 2003 format.

    Returns:
    - gold: a String list containing one sequence tag per token.
            E.g. [B-Kochschritt, L-Kochschritt, U-Zutat, O]
    - lines: a list list containing the original line split at "\t"
    """
    gold, lines = [], []
    with open(gold_file, encoding="utf-8") as handle:
        for raw in handle:
            # Blank lines separate sentences; skip them.
            if raw == "\n":
                continue
            columns = raw.strip().split("\t")
            gold.append(columns[3])  # 4th column holds the sequence tag
            lines.append(columns)
    return gold, lines
def get_bsj(seq, bsj):
    """Return the sequence rotated so that position `bsj` (the back-splice
    junction) becomes the first element."""
    rotated = seq[bsj:] + seq[:bsj]
    return rotated
def get_from_address(sending_profile, template_from_address):
    """Get campaign from address."""
    # Display name is everything before '<' in the template, when present.
    if "<" in template_from_address:
        display_name = template_from_address.split("<")[0].strip()
    else:
        display_name = None
    # Local part (before '@') of the template sender address.
    sender = template_from_address.split("@")[0].split("<")[-1]
    # The sending profile may be a plain dict or an object with the attribute.
    if type(sending_profile) is dict:
        profile_from = sending_profile["from_address"]
    else:
        profile_from = sending_profile.from_address
    # Domain comes from the sending profile's own from-address.
    domain = profile_from.split("<")[-1].split("@")[1].replace(">", "")
    if display_name:
        return f"{display_name} <{sender}@{domain}>"
    return f"{sender}@{domain}"
import random
def d3():
    """Simulate the roll of a 3 sided die (uniform on 1..3 inclusive)."""
    return random.randint(1, 3)
def stop_tuning(step):
    """Stop tuning the current step method (recursively for compound steps)."""
    if hasattr(step, 'tune'):
        step.tune = False
    elif hasattr(step, 'methods'):
        # Compound step: disable tuning on every sub-method.
        step.methods = [stop_tuning(sub) for sub in step.methods]
    return step
import re
def parse_year(inp, option='raise'):
    """
    Attempt to parse a year out of a string.

    Parameters
    ----------
    inp : str
        String from which year is to be parsed
    option : str
        Return option:
        - "bool" will return True if year is found, else False.
        - Return year int / raise a RuntimeError otherwise

    Returns
    -------
    out : int | bool
        Year int parsed from inp,
        or boolean T/F (if found and option is bool).

    Examples
    --------
    >>> parse_year("NSRDB_2018.h5")
    2018
    >>> parse_year("NSRDB_2018.h5", option='bool')
    True
    >>> parse_year("NSRDB_TMY.h5", option='bool')
    False
    """
    # A 4-digit year 1000-2999: the char before must not be a digit, and the
    # char after must be end-of-string or a non-digit.
    pattern = r".*[^0-9]([1-2][0-9]{3})($|[^0-9])"
    found = re.match(pattern, inp)
    boolean_mode = 'bool' in option
    if found:
        return True if boolean_mode else int(found.group(1))
    if boolean_mode:
        return False
    raise RuntimeError('Cannot parse year from {}'.format(inp))
import os
import sys
def readable_dir(prospective_dir):
    """Check that a directory exists and is readable.

    :param prospective_dir: path to validate
    :return: the path unchanged, when valid and readable
    """
    # Note: exits the whole process (rather than raising) on failure.
    if not os.path.isdir(prospective_dir):
        sys.exit("{} is not a valid path".format(prospective_dir))
    if os.access(prospective_dir, os.R_OK):
        return prospective_dir
    else:
        sys.exit("{} is not a readable dir".format(prospective_dir))
def is_gzipped(filename):
    """Return True if the target filename looks like a GZIP'd file."""
    # A gzip stream starts with the two magic bytes 0x1f 0x8b.
    with open(filename, 'rb') as handle:
        magic = handle.read(2)
    return magic == b'\x1f\x8b'
def tag_helper(tag, items, locked=True, remove=False):
    """ Simple tag helper for editing a object. """
    # Accept a single item or a list of items.
    if not isinstance(items, list):
        items = [items]
    data = {}
    if remove:
        # Removal uses one key with a trailing '-' and a joined value list.
        data['%s[].tag.tag-' % tag] = ','.join(items)
    else:
        for index, item in enumerate(items):
            data['%s[%s].tag.tag' % (tag, index)] = item
    data['%s.locked' % tag] = 1 if locked else 0
    return data
def get_keys_from_file(csv):
    """Extract the credentials from a csv file.

    Expects the second line of the file to be a comma-separated record with
    the access key in column 2 and the secret key in column 3.

    :param csv: path to the credentials csv file
    :return: (access_key, secret_key) tuple
    """
    # Bug fix: the original `tuple(open(csv, 'r'))` leaked the file handle;
    # a context manager closes it deterministically.
    with open(csv, 'r') as handle:
        lines = handle.readlines()
    fields = lines[1].split(',')
    return fields[2], fields[3]
import os
def create_save_directory(path, directory_name):
    """
    This function makes the directory to save the data.

    Parameters
    ----------
    path : string
        Where the the directory_name will be.
    directory_name : string
        The directory name where the plots will be save

    Returns
    ----------
    succes : bool
        True if the directories were created successfully.
    """
    # Bug fix: the original joined the path with a hard-coded Windows '\\'
    # separator and only created the subdirectory when `path` itself did not
    # exist yet.  os.makedirs creates any missing intermediates portably.
    try:
        os.makedirs(os.path.join(path, directory_name))
        return True
    except OSError:
        print('Error creating directories')
        return False
from typing import Union
def metric_try_to_float(s: str) -> Union[float, str]:
    """
    Try to convert input string to float value.
    Return float value on success or input value on failure.
    """
    # Percentages drop their trailing character before conversion.
    candidate = s[:-1] if "%" in s else s
    try:
        return float(candidate)
    except ValueError:
        return str(s)
def jsonify(comment_lower: str) -> str:
    """pyNastran: SPOINT={'id':10, 'xyz':[10.,10.,10.]}"""
    # Take the right-hand side of the '=', then massage it toward JSON:
    # double quotes, trailing comma before '}' (collapsing doubles).
    rhs = comment_lower.split('=')[1].rstrip()
    json_like = rhs.replace("'", '"')
    json_like = json_like.replace('}', ',}')
    return json_like.replace(',,}', ',}')
import shutil
def disk_usage(pathname):
    """Return disk usage statistics for the given path.

    Returns a named tuple with the attributes total, used and free, all in
    bytes, e.g. usage(total=118013599744, used=63686647808, free=48352747520).
    """
    return shutil.disk_usage(pathname)
import os
import yaml
def get_default_log_config():
    """Get the default logging configuration.

    Returns:
        dict: The default logging configuration.
    """
    # logging.yaml ships alongside this module.
    root = os.path.dirname(__file__)
    config_file = os.path.join(root, "logging.yaml")
    with open(config_file, "r") as file_object:
        data = yaml.load(file_object, yaml.FullLoader)
    # Only the "logging" section of the yaml document is returned.
    return data["logging"]
import numbers
def ensure_r_vector(x):
    """Ensures that the input is rendered as a vector in R.

    It is way more complicated to define an array in R than in Python because an array
    in R cannot end with an comma.

    Examples
    --------
    >>> ensure_r_vector("string")
    "c('string')"
    >>> ensure_r_vector(1)
    'c(1)'
    >>> ensure_r_vector(list("abcd"))
    "c('a', 'b', 'c', 'd')"
    >>> ensure_r_vector((1, 2))
    'c(1, 2)'
    """
    if isinstance(x, str):
        return f"c('{x}')"
    if isinstance(x, numbers.Number):
        return f"c({x})"
    if isinstance(x, (tuple, list)):
        # Numbers are rendered bare; everything else is single-quoted.
        rendered = [str(e) if isinstance(e, numbers.Number) else f"'{e}'" for e in x]
        return "c({})".format(", ".join(rendered))
    raise NotImplementedError(
        f"'ensure_r_vector' is not defined for dtype {type(x)}"
    )
def output_results(results, way):
    """Helper method with most of the logic.

    results: sequence of flips (0 = heads, 1 = tails)
    way: callable that counts the tails in results (e.g. sum)
    """
    tails = way(results)
    heads = len(results) - tails
    names = ["Heads", "Tails"]
    listing = ", ".join(names[flip] for flip in results)
    return listing + f"\n{heads} Heads; {tails} Tails"
def evaluate_tuple(columns, mapper, condition):
    """Evaluate a condition against (columns, mapper).

    A tuple condition is (func, lhs, rhs) and is called as
    func(columns, mapper, lhs, rhs); a bare callable is called as
    condition(columns, mapper).
    """
    if isinstance(condition, tuple):
        func = condition[0]
        return func(columns, mapper, condition[1], condition[2])
    return condition(columns, mapper)
def step(init_distr, D):
    """Advance the distribution one step: replace each value with the
    result of calling the sampler D keyed by the current value.
    Mutates and returns init_distr.
    """
    for key in init_distr:
        # Keys are stable during the loop; only values change.
        init_distr[key] = D[init_distr[key]]()
    return init_distr
def _percentages(self):
"""
An extension method for Counter that
returns a dict mapping the keys of the Counter to their percentages.
:param self: Counter
:return: a dict mapping the keys of the Counter to their percentages
"""
# type: () -> dict[any, float]
length = float(sum(count for count in self.viewvalues()))
return {value: self[value] / length for value in self} | 752781a9697113ebf3297050649a7f4ba1580b97 | 634 |
def locate_all_occurrence(l, e):
    """
    Return indices of all element occurrences in given list

    :param l: given list
    :type l: list
    :param e: element to locate
    :return: indices of all occurrences
    :rtype: list
    """
    indices = []
    for index, element in enumerate(l):
        if element == e:
            indices.append(index)
    return indices
from typing import List
def format_count(
    label: str, counts: List[int], color: str, dashed: bool = False
) -> dict:
    """Format a line dataset for chart.js"""
    dataset = {
        "label": label,
        "data": counts,
        "borderColor": color,
        "borderWidth": 2,
        "fill": False,
    }
    # Dashed lines get a 5px on / 5px off pattern.
    if dashed:
        dataset["borderDash"] = [5, 5]
    return dataset
def get_valid_segment(text):
    """ Returns None or the valid Loki-formatted urn segment for the given input string. """
    if text == '':
        return None
    # Keep alphanumerics plus the three allowed punctuation marks.
    allowed_punct = ('.', '_', '-')
    return ''.join(ch for ch in text if ch in allowed_punct or ch.isalnum())
def sort_f_df(f_df):
    """Sorts f_df by s_idx first then by l_idx.

    E.g. for scenario 0, see all decision alternatives in order,
    then scenario 1, scenario 2, etc.

    Parameters
    ----------
    f_df : pandas.DataFrame
        A dataframe of performance values, `f`, with indexes for the
        scenario, `s`, and decision alternative, `l`.
        Columns: `['s_idx', 'l_idx', '<f1_name>', '<f2_name>', ...]`
    """
    # Bug fix: sort_values is not in-place, so the sorted frame must be
    # returned (the original discarded it).  Key order is (s_idx, l_idx)
    # to match the documented "scenario first" ordering.
    return f_df.sort_values(['s_idx', 'l_idx'], ascending=[True, True])
def _fit_solver(solver):
"""
Call ``fit`` on the solver. Needed for multiprocessing.
"""
return solver.fit() | 7007752777445d2cc6d476d7af1f83d6cdfe236b | 641 |
def positive_dice_parse(dice: str) -> str:
    """
    :param dice: Formatted string, where each line is blank or matches
                 t: [(t, )*t]
                 t = (0|T|2A|SA|2S|S|A)
                 (note: T stands for Triumph here)
    :return: Formatted string matching above, except tokens are replaced
             with their corresponding values in the 4-tuple system,
             (successes, advantages, triumphs, despairs)
    """
    # Order matters: multi-character tokens must be replaced before their
    # single-character substrings (e.g. "SA" before "S" and "A").
    substitutions = (
        ("0", "(0, 0, 0, 0)"),
        ("T", "(1, 0, 1, 0)"),
        ("2A", "(0, 2, 0, 0)"),
        ("SA", "(1, 1, 0, 0)"),
        ("2S", "(2, 0, 0, 0)"),
        ("S", "(1, 0, 0, 0)"),
        ("A", "(0, 1, 0, 0)"),
    )
    for token, tuple_form in substitutions:
        dice = dice.replace(token, tuple_form)
    return dice
def build_trib_exp(trib_identifier, trib_key_field):
    """Build a SQL LIKE expression matching trib_identifier within trib_key_field."""
    return '"{0}" LIKE \'%{1}%\''.format(trib_key_field, trib_identifier)
import torch
def get_similarity_transform_matrix(
        from_pts: torch.Tensor, to_pts: torch.Tensor) -> torch.Tensor:
    """Compute per-batch 2-D similarity transforms mapping from_pts to to_pts.

    Closed-form least-squares fit of a similarity transform (uniform scale +
    rotation + translation) per batch element; the result is returned as a
    homogeneous 3x3 matrix. NOTE(review): this looks like the standard
    Umeyama-style closed form — verify against a reference before relying on
    sign conventions.

    Args:
        from_pts, to_pts: b x n x 2
    Returns:
        torch.Tensor: b x 3 x 3
    """
    # Centroids of each point set (b x 1 x 2); the fit is computed on
    # centered coordinates and the translation recovered afterwards.
    mfrom = from_pts.mean(dim=1, keepdim=True)  # b x 1 x 2
    mto = to_pts.mean(dim=1, keepdim=True)  # b x 1 x 2

    # a1: total squared deviation of the source points from their centroid.
    a1 = (from_pts - mfrom).square().sum([1, 2], keepdim=False)  # b
    # c1: sum of element-wise products of centered source/target deviations.
    c1 = ((to_pts - mto) * (from_pts - mfrom)).sum([1, 2], keepdim=False)  # b

    to_delta = to_pts - mto
    from_delta = from_pts - mfrom
    # c2: cross term (2-D cross-product sum) of the centered deviations.
    c2 = (to_delta[:, :, 0] * from_delta[:, :, 1] - to_delta[:,
          :, 1] * from_delta[:, :, 0]).sum([1], keepdim=False)  # b

    # a, b parameterize the scaled rotation block [[a, b], [-b, a]];
    # dx, dy are the translation that maps the source centroid onto the
    # target centroid under that rotation/scale.
    a = c1 / a1
    b = c2 / a1
    dx = mto[:, 0, 0] - a * mfrom[:, 0, 0] - b * mfrom[:, 0, 1]  # b
    dy = mto[:, 0, 1] + b * mfrom[:, 0, 0] - a * mfrom[:, 0, 1]  # b

    # Assemble the homogeneous 3x3 matrices, one per batch element.
    ones_pl = torch.ones_like(a1)
    zeros_pl = torch.zeros_like(a1)

    return torch.stack([
        a, b, dx,
        -b, a, dy,
        zeros_pl, zeros_pl, ones_pl,
    ], dim=-1).reshape(-1, 3, 3)
def GetVerificationStepsKeyName(name):
    """Return a str used to uniquely identify a verification steps entity."""
    return ''.join(('VerificationSteps_', name))
import codecs
import logging
def read_file(file_path):
    """
    Read the contents of a file using utf-8 encoding, or return an empty string
    if it cannot be opened.

    :param file_path: str: path to the file to read
    :return: str: contents of file, or '' on error
    """
    try:
        # The original used errors='xmlcharrefreplace', which is an
        # encode-only error handler: on an actual decode error it raises
        # instead of substituting. 'backslashreplace' is supported for
        # decoding and keeps malformed bytes visible in the output.
        with open(file_path, 'r', encoding='utf-8', errors='backslashreplace') as infile:
            return infile.read()
    except OSError:
        # Log the full traceback but degrade gracefully to an empty string.
        logging.exception('Error opening {}'.format(file_path))
        return ''
import argparse
def parse_args():
    """Read preprocessing arguments from the command line.

    Returns
    -------
    argparse.Namespace
        Parsed arguments (dataset paths, split sizes, seed, etc.).
    """
    parser = argparse.ArgumentParser(
        description='preprocess.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, type, default, help) for every plain optional-value argument.
    value_options = [
        ('--dataset', str,
         'data/datasets/solid-state_dataset_2019-09-27_upd.json',
         "Path to dataset to use"),
        ('--elem-dict', str, 'data/elem_dict',
         "Path to element to index dictionary without extension"),
        ('--action-dict', str, 'data/action_dict',
         "Path to element to index dictionary without extension"),
        ('--magpie-embed', str, 'data/magpie_embed',
         "Path to magpie embeddings dictionary without extension"),
        ('--clean-set', str, 'data/dataset',
         "Path to full clean dataset to use without extension"),
        ('--train-set', str, 'data/train',
         "Path to train dataset to use without extension"),
        ('--test-set', str, 'data/test',
         "Path to test dataset to use without extension"),
        ('--val-set', str, 'data/val',
         "Path to val dataset to use without extension"),
        ('--test-size', float, 0.2,
         "size of clean dataset for testing"),
        ('--val-size', float, 0,
         "size of clean dataset for validation"),
        ('--seed', int, 0,
         "Random seed for splitting data"),
        ('--ps', str, '',
         "postscript on path for save files"),
        ('--max-prec', int, 10,
         'Max number of precursors per reaction.'),
        ('--min-prec', int, 2,
         'Min number of precursors per reaction. Default 2'),
    ]
    for flag, arg_type, default, help_text in value_options:
        parser.add_argument(flag,
                            type=arg_type,
                            nargs='?',
                            default=default,
                            help=help_text)
    # Boolean switches.
    parser.add_argument('--augment',
                        action="store_true",
                        help="augment data with precursor rearrangements")
    parser.add_argument('--split-prec-amts',
                        action="store_true",
                        help="split out data for the baseline model")
    # --num-elem carries an extra metavar, so it is added separately.
    parser.add_argument('--num-elem',
                        type=int,
                        metavar='N',
                        nargs='?',
                        default=-1,
                        help='Take N most common elements only. Default: -1 (all)')
    return parser.parse_args()
def _list_subclasses(cls):
"""
Recursively lists all subclasses of `cls`.
"""
subclasses = cls.__subclasses__()
for subclass in cls.__subclasses__():
subclasses += _list_subclasses(subclass)
return subclasses | 4cebf48916c64f32fcd5dfff28ecde7a155edb90 | 649 |
from pathlib import Path
def test_data_dir():
    """
    Return the path of the test data directory (excel fixtures etc.).
    Used by tests and notebooks.
    """
    here = Path(__file__).parent
    return here.parent / 'testdata'
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
                            n_samples, centers_squared_diff, batch_inertia,
                            context, verbose=0):
    """Helper function to encapsulate the early stopping logic.

    Parameters
    ----------
    model : object
        Mini-batch clustering model; its ``batch_size`` and
        ``max_no_improvement`` attributes are read here.
    iteration_idx : int
        Zero-based index of the current mini-batch iteration (for logging).
    n_iter : int
        Total number of iterations (for logging).
    tol : float
        Absolute tolerance on the EWA-smoothed squared change of the
        cluster centers; 0 disables this criterion.
    n_samples : int
        Total number of samples, used to derive the EWA decay factor.
    centers_squared_diff : float
        Squared difference of the center positions for this batch.
    batch_inertia : float
        Inertia computed on the current mini-batch.
    context : dict
        Mutable state carried across successive calls: keys 'ewa_diff',
        'ewa_inertia', 'ewa_inertia_min', 'no_improvement'.
    verbose : int, optional
        If non-zero, print progress and convergence messages.

    Returns
    -------
    bool
        True when the iteration loop should stop early.
    """
    # Normalize inertia to be able to compare values when
    # batch_size changes
    batch_inertia /= model.batch_size
    centers_squared_diff /= model.batch_size

    # Compute an Exponentially Weighted Average of the squared
    # diff to monitor the convergence while discarding
    # minibatch-local stochastic variability:
    # https://en.wikipedia.org/wiki/Moving_average
    ewa_diff = context.get('ewa_diff')
    ewa_inertia = context.get('ewa_inertia')
    if ewa_diff is None:
        # First call: seed the averages with the first batch's values.
        ewa_diff = centers_squared_diff
        ewa_inertia = batch_inertia
    else:
        # Decay factor scales with batch size relative to the full dataset,
        # capped at 1.0.
        alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
        alpha = 1.0 if alpha > 1.0 else alpha
        ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
        ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha

    # Log progress to be able to monitor convergence
    if verbose:
        progress_msg = (
            'Minibatch iteration %d/%d:'
            ' mean batch inertia: %f, ewa inertia: %f ' % (
                iteration_idx + 1, n_iter, batch_inertia,
                ewa_inertia))
        print(progress_msg)

    # Early stopping based on absolute tolerance on squared change of
    # centers position (using EWA smoothing)
    if tol > 0.0 and ewa_diff <= tol:
        if verbose:
            print('Converged (small centers change) at iteration %d/%d'
                  % (iteration_idx + 1, n_iter))
        return True

    # Early stopping heuristic due to lack of improvement on smoothed inertia
    ewa_inertia_min = context.get('ewa_inertia_min')
    no_improvement = context.get('no_improvement', 0)
    if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
        no_improvement = 0
        ewa_inertia_min = ewa_inertia
    else:
        no_improvement += 1

    if (model.max_no_improvement is not None
            and no_improvement >= model.max_no_improvement):
        if verbose:
            print('Converged (lack of improvement in inertia)'
                  ' at iteration %d/%d'
                  % (iteration_idx + 1, n_iter))
        return True

    # update the convergence context to maintain state across successive calls:
    context['ewa_diff'] = ewa_diff
    context['ewa_inertia'] = ewa_inertia
    context['ewa_inertia_min'] = ewa_inertia_min
    context['no_improvement'] = no_improvement
    return False
def rho_MC(delta, rhoeq=4.39e-38):
    """Characteristic density of an axion minicluster in [solar masses/km^3].

    The minicluster forms from an overdensity with overdensity parameter
    ``delta``; ``rhoeq`` is the matter density at matter-radiation equality
    in [solar masses/km^3].
    """
    overdensity_factor = (1 + delta) * delta ** 3
    return 140 * overdensity_factor * rhoeq
def is_ascii(string):
    """Return True if string contains only us-ascii encoded characters."""
    return all(ord(char) <= 127 for char in string)
def summary(task):
    """Given an ImportTask, produce a short string identifying the
    object.
    """
    if task.is_album:
        artist, title = task.cur_artist, task.cur_album
    else:
        artist, title = task.item.artist, task.item.title
    return u'{0} - {1}'.format(artist, title)
from typing import List
def build_graph(order: int, edges: List[List[int]]) -> List[List[int]]:
    """Builds an adjacency list from the edges of an undirected graph."""
    adjacency = [[] for _ in range(order)]
    for endpoint_a, endpoint_b in edges:
        # Undirected: record the edge in both directions.
        adjacency[endpoint_a].append(endpoint_b)
        adjacency[endpoint_b].append(endpoint_a)
    return adjacency
def post_config_adobe_granite_saml_authentication_handler(key_store_password=None, key_store_password_type_hint=None, service_ranking=None, service_ranking_type_hint=None, idp_http_redirect=None, idp_http_redirect_type_hint=None, create_user=None, create_user_type_hint=None, default_redirect_url=None, default_redirect_url_type_hint=None, user_id_attribute=None, user_id_attribute_type_hint=None, default_groups=None, default_groups_type_hint=None, idp_cert_alias=None, idp_cert_alias_type_hint=None, add_group_memberships=None, add_group_memberships_type_hint=None, path=None, path_type_hint=None, synchronize_attributes=None, synchronize_attributes_type_hint=None, clock_tolerance=None, clock_tolerance_type_hint=None, group_membership_attribute=None, group_membership_attribute_type_hint=None, idp_url=None, idp_url_type_hint=None, logout_url=None, logout_url_type_hint=None, service_provider_entity_id=None, service_provider_entity_id_type_hint=None, assertion_consumer_service_url=None, assertion_consumer_service_url_type_hint=None, handle_logout=None, handle_logout_type_hint=None, sp_private_key_alias=None, sp_private_key_alias_type_hint=None, use_encryption=None, use_encryption_type_hint=None, name_id_format=None, name_id_format_type_hint=None, digest_method=None, digest_method_type_hint=None, signature_method=None, signature_method_type_hint=None, user_intermediate_path=None, user_intermediate_path_type_hint=None):  # noqa: E501
    """Configure the Adobe Granite SAML authentication handler.

    Auto-generated controller stub: accepts every OSGi configuration
    property of the SAML handler (each value parameter paired with a
    ``*_type_hint`` string describing its type) but does not act on any
    of them yet.

    :rtype: None
    """
    # TODO: replace this generated placeholder with a real implementation.
    return 'do some magic!'
import os
def get_req_env(var_name: str) -> str:
    """
    Return the value of environment variable *var_name*.

    Prints a message and exits the process with status 1 if the
    variable is not set.
    """
    try:
        return os.environ[var_name]
    except KeyError:
        print(f"Missing required environment variable '{var_name}'.")
        # Raise SystemExit directly instead of calling exit(): the exit()
        # helper is injected by the site module and is absent when Python
        # runs with -S or in some embedded interpreters.
        raise SystemExit(1)
def calculate(cart):
    """Return the total shipping cost for the cart. """
    return sum(line.item.shipping_cost * line.quantity
               for line in cart.get_lines())
def format_perm(model, action):
    """
    Format a permission string "app.verb_model" for the model and the
    requested action (add, change, delete).
    """
    meta = model._meta
    return '{}.{}_{}'.format(meta.app_label, action, meta.model_name)
import argparse
def parseArg():
    """
    Build the command-line argument parser for the SAT solver.

    :return: the parser (positional: input file, optional verbosity level)
    """
    parser = argparse.ArgumentParser(description='SAT solver')
    parser.add_argument('infile', type=argparse.FileType('r'), nargs=1)
    parser.add_argument('level', type=int, nargs='?', default=0)
    return parser
import math
def normal_to_angle(x, y):
    """
    Take two normal vectors and return the angle that they give.

    :type x: float
    :param x: x normal
    :type y: float
    :param y: y normal
    :rtype: float
    :return: angle created by the two normals
    """
    radians = math.atan2(y, x)
    # Convert to degrees (same arithmetic order as the original
    # expression, so results are bit-identical).
    return radians * 180 / math.pi
import socket
def is_open_port(port):
    """
    Check if a local TCP port is free (i.e. can be bound).

    :param port: port number to be checked
    :type port: int
    :return: True if the port could be bound on 127.0.0.1, False otherwise
    :rtype: bool
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(("127.0.0.1", port))
    except socket.error:
        return False
    finally:
        # Bug fix: the original leaked the socket on every path; close it
        # so the probe itself does not keep the port occupied.
        s.close()
    return True
def build_attribute_set(items, attr_name):
    """Build a set off of a particular attribute of a list of
    objects. Adds 'None' to the set if one or more of the
    objects in items is missing the attribute specified by
    attr_name.
    """
    return {getattr(item, attr_name, None) for item in items}
def get_elk_command(line):
    """Return the 2 character command in the message, or '' if the
    message is too short to contain one."""
    return "" if len(line) < 4 else line[2:4]
def safe_div(a, b):
    """
    Division that tolerates a zero divisor.

    :param a: number a
    :param b: number b
    :return: a divided by b, or 0 when b equals zero
    """
    if b != 0:
        return a / b
    return 0
def shouldAvoidDirectory(root, dirsToAvoid):
    """
    Given a directory (root, of type string) and a set of directory
    paths to avoid (dirsToAvoid, of type set of strings), return a boolean
    value describing whether the directory lies inside one to avoid.
    """
    parts = root.split('/')
    # Every leading prefix of the path: 'a', 'a/b', 'a/b/c', ...
    prefixes = ('/'.join(parts[:count]) for count in range(1, len(parts) + 1))
    return any(prefix in dirsToAvoid for prefix in prefixes)
from bs4 import BeautifulSoup
def get_blb_links(driver):
    """takes (driver) and returns list of links to scrape"""
    homepage = "https://www.bloomberg.com/europe"
    rootpage = "https://www.bloomberg.com"
    driver.get(homepage)
    # Collect the single-story module followed by the first three
    # story-package modules (two news packages and the op-ed section).
    sections = [driver.find_elements_by_class_name("single-story-module")[0]]
    sections += driver.find_elements_by_class_name("story-package-module")[:3]
    html = "".join(section.get_attribute("outerHTML") for section in sections)
    soup = BeautifulSoup(html, "lxml")
    links = [
        rootpage + anchor.get("href")
        for anchor in soup.findAll("a")
        if "/news/" in anchor.get("href")
    ]
    # Deduplicate while preserving first-seen order.
    return list(dict.fromkeys(links))