repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
def rmlinenumber(linenumber, infile, dryrun=False):
    """
    Sed-like deletion of a single line, addressed by 1-based line number.

    Usage: pysed.rmlinenumber(<Unwanted Line Number>, <Text File>)
    Example: pysed.rmlinenumber(10, '/path/to/file.txt')
    Example 'DRYRUN': pysed.rmlinenumber(10, '/path/to/file.txt', dryrun=True)
    #This will dump the output to STDOUT instead of changing the input file.

    :param linenumber: 1-based number of the line to remove.
    :param infile: path of the file to rewrite in place.
    :param dryrun: if True print the result to STDOUT instead of
        rewriting ``infile``; any non-bool value aborts with a usage message.
    """
    # BUG FIX: the original read `isinstance(linenumber) != isinstance(linecounter)`,
    # calling isinstance() with a single argument, which raises TypeError
    # before any validation could happen.
    if not isinstance(linenumber, int):
        exit("'linenumber' argument must be an integer.")
    linelist = []
    with open(infile) as f:
        # enumerate from 1 so linecounter matches the 1-based linenumber
        for linecounter, item in enumerate(f, start=1):
            if linecounter != linenumber:
                linelist.append(item)
    if dryrun is False:
        # Opening with 'w' already truncates, so no explicit truncate() call
        # is needed before rewriting.
        with open(infile, 'w') as f:
            f.writelines(linelist)
    elif dryrun is True:
        for line in linelist:
            print(line, end='')
    else:
        exit(
            """Unknown option specified to 'dryrun' argument,
            Usage: dryrun=<True|False>."""
        )
|
def ln(label):
    """Return a 70-character divider with *label* centred between dashes.

    >>> ln('hello there')
    '---------------------------- hello there -----------------------------'
    """
    # Dashes on each side; integer division may leave the line one short,
    # so pad the right edge back up to 70 with ljust.
    side = (70 - (len(label) + 2)) // 2
    bar = '-' * side
    text = '%s %s %s' % (bar, label, bar)
    return text.ljust(70, '-')
|
def _check_valid(key, val, valid):
    """Raise ValueError unless *val* is one of the *valid* options for *key*."""
    if val in valid:
        return
    raise ValueError('%s must be one of %s, not "%s"' % (key, valid, val))
|
@classmethod
def hexline(cls, data, separator=' ', width=None):
    """
    Render one line of output: the hexadecimal dump of *data* followed
    by its printable representation.

    @type  data: str
    @param data: Binary data.
    @type  separator: str
    @param separator: String placed between hex pairs.
    @type  width: int
    @param width:
        (Optional) Maximum number of characters per text line; also
        used to left-pad both columns.
    @rtype:  str
    @return: Formatted text line.
    """
    if width is None:
        fmt = '%s %s'
    else:
        # Each byte renders as two hex digits plus the separator; the
        # trailing separator is dropped, hence the -1.
        hex_field = (len(separator) + 2) * width - 1
        fmt = '%%-%ds %%-%ds' % (hex_field, width)
    return fmt % (cls.hexadecimal(data, separator), cls.printable(data))
|
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Mimics 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces['closest-tag']
    if not tag:
        rendered = pieces['short']
    else:
        rendered = tag
        distance = pieces['distance']
        if distance:
            rendered += '-%d-g%s' % (distance, pieces['short'])
    return rendered + '-dirty' if pieces['dirty'] else rendered
|
@classmethod
def get_options(cls):
    """
    Return the possible VPS configurations as a list.

    Linux (OpenVZ) and Windows (KVM) pages differ slightly and are
    parsed by different methods; this entry point parses the OpenVZ
    hosting page.

    :return: possible configurations.
    """
    browser = cls._create_browser()
    browser.open('https://www.2sync.co/vps/ukraine/')
    page = browser.get_current_page()
    return list(cls._parse_openvz_hosting(page))
|
def delete_verified_email_address(EmailAddress=None):
    """
    Remove *EmailAddress* from the list of identities verified with
    Amazon SES.  This action is throttled at one request per second.

    See also: AWS API Documentation

    :type EmailAddress: string
    :param EmailAddress: [REQUIRED]
        An email address to be removed from the list of verified addresses.

    :example: response = client.delete_verified_email_address(
            EmailAddress='user@example.com',
        )
        print(response)
    """
    # Documentation stub only -- the real call is dispatched by boto3.
    pass
|
def add_time_offset(offset):
    """Adds specified number of microseconds to the offset applied to time() function result.

    :param offset: microseconds to add; coerced with ``int()``, so
        fractional values are truncated toward zero.
    """
    # NOTE(review): _time_offset is a module-level accumulator defined
    # elsewhere in this module (not visible in this chunk) -- confirm it
    # is initialised before first use.
    global _time_offset
    _time_offset += int(offset)
|
def epoch_to_timestamp(records):
    """
    Convert epoch timestamps to ISO 8601 (2015-01-04T09:30:21Z).

    Mutates the dictionaries in place and returns the same list.

    :param records: List (of dictionaries)
    :return: List (of dictionaries)
    """
    from datetime import datetime
    for entry in records:
        for field in ('time_first', 'time_last'):
            if field in entry:
                # fromtimestamp() uses the local timezone, matching the
                # original behaviour; the literal 'Z' suffix is appended.
                entry[field] = datetime.fromtimestamp(entry[field]).isoformat() + 'Z'
    return records
|
def arr2dict(arr, keyname='key', valuename='value'):
    """
    arr2dict - build a dict mapping each item's *keyname* entry to its
    *valuename* entry (None when the value entry is absent).
    """
    return {item[keyname]: (item[valuename] if valuename in item else None)
            for item in arr}
|
@classmethod
def arg_fft(cls, parser):
    """Add an `~argparse.ArgumentGroup` for FFT options and return it."""
    specs = [
        ('--secpfft', dict(type=float, default=1.0,
                           help='length of FFT in seconds')),
        ('--overlap', dict(type=float,
                           help='overlap as fraction of FFT length [0-1)')),
        ('--window', dict(type=str, default='hann',
                          help='window function to use when overlapping FFTs')),
    ]
    group = parser.add_argument_group('Fourier transform options')
    for flag, kwargs in specs:
        group.add_argument(flag, **kwargs)
    return group
|
def deploy_cert(domain, cert_path, key_path, chain_path, fullchain_path):
    """Deploy a certificate for *domain* (interface stub).

    :param str domain: domain the certificate file is deployed for
    :param str cert_path: absolute path to the certificate file
    :param str key_path: absolute path to the private key file
    :param str chain_path: absolute path to the certificate chain file
    :param str fullchain_path: absolute path to the certificate
        fullchain file (cert plus chain)

    :raises .PluginError: when the cert cannot be deployed
    """
|
def addon_userpref_show(module: str=''):
    """Show add-on user preferences (operator stub, does nothing here).

    :param module: Module, Module name of the add-on to expand
    :type module: str
    """
    pass
|
@classmethod
def get_recommended_protein_name(cls, entry):
    """
    Extract the recommended full and short protein names from an XML node.

    :param entry: XML node entry
    :return: (str, str) => (full, short); short is None when absent
    """
    full_name = entry.find('./protein/recommendedName/fullName').text
    short_tag = entry.find('./protein/recommendedName/shortName')
    # shortName is optional in the UniProt schema as used here.
    short_name = short_tag.text if short_tag is not None else None
    return full_name, short_name
|
@staticmethod
def matcher_strong_literal(token, terminal, m):
    """ Return True when meaning *m* matches a strong literal terminal,
        i.e. one enclosed in double quotes ("dæmi:hk") """
    word_cat = m.ordfl
    stem = m.stofn
    return terminal.matches_first(word_cat, stem, token.t1_lower)
|
def copy_name(apps, schema_editor):
    """
    Data migration: copy each exercise's current name into its
    name_original field and save the row.
    """
    exercise_model = apps.get_model('exercises', 'Exercise')
    for instance in exercise_model.objects.all():
        instance.name_original = instance.name
        instance.save()
|
def print_headers(list_headers, list_file_xml, list_file_json, list_file_csv):
    """
    File creation function - Print header into output file.

    Builds one CSV line, one JSON fragment and one XML element from
    ``list_headers`` and appends them to the three per-format buffers,
    which are mutated in place.

    :param list_headers: List of headers.  NOTE(review): each header is
        assumed to be a dict with 'key' and 'value' entries and an
        optional 'title' entry -- confirm against the caller.
    :param list_file_xml: Different parts of xml file (appended to).
    :param list_file_json: Different parts of json file (appended to).
    :param list_file_csv: Different parts of csv file (appended to).
    """
    # Fragments accumulated for this single header row, one list per format.
    list_particular_header_csv = list()
    list_particular_header_json = list()
    list_particular_header_xml = ['\t<Item ']
    # Open the JSON object for this item at three tabs of indentation.
    list_file_json.append(3 * '\t' + '{\r\n')
    for header in list_headers:
        if header['key'] is not None:
            if 'title' in header:
                if header['key'] != '#title':
                    # Header carries a title: emit "title_key(=value)" forms.
                    list_particular_header_csv.append('_'.join((header[
                        'title'], header['key'], str(header['value']))))
                    # JSON attribute names are prefixed with '-' and have a
                    # single leading '@' stripped from the key.
                    list_particular_header_json.append(': '.join(('"-' +
                        str(header['title']) + '_' + str(header['key']).
                        replace('@', '', 1) + '"', '"' + str(header['value'
                        ]) + '"')))
                    list_particular_header_xml.append('='.join((str(header[
                        'title']) + '_' + str(header['key']).replace('@',
                        '', 1), '"' + str(header['value']) + '"')))
            elif header['key'] != '#title':
                # No title: emit "key(=value)" forms instead.
                list_particular_header_csv.append('_'.join((str(header[
                    'key']), str(header['value']))))
                list_particular_header_json.append(': '.join(('"-' + str(
                    header['key']).replace('@', '', 1) + '"', '"' + str(
                    header['value']) + '"')))
                list_particular_header_xml.append('='.join((str(header[
                    'key']).replace('@', '', 1), '"' + str(header['value']) +
                    '"')))
        else:
            # Key-less headers contribute only their title to the CSV line.
            list_particular_header_csv.append(str(header['title']))
    # Flush the accumulated fragments into the per-format output buffers.
    list_file_csv.append(', '.join(list_particular_header_csv) + '\r\n')
    list_file_json.append('\t\t\t\t' + ',\r\n\t\t\t\t'.join(
        list_particular_header_json) + ',\r\n')
    list_file_xml.append(' '.join(list_particular_header_xml) + '>\r\n')
|
def vector_dot(xyz, vector):
    """
    Dot product between every row of *xyz* and a single 3-vector.

    **Required**

    :param numpy.ndarray xyz: grid (npoints x 3)
    :param numpy.ndarray vector: vector (1 x 3)

    **Returns**

    :returns: (npoints x 1) array of dot products
    :rtype: numpy.ndarray
    """
    if len(vector) != 3:
        raise Exception('vector should be length 3, the provided length is {}'
                        .format(len(vector)))
    vx, vy, vz = vector[0], vector[1], vector[2]
    return vx * xyz[:, 0] + vy * xyz[:, 1] + vz * xyz[:, 2]
|
def clean_bibtex_str(s):
    """Clean BibTeX string and escape TOML special characters."""
    # Order matters: raw backslashes are stripped BEFORE escape
    # backslashes are inserted in front of double quotes.
    replacements = (
        ('\\', ''),
        ('"', '\\"'),
        ('{', ''),
        ('}', ''),
        ('\t', ' '),
        ('\n', ' '),
        ('\r', ''),
    )
    for old, new in replacements:
        s = s.replace(old, new)
    return s
|
def theta_Cs_K_PK74(T, P):
    """c-c': caesium potassium [PK74].

    Returns ``(theta, valid)``: theta is identically zero and *valid*
    flags whether T equals the reference temperature 298.15 K.
    """
    valid = T == 298.15
    return 0.0, valid
|
def get(cls, reactor, source='graphite', **options):
    """Look up the alert class registered for *source* and instantiate it."""
    alert_cls = cls.alerts[source]
    return alert_cls(reactor, **options)
|
@classmethod
def repo_list(cls, *options):
    """
    Return the list of repositories in the registry.
    """
    manager = cls._ocean.RepoManager(cls._ocean)
    return manager.list_repos()
|
def islink():
    """
    Report whether this file path refers to a symbolic ("soft") link.

    @return: C{True} if the file at this path is a symbolic link,
        C{False} otherwise.
    """
|
def create_load_balancer(Name=None, Subnets=None, SecurityGroups=None,
                         Scheme=None, Tags=None, IpAddressType=None):
    """
    Create an Application Load Balancer.

    You can specify security groups, subnets, IP address type, and tags
    at creation time, or set them later with SetSecurityGroups,
    SetSubnets, SetIpAddressType and AddTags.  Create listeners with
    CreateListener, inspect existing balancers with
    DescribeLoadBalancers, and delete with DeleteLoadBalancer.  Up to 20
    load balancers are allowed per region per account (limit increases
    can be requested).

    See also: AWS API Documentation

    :type Name: string
    :param Name: [REQUIRED] Load balancer name; unique per region per
        account, at most 32 alphanumeric-or-hyphen characters, and must
        not begin or end with a hyphen.
    :type Subnets: list
    :param Subnets: [REQUIRED] Subnet IDs to attach; at most one subnet
        per Availability Zone, and at least two Availability Zones.
    :type SecurityGroups: list
    :param SecurityGroups: Security-group IDs to assign.
    :type Scheme: string
    :param Scheme: 'internet-facing' (public IPs, default) or
        'internal' (private IPs, reachable only inside the VPC).
    :type Tags: list
    :param Tags: Tags to assign; each a dict with a required 'Key' and
        an optional 'Value'.
    :type IpAddressType: string
    :param IpAddressType: 'ipv4' or 'dualstack' (IPv4 and IPv6);
        internal load balancers must use 'ipv4'.
    :rtype: dict
    :return: A dict with a 'LoadBalancers' list describing the created
        balancer (ARN, DNSName, CanonicalHostedZoneId, CreatedTime,
        LoadBalancerName, Scheme, VpcId, State, Type,
        AvailabilityZones, SecurityGroups, IpAddressType).
    """
    # Documentation stub only -- the real call is dispatched by boto3.
    pass
|
def _getBitsForLength(length):
    """
    Return the number of random bits needed to produce a digit string
    of the requested *length* (linear fit, truncated to int).
    """
    # Empirical linear relation bits ~= (length - OFFSET) / SLOPE.
    SLOPE = 0.3010142116935483
    OFFSET = 0.0701126088709696
    bits = (length - OFFSET) / SLOPE
    return int(bits)
|
@classmethod
def build_scheduler(cls, config):
    """ Create a scheduler of the expected type, parameterized from
    *config*.  Abstract: concrete subclasses must override this.

    Parameters
    ----------
    config: Configuration
        configuration object instance
    """
    raise Exception('Scheduler is an abstract class!')
|
@classmethod
def si_unit_names(cls):
    """
    Return an iterable view over the predefined SI unit names.
    """
    units = cls._si_units
    return units.keys()
|
def parse_zgp(response):
    """Parse the json response for a ZGPSWITCH Hue Tap."""
    # buttonevent codes reported by the Tap hardware for its 4 buttons.
    TAP_BUTTONS = {34: '1_click', 16: '2_click', 17: '3_click', 18: '4_click'}
    press = response['state']['buttonevent']
    button = 'No data' if press is None else TAP_BUTTONS[press]
    updated = response['state']['lastupdated'].split('T')
    return {'model': 'ZGP', 'name': response['name'], 'state': button,
            'last_updated': updated}
|
@classmethod
def darker_grey(cls):
    """Factory method returning a :class:`Colour` valued ``0x546e7a``."""
    return cls(0x546e7a)
|
def same_residue(at1, at2):
    """
    Return True when both atoms belong to the same residue
    (i.e. have equal parents).
    """
    parent1 = at1.get_parent()
    return parent1 == at2.get_parent()
|
def SAMflags(x):
    """
    Explains a SAM flag.

    :param x: flag (int bitmask)
    :returns: list of 12 strings, one per flag bit in ascending bit
        order, each prefixed '1:' when the bit is set or '0:' otherwise
    """
    # The original implementation repeated the same if/else twelve times;
    # a (bit, set_text, clear_text) table expresses it once.  Texts are
    # byte-identical to the original output.
    _FLAG_DESCRIPTIONS = (
        (1, '1: Read paired', '0: Read unpaired'),
        (2, '1: Read mapped in proper pair', '0: Read not mapped in proper pair'),
        (4, '1: Read unmapped', '0: Read mapped'),
        (8, '1: Mate unmapped', '0: Mate mapped'),
        (16, '1: Read reverse strand', '0: Read direct strand'),
        (32, '1: Mate reverse strand', '0: Mate direct strand'),
        (64, '1: First in pair', '0: Second in pair'),
        (128, '1: Second in pair', '0: First in pair'),
        (256, '1: Not primary alignment', '0: Primary alignment'),
        (512, '1: Read fails platform/vendor quality checks',
         '0: Read passes platform/vendor quality checks'),
        (1024, '1: Read is PCR or optical duplicate',
         '0: Read is not PCR or optical duplicate'),
        (2048, '1: Supplementary alignment', '0: Not supplementary alignment'),
    )
    return [set_msg if x & bit else clear_msg
            for bit, set_msg, clear_msg in _FLAG_DESCRIPTIONS]
|
@classmethod
def factory(cls, response):
    """
    Build an event from *response*.

    :returns: SpeakerPlayerShuffleChangedEvent instance, or None when
        the response is not a ShuffleMode notification
    """
    if response.name != 'ShuffleMode':
        return None
    shuffle_on = response.data['shuffle'] == 'on'
    return cls(shuffle_on)
|
def x_in_y(x, y):
    """Return True when service *x* is named in service *y*'s unit
    placement spec.

    A placement entry may be a bare service name, 'service/unit', or
    'container:service[/unit]'; any of these naming x counts as a match.
    """
    target = x.name
    for spec in y.unit_placement:
        # Strip an optional 'container:' prefix, then an optional '/unit'
        # suffix (exact two-part splits, as in the original).
        if ':' in spec:
            _, spec = spec.split(':')
        if '/' in spec:
            spec, _ = spec.split('/')
        if spec == target:
            return True
    return False
|
def _days_before_year(year):
    """year -> number of days before January 1st of year."""
    prior = year - 1
    # Gregorian leap rule: every 4th year, minus centuries, plus 400s.
    leap_days = prior // 4 - prior // 100 + prior // 400
    return prior * 365 + leap_days
|
def schema_input_type(schema):
    """Classify a schema's input type.

    :param schema:
    :return: 'list' when *schema* is a list, otherwise 'simple'
    """
    return 'list' if isinstance(schema, list) else 'simple'
|
def unlock():
    """Unlock strips so they can be transformed (operator stub)."""
    pass
|
def parse_bed(fn):
    """(internal) Parse a BED6+ file into a list of BED6 records.

    Columns 2-3 (start, end) are converted to int; rows with a negative
    start coordinate are skipped; only the first six columns are kept.
    """
    regions = []
    with open(fn) as handle:
        for raw in handle:
            cols = raw.strip().split('\t')
            cols[1:3] = [int(v) for v in cols[1:3]]
            if cols[1] < 0:
                continue
            regions.append(cols[:6])
    return regions
|
def get_pg_tc(links_table_name, links_table_input_field,
              links_table_output_field, closure_table_name,
              closure_table_parent_field, closure_table_child_field):
    """
    Return the transitive closure table template.

    Produces the PostgreSQL DDL (as a string) that installs an
    ``update_tc()`` PL/pgSQL trigger function plus an ``autoupdate_tc``
    trigger on the links table, keeping the closure table incrementally
    up to date on INSERT and DELETE.  ``$$BODY$$`` renders as the
    literal ``$BODY$`` dollar-quoting after Template substitution.

    :param links_table_name: name of the edge (links) table the trigger
        is attached to
    :param links_table_input_field: edge source column
    :param links_table_output_field: edge target column
    :param closure_table_name: name of the transitive-closure table
    :param closure_table_parent_field: closure ancestor column
    :param closure_table_child_field: closure descendant column
    :return: the SQL text with all placeholders substituted
    """
    from string import Template
    # The SQL below is a string.Template: $name placeholders are
    # substituted, $$ escapes a literal dollar sign.
    pg_tc = Template(
        """
    DROP TRIGGER IF EXISTS autoupdate_tc ON $links_table_name;
    DROP FUNCTION IF EXISTS update_tc();
    CREATE OR REPLACE FUNCTION update_tc()
        RETURNS trigger AS
    $$BODY$$
    DECLARE
        new_id INTEGER;
        old_id INTEGER;
        num_rows INTEGER;
    BEGIN
        IF tg_op = 'INSERT' THEN
            IF EXISTS (
                SELECT Id FROM $closure_table_name
                WHERE $closure_table_parent_field = new.$links_table_input_field
                    AND $closure_table_child_field = new.$links_table_output_field
                    AND depth = 0
                )
            THEN
                RETURN null;
            END IF;
            IF new.$links_table_input_field = new.$links_table_output_field
                OR EXISTS (
                    SELECT id FROM $closure_table_name
                    WHERE $closure_table_parent_field = new.$links_table_output_field
                        AND $closure_table_child_field = new.$links_table_input_field
                    )
            THEN
                RETURN null;
            END IF;
            INSERT INTO $closure_table_name (
                $closure_table_parent_field,
                $closure_table_child_field,
                depth)
                VALUES (
                    new.$links_table_input_field,
                    new.$links_table_output_field,
                    0);
            new_id := lastval();
            UPDATE $closure_table_name
                SET entry_edge_id = new_id
                , exit_edge_id = new_id
                , direct_edge_id = new_id
                WHERE id = new_id;
            INSERT INTO $closure_table_name (
                entry_edge_id,
                direct_edge_id,
                exit_edge_id,
                $closure_table_parent_field,
                $closure_table_child_field,
                depth)
                SELECT id
                    , new_id
                    , new_id
                    , $closure_table_parent_field
                    , new.$links_table_output_field
                    , depth + 1
                    FROM $closure_table_name
                    WHERE $closure_table_child_field = new.$links_table_input_field;
            INSERT INTO $closure_table_name (
                entry_edge_id,
                direct_edge_id,
                exit_edge_id,
                $closure_table_parent_field,
                $closure_table_child_field,
                depth)
                SELECT new_id
                    , new_id
                    , id
                    , new.$links_table_input_field
                    , $closure_table_child_field
                    , depth + 1
                    FROM $closure_table_name
                    WHERE $closure_table_parent_field = new.$links_table_output_field;
            INSERT INTO $closure_table_name (
                entry_edge_id,
                direct_edge_id,
                exit_edge_id,
                $closure_table_parent_field,
                $closure_table_child_field,
                depth)
                SELECT A.id
                    , new_id
                    , B.id
                    , A.$closure_table_parent_field
                    , B.$closure_table_child_field
                    , A.depth + B.depth + 2
                    FROM $closure_table_name A
                        CROSS JOIN $closure_table_name B
                    WHERE A.$closure_table_child_field = new.$links_table_input_field
                        AND B.$closure_table_parent_field = new.$links_table_output_field;
        END IF;
        IF tg_op = 'DELETE' THEN
            IF NOT EXISTS(
                SELECT id FROM $closure_table_name
                WHERE $closure_table_parent_field = old.$links_table_input_field
                AND $closure_table_child_field = old.$links_table_output_field AND
                depth = 0 )
            THEN
                RETURN NULL;
            END IF;
            CREATE TABLE PurgeList (Id int);
            INSERT INTO PurgeList
                SELECT id FROM $closure_table_name
                    WHERE $closure_table_parent_field = old.$links_table_input_field
                AND $closure_table_child_field = old.$links_table_output_field AND
                        depth = 0;
            WHILE (1 = 1)
            loop
                INSERT INTO PurgeList
                    SELECT id FROM $closure_table_name
                        WHERE depth > 0
                        AND ( entry_edge_id IN ( SELECT Id FROM PurgeList )
                        OR direct_edge_id IN ( SELECT Id FROM PurgeList )
                        OR exit_edge_id IN ( SELECT Id FROM PurgeList ) )
                        AND Id NOT IN (SELECT Id FROM PurgeList );
                GET DIAGNOSTICS num_rows = ROW_COUNT;
                if (num_rows = 0) THEN
                    EXIT;
                END IF;
            end loop;
            DELETE FROM $closure_table_name WHERE Id IN ( SELECT Id FROM PurgeList);
            DROP TABLE PurgeList;
        END IF;
        RETURN NULL;
    END
    $$BODY$$
    LANGUAGE plpgsql VOLATILE
    COST 100;
    CREATE TRIGGER autoupdate_tc
        AFTER INSERT OR DELETE OR UPDATE
        ON $links_table_name FOR each ROW
        EXECUTE PROCEDURE update_tc();
    """
    )
    # Substitute all table/column names into the template and return SQL text.
    return pg_tc.substitute(links_table_name=links_table_name,
        links_table_input_field=links_table_input_field,
        links_table_output_field=links_table_output_field,
        closure_table_name=closure_table_name, closure_table_parent_field=
        closure_table_parent_field, closure_table_child_field=
        closure_table_child_field)
|
def shape_key_insert():
    """Insert a mask shape keyframe for the active mask layer at the
    current frame (operator stub).
    """
    pass
|
@classmethod
def _get_binary_operator(cls, op, inv=False):
    """ Return the method used by binary operator *op*; *inv* selects the
    reflected variant.  Subclasses must override. """
    raise NotImplementedError
|
def provide_handler(handler, adapts=None):
    """Register *handler* (interface stub).

    Handlers are subscription-adapter factories that produce nothing:
    all of their work happens when they are called, typically to handle
    events.

    If *handler* carries an ``adapts`` declaration, the *adapts*
    argument may be omitted and the declaration is used; passing it
    overrides the declaration.

    CAUTION: use only from test or application-setup code — component
    registration is a configuration activity, not something regular
    library modules should do.
    """
|
def check_generator_arguments(y=None, sample_weight=None, validation_split=None
    ):
    """Validates arguments passed when using a generator.

    # Arguments
        y: targets; must be None, since a generator supplies targets
            as its second element.
        sample_weight: must be None, since a generator supplies sample
            weights as its third element.
        validation_split: must be falsy; splitting is impossible when
            data comes from a generator.

    # Raises
        ValueError: if any unsupported argument is provided.
    """
    # BUG FIX: both error messages read "data isa generator" -- corrected
    # to "data is a generator".
    if y is not None:
        raise ValueError(
            '`y` argument is not supported when data is a generator or Sequence instance. Instead pass targets as the second element of the generator.'
            )
    if sample_weight is not None:
        raise ValueError(
            '`sample_weight` argument is not supported when data is a generator or Sequence instance. Instead pass sample weights as the third element of the generator.'
            )
    if validation_split:
        raise ValueError(
            'If your data is in the form of a Python generator, you cannot use `validation_split`.'
            )
|
taxcalc-0.7.82 | taxcalc-0.7.82//taxcalc/macro_elasticity.pyfile:/taxcalc/macro_elasticity.py:function:proportional_change_gdp/proportional_change_gdp | def proportional_change_gdp(calc1, calc2, elasticity=0.0):
"""
This function harnesses econometric estimates of the historic relationship
between tax policy and the macroeconomy to predict the effect of tax
reforms on growth.
In particular, this model relies on estimates of how GDP responds to
changes in the average after tax rate on wage income across all taxpayers
(one minus the average marginal tax rate, or 1-AMTR). These estimates are
derived from calculations of income-weighted marginal tax rates under the
baseline and reform.
Evidence for this parameter can be found in Barro and Redlick's
"Macroeconomic Effects from Government Purchases and Taxes." In particular,
Barro and Redlick find that from a 1 percentage point increase in the AMTR
leads to a 0.54 percent increase in GDP. Evaluated at the sample mean,
this translates to an elasticity of GDP with respect to the average after
tax rate of 0.36.
Karel Mertens' "Marginal Tax Rates and Income: New Time Series Evidence"
contains additional evidence, focussed on tax cuts affecting the upper part
of the income distribution.
Both Mertens and Karel tentatively conclude that the effect stems from
marginal rather than average tax rates.
Parameters
----------
calc1 : Calculator object for the pre-reform baseline
calc2 : Calculator object for the policy reform
elasticity: Float estimate of elasticity of GDP wrt 1-AMTR
Returns
-------
Float estimate of proportional GDP impact of the reform.
"""
_, _, mtr_combined1 = calc1.mtr()
_, _, mtr_combined2 = calc2.mtr()
avg_one_mtr1 = 1.0 - (mtr_combined1 * calc1.records.c00100 * calc1.
records.s006).sum() / (calc1.records.c00100 * calc1.records.s006).sum()
avg_one_mtr2 = 1.0 - (mtr_combined2 * calc2.records.c00100 * calc2.
records.s006).sum() / (calc2.records.c00100 * calc2.records.s006).sum()
diff_avg_one_mtr = avg_one_mtr2 - avg_one_mtr1
proportional_diff_mtr = diff_avg_one_mtr / avg_one_mtr1
gdp_effect_of_reform = proportional_diff_mtr * elasticity
print('{:.5f}'.format(gdp_effect_of_reform))
return gdp_effect_of_reform
|
def plus_or_dot(pieces):
    """Return '.' when the closest tag already contains a '+', else '+'."""
    return '.' if '+' in pieces.get('closest-tag', '') else '+'
|
def has_ttfa_table(tt):
    """Return True when font object *tt* contains a 'TTFA' table."""
    return 'TTFA' in tt
|
def created():
    """ Return CreationDate as a DateTime object (interface stub).

    o Permission: View
    """
|
def get_option(option):
    """ get_option maps a salary-table color indicator to the type of
    option (if any) applied to that year's salary.

    @param **option** (*str*): String of color indicators that
        correspond to yearly options.

    Returns:
        **option** (*str*): One of '', 'Team',
        'Qualifying', 'Two-Way', 'Player'
    """
    # Unknown/black styles map to '' (no option).
    color_to_option = {
        'color:black': '',
        'color:rgb(255, 0, 0)': 'Team',
        'color:rgb(0, 153, 0)': 'Qualifying',
        'color:rgb(168, 0, 212)': 'Two-Way',
        'color:rgb(4, 134, 176)': 'Player',
    }
    return color_to_option.get(option, '')
|
# tensorboard_plugin_profile | tensorboard_plugin_profile//convert/input_pipeline_proto_to_gviz.pyfile:/convert/input_pipeline_proto_to_gviz.py:function:get_input_op_table_args/get_input_op_table_args
def get_input_op_table_args(ipa):
    """Build gviz DataTable args from an Input Pipeline Analyzer proto.

    Args:
      ipa: An input_pipeline_pb2.InputPipelineAnalysisResult.

    Returns:
      (table_description, data, custom_properties) suitable for building
      a gviz_api.DataTable.
    """
    table_description = [
        ('opName', 'string', 'Input Op'),
        ('count', 'number', 'Count'),
        ('timeInMs', 'number', 'Total Time (in ms)'),
        ('timeInPercent', 'number',
         'Total Time (as % of total input-processing time)'),
        ('selfTimeInMs', 'number', 'Total Self Time (in ms)'),
        ('selfTimeInPercent', 'number',
         'Total Self Time (as % of total input-processing time)'),
        ('category', 'string', 'Category'),
    ]
    # Percentages arrive in 0-100; gviz wants 0-1 fractions.
    data = [[d.op_name, d.count, d.time_in_ms, d.time_in_percent / 100.0,
             d.self_time_in_ms, d.self_time_in_percent / 100.0, d.category]
            for d in ipa.input_op_details]
    breakdown = ipa.input_time_breakdown
    fmt = '{:.3f}'.format
    custom_properties = {
        'enqueue_us': fmt(breakdown.enqueue_us),
        'demanded_file_read_us': fmt(breakdown.demanded_file_read_us),
        'advanced_file_read_us': fmt(breakdown.advanced_file_read_us),
        'preprocessing_us': fmt(breakdown.preprocessing_us),
        # Key spelling ('nonequeue') kept as-is for downstream compatibility.
        'unclassified_nonequeue_us': fmt(breakdown.unclassified_non_enqueue_us),
    }
    return table_description, data, custom_properties
|
# kubeflow-fairing-0.7.2 | kubeflow-fairing-0.7.2//kubeflow/fairing/preprocessors/base.pyfile:/kubeflow/fairing/preprocessors/base.py:function:reset_tar_mtime/reset_tar_mtime
def reset_tar_mtime(tarinfo):
    """Zero the mtime of a tar entry so archives build reproducibly.

    :param tarinfo: a ``tarfile.TarInfo`` entry
    :returns: tarinfo: the same entry, with ``mtime`` forced to 0
    """
    setattr(tarinfo, 'mtime', 0)
    return tarinfo
|
fake-bpy-module-2.78-20200428 | fake-bpy-module-2.78-20200428//bpy/ops/scene.pyfile:/bpy/ops/scene.py:function:freestyle_lineset_add/freestyle_lineset_add | def freestyle_lineset_add():
    """Add a line set into the list of line sets.

    Signature stub for Blender's ``bpy.ops.scene.freestyle_lineset_add``
    operator; the real implementation lives in Blender's C core.
    """
    pass
|
# ofxstatement-austrian-0.0.4 | ofxstatement-austrian-0.0.4//src/ofxstatement/plugins/utils.pyfile:/src/ofxstatement/plugins/utils.py:function:clean_multiple_whitespaces/clean_multiple_whitespaces
def clean_multiple_whitespaces(uncleaned_string):
    """Collapse every run of whitespace to one space and trim the ends."""
    tokens = uncleaned_string.split()
    return ' '.join(tokens)
|
# dinglebop-0.0.4 | dinglebop-0.0.4//versioneer.pyfile:/versioneer.py:function:render_pep440_old/render_pep440_old
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" suffix marks a dirty working tree.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces['closest-tag']
    distance = pieces['distance']
    dirty = pieces['dirty']
    if tag:
        version = tag
        if distance or dirty:
            version += '.post%d' % distance
            if dirty:
                version += '.dev0'
    else:
        version = '0.post%d' % distance
        if dirty:
            version += '.dev0'
    return version
|
pyboto3-1.4.4 | pyboto3-1.4.4//pyboto3/directconnect.pyfile:/pyboto3/directconnect.py:function:associate_hosted_connection/associate_hosted_connection | def associate_hosted_connection(connectionId=None, parentConnectionId=None):
    """
    Associates a hosted connection and its virtual interfaces with a link aggregation group (LAG) or interconnect. If the target interconnect or LAG has an existing hosted connection with a conflicting VLAN number or IP address, the operation fails. This action temporarily interrupts the hosted connection's connectivity to AWS as it is being migrated.
    See also: AWS API Documentation

    :example: response = client.associate_hosted_connection(
        connectionId='string',
        parentConnectionId='string'
    )

    :type connectionId: string
    :param connectionId: [REQUIRED]
        The ID of the hosted connection.
        Example: dxcon-abc123
        Default: None

    :type parentConnectionId: string
    :param parentConnectionId: [REQUIRED]
        The ID of the interconnect or the LAG.
        Example: dxcon-abc123 or dxlag-abc123
        Default: None

    :rtype: dict
    :return: {
        'ownerAccount': 'string',
        'connectionId': 'string',
        'connectionName': 'string',
        'connectionState': 'ordering'|'requested'|'pending'|'available'|'down'|'deleting'|'deleted'|'rejected',
        'region': 'string',
        'location': 'string',
        'bandwidth': 'string',
        'vlan': 123,
        'partnerName': 'string',
        'loaIssueTime': datetime(2015, 1, 1),
        'lagId': 'string',
        'awsDevice': 'string'
    }

    :returns:
    Ordering : The initial state of a hosted connection provisioned on an interconnect. The connection stays in the ordering state until the owner of the hosted connection confirms or declines the connection order.
    Requested : The initial state of a standard connection. The connection stays in the requested state until the Letter of Authorization (LOA) is sent to the customer.
    Pending : The connection has been approved, and is being initialized.
    Available : The network link is up, and the connection is ready for use.
    Down : The network link is down.
    Deleting : The connection is in the process of being deleted.
    Deleted : The connection has been deleted.
    Rejected : A hosted connection in the 'Ordering' state will enter the 'Rejected' state if it is deleted by the end customer.
    """
    # pyboto3 ships signature/documentation stubs only; the real call is
    # dispatched through botocore at runtime.
    pass
|
# os-vm-expire-0.9.12 | os-vm-expire-0.9.12//os_vm_expire/queue/server.pyfile:/os_vm_expire/queue/server.py:function:monitored/monitored
def monitored(fn):
    """Decorator hook for task-method monitoring.

    Currently a no-op passthrough: no instrumentation is applied and the
    task callable is returned unchanged.
    """
    return fn
|
# dropbox-10.1.2 | dropbox-10.1.2//dropbox/team_log.pyclass:EventType/paper_folder_change_subscription
@classmethod
def paper_folder_change_subscription(cls, val):
    """
    Create an instance of this union set to the
    ``paper_folder_change_subscription`` tag carrying value ``val``.

    :param PaperFolderChangeSubscriptionType val:
    :rtype: EventType
    """
    tag = 'paper_folder_change_subscription'
    return cls(tag, val)
|
mailman-3.3.1 | mailman-3.3.1//src/mailman/interfaces/domain.pyclass:IDomainManager/remove | def remove(mail_host):
    """Remove the domain.

    zope.interface-style stub: declares the contract only, no body.

    :param mail_host: The email host name of the domain to remove.
    :type mail_host: string
    :raises KeyError: if the named domain does not exist.
    """
|
# libhxl-4.19 | libhxl-4.19//hxl/datatypes.pyfile:/hxl/datatypes.py:function:normalise_number/normalise_number
def normalise_number(v):
    """Attempt to convert a value to a number.

    Will convert to int type if it has no decimal places.

    @param v: the value (string, int, float, etc) to convert.
    @returns: an int or float value
    @exception ValueError: if the value cannot be converted
    @see: L{is_number}
    """
    try:
        n = float(v)
        # Exact-integer floats collapse to int (e.g. '3.0' -> 3).
        # NaN/inf fail the int() conversion and fall through to the
        # uniform ValueError below, matching the old behaviour.
        if n == int(n):
            return int(n)
        return n
    except (TypeError, ValueError, OverflowError):
        # Narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; those now propagate.
        raise ValueError('Cannot convert to number: {}'.format(v)) from None
|
zope.formlib-4.7.1 | zope.formlib-4.7.1//src/zope/formlib/interfaces.pyclass:IWidgets/__iter__ | def __iter__():
    """Return an iterator over the widgets, in order.

    zope.interface-style stub: declares the contract only, no body.
    """
|
# opal | opal//core/patient_lists.pyclass:PatientList/get_icon
@classmethod
def get_icon(klass):
    """
    Default getter: return the class-level ``icon`` attribute.
    """
    icon = klass.icon
    return icon
|
# royalherald | royalherald//response.pyclass:Response/from_dict
@classmethod
def from_dict(cls, d: dict) -> 'Response':
    """Recreate the response from a received :py:class:`dict`.

    The 'type' key is discarded; every other key becomes a constructor
    keyword argument. Raises KeyError if 'type' is missing (as before).
    """
    # Work on a shallow copy so the caller's dict is not mutated;
    # the original did ``del d['type']`` in place.
    payload = dict(d)
    del payload['type']
    return cls(**payload)
|
corpkit-2.3.8 | corpkit-2.3.8//corpkit/annotate.pyfile:/corpkit/annotate.py:function:delete_lines/delete_lines | def delete_lines(corpus, annotation, dry_run=True, colour={}):
    """
    Show or delete the necessary annotation lines in a parsed corpus.

    corpus: root directory whose files are walked recursively
    annotation: dict of {field: value} metadata annotations to remove, or a
        single tag name (str) to strip from '# tags=' lines
    dry_run: when True, print the would-be changes instead of rewriting files
    colour: optional ANSI codes ('red'/'green'/'reset') used to highlight
        dry-run output
    """
    # NOTE(review): mutable default ``colour={}`` is shared across calls;
    # harmless here because it is only read, never mutated.
    from corpkit.constants import OPENER, PYTHON_VERSION
    import re
    import os
    tagmode = True
    no_can_do = ['sent_id', 'parse']
    if isinstance(annotation, dict):
        tagmode = False
        for k, v in annotation.items():
            if k in no_can_do:
                print("You aren't allowed to delete '%s', sorry." % k)
                return
            if not v:
                # No value given: match any value for this field.
                v = '.*?'
            # NOTE(review): only the regex from the last dict entry survives
            # this loop; earlier entries are silently ignored.
            regex = re.compile('(# %s=%s)\\n' % (k, v), re.MULTILINE)
    else:
        if annotation in no_can_do:
            # NOTE(review): ``k`` is unbound in this branch, so this print
            # would raise NameError; presumably ``annotation`` was meant.
            print("You aren't allowed to delete '%s', sorry." % k)
            return
        regex = re.compile('((# tags=.*?)%s;?(.*?))\\n' % annotation, re.
            MULTILINE)
    # Collect every file under the corpus root.
    fs = []
    for root, dirs, fls in os.walk(corpus):
        for f in fls:
            fs.append(os.path.join(root, f))
    for f in fs:
        if PYTHON_VERSION == 2:
            from corpkit.process import saferead
            data = saferead(f)[0]
        else:
            with open(f, 'rb') as fo:
                data = fo.read().decode('utf-8', errors='ignore')
        if dry_run:
            # Mark matches with '<=======' instead of deleting them.
            if tagmode:
                repl_str = '\\1 <=======\\n%s\\2\\3 <=======\\n' % colour.get(
                    'green', '')
            else:
                repl_str = '\\1 <=======\\n'
            try:
                repl_str = colour['red'] + repl_str + colour['reset']
            except:
                # colour dict may lack 'red'/'reset'; fall back to plain text.
                pass
            data, n = re.subn(regex, repl_str, data)
            # Cap the preview at 100 (tag mode) or 50 marked changes.
            nspl = 100 if tagmode else 50
            delim = '<======='
            data = re.split(delim, data, maxsplit=nspl)
            toshow = delim.join(data[:nspl + 1])
            toshow = toshow.rsplit('\n\n', 1)[0]
            print(toshow)
            if n > 50:
                n = n - 50
                print('\n... and %d more changes ... ' % n)
        else:
            # Destructive path: strip the annotation and rewrite the file.
            if tagmode:
                repl_str = '\\2\\3\\n'
            else:
                repl_str = ''
            data = re.sub(regex, repl_str, data)
            with OPENER(f, 'w') as fo:
                from corpkit.constants import PYTHON_VERSION
                if PYTHON_VERSION == 2:
                    data = data.encode('utf-8', errors='ignore')
                fo.write(data)
|
# silx-0.12.0 | silx-0.12.0//silx/utils/array_like.pyfile:/silx/utils/array_like.py:function:is_array/is_array
def is_array(obj):
    """Return True if *obj* exposes the minimal numpy-array-like protocol.

    Required attributes are "shape", "dtype", "__getitem__" and
    "__array__" (satisfied by numpy arrays, h5py datasets, ...).

    :param obj: Array-like object candidate
    :return: boolean
    """
    required = ('shape', 'dtype', '__array__', '__getitem__')
    return all(hasattr(obj, name) for name in required)
|
# pyFreenet3-0.4.7 | pyFreenet3-0.4.7//freenet_passlib_170/totp.pyclass:TOTP/_uri_parse_error
@staticmethod
def _uri_parse_error(reason):
    """Build the ValueError raised for a malformed otpauth URI."""
    message = 'Invalid otpauth uri: %s' % (reason,)
    return ValueError(message)
|
logdivv | logdivv//divanalysis/classification.pyfile:/divanalysis/classification.py:function:classification_tex/classification_tex | def classification_tex(f, weblog, threshold_requests_per_session,
        classification_wanted_transaction, weblog_columns_dict):
    """
    Write LaTeX ``\\newcommand`` macros summarising diversifying patterns
    per classification into an open file handle.

    Parameters
    ----------
    f: open file handle receiving the LaTeX output
    weblog: pandas dataframe of requests
    threshold_requests_per_session: int; sessions with more requests than
        this are kept
    classification_wanted_transaction: list of category names to analyse
        (matching those used in classification_diversity)
    weblog_columns_dict: dict as returned by 'file_function'

    Returns
    -------
    f, the same file handle (optional to use)
    """
    # NOTE(review): the filtered list below is never referenced again --
    # the same excluded categories are hard-coded in the isin() filters.
    classification_wanted_transaction = list(set(
        classification_wanted_transaction) - {'social', 'search', 'other'})
    requests_per_session = weblog.groupby('session_id').size()
    # Keep only sessions with strictly more requests than the threshold.
    sessions_requests_over_threshold = list(requests_per_session[
        requests_per_session > threshold_requests_per_session].index)
    divpat_log = weblog[weblog.session_id.isin(
        sessions_requests_over_threshold)]
    num_reqs_inside = divpat_log.shape[0]
    # Drop requests whose requested or referrer category is excluded.
    divpat_log = divpat_log[~divpat_log.requested_category.isin(['social',
        'search', 'other'])]
    divpat_log = divpat_log[~divpat_log.referrer_category.isin(['social',
        'search', 'other'])]
    num_reqs_selected_cats = divpat_log.shape[0]
    f.write('\n% 5. Diversifying patterns according to classification')
    f.write('\n\\newcommand{\\%s}{%.1f}' % ('PCDivPatTotalSelectedCat', 
        100.0 * num_reqs_selected_cats / num_reqs_inside))
    f.write('\n\\newcommand{\\%s}{%d}' % ('DivPatTotalNumberRequests',
        divpat_log.shape[0]))
    f.write('\n\\newcommand{\\%s}{%d}' % ('DivPatTotalNumberUsers', len(
        divpat_log.userID.unique())))
    f.write('\n\\newcommand{\\%s}{%d}' % ('DivPatTotalNumberSessions', len(
        divpat_log.session_id.unique())))
    # Distinct pages = union of requested and referrer page columns.
    f.write('\n\\newcommand{\\%s}{%d}' % ('DivPatTotalNumberPages', len(
        list(set(divpat_log[weblog_columns_dict['requested_page_column']].
        unique()) | set(divpat_log[weblog_columns_dict[
        'referrer_page_column']].unique())))))
    return f
|
Nikola-8.0.4 | Nikola-8.0.4//nikola/utils.pyclass:LocaleBorg/initialize | @classmethod
def initialize(cls, locales: 'typing.Dict[str, str]', initial_lang: str):
    """Initialize LocaleBorg.

    locales: dict with custom locale name overrides.
    initial_lang: startup language code; must be non-empty or ValueError
        is raised.

    Resets any previous Borg state before installing the new one; note
    ``cls.__initial_lang`` is name-mangled to the defining class.
    """
    if not initial_lang:
        raise ValueError('Unknown initial language {0}'.format(initial_lang))
    cls.reset()
    cls.locales = locales
    cls.__initial_lang = initial_lang
    cls.initialized = True
|
imexam | imexam//ds9_viewer.pyclass:ds9/_stop_running_process | @classmethod
def _stop_running_process(cls):
    """Terminate DS9 child processes this class spawned (run at exit).

    Drains ``cls._process_list``; ``poll() is None`` means the child is
    still alive, so it gets ``terminate()``d.
    """
    while cls._process_list:
        process = cls._process_list.pop()
        if process.poll() is None:
            process.terminate()
|
z3c | z3c//table/interfaces.pyclass:IColumnHeader/update | def update():
    """Update the column header before rendering.

    zope.interface-style stub; override in subclasses if required.
    """
|
# cmu-course-api-1.5.3 | cmu-course-api-1.5.3//cmu_course_api/parse_schedules.pyfile:/cmu_course_api/parse_schedules.py:function:get_table_rows/get_table_rows
def get_table_rows(page):
    """
    Return the relevant <tr> bs4 Tags from *page*.

    page: a BeautifulSoup document with a <table>; the first two rows
    (header rows) are skipped.
    """
    all_rows = page.find_all('tr')
    return all_rows[2:]
|
# xandikos-0.2.1 | xandikos-0.2.1//xandikos/store/vdir.pyclass:VdirStore/open_from_path
@classmethod
def open_from_path(cls, path: str) -> 'VdirStore':
    """Open a VdirStore rooted at *path*.

    :param path: filesystem path of the vdir
    :return: A `VdirStore`
    """
    store = cls(path)
    return store
|
# mlfns | mlfns//evaluate/perf.pyfile:/evaluate/perf.py:function:summarize_evaluation/summarize_evaluation
def summarize_evaluation(metrics, model=None):
    """
    Print a summary of the metrics returned by Keras ``evaluate()``
    (typically computed on the test dataset).

    # Arguments
        metrics: values returned by the Keras evaluate() method
        model: the evaluated model; when given, its ``metrics_names``
            label the values, otherwise metrics is taken as
            [loss, accuracy]
    # Returns
        None
    """
    print('\nSummary - Model Evaluation:')
    print('-' * 30)
    if model:
        for i, name in enumerate(model.metrics_names):
            print(' {} = {:.3f}'.format(name, metrics[i]))
    else:
        print(' (Loss, Accuracy) = ({:.3f}, {:.3f}%)'.format(
            metrics[0], metrics[1] * 100))
|
# cis_interface | cis_interface//communication/ZMQComm.pyclass:ZMQComm/close_registry_entry
@classmethod
def close_registry_entry(cls, value):
    """Close a registry entry; return True if it was open and got closed."""
    if value.closed:
        return False
    # linger=0 drops unsent messages instead of blocking on close.
    value.close(linger=0)
    return True
|
NREL-reV-0.3.2 | NREL-reV-0.3.2//reV/rep_profiles/rep_profiles.pyclass:RepProfiles/run | @classmethod
def run(cls, gen_fpath, rev_summary, reg_cols, gid_col='gen_gids', cf_dset=
        'cf_profile', rep_method='meanoid', err_method='rmse', weight=
        'gid_counts', n_profiles=1, fout=None, save_rev_summary=True,
        scaled_precision=False, max_workers=None):
    """Run representative profiles by finding the closest single profile
    to the weighted meanoid for each SC region.

    Parameters
    ----------
    gen_fpath : str
        Filepath to reV gen output file to extract "cf_profile" from.
    rev_summary : str | pd.DataFrame
        Aggregated rev supply curve summary file. Str filepath or full df.
    reg_cols : str | list | None
        Label(s) for a categorical region column(s) to extract profiles
        for. e.g. "state" will extract a rep profile for each unique entry
        in the "state" column in rev_summary.
    gid_col : str
        Column label in rev_summary that contains the generation gids
        (data index in gen_fpath).
    cf_dset : str
        Dataset name to pull generation profiles from.
    rep_method : str
        Method identifier for calculation of the representative profile.
    err_method : str
        Method identifier for calculation of error from the representative
        profile.
    weight : str | None
        Column in rev_summary used to apply weighted mean to profiles.
        The supply curve table data in the weight column should have
        weight values corresponding to the gid_col in the same row.
    n_profiles : int
        Number of representative profiles to save to fout.
    fout : str, optional
        filepath to output h5 file, by default None
    save_rev_summary : bool, optional
        Flag to save full reV SC table to rep profile output.,
        by default True
    scaled_precision : bool, optional
        Flag to scale cf_profiles by 1000 and save as uint16.,
        by default False
    max_workers : int, optional
        Number of parallel workers. 1 will run serial, None will use all
        available., by default None

    Returns
    -------
    profiles : dict
        dict of n_profile-keyed arrays with shape (time, n) for the
        representative profiles for each region.
    meta : pd.DataFrame
        Meta dataframes recording the regions and the selected rep profile
        gid.
    time_index : pd.DatatimeIndex
        Datetime Index for represntative profiles
    """
    # Classmethod convenience wrapper: build the instance, execute, and
    # hand back its (private) result attributes.
    rp = cls(gen_fpath, rev_summary, reg_cols, gid_col=gid_col, cf_dset=
        cf_dset, rep_method=rep_method, err_method=err_method, n_profiles=
        n_profiles, weight=weight)
    rp._run(fout=fout, save_rev_summary=save_rev_summary, scaled_precision=
        scaled_precision, max_workers=max_workers)
    return rp._profiles, rp._meta, rp._time_index
|
# nmrglue | nmrglue//process/pipe_proc.pyfile:/process/pipe_proc.py:function:ebs/ebs
def ebs(dic, data):
    """
    EBS Reconstruction.

    Placeholder: this NMRPipe function is not implemented in pipe_proc.
    """
    raise NotImplementedError
|
fake-bpy-module-2.78-20200428 | fake-bpy-module-2.78-20200428//bpy/ops/constraint.pyfile:/bpy/ops/constraint.py:function:delete/delete | def delete():
    """Remove constraint from constraint stack.

    Signature stub for Blender's ``bpy.ops.constraint.delete`` operator;
    the real implementation lives in Blender's C core.
    """
    pass
|
# dropbox-10.1.2 | dropbox-10.1.2//dropbox/team_log.pyclass:FederationStatusChangeAdditionalInfo/non_trusted_team_details
@classmethod
def non_trusted_team_details(cls, val):
    """
    Create an instance of this union set to the
    ``non_trusted_team_details`` tag carrying value ``val``.

    :param NonTrustedTeamDetails val:
    :rtype: FederationStatusChangeAdditionalInfo
    """
    tag = 'non_trusted_team_details'
    return cls(tag, val)
|
# CNFgen-0.8.4.1 | CNFgen-0.8.4.1//cnfformula/graphs.pyfile:/cnfformula/graphs.py:function:enumerate_edges/enumerate_edges
def enumerate_edges(graph):
    """Return the edges of `graph` in a stable sorted order.

    The sorted list is cached on the graph object as ``ordered_edges``;
    repeat calls return the cache after asserting it still matches the
    graph's current edge set.

    Parameters
    ----------
    graph : input graph
    """
    if hasattr(graph, 'ordered_edges'):
        assert set(graph.edges()) == set(graph.ordered_edges)
        return graph.ordered_edges
    ordered = sorted(graph.edges())
    graph.ordered_edges = ordered
    return ordered
|
# betterbib-3.5.7 | betterbib-3.5.7//betterbib/tools.pyfile:/betterbib/tools.py:function:translate_month/translate_month
def translate_month(key):
    """Normalise a BibTeX month value to three-letter abbreviations.

    The month value can take weird forms: an int, a string representing an
    int, a spelled-out name, or a range like "June-July". Returns e.g.
    'jun', 'jun # "-" # jul' for ranges, or None (after printing a
    warning) when the value cannot be interpreted.
    """
    months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep',
              'oct', 'nov', 'dec']
    try:
        index = int(key)
    except (TypeError, ValueError):
        pass
    else:
        # Bug fix: the original indexed months[int(key) - 1] unchecked,
        # so 0 silently became 'dec' and 13 raised an uncaught IndexError.
        if 1 <= index <= 12:
            return months[index - 1]
        print("Unknown month value '{}'. Skipping.".format(key))
        return None
    strings = []
    for part in key.split('-'):
        month = part[:3].lower()
        if month not in months:
            print("Unknown month value '{}'. Skipping.".format(key))
            return None
        strings.append(month)
    return ' # "-" # '.join(strings)
|
# python-amazon-mws-tools-0.0.4 | python-amazon-mws-tools-0.0.4//mwstools/parsers/base.pyfile:/mwstools/parsers/base.py:function:first_element_or_none/first_element_or_none
def first_element_or_none(element_list):
    """
    Return the first element of an lxml selector result, or None if the
    result is empty/falsy.

    :param element_list: lxml selector result
    :return: first element or None
    """
    return element_list[0] if element_list else None
|
# yolo | yolo//yolo_file.pyclass:YoloFile/from_path
@classmethod
def from_path(cls, path):
    """Load a yolo.yaml file given a path to the file."""
    with open(path) as fp:
        loaded = cls.from_file(fp)
    return loaded
|
# artellapipe-libs-kitsu-0.0.7 | artellapipe-libs-kitsu-0.0.7//versioneer.pyfile:/versioneer.py:function:render_pep440_old/render_pep440_old
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" suffix marks a dirty working tree.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    suffix = '.dev0' if pieces['dirty'] else ''
    if not pieces['closest-tag']:
        return '0.post%d' % pieces['distance'] + suffix
    if pieces['distance'] or pieces['dirty']:
        return pieces['closest-tag'] + '.post%d' % pieces['distance'] + suffix
    return pieces['closest-tag']
|
# geokey-1.11.2 | geokey-1.11.2//geokey/categories/models.pyclass:Field/get_field_types
@classmethod
def get_field_types(cls):
    """
    Return the available field types (the direct subclasses of Field).

    Returns
    -------
    List
        The subclasses of Field.
    """
    return list(cls.__subclasses__())
|
# meshless | meshless//particles.pyfile:/particles.py:function:get_dx/get_dx
def get_dx(x1, x2, y1, y2, L=1.0, periodic=True):
    """
    Component-wise difference (x1 - x2, y1 - y2), minimum-image corrected
    when periodic boundaries are in use.

    L: box size; a scalar (square box) or a length-2 sequence (Lx, Ly)
    periodic: whether to wrap differences into [-L/2, L/2]
    """
    dx = x1 - x2
    dy = y1 - y2
    if not periodic:
        return dx, dy
    if hasattr(L, '__len__'):
        Lx, Ly = L[0], L[1]
    else:
        Lx = Ly = L
    half_x = Lx / 2.0
    half_y = Ly / 2.0
    # Single wrap: assumes |dx|, |dy| < 1.5 * box size.
    if dx > half_x:
        dx -= Lx
    elif dx < -half_x:
        dx += Lx
    if dy > half_y:
        dy -= Ly
    elif dy < -half_y:
        dy += Ly
    return dx, dy
|
# webdataset-0.1.21 | webdataset-0.1.21//webdataset/tenbin.pyfile:/webdataset/tenbin.py:function:check_infos/check_infos
def check_infos(data, infos, required_infos=None):
    """Implement infos verification logic.

    required_infos semantics:
      - None or False: skip verification, return data alone
      - True: no verification, return (data, infos)
      - tuple/list: each entry must equal the corresponding actual info;
        return data on success, raise ValueError on mismatch
    """
    if required_infos is False or required_infos is None:
        return data
    if required_infos is True:
        return data, infos
    if not isinstance(required_infos, (tuple, list)):
        raise ValueError('required_infos must be tuple or list')
    for required, actual in zip(required_infos, infos):
        # Bug fix: the comparison below was missing, so any non-trivial
        # required_infos raised unconditionally on the first pair.
        if required != actual:
            raise ValueError(
                f"actual info {actual} doesn't match required info {required}")
    return data
|
# nbsite | nbsite//examples/sites/holoviews/holoviews/core/tree.pyclass:AttrTree/merge
@classmethod
def merge(cls, trees):
    """
    Merge a collection of AttrTree objects into the first one.

    Mutates and returns trees[0]; note the first tree is also update()d
    with itself, matching the original behavior.
    """
    merged = trees[0]
    for tree in trees:
        merged.update(tree)
    return merged
|
# perceval | perceval//backends/core/pipermail.pyclass:Pipermail/has_archiving
@classmethod
def has_archiving(cls):
    """Return whether fetched items can be archived.

    :returns: False; this backend does not support an items archive
    """
    return False
|
# hydrostats-0.78 | hydrostats-0.78//hydrostats/data.pyfile:/hydrostats/data.py:function:daily_std_dev/daily_std_dev
def daily_std_dev(merged_data):
    """Daily seasonal standard deviation of time series in a DataFrame.

    Groups the datetime index by its 'MM/DD' representation and takes the
    sample standard deviation of every column within each day-of-year
    group (pooling the same calendar day across years).

    Parameters
    ----------
    merged_data: DataFrame
        Pandas DataFrame with a datetime index and float-valued columns.

    Returns
    -------
    DataFrame
        Indexed by 'MM/DD' strings with the per-day standard deviation of
        each column as float values.
    """
    day_labels = merged_data.index.strftime('%m/%d')
    return merged_data.groupby(day_labels).std()
|
cli | cli//sg.pyfile:/sg.py:function:postgresql_h/postgresql_h | def postgresql_h():
    # NOTE(review): the string below doubles as the docopt usage text for
    # "sg-cli postgresql"; do not reformat it -- docopt parses it at runtime.
    '\n \r\n PostgreSQL Help Menu\n\n Usage:\n sg-cli postgresql <command> [<args>...]\n\n Options:\n -v, --verbose Increase verbosity\n -h, --help Show this menu\n -V --version Show version\n\n Commands:\n postgresql commands\n '
|
# datacube | datacube//drivers/postgres/sql.pyfile:/drivers/postgres/sql.py:function:escape_pg_identifier/escape_pg_identifier
def escape_pg_identifier(engine, name):
    """
    Escape identifiers (tables, fields, roles, etc) for inclusion in
    dynamically generated SQL statements.

    Delegates quoting to the server's quote_ident(), since psycopg2 can
    safely merge query arguments but not identifiers.
    See http://initd.org/psycopg/docs/sql.html for more information.
    """
    result = engine.execute('select quote_ident(%s)', name)
    return result.scalar()
|
# channels-2.4.0 | channels-2.4.0//channels/consumer.pyfile:/channels/consumer.py:function:get_handler_name/get_handler_name
def get_handler_name(message):
    """
    Derive the consumer handler name from a channels message.

    Validates the message carries a sane 'type' and converts dots to
    underscores (e.g. 'websocket.connect' -> 'websocket_connect').
    """
    if 'type' not in message:
        raise ValueError("Incoming message has no 'type' attribute")
    msg_type = message['type']
    if msg_type.startswith('_'):
        raise ValueError('Malformed type in message (leading underscore)')
    return msg_type.replace('.', '_')
|
fake-bpy-module-2.78-20200428 | fake-bpy-module-2.78-20200428//bpy/ops/curve.pyfile:/bpy/ops/curve.py:function:separate/separate | def separate():
    """Separate selected points from connected unselected points into a new object.

    Signature stub for Blender's ``bpy.ops.curve.separate`` operator;
    the real implementation lives in Blender's C core.
    """
    pass
|
# tiquations | tiquations//equations.pyfile:/equations.py:function:mass_fmg/mass_fmg
def mass_fmg(force, gravitational_field_strength):
    """Usage: Find mass from gravitational force, m = F / g."""
    mass = force / gravitational_field_strength
    return mass
|
# openquake | openquake//hmtk/parsers/source_model/nrml04_parser.pyfile:/hmtk/parsers/source_model/nrml04_parser.py:function:float_/float_
def float_(value):
    """
    Cast *value* to float; falsy values (None, '', 0) yield None.
    """
    return float(value) if value else None
|
# csirtg-peers-0.5 | csirtg-peers-0.5//versioneer.pyfile:/versioneer.py:function:render_pep440_pre/render_pep440_pre
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces['closest-tag']
    distance = pieces['distance']
    if not tag:
        return '0.post.dev%d' % distance
    if not distance:
        return tag
    return tag + '.post.dev%d' % distance
|
# UCCA-1.3.0 | UCCA-1.3.0//scripts/distances/align.pyfile:/scripts/distances/align.py:function:two_sided_f/two_sided_f
def two_sided_f(count1, count2, sum1, sum2):
    """Compute an F-score-like harmonic mean of recall (count1/sum1)
    and precision (count2/sum2); degenerate inputs yield 0 after a
    warning is printed."""
    if not (sum1 and sum2):
        print('got empty sums for F scores')
        return 0
    if sum1 < count1 or sum2 < count2:
        print('got empty sums for F scores')
        return 0
    precision = count2 / sum2
    recall = count1 / sum1
    denom = precision + recall
    if denom == 0:
        return 0
    return 2 * (precision * recall) / denom
|
# jinjamodificado-2.10 | jinjamodificado-2.10//jinja2/sandbox.pyfile:/jinja2/sandbox.py:function:unsafe/unsafe
def unsafe(f):
    """Mark a function or method as unsafe for the sandbox.

    ::

        @unsafe
        def delete(self):
            pass
    """
    setattr(f, 'unsafe_callable', True)
    return f
|
# smoke-zephyr-2.0.0 | smoke-zephyr-2.0.0//smoke_zephyr/utilities.pyfile:/smoke_zephyr/utilities.py:function:xfrange/xfrange
def xfrange(start, stop=None, step=1):
    """
    Yield an arithmetic progression of floats (range() with fractional
    steps).

    :param start: Starting number (or the stop when *stop* is omitted).
    :type start: float, int, long
    :param stop: Exclusive upper bound.
    :type stop: float, int, long
    :param step: Increment between yielded values.
    :type step: float, int, long
    """
    if stop is None:
        start, stop = 0.0, start
    current = float(start)
    while current < stop:
        yield current
        current += step
|
# statsmodels-0.11.1 | statsmodels-0.11.1//statsmodels/stats/libqsturng/qsturng_.pyfile:/statsmodels/stats/libqsturng/qsturng_.py:function:_select_ps/_select_ps
def _select_ps(p):
    """returns the points to use for interpolating p"""
    # (lower bound, interpolation triple), scanned from the top down; the
    # first bracket whose lower bound p reaches wins.
    brackets = (
        (0.99, (0.99, 0.995, 0.999)),
        (0.975, (0.975, 0.99, 0.995)),
        (0.95, (0.95, 0.975, 0.99)),
        (0.9125, (0.9, 0.95, 0.975)),
        (0.875, (0.85, 0.9, 0.95)),
        (0.825, (0.8, 0.85, 0.9)),
        (0.7625, (0.75, 0.8, 0.85)),
        (0.675, (0.675, 0.75, 0.8)),
        (0.5, (0.5, 0.675, 0.75)),
    )
    for lower, triple in brackets:
        if p >= lower:
            return triple
    return 0.1, 0.5, 0.675
|
# activitysim-0.9.2 | activitysim-0.9.2//activitysim/core/orca.pyfile:/activitysim/core/orca.py:function:_is_leaf_node/_is_leaf_node
def _is_leaf_node(merge_node):
    """
    Returns True for single-entry dicts whose only value is empty,
    like {'a': {}}.
    """
    if len(merge_node) != 1:
        return False
    (only_value,) = merge_node.values()
    return not only_value
|
# suspect | suspect//core.pyfile:/core.py:function:adjust_phase/adjust_phase
def adjust_phase(data, zero_phase, first_phase=0, fixed_frequency=0):
    """
    Adjust the phase of an MRSBase object by delegating to its own
    ``adjust_phase`` method.

    Parameters
    ----------
    data : MRSSpectrum
        The MRSSpectrum object to be phased.
    zero_phase : scalar
        Change to the zero order phase, in radians.
    first_phase : scalar, optional
        Change to the first order phase, in radians per Hz.
    fixed_frequency : scalar, optional
        Frequency, in Hz, unchanged by the first order phase shift.

    Returns
    -------
    out : MRSSpectrum
        A new MRSSpectrum object with adjusted phase.
    """
    phased = data.adjust_phase(zero_phase, first_phase, fixed_frequency)
    return phased
|
# django-view-acl-0.2 | django-view-acl-0.2//view_acl/utils.pyfile:/view_acl/utils.py:function:generate_perm_name/generate_perm_name
def generate_perm_name(func):
    """Generate a permission name ("module.funcname") from a callback.

    A falsy *func* (or a falsy __module__/__name__) contributes an empty
    half, so ``None`` yields '.'.
    """
    module_part = (func and func.__module__) or ''
    name_part = (func and func.__name__) or ''
    return '.'.join((module_part, name_part))
|