import numpy as np
import pytest
from ..algorithms.segment.trained_model import calculate_volume
@pytest.fixture
def centroids(scope='session'):
yield [
{'x': 0, 'y': 0, 'z': 0},
{'x': 32, 'y': 32, 'z': 28},
{'x': 45, 'y': 45, 'z': 12}]
@pytest.fixture
def centroids_alt(scope='session'):
yield [
{'x': 0, 'y': 0, 'z': 0},
{'x': 0, 'y': 0, 'z': 0},
{'x': 45, 'y': 45, 'z': 12}]
@pytest.fixture
def volumes(scope='session'):
yield [100, 20, 30]
@pytest.fixture
def volumes_alt(scope='session'):
yield [100, 100, 30]
def get_mask_path(tmpdir, mask):
path = tmpdir.mkdir('masks').join('mask.npy')
np.save(str(path), mask)
return path
def generate_mask(centroids, volumes, shape=(50, 50, 29)):
mask = np.zeros(shape, dtype=np.bool_)
for centroid, volume in zip(centroids, volumes):
centroid_ = np.asarray([centroid['x'], centroid['y'], centroid['z']])
free_voxels = np.where(mask != -1)
free_voxels = np.asarray(free_voxels).T
free_voxels = sorted(free_voxels, key=lambda x: np.linalg.norm(x - centroid_, ord=2))
free_voxels = np.asarray(free_voxels[:volume]).T
mask[(free_voxels[0], free_voxels[1], free_voxels[2])] = True
return mask
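# Illustrative sketch, not part of the original test suite: generate_mask()
# fills, for each centroid, the `volume` closest voxels with True.  Because
# free_voxels is recomputed from `mask != -1` (always True for a boolean
# mask), overlapping balls may claim the same voxels again, which is what
# the overlap tests below rely on.
def _demo_generate_mask():
    demo_mask = generate_mask(
        [{'x': 0, 'y': 0, 'z': 0}, {'x': 0, 'y': 0, 'z': 0}],
        [10, 10],
        shape=(8, 8, 8))
    # Both balls are centred on the same voxel, so only 10 voxels are set.
    assert demo_mask.sum() == 10
    return demo_mask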
def test_volume_calculation(tmpdir, centroids, volumes):
mask = generate_mask(centroids, volumes)
    # The balls are modeled so that they do not overlap
assert mask.sum() == 150
path = get_mask_path(tmpdir, mask)
calculated_volumes = calculate_volume(str(path), centroids)
    # Both the number of volumes and their values must be preserved
assert len(calculated_volumes) == len(volumes)
assert calculated_volumes == volumes
def test_overlapped_volume_calculation(tmpdir, centroids_alt, volumes_alt):
mask = generate_mask(centroids_alt, volumes_alt)
    # The total masked volume must be 100 + 30, since the first two balls fully overlap
assert mask.sum() == 130
path = get_mask_path(tmpdir, mask)
calculated_volumes = calculate_volume(str(path), centroids_alt)
    # Even though the balls overlap, the number of reported volumes must be preserved
assert len(calculated_volumes) == len(volumes_alt)
assert calculated_volumes == volumes_alt
def test_overlapped_dicom_volume_calculation(tmpdir, dicom_path, centroids_alt, volumes_alt):
mask = generate_mask(centroids_alt, volumes_alt)
    # The total masked volume must be 100 + 30, since the first two balls fully overlap
assert mask.sum() == 130
path = get_mask_path(tmpdir, mask)
calculated_volumes = calculate_volume(str(path), centroids_alt, dicom_path)
    # Even though the balls overlap, the number of reported volumes must be preserved
assert len(calculated_volumes) == len(volumes_alt)
assert all(1.2360 >= mm / vox >= 1.2358 for vox, mm in zip(volumes_alt, calculated_volumes))
| {
"content_hash": "9816be662fa6fb24a5375af9d9e13db1",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 96,
"avg_line_length": 31.47826086956522,
"alnum_prop": 0.6567679558011049,
"repo_name": "vessemer/concept-to-clinic",
"id": "7f342d23bfe7481dfa564d929e04a4103f4c4ac9",
"size": "2896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prediction/src/tests/test_volume_calculation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "777"
},
{
"name": "HTML",
"bytes": "442"
},
{
"name": "JavaScript",
"bytes": "31269"
},
{
"name": "Python",
"bytes": "296297"
},
{
"name": "Shell",
"bytes": "2951"
},
{
"name": "Vue",
"bytes": "70043"
}
],
"symlink_target": ""
} |
"""
The ACLHound command-line client assists in ACL management.
Usage: aclhound [-d] [-j] [--version] [--help] <command> [<args>...]
Options:
-h --help Show this screen
-d --debug Enable debugging output
-j --jenkins Use jenkins environmental variables like WORKSPACE
--version Show version information
Subcommands, use 'aclhound help <subcommand>' to learn more:
init Initialise aclhound end-user configuration.
fetch Retrieve latest ACLHound policy from repository server.
build Compile policy into network configuration, output on STDOUT
deploy Deploy compiled configuration to a network device
reset Delete aclhound directory and fetch copy from repository.
"""
from __future__ import print_function, division, absolute_import, \
unicode_literals
from docopt import docopt
from docopt import DocoptExit
from grako.exceptions import * # noqa
from grako.parsing import * # noqa
from subprocess import call, Popen, PIPE, check_output
import ConfigParser
import os
import sys
import aclhound
from aclhound.deploy import Deploy
from aclhound.generate import generate_policy
class Settings(ConfigParser.ConfigParser):
"""
Settings are a combination of system-wide settings and
user specific settings.
    Configuration is derived by taking '/etc/aclhound/aclhound.conf'
and overlaying that with '~/.aclhound/client.conf'
"""
def __init__(self):
"""
        Test whether the appropriate files exist, then load the configuration.
"""
user_path = os.path.expanduser('~/.aclhound')
if not os.path.exists('/etc/aclhound/aclhound.conf'):
print("ERROR: Could not open /etc/aclhound/aclhound.conf")
print("Has ACLHound been properly installed? Contact your admin")
sys.exit(2)
if not os.path.isdir(user_path):
err = "~/.aclhound/ does not exist yet"
raise Exception(err)
elif not os.path.exists('%s/client.conf' % user_path):
err = "~/.aclhound/client.conf does not exist yet"
raise Exception(err)
ConfigParser.ConfigParser.__init__(self)
self.readfp(open('/etc/aclhound/aclhound.conf'))
self.read([os.path.expanduser("~/.aclhound/client.conf")])
def do_init(args, write_config=True):
"""
    Initialise user-specific settings: ask the user for a username on the
    repository server and a location to store ACLHound policy, then offer
    to make an initial clone.
Usage: aclhound [-d] init [--batch]
Options:
--batch Automatically guess all settings (non-interactive mode).
"""
    batch = len(args) > 1 and args[-1] == "--batch"
if not batch:
print("""Welcome to ACLHound!
A few user-specific settings are required to set up the proper
environment. The settings can always be changed by editing the
'~/.aclhound/client.conf' file with a text editor.""")
import getpass
username = getpass.getuser()
if not batch:
username = raw_input("Username on Gerrit server [%s]: "
% username) or username
location = "~/aclhound"
if not batch:
location = raw_input("Location for ACLHound datafiles [%s]: "
% location) or location
if not os.path.exists(os.path.expanduser("~/.aclhound")):
os.mkdir(os.path.expanduser("~/.aclhound"), 0700)
if not os.path.exists(os.path.expanduser(location)):
os.mkdir(os.path.expanduser(location), 0700)
if write_config:
cfgfile = open("%s/client.conf" % os.path.expanduser("~/.aclhound"), 'w')
config = ConfigParser.ConfigParser()
config.add_section('user')
config.set('user', 'username', username)
config.set('user', 'location', location)
config.write(cfgfile)
if not batch:
clone = raw_input("Make initial clone of repository data [y]: ") or "y"
elif batch:
clone = 'y'
if clone == 'y':
cfg = Settings()
if cfg.getboolean('general', 'local_only'):
print("INFO: 'local_only' enabled in /etc/aclhound/aclhound.conf.")
print("HINT: manually copy your data to %s"
% os.path.expanduser(location))
print("INFO: git-review and gerrit intergration are skipped for now")
return
os.chdir(os.path.expanduser(location))
run(['git', 'clone', 'ssh://%s@%s:%s/%s' %
(username,
cfg.get('gerrit', 'hostname'),
cfg.get('gerrit', 'port'),
cfg.get('gerrit', 'repository')), '.'], 0)
if not os.path.exists('.gitreview'):
# create .gitreview file if it does not exist
gerritcfg = ConfigParser.ConfigParser()
gerritcfg.add_section('gerrit')
gerritcfg.set('gerrit', 'host', cfg.get('gerrit', 'hostname'))
gerritcfg.set('gerrit', 'project', cfg.get('gerrit', 'repository'))
gerritcfg.write(open('.gitreview', 'w'))
run(['git', 'add', '.gitreview'], 0)
run(['git', 'commit', '-am', 'add gitreview'], 0)
run(['git', 'push'], 0)
if not os.path.exists('.gitignore'):
gitignore = open('.gitignore', 'w')
gitignore.write('networkconfigs/**\n')
gitignore.close()
run(['git', 'add', '.gitignore'], 0)
            run(['git', 'commit', '-am', 'add gitignore'], 0)
run(['git', 'push'], 0)
# create directories
for directory in ['objects', 'devices', 'policy', 'networkconfig']:
if not os.path.exists(directory):
os.mkdir(directory)
# setup the review hooks
run(['git', 'review', '--setup'], 0)
# Rebase is better to work with in Gerrit, see
# http://stevenharman.net/git-pull-with-automatic-rebase
run(['git', 'config', '--local', 'branch.autosetuprebase', 'always'], 0)
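# Illustrative sketch, not part of ACLHound itself: the ~/.aclhound/client.conf
# file written by do_init() above has roughly this shape (values hypothetical):
#
#   [user]
#   username = jdoe
#   location = ~/aclhound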
def run(cmd, return_channel=0, debug=None):
if return_channel == 0:
print('INFO: executing: %s' % ' '.join(cmd))
ret = call(cmd)
if not ret == 0:
print("ERROR: executing '%s' failed." % ' '.join(cmd))
print('HINT: investigate manually')
sys.exit(2)
elif return_channel == 1:
ret = check_output(cmd)
if debug:
print('INFO: executing: %s' % ' '.join(cmd))
print(ret)
return ret
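# Illustrative usage sketch for run() above (the git commands are examples
# only): return_channel=0 executes the command via call() and aborts the
# program on a non-zero exit code, while return_channel=1 captures and
# returns the command's stdout via check_output().
def _run_examples():
    run(['git', 'status'], 0)
    current_branches = run(['git', 'branch'], 1)
    return current_branches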
class ACLHoundClient(object):
"""
A client which compiles abstract ACL policy into vendor-specific network
configurations.
"""
def __init__(self, args):
try:
self._settings = Settings()
except Exception as err:
print("ERROR: Whoops!")
print("ERROR: %s" % " ".join(err.args))
print("""HINT: possible config corruption, delete it and run 'aclhound init'""")
sys.exit(2)
if args['jenkins'] and 'WORKSPACE' in os.environ:
data_dir = os.environ['WORKSPACE']
else:
data_dir = os.path.expanduser(self._settings.get('user', 'location'))
os.chdir(data_dir)
print("INFO: working with data in %s" % data_dir)
def fetch(self, args):
"""
Retrieve latest changes in 'master' from the repository server.
Usage: aclhound fetch
"""
run(['git', 'checkout', 'master'])
run(['git', 'remote', 'update'])
run(['git', 'pull', '--rebase'])
run(['git', 'pull', '--all', '--prune'])
def build(self, args):
"""
        Compile policy into vendor-specific network configuration, output on STDOUT.
Usage: aclhound [-d] [-j] build <devicename>
aclhound [-d] [-j] build all
Options:
-d --debug Enable debugging output
-j --jenkins Use jenkins environmental variables like WORKSPACE
Arguments:
<devicename>
The device file for which a network config must be generated.
<all>
Build all network policies into their respective vendor specific
representation. Useful as 'review' test in Jenkins.
Note: please ensure you run 'build' inside your ACLHound data directory
"""
if args['<devicename>'] == "all":
import glob
devices_list = set(glob.glob('devices/*')) - \
set(glob.glob('devices/*.ignore'))
else:
devices_list = [args['<devicename>'].encode('ascii', 'ignore')]
def go_build(filename):
print("INFO: building configuration for %s" % filename)
with open(filename, 'r') as f:
for line in f:
line = line.strip()
if line.split(' ')[0] == "vendor":
vendor = line.split(' ')[1]
elif line.split(' ')[0] == "transport":
transport = line.split(' ')[1]
if transport not in ['telnet', 'ssh']:
print("ERROR: unknown transport mechanism: %s" % transport)
sys.exit(2)
elif line.split(' ')[0] == "include":
polname = line.split(' ')[1]
print("")
print("")
print("Seed policy name: %s" % polname)
print(" IPv4:")
for line in generate_policy(polname, afi=4,
vendor=vendor).split('\n'):
print(" %s" % line)
print(" ---------")
print(" IPv6:")
for line in generate_policy(polname, afi=6,
vendor=vendor).split('\n'):
print(" %s" % line)
print("")
for device in devices_list:
go_build(device)
def deploy(self, args):
"""
Deploy a compiled version of the ACLs on a network device
Usage: aclhound [-d] [-j] deploy <devicename>
aclhound [-d] [-j] deploy all
Options:
-d --debug Enable debugging output
-j --jenkins Use jenkins environmental variables like WORKSPACE
Arguments:
<devicename>
Hostname of the device on which the generated ACLs must be
deployed.
<all>
ACLHound will take all device files from devices/ (except
filenames with a '.ignore' suffix), compile the policy and
upload the policies to the device. "all" is suitable for cron or
jenkins.
Note: please ensure you run 'deploy' inside your ACLHound data directory
"""
if args['<devicename>'] == "all":
import glob
devices_list = set(glob.glob('devices/*')) - \
set(glob.glob('devices/*.ignore'))
else:
devices_list = [args['<devicename>'].encode('ascii', 'ignore')]
def do_deploy(filename):
print("INFO: deploying %s" % filename)
acls = {}
hostname = os.path.basename(filename)
with open(filename, 'r') as f:
timeout = 60
transport = "ssh"
for line in f:
line = line.strip()
if line.split(' ')[0] == "vendor":
vendor = line.split(' ')[1]
elif line.split(' ')[0] == "transport":
transport = line.split(' ')[1]
if transport not in ['telnet', 'ssh']:
print("ERROR: unknown transport mechanism: %s" % transport)
sys.exit(2)
elif line.split(' ')[0] == "save_config":
save_config = str2bool(line.split(' ')[1])
elif line.split(' ')[0] == "timeout":
timeout = int(line.split(' ')[1])
elif line.split(' ')[0] == "include":
polname = line.split(' ')[1]
for afi in [4, 6]:
name = "%s-v%s" % (polname, afi)
policy = generate_policy(vendor=vendor,
filename=polname,
afi=afi)
acls[name] = {"afi": afi,
"name": name,
"policy": policy}
a = Deploy(hostname=hostname, vendor=vendor, acls=acls,
transport=transport, save_config=save_config,
timeout=timeout)
a.deploy()
for dev in devices_list:
do_deploy(dev)
def reset(self, args):
"""
Reset ACLHound data directory by deleting the directory, followed
by a fresh clone based on ~/.aclhound/client.conf settings
Usage: aclhound reset
If you are terribly lost in branches and git voodoo, this is an
easy way out.
"""
location = os.path.expanduser(self._settings.get('user', 'location'))
confirm = raw_input("Do you want to destroy all local work (%s) and start over? [yn] " \
% location)
if confirm == "y":
import shutil
os.chdir(os.path.expanduser('~'))
shutil.rmtree(location)
do_init((None, "--batch"), write_config=False)
else:
print("INFO: Did not touch anything...")
def trim(docstring):
"""
Function to trim whitespace from docstring
c/o PEP 257 Docstring Conventions
<http://www.python.org/dev/peps/pep-0257/>
"""
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxint
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxint:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
def print_debug(func, *args):
print('in function %s():' % func)
from pprint import pprint
for arg in args:
pprint(arg)
print('-----')
def str2bool(configstring):
return configstring.lower() in ("yes", "true", "t", "1")
def main():
"""
Create an ACLHound client, parse the arguments received on the command
    line, and call the appropriate method.
"""
try:
if sys.argv[1] == "init":
do_init(sys.argv)
sys.exit(0)
except IndexError:
pass
args = docopt(__doc__, version=aclhound.__version__, options_first=True)
args['debug'] = args.pop('--debug')
args['jenkins'] = args.pop('--jenkins')
cmd = args['<command>']
cli = ACLHoundClient(args)
help_flag = True if cmd == "help" else False
# first parse commands in help context
if help_flag:
# concat first and second argument to get real function name
cmd = "_".join(args['<args>'][0:2])
# see if command is a function in the cli object
if cmd in dir(cli):
print(trim(getattr(cli, cmd).__doc__))
return
# init is special because we don't want to depend on _settings
elif cmd == "init":
print(trim(do_init.__doc__))
return
docopt(__doc__, argv=['--help'])
if hasattr(cli, cmd):
# lookup function method for a given subcommand
method = getattr(cli, cmd)
else:
# display help message if command not found in cli object
raise DocoptExit("Found no matching command, try 'aclhound help'")
docstring = trim(getattr(cli, cmd).__doc__)
if 'Usage: ' in docstring:
args.update(docopt(docstring))
method(args)
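# Illustrative trace (command lines are examples only): 'aclhound help build'
# takes the help branch of main() above, joins the remaining arguments into
# 'build', finds that method on ACLHoundClient and prints its trimmed
# docstring; 'aclhound build <devicename>' instead re-parses the method's own
# docstring with docopt() and then calls cli.build(args).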
if __name__ == '__main__':
main()
sys.exit(0)
| {
"content_hash": "1bb99a2049845c74f0d667a74a177792",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 96,
"avg_line_length": 36.559471365638764,
"alnum_prop": 0.5390408482949753,
"repo_name": "job/aclhound",
"id": "4465963b9d0366c4a8ebfeb573c541635a37985a",
"size": "17983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aclhound/cli.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "366"
},
{
"name": "Python",
"bytes": "148566"
},
{
"name": "Roff",
"bytes": "883"
},
{
"name": "Shell",
"bytes": "1105"
}
],
"symlink_target": ""
} |
import argparse
import md5
import os
import pymongo
import re
import requests
import sys
import unicodedata
import urllib
MimeExtensions = {
'image/gif': '.gif',
'image/jpeg': '.jpg',
'image/png': '.png',
}
UsedMD5Sums = {}
def safe_path(value, noperiods=False):
"""
Make sure a string is a safe file path.
:param value: the string to escape.
    :param noperiods: if True, escape periods, too.
:returns: the escaped string
"""
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
value = re.sub('[-\s]+', '-', value)
if noperiods:
value = value.replace('.', '-')
return value
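def _safe_path_examples():
    # Hypothetical helper, only for illustration; not used by the script.
    # safe_path() strips accents, drops characters that are not
    # alphanumerics, whitespace or hyphens, lower-cases the result and
    # collapses runs of whitespace and hyphens into single hyphens.
    assert safe_path(u'John  Doe!') == 'john-doe'
    assert safe_path(u'El Ni\xf1o') == 'el-nino'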
def fetch_image_twitter(basepath, username):
"""
Check if we need to fetch an image from Twitter. If so, do so.
:param basepath: base path to store images. Created if necessary.
:param username: username to fetch.
"""
if not username:
return
safe_name = safe_path(username, True)
path = os.path.join(basepath, safe_name[:2])
filename = os.path.join(path, safe_name)
if not os.path.exists(path):
os.makedirs(path)
files = [file for file in os.listdir(path)
if file.split('.')[0] == safe_name]
if len(files):
if os.path.getsize(os.path.join(path, files[0])):
md5sum = md5.new(open(os.path.join(path, files[0]), 'rb').read(
)).hexdigest()
UsedMD5Sums[md5sum] = UsedMD5Sums.get(md5sum, 0) + 1
# Report if more than a few users have the same image
if UsedMD5Sums[md5sum] >= 3:
print 'used', UsedMD5Sums[md5sum], os.path.join(path, files[0])
return
url = 'https://twitter.com/%s/profile_image?size=original' % (
urllib.quote(username), )
req = requests.get(url)
if req.status_code in (403, 404):
data = ''
elif req.status_code == 200:
data = req.content
md5sum = md5.new(data).hexdigest()
mime = req.headers['Content-Type']
# If the image matches particular md5 sums, then they are twitter
# default images and should be excluded.
if mime.startswith('text/') or md5sum in (
'eff522de713c9faf5306578a1a5f6f00',
'4cc37d5daba30f3a3e9eb579987f484e',
'd22796f1ffef64e584899475096801a0',
'bafcc4c38220b75c6739a8f68a1c88bd',
'0d75373e1c612ef553b64c3a9e638aae',
'531003390ee8fa9a0cf3b7fbc36f5960',
'6df7294d31afb0d070aa59b90a650223',
):
data = ''
else:
UsedMD5Sums[md5sum] = UsedMD5Sums.get(md5sum, 0) + 1
# Report if more than a few users have the same image
if UsedMD5Sums[md5sum] >= 3:
                # 'files' may be empty on this code path, so report the URL
                print 'used', UsedMD5Sums[md5sum], url
if mime not in MimeExtensions:
print 'Unknown mime', mime, url
sys.exit(0)
filename += MimeExtensions[mime]
else:
print 'Unknown status code', req.status_code, url
sys.exit(0)
open(filename, 'wb').write(data)
print filename, len(data)
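# Illustrative sketch of the resulting cache layout (usernames hypothetical):
#
#   profileimages/twitter/jo/john-doe.png
#   profileimages/twitter/jo/john-smith.png   (zero length: no usable
#                                              profile image was available)
#
# The two-character subdirectory comes from safe_name[:2] and keeps any
# single directory from accumulating too many files.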
def fetch_images(mongo, mongoCollection, out, **kwargs):
"""
Get a list of user names from Mongo, then fetch images for each user if
we don't already have them.
:param mongo: the mongo server and database.
:param mongoCollection: the name of the mongo collection.
:param out: the output directory. This is created if it does not exist.
"""
service = None
if mongoCollection.lower().startswith('twitter'):
service = 'twitter'
if not service:
print 'No known service'
return
if not mongo.startswith('mongodb://'):
mongo = 'mongodb://' + mongo
mongoClientOptions = {'connectTimeoutMS': 15000}
mconn = pymongo.MongoClient(mongo, **mongoClientOptions)
mdb = mconn.get_default_database()
mcoll = mdb[mongoCollection]
cur = mcoll.find({'type': 'node'}, {'data.name': True, '_id': False},
limit=kwargs.get('limit'))
usernames = {entry['data']['name'].strip() for entry in cur}
func = globals()['fetch_image_' + service]
for username in sorted(usernames):
func(os.path.join(out, service), username)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Fetch user profile images based on users in a mongo '
'database.')
parser.add_argument(
'--mongo', help='The mongo database. For example, '
'127.0.0.1:27017/may2016_isil.',
default='127.0.0.1:27017/may2016_isil')
parser.add_argument(
'--coll', '--collection', help='Mongo collection name. For example, '
'twitter_isil_36_nodelink.', default='twitter_isil_36_nodelink',
dest='mongoCollection')
parser.add_argument(
'--limit', help='Only check this many usernames.', type=int, default=0)
parser.add_argument(
'--out', help='Output directory. Within this directory a '
'sub-directory for each service (twitter, instagram) will be '
'created. Within those directories, images are stored in the form '
'(username).(extension), where a zero-length file indicates that we '
        'received a successful response from the service but that no profile '
'picture was available or the default is being used.',
default='profileimages')
parser.set_defaults(nodup=True)
args = vars(parser.parse_args())
fetch_images(**args)
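# Illustrative invocation (the database and collection are the argparse
# defaults above; the limit of 100 is hypothetical):
#
#   python fetchimages.py --mongo 127.0.0.1:27017/may2016_isil \
#       --coll twitter_isil_36_nodelink --limit 100 --out profileimages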
| {
"content_hash": "ee8d97297ae8da1d133b20e19fdb2477",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 79,
"avg_line_length": 36.94078947368421,
"alnum_prop": 0.6105075690115761,
"repo_name": "XDATA-Year-3/EntityAlignLarge",
"id": "8f71cdb240a16fe13b41bf41db48a237be53809b",
"size": "6400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/fetchimages.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6716"
},
{
"name": "HTML",
"bytes": "20228"
},
{
"name": "JavaScript",
"bytes": "307103"
},
{
"name": "Python",
"bytes": "147037"
},
{
"name": "Shell",
"bytes": "1233"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import errno
import mimetypes
import os
import re
import rfc822
import StringIO
import base64
import binascii
import math
import urllib
import boto.utils
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
try:
from hashlib import md5
except ImportError:
from md5 import md5
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self.storage_class = 'STANDARD'
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
self.local_hashes = {}
def __repr__(self):
if self.bucket:
return '<Key: %s,%s>' % (self.bucket.name, self.name)
else:
return '<Key: None,%s>' % self.name
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
    key = property(_get_key, _set_key)
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
    md5 = property(_get_md5, _set_md5)
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_base64(self.local_hashes['md5']).rstrip('\n')
def _set_base64md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
    base64md5 = property(_get_base64md5, _set_base64md5)
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = base64.encodestring(digest)
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
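    # Illustrative sketch, not part of boto: md5 and base64md5 above are two
    # views of the same digest stored in local_hashes['md5'].
    #
    #   k = Key()
    #   k.md5 = '5d41402abc4b2a76b9719d911017c592'   # hex MD5 of 'hello'
    #   k.base64md5                                  # -> 'XUFAKrxLKna5cZ2REBfFkg=='
    #
    # get_md5_from_hexdigest() builds the same (hex, base64) pair without
    # touching the Key instance.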
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(
provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def handle_restore_headers(self, response):
header = response.getheader('x-amz-restore')
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
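    # Illustrative example of the header parsed by handle_restore_headers()
    # above (the date is hypothetical):
    #
    #   x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 GMT"
    #
    # yields ongoing_restore == False and expiry_date set to the quoted date.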
def handle_addl_headers(self, headers):
"""
Used by Key subclasses to do additional, provider-specific
processing of response headers. No-op for this base class.
"""
pass
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.resp == None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() == 'etag':
self.etag = value
elif name.lower() == 'content-type':
self.content_type = value
elif name.lower() == 'content-encoding':
self.content_encoding = value
elif name.lower() == 'content-language':
self.content_language = value
elif name.lower() == 'last-modified':
self.last_modified = value
elif name.lower() == 'cache-control':
self.cache_control = value
elif name.lower() == 'content-disposition':
self.content_disposition = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
self.handle_addl_headers(self.resp.getheaders())
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
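    # Illustrative usage sketch (bucket and key names hypothetical): because
    # next()/read() above pull BufferSize chunks from the open HTTP response,
    # a key can be streamed to disk without loading it into memory:
    #
    #   key = bucket.get_key('logs/2014-01-01.gz')
    #   with open('local.gz', 'wb') as fp:
    #       for chunk in key:
    #           fp.write(chunk)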
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
        all metadata and ACL info while changing the storage class, or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
if new_storage_class == 'STANDARD':
return self.copy(self.bucket.name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(self.bucket.name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
The Reduced Redundancy Storage (RRS) feature of S3,
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key)
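    # Illustrative usage sketch (bucket and key names hypothetical):
    # change_storage_class() above is a thin wrapper around copy().
    #
    #   key.change_storage_class('REDUCED_REDUNDANCY')
    #   archived = key.copy('archive-bucket', 'old/' + key.name,
    #                       preserve_acl=True)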
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name))
def delete(self):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
self.metadata[name] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket != None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket != None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket != None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket != None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
if headers is None:
headers = {}
else:
headers = headers.copy()
headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds
:type method: string
:param method: The method to use for retrieving the file
(default is GET)
:type headers: dict
:param headers: Any headers to pass along in the request
:type query_auth: bool
:param query_auth:
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
:param expires_in_absolute:
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
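    # Illustrative usage sketch (the lifetime is hypothetical): a signed,
    # time-limited download link for this key.
    #
    #   url = key.generate_url(expires_in=3600)              # valid for 1 hour
    #   plain = key.generate_url(3600, force_http=True)      # http:// URL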
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
            point at the offset from which you wish to upload, i.e. if
            uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: (optional) Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
# If hash_algs is unset and the MD5 hasn't already been computed,
# default to an MD5 hash_alg to hash the data on-the-fly.
if hash_algs is None and not self.md5:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
http_conn.putrequest(method, path)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(
math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
for alg in digesters:
digesters[alg].update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
self.size = data_len
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if not self.should_retry(response, chunked_transfer):
raise provider.storage_response_error(
response.status, response.reason, body)
return response
if not headers:
headers = {}
else:
headers = headers.copy()
# Overwrite user-supplied user-agent.
for header in find_matching_headers('User-Agent', headers):
del headers[header]
headers['User-Agent'] = UserAgent
if self.storage_class != 'STANDARD':
headers[provider.storage_class_header] = self.storage_class
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name(
'Content-Encoding', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name(
'Content-Language', headers)
content_type_headers = find_matching_headers('Content-Type', headers)
if content_type_headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if (len(content_type_headers) == 1 and
headers[content_type_headers[0]] is None):
# Delete null Content-Type value to skip sending that header.
del headers[content_type_headers[0]]
else:
self.content_type = merge_headers_by_name(
'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type == None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request(
'PUT',
self.bucket.name,
self.name,
headers,
sender=sender,
query_args=query_args
)
self.handle_version_headers(resp, force=True)
self.handle_addl_headers(resp.getheaders())
def should_retry(self, response, chunked_transfer=False):
provider = self.bucket.connection.provider
if not chunked_transfer:
if response.status in [500, 503]:
# 500 & 503 can be plain retries.
return True
if response.getheader('location'):
# If there's a redirect, plain retry.
return True
if 200 <= response.status <= 299:
self.etag = response.getheader('etag')
if self.etag != '"%s"' % self.md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5')
return True
if response.status == 400:
# The 400 must be trapped so the retry handler can check to
# see if it was a timeout.
# If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
# out.
body = response.read()
err = provider.storage_response_error(
response.status,
response.reason,
body
)
if err.error_code in ['RequestTimeout']:
raise PleaseRetryException(
"Saw %s, retrying" % err.error_code,
response=response
)
return False
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
            in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
        Content-Length and Content-MD5 headers up front. So for huge
        uploads, the delay in calculating MD5 is avoided, at the cost
        of being unable to verify the integrity of the uploaded data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket != None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents. The data is read from 'fp' from its current position until
'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will
first check to see if an object exists in the bucket with
the same key. If it does, it won't overwrite it. The
default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
            the second representing the total size of the object to be
            transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will
be rewound to the start before any bytes are read from
it. The default behaviour is False which reads from the
current position of the file pointer (fp).
:rtype: int
:return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
# object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
# TODO - What if provider doesn't support reduced redundancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if not md5 and provider.supports_chunked_transfer():
# defer md5 calculation to on the fly and
# we don't know anything about size yet.
chunked_transfer = True
self.size = None
else:
chunked_transfer = False
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if not md5:
# compute_md5() and also set self.size to actual
# size of the bytes read computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
# If md5 is provided, still need to size so
# calculate based on bytes to end of content
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
# return number of bytes written.
return self.size
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
:rtype: int
:return: The number of bytes written to the key.
"""
with open(filename, 'rb') as fp:
return self.set_contents_from_file(fp, headers, replace, cb,
num_cb, policy, md5,
reduced_redundancy,
encrypt_key=encrypt_key)
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
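Example (illustrative sketch; ``key`` is assumed to be a Key bound to
an existing bucket)::
    key.set_contents_from_string('hello world',
                                 headers={'Content-Type': 'text/plain'})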
"""
if isinstance(s, unicode):
s = s.encode("utf-8")
fp = StringIO.StringIO(s)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
:type headers: dict
:param headers: additional HTTP headers to send when retrieving the file
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=None,
query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = query_args or []
if torrent:
query_args.append('torrent')
if hash_algs is None and not torrent:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (
key, urllib.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
data_len = 0
if cb:
if self.size is None:
cb_size = 0
else:
cb_size = self.size
if self.size is None and num_cb != -1:
# If size is not available due to chunked transfer for example,
# we'll call the cb for every 1MB of data transferred.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
try:
for bytes in self:
fp.write(bytes)
data_len += len(bytes)
for alg in digesters:
digesters[alg].update(bytes)
if cb:
if cb_size > 0 and data_len >= cb_size:
break
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
except IOError, e:
if e.errno == errno.ENOSPC:
raise StorageDataError('Out of space for destination file '
'%s' % fp.name)
raise
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
Get a torrent file (see to get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: file
:param fp: a file-like object to which the object's contents will be written
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
:type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
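Example (illustrative sketch; the key and local path are made up)::
    key.get_contents_to_filename('/tmp/downloaded.bin')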
"""
fp = open(filename, 'wb')
try:
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
except Exception:
os.remove(filename)
raise
finally:
fp.close()
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified is not None:
try:
modified_tuple = rfc822.parsedate_tz(self.last_modified)
modified_stamp = int(rfc822.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception:
pass
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:rtype: string
:returns: The contents of the file as a string
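Example (illustrative sketch; note that the whole object is buffered
in memory, so prefer get_contents_to_file for very large objects)::
    body = key.get_contents_as_string()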
"""
fp = StringIO.StringIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
return fp.getvalue()
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
account you are granting the permission to.
:type recursive: boolean
:param recursive: A boolean value to controls whether the
command will apply the grant to all keys within the bucket
or not. The default value is False. By passing a True
value, the call will iterate through all keys in the
bucket and apply the same grant to each key. CAUTION: If
you have a lot of keys, this could take a long time!
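Example (illustrative sketch; the address is a placeholder)::
    key.add_email_grant('READ', 'someone@example.com')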
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
account you are granting the permission to.
:type display_name: string
:param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
def _normalize_metadata(self, metadata):
if isinstance(metadata, set):
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
"""
Extracts metadata from existing URI into a dict, so we can
overwrite/delete from it to form the new set of metadata to apply to a
key.
"""
metadata = {}
for underscore_name in self._underscore_base_user_settable_fields:
if hasattr(self, underscore_name):
value = getattr(self, underscore_name)
if value:
# Generate HTTP field name corresponding to "_" named field.
field_name = underscore_name.replace('_', '-')
metadata[field_name.lower()] = value
# self.metadata contains custom metadata, which are all user-settable.
prefix = self.provider.metadata_prefix
for underscore_name in self.metadata:
field_name = underscore_name.replace('_', '-')
metadata['%s%s' % (prefix, field_name.lower())] = (
self.metadata[underscore_name])
return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
metadata_plus = self._normalize_metadata(metadata_plus)
metadata_minus = self._normalize_metadata(metadata_minus)
metadata = self._get_remote_metadata()
metadata.update(metadata_plus)
for h in metadata_minus:
if h in metadata:
del metadata[h]
src_bucket = self.bucket
# Boto prepends the meta prefix when adding headers, so strip prefix in
# metadata before sending back in to copy_key() call.
rewritten_metadata = {}
for h in metadata:
if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
rewritten_h = (h.replace('x-goog-meta-', '')
.replace('x-amz-meta-', ''))
else:
rewritten_h = h
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
metadata=metadata, preserve_acl=preserve_acl,
headers=headers)
def restore(self, days, headers=None):
"""Restore an object from an archive.
:type days: int
:param days: The lifetime of the restored object (must
be at least 1 day). If the object is already restored
then this parameter can be used to readjust the lifetime
of the restored object. In this case, the days
param is with respect to the initial time of the request.
If the object has not been restored, this param is with
respect to the completion time of the request.
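Example (illustrative sketch; restores an archived object for one
week)::
    key.restore(days=7)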
"""
response = self.bucket.connection.make_request(
'POST', self.bucket.name, self.name,
data=self.RestoreBody % days,
headers=headers, query_args='restore')
if response.status not in (200, 202):
provider = self.bucket.connection.provider
raise provider.storage_response_error(response.status,
response.reason,
response.read())
| {
"content_hash": "8b5f0ecef165860ee67d140a3d505531",
"timestamp": "",
"source": "github",
"line_count": 1786,
"max_line_length": 89,
"avg_line_length": 42.23908174692049,
"alnum_prop": 0.5755245960312305,
"repo_name": "donny/mako-mori",
"id": "d16352fd95856dfd675ce670cfbbcd1791af6053",
"size": "76664",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "external/boto/s3/key.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3180853"
}
],
"symlink_target": ""
} |
import logging
import random
import string
import sys
import unittest
from time import time, sleep
import apiritif
import os
import re
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.options import ArgOptions
from bzt.resources.selenium_extras import waiter, get_elements, get_locator
class TestLocSc(unittest.TestCase):
def setUp(self):
self.vars = {'city_select_name': 'fromPort', 'input_name_id': 'inputName'}
timeout = 3.5
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument('--disable-gpu')
options.set_capability('unhandledPromptBehavior', 'ignore')
self.driver = webdriver.Chrome(
service_log_path='/somewhere/webdriver.log',
options=options)
self.driver.implicitly_wait(timeout)
apiritif.put_into_thread_store(timeout=timeout, func_mode=False, driver=self.driver, windows={},
scenario_name='loc_sc')
def _1_Foreach_test(self):
with apiritif.smart_transaction('Foreach test'):
elements = get_elements([{'css': 'input'}, {'xpath': '/table/input/'}])
for el in elements:
self.assertEqual(el.get_attribute('innerText').strip(), 'text'.strip())
var_loc_as = get_locator([{'css': 'style'}, {'xpath': '//tr'}], el)
self.assertEqual(el.find_element(
var_loc_as[0],
var_loc_as[1]).get_attribute('innerText').strip(), 'text'.strip())
self.assertEqual(el.get_attribute('value').strip(), 'value'.strip())
self.assertEqual(el.get_attribute('value').strip(), 'value'.strip())
if el.get_attribute('contenteditable'):
self.driver.execute_script(("arguments[0].innerHTML = '%s';" % 'new text'), el)
else:
raise NoSuchElementException(("The element '%s' (tag name: '%s', text: '%s') is not a contenteditable element" % ('el', el.tag_name, el.text)))
if el.get_attribute('contenteditable'):
self.driver.execute_script(("arguments[0].innerHTML = '%s';" % 'new text'), el)
else:
raise NoSuchElementException(("The element '%s' (tag name: '%s', text: '%s') is not a contenteditable element" % ('el', el.tag_name, el.text)))
el.click()
waiter()
var_loc_keys = get_locator([{'css': 'input-cls'}, {'xpath': '//input'}], el)
el.find_element(
var_loc_keys[0],
var_loc_keys[1]).click()
waiter()
ActionChains(self.driver).double_click(el).perform()
waiter()
ActionChains(self.driver).double_click(el).perform()
waiter()
ActionChains(self.driver).context_click(el).perform()
waiter()
ActionChains(self.driver).context_click(el).perform()
waiter()
ActionChains(self.driver).click_and_hold(el).perform()
ActionChains(self.driver).click_and_hold(el).perform()
ActionChains(self.driver).release(el).perform()
ActionChains(self.driver).release(el).perform()
ActionChains(self.driver).move_to_element_with_offset(el, -10, -10).perform()
ActionChains(self.driver).move_to_element_with_offset(el, -10, -10).perform()
ActionChains(self.driver).move_to_element(el).perform()
ActionChains(self.driver).move_to_element(el).perform()
target = get_locator([{'id': 'id12'}])
ActionChains(self.driver).drag_and_drop(el, self.driver.find_element(
target[0],
target[1])).perform()
waiter()
source = get_locator([{'id': 'id34'}])
ActionChains(self.driver).drag_and_drop(self.driver.find_element(
source[0],
source[1]), el).perform()
waiter()
target = get_locator([{'id': 'id12'}])
ActionChains(self.driver).drag_and_drop(el, self.driver.find_element(
target[0],
target[1])).perform()
waiter()
source = get_locator([{'id': 'id34'}])
ActionChains(self.driver).drag_and_drop(self.driver.find_element(
source[0],
source[1]), el).perform()
waiter()
Select(el).select_by_visible_text('value')
waiter()
Select(el).select_by_visible_text('value')
waiter()
self.vars['my_var'] = el.get_attribute('innerText')
self.vars['my_var'] = el.get_attribute('innerText')
self.vars['my_var'] = el.get_attribute('value')
self.vars['my_var'] = el.get_attribute('value')
el.clear()
el.send_keys('text')
waiter()
el.clear()
el.send_keys('text')
waiter()
el.clear()
el.send_keys('passwd')
el.clear()
el.send_keys('passwd')
el.submit()
el.submit()
el.send_keys(Keys.ENTER)
el.send_keys(Keys.ENTER)
def test_locsc(self):
self._1_Foreach_test()
def tearDown(self):
if self.driver:
self.driver.quit()
| {
"content_hash": "87cb14cab6f85fe388b28ba81b91145a",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 163,
"avg_line_length": 42.289655172413795,
"alnum_prop": 0.5459882583170255,
"repo_name": "Blazemeter/taurus",
"id": "3a1027f513a5d22dd1254f416ce33bab7db8350a",
"size": "6148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/resources/selenium/generated_from_requests_foreach.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4357"
},
{
"name": "C",
"bytes": "5131"
},
{
"name": "C#",
"bytes": "18482"
},
{
"name": "CSS",
"bytes": "5298"
},
{
"name": "Dockerfile",
"bytes": "5222"
},
{
"name": "Groovy",
"bytes": "3280"
},
{
"name": "HTML",
"bytes": "5136"
},
{
"name": "Java",
"bytes": "9586"
},
{
"name": "JavaScript",
"bytes": "27121"
},
{
"name": "PHP",
"bytes": "8787"
},
{
"name": "PLpgSQL",
"bytes": "3712"
},
{
"name": "Python",
"bytes": "2160323"
},
{
"name": "RobotFramework",
"bytes": "6383"
},
{
"name": "Ruby",
"bytes": "4184"
},
{
"name": "Scala",
"bytes": "15526"
},
{
"name": "Shell",
"bytes": "12058"
},
{
"name": "Smarty",
"bytes": "13606"
}
],
"symlink_target": ""
} |
"""
kombu.entity
================
Exchange and Queue declarations.
"""
from __future__ import absolute_import
import numbers
from .abstract import MaybeChannelBound
from .exceptions import ContentDisallowed
from .five import string_t
from .serialization import prepare_accept_content
TRANSIENT_DELIVERY_MODE = 1
PERSISTENT_DELIVERY_MODE = 2
DELIVERY_MODES = {'transient': TRANSIENT_DELIVERY_MODE,
'persistent': PERSISTENT_DELIVERY_MODE}
__all__ = ['Exchange', 'Queue', 'binding', 'maybe_delivery_mode']
def _reprstr(s):
s = repr(s)
if isinstance(s, string_t) and s.startswith("u'"):
return s[2:-1]
return s[1:-1]
def pretty_bindings(bindings):
return '[%s]' % (', '.join(map(str, bindings)))
def maybe_delivery_mode(
v, modes=DELIVERY_MODES, default=PERSISTENT_DELIVERY_MODE):
if v:
return v if isinstance(v, numbers.Integral) else modes[v]
return default
class Exchange(MaybeChannelBound):
"""An Exchange declaration.
:keyword name: See :attr:`name`.
:keyword type: See :attr:`type`.
:keyword channel: See :attr:`channel`.
:keyword durable: See :attr:`durable`.
:keyword auto_delete: See :attr:`auto_delete`.
:keyword delivery_mode: See :attr:`delivery_mode`.
:keyword arguments: See :attr:`arguments`.
.. attribute:: name
Name of the exchange. Default is no name (the default exchange).
.. attribute:: type
*This description of AMQP exchange types was shamelessly stolen
from the blog post `AMQP in 10 minutes: Part 4`_ by
Rajith Attapattu. Reading this article is recommended if you're
new to amqp.*
"AMQP defines four default exchange types (routing algorithms) that
covers most of the common messaging use cases. An AMQP broker can
also define additional exchange types, so see your broker
manual for more information about available exchange types.
* `direct` (*default*)
Direct match between the routing key in the message, and the
routing criteria used when a queue is bound to this exchange.
* `topic`
Wildcard match between the routing key and the routing pattern
specified in the exchange/queue binding. The routing key is
treated as zero or more words delimited by `"."` and
supports special wildcard characters. `"*"` matches a
single word and `"#"` matches zero or more words.
* `fanout`
Queues are bound to this exchange with no arguments. Hence any
message sent to this exchange will be forwarded to all queues
bound to this exchange.
* `headers`
Queues are bound to this exchange with a table of arguments
containing headers and values (optional). A special argument
named "x-match" determines the matching algorithm, where
`"all"` implies an `AND` (all pairs must match) and
`"any"` implies `OR` (at least one pair must match).
:attr:`arguments` is used to specify the arguments.
.. _`AMQP in 10 minutes: Part 4`:
http://bit.ly/amqp-exchange-types
.. attribute:: channel
The channel the exchange is bound to (if bound).
.. attribute:: durable
Durable exchanges remain active when a server restarts. Non-durable
exchanges (transient exchanges) are purged when a server restarts.
Default is :const:`True`.
.. attribute:: auto_delete
If set, the exchange is deleted when all queues have finished
using it. Default is :const:`False`.
.. attribute:: delivery_mode
The default delivery mode used for messages. The value is an integer,
or alias string.
* 1 or `"transient"`
The message is transient. Which means it is stored in
memory only, and is lost if the server dies or restarts.
* 2 or "persistent" (*default*)
The message is persistent. Which means the message is
stored both in-memory, and on disk, and therefore
preserved if the server dies or restarts.
The default value is 2 (persistent).
.. attribute:: arguments
Additional arguments to specify when the exchange is declared.
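Example (illustrative sketch; ``channel`` is assumed to come from an
already established connection, and the exchange name is made up)::
    media_exchange = Exchange('media', type='topic', durable=True)
    bound = media_exchange(channel)
    bound.declare()
    message = bound.Message('hello', content_type='text/plain')
    bound.publish(message, routing_key='media.video.new')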
"""
TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE
PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE
name = ''
type = 'direct'
durable = True
auto_delete = False
passive = False
delivery_mode = None
attrs = (
('name', None),
('type', None),
('arguments', None),
('durable', bool),
('passive', bool),
('auto_delete', bool),
('delivery_mode', lambda m: DELIVERY_MODES.get(m) or m),
)
def __init__(self, name='', type='', channel=None, **kwargs):
super(Exchange, self).__init__(**kwargs)
self.name = name or self.name
self.type = type or self.type
self.maybe_bind(channel)
def __hash__(self):
return hash('E|%s' % (self.name,))
def declare(self, nowait=False, passive=None):
"""Declare the exchange.
Creates the exchange on the broker.
:keyword nowait: If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
passive = self.passive if passive is None else passive
if self.name:
return self.channel.exchange_declare(
exchange=self.name, type=self.type, durable=self.durable,
auto_delete=self.auto_delete, arguments=self.arguments,
nowait=nowait, passive=passive,
)
def bind_to(self, exchange='', routing_key='',
arguments=None, nowait=False, **kwargs):
"""Binds the exchange to another exchange.
:keyword nowait: If set the server will not respond, and the call
will not block waiting for a response. Default is :const:`False`.
"""
if isinstance(exchange, Exchange):
exchange = exchange.name
return self.channel.exchange_bind(destination=self.name,
source=exchange,
routing_key=routing_key,
nowait=nowait,
arguments=arguments)
def unbind_from(self, source='', routing_key='',
nowait=False, arguments=None):
"""Delete previously created exchange binding from the server."""
if isinstance(source, Exchange):
source = source.name
return self.channel.exchange_unbind(destination=self.name,
source=source,
routing_key=routing_key,
nowait=nowait,
arguments=arguments)
def Message(self, body, delivery_mode=None, priority=None,
content_type=None, content_encoding=None,
properties=None, headers=None):
"""Create message instance to be sent with :meth:`publish`.
:param body: Message body.
:keyword delivery_mode: Set custom delivery mode. Defaults
to :attr:`delivery_mode`.
:keyword priority: Message priority, 0 to 9. (currently not
supported by RabbitMQ).
:keyword content_type: The messages content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
:keyword content_encoding: The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
:keyword properties: Message properties.
:keyword headers: Message headers.
"""
# XXX This method is unused by kombu itself AFAICT [ask].
properties = {} if properties is None else properties
properties['delivery_mode'] = maybe_delivery_mode(self.delivery_mode)
return self.channel.prepare_message(body,
properties=properties,
priority=priority,
content_type=content_type,
content_encoding=content_encoding,
headers=headers)
def publish(self, message, routing_key=None, mandatory=False,
immediate=False, exchange=None):
"""Publish message.
:param message: :meth:`Message` instance to publish.
:param routing_key: Routing key.
:param mandatory: Currently not supported.
:param immediate: Currently not supported.
"""
exchange = exchange or self.name
return self.channel.basic_publish(message,
exchange=exchange,
routing_key=routing_key,
mandatory=mandatory,
immediate=immediate)
def delete(self, if_unused=False, nowait=False):
"""Delete the exchange declaration on server.
:keyword if_unused: Delete only if the exchange has no bindings.
Default is :const:`False`.
:keyword nowait: If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
return self.channel.exchange_delete(exchange=self.name,
if_unused=if_unused,
nowait=nowait)
def binding(self, routing_key='', arguments=None, unbind_arguments=None):
return binding(self, routing_key, arguments, unbind_arguments)
def __eq__(self, other):
if isinstance(other, Exchange):
return (self.name == other.name and
self.type == other.type and
self.arguments == other.arguments and
self.durable == other.durable and
self.auto_delete == other.auto_delete and
self.delivery_mode == other.delivery_mode)
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return super(Exchange, self).__repr__(str(self))
def __str__(self):
return 'Exchange %s(%s)' % (_reprstr(self.name) or repr(''), self.type)
@property
def can_cache_declaration(self):
return not self.auto_delete
class binding(object):
"""Represents a queue or exchange binding.
:keyword exchange: Exchange to bind to.
:keyword routing_key: Routing key used as binding key.
:keyword arguments: Arguments for bind operation.
:keyword unbind_arguments: Arguments for unbind operation.
"""
def __init__(self, exchange=None, routing_key='',
arguments=None, unbind_arguments=None):
self.exchange = exchange
self.routing_key = routing_key
self.arguments = arguments
self.unbind_arguments = unbind_arguments
def declare(self, channel, nowait=False):
"""Declare destination exchange."""
if self.exchange and self.exchange.name:
ex = self.exchange(channel)
ex.declare(nowait=nowait)
def bind(self, entity, nowait=False):
"""Bind entity to this binding."""
entity.bind_to(exchange=self.exchange,
routing_key=self.routing_key,
arguments=self.arguments,
nowait=nowait)
def unbind(self, entity, nowait=False):
"""Unbind entity from this binding."""
entity.unbind_from(self.exchange,
routing_key=self.routing_key,
arguments=self.unbind_arguments,
nowait=nowait)
def __repr__(self):
return '<binding: %s>' % (self,)
def __str__(self):
return '%s->%s' % (
_reprstr(self.exchange.name), _reprstr(self.routing_key),
)
class Queue(MaybeChannelBound):
"""A Queue declaration.
:keyword name: See :attr:`name`.
:keyword exchange: See :attr:`exchange`.
:keyword routing_key: See :attr:`routing_key`.
:keyword channel: See :attr:`channel`.
:keyword durable: See :attr:`durable`.
:keyword exclusive: See :attr:`exclusive`.
:keyword auto_delete: See :attr:`auto_delete`.
:keyword queue_arguments: See :attr:`queue_arguments`.
:keyword binding_arguments: See :attr:`binding_arguments`.
:keyword on_declared: See :attr:`on_declared`
.. attribute:: name
Name of the queue. Default is no name (default queue destination).
.. attribute:: exchange
The :class:`Exchange` the queue binds to.
.. attribute:: routing_key
The routing key (if any), also called *binding key*.
The interpretation of the routing key depends on
the :attr:`Exchange.type`.
* direct exchange
Matches if the routing key property of the message and
the :attr:`routing_key` attribute are identical.
* fanout exchange
Always matches, even if the binding does not have a key.
* topic exchange
Matches the routing key property of the message by a primitive
pattern matching scheme. The message routing key then consists
of words separated by dots (`"."`, like domain names), and
two special characters are available; star (`"*"`) and hash
(`"#"`). The star matches any word, and the hash matches
zero or more words. For example `"*.stock.#"` matches the
routing keys `"usd.stock"` and `"eur.stock.db"` but not
`"stock.nasdaq"`.
.. attribute:: channel
The channel the Queue is bound to (if bound).
.. attribute:: durable
Durable queues remain active when a server restarts.
Non-durable queues (transient queues) are purged if/when
a server restarts.
Note that durable queues do not necessarily hold persistent
messages, although it does not make sense to send
persistent messages to a transient queue.
Default is :const:`True`.
.. attribute:: exclusive
Exclusive queues may only be consumed from by the
current connection. Setting the 'exclusive' flag
always implies 'auto-delete'.
Default is :const:`False`.
.. attribute:: auto_delete
If set, the queue is deleted when all consumers have
finished using it. Last consumer can be cancelled
either explicitly or because its channel is closed. If
there was no consumer ever on the queue, it won't be
deleted.
.. attribute:: queue_arguments
Additional arguments used when declaring the queue.
.. attribute:: binding_arguments
Additional arguments used when binding the queue.
.. attribute:: alias
Unused in Kombu, but applications can take advantage of this.
For example to give alternate names to queues with automatically
generated queue names.
.. attribute:: on_declared
Optional callback to be applied when the queue has been
declared (the ``queue_declare`` operation is complete).
This must be a function with a signature that accepts at least 3
positional arguments: ``(name, messages, consumers)``.
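Example (illustrative sketch; the names are made up and ``channel``
is assumed to be an open channel)::
    media = Exchange('media', type='direct')
    video_queue = Queue('video', exchange=media, routing_key='video')
    bound = video_queue(channel)
    bound.declare()   # declares the exchange, the queue and the binding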
"""
ContentDisallowed = ContentDisallowed
name = ''
exchange = Exchange('')
routing_key = ''
durable = True
exclusive = False
auto_delete = False
no_ack = False
attrs = (
('name', None),
('exchange', None),
('routing_key', None),
('queue_arguments', None),
('binding_arguments', None),
('durable', bool),
('exclusive', bool),
('auto_delete', bool),
('no_ack', None),
('alias', None),
('bindings', list),
)
def __init__(self, name='', exchange=None, routing_key='',
channel=None, bindings=None, on_declared=None,
**kwargs):
super(Queue, self).__init__(**kwargs)
self.name = name or self.name
self.exchange = exchange or self.exchange
self.routing_key = routing_key or self.routing_key
self.bindings = set(bindings or [])
self.on_declared = on_declared
# allows Queue('name', [binding(...), binding(...), ...])
if isinstance(exchange, (list, tuple, set)):
self.bindings |= set(exchange)
if self.bindings:
self.exchange = None
# exclusive implies auto-delete.
if self.exclusive:
self.auto_delete = True
self.maybe_bind(channel)
def bind(self, channel):
on_declared = self.on_declared
bound = super(Queue, self).bind(channel)
bound.on_declared = on_declared
return bound
def __hash__(self):
return hash('Q|%s' % (self.name,))
def when_bound(self):
if self.exchange:
self.exchange = self.exchange(self.channel)
def declare(self, nowait=False):
"""Declares the queue, the exchange and binds the queue to
the exchange."""
# - declare main binding.
if self.exchange:
self.exchange.declare(nowait)
self.queue_declare(nowait, passive=False)
if self.exchange and self.exchange.name:
self.queue_bind(nowait)
# - declare extra/multi-bindings.
for B in self.bindings:
B.declare(self.channel)
B.bind(self, nowait=nowait)
return self.name
def queue_declare(self, nowait=False, passive=False):
"""Declare queue on the server.
:keyword nowait: Do not wait for a reply.
:keyword passive: If set, the server will not create the queue.
The client can use this to check whether a queue exists
without modifying the server state.
"""
ret = self.channel.queue_declare(queue=self.name,
passive=passive,
durable=self.durable,
exclusive=self.exclusive,
auto_delete=self.auto_delete,
arguments=self.queue_arguments,
nowait=nowait)
if not self.name:
self.name = ret[0]
if self.on_declared:
self.on_declared(*ret)
return ret
def queue_bind(self, nowait=False):
"""Create the queue binding on the server."""
return self.bind_to(self.exchange, self.routing_key,
self.binding_arguments, nowait=nowait)
def bind_to(self, exchange='', routing_key='',
arguments=None, nowait=False):
if isinstance(exchange, Exchange):
exchange = exchange.name
return self.channel.queue_bind(queue=self.name,
exchange=exchange,
routing_key=routing_key,
arguments=arguments,
nowait=nowait)
def get(self, no_ack=None, accept=None):
"""Poll the server for a new message.
Must return the message if a message was available,
or :const:`None` otherwise.
:keyword no_ack: If enabled the broker will automatically
ack messages.
:keyword accept: Custom list of accepted content types.
This method provides direct access to the messages in a
queue using a synchronous dialogue, designed for
specific types of applications where synchronous functionality
is more important than performance.
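Example (illustrative polling sketch; ``queue`` is assumed to be
bound to a channel)::
    message = queue.get(no_ack=False)
    if message is not None:
        print(message.payload)
        message.ack()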
"""
no_ack = self.no_ack if no_ack is None else no_ack
message = self.channel.basic_get(queue=self.name, no_ack=no_ack)
if message is not None:
m2p = getattr(self.channel, 'message_to_python', None)
if m2p:
message = m2p(message)
if message.errors:
message._reraise_error()
message.accept = prepare_accept_content(accept)
return message
def purge(self, nowait=False):
"""Remove all ready messages from the queue."""
return self.channel.queue_purge(queue=self.name,
nowait=nowait) or 0
def consume(self, consumer_tag='', callback=None,
no_ack=None, nowait=False):
"""Start a queue consumer.
Consumers last as long as the channel they were created on, or
until the client cancels them.
:keyword consumer_tag: Unique identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
:keyword no_ack: If enabled the broker will automatically ack
messages.
:keyword nowait: Do not wait for a reply.
:keyword callback: callback called for each delivered message
"""
if no_ack is None:
no_ack = self.no_ack
return self.channel.basic_consume(queue=self.name,
no_ack=no_ack,
consumer_tag=consumer_tag or '',
callback=callback,
nowait=nowait)
def cancel(self, consumer_tag):
"""Cancel a consumer by consumer tag."""
return self.channel.basic_cancel(consumer_tag)
def delete(self, if_unused=False, if_empty=False, nowait=False):
"""Delete the queue.
:keyword if_unused: If set, the server will only delete the queue
if it has no consumers. A channel error will be raised
if the queue has consumers.
:keyword if_empty: If set, the server will only delete the queue
if it is empty. If it is not empty a channel error will be raised.
:keyword nowait: Do not wait for a reply.
"""
return self.channel.queue_delete(queue=self.name,
if_unused=if_unused,
if_empty=if_empty,
nowait=nowait)
def queue_unbind(self, arguments=None, nowait=False):
return self.unbind_from(self.exchange, self.routing_key,
arguments, nowait)
def unbind_from(self, exchange='', routing_key='',
arguments=None, nowait=False):
"""Unbind queue by deleting the binding from the server."""
return self.channel.queue_unbind(queue=self.name,
exchange=exchange.name,
routing_key=routing_key,
arguments=arguments,
nowait=nowait)
def __eq__(self, other):
if isinstance(other, Queue):
return (self.name == other.name and
self.exchange == other.exchange and
self.routing_key == other.routing_key and
self.queue_arguments == other.queue_arguments and
self.binding_arguments == other.binding_arguments and
self.durable == other.durable and
self.exclusive == other.exclusive and
self.auto_delete == other.auto_delete)
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
s = super(Queue, self).__repr__
if self.bindings:
return s('Queue {name} -> {bindings}'.format(
name=_reprstr(self.name),
bindings=pretty_bindings(self.bindings),
))
return s(
'Queue {name} -> {0.exchange!r} -> {routing_key}'.format(
self, name=_reprstr(self.name),
routing_key=_reprstr(self.routing_key),
),
)
@property
def can_cache_declaration(self):
return not self.auto_delete
@classmethod
def from_dict(self, queue, **options):
binding_key = options.get('binding_key') or options.get('routing_key')
e_durable = options.get('exchange_durable')
if e_durable is None:
e_durable = options.get('durable')
e_auto_delete = options.get('exchange_auto_delete')
if e_auto_delete is None:
e_auto_delete = options.get('auto_delete')
q_durable = options.get('queue_durable')
if q_durable is None:
q_durable = options.get('durable')
q_auto_delete = options.get('queue_auto_delete')
if q_auto_delete is None:
q_auto_delete = options.get('auto_delete')
e_arguments = options.get('exchange_arguments')
q_arguments = options.get('queue_arguments')
b_arguments = options.get('binding_arguments')
bindings = options.get('bindings')
exchange = Exchange(options.get('exchange'),
type=options.get('exchange_type'),
delivery_mode=options.get('delivery_mode'),
routing_key=options.get('routing_key'),
durable=e_durable,
auto_delete=e_auto_delete,
arguments=e_arguments)
return Queue(queue,
exchange=exchange,
routing_key=binding_key,
durable=q_durable,
exclusive=options.get('exclusive'),
auto_delete=q_auto_delete,
no_ack=options.get('no_ack'),
queue_arguments=q_arguments,
binding_arguments=b_arguments,
bindings=bindings)
def as_dict(self, recurse=False):
res = super(Queue, self).as_dict(recurse)
if not recurse:
return res
bindings = res.get('bindings')
if bindings:
res['bindings'] = [b.as_dict(recurse=True) for b in bindings]
return res
| {
"content_hash": "c913933c61bdb2040e745a324b104ce3",
"timestamp": "",
"source": "github",
"line_count": 749,
"max_line_length": 79,
"avg_line_length": 36.257676902536716,
"alnum_prop": 0.5676252899804839,
"repo_name": "tkanemoto/kombu",
"id": "a482e90aa01e2c0d25ac77e23ce93634a7e1697d",
"size": "27157",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "kombu/entity.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1501"
},
{
"name": "Python",
"bytes": "988849"
},
{
"name": "Shell",
"bytes": "1830"
}
],
"symlink_target": ""
} |
from sqlalchemy import and_, or_
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from ggrc import db
from ggrc.models.person import Person
from ggrc.models.mixins import Mapping
from ggrc.models.reflection import PublishOnly
class ObjectOwner(Mapping, db.Model):
__tablename__ = 'object_owners'
person_id = db.Column(db.Integer, db.ForeignKey('people.id'), nullable=False)
ownable_id = db.Column(db.Integer, nullable=False)
ownable_type = db.Column(db.String, nullable=False)
@property
def ownable_attr(self):
return '{0}_ownable'.format(self.ownable_type)
@property
def ownable(self):
return getattr(self, self.ownable_attr)
@ownable.setter
def ownable(self, value):
self.ownable_id = value.id if value is not None else None
self.ownable_type = \
value.__class__.__name__ if value is not None else None
return setattr(self, self.ownable_attr, value)
@staticmethod
def _extra_table_args(cls):
return (
db.UniqueConstraint('person_id', 'ownable_id', 'ownable_type'),
db.Index('ix_object_owners_ownable', 'ownable_type', 'ownable_id'),
)
_publish_attrs = [
'person',
'ownable',
]
# @classmethod
# def eager_query(cls):
# from sqlalchemy import orm
# query = super(ObjectOwner, cls).eager_query()
# return query.options(
# orm.subqueryload('person'))
def _display_name(self):
return self.ownable.display_name + '<->' + self.person.display_name
class Ownable(object):
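"""Mixin adding ownership to a model.
``object_owners`` is the raw ObjectOwner relationship declared below,
while ``owners`` is an association proxy exposing the related Person
objects directly.
"""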
@declared_attr
def object_owners(cls):
cls.owners = association_proxy(
'object_owners', 'person',
creator=lambda person: ObjectOwner(
person=person,
ownable_type=cls.__name__,
)
)
joinstr = 'and_(foreign(ObjectOwner.ownable_id) == {type}.id, '\
'foreign(ObjectOwner.ownable_type) == "{type}")'
joinstr = joinstr.format(type=cls.__name__)
return db.relationship(
'ObjectOwner',
primaryjoin=joinstr,
backref='{0}_ownable'.format(cls.__name__),
cascade='all, delete-orphan',
)
_publish_attrs = [
'owners',
PublishOnly('object_owners'),
]
_include_links = []
_aliases = {
"owners": {
"display_name": "Owner",
"mandatory": True,
"filter_by": "_filter_by_owners",
}
}
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Ownable, cls).eager_query()
return cls.eager_inclusions(query, Ownable._include_links).options(
orm.subqueryload('object_owners'))
@classmethod
def _filter_by_owners(cls, predicate):
return ObjectOwner.query.join(Person).filter(and_(
(ObjectOwner.ownable_id == cls.id),
(ObjectOwner.ownable_type == cls.__name__),
or_(predicate(Person.name), predicate(Person.email))
)).exists()
| {
"content_hash": "e79f97f497dd7c0613e8499363ad2377",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 27.49056603773585,
"alnum_prop": 0.6458476321207962,
"repo_name": "NejcZupec/ggrc-core",
"id": "9ddc01d62e37f98aba470c39ff90778c86e88752",
"size": "3027",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "src/ggrc/models/object_owner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "163629"
},
{
"name": "Cucumber",
"bytes": "136321"
},
{
"name": "HTML",
"bytes": "1057522"
},
{
"name": "JavaScript",
"bytes": "1494189"
},
{
"name": "Makefile",
"bytes": "6161"
},
{
"name": "Mako",
"bytes": "2178"
},
{
"name": "Python",
"bytes": "2151120"
},
{
"name": "Shell",
"bytes": "29929"
}
],
"symlink_target": ""
} |
"""nova HACKING file compliance testing
built on top of pep8.py
"""
import fnmatch
import inspect
import logging
import os
import re
import subprocess
import sys
import tokenize
import warnings
import pep8
# Don't need this for testing
logging.disable('LOG')
#N1xx comments
#N2xx except
#N3xx imports
#N4xx docstrings
#N5xx dictionaries/lists
#N6xx calling methods
#N7xx localization
#N8xx git commit messages
IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session']
DOCSTRING_TRIPLE = ['"""', "'''"]
VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False')
# Monkey patch broken excluded filter in pep8
def filename_match(filename, patterns, default=True):
"""
Check if patterns contains a pattern that matches filename.
If patterns is unspecified, this always returns True.
"""
if not patterns:
return default
return any(fnmatch.fnmatch(filename, pattern) for pattern in patterns)
def excluded(filename):
"""
Check if options.exclude contains a pattern that matches filename.
"""
basename = os.path.basename(filename)
return any((filename_match(filename, pep8.options.exclude,
default=False),
filename_match(basename, pep8.options.exclude,
default=False)))
def input_dir(dirname, runner=None):
"""
Check all Python source files in this directory and all subdirectories.
"""
dirname = dirname.rstrip('/')
if excluded(dirname):
return
if runner is None:
runner = pep8.input_file
for root, dirs, files in os.walk(dirname):
if pep8.options.verbose:
print('directory ' + root)
pep8.options.counters['directories'] += 1
dirs.sort()
for subdir in dirs[:]:
if excluded(os.path.join(root, subdir)):
dirs.remove(subdir)
files.sort()
for filename in files:
if pep8.filename_match(filename) and not excluded(filename):
pep8.options.counters['files'] += 1
runner(os.path.join(root, filename))
def is_import_exception(mod):
return (mod in IMPORT_EXCEPTIONS or
any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS))
def import_normalize(line):
# convert "from x import y" to "import x.y"
# handle "from x import y as z" to "import x.y as z"
split_line = line.split()
if (line.startswith("from ") and "," not in line and
split_line[2] == "import" and split_line[3] != "*" and
split_line[1] != "__future__" and
(len(split_line) == 4 or
(len(split_line) == 6 and split_line[4] == "as"))):
return "import %s.%s" % (split_line[1], split_line[3])
else:
return line
def nova_todo_format(physical_line):
"""Check for 'TODO()'.
nova HACKING guide recommendation for TODO:
Include your name with TODOs as in "#TODO(termie)"
N101
"""
pos = physical_line.find('TODO')
pos1 = physical_line.find('TODO(')
pos2 = physical_line.find('#') # make sure it's a comment
if (pos != pos1 and pos2 >= 0 and pos2 < pos):
return pos, "NOVA N101: Use TODO(NAME)"
def nova_except_format(logical_line):
"""Check for 'except:'.
nova HACKING guide recommends not using except:
Do not write "except:", use "except Exception:" at the very least
N201
"""
if logical_line.startswith("except:"):
return 6, "NOVA N201: no 'except:' at least use 'except Exception:'"
def nova_except_format_assert(logical_line):
"""Check for 'assertRaises(Exception'.
nova HACKING guide recommends not using assertRaises(Exception...):
Do not use overly broad Exception type
N202
"""
if logical_line.startswith("self.assertRaises(Exception"):
return 1, "NOVA N202: assertRaises Exception too broad"
def nova_one_import_per_line(logical_line):
"""Check for import format.
nova HACKING guide recommends one import per line:
Do not import more than one module per line
Examples:
BAD: from nova.rpc.common import RemoteError, LOG
N301
"""
pos = logical_line.find(',')
parts = logical_line.split()
if (pos > -1 and (parts[0] == "import" or
parts[0] == "from" and parts[2] == "import") and
not is_import_exception(parts[1])):
return pos, "NOVA N301: one import per line"
_missingImport = set([])
def nova_import_module_only(logical_line):
"""Check for import module only.
nova HACKING guide recommends importing only modules:
Do not import objects, only modules
N302 import only modules
N303 Invalid Import
N304 Relative Import
"""
def importModuleCheck(mod, parent=None, added=False):
"""
If can't find module on first try, recursively check for relative
imports
"""
current_path = os.path.dirname(pep8.current_file)
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
valid = True
if parent:
if is_import_exception(parent):
return
parent_mod = __import__(parent, globals(), locals(),
[mod], -1)
valid = inspect.ismodule(getattr(parent_mod, mod))
else:
__import__(mod, globals(), locals(), [], -1)
valid = inspect.ismodule(sys.modules[mod])
if not valid:
if added:
sys.path.pop()
added = False
return logical_line.find(mod), ("NOVA N304: No "
"relative imports. '%s' is a relative import"
% logical_line)
return logical_line.find(mod), ("NOVA N302: import only "
"modules. '%s' does not import a module"
% logical_line)
except (ImportError, NameError) as exc:
if not added:
added = True
sys.path.append(current_path)
return importModuleCheck(mod, parent, added)
else:
name = logical_line.split()[1]
if name not in _missingImport:
if VERBOSE_MISSING_IMPORT != 'False':
print >> sys.stderr, ("ERROR: import '%s' in %s "
"failed: %s" %
(name, pep8.current_file, exc))
_missingImport.add(name)
added = False
sys.path.pop()
return
except AttributeError:
# Invalid import
return logical_line.find(mod), ("NOVA N303: Invalid import, "
"AttributeError raised")
# convert "from x import y" to " import x.y"
# convert "from x import y as z" to " import x.y"
import_normalize(logical_line)
split_line = logical_line.split()
if (logical_line.startswith("import ") and "," not in logical_line and
(len(split_line) == 2 or
(len(split_line) == 4 and split_line[2] == "as"))):
mod = split_line[1]
return importModuleCheck(mod)
# TODO(jogo) handle "from x import *"
#TODO(jogo): import template: N305
def nova_import_alphabetical(physical_line, line_number, lines):
"""Check for imports in alphabetical order.
nova HACKING guide recommendation for imports:
imports in human alphabetical order
N306
"""
# handle import x
# use .lower since capitalization shouldn't dictate order
split_line = import_normalize(physical_line.strip()).lower().split()
split_previous = import_normalize(lines[line_number - 2]
).strip().lower().split()
# with or without "as y"
length = [2, 4]
if (len(split_line) in length and len(split_previous) in length and
split_line[0] == "import" and split_previous[0] == "import"):
if split_line[1] < split_previous[1]:
return (0, "NOVA N306: imports not in alphabetical order (%s, %s)"
% (split_previous[1], split_line[1]))
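# e.g. (illustrative) "import fnmatch" on the line right after "import os"
# triggers N306, since "fnmatch" sorts before "os"; the comparison is lowercased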
def nova_docstring_start_space(physical_line):
"""Check for docstring not start with space.
nova HACKING guide recommendation for docstring:
Docstring should not start with space
N401
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
if (pos != -1 and len(physical_line) > pos + 1):
if (physical_line[pos + 3] == ' '):
return (pos, "NOVA N401: one line docstring should not start with"
" a space")
def nova_docstring_one_line(physical_line):
"""Check one line docstring end.
nova HACKING guide recommendation for one line docstring:
A one line docstring looks like this and ends in a period.
N402
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
if (pos != -1 and end and len(physical_line) > pos + 4):
if (physical_line[-5] != '.'):
return pos, "NOVA N402: one line docstring needs a period"
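# e.g. (illustrative) '"""Does a thing."""' passes, while '"""Does a thing"""'
# trips N402 because the character before the closing quotes is not a period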
def nova_docstring_multiline_end(physical_line):
"""Check multi line docstring end.
nova HACKING guide recommendation for docstring:
Docstring should end on a new line
N403
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
if (pos != -1 and len(physical_line) == pos):
print physical_line
if (physical_line[pos + 3] == ' '):
return (pos, "NOVA N403: multi line docstring end on new line")
FORMAT_RE = re.compile("%(?:"
"%|" # Ignore plain percents
"(\(\w+\))?" # mapping key
"([#0 +-]?" # flag
"(?:\d+|\*)?" # width
"(?:\.\d+)?" # precision
"[hlL]?" # length mod
"\w))") # type
class LocalizationError(Exception):
pass
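# Illustration (the format string is made up) of what FORMAT_RE yields:
#   FORMAT_RE.findall("%(name)s uses %d%%")
#   -> [('(name)', 's'), ('', 'd'), ('', '')]
# Only the bare "%d" entry counts as a positional placeholder in check_i18n()
# below.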
def check_i18n():
"""Generator that checks token stream for localization errors.
Expects tokens to be ``send``ed one by one.
Raises LocalizationError if some error is found.
"""
while True:
try:
token_type, text, _, _, line = yield
except GeneratorExit:
return
if (token_type == tokenize.NAME and text == "_" and
not line.startswith('def _(msg):')):
while True:
token_type, text, start, _, _ = yield
if token_type != tokenize.NL:
break
if token_type != tokenize.OP or text != "(":
continue # not a localization call
format_string = ''
while True:
token_type, text, start, _, _ = yield
if token_type == tokenize.STRING:
format_string += eval(text)
elif token_type == tokenize.NL:
pass
else:
break
if not format_string:
raise LocalizationError(start,
"NOVA N701: Empty localization string")
if token_type != tokenize.OP:
raise LocalizationError(start,
"NOVA N701: Invalid localization call")
if text != ")":
if text == "%":
raise LocalizationError(start,
"NOVA N702: Formatting operation should be outside"
" of localization method call")
elif text == "+":
raise LocalizationError(start,
"NOVA N702: Use bare string concatenation instead"
" of +")
else:
raise LocalizationError(start,
"NOVA N702: Argument to _ must be just a string")
format_specs = FORMAT_RE.findall(format_string)
positional_specs = [(key, spec) for key, spec in format_specs
if not key and spec]
# not spec means %%, key means %(smth)s
if len(positional_specs) > 1:
raise LocalizationError(start,
"NOVA N703: Multiple positional placeholders")
def nova_localization_strings(logical_line, tokens):
"""Check localization in line.
N701: bad localization call
N702: complex expression instead of string as argument to _()
N703: multiple positional placeholders
"""
gen = check_i18n()
next(gen)
try:
map(gen.send, tokens)
gen.close()
except LocalizationError as e:
return e.args
#TODO(jogo) Dict and list objects
current_file = ""
def readlines(filename):
"""Record the current file being tested."""
pep8.current_file = filename
return open(filename).readlines()
def add_nova():
"""Monkey patch in nova guidelines.
Look for functions that start with nova_ and have arguments
    and add them to the pep8 module.
Assumes you know how to write pep8.py checks
"""
for name, function in globals().items():
if not inspect.isfunction(function):
continue
args = inspect.getargspec(function)[0]
if args and name.startswith("nova"):
exec("pep8.%s = %s" % (name, name))
def once_git_check_commit_title():
"""Check git commit messages.
nova HACKING recommends not referencing a bug or blueprint in first line,
it should provide an accurate description of the change
N801
N802 Title limited to 50 chars
"""
#Get title of most recent commit
subp = subprocess.Popen(['git', 'log', '--no-merges', '--pretty=%s', '-1'],
stdout=subprocess.PIPE)
title = subp.communicate()[0]
if subp.returncode:
raise Exception("git log failed with code %s" % subp.returncode)
#From https://github.com/openstack/openstack-ci-puppet
# /blob/master/modules/gerrit/manifests/init.pp#L74
#Changeid|bug|blueprint
git_keywords = (r'(I[0-9a-f]{8,40})|'
'([Bb]ug|[Ll][Pp])[\s\#:]*(\d+)|'
'([Bb]lue[Pp]rint|[Bb][Pp])[\s\#:]*([A-Za-z0-9\\-]+)')
GIT_REGEX = re.compile(git_keywords)
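    # e.g. (made-up titles) "Fix bug 1234" matches the regex and has only three
    # words, so it trips N801, while "Fix race condition from bug 1234" passes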
error = False
#NOTE(jogo) if match regex but over 3 words, acceptable title
if GIT_REGEX.search(title) is not None and len(title.split()) <= 3:
print ("N801: git commit title ('%s') should provide an accurate "
"description of the change, not just a reference to a bug "
"or blueprint" % title.strip())
error = True
if len(title.decode('utf-8')) > 72:
print ("N802: git commit title ('%s') should be under 50 chars"
% title.strip())
error = True
return error
if __name__ == "__main__":
#include nova path
sys.path.append(os.getcwd())
#Run once tests (not per line)
once_error = once_git_check_commit_title()
#NOVA error codes start with an N
pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}')
add_nova()
pep8.current_file = current_file
pep8.readlines = readlines
pep8.excluded = excluded
pep8.input_dir = input_dir
try:
pep8._main()
sys.exit(once_error)
finally:
if len(_missingImport) > 0:
print >> sys.stderr, ("%i imports missing in this test environment"
% len(_missingImport))
| {
"content_hash": "660453b0edb381307769adf51da110d1",
"timestamp": "",
"source": "github",
"line_count": 466,
"max_line_length": 79,
"avg_line_length": 33.802575107296136,
"alnum_prop": 0.5670391061452514,
"repo_name": "tylertian/Openstack",
"id": "096cf778621e0e4ba6ea70bac9043a944cf96691",
"size": "16452",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack F/nova/tools/hacking.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "239919"
},
{
"name": "JavaScript",
"bytes": "156942"
},
{
"name": "Python",
"bytes": "16949418"
},
{
"name": "Shell",
"bytes": "96743"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newsletters', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='StoreEmailsClass',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('accounts', models.EmailField(unique=True, max_length=254)),
],
),
migrations.DeleteModel(
name='TestingClass',
),
]
| {
"content_hash": "72d0bbd69871cc502d339740b53b1447",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 114,
"avg_line_length": 26.217391304347824,
"alnum_prop": 0.572139303482587,
"repo_name": "Juanvulcano/gci15_email",
"id": "e22bbce64508fe5565071dd949ee291ff40146e6",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newsletters/migrations/0002_auto_20151218_0932.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1528"
},
{
"name": "Python",
"bytes": "11894"
}
],
"symlink_target": ""
} |
"""Special-case field selection functions.
All functions should take
the current astropy.time.Time
an ObsLogger instance
a dictionary of other observing programs field_ids and field_selection_functions
a Fields object (for efficiency)
and return a list of field_ids.
Note that any cadence cuts should occur within the functions defined here--the cadence cuts in ObservingProgram.py are overridden.
"""
import logging
import numpy as np
import astropy.units as u
from astropy.time import Time
from .field_selection.srg import get_srg_fields
from .Fields import Fields
from .constants import PROGRAM_NAME_TO_ID, P48_Observer
from .utils import RA_to_HA
logger = logging.getLogger(__name__)
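# A minimal sketch (illustration only; the function name and dec_range cut are
# made up) of the field selection interface described in the module docstring:
#
#   def example_subprogram_selection(time, obs_log, other_program_fields, fields):
#       """Return the field_ids this subprogram should request tonight."""
#       return fields.select_field_ids(dec_range=[-30., 90.], grid_id=0,
#                                      observable_hours_range=[1.0, 24.])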
def msip_nss_selection_phaseii(time, obs_log, other_program_fields, fields,
silent=False):
"""Select MSIP NSS fields so we ensure lowdec coverage."""
candidate_nss_field_ids = fields.select_field_ids(dec_range=[-32,90.],
grid_id=0,
# lowest rung fields are above airmass 2.5 for 2 hrs
observable_hours_range=[2.0, 24.])
candidate_nss_fields = fields.fields.loc[candidate_nss_field_ids].copy()
msip_cadence = 2
msip_internight_gap = msip_cadence*u.day
msip_nobs_per_night = 2
nss_requests_allowed = other_program_fields[
(PROGRAM_NAME_TO_ID['MSIP'],'all_sky')]['requests_allowed']
n_fields_to_observe = int(np.round(nss_requests_allowed / msip_nobs_per_night))
# define a footprint for the 2-night cadence by cutting on HA at midnight
candidate_nss_fields['HA_midnight'] = RA_to_HA(
candidate_nss_fields['ra'].values*u.degree,
P48_Observer.midnight(time, which='nearest')).wrap_at(180*u.deg)
candidate_nss_fields['abs_HA_midnight'] = np.abs(candidate_nss_fields['HA_midnight'])
nss_footprint_field_ids = candidate_nss_fields.sort_values(by='abs_HA_midnight', ascending=True).iloc[:(msip_cadence*n_fields_to_observe-1)].index.tolist()
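    # illustrative arithmetic (numbers made up): with 600 NSS requests allowed
    # and 2 visits per night, n_fields_to_observe = 300 and the footprint keeps
    # the 2*300 - 1 = 599 fields closest to HA = 0 at midnight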
# save these if we need filler
candidate_field_ids_outside_footprint = candidate_nss_fields.sort_values(by='abs_HA_midnight', ascending=True).iloc[(msip_cadence*n_fields_to_observe-1):].index.tolist()
# unfortunate duplication of code from ObservingProgram.py
# get the times they were last observed:
# (note that fields *never* observed will not be included)
    # since this function is for determining requests
# at the start of the night, exclude observations taken tonight
# this lets us restart the scheduler without breaking things
last_observed_times = obs_log.select_last_observed_time_by_field(
field_ids = nss_footprint_field_ids,
filter_ids = [1,2],
program_ids = [PROGRAM_NAME_TO_ID['MSIP']],
subprogram_names = ['all_sky'],
# arbitrary early date; start of night tonight
mjd_range = [Time('2001-01-01').mjd,np.floor(time.mjd)])
# we want an object observed at the end of the night N days ago
# to be observed at the start of the night now.
# Max night length is 12.2 hours
cutoff_time = (time - (msip_internight_gap - 0.6 * u.day)).mjd
# find fields last observed more recently than that
wrecent = (last_observed_times['expMJD'] >= cutoff_time)
recent_field_ids = last_observed_times.loc[wrecent].index.tolist()
# reduce the list to only those not recently observed:
nss_field_ids_due = [idi for idi in nss_footprint_field_ids if idi not in recent_field_ids]
if len(nss_field_ids_due)*msip_nobs_per_night > nss_requests_allowed:
# we have more fields we could observe than we are able to
# select the fields with the least recent observations
if not silent:
logger.info(f'MSIP NSS: {nss_requests_allowed/msip_nobs_per_night} fields needed, {len(nss_field_ids_due)} available--removing fields.')
available_nss_fields = fields.fields.loc[nss_field_ids_due]
available_nss_fields = available_nss_fields.join(last_observed_times)
available_nss_fields['expMJD'] = available_nss_fields['expMJD'].fillna(Time('2001-01-01').mjd)
nss_field_ids_to_observe = available_nss_fields.sort_values(by='expMJD',ascending=True).iloc[:n_fields_to_observe].index.tolist()
if not silent:
logger.info(f'MSIP NSS: requesting {nss_field_ids_to_observe}')
else:
# we have fewer fields available than expected--pad back on some recent
# fields
if not silent:
logger.info(f'MSIP NSS: {nss_requests_allowed/msip_nobs_per_night} fields needed, {len(nss_field_ids_due)} available--adding fields.')
nss_field_ids_to_observe = nss_field_ids_due
n_fields_needed = n_fields_to_observe - len(nss_field_ids_due)
# for now just pad on the extra fields outside the footprint
# TODO: think more carefully about how best to handle this
recent_field_ids.extend(candidate_field_ids_outside_footprint)
extra_fields = fields.fields.loc[recent_field_ids]
# if we don't have enough extras it's okay; optimize.py will truncate
# to the number of requests actually available
if len(recent_field_ids) <= n_fields_needed:
n_to_extend = len(recent_field_ids)
else:
n_to_extend = n_fields_needed
# choose the ones with the lowest total observations? Could make the
# optimization tough since they will tend to only be up for short
# periods
nobs = obs_log.select_n_obs_by_field(field_ids=recent_field_ids,
program_ids = [PROGRAM_NAME_TO_ID['MSIP']])
extra_fields = extra_fields.join(nobs).fillna(0)
field_ids_to_extend = extra_fields.sort_values(by='n_obs',ascending=True).iloc[:n_to_extend].index.tolist()
if not silent:
logger.info(f'MSIP NSS: requesting {nss_field_ids_to_observe}')
nss_field_ids_to_observe.extend(field_ids_to_extend)
if not silent:
logger.info(f'MSIP NSS: Extending by {n_to_extend} fields: {field_ids_to_extend}')
return nss_field_ids_to_observe
def partnership_HC_selection(time, obs_log, other_program_fields, fields):
"""Select partnership HC fields"""
return phase_II_selection(time, obs_log, other_program_fields, fields,
subprogram='Partnership')
def Caltech_1DC_selection(time, obs_log, other_program_fields, fields):
"""Select Caltech 1DC fields"""
return phase_II_selection(time, obs_log, other_program_fields, fields,
subprogram='Caltech')
def phase_II_selection(time, obs_log, other_program_fields, fields,
subprogram='Partnership', silent=False):
"""Select partnership HC or Caltech 1DC fields"""
assert (subprogram in ['Partnership', 'Caltech'])
candidate_field_ids_prio1 = [473, 525, 624, 651, 671]
candidate_field_ids_prio2 = [349, 350, 406, 414, 479, 480, 481, 528, 529, 530, 531, 532, 533, 534, 544,
545, 546, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 596, 597, 600, 601,
623, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 644, 645, 646, 647,
668, 669, 670, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 708, 709, 710,
711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 748, 749,
750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 784, 785, 786,
787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 815, 816, 817, 818, 819, 820,
821, 822, 823, 824, 825, 841, 843, 844]
candidate_field_ids_prio3 = [345, 346, 347, 348, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401,
402, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 441, 442,
443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 464, 465, 466, 467, 468, 469,
470, 471, 472, 474, 475, 476, 477, 478, 493, 494, 496, 497, 498, 499, 500, 501,
502, 503, 516, 517, 518, 519, 520, 521, 522, 523, 524, 526, 527, 547, 548, 549,
550, 551, 552, 553, 554, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 599,
602, 604, 615, 616, 617, 618, 619, 620, 621, 622, 662, 663, 664, 665, 666, 667,
706, 707]
candidate_field_ids_prio4 = [296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 351, 352, 353, 354,
355, 403, 405, 413, 482, 483, 484, 535, 543, 586, 594, 595, 682, 746, 747, 763,
782, 783, 814, 839, 840, 842, 845, 846, 847, 848, 858, 859, 860, 861, 862, 863,
864]
candidate_field_ids_byprio = [candidate_field_ids_prio1, candidate_field_ids_prio2, candidate_field_ids_prio3, candidate_field_ids_prio4]
candidate_field_ids = [296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 345, 346, 347, 348, 349,
350, 351, 352, 353, 354, 355, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400,
401, 402, 403, 405, 406, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423,
424, 425, 426, 427, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452,
464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479,
480, 481, 482, 483, 484, 493, 494, 496, 497, 498, 499, 500, 501, 502, 503, 516,
517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532,
533, 534, 535, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 566,
567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582,
583, 584, 585, 586, 594, 595, 596, 597, 599, 600, 601, 602, 604, 615, 616, 617,
618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633,
634, 635, 644, 645, 646, 647, 651, 662, 663, 664, 665, 666, 667, 668, 669, 670,
671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 706, 707, 708, 709,
710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 746,
747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762,
763, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796,
814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 839, 840, 841, 842,
843, 844, 845, 846, 847, 848, 858, 859, 860, 861, 862, 863, 864]
# define the parameters that vary
if subprogram == 'Partnership':
label = 'Partnership HC'
min_hours_visible = 2.5
nobs_per_night = 4
requests_allowed = other_program_fields[
(PROGRAM_NAME_TO_ID['collaboration'],'high_cadence')]['requests_allowed']
offset = 0
exclude_fields = []
elif subprogram == 'Caltech':
label = 'Caltech 1DC'
min_hours_visible = 1.5
nobs_per_night = 2
requests_allowed = other_program_fields[
(PROGRAM_NAME_TO_ID['Caltech'],'Caltech_1DC')]['requests_allowed']
# to prevent fields from thrashing back and forth between programs
# we place a buffer in the priority list
offset = 15
exclude_fields = phase_II_selection(time, obs_log, other_program_fields, fields, subprogram='Partnership', silent=False)
else:
raise ValueError(f'Unknown subprogram {subprogram}')
# do a visibility check (also handles moon exclusion)
visible_field_ids = fields.select_field_ids(dec_range=[-32,90.],
grid_id=0,
observable_hours_range=[min_hours_visible, 24.])
visible_field_ids = np.intersect1d(visible_field_ids,
candidate_field_ids).tolist()
n_fields_to_observe = int(np.round(requests_allowed / nobs_per_night))
n_fields_remaining = n_fields_to_observe
if not silent:
logger.info(f'{label}: need {n_fields_to_observe} fields')
field_ids_to_observe = []
for i, prio_field_ids in enumerate(candidate_field_ids_byprio):
assert (n_fields_remaining >= 0)
if n_fields_remaining == 0:
break
visible_prio_field_ids = np.intersect1d(visible_field_ids,
prio_field_ids).tolist()
# for Caltech, exclude partnership fields being observed
visible_prio_field_ids = [f for f in visible_prio_field_ids if f not in exclude_fields]
if len(visible_prio_field_ids) == 0:
if not silent:
logger.info(f'{label}: No fields remaining at priority {i+1}')
continue
if len(visible_prio_field_ids) <= offset:
            # always observe any prio 1 fields if available, without offset
# they weren't included in Partnership HC due to min_hours_visible
if i==0: # prio 1
field_ids_to_observe.extend(visible_prio_field_ids)
n_fields_remaining -= len(visible_prio_field_ids)
if not silent:
logger.info(f'{label}: requesting {len(visible_prio_field_ids)} priority {i+1} fields: {visible_prio_field_ids}')
else:
offset -= len(visible_prio_field_ids)
if not silent:
logger.info(f'{label}: Offsetting {len(visible_prio_field_ids)} at priority {i+1}')
continue
if (len(visible_prio_field_ids)+offset) <= n_fields_remaining:
# include all fields in this priority
field_ids_to_observe.extend(visible_prio_field_ids[offset:])
n_fields_remaining -= len(visible_prio_field_ids[offset:])
if not silent and offset != 0:
logger.info(f'{label}: Offsetting {offset} at priority {i+1}')
offset = 0
if not silent:
logger.info(f'{label}: requesting {len(visible_prio_field_ids[offset:])} priority {i+1} fields: {visible_prio_field_ids}')
else:
# prioritize by cutting on HA at midnight
if not silent and offset != 0:
logger.info(f'{label}: Offsetting {offset} at priority {i+1}')
candidate_fields = fields.fields.loc[visible_prio_field_ids].copy()
candidate_fields['HA_midnight'] = RA_to_HA(
candidate_fields['ra'].values*u.degree,
P48_Observer.midnight(time, which='nearest')).wrap_at(180*u.deg)
candidate_fields['abs_HA_midnight'] = np.abs(candidate_fields['HA_midnight'])
footprint_field_ids = candidate_fields.sort_values(
by='abs_HA_midnight', ascending=True).iloc[offset:offset+n_fields_remaining].index.tolist()
offset=0
n_fields_remaining -= len(footprint_field_ids)
field_ids_to_observe.extend(footprint_field_ids)
if not silent:
logger.info(f'{label}: requesting {len(footprint_field_ids)} priority {i+1} fields of {len(candidate_fields)} total: {footprint_field_ids}')
if not silent:
logger.info(f'{label}: requesting {field_ids_to_observe}')
return field_ids_to_observe
def srg_selection(time, obs_log, other_program_fields, fields):
"""Select SRG fields."""
# use the fields for multiple days so we can improve the sampling
SRG_PAD_DAYS = 2
dt = [0]
for i in range(SRG_PAD_DAYS):
dt.extend([i+1])
dt.extend([-1*(i+1)])
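    # e.g. with SRG_PAD_DAYS = 2 this builds dt = [0, 1, -1, 2, -2]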
srg_fields = []
for dti in dt:
srg_fields.extend(get_srg_fields(time + dti * u.day, fields))
srg_fields_unique = np.unique(srg_fields).tolist()
# exclude fields being observed by MSIP
nss_selection_function = globals()[other_program_fields[(PROGRAM_NAME_TO_ID['MSIP'],'all_sky')]['field_selection_function']]
nss_field_ids = nss_selection_function(time,
obs_log, other_program_fields, fields, silent=True)
srg_fields = np.setdiff1d(srg_fields_unique, nss_field_ids)
logger.debug(f'SRG fields for {time.iso}+/-{SRG_PAD_DAYS} days: {srg_fields}')
return srg_fields
##################### previous/unused -- for reference
def aam_caltech_june21(time, obs_log, other_program_fields, fields):
"""Select Ashish fields, June 2021."""
aam_fields = []
# determine fields being observed by MSIP
nss_selection_function = globals()[other_program_fields[(PROGRAM_NAME_TO_ID['MSIP'],'all_sky')]['field_selection_function']]
nss_field_ids = nss_selection_function(time,
obs_log, other_program_fields, fields, silent=True)
# if (time >= Time('2021-06-05')) and (time < Time('2021-06-11')):
# # Daily from Jun 4 through Jun 9 PT inclusive (== Jun 5 through Jun 10 UT - 6 nights) 3 ZTF fields viz. 1433, 1476, and 1525 once each in g and r (g and r need not be consecutive, but close in time will be useful).
# # set 1 fields: observe every night
# set1 = [1433, 1476, 1525]
# aam_fields.extend(set1)
#
# # Fields 530, 482, 388 from Jun 4 through Jun 9 PT (== Jun 5 through Jun 10 UT, 6 nights), one image each field in g and r on the days when these fields are not being covered by the MSIP survey (the alternate days). Again, g and r need not be consecutive in time.
#
# # set 2 fields: observe when not being observed by MSIP
# set2 = [530, 482, 388]
# set2_use = np.setdiff1d(set2, nss_field_ids)
#
# aam_fields.extend(set2_use)
#
# if (time >= Time('2021-06-03')) and (time < Time('2021-06-05')):
# #(3) Fields 1433, 1476, and 1525 once in g and r on Jun 2 PT (Jun 3 UT)
# #If these can not be taken on Jun 2 PT, we request that the observations are attempted on Jun 3 PT (Jun 4 UT).
# set3 = [1433, 1476, 1525]
#
# observed_ids = obs_log.select_last_observed_time_by_field(
# field_ids = set3,
# filter_ids = [1,2],
# program_ids = [PROGRAM_NAME_TO_ID['Caltech']],
# subprogram_names = ['AAM_June21'],
# # arbitrary early date; start of night tonight
# mjd_range = [Time('2001-01-01').mjd,np.floor(time.mjd)]).index.tolist()
#
# set3_use = np.setdiff1d(set3, observed_ids)
#
# aam_fields.extend(set3_use)
if (time >= Time('2021-06-11')) and (time < Time('2021-07-16')):
#(4) Fields 1433, 1476, and 1525 in g and r once per week for five weeks after Jun 10 (ideally on days when Fields 530, 482, 388 are not done).
set4 = [1433, 1476, 1525]
set4_use = set4
# check if it's been observed in the last week under this program
# unfortunate duplication of code from ObservingProgram.py
# get the times they were last observed:
# (note that fields *never* observed will not be included)
        # since this function is for determining requests
# at the start of the night, exclude observations taken tonight
# this lets us restart the scheduler without breaking things
last_observed_times = obs_log.select_last_observed_time_by_field(
field_ids = set4_use,
filter_ids = [1,2],
program_ids = [PROGRAM_NAME_TO_ID['Caltech']],
subprogram_names = ['AAM_June21'],
# arbitrary early date; start of night tonight
mjd_range = [Time('2001-01-01').mjd,np.floor(time.mjd)])
# we want an object observed at the end of the night N days ago
# to be observed at the start of the night now.
# Max night length is 12.2 hours
intranight_gap = 7. * u.day
cutoff_time = (time - (intranight_gap - 0.6 * u.day)).mjd
# find fields last observed more recently than that
wrecent = (last_observed_times['expMJD'] >= cutoff_time)
recent_field_ids = last_observed_times.loc[wrecent].index.tolist()
# reduce the list to only those not recently observed:
field_ids_due = [idi for idi in set4_use if idi not in recent_field_ids]
aam_fields.extend(field_ids_due)
logger.info(f'Caltech AAM: requesting {aam_fields}')
return aam_fields
def msip_nss_selection(time, obs_log, other_program_fields, fields):
"""Select MSIP NSS fields so they don't overlap with other MSIP subprograms."""
candidate_nss_fields = fields.select_field_ids(dec_range=[-32,90.],
grid_id=0,
observable_hours_range=[1.0, 24.])
# now find the fields used by other MSIP subprograms
other_MSIP_sp_fields = []
for key, oops in other_program_fields.items():
assert(len(key) == 2)
if key[0] == PROGRAM_NAME_TO_ID['MSIP']:
if key[1] != 'all_sky':
if oops['field_ids'] is not None:
other_MSIP_sp_fields.extend(oops['field_ids'])
else:
# we have to run the field selection function
# this is duplicative but easier than making a DAG
try:
selection_function = globals()[oops['field_selection_function']]
field_ids = selection_function(time,
obs_log, other_program_fields, fields)
other_MSIP_sp_fields.extend(field_ids)
except Exception as e:
logger.exception(e)
logger.warning(f'During MSIP NSS field selection, error in generating nightly field list for {key}')
nightly_nss_fields = np.setdiff1d(candidate_nss_fields, other_MSIP_sp_fields)
logger.debug(f'MSIP NSS fields: {len(nightly_nss_fields)} are disjoint from other MSIP programs.')
return nightly_nss_fields
| {
"content_hash": "32e7280aa68667e97ed004a4edf5c7ba",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 272,
"avg_line_length": 47.89164785553047,
"alnum_prop": 0.6268853695324283,
"repo_name": "ZwickyTransientFacility/ztf_sim",
"id": "aa09e1cd0e221003d3e5278e5de7c1824c7cf2f9",
"size": "21216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ztf_sim/field_selection_functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1665177"
},
{
"name": "Python",
"bytes": "212685"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
@app.route("/")
def index():
return render_template("index.html")
if __name__ == "__main__":
app.run()
| {
"content_hash": "a11095ed31b43b1bca93678dd23d9be5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 42,
"avg_line_length": 17,
"alnum_prop": 0.6289592760180995,
"repo_name": "xiao-ming-team/xiao-ming",
"id": "cb8515e97a54e7bacc3d4f3908ae027a4df8cd49",
"size": "221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "406"
},
{
"name": "Python",
"bytes": "2335"
}
],
"symlink_target": ""
} |
"""
Tests for geography support in PostGIS
"""
import os
from django.contrib.gis.db import models
from django.contrib.gis.db.models.functions import Area, Distance
from django.contrib.gis.measure import D
from django.db import NotSupportedError, connection
from django.db.models.functions import Cast
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from ..utils import FuncTestMixin
from .models import City, County, Zipcode
class GeographyTest(TestCase):
fixtures = ['initial']
def test01_fixture_load(self):
"Ensure geography features loaded properly."
self.assertEqual(8, City.objects.count())
@skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic")
def test02_distance_lookup(self):
"Testing distance lookup support on non-point geography fields."
z = Zipcode.objects.get(code='77002')
cities1 = list(City.objects
.filter(point__distance_lte=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
cities2 = list(City.objects
.filter(point__dwithin=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
for cities in [cities1, cities2]:
self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)
def test04_invalid_operators_functions(self):
"Ensuring exceptions are raised for operators & functions invalid on geography fields."
if not connection.ops.postgis:
self.skipTest('This is a PostGIS-specific test.')
# Only a subset of the geometry functions & operator are available
# to PostGIS geography types. For more information, visit:
# http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
z = Zipcode.objects.get(code='77002')
# ST_Within not available.
with self.assertRaises(ValueError):
City.objects.filter(point__within=z.poly).count()
# `@` operator not available.
with self.assertRaises(ValueError):
City.objects.filter(point__contained=z.poly).count()
# Regression test for #14060, `~=` was never really implemented for PostGIS.
htown = City.objects.get(name='Houston')
with self.assertRaises(ValueError):
City.objects.get(point__exact=htown.point)
def test05_geography_layermapping(self):
"Testing LayerMapping support on models with geography fields."
# There is a similar test in `layermap` that uses the same data set,
# but the County model here is a bit different.
from django.contrib.gis.utils import LayerMapping
# Getting the shapefile and mapping dictionary.
shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
co_mapping = {
'name': 'Name',
'state': 'State',
'mpoly': 'MULTIPOLYGON',
}
# Reference county names, number of polygons, and state names.
names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
lm.save(silent=True, strict=True)
for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
self.assertEqual(4326, c.mpoly.srid)
self.assertEqual(num_poly, len(c.mpoly))
self.assertEqual(name, c.name)
self.assertEqual(state, c.state)
class GeographyFunctionTests(FuncTestMixin, TestCase):
fixtures = ['initial']
@skipUnlessDBFeature("supports_extent_aggr")
def test_cast_aggregate(self):
"""
Cast a geography to a geometry field for an aggregate function that
expects a geometry input.
"""
if not connection.features.supports_geography:
self.skipTest("This test needs geography support")
expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
res = City.objects.filter(
name__in=('Houston', 'Dallas')
).aggregate(extent=models.Extent(Cast('point', models.PointField())))
for val, exp in zip(res['extent'], expected):
self.assertAlmostEqual(exp, val, 4)
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_function(self):
"""
Testing Distance() support on non-point geography fields.
"""
if connection.ops.oracle:
ref_dists = [0, 4899.68, 8081.30, 9115.15]
elif connection.ops.spatialite:
# SpatiaLite returns non-zero distance for polygons and points
# covered by that polygon.
ref_dists = [326.61, 4899.68, 8081.30, 9115.15]
else:
ref_dists = [0, 4891.20, 8071.64, 9123.95]
htown = City.objects.get(name='Houston')
qs = Zipcode.objects.annotate(
distance=Distance('poly', htown.point),
distance2=Distance(htown.point, 'poly'),
)
for z, ref in zip(qs, ref_dists):
self.assertAlmostEqual(z.distance.m, ref, 2)
if connection.ops.postgis:
# PostGIS casts geography to geometry when distance2 is calculated.
ref_dists = [0, 4899.68, 8081.30, 9115.15]
for z, ref in zip(qs, ref_dists):
self.assertAlmostEqual(z.distance2.m, ref, 2)
if not connection.ops.spatialite:
# Distance function combined with a lookup.
hzip = Zipcode.objects.get(code='77002')
self.assertEqual(qs.get(distance__lte=0), hzip)
@skipUnlessDBFeature("has_Area_function", "supports_area_geodetic")
def test_geography_area(self):
"""
Testing that Area calculations work on geography columns.
"""
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
z = Zipcode.objects.annotate(area=Area('poly')).get(code='77002')
# Round to the nearest thousand as possible values (depending on
# the database and geolib) include 5439084, 5439100, 5439101.
rounded_value = z.area.sq_m
rounded_value -= z.area.sq_m % 1000
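        # e.g. (illustrative) an area of 5439084.3 sq m is reduced by
        # 5439084.3 % 1000 == 84.3, leaving 5439000.0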
self.assertEqual(rounded_value, 5439000)
@skipUnlessDBFeature("has_Area_function")
@skipIfDBFeature("supports_area_geodetic")
def test_geodetic_area_raises_if_not_supported(self):
with self.assertRaisesMessage(NotSupportedError, 'Area on geodetic coordinate systems not supported.'):
Zipcode.objects.annotate(area=Area('poly')).get(code='77002')
| {
"content_hash": "b96e9abc85beb5abd98ebf8123eea339",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 111,
"avg_line_length": 44.70322580645161,
"alnum_prop": 0.6345793043729254,
"repo_name": "elena/django",
"id": "53852517e86424da0b0620de3382ecf670c28d0f",
"size": "6929",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/gis_tests/geogapp/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43253"
},
{
"name": "HTML",
"bytes": "171768"
},
{
"name": "JavaScript",
"bytes": "105066"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11016010"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import tweepy
import pandas as pd
import matplotlib.pyplot as plt
import csv
import time
pd.options.display.max_columns = 50
pd.options.display.max_rows= 50
pd.options.display.width= 120
auth = tweepy.auth.OAuthHandler('mEnkTsYmvFGgzjV73SPOz084K', 'YnQxTyFhTCG5KSGBeRq1qeVwUkxOhZ99amm6uauy8ett51UE3t')
auth.set_access_token('301689344-MG8rknSLPC8dUXAjWE6Eo4DQTeS4JJGjNuTJ6i41', 'vcjYSSekdT0O8qwMVhh9e6flVC1LaP5OlssIsU4nGWewh')
api = tweepy.API(auth)
# <codecell>
partiler = {}
partiler["akp"] = 0
partiler["chp"] = 0
partiler["mhp"] = 0
partiler["bdp"] = 0
counter = 0
general_counter = 0
try:
for tweet in tweepy.Cursor(api.search, q="akp OR chp OR mhp OR bdp OR hdp",lang="tr", since="2015-03-19", until="2015-03-20").items(999999999):
counter = counter + 1
twit = tweet.text
if '\n' in twit:
twit = twit.replace('\n', ' ')
twit = twit.replace('\t', ' ')
twit = twit.replace('"', ' ')
print counter, "--", tweet.id, tweet.created_at, twit.encode('utf-8')
        # count mentions of each party, matching both upper- and lower-case
        # spellings (HDP mentions are tallied together with BDP)
        if "AKP" in tweet.text or "akp" in tweet.text:
            partiler["akp"] = partiler["akp"] + 1
        if "CHP" in tweet.text or "chp" in tweet.text:
            partiler["chp"] = partiler["chp"] + 1
        if "MHP" in tweet.text or "mhp" in tweet.text:
            partiler["mhp"] = partiler["mhp"] + 1
        if "BDP" in tweet.text or "bdp" in tweet.text:
            partiler["bdp"] = partiler["bdp"] + 1
        if "HDP" in tweet.text or "hdp" in tweet.text:
            partiler["bdp"] = partiler["bdp"] + 1
with open('tweets.csv', 'a') as csvfile:
fieldnames = ['id', 'date', 'tweet','coordinates','user_location']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'id': tweet.id, 'date': tweet.created_at, 'tweet': twit.encode('utf-8'), 'coordinates': tweet.coordinates, 'user_location': tweet.author.location.encode('utf-8') })
if counter == 15:
time.sleep(5)
counter = 0
general_counter = general_counter + 1
if general_counter == 20:
time.sleep(60)
general_counter = 0
except BaseException, e:
print 'failed ondata,', str(e)
time.sleep(5)
# <codecell>
| {
"content_hash": "9606edeb1f9a9c5cdcc45c057369a4a4",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 193,
"avg_line_length": 36.86666666666667,
"alnum_prop": 0.6003616636528029,
"repo_name": "Searil/big_data_politics",
"id": "455f05720a2a775c9ed18ad409291ef1e1a57ade",
"size": "2320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "partilere_gore_tweet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42580"
},
{
"name": "Shell",
"bytes": "401"
}
],
"symlink_target": ""
} |
import logging
from osp_classifier.classifiers import SyllabusClassifier
from pyspark import SparkContext
from pyspark.sql import types as T
from pyspark.sql.functions import array, col, lit, struct, udf, when
from osp_pipeline import PICKLE_PATH
from osp_pipeline.classifier.models import DocumentSyllabus
from osp_pipeline.classifier.joins import MatchDocument
from osp_pipeline.core.utils import try_or_log
from osp_pipeline.corpus.models import Document, DocumentText
log = logging.getLogger(__name__)
MIN_TEXT_LENGTH = 150
@try_or_log(log_args=False)
def estimate(pipeline, row, doc_id):
"""Get the probability that a document is a syllabus.
Documents that have extracted text less than or equal to
MIN_TEXT_LENGTH characters are removed from consideration.
NOTE: Estimation is done with the `predict_proba` method, which
means the pipeline must support this method.
:param pipeline: An sklearn pipeline.
:param row: A pyspark.sql.Row.
:returns: A float in [0.0, 1.0].
"""
if len(row['Text'][0]) <= MIN_TEXT_LENGTH:
log.info((f"Filtered out 'document_id={doc_id}', extracted text <= "
f"{MIN_TEXT_LENGTH} characters."))
return None
return float(pipeline.predict_proba(row)[:, 1])
def find_syllabi(docs, texts):
"""Create a dataframe of information from a syllabus classifier.
:param docs: A Document DataFrame.
:param texts: A DocumentText DataFrame.
:returns: A DocumentSyllabus DataFrame.
"""
sc = SparkContext.getOrCreate()
classifier = SyllabusClassifier.load(PICKLE_PATH, no_download=True)
# NOTE: For some reason, trying to pass the classifier in directly
# to the spark job causes errors (stuff about thread locking and
# being unable to pickle).
pipeline_bc = sc.broadcast(classifier.pipeline)
match_docs = MatchDocument.build_df(docs, texts)
# Currently, our sklearn pipeline expects a particular format for
# the object it reads:
# - The object needs to implement __getitem__() (e.g., dict-like
# `d[k]`.)
# - The object needs to have specific, hard-coded keys.
# - The object values behind those keys need to be iterables of
# strings.
# This presents a challenge when trying to use sklearn with our
# Spark objects, and the solution below is fragile. Something about
# this process should be made more robust, either in osp-classifier
# or osp-pipeline.
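    # Purely illustrative example of one such object (keys as listed above,
    # values invented):
    #   {"WARC-Target-URI": ["http://example.edu/101/syllabus"],
    #    "X-Source-Url": [""],
    #    "X-Source-Anchor": ["Course syllabus"],
    #    "Text": ["Course description ..."]}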
predictWithPipeline = udf(
lambda row, doc_id: estimate(pipeline_bc.value, row, doc_id),
T.DoubleType()
)
to_rename = match_docs.columns
to_rename.remove('id')
new_names = {
'url': "WARC-Target-URI",
'source_url': "X-Source-Url",
'source_anchor': "X-Source-Anchor",
'text': "Text"
}
def wrap_and_rename(column):
return (
when(match_docs[column].isNotNull(), array(match_docs[column]))
.otherwise(array(lit("")))
.alias(new_names[column])
)
result = (
match_docs
.select(match_docs.id, *[
wrap_and_rename(column)
for column in to_rename
])
.withColumn(
"probability",
predictWithPipeline(struct(*new_names.values()), 'id')
)
.where(col("probability").isNotNull())
.withColumnRenamed('id', 'document_id')
.select(*DocumentSyllabus.schema.names)
)
return result
def main(sc, spark):
"""Create a DocumentSyllabus dataframe and save it.
"""
docs = Document.read(spark)
texts = DocumentText.read(spark)
result = find_syllabi(docs, texts)
DocumentSyllabus.save_df(result)
| {
"content_hash": "88dc9e533ee11ce7af5272701884c604",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 76,
"avg_line_length": 31.584745762711865,
"alnum_prop": 0.6570968607459082,
"repo_name": "opensyllabus/osp-pipeline",
"id": "926681a8f372d27d3a63a3efb4c47567d5f112c2",
"size": "3727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osp_pipeline/classifier/jobs/find_syllabi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "72698"
},
{
"name": "Python",
"bytes": "283679"
},
{
"name": "Shell",
"bytes": "10441"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Prospectus.stars'
db.add_column(u'campaign_prospectus', 'stars',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Prospectus.stars'
db.delete_column(u'campaign_prospectus', 'stars')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'campaign.campaign': {
'Meta': {'object_name': 'Campaign'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'genre': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'prospectus': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['campaign.Prospectus']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'campaign.prospectus': {
'Meta': {'object_name': 'Prospectus'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'point_multiplier': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'privacy_status': ('django.db.models.fields.CharField', [], {'default': "'PR'", 'max_length': '2'}),
'private_votes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'stars': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'vote_type': ('django.db.models.fields.CharField', [], {'default': "'RANK'", 'max_length': '10'})
},
u'campaign.vote': {
'Meta': {'object_name': 'Vote'},
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['campaign.Campaign']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {}),
'voter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
    complete_apps = ['campaign']
| {
"content_hash": "cba8117731774b621ba7a82986ad8b94",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 195,
"avg_line_length": 68.1590909090909,
"alnum_prop": 0.5541847282427476,
"repo_name": "tdphillips/campaigns",
"id": "66e2dab0ae14864486fc734bff31ea3d643926f5",
"size": "6022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "campaign/migrations/0004_auto__add_field_prospectus_stars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6091"
},
{
"name": "JavaScript",
"bytes": "1287"
},
{
"name": "Python",
"bytes": "47189"
}
],
"symlink_target": ""
} |
import sys, os
# If your extensions are in another directory, add it here.
#sys.path.append('some/directory')
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Pebl'
copyright = '2008, Abhik Shah'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '1.0.1'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pebldoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
#latex_documents = []
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Custom extension for configuration parameters
# ---------------------------------------------
def setup(app):
app.add_description_unit('confparam', 'confparam', '%s; configuration parameter')
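# With the unit registered above, reST sources can use the directive and role
# (the parameter name below is hypothetical):
#
#   .. confparam:: data.directory
#
# and cross-reference it elsewhere with :confparam:`data.directory`.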
| {
"content_hash": "8567d0bc6bbd68e4611dd6358ea3b979",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 85,
"avg_line_length": 29.80952380952381,
"alnum_prop": 0.7039403620873269,
"repo_name": "Delwddrylliwr/pebl-project",
"id": "d6bf8f13dfb101804a10d32955dbc53e8ee20a3c",
"size": "4252",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "docs/src/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9560"
},
{
"name": "CSS",
"bytes": "4331"
},
{
"name": "HTML",
"bytes": "4269"
},
{
"name": "JavaScript",
"bytes": "33578"
},
{
"name": "Python",
"bytes": "219403"
}
],
"symlink_target": ""
} |
import bpy, sys, os, re, struct, traceback
ARGS_PATTERN = re.compile(r'''(?:"([^"]+)"|'([^']+)'|(\S+))''')
# Background mode seems to require quit() in some 2.80 builds
def _quitblender():
bpy.ops.wm.quit_blender()
quit()
MIN_BLENDER_MAJOR = 2
MIN_BLENDER_MINOR = 83
# Extract pipe file descriptors from arguments
print('HECL Blender Launch', sys.argv)
if '--' not in sys.argv:
_quitblender()
args = sys.argv[sys.argv.index('--')+1:]
readfd = int(args[0])
writefd = int(args[1])
verbosity_level = int(args[2])
err_path = ""
if sys.platform == "win32":
import msvcrt
readfd = msvcrt.open_osfhandle(readfd, os.O_RDONLY | os.O_BINARY)
writefd = msvcrt.open_osfhandle(writefd, os.O_WRONLY | os.O_BINARY)
err_path = "/Temp"
if 'TEMP' in os.environ:
err_path = os.environ['TEMP']
else:
err_path = "/tmp"
if 'TMPDIR' in os.environ:
err_path = os.environ['TMPDIR']
err_path += "/hecl_%016X.derp" % os.getpid()
def readpipestr():
read_bytes = os.read(readfd, 4)
if len(read_bytes) != 4:
print('HECL connection lost or desynchronized')
_quitblender()
read_len = struct.unpack('I', read_bytes)[0]
return os.read(readfd, read_len)
def writepipestr(linebytes):
#print('LINE', linebytes)
os.write(writefd, struct.pack('I', len(linebytes)))
os.write(writefd, linebytes)
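# Wire format example (illustrative): writepipestr(b'READY') writes the 4-byte
# native-endian length followed by the payload, i.e. b'\x05\x00\x00\x00READY'
# on a little-endian host.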
def writepipebuf(linebytes):
#print('BUF', linebytes)
os.write(writefd, linebytes)
def quitblender():
writepipestr(b'QUITTING')
_quitblender()
class PathHasher:
def hashpath32(self, path):
writepipestr(path.encode())
read_str = readpipestr()
return int(read_str[0:8], 16)
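        # e.g. (made-up reply) if the HECL side answers b'DEADBEEF00000000',
        # hashpath32 returns int(b'DEADBEEF', 16) == 0xDEADBEEF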
# Ensure Blender 2.83+ is being used
if bpy.app.version < (MIN_BLENDER_MAJOR, MIN_BLENDER_MINOR, 0):
writepipestr(b'INVALIDBLENDERVER')
_quitblender()
# If there's a third argument, use it as the .zip path containing the addon
did_install = False
if len(args) >= 4 and args[3] != 'SKIPINSTALL':
bpy.ops.preferences.addon_install(overwrite=True, target='DEFAULT', filepath=args[3])
bpy.ops.preferences.addon_refresh()
did_install = True
# Make addon available to commands
if bpy.context.preferences.addons.find('hecl') == -1:
try:
bpy.ops.preferences.addon_enable(module='hecl')
bpy.ops.wm.save_userpref()
except:
pass
try:
import hecl
except:
writepipestr(b'NOADDON')
_quitblender()
# Quit if just installed
if did_install:
writepipestr(b'ADDONINSTALLED')
_quitblender()
# Intro handshake
writepipestr(b'READY')
ackbytes = readpipestr()
if ackbytes != b'ACK':
quitblender()
# Count brackets
def count_brackets(linestr):
bracket_count = 0
for ch in linestr:
if ch in {'[','{','('}:
bracket_count += 1
elif ch in {']','}',')'}:
bracket_count -= 1
return bracket_count
# Read line of space-separated/quoted arguments
def read_cmdargs():
cmdline = readpipestr()
if cmdline == b'':
print('HECL connection lost')
_quitblender()
cmdargs = []
for match in ARGS_PATTERN.finditer(cmdline.decode()):
cmdargs.append(match.group(match.lastindex))
return cmdargs
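# Example of the splitting (the command line below is made up):
#   b'MESHCOMPILENAME "Sky Mesh" 0' -> ['MESHCOMPILENAME', 'Sky Mesh', '0']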
# Complete sequences of statements compiled/executed here
def exec_compbuf(compbuf, globals):
if verbosity_level >= 3:
print(compbuf)
try:
co = compile(compbuf, '<HECL>', 'exec')
exec(co, globals)
except Exception as e:
trace_prefix = 'Error processing:\n'
trace_prefix += compbuf
raise RuntimeError(trace_prefix) from e
# Command loop for writing animation key data to blender
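# Sketch of the stream it consumes (field names are descriptive, not a spec):
#   int8   curve type: 0=rotation, 1=translation, 2=scale, negative ends loop
#   int32  curve index, int32 key count
#   per key: int32 frame, float32 value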
def animin_loop(globals):
writepipestr(b'ANIMREADY')
while True:
crv_type = struct.unpack('b', os.read(readfd, 1))
if crv_type[0] < 0:
writepipestr(b'ANIMDONE')
return
elif crv_type[0] == 0:
crvs = globals['rotCurves']
elif crv_type[0] == 1:
crvs = globals['transCurves']
elif crv_type[0] == 2:
crvs = globals['scaleCurves']
key_info = struct.unpack('ii', os.read(readfd, 8))
crv = crvs[key_info[0]]
crv.keyframe_points.add(count=key_info[1])
if crv_type[0] == 1:
for k in range(key_info[1]):
key_data = struct.unpack('if', os.read(readfd, 8))
pt = crv.keyframe_points[k]
pt.interpolation = 'LINEAR'
pt.co = (key_data[0], key_data[1])
else:
for k in range(key_info[1]):
key_data = struct.unpack('if', os.read(readfd, 8))
pt = crv.keyframe_points[k]
pt.interpolation = 'LINEAR'
pt.co = (key_data[0], key_data[1])
def writelight(obj):
wmtx = obj.matrix_world
writepipebuf(struct.pack('ffffffffffffffff',
wmtx[0][0], wmtx[0][1], wmtx[0][2], wmtx[0][3],
wmtx[1][0], wmtx[1][1], wmtx[1][2], wmtx[1][3],
wmtx[2][0], wmtx[2][1], wmtx[2][2], wmtx[2][3],
wmtx[3][0], wmtx[3][1], wmtx[3][2], wmtx[3][3]))
writepipebuf(struct.pack('fff', obj.data.color[0], obj.data.color[1], obj.data.color[2]))
type = 2
spotCutoff = 0.0
hasFalloff = False
castShadow = False
if obj.data.type == 'POINT':
type = 2
hasFalloff = True
castShadow = obj.data.use_shadow
elif obj.data.type == 'SPOT':
type = 3
hasFalloff = True
spotCutoff = obj.data.spot_size
castShadow = obj.data.use_shadow
elif obj.data.type == 'SUN':
type = 1
castShadow = obj.data.use_shadow
constant = 1.0
linear = 0.0
quadratic = 0.0
if hasFalloff:
if obj.data.falloff_type == 'INVERSE_COEFFICIENTS':
constant = obj.data.constant_coefficient
linear = obj.data.linear_coefficient
quadratic = obj.data.quadratic_coefficient
layer = 0
if 'retro_layer' in obj.data.keys():
layer = obj.data['retro_layer']
writepipebuf(struct.pack('IIfffffb', layer, type, obj.data.energy, spotCutoff, constant, linear, quadratic,
castShadow))
writepipestr(obj.name.encode())
# Command loop for reading data from blender
def dataout_loop():
writepipestr(b'READY')
while True:
cmdargs = read_cmdargs()
print(cmdargs)
if cmdargs[0] == 'DATAEND':
writepipestr(b'DONE')
return
elif cmdargs[0] == 'MESHLIST':
meshCount = 0
for meshobj in bpy.data.objects:
if meshobj.type == 'MESH' and not meshobj.data.library:
meshCount += 1
writepipebuf(struct.pack('I', meshCount))
for meshobj in bpy.data.objects:
if meshobj.type == 'MESH' and not meshobj.data.library:
writepipestr(meshobj.name.encode())
elif cmdargs[0] == 'LIGHTLIST':
lightCount = 0
for obj in bpy.context.scene.objects:
if obj.type == 'LIGHT' and not obj.data.library:
lightCount += 1
writepipebuf(struct.pack('I', lightCount))
for obj in bpy.context.scene.objects:
if obj.type == 'LIGHT' and not obj.data.library:
writepipestr(obj.name.encode())
elif cmdargs[0] == 'MESHAABB':
writepipestr(b'OK')
hecl.mesh_aabb(writepipebuf)
elif cmdargs[0] == 'MESHCOMPILE':
meshName = bpy.context.scene.hecl_mesh_obj
if meshName not in bpy.data.objects:
writepipestr(('mesh %s not found' % meshName).encode())
continue
writepipestr(b'OK')
hecl.hmdl.cook(writepipebuf, bpy.data.objects[meshName])
elif cmdargs[0] == 'ARMATURECOMPILE':
armName = bpy.context.scene.hecl_arm_obj
if armName not in bpy.data.objects:
writepipestr(('armature %s not found' % armName).encode())
continue
writepipestr(b'OK')
hecl.armature.cook(writepipebuf, bpy.data.objects[armName].data)
elif cmdargs[0] == 'MESHCOMPILENAME':
meshName = cmdargs[1]
useLuv = int(cmdargs[2])
if meshName not in bpy.data.objects:
writepipestr(('mesh %s not found' % meshName).encode())
continue
writepipestr(b'OK')
hecl.hmdl.cook(writepipebuf, bpy.data.objects[meshName], useLuv)
elif cmdargs[0] == 'MESHCOMPILENAMECOLLISION':
meshName = cmdargs[1]
if meshName not in bpy.data.objects:
writepipestr(('mesh %s not found' % meshName).encode())
continue
writepipestr(b'OK')
hecl.hmdl.cookcol(writepipebuf, bpy.data.objects[meshName])
elif cmdargs[0] == 'MESHCOMPILECOLLISIONALL':
writepipestr(b'OK')
colCount = 0
for obj in bpy.context.scene.objects:
if obj.type == 'MESH' and not obj.data.library:
colCount += 1
writepipebuf(struct.pack('I', colCount))
for obj in bpy.context.scene.objects:
if obj.type == 'MESH' and not obj.data.library:
hecl.hmdl.cookcol(writepipebuf, obj)
elif cmdargs[0] == 'MESHCOMPILEPATH':
meshName = bpy.context.scene.hecl_path_obj
if meshName not in bpy.data.objects:
writepipestr(('mesh %s not found' % meshName).encode())
continue
writepipestr(b'OK')
hecl.path.cook(writepipebuf, bpy.data.objects[meshName])
elif cmdargs[0] == 'WORLDCOMPILE':
writepipestr(b'OK')
hecl.swld.cook(writepipebuf)
elif cmdargs[0] == 'FRAMECOMPILE':
version = int(cmdargs[1])
if version != 0 and version != 1:
writepipestr(b'bad version')
continue
writepipestr(b'OK')
buffer = hecl.frme.cook(writepipebuf, version, PathHasher())
writepipestr(b'FRAMEDONE')
writepipebuf(struct.pack('I', len(buffer)))
writepipebuf(buffer)
elif cmdargs[0] == 'LIGHTCOMPILEALL':
writepipestr(b'OK')
lampCount = 0
firstSpot = None
for obj in bpy.context.scene.objects:
if obj.type == 'LIGHT':
lampCount += 1
if firstSpot is None and obj.data.type == 'SPOT':
firstSpot = obj
# Ambient
world = bpy.context.scene.world
ambient_energy = 0.0
ambient_color = None
if world.use_nodes and 'Background' in world.node_tree.nodes:
bg_node = world.node_tree.nodes['Background']
ambient_energy = bg_node.inputs[1].default_value
ambient_color = bg_node.inputs[0].default_value
if ambient_energy:
lampCount += 1
writepipebuf(struct.pack('I', lampCount))
if firstSpot is not None:
writelight(firstSpot)
if ambient_energy:
writepipebuf(struct.pack('ffffffffffffffff',
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0))
writepipebuf(struct.pack('fff', ambient_color[0], ambient_color[1], ambient_color[2]))
writepipebuf(struct.pack('IIfffffb', 0, 0, ambient_energy, 0.0, 1.0, 0.0, 0.0, False))
writepipestr(b'AMBIENT')
# Lamp objects
for obj in bpy.context.scene.objects:
if obj != firstSpot and obj.type == 'LIGHT':
writelight(obj)
elif cmdargs[0] == 'GETTEXTURES':
writepipestr(b'OK')
img_count = 0
for img in bpy.data.images:
if img.type == 'IMAGE':
img_count += 1
writepipebuf(struct.pack('I', img_count))
for img in bpy.data.images:
if img.type == 'IMAGE':
path = os.path.normpath(bpy.path.abspath(img.filepath))
writepipebuf(struct.pack('I', len(path)))
writepipebuf(path.encode())
elif cmdargs[0] == 'ACTORCOMPILE':
writepipestr(b'OK')
hecl.sact.cook(writepipebuf)
elif cmdargs[0] == 'ACTORCOMPILECHARACTERONLY':
writepipestr(b'OK')
hecl.sact.cook_character_only(writepipebuf)
elif cmdargs[0] == 'ACTIONCOMPILECHANNELSONLY':
actionName = cmdargs[1]
writepipestr(b'OK')
hecl.sact.cook_action_channels_only(writepipebuf, actionName)
elif cmdargs[0] == 'GETSUBTYPENAMES':
writepipestr(b'OK')
hecl.sact.get_subtype_names(writepipebuf)
elif cmdargs[0] == 'GETSUBTYPEOVERLAYNAMES':
subtypeName = cmdargs[1]
writepipestr(b'OK')
hecl.sact.get_subtype_overlay_names(writepipebuf, subtypeName)
elif cmdargs[0] == 'GETATTACHMENTNAMES':
writepipestr(b'OK')
hecl.sact.get_attachment_names(writepipebuf)
elif cmdargs[0] == 'GETACTIONNAMES':
writepipestr(b'OK')
hecl.sact.get_action_names(writepipebuf)
elif cmdargs[0] == 'GETBONEMATRICES':
armName = cmdargs[1]
if armName not in bpy.data.objects:
writepipestr(('armature %s not found' % armName).encode())
continue
armObj = bpy.data.objects[armName]
if armObj.type != 'ARMATURE':
writepipestr(('object %s not an ARMATURE' % armName).encode())
continue
writepipestr(b'OK')
writepipebuf(struct.pack('I', len(armObj.data.bones)))
for bone in armObj.data.bones:
writepipebuf(struct.pack('I', len(bone.name)))
writepipebuf(bone.name.encode())
for r in bone.matrix_local.to_3x3():
for c in r:
writepipebuf(struct.pack('f', c))
elif cmdargs[0] == 'RENDERPVS':
pathOut = cmdargs[1]
locX = float(cmdargs[2])
locY = float(cmdargs[3])
locZ = float(cmdargs[4])
hecl.srea.render_pvs(pathOut, (locX, locY, locZ))
writepipestr(b'OK')
elif cmdargs[0] == 'RENDERPVSLIGHT':
pathOut = cmdargs[1]
lightName = cmdargs[2]
hecl.srea.render_pvs_light(pathOut, lightName)
writepipestr(b'OK')
elif cmdargs[0] == 'MAPAREACOMPILE':
if 'MAP' not in bpy.data.objects:
writepipestr(('"MAP" object not in .blend').encode())
continue
map_obj = bpy.data.objects['MAP']
if map_obj.type != 'MESH':
writepipestr(('object "MAP" not a MESH').encode())
continue
writepipestr(b'OK')
hecl.mapa.cook(writepipebuf, map_obj)
elif cmdargs[0] == 'MAPUNIVERSECOMPILE':
writepipestr(b'OK')
hecl.mapu.cook(writepipebuf)
loaded_blend = None
# Main exception handling
try:
# Command loop
while True:
cmdargs = read_cmdargs()
print(cmdargs)
if cmdargs[0] == 'QUIT':
quitblender()
elif cmdargs[0] == 'OPEN':
if 'FINISHED' in bpy.ops.wm.open_mainfile(filepath=cmdargs[1]):
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode = 'OBJECT')
loaded_blend = cmdargs[1]
writepipestr(b'FINISHED')
else:
writepipestr(b'CANCELLED')
elif cmdargs[0] == 'CREATE':
if len(cmdargs) >= 4:
bpy.ops.wm.open_mainfile(filepath=cmdargs[3])
else:
bpy.ops.wm.read_homefile(use_empty=True)
bpy.context.scene.world = bpy.data.worlds.new('World')
loaded_blend = cmdargs[1]
bpy.context.preferences.filepaths.save_version = 0
if 'FINISHED' in bpy.ops.wm.save_as_mainfile(filepath=cmdargs[1]):
bpy.ops.file.hecl_patching_load()
bpy.context.scene.hecl_type = cmdargs[2]
writepipestr(b'FINISHED')
else:
writepipestr(b'CANCELLED')
elif cmdargs[0] == 'GETTYPE':
writepipestr(bpy.context.scene.hecl_type.encode())
elif cmdargs[0] == 'GETMESHRIGGED':
meshName = bpy.context.scene.hecl_mesh_obj
if meshName not in bpy.data.objects:
writepipestr(b'FALSE')
else:
if len(bpy.data.objects[meshName].vertex_groups):
writepipestr(b'TRUE')
else:
writepipestr(b'FALSE')
elif cmdargs[0] == 'SAVE':
bpy.context.preferences.filepaths.save_version = 0
print('SAVING %s' % loaded_blend)
if loaded_blend:
if 'FINISHED' in bpy.ops.wm.save_as_mainfile(filepath=loaded_blend, check_existing=False, compress=True):
writepipestr(b'FINISHED')
else:
writepipestr(b'CANCELLED')
elif cmdargs[0] == 'PYBEGIN':
writepipestr(b'READY')
globals = {'hecl':hecl}
compbuf = str()
bracket_count = 0
while True:
try:
line = readpipestr()
# ANIM check
if line == b'PYANIM':
# Ensure remaining block gets executed
if len(compbuf):
exec_compbuf(compbuf, globals)
compbuf = str()
animin_loop(globals)
continue
# End check
elif line == b'PYEND':
# Ensure remaining block gets executed
if len(compbuf):
exec_compbuf(compbuf, globals)
compbuf = str()
writepipestr(b'DONE')
break
# Syntax filter
linestr = line.decode().rstrip()
if not len(linestr) or linestr.lstrip()[0] == '#':
writepipestr(b'OK')
continue
leading_spaces = len(linestr) - len(linestr.lstrip())
# Block lines always get appended right away
if linestr.endswith(':') or leading_spaces or bracket_count:
if len(compbuf):
compbuf += '\n'
compbuf += linestr
bracket_count += count_brackets(linestr)
writepipestr(b'OK')
continue
# Complete non-block statement in compbuf
if len(compbuf):
exec_compbuf(compbuf, globals)
# Establish new compbuf
compbuf = linestr
bracket_count += count_brackets(linestr)
except Exception as e:
writepipestr(b'EXCEPTION')
raise
break
writepipestr(b'OK')
elif cmdargs[0] == 'PYEND':
writepipestr(b'ERROR')
elif cmdargs[0] == 'DATABEGIN':
try:
dataout_loop()
except Exception as e:
writepipestr(b'EXCEPTION')
raise
elif cmdargs[0] == 'DATAEND':
writepipestr(b'ERROR')
else:
hecl.command(cmdargs, writepipestr, writepipebuf)
except Exception:
fout = open(err_path, 'w')
traceback.print_exc(file=fout)
fout.close()
raise
| {
"content_hash": "88b0ad577213348d7fba0ff9b1148107",
"timestamp": "",
"source": "github",
"line_count": 590,
"max_line_length": 121,
"avg_line_length": 34.391525423728815,
"alnum_prop": 0.533241338524469,
"repo_name": "AxioDL/PathShagged",
"id": "7af8a934fe8816d85b9a6bae83bd1d4619da1aa6",
"size": "20291",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hecl/blender/hecl_blendershell.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "30964"
},
{
"name": "C++",
"bytes": "1853098"
},
{
"name": "CMake",
"bytes": "25640"
},
{
"name": "Python",
"bytes": "29052"
}
],
"symlink_target": ""
} |
"""
Contains the test class for convert.py
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .base import TestsBase
from ..convert import convert
from ..reader import read, get_version
from ..current import current_nbformat
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestConvert(TestsBase):
def test_downgrade(self):
"""Do notebook downgrades work?"""
# Open a version 3 notebook and attempt to downgrade it to version 2.
with self.fopen(u'test3.ipynb', u'r') as f:
nb = read(f)
nb = convert(nb, 2)
# Check if downgrade was successful.
(major, minor) = get_version(nb)
self.assertEqual(major, 2)
def test_upgrade(self):
"""Do notebook upgrades work?"""
# Open a version 2 notebook and attempt to upgrade it to version 3.
with self.fopen(u'test2.ipynb', u'r') as f:
nb = read(f)
nb = convert(nb, 3)
# Check if upgrade was successful.
(major, minor) = get_version(nb)
self.assertEqual(major, 3)
def test_open_current(self):
"""Can an old notebook be opened and converted to the current version
while remembering the original version of the notebook?"""
# Open a version 2 notebook and attempt to upgrade it to the current version
# while remembering its version information.
with self.fopen(u'test2.ipynb', u'r') as f:
nb = read(f)
(original_major, original_minor) = get_version(nb)
nb = convert(nb, current_nbformat)
# Check if upgrade was successful.
(major, minor) = get_version(nb)
self.assertEqual(major, current_nbformat)
# Check if the original major revision was remembered.
self.assertEqual(original_major, 2)
| {
"content_hash": "6268a6f0685aa68d32d6219eeb89257a",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 84,
"avg_line_length": 34.88405797101449,
"alnum_prop": 0.5143331948483589,
"repo_name": "Lightmatter/django-inlineformfield",
"id": "9eb45ad2a8c5e4a52e8577f6535aef1aeba8faf5",
"size": "2407",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": ".tox/py27/lib/python2.7/site-packages/IPython/nbformat/tests/test_convert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43622"
},
{
"name": "Groff",
"bytes": "3667"
},
{
"name": "HTML",
"bytes": "108126"
},
{
"name": "JavaScript",
"bytes": "853457"
},
{
"name": "Python",
"bytes": "10506732"
},
{
"name": "Shell",
"bytes": "3801"
},
{
"name": "Smarty",
"bytes": "21023"
}
],
"symlink_target": ""
} |
class Base:
def __init__(self):
print "object of type Base initialized."
return
def baseMethod(self):
print "baseMethod invoked."
return
class Delegate:
def __init__(self, base):
print "object of type Delegate initialized."
# save a reference to 'base' via __dict__ directly, so our own __setattr__ below is not triggered
self.__dict__['base'] = base
return
def delegateMethod(self):
print "delegateMethod invoked."
return
def __del__(self): # Our extension - the destructor
print "destructor for Delegate class fired."
return
# Pass everything else through to the Base by
# using __{get,set}attr__ (delegated inheritance)
#
def __getattr__(self, attr):
return getattr(self.base, attr)
def __setattr__(self, attr, value):
return setattr(self.base, attr, value)
#
# Example usage
#
if __name__ == '__main__':
b = Base() # create our Base object (before the delegate)
#
d = Delegate(b) # initialize Delegate with the _already created_ base
#
d.delegateMethod()
d.baseMethod()
#
# destructor will fire as script terminates
#
print "done."
| {
"content_hash": "20ea46ddeaa552178df269161d6438f9",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 73,
"avg_line_length": 24.893617021276597,
"alnum_prop": 0.5914529914529915,
"repo_name": "egustafson/sandbox",
"id": "a7919388e0c0d43cb6dcda317f24dcacceaa635d",
"size": "2026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/py-inheritance-by-delegation/deligatedInheritance.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "65426"
},
{
"name": "Assembly",
"bytes": "2103"
},
{
"name": "C",
"bytes": "94748"
},
{
"name": "C++",
"bytes": "52883"
},
{
"name": "Dockerfile",
"bytes": "873"
},
{
"name": "FreeMarker",
"bytes": "195"
},
{
"name": "Game Maker Language",
"bytes": "24204"
},
{
"name": "Go",
"bytes": "128092"
},
{
"name": "Groovy",
"bytes": "584"
},
{
"name": "HTML",
"bytes": "2491"
},
{
"name": "Java",
"bytes": "232698"
},
{
"name": "JavaScript",
"bytes": "278"
},
{
"name": "Lex",
"bytes": "2806"
},
{
"name": "Lua",
"bytes": "809"
},
{
"name": "M4",
"bytes": "1718"
},
{
"name": "Makefile",
"bytes": "22166"
},
{
"name": "Perl",
"bytes": "25945"
},
{
"name": "Python",
"bytes": "131732"
},
{
"name": "Roff",
"bytes": "1455"
},
{
"name": "Ruby",
"bytes": "5870"
},
{
"name": "Scala",
"bytes": "2130"
},
{
"name": "Shell",
"bytes": "7117"
},
{
"name": "Tcl",
"bytes": "4561"
},
{
"name": "TeX",
"bytes": "63201"
},
{
"name": "Yacc",
"bytes": "924"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: aws_ec2
plugin_type: inventory
short_description: EC2 inventory source
requirements:
- boto3
- botocore
extends_documentation_fragment:
- inventory_cache
- constructed
- aws_credentials
description:
- Get inventory hosts from Amazon Web Services EC2.
- Uses a YAML configuration file that ends with C(aws_ec2.(yml|yaml)).
notes:
- If no credentials are provided and the control node has an associated IAM instance profile then the
role will be used for authentication.
author:
- Sloane Hertel (@s-hertel)
options:
plugin:
description: Token that ensures this is a source file for the plugin.
required: True
choices: ['aws_ec2']
iam_role_arn:
description: The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
credentials with enough privilege to perform the AssumeRole action.
version_added: '2.9'
regions:
description:
- A list of regions in which to describe EC2 instances.
- If empty (the default) this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
type: list
default: []
hostnames:
description:
- A list in order of precedence for hostname variables.
- You can use the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
- To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
type: list
default: []
filters:
description:
- A dictionary of filter value pairs.
- Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
type: dict
default: {}
include_extra_api_calls:
description:
- Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
- Spot instances may be persistent and instances may have associated events.
type: bool
default: False
version_added: '2.8'
strict_permissions:
description:
- By default if a 403 (Forbidden) error code is encountered this plugin will fail.
- You can set this option to False in the inventory config file which will allow 403 errors to be gracefully skipped.
type: bool
default: True
use_contrib_script_compatible_sanitization:
description:
- By default this plugin is using a general group name sanitization to create safe and usable group names for use in Ansible.
This option allows you to override that, in an effort to allow migration from the old inventory script and
matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
To replicate behavior of ``replace_dash_in_groups = True`` with constructed groups,
you will need to replace hyphens with underscores via the regex_replace filter for those entries.
- For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
otherwise the core engine will just use the standard sanitization on top.
- This is not the default because such names break certain functionality, as not all characters are valid Python identifiers,
which group names end up being used as.
type: bool
default: False
version_added: '2.8'
'''
EXAMPLES = '''
# Minimal example using environment vars or instance role credentials
# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
plugin: aws_ec2
regions:
- us-east-1
# Example using filters, ignoring permission errors, and specifying the hostname precedence
plugin: aws_ec2
boto_profile: aws_profile
# Populate inventory with instances in these regions
regions:
- us-east-1
- us-east-2
filters:
# All instances with their `Environment` tag set to `dev`
tag:Environment: dev
# All dev and QA hosts
tag:Environment:
- dev
- qa
instance.group-id: sg-xxxxxxxx
# Ignores 403 errors rather than failing
strict_permissions: False
# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
# inventory_hostname use compose (see example below).
hostnames:
- tag:Name=Tag1,Name=Tag2 # Return specific hosts only
- tag:CustomDNSName
- dns-name
- private-ip-address
# Example using constructed features to create groups and set ansible_host
plugin: aws_ec2
regions:
- us-east-1
- us-west-1
# keyed_groups may be used to create custom groups
strict: False
keyed_groups:
# Add e.g. x86_64 hosts to an arch_x86_64 group
- prefix: arch
key: 'architecture'
# Add hosts to tag_Name_Value groups for each Name/Value tag pair
- prefix: tag
key: tags
# Add hosts to e.g. instance_type_z3_tiny
- prefix: instance_type
key: instance_type
# Create security_groups_sg_abcd1234 group for each SG
- key: 'security_groups|json_query("[].group_id")'
prefix: 'security_groups'
# Create a group for each value of the Application tag
- key: tags.Application
separator: ''
# Create a group per region e.g. aws_region_us_east_2
- key: placement.region
prefix: aws_region
# Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project"
- key: tags['Role']
prefix: foo
parent_group: "project"
# Set individual variables with compose
compose:
# Use the private IP address to connect to the host
# (note: this does not modify inventory_hostname, which is set via I(hostnames))
ansible_host: private_ip_address
'''
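# Usage note (illustrative, not part of the upstream docs): a configuration
# file like the examples above is typically exercised with
#   ansible-inventory -i my.aws_ec2.yml --graph
# or passed to a playbook with -i; the file name must end in aws_ec2.yml or
# aws_ec2.yaml for verify_file() below to accept it.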
import re
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.utils.display import Display
try:
import boto3
import botocore
except ImportError:
raise AnsibleError('The ec2 dynamic inventory plugin requires boto3 and botocore.')
display = Display()
# The mappings give an array of keys to get from the filter name to the value
# returned by boto3's EC2 describe_instances method.
instance_meta_filter_to_boto_attr = {
'group-id': ('Groups', 'GroupId'),
'group-name': ('Groups', 'GroupName'),
'network-interface.attachment.instance-owner-id': ('OwnerId',),
'owner-id': ('OwnerId',),
'requester-id': ('RequesterId',),
'reservation-id': ('ReservationId',),
}
instance_data_filter_to_boto_attr = {
'affinity': ('Placement', 'Affinity'),
'architecture': ('Architecture',),
'availability-zone': ('Placement', 'AvailabilityZone'),
'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
'client-token': ('ClientToken',),
'dns-name': ('PublicDnsName',),
'host-id': ('Placement', 'HostId'),
'hypervisor': ('Hypervisor',),
'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
'image-id': ('ImageId',),
'instance-id': ('InstanceId',),
'instance-lifecycle': ('InstanceLifecycle',),
'instance-state-code': ('State', 'Code'),
'instance-state-name': ('State', 'Name'),
'instance-type': ('InstanceType',),
'instance.group-id': ('SecurityGroups', 'GroupId'),
'instance.group-name': ('SecurityGroups', 'GroupName'),
'ip-address': ('PublicIpAddress',),
'kernel-id': ('KernelId',),
'key-name': ('KeyName',),
'launch-index': ('AmiLaunchIndex',),
'launch-time': ('LaunchTime',),
'monitoring-state': ('Monitoring', 'State'),
'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
'network-interface.attachment.instance-id': ('InstanceId',),
'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
'network-interface.description': ('NetworkInterfaces', 'Description'),
'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
# 'network-interface.requester-id': (),
'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
'network-interface.status': ('NetworkInterfaces', 'Status'),
'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
'placement-group-name': ('Placement', 'GroupName'),
'platform': ('Platform',),
'private-dns-name': ('PrivateDnsName',),
'private-ip-address': ('PrivateIpAddress',),
'product-code': ('ProductCodes', 'ProductCodeId'),
'product-code.type': ('ProductCodes', 'ProductCodeType'),
'ramdisk-id': ('RamdiskId',),
'reason': ('StateTransitionReason',),
'root-device-name': ('RootDeviceName',),
'root-device-type': ('RootDeviceType',),
'source-dest-check': ('SourceDestCheck',),
'spot-instance-request-id': ('SpotInstanceRequestId',),
'state-reason-code': ('StateReason', 'Code'),
'state-reason-message': ('StateReason', 'Message'),
'subnet-id': ('SubnetId',),
'tag': ('Tags',),
'tag-key': ('Tags',),
'tag-value': ('Tags',),
'tenancy': ('Placement', 'Tenancy'),
'virtualization-type': ('VirtualizationType',),
'vpc-id': ('VpcId',),
}
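# Illustrative walk-through (added for clarity, not upstream code): for the
# filter name 'availability-zone' the tuple ('Placement', 'AvailabilityZone')
# means _get_boto_attr_chain() reads instance['Placement']['AvailabilityZone'],
# returning e.g. 'us-east-1a' for an instance in that zone.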
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = 'aws_ec2'
def __init__(self):
super(InventoryModule, self).__init__()
self.group_prefix = 'aws_ec2_'
# credentials
self.boto_profile = None
self.aws_secret_access_key = None
self.aws_access_key_id = None
self.aws_security_token = None
self.iam_role_arn = None
def _compile_values(self, obj, attr):
'''
:param obj: A list or dict of instance attributes
:param attr: A key
:return The value(s) found via the attr
'''
if obj is None:
return
temp_obj = []
if isinstance(obj, list) or isinstance(obj, tuple):
for each in obj:
value = self._compile_values(each, attr)
if value:
temp_obj.append(value)
else:
temp_obj = obj.get(attr)
has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)])
if has_indexes and len(temp_obj) == 1:
return temp_obj[0]
return temp_obj
def _get_boto_attr_chain(self, filter_name, instance):
'''
:param filter_name: The filter
:param instance: instance dict returned by boto3 ec2 describe_instances()
'''
allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
if filter_name not in allowed_filters:
raise AnsibleError("Invalid filter '%s' provided; filter must be one of %s." % (filter_name,
allowed_filters))
if filter_name in instance_data_filter_to_boto_attr:
boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
else:
boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
instance_value = instance
for attribute in boto_attr_list:
instance_value = self._compile_values(instance_value, attribute)
return instance_value
def _get_credentials(self):
'''
:return A dictionary of boto client credentials
'''
boto_params = {}
for credential in (('aws_access_key_id', self.aws_access_key_id),
('aws_secret_access_key', self.aws_secret_access_key),
('aws_session_token', self.aws_security_token)):
if credential[1]:
boto_params[credential[0]] = credential[1]
return boto_params
def _get_connection(self, credentials, region='us-east-1'):
try:
connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
if self.boto_profile:
try:
connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
else:
raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
return connection
def _boto3_assume_role(self, credentials, region):
"""
Assume an IAM role passed by iam_role_arn parameter
:return: a dict containing the credentials of the assumed role
"""
iam_role_arn = self.iam_role_arn
try:
sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
return dict(
aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
aws_session_token=sts_session['Credentials']['SessionToken']
)
except botocore.exceptions.ClientError as e:
raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
def _boto3_conn(self, regions):
'''
Generator that yields a boto3 client and its region.
:param regions: a list of regions in which to create boto3 clients
'''
credentials = self._get_credentials()
iam_role_arn = self.iam_role_arn
if not regions:
try:
# as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
client = self._get_connection(credentials)
resp = client.describe_regions()
regions = [x['RegionName'] for x in resp.get('Regions', [])]
except botocore.exceptions.NoRegionError:
# above seems to fail depending on boto3 version, ignore and let's try something else
pass
# fallback to local list hardcoded in boto3 if still no regions
if not regions:
session = boto3.Session()
regions = session.get_available_regions('ec2')
# I give up, now you MUST give me regions
if not regions:
raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')
for region in regions:
connection = self._get_connection(credentials, region)
try:
if iam_role_arn is not None:
assumed_credentials = self._boto3_assume_role(credentials, region)
else:
assumed_credentials = credentials
connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
if self.boto_profile:
try:
connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
else:
raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
yield connection, region
def _get_instances_by_region(self, regions, filters, strict_permissions):
'''
:param regions: a list of regions in which to describe instances
:param filters: a list of boto3 filter dictionaries
:param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
:return A list of instance dictionaries
'''
all_instances = []
for connection, region in self._boto3_conn(regions):
try:
# By default find non-terminated/terminating instances
if not any([f['Name'] == 'instance-state-name' for f in filters]):
filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
paginator = connection.get_paginator('describe_instances')
reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
instances = []
for r in reservations:
new_instances = r['Instances']
for instance in new_instances:
instance.update(self._get_reservation_details(r))
if self.get_option('include_extra_api_calls'):
instance.update(self._get_event_set_and_persistence(connection, instance['InstanceId'], instance.get('SpotInstanceRequestId')))
instances.extend(new_instances)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
instances = []
else:
raise AnsibleError("Failed to describe instances: %s" % to_native(e))
except botocore.exceptions.BotoCoreError as e:
raise AnsibleError("Failed to describe instances: %s" % to_native(e))
all_instances.extend(instances)
return sorted(all_instances, key=lambda x: x['InstanceId'])
def _get_reservation_details(self, reservation):
return {
'OwnerId': reservation['OwnerId'],
'RequesterId': reservation.get('RequesterId', ''),
'ReservationId': reservation['ReservationId']
}
def _get_event_set_and_persistence(self, connection, instance_id, spot_instance):
host_vars = {'Events': '', 'Persistent': False}
try:
kwargs = {'InstanceIds': [instance_id]}
host_vars['Events'] = connection.describe_instance_status(**kwargs)['InstanceStatuses'][0].get('Events', '')
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
if not self.get_option('strict_permissions'):
pass
else:
raise AnsibleError("Failed to describe instance status: %s" % to_native(e))
if spot_instance:
try:
kwargs = {'SpotInstanceRequestIds': [spot_instance]}
host_vars['Persistent'] = bool(
connection.describe_spot_instance_requests(**kwargs)['SpotInstanceRequests'][0].get('Type') == 'persistent'
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
if not self.get_option('strict_permissions'):
pass
else:
raise AnsibleError("Failed to describe spot instance requests: %s" % to_native(e))
return host_vars
def _get_tag_hostname(self, preference, instance):
tag_hostnames = preference.split('tag:', 1)[1]
if ',' in tag_hostnames:
tag_hostnames = tag_hostnames.split(',')
else:
tag_hostnames = [tag_hostnames]
tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
for v in tag_hostnames:
if '=' in v:
tag_name, tag_value = v.split('=')
if tags.get(tag_name) == tag_value:
return to_text(tag_name) + "_" + to_text(tag_value)
else:
tag_value = tags.get(v)
if tag_value:
return to_text(tag_value)
return None
def _get_hostname(self, instance, hostnames):
'''
:param instance: an instance dict returned by boto3 ec2 describe_instances()
:param hostnames: a list of hostname destination variables in order of preference
:return the preferred identifier for the host
'''
if not hostnames:
hostnames = ['dns-name', 'private-dns-name']
hostname = None
for preference in hostnames:
if 'tag' in preference:
if not preference.startswith('tag:'):
raise AnsibleError("To name a host by tags name_value, use 'tag:name=value'.")
hostname = self._get_tag_hostname(preference, instance)
else:
hostname = self._get_boto_attr_chain(preference, instance)
if hostname:
break
if hostname:
if ':' in to_text(hostname):
return self._sanitize_group_name((to_text(hostname)))
else:
return to_text(hostname)
def _query(self, regions, filters, strict_permissions):
'''
:param regions: a list of regions to query
:param filters: a list of boto3 filter dictionaries
:param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
'''
return {'aws_ec2': self._get_instances_by_region(regions, filters, strict_permissions)}
def _populate(self, groups, hostnames):
for group in groups:
group = self.inventory.add_group(group)
self._add_hosts(hosts=groups[group], group=group, hostnames=hostnames)
self.inventory.add_child('all', group)
def _add_hosts(self, hosts, group, hostnames):
'''
:param hosts: a list of hosts to be added to a group
:param group: the name of the group to which the hosts belong
:param hostnames: a list of hostname destination variables in order of preference
'''
for host in hosts:
hostname = self._get_hostname(host, hostnames)
host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
# Allow easier grouping by region
host['placement']['region'] = host['placement']['availability_zone'][:-1]
if not hostname:
continue
self.inventory.add_host(hostname, group=group)
for hostvar, hostval in host.items():
self.inventory.set_variable(hostname, hostvar, hostval)
# Use constructed if applicable
strict = self.get_option('strict')
# Composed variables
self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
def _set_credentials(self):
'''
:param config_data: contents of the inventory config file
'''
self.boto_profile = self.get_option('aws_profile')
self.aws_access_key_id = self.get_option('aws_access_key')
self.aws_secret_access_key = self.get_option('aws_secret_key')
self.aws_security_token = self.get_option('aws_security_token')
self.iam_role_arn = self.get_option('iam_role_arn')
if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
session = botocore.session.get_session()
try:
credentials = session.get_credentials().get_frozen_credentials()
except AttributeError:
pass
else:
self.aws_access_key_id = credentials.access_key
self.aws_secret_access_key = credentials.secret_key
self.aws_security_token = credentials.token
if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
"inventory configuration file or set them as environment variables.")
def verify_file(self, path):
'''
:param path: the path to the inventory config file
:return True if the file name matches the expected aws_ec2 pattern, False otherwise
'''
if super(InventoryModule, self).verify_file(path):
if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')):
return True
display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'")
return False
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
self._read_config_data(path)
if self.get_option('use_contrib_script_compatible_sanitization'):
self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
self._set_credentials()
# get user specifications
regions = self.get_option('regions')
filters = ansible_dict_to_boto3_filter_list(self.get_option('filters'))
hostnames = self.get_option('hostnames')
strict_permissions = self.get_option('strict_permissions')
cache_key = self.get_cache_key(path)
# false when refresh_cache or --flush-cache is used
if cache:
# get the user-specified directive
cache = self.get_option('cache')
# Generate inventory
cache_needs_update = False
if cache:
try:
results = self._cache[cache_key]
except KeyError:
# if cache expires or cache file doesn't exist
cache_needs_update = True
if not cache or cache_needs_update:
results = self._query(regions, filters, strict_permissions)
self._populate(results, hostnames)
# If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
# when the user is using caching, update the cached inventory
if cache_needs_update or (not cache and self.get_option('cache')):
self._cache[cache_key] = results
@staticmethod
def _legacy_script_compatible_group_sanitization(name):
# note that while this mirrors what the script used to do, it has many issues with unicode and usability in python
regex = re.compile(r"[^A-Za-z0-9\_\-]")
return regex.sub('_', name)
| {
"content_hash": "d2f0caa358a479430c43134f2ae9b192",
"timestamp": "",
"source": "github",
"line_count": 656,
"max_line_length": 155,
"avg_line_length": 45.86280487804878,
"alnum_prop": 0.6279664960446719,
"repo_name": "thaim/ansible",
"id": "5f757956162d05da00f7b54e4130357b8933dbc3",
"size": "30217",
"binary": false,
"copies": "19",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/plugins/inventory/aws_ec2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
"""Utility to display flash messages.
To add a flash message:
flash('Login successful!', category='info')
To display flash messages in a template:
$ for flash in get_flashed_messages():
<div class="$flash.type">$flash.message</div>
Note: This should be added with web.py or become an independent module.
"""
import json
import web
def get_flashed_messages():
# cache the flashed messages in request context to support
# multiple invocations of this function.
if "flashed_messages" not in web.ctx:
web.ctx.flashed_messages = web.ctx.get('flashes', [])
web.ctx.flashes = []
return web.ctx.flashed_messages
def flash(message, category="info"):
flashes = web.ctx.setdefault('flashes', [])
flashes.append(web.storage(category=category, message=message))
def flash_processor(handler):
flashes_json = web.cookies(flashes="[]").flashes
try:
flashes = [web.storage(d) for d in json.loads(flashes_json)
if isinstance(d, dict) and 'category' in d and
'message' in d]
except ValueError:
flashes = []
web.ctx.flashes = list(flashes)
try:
return handler()
finally:
# Flash changed. Need to save it.
if flashes != web.ctx.flashes:
if web.ctx.flashes:
web.setcookie('flashes', json.dumps(web.ctx.flashes))
else:
web.setcookie('flashes', '', expires=-1)
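# Usage sketch (illustrative, not part of the original module): the processor
# must be installed on the web.py application so flashes survive the redirect
# via the 'flashes' cookie. The app/url names below are assumptions for the
# example only.
if __name__ == "__main__":
    urls = ("/", "index")

    class index:
        def GET(self):
            flash("Hello from flash!", category="info")
            return "\n".join("%s: %s" % (f.category, f.message)
                             for f in get_flashed_messages())

    app = web.application(urls, globals())
    app.add_processor(flash_processor)
    # app.run()  # uncomment to actually serve; left disabled in this sketch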
| {
"content_hash": "f27aa7ed704a79ff135aa08d04729591",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 71,
"avg_line_length": 28.173076923076923,
"alnum_prop": 0.6266211604095563,
"repo_name": "anandology/broadgauge",
"id": "beb708a07763af9888af7642130de4f490b3246e",
"size": "1465",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "broadgauge/flash.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13402"
},
{
"name": "Python",
"bytes": "61595"
}
],
"symlink_target": ""
} |
import ast
import datetime
import jinja2
class Option:
"""An option to be used in a report
Attributes:
type (str): the type of the option of the form 'gnc:make-number-range-option'
section (str): the section/tab where the option should appear in the option dialog
sort_tag (str): a string defining the sort order in the tab
documentation_string (str): the doc string of the option
default_value (str): the default value of the option
name (str): the name of the variable
"""
def __init__(self, type, section, sort_tag, documentation_string, default_value, name=None):
self.type = type
self.section = section
self.name = name
self.sort_tag = sort_tag
self.documentation_string = documentation_string
self.default_value = default_value
def render_scheme(self):
pass
def render_serialise(self):
return jinja2.Template("""
(op-value "{{option.section}}" "{{option.name}}")""").render(option=self)
def parse(self, value):
return ast.literal_eval(value)
class DateOption(Option):
def __init__(self, is_datetime=False, **kwargs):
super(DateOption, self).__init__(type="gnc:make-date-option", **kwargs)
self.is_datetime = is_datetime
def render_serialise(self):
return jinja2.Template("""(cadr (op-value "{{option.section}}" "{{option.name}}"))""").render(option=self)
def parse(self, value):
return datetime.datetime.fromtimestamp(ast.literal_eval(value))
def render_scheme(self):
return jinja2.Template(""" (add-option
({{ option.type }}
(N_ "{{option.section}}") (N_ "{{option.name}}")
"{{option.sort_tag}}" (N_ "{{option.documentation_string}}")
{{option.default_value}} ;; default
#f 'absolute #f
))
""").render(option=self)
class RangeOption(Option):
def __init__(self, lower=0, upper=10000, decimals=2, step_size=0.01, **kwargs):
super(RangeOption, self).__init__(type="gnc:make-number-range-option", **kwargs)
self.lower = lower
self.upper = upper
self.decimals = decimals
self.step_size = step_size
def render_scheme(self):
return jinja2.Template("""(add-option
({{ option.type }}
(N_ "{{option.section}}") (N_ "{{option.name}}")
"{{option.sort_tag}}" (N_ "{{option.documentation_string}}")
{{option.default_value}} ;; default
{{option.lower }} ;; lower bound
{{option.upper }} ;; upper bound
{{option.decimals }} ;; number of decimals
{{option.step_size }} ;; step size
))
""").render(option=self)
class StringOption(Option):
def __init__(self, **kwargs):
super(StringOption, self).__init__(type="gnc:make-string-option", **kwargs)
def render_scheme(self):
return jinja2.Template(""" (add-option
({{ option.type }}
(N_ "{{option.section}}") (N_ "{{option.name}}")
"{{option.sort_tag}}" (N_ "{{option.documentation_string}}")
"{{option.default_value}}" ;; default
))
""").render(option=self)
def parse(self, value):
return value
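# Usage sketch (illustrative, not part of the original module): declaring an
# option and emitting its Scheme and serialisation snippets for a report.
if __name__ == "__main__":
    max_rows = RangeOption(
        section="General",
        sort_tag="a",
        documentation_string="Maximum number of rows to display",
        default_value=50,
        name="Max rows",
        lower=1,
        upper=1000,
        decimals=0,
        step_size=1,
    )
    print(max_rows.render_scheme())
    print(max_rows.render_serialise())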
| {
"content_hash": "2c209fb6c572ddcdec36099a34ec2dc8",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 114,
"avg_line_length": 33.93684210526316,
"alnum_prop": 0.5955334987593052,
"repo_name": "sdementen/gnucash-utilities",
"id": "a58afb842fd9a0e9fd0e36cd289e40ee4cec2e75",
"size": "3224",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "piecash_utilities/report/options.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1572"
},
{
"name": "Python",
"bytes": "27519"
},
{
"name": "Scheme",
"bytes": "4702"
}
],
"symlink_target": ""
} |
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "..")))
from common.FileHelper import writeFile
from datetime import datetime
import shutil
import json
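# Input assumption (illustrative): goodStocks.txt holds one stock code per
# line; codes shorter than six digits (e.g. "600", "1") are zero-padded to six
# characters ("000600", "000001") before being written out as a JSON list.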
def loadStockList(fileName):
dictData = {}
for line in open(fileName):
line = line.strip().zfill(6)
if line not in dictData:
dictData[line] = 1
saveFileName = os.path.abspath(
os.path.join(os.getcwd(), "../config/goodStockList.json"))
backupFileName = os.path.abspath(
os.path.join(os.getcwd(), "../backup/goodStockList_" + datetime.now().strftime('%Y-%m-%d') + ".json"))
if os.path.exists(saveFileName):
shutil.move(saveFileName, backupFileName)
writeFile(saveFileName, json.dumps(dictData.keys()))
def main(argv):
reload(sys)
sys.setdefaultencoding('utf-8')
loadStockList('goodStocks.txt')
if __name__ == '__main__':
main(sys.argv)
| {
"content_hash": "05a7531d2cbbb6f888697afb02e49fa0",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 110,
"avg_line_length": 25.61111111111111,
"alnum_prop": 0.6518438177874186,
"repo_name": "zwffff2015/stock",
"id": "d0256a094f1e21577e8aae64271aacd99eea6098",
"size": "938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/GenerateStockList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "208046"
},
{
"name": "Shell",
"bytes": "831"
}
],
"symlink_target": ""
} |
"""Test for exporters.
Note that we actually train and export models within these tests.
"""
import os
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import exporter
from tensorflow_model_analysis.eval_saved_model import load
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.eval_saved_model.example_trainers import fixed_prediction_estimator
class ExporterTest(testutil.TensorflowModelAnalysisTest):
def _getEvalExportDir(self):
return os.path.join(self._getTempDir(), 'eval_export_dir')
def runTestForExporter(self, exporter_class):
estimator_metadata = (
fixed_prediction_estimator
.get_simple_fixed_prediction_estimator_and_metadata())
exporter_name = 'TFMA'
temp_eval_export_dir = self._getEvalExportDir()
exporter_instance = exporter_class(
name=exporter_name,
eval_input_receiver_fn=estimator_metadata['eval_input_receiver_fn'],
serving_input_receiver_fn=estimator_metadata[
'serving_input_receiver_fn'])
self.assertEqual(exporter_name, exporter_instance.name)
estimator_metadata['estimator'].train(
input_fn=estimator_metadata['train_input_fn'], steps=100)
eval_export_dir = exporter_instance.export(
estimator=estimator_metadata['estimator'],
export_path=temp_eval_export_dir,
checkpoint_path=None,
eval_result=None,
is_the_final_export=True)
# Check the eval graph.
eval_saved_model = load.EvalSavedModel(eval_export_dir)
example1 = self._makeExample(prediction=0.9, label=0.0).SerializeToString()
eval_saved_model.metrics_reset_update_get(example1)
metric_values = eval_saved_model.get_metric_values()
self.assertDictElementsAlmostEqual(metric_values, {'average_loss': 0.81})
# Check the serving graph.
# TODO(b/124466113): Remove tf.compat.v2 once TF 2.0 is the default.
if hasattr(tf, 'compat.v2'):
imported = tf.compat.v2.saved_model.load(
eval_export_dir, tags=tf.saved_model.SERVING)
predictions = imported.signatures[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY](
inputs=tf.constant([example1.SerializeToString()]))
self.assertAllClose(predictions['outputs'], np.array([[0.9]]))
def testFinalExporter(self):
self.runTestForExporter(exporter.FinalExporter)
def testLatestExporter(self):
self.runTestForExporter(exporter.LatestExporter)
def testAdaptToRemoveMetricsRemoveList(self):
estimator_metadata = (
fixed_prediction_estimator
.get_simple_fixed_prediction_estimator_and_metadata())
exporter_name = 'TFMA'
temp_eval_export_dir = self._getEvalExportDir()
exporter_instance = exporter.FinalExporter(
name=exporter_name,
eval_input_receiver_fn=estimator_metadata['eval_input_receiver_fn'],
serving_input_receiver_fn=estimator_metadata[
'serving_input_receiver_fn'])
exporter_instance = exporter.adapt_to_remove_metrics(
exporter_instance, ['average_loss'])
self.assertEqual(exporter_name, exporter_instance.name)
estimator_metadata['estimator'].train(
input_fn=estimator_metadata['train_input_fn'], steps=100)
eval_export_dir = exporter_instance.export(
estimator=estimator_metadata['estimator'],
export_path=temp_eval_export_dir,
checkpoint_path=None,
eval_result=None,
is_the_final_export=True)
# Check the eval graph.
eval_saved_model = load.EvalSavedModel(eval_export_dir)
example1 = self._makeExample(prediction=0.9, label=0.0).SerializeToString()
eval_saved_model.metrics_reset_update_get(example1)
metric_values = eval_saved_model.get_metric_values()
self.assertNotIn('average_loss', metric_values)
# Check the serving graph.
# TODO(b/124466113): Remove tf.compat.v2 once TF 2.0 is the default.
if hasattr(tf, 'compat.v2'):
imported = tf.compat.v2.saved_model.load(
eval_export_dir, tags=tf.saved_model.SERVING)
predictions = imported.signatures[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY](
inputs=tf.constant([example1.SerializeToString()]))
self.assertAllClose(predictions['outputs'], np.array([[0.9]]))
def testAdaptToRemoveMetricsRemoveFn(self):
estimator_metadata = (
fixed_prediction_estimator
.get_simple_fixed_prediction_estimator_and_metadata())
exporter_name = 'TFMA'
temp_eval_export_dir = self._getEvalExportDir()
exporter_instance = exporter.FinalExporter(
name=exporter_name,
eval_input_receiver_fn=estimator_metadata['eval_input_receiver_fn'],
serving_input_receiver_fn=estimator_metadata[
'serving_input_receiver_fn'])
exporter_instance = exporter.adapt_to_remove_metrics(
exporter_instance, lambda key: key.endswith('loss'))
self.assertEqual(exporter_name, exporter_instance.name)
estimator_metadata['estimator'].train(
input_fn=estimator_metadata['train_input_fn'], steps=100)
eval_export_dir = exporter_instance.export(
estimator=estimator_metadata['estimator'],
export_path=temp_eval_export_dir,
checkpoint_path=None,
eval_result=None,
is_the_final_export=True)
# Check the eval graph.
eval_saved_model = load.EvalSavedModel(eval_export_dir)
example1 = self._makeExample(prediction=0.9, label=0.0).SerializeToString()
eval_saved_model.metrics_reset_update_get(example1)
metric_values = eval_saved_model.get_metric_values()
self.assertNotIn('average_loss', metric_values)
# Check the serving graph.
# TODO(b/124466113): Remove tf.compat.v2 once TF 2.0 is the default.
if hasattr(tf, 'compat.v2'):
imported = tf.compat.v2.saved_model.load(
eval_export_dir, tags=tf.saved_model.SERVING)
predictions = imported.signatures[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY](
inputs=tf.constant([example1.SerializeToString()]))
self.assertAllClose(predictions['outputs'], np.array([[0.9]]))
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "f6e30914b036ab414483f088ef4393bc",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 98,
"avg_line_length": 38.76875,
"alnum_prop": 0.6967596324359181,
"repo_name": "tensorflow/model-analysis",
"id": "cf0dd46955de98d600fa18387bd3511eec16f466",
"size": "6778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_model_analysis/eval_saved_model/exporter_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "125312"
},
{
"name": "JavaScript",
"bytes": "1415355"
},
{
"name": "Python",
"bytes": "3261298"
},
{
"name": "Shell",
"bytes": "813"
},
{
"name": "Starlark",
"bytes": "11590"
}
],
"symlink_target": ""
} |
"""
Kolibri Webpack hooks
---------------------
To manage assets, we use the webpack format. In order to have assets bundled in,
you should put them in ``yourapp/assets/src``.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import io
import json
import logging
import os
import re
import time
from abc import abstractproperty
from functools import partial
from django.conf import settings
from django.contrib.staticfiles.finders import find as find_staticfiles
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.cache import caches
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.six.moves.urllib.request import url2pathname
from django.utils.translation import get_language
from django.utils.translation import get_language_info
from django.utils.translation import to_locale
from pkg_resources import resource_filename
from six import text_type
from kolibri.plugins import hooks
# Use the cache specifically for built files
# Only reference the specific cache inside methods
# to allow this file to be imported without initiating
# Django settings configuration.
CACHE_NAMESPACE = "built_files"
IGNORE_PATTERNS = (re.compile(I) for I in [r".+\.hot-update.js", r".+\.map"])
class WebpackError(Exception):
def __init__(self, message, extra_info={}):
self.extra_info = extra_info
return Exception.__init__(self, message)
logger = logging.getLogger(__name__)
def filter_by_bidi(bidi, chunk):
if chunk["name"].split(".")[-1] != "css":
return True
if bidi:
return chunk["name"].split(".")[-2] == "rtl"
else:
return chunk["name"].split(".")[-2] != "rtl"
@hooks.define_hook
class WebpackBundleHook(hooks.KolibriHook):
"""
This is the abstract hook class that all plugins that wish to load any
assets into the front end must implement, in order for them to be part of
the webpack asset loading pipeline.
"""
# : You should set a human readable name that is unique within the
# : plugin in which this is defined.
@abstractproperty
def bundle_id(self):
pass
# : When being included for synchronous loading, should the source files
# : for this be inlined?
inline = False
# : A mapping of key to JSON serializable value.
# : This plugin_data will be bootstrapped into a global object on window
# : with a key of the unique_id as a Javascript object
plugin_data = {}
@classmethod
def get_by_unique_id(cls, unique_id):
"""
Fetch a registered hook instance by its unique_id
"""
hook = cls.get_hook(unique_id)
if hook:
return hook
raise WebpackError("No bundle with that name is loaded: '{}'".format(unique_id))
@cached_property
def _stats_file_content(self):
"""
:returns: A dict of the data contained in the JSON files which are
written by Webpack.
"""
cache_key = "json_stats_file_cache_{unique_id}".format(unique_id=self.unique_id)
stats_file_content = caches[CACHE_NAMESPACE].get(cache_key)
if not stats_file_content or getattr(settings, "DEVELOPER_MODE", False):
STATS_ERR = "Error accessing stats file '{}': {}"
try:
with io.open(self._stats_file, mode="r", encoding="utf-8") as f:
stats = json.load(f)
except IOError as e:
raise WebpackError(STATS_ERR.format(self._stats_file, e))
if getattr(settings, "DEVELOPER_MODE", False):
timeout = 0
while stats["status"] == "compiling":
time.sleep(0.1)
timeout += 0.1
try:
with io.open(self._stats_file, mode="r", encoding="utf-8") as f:
stats = json.load(f)
except IOError as e:
raise WebpackError(STATS_ERR.format(self._stats_file, e))
if timeout >= 5:
raise WebpackError("Compilation still in progress")
if stats["status"] == "error":
raise WebpackError("Compilation has errored", stats)
stats_file_content = {
"files": stats.get("chunks", {}).get(self.unique_id, []),
"hasMessages": stats.get("messages", False),
}
# Don't invalidate during runtime.
# Might need to change this if we move to a different cache backend.
caches[CACHE_NAMESPACE].set(cache_key, stats_file_content, None)
return stats_file_content
@property
def bundle(self):
"""
:returns: a generator yielding dict objects with properties of the built
asset, most notably its URL.
"""
for f in self._stats_file_content["files"]:
filename = f["name"]
if not getattr(settings, "DEVELOPER_MODE", False):
if any(list(regex.match(filename) for regex in IGNORE_PATTERNS)):
continue
relpath = "{0}/{1}".format(self.unique_id, filename)
if getattr(settings, "DEVELOPER_MODE", False):
try:
f["url"] = f["publicPath"]
except KeyError:
f["url"] = staticfiles_storage.url(relpath)
else:
f["url"] = staticfiles_storage.url(relpath)
yield f
@property
def unique_id(self):
"""
Returns a globally unique id for the frontend module bundle.
This is created by appending the locally unique bundle_id to the
Python module path. This should give a globally unique id for the module
and prevent accidental or malicious collisions.
"""
return "{}.{}".format(self._module_path, self.bundle_id)
@property
def _build_path(self):
"""
An auto-generated path to where the build-time files are stored,
containing information about the built bundles.
"""
return resource_filename(self._module_path, "build")
@property
def _stats_file(self):
"""
        An auto-generated path to the JSON stats file written at build time,
        containing information about this hook's built bundle.
"""
return os.path.join(
self._build_path, "{plugin}_stats.json".format(plugin=self.unique_id)
)
@property
def _module_file_path(self):
"""
        Returns the path to the directory of the module in which the
        inheriting class is defined.
"""
return os.path.dirname(self._build_path)
def frontend_message_file(self, lang_code):
message_file_name = "{name}-messages.json".format(name=self.unique_id)
for path in getattr(settings, "LOCALE_PATHS", []):
file_path = os.path.join(
path, to_locale(lang_code), "LC_MESSAGES", message_file_name
)
if os.path.exists(file_path):
return file_path
def frontend_messages(self):
lang_code = get_language()
cache_key = "json_message_file_cache_{unique_id}_{lang}".format(
unique_id=self.unique_id, lang=lang_code
)
message_file_content = caches[CACHE_NAMESPACE].get(cache_key, {})
if not message_file_content or getattr(settings, "DEVELOPER_MODE", False):
frontend_message_file = self.frontend_message_file(lang_code)
if frontend_message_file:
with io.open(frontend_message_file, mode="r", encoding="utf-8") as f:
message_file_content = json.load(f)
caches[CACHE_NAMESPACE].set(cache_key, message_file_content, None)
return message_file_content
def sorted_chunks(self):
bidi = get_language_info(get_language())["bidi"]
return sorted(
filter(partial(filter_by_bidi, bidi), self.bundle),
key=lambda x: x["name"].split(".")[-1],
)
def js_and_css_tags(self):
js_tag = '<script type="text/javascript" src="{url}"></script>'
css_tag = '<link type="text/css" href="{url}" rel="stylesheet"/>'
inline_js_tag = '<script type="text/javascript">{src}</script>'
inline_css_tag = "<style>{src}</style>"
# Sorted to load css before js
for chunk in self.sorted_chunks():
src = None
if chunk["name"].endswith(".js"):
if self.inline:
# During development, we do not write built files to disk
# Because of this, this call might return None
src = self.get_filecontent(chunk["url"])
if src is not None:
# If it is not None, then we can inline it
yield inline_js_tag.format(src=src)
else:
# If src is None, either this is not something we should be inlining
# or we are in development mode and need to fetch the file from the
# development server, not the disk
yield js_tag.format(url=chunk["url"])
elif chunk["name"].endswith(".css"):
if self.inline:
# During development, we do not write built files to disk
# Because of this, this call might return None
src = self.get_filecontent(chunk["url"])
if src is not None:
# If it is not None, then we can inline it
yield inline_css_tag.format(src=src)
else:
# If src is None, either this is not something we should be inlining
# or we are in development mode and need to fetch the file from the
# development server, not the disk
yield css_tag.format(url=chunk["url"])
def frontend_message_tag(self):
if self.frontend_messages():
return [
"""
<script>
{kolibri_name}.registerLanguageAssets('{bundle}', '{lang_code}', JSON.parse({messages}));
</script>""".format(
kolibri_name="kolibriCoreAppGlobal",
bundle=self.unique_id,
lang_code=get_language(),
messages=json.dumps(
json.dumps(
self.frontend_messages(),
separators=(",", ":"),
ensure_ascii=False,
)
),
)
]
else:
return []
def plugin_data_tag(self):
if self.plugin_data:
return [
"""
<script>
window['{name}'] = window['{name}'] || {{}};
window['{name}']['{bundle}'] = JSON.parse({plugin_data});
</script>
""".format(
name="kolibriPluginDataGlobal",
bundle=self.unique_id,
plugin_data=json.dumps(
json.dumps(
self.plugin_data, separators=(",", ":"), ensure_ascii=False
)
),
)
]
else:
return []
def get_basename(self, url):
"""
Takes full path to a static file (eg. "/static/css/style.css") and
returns path with storage's base url removed (eg. "css/style.css").
"""
base_url = staticfiles_storage.base_url
# Cast ``base_url`` to a string to allow it to be
# a string-alike object to e.g. add ``SCRIPT_NAME``
# WSGI param as a *path prefix* to the output URL.
# See https://code.djangoproject.com/ticket/25598.
base_url = text_type(base_url)
if not url.startswith(base_url):
return None
basename = url.replace(base_url, "", 1)
# drop the querystring, which is used for non-compressed cache-busting.
return basename.split("?", 1)[0]
def get_filename(self, basename):
"""
Returns full path to a file, for example:
get_filename('css/one.css') -> '/full/path/to/static/css/one.css'
"""
filename = None
# First try finding the file using the storage class.
# This is skipped in DEVELOPER_MODE mode as files might be outdated
# Or may not even be on disk.
if not getattr(settings, "DEVELOPER_MODE", False):
filename = staticfiles_storage.path(basename)
if not staticfiles_storage.exists(basename):
filename = None
# secondly try to find it with staticfiles
if not filename:
filename = find_staticfiles(url2pathname(basename))
return filename
def get_filecontent(self, url):
"""
Reads file contents using given `charset` and returns it as text.
"""
cache_key = "inline_static_file_content_{url}".format(url=url)
content = caches[CACHE_NAMESPACE].get(cache_key)
if content is None:
            # Removes Byte Order Mark
charset = "utf-8-sig"
basename = self.get_basename(url)
if basename is None:
return None
filename = self.get_filename(basename)
if filename is None:
return None
with codecs.open(filename, "r", charset) as fd:
content = fd.read()
# Cache this forever, as URLs will update for new files
caches[CACHE_NAMESPACE].set(cache_key, content, None)
return content
def render_to_page_load_sync_html(self):
"""
Generates the appropriate script tags for the bundle, be they JS or CSS
files.
:return: HTML of script tags for insertion into a page.
"""
tags = (
self.plugin_data_tag()
+ self.frontend_message_tag()
+ list(self.js_and_css_tags())
)
return mark_safe("\n".join(tags))
def render_to_page_load_async_html(self):
"""
Generates script tag containing Javascript to register an
asynchronously loading Javascript FrontEnd plugin against the core
front-end Kolibri app. It passes in the events that would trigger
loading the plugin, both multi-time firing events (events) and one time
firing events (once).
It also passes in information about the methods that the events should
be delegated to once the plugin has loaded.
TODO: What do we do with the extension parameter here?
:returns: HTML of a script tag to insert into a page.
"""
urls = [chunk["url"] for chunk in self.sorted_chunks()]
tags = (
self.plugin_data_tag()
+ self.frontend_message_tag()
+ [
'<script>{kolibri_name}.registerKolibriModuleAsync("{bundle}", ["{urls}"]);</script>'.format(
kolibri_name="kolibriCoreAppGlobal",
bundle=self.unique_id,
urls='","'.join(urls),
)
]
)
return mark_safe("\n".join(tags))
class WebpackInclusionMixin(object):
@abstractproperty
def bundle_html(self):
pass
@abstractproperty
def bundle_class(self):
pass
@classmethod
def html(cls):
tags = []
for hook in cls.registered_hooks:
tags.append(hook.bundle_html)
return mark_safe("\n".join(tags))
class WebpackInclusionSyncMixin(hooks.KolibriHook, WebpackInclusionMixin):
@property
def bundle_html(self):
bundle = self.bundle_class()
html = bundle.render_to_page_load_sync_html()
return mark_safe(html)
class WebpackInclusionASyncMixin(hooks.KolibriHook, WebpackInclusionMixin):
@property
def bundle_html(self):
bundle = self.bundle_class()
html = bundle.render_to_page_load_async_html()
return mark_safe(html)
| {
"content_hash": "0d8a1e2c8dfa2809fd2d6beb0eb00b28",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 109,
"avg_line_length": 36.478841870824056,
"alnum_prop": 0.5685328774650467,
"repo_name": "indirectlylit/kolibri",
"id": "5eb34a64057c9293bd61fd91345e03a6bd371348",
"size": "16379",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "kolibri/core/webpack/hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2554964"
},
{
"name": "Dockerfile",
"bytes": "4114"
},
{
"name": "Gherkin",
"bytes": "365088"
},
{
"name": "HTML",
"bytes": "24294"
},
{
"name": "JavaScript",
"bytes": "1613945"
},
{
"name": "Makefile",
"bytes": "11953"
},
{
"name": "Python",
"bytes": "2860587"
},
{
"name": "SCSS",
"bytes": "5225"
},
{
"name": "Shell",
"bytes": "5245"
},
{
"name": "Vue",
"bytes": "1604613"
}
],
"symlink_target": ""
} |
import os
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from data_dir import DATA_DIR
# Where the output boxplots will go:
AUTHORS_PDF = 'authors-box.pdf'
CREATORS_PDF = 'creators-box.pdf'
DATA_FILE = os.path.join(DATA_DIR,'authors.txt')
SEP = '&'
######################################################################
def read_authors(data_file):
''' Gather a list, each entry is (app, then no. of authors data) '''
authors = []
with open(data_file, 'r') as in_fd:
for line in in_fd:
if line.startswith('#'):
continue
line = line.strip()[:-len('\\\\')] # remove \\ at end
data = [col.strip() for col in line.split(SEP)]
authors.append((data[0], [int(n) for n in data[1:]]))
return authors
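# For reference, the data file parsed above is expected to hold LaTeX-style
# table rows, one application per line, e.g. (values illustrative only):
#     appname & 1234 & 56 \\
# i.e. application name, number of committers and number of creators,
# separated by SEP ('&') and terminated by '\\'.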
def plot_authors(data, xlabel, xticks, violin=False, save_as=None):
''' Violin plot or boxplot'''
fig, ax = plt.subplots(figsize=(8,2))
if violin:
ax.violinplot(data, vert=False, widths=0.7, showmeans=True,
showextrema=True, showmedians=True)
else:
bplot = ax.boxplot(data, widths=0.5, sym='rx', vert=False, patch_artist=True)
for path in bplot['boxes']:
path.set_facecolor('lightblue')
plt.rc('xtick', labelsize=8)
ax.set_xticks(xticks)
ax.xaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax.set_xlabel(xlabel)
ax.set_yticks([]) # Nothing on vertical axis
if save_as:
plt.savefig(save_as, bbox_inches='tight')
# Display some data on the quartiles:
        print('min={}, 10%={}, Q1={}, median={}, Q3={}, max={}'\
.format(*np.percentile(data, [0, 10, 25, 50, 75, 100])))
else:
plt.show()
def plot_authors_hist(data, xlabel, xticks, save_as=None):
'''Histogram'''
fig, ax = plt.subplots(figsize=(8,4))
plt.hist(data, bins=xticks, facecolor='green', alpha=0.75)
plt.plot(data, np.zeros(data.shape), 'b+', ms=20)
ax.set_xlabel(xlabel)
ax.set_xticks(xticks)
ax.set_ylabel('No. of applications')
ax.set_yticks(np.arange(0, 25, 2))
if save_as:
plt.savefig(save_as, bbox_inches='tight')
else:
plt.show()
if __name__ == '__main__':
author_data = read_authors(DATA_FILE)
all_authors = np.array([d[1][0] for d in author_data])
all_creators = np.array([d[1][1] for d in author_data])
#plot_authors_hist(all_authors, 'No. of committers', np.arange(0, 1700, 100), 'authors-hist.pdf')
plot_authors(all_authors, 'No. of committers',
np.arange(0, 2000, 100), False, AUTHORS_PDF)
plot_authors(all_creators, 'No. of creators',
np.arange(0, 360, 20), False, CREATORS_PDF)
| {
"content_hash": "0ba514dcc680170fb458a49a6f974a4b",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 101,
"avg_line_length": 35.35443037974684,
"alnum_prop": 0.5760830648048694,
"repo_name": "MalloyPower/python-compliance",
"id": "5c10d4337e6eb36bd5f3e146587fa31d2ef8e826",
"size": "2911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis-code/plot_authors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5903"
},
{
"name": "Lex",
"bytes": "499347"
},
{
"name": "Makefile",
"bytes": "4761"
},
{
"name": "Python",
"bytes": "78573"
},
{
"name": "Shell",
"bytes": "10775"
},
{
"name": "Yacc",
"bytes": "456204"
}
],
"symlink_target": ""
} |
a = input("insert first number ")
b = input("insert second number")
if set(a) == set(b):
print (a, " and " , b , " have the P property!")
else:
print (a, " and " , b , " does NOT have the P property!")
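# Example: 121 and 2111 have the P property because both use exactly the
# digits {'1', '2'} (set("121") == set("2111")), while 123 and 124 do not.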
| {
"content_hash": "ae0f78a18a37d09af3f78101460abef5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 58,
"avg_line_length": 25.75,
"alnum_prop": 0.5825242718446602,
"repo_name": "rusucosmin/courses",
"id": "5a0e3cb3c958d92857468b8984b8b8c47ae7eb56",
"size": "206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ubb/fop/lab01/ex12.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "66934"
},
{
"name": "Awk",
"bytes": "390"
},
{
"name": "C",
"bytes": "506519"
},
{
"name": "C#",
"bytes": "18080"
},
{
"name": "C++",
"bytes": "300998"
},
{
"name": "CMake",
"bytes": "1294"
},
{
"name": "CSS",
"bytes": "34492"
},
{
"name": "Common Lisp",
"bytes": "24300"
},
{
"name": "HTML",
"bytes": "1125773"
},
{
"name": "Hack",
"bytes": "1121"
},
{
"name": "Java",
"bytes": "158144"
},
{
"name": "JavaScript",
"bytes": "35305"
},
{
"name": "Jupyter Notebook",
"bytes": "20740224"
},
{
"name": "Lex",
"bytes": "3733"
},
{
"name": "M",
"bytes": "1745"
},
{
"name": "MATLAB",
"bytes": "26207"
},
{
"name": "Makefile",
"bytes": "398"
},
{
"name": "NewLisp",
"bytes": "197"
},
{
"name": "PHP",
"bytes": "56891"
},
{
"name": "Pascal",
"bytes": "672"
},
{
"name": "Prolog",
"bytes": "25141"
},
{
"name": "Python",
"bytes": "440544"
},
{
"name": "R",
"bytes": "3431"
},
{
"name": "Roff",
"bytes": "196"
},
{
"name": "Ruby",
"bytes": "27888"
},
{
"name": "Scala",
"bytes": "620018"
},
{
"name": "Shell",
"bytes": "25651"
},
{
"name": "TeX",
"bytes": "22510"
},
{
"name": "TypeScript",
"bytes": "14752"
},
{
"name": "XSLT",
"bytes": "1489"
},
{
"name": "Yacc",
"bytes": "14087"
}
],
"symlink_target": ""
} |
'''
The MIT License (MIT)
Copyright (c) <2014> <Mathias Lesche>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
contact: mat.lesche(at)gmail.com
'''
''' python modules '''
import logging
from argparse import ArgumentParser as ArgumentParser
from argparse import RawDescriptionHelpFormatter
from collections import defaultdict
from collections import OrderedDict
from os import sep
from types import NoneType
''' own modules '''
from main.io_module import check_Fileslist
from main.io_module import create_Directory
from main.io_module import check_Directorylist
from main.io_module import get_fileobject
from main.io_module import readFile_getList_withSep
from main.io_module import write_list
from main.main_logger import MainLogger
class Parser(object):
def __init__(self):
self.__parser = ArgumentParser(description="""
Script removes chromosomes and changes chromosome names in a fasta file.
Single chromosomes will be saved separately in a subdirectory of the output folder.
Write chromosomes which are going to be kept in a file (-c option) and sort them.
Next to the old name, write the new name (separated by {tab}).
It can either be
>1{tab}>chr1 or 1{tab}chr1
The new complete reference fasta will have the same order as in this file.
""", formatter_class=RawDescriptionHelpFormatter)
self.initialiseParser()
self.__classname = self.__class__.__name__
self.__log = False
self.__logger = ''
self.start_logging()
self.__fasta = ''
self.__chrom = ''
self.__output = ''
self.__prefix = ''
def initialiseParser(self):
self.__parser.add_argument('-f', '--fasta', type=str, metavar='FILE', dest='fasta', required=True, help='reference fasta file')
self.__parser.add_argument('-c', '--chromosome', type=str, metavar='FILE', dest='chrom', required=True, help='file with the old and new chromosome names (tab separated)')
self.__parser.add_argument('-o', '--output', type=str, metavar='DIRECTORY', dest='output', required=True, help='output directory')
self.__parser.add_argument('-p', '--prefix', type=str, metavar='STRING', dest='prefix', required=True, help='prefix for the new files')
def parse(self, inputstring = None):
if isinstance(inputstring, NoneType):
self.__options = self.__parser.parse_args()
else:
self.__options = self.__parser.parse_args(inputstring)
def getParser(self):
return self.__parser
def start_logging(self):
self.__log = True
self.__logger = logging.getLogger('pipeline.createReference')
def show_log(self, level, message):
if self.__log:
if level == 'debug':
self.__logger.debug(message)
elif level == 'info':
self.__logger.info(message)
elif level == 'warning':
self.__logger.warning(message)
elif level == 'error':
self.__logger.error(message)
elif level == 'critical':
self.__logger.critical(message)
else:
print message
def checkDirectory(self):
dirlist = (self.__options.output, )
good, bad = check_Directorylist(dirlist)
if len(good) == 0 or len(good) > 1:
for i in bad:
self.show_log('error', "check output directory (-o): {0}".format(i))
exit(2)
self.__output = good[0]
self.show_log('info', "output directory: {0}".format(self.__output))
def checkFiles(self):
good, bad = check_Fileslist((self.__options.fasta, self.__options.chrom))
if len(good) != 2:
for i in bad:
self.show_log('warning', "check file: {0}".format(i))
self.show_log('error', "check input for gtf and/or chromosome file")
exit(2)
self.__fasta = good[0]
self.show_log('info', "fasta file: {0}".format(self.__fasta))
self.__chrom = good[1]
self.show_log('info', "chromosome file: {0}".format(self.__chrom))
def main(self):
self.checkDirectory()
self.checkFiles()
self.__prefix = self.__options.prefix
self.show_log('info', "prefix: {0}".format(self.__prefix))
def get_chrom(self):
return self.__chrom
def get_fasta(self):
return self.__fasta
def get_output(self):
return self.__output
def get_prefix(self):
return self.__prefix
chrom = property(get_chrom, None, None, None)
fasta = property(get_fasta, None, None, None)
output = property(get_output, None, None, None)
prefix = property(get_prefix, None, None, None)
class Reference(object):
def __init__(self, fasta, chrom, output, prefix):
self.__log = False
self.__logger = ''
self.start_logging()
self.__fasta = fasta
self.__chrom = chrom
self.__chromdict = OrderedDict()
self.__fastadict = defaultdict(list)
self.__output = output
self.__prefix = prefix
def start_logging(self):
self.__log = True
self.__logger = logging.getLogger('pipeline.createReference')
def show_log(self, level, message):
if self.__log:
if level == 'debug':
self.__logger.debug(message)
elif level == 'info':
self.__logger.info(message)
elif level == 'warning':
self.__logger.warning(message)
elif level == 'error':
self.__logger.error(message)
elif level == 'critical':
self.__logger.critical(message)
else:
print message
def prepare_chromdict(self):
        self.show_log('info', 'chromosomes which are kept and renamed:')
for entry in readFile_getList_withSep(self.__chrom, '\t'):
self.show_log('info', '{0} -> {1}'.format(entry[0], entry[1]))
if entry[0][0] != '>':
entry[0] = '>{0}'.format(entry[0])
if entry[1][0] != '>':
entry[1] = '>{0}'.format(entry[1])
self.__chromdict[entry[0]] = entry[1]
def process_Fasta(self):
self.show_log('info', 'Reading the fasta file')
chromremove = set()
linecount = 0
chromid = ''
with get_fileobject(self.__fasta, 'r') as filein:
for line in filein:
if line.startswith('>'):
try:
chromid = self.__chromdict[line.rstrip('\n')]
except KeyError:
chromid = ''
chromremove.add(line.rstrip('\n'))
linecount += 1
elif chromid != '':
self.__fastadict[chromid].append(line)
else:
linecount += 1
self.show_log('info', 'chromosomes removed: {0}'.format(len(chromremove)))
self.show_log('info', 'lines removed: {0}'.format(linecount))
def writeFasta(self):
self.show_log('info', 'Writing the new fasta files')
chromfolder = '{0}chromosomes{1}'.format(self.__output, sep)
if not create_Directory(chromfolder):
self.show_log('error', "Can't create directory: {0}".format(chromfolder))
exit(2)
fastafile = '{0}{1}.fa'.format(self.__output, self.__prefix)
self.show_log('info', 'Fasta file: {0}'.format(fastafile))
self.show_log('info', 'Chromosome folder: {0}'.format(chromfolder))
for chrom in self.__chromdict.viewvalues():
if len(self.__fastadict[chrom]) != 0:
chromfilename = '{0}{1}.fa'.format(chromfolder, chrom.lstrip('>'))
self.show_log('info', "Writing '{0}'".format(chromfilename))
write_list([chrom+'\n'] + self.__fastadict[chrom], chromfilename)
write_list([chrom+'\n'] + self.__fastadict[chrom], fastafile, 'a')
if __name__ == '__main__':
mainlog = MainLogger('', False)
parseinst = Parser()
parseinst.parse()
parseinst.main()
refinst = Reference(parseinst.fasta, parseinst.chrom, parseinst.output, parseinst.prefix)
refinst.prepare_chromdict()
refinst.process_Fasta()
refinst.writeFasta() | {
"content_hash": "df24fedeb12f5f418f3ff70fe0bb48fd",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 174,
"avg_line_length": 35.16803278688525,
"alnum_prop": 0.6548187856893136,
"repo_name": "mlesche/deep_seq_pipeline",
"id": "2828564b92f78559096608a1ead53c3677516283",
"size": "8603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_seq_pipeline/src/general_scripts/createReference.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "398289"
},
{
"name": "Shell",
"bytes": "9011"
}
],
"symlink_target": ""
} |
""" Classification using the WEKA experimenter
A WEKA classification process consists of executing a certain
WEKA experiment. The results of all these processes are stored in a temporary
directory and after the completion of all processes of the operation,
the consolidate method of the *WekaClassificationOperation* is executed and the
results are merged into a consistent representation of the operations result
collection.
http://www.cs.waikato.ac.nz/ml/weka/
"""
import sys
import os
import glob
import yaml
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
import processing
else:
import multiprocessing as processing
import pySPACE
from pySPACE.missions.operations.base import Operation, Process
from pySPACE.resources.dataset_defs.base import BaseDataset
from pySPACE.resources.dataset_defs.performance_result import PerformanceResultSummary
from pySPACE.tools.filesystem import create_directory
class WekaClassificationOperation(Operation):
""" Operation for classification using Weka experimenter
A Weka classification operation consists of a set of WEKA processes. Each of
these processes consists of executing a certain WEKA experiment.
The results of this operation are collected using the
consolidate method that produces a consistent representation
of the result collections.
"""
def __init__(self, processes, operation_spec, result_directory,
number_processes, create_process=None):
super(WekaClassificationOperation, self).__init__(processes,
operation_spec,
result_directory)
self.create_process = create_process
self.number_processes = number_processes
@classmethod
def create(cls, operation_spec, result_directory, debug=False, input_paths=[]):
"""
A factory method that creates an WEKA operation based on the
information given in the operation specification operation_spec
"""
assert(operation_spec["type"] == "weka_classification")
# Determine all parameter combinations that should be tested
parameter_settings = cls._get_parameter_space(operation_spec)
# Read the command template from a file
template_file = open(os.path.join(pySPACE.configuration.spec_dir,
"operations",
"weka_templates",
operation_spec["template"]),
'r')
command_template = template_file.read()
template_file.close()
# number of processes
if "runs" in operation_spec:
number_processes = len(input_paths) * len(parameter_settings) * \
operation_spec["runs"]
else: # approximate the number of processes
runs = []
for dataset_dir in input_paths:
collection = BaseDataset.load(dataset_dir)
runs.append(collection.meta_data["runs"])
runs = max(runs)
number_processes = len(input_paths) * len(parameter_settings) * \
runs
if debug == True:
# To better debug creation of processes we don't limit the queue
# and create all processes before executing them
processes = processing.Queue()
cls._createProcesses(processes, result_directory, operation_spec,
parameter_settings, input_paths,
command_template)
# create and return the weka operation object
return cls(processes, operation_spec, result_directory,
number_processes)
else:
# Create all processes by calling a recursive helper method in
# another thread so that already created processes can be executed in
            # parallel. Therefore a queue is used whose size is limited to
            # guarantee that not too many objects are created (because this costs
# memory). However, the actual number of 100 is arbitrary and might
# be reviewed.
processes = processing.Queue(100)
create_process = processing.Process(target=cls._createProcesses,
args=( processes, result_directory, operation_spec,
parameter_settings, input_paths,
command_template))
create_process.start()
# create and return the weka operation object
return cls(processes, operation_spec, result_directory,
number_processes, create_process)
@classmethod
def _createProcesses(cls, processes, result_directory, operation_spec,
parameter_settings, input_collections, command_template):
# For each combination of classifier, input-collection and
# run number, create one WEKA_process
for dataset_dir in input_collections:
collection = BaseDataset.load(dataset_dir)
# Determine the number of iterations and splits to be used
iterations = collection.meta_data["runs"]
splits = collection.meta_data["splits"]
if "runs" in operation_spec:
assert(iterations in [1, operation_spec["runs"]])
iterations = operation_spec["runs"]
if "cv_folds" in operation_spec:
assert(splits in [1, operation_spec["cv_folds"]])
splits = operation_spec["cv_folds"]
for parametrization in parameter_settings:
for run_number in range(iterations):
process = WEKAClassificationProcess(dataset_dir,
command_template,
parametrization,
splits,
run_number,
result_directory)
processes.put(process)
# give executing process the sign that creation is now finished
processes.put(False)
def consolidate(self):
"""
Consolidates the results obtained by the single WEKA processes into
a consistent structure of collections that are stored on the
file system.
"""
self._log("Consolidating results ...")
# We load and store the results once into a PerformanceResultSummary.
# From_multiple csv does the necessary consolidation
# and mixes and parses the table.
self._log("Reading intermediate results...")
result_collection = PerformanceResultSummary(dataset_dir=self.result_directory)
self._log("done")
self._log("Storing result collection")
result_collection.store(self.result_directory)
self._log("done")
# Write the specification of this operation
# to the result directory in order to make later
# analysis of results more easy
source_operation_file = open(os.path.join(self.result_directory,
"source_operation.yaml"), 'w')
yaml.dump(self.operation_spec, source_operation_file)
source_operation_file.close()
class WEKAClassificationProcess(Process):
""" Process for classification using Weka
A WEKA classification process consists of executing a certain WEKA
experiment. This experiment is defined by a template in which certain aspects
can be configured, for instance:
* which classifier is used
* which data set is processed
* how many cross validation folds are used etc.
The results of the WEKA experiment are written to the file system and
later on collected and consolidated during the consolidation of
the *WekaClassificationOperation*.
"""
unique_id = 0
def __init__(self,
dataset_dir,
command_template,
parametrization,
cv_folds,
run_number,
operation_result_dir):
super(WEKAClassificationProcess, self).__init__()
# Load the abbreviations
abbreviations_file = open(os.path.join(pySPACE.configuration.spec_dir,
'operations/weka_templates',
'abbreviations.yaml'), 'r')
self.abbreviations = yaml.load(abbreviations_file)
abbreviations_file.close()
# Determine the directory in which the process' results
# are stored
self.result_directory = operation_result_dir
# Create collection
collection = BaseDataset.load(dataset_dir)
# The parametrization that is independent of the collection type
# and the specific weka command template that is executed
self.params = {"collection_name": dataset_dir.strip(os.sep).split(os.sep)[-1],
"run_number": run_number,
"cv_folds": cv_folds,
"weka_class_path": pySPACE.configuration.weka_class_path,
"temp_results": self.result_directory,
"unique_id": WEKAClassificationProcess.unique_id}
# Collection dependent parameters
if not collection.meta_data["train_test"] \
and collection.meta_data["splits"] == 1:
raise NotImplementedError()
else:
# The pattern of the train and test files generated by crossvalidation
data_pattern = os.path.join(dataset_dir,
collection.meta_data["data_pattern"])
            # One example arff file in which WEKA can look up relation name etc.
sample_dataset = data_pattern.replace("_run", "_run0")\
.replace("_sp_","_sp0_")\
.replace("_tt","_train")
self.params.update({"sample_dataset": sample_dataset,
"data_pattern": data_pattern})
# Add custom parameters for the weka command template
for parameter_name, parameter_value in parametrization.iteritems():
self.params[parameter_name + "_abbr"] = parameter_value
# Auto-expand abbreviations
if parameter_value in self.abbreviations:
parameter_value = self.abbreviations[parameter_value]
elif parameter_name == 'classifier':
import warnings
warnings.warn("Did not find classifier abbreviation %s. "
" Expecting full name." % parameter_value)
self.params[parameter_name] = parameter_value
# Build the WEKA command by repeatedly replacing all placeholders in
# the template
while True:
instantiated_template = command_template % self.params
if instantiated_template == command_template:
                # All placeholders replaced
self.weka_command = instantiated_template
break
else:
# We have to continue since we are not converged
command_template = instantiated_template
self.handler_class = None
WEKAClassificationProcess.unique_id += 1
def __call__(self):
"""
Executes this process on the respective modality
"""
# Restore configuration
pySPACE.configuration = self.configuration
############## Prepare benchmarking ##############
super(WEKAClassificationProcess, self).pre_benchmarking()
# Execute the java command in this OS process
os.system(self.weka_command)
############## Clean up after benchmarking ##############
super(WEKAClassificationProcess, self).post_benchmarking()
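# A minimal standalone sketch of the repeated "%"-substitution used above to
# instantiate the WEKA command template, showing why the loop iterates until
# a fixpoint: parameter values may themselves contain placeholders. The
# template string and parameter values are made up for illustration; running
# this block directly assumes the pySPACE imports at the top of this module
# resolve.
if __name__ == '__main__':
    _template = "java -cp %(weka_class_path)s %(classifier)s -t %(sample_dataset)s"
    _params = {"weka_class_path": "/opt/weka/weka.jar",
               "classifier": "%(classifier_base)s -C 1.0",
               "classifier_base": "weka.classifiers.functions.SMO",
               "sample_dataset": "/tmp/sample_run0_sp0_train.arff"}
    while True:
        _instantiated = _template % _params
        if _instantiated == _template:
            # converged: no placeholders were replaced in this pass
            break
        # parameters may themselves contain placeholders, so keep substituting
        _template = _instantiated
    assert "%(" not in _template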
| {
"content_hash": "8d19c760f5f1699a9654eebfdc61cb79",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 87,
"avg_line_length": 47.17175572519084,
"alnum_prop": 0.5815195404158913,
"repo_name": "pyspace/pyspace",
"id": "6176bd8116389eb8882009b3a84d602ec381de40",
"size": "12359",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pySPACE/missions/operations/weka_classification.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11128"
},
{
"name": "C++",
"bytes": "309606"
},
{
"name": "Matlab",
"bytes": "3768"
},
{
"name": "Python",
"bytes": "3160853"
},
{
"name": "QMake",
"bytes": "3217"
},
{
"name": "Shell",
"bytes": "253"
}
],
"symlink_target": ""
} |
print "I'm a python"
| {
"content_hash": "d9a1e41a9032ec0d09d77c830eafd3d9",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 20,
"avg_line_length": 21,
"alnum_prop": 0.6666666666666666,
"repo_name": "rcaught/todos_export",
"id": "8ae20230dfa90c469fb758a8c4e3e87a4bf3877e",
"size": "41",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spec/resources/python_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41"
},
{
"name": "Ruby",
"bytes": "18663"
}
],
"symlink_target": ""
} |
from subprocess import run, PIPE
from west import log
# Given a path to the applicable C compiler, a C source file, and the
# corresponding TargetCompileGroup, determine which include files would
# be used.
# Arguments:
# 1) path to applicable C compiler
# 2) C source file being analyzed
# 3) TargetCompileGroup for the current target
# Returns: list of paths to include files, or [] on error or empty findings.
def getCIncludes(compilerPath, srcFile, tcg):
log.dbg(f" - getting includes for {srcFile}")
# prepare fragments
fragments = [fr for fr in tcg.compileCommandFragments if fr.strip() != ""]
# prepare include arguments
includes = ["-I" + incl.path for incl in tcg.includes]
# prepare defines
defines = ["-D" + d.define for d in tcg.defines]
# prepare command invocation
cmd = [compilerPath, "-E", "-H"] + fragments + includes + defines + [srcFile]
cp = run(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
if cp.returncode != 0:
log.dbg(f" - calling {compilerPath} failed with error code {cp.returncode}")
return []
else:
# response will be in cp.stderr, not cp.stdout
return extractIncludes(cp.stderr)
# Parse the response from the CC -E -H call, to extract the include file paths
def extractIncludes(resp):
includes = set()
# lines we want will start with one or more periods, followed by
# a space and then the include file path, e.g.:
# .... /home/steve/programming/zephyr/zephyrproject/zephyr/include/kernel.h
# the number of periods indicates the depth of nesting (for transitively-
# included files), but here we aren't going to care about that. We'll
# treat everything as tied to the corresponding source file.
# once we hit the line "Multiple include guards may be useful for:",
# we're done; ignore everything after that
for rline in resp.splitlines():
if rline.startswith("Multiple include guards"):
break
        if rline.startswith("."):  # startswith() also safely skips blank lines
sline = rline.split(" ", maxsplit=1)
if len(sline) != 2:
continue
includes.add(sline[1])
includesList = list(includes)
includesList.sort()
return includesList
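# A minimal sketch of extractIncludes() applied to a hand-written stand-in
# for "cc -E -H" stderr output. The paths are made up; running this block
# directly assumes the `west` import at the top of this module resolves.
if __name__ == "__main__":
    sample_stderr = "\n".join([
        ". /home/user/zephyrproject/zephyr/include/kernel.h",
        ".. /home/user/zephyrproject/zephyr/include/toolchain.h",
        ". /home/user/zephyrproject/zephyr/include/device.h",
        "Multiple include guards may be useful for:",
        "/home/user/zephyrproject/zephyr/include/ignored.h",
    ])
    # prints the three include paths above, de-duplicated and sorted
    print(extractIncludes(sample_stderr))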
| {
"content_hash": "aa8bf5421ac97eba0cc9b5a0780a2247",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 87,
"avg_line_length": 36.868852459016395,
"alnum_prop": 0.666073810582481,
"repo_name": "finikorg/zephyr",
"id": "c1060e661786d201831235944aee96438aa4ece8",
"size": "2332",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "scripts/west_commands/zspdx/getincludes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "445128"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "44321001"
},
{
"name": "C++",
"bytes": "29292"
},
{
"name": "CMake",
"bytes": "1369918"
},
{
"name": "Cadence",
"bytes": "1501"
},
{
"name": "EmberScript",
"bytes": "997"
},
{
"name": "Forth",
"bytes": "1648"
},
{
"name": "GDB",
"bytes": "1285"
},
{
"name": "Haskell",
"bytes": "722"
},
{
"name": "JetBrains MPS",
"bytes": "3152"
},
{
"name": "PLSQL",
"bytes": "281"
},
{
"name": "Perl",
"bytes": "215338"
},
{
"name": "Python",
"bytes": "2251570"
},
{
"name": "Shell",
"bytes": "171294"
},
{
"name": "SmPL",
"bytes": "36840"
},
{
"name": "Smalltalk",
"bytes": "1885"
},
{
"name": "SourcePawn",
"bytes": "14890"
},
{
"name": "Tcl",
"bytes": "5838"
},
{
"name": "VBA",
"bytes": "294"
},
{
"name": "Verilog",
"bytes": "6394"
}
],
"symlink_target": ""
} |
import json
import logging
import os
import time
from autobahn.twisted import websocket
from universe.twisty import reactor
from twisted.internet import endpoints
logger = logging.getLogger(__name__)
class RewardServerClient(websocket.WebSocketClientProtocol, object):
def __init__(self):
super(RewardServerClient, self).__init__()
self.id = -1
self.proxy_server = None
self._connected = False
def onConnect(self, request):
self.id = self.factory.proxy_server.id
logger.info('[RewardProxyClient] [%d] Connected to rewarder', self.id)
self.proxy_server = self.factory.proxy_server
self._connected = True
buffered = self.proxy_server.pop_buffer()
logger.info('[RewardProxyClient] [%d] Flushing %d buffered messages', self.id, len(buffered))
for msg in buffered:
self.sendMessage(msg)
def onOpen(self):
logger.info('[RewardProxyClient] [%d] Rewarder websocket connection established', self.id)
def onMessage(self, msg, isBinary):
logger.debug('[RewardProxyClient] [%d] Received message from server: %s', self.id, msg)
self.proxy_server.sendMessage(msg)
# Record the message
self.proxy_server.record_message(msg.decode('utf-8'), from_rewarder=True)
# # Process the message for recording
# method, headers, body = unpack_message(msg)
#
# if method == "env.reward":
# # {"body":{"info":{"episode":0},"reward":0.0,"done":false},
# # "headers":{"sent_at":1473126129.231828928,"message_id":207},
# # "method":"env.reward"}
def onClose(self, wasClean, code, reason):
logger.info('[RewardProxyClient] [%d] Rewarder websocket connection closed: %s', self.id, reason)
def close(self):
logger.info('[RewardProxyClient] [%d] Closing connection', self.id)
self.transport.loseConnection()
class RewardProxyServer(websocket.WebSocketServerProtocol, object):
_next_id = 0
_n_open_files = 0
@classmethod
def next_id(cls):
id = cls._next_id
cls._next_id += 1
return id
def __init__(self):
super(RewardProxyServer, self).__init__()
self.id = self.next_id()
self.client = None
        self.file = None # We do not open the file until we have established an end-to-end connection
self.buffered = []
self._closed = False
def pop_buffer(self):
"""Called by the client once it's ready to start sending messages.
"""
buffered = self.buffered
self.buffered = []
return buffered
def begin_recording(self):
"""
Open the file and write the metadata header to describe this recording. Called after we establish an end-to-end connection
This uses Version 1 of our protocol
Version 0 can be seen here: https://github.com/openai/universe/blob/f85a7779c3847fa86ec7bb513a1da0d3158dda78/bin/recording_agent.py
"""
logger.info("[RewardProxyServer] [%d] Starting recording", self.id)
if self._closed:
logger.error(
"[RewardProxyServer] [%d] Attempted to start writing although client connection is already closed. Aborting", self.id)
self.close()
return
if self._n_open_files != 0:
logger.error("[RewardProxyServer] [%d] WARNING: n open rewards files = %s. This is unexpected. Dropping connection.", self.id, self._n_open_files)
self.close()
return
logfile_path = os.path.join(self.factory.logfile_dir, 'rewards.demo')
logger.info('Recording to {}'.format(logfile_path))
self.file = open(logfile_path, 'w')
self._n_open_files += 1
logger.info("[RewardProxyServer] [%d] n open rewards files incremented: %s", self.id, self._n_open_files)
self.file.write(json.dumps({
'version': 1,
'_debug_version': '0.0.1', # Give this an internal version for debugging corrupt reward.demo files # TODO, pull this from setup.py or the host docker image
}))
self.file.write('\n')
self.file.flush()
logger.info("[RewardProxyServer] [%d] Wrote version number", self.id)
def onConnect(self, request):
logger.info('[RewardProxyServer] [%d] Client connecting: %s', self.id, request.peer)
self._request = request
def onOpen(self):
logger.info("[RewardProxyServer] [%d] Websocket connection established", self.id)
self.connect_upstream()
def connect_upstream(self, tries=1, max_attempts=7):
if self._closed:
logger.info("[RewardProxyServer] [%d] Attempted to connect upstream although client connection is already closed. Aborting",
self.id)
return
remote = getattr(self.factory, 'rewarder_address', 'localhost:15900')
endpoint = endpoints.clientFromString(reactor, 'tcp:' + remote)
client_factory = websocket.WebSocketClientFactory('ws://' + remote)
headers = {'authorization': self._request.headers['authorization']}
if self._request.headers.get('openai-observer'):
headers['openai-observer'] = self._request.headers.get('openai-observer')
client_factory.headers = headers
client_factory.protocol = RewardServerClient
client_factory.proxy_server = self
client_factory.endpoint = endpoint
logger.info("[RewardProxyServer] [%d] Connecting to upstream %s (try %d/%d)", self.id, remote, tries, max_attempts)
def _connect_callback(client):
logger.info('[RewardProxyServer] [%d] Upstream connection %s established', self.id, remote)
self.client = client
if self.factory.logfile_dir:
self.begin_recording()
def _connect_errback(reason):
if tries < max_attempts:
# Somewhat arbitrary exponential backoff: should be
# pretty rare, and indicate that we're just starting
# up.
delay = 1.5 ** tries
logger.info('[RewardProxyServer] [%d] Connection to %s failed: %s. Try %d/%d; going to retry in %fs', self.id, remote, reason, tries, max_attempts, delay)
reactor.callLater(
delay, self.connect_upstream,
tries=tries+1, max_attempts=max_attempts)
else:
                logger.error('[RewardProxyServer] [%d] Connection to %s failed: %s. Completed %d/%d attempts; disconnecting.', self.id, remote, reason, tries, max_attempts)
self.transport.loseConnection()
endpoint.connect(client_factory).addCallbacks(_connect_callback, _connect_errback)
def close(self):
logger.info('[RewardProxyServer] [%d] Closing...', self.id)
self.transport.loseConnection()
def onClose(self, wasClean, code, reason):
logger.info('[RewardProxyServer] [%d] Client connection closed: %s', self.id, reason)
if self.client:
self.client.close()
if self.file:
self.file.close()
self._closed = True
def onMessage(self, msg, binary):
logger.debug('[RewardProxyServer] [%d] Received message from client: %s', self.id, msg)
# Pass the message on to the client
if self.client and self.client._connected:
self.client.sendMessage(msg)
else:
self.buffered.append(msg)
self.record_message(msg.decode('utf-8'), from_rewarder=False)
def record_message(self, msg, from_rewarder):
"""Record a message to our rewards.demo file if it is has been opened"""
if self.file:
            # Include an authoritative timestamp (because the `sent_at` from the server is likely to be different)
timestamped_message = {
'timestamp': time.time(),
'message': json.loads(msg),
'from_rewarder': from_rewarder,
}
self.file.write(json.dumps(timestamped_message))
self.file.write('\n')
self.file.flush()
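# For reference, the resulting rewards.demo file is a sequence of JSON lines:
# one version header written by begin_recording(), then one timestamped entry
# per message written by record_message(). For example (values illustrative):
#   {"version": 1, "_debug_version": "0.0.1"}
#   {"timestamp": 1473126129.23, "message": {"method": "env.reward", ...}, "from_rewarder": true}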
| {
"content_hash": "34bd68d8331bff21bde853c92950208d",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 173,
"avg_line_length": 40.26600985221675,
"alnum_prop": 0.6181795938341081,
"repo_name": "openai/universe",
"id": "472c702ffda91a62d2363af22f5ebc6a4880c8a4",
"size": "8174",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "universe/rewarder/reward_proxy_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "828"
},
{
"name": "Python",
"bytes": "546632"
}
],
"symlink_target": ""
} |
'''
Monitor the Windows Event Log
'''
# stdlib
import calendar
from datetime import datetime, timedelta
# project
from checks.wmi_check import WinWMICheck, to_time, from_time
from utils.containers import hash_mutable
from utils.timeout import TimeoutException
SOURCE_TYPE_NAME = 'event viewer'
EVENT_TYPE = 'win32_log_event'
class Win32EventLogWMI(WinWMICheck):
    EVENT_PROPERTIES = [
        "Message",
        "SourceName",
        "TimeGenerated",
        "Type",
        "User",
        "InsertionStrings",
        "EventCode",
        "Logfile",  # referenced by LogEvent._msg_title
    ]
NAMESPACE = "root\\CIMV2"
CLASS = "Win32_NTLogEvent"
def __init__(self, name, init_config, agentConfig, instances=None):
WinWMICheck.__init__(self, name, init_config, agentConfig,
instances=instances)
self.last_ts = {}
def check(self, instance):
# Connect to the WMI provider
host = instance.get('host', "localhost")
username = instance.get('username', "")
password = instance.get('password', "")
instance_tags = instance.get('tags', [])
notify = instance.get('notify', [])
user = instance.get('user')
ltypes = instance.get('type', [])
source_names = instance.get('source_name', [])
log_files = instance.get('log_file', [])
event_ids = instance.get('event_id', [])
message_filters = instance.get('message_filters', [])
instance_hash = hash_mutable(instance)
instance_key = self._get_instance_key(host, self.NAMESPACE, self.CLASS, instance_hash)
# Store the last timestamp by instance
if instance_key not in self.last_ts:
self.last_ts[instance_key] = datetime.utcnow()
return
query = {}
filters = []
last_ts = self.last_ts[instance_key]
query['TimeGenerated'] = ('>=', self._dt_to_wmi(last_ts))
if user:
query['User'] = ('=', user)
if ltypes:
query['Type'] = []
for ltype in ltypes:
query['Type'].append(('=', ltype))
if source_names:
query['SourceName'] = []
for source_name in source_names:
query['SourceName'].append(('=', source_name))
if log_files:
query['LogFile'] = []
for log_file in log_files:
query['LogFile'].append(('=', log_file))
if event_ids:
query['EventCode'] = []
for event_id in event_ids:
query['EventCode'].append(('=', event_id))
if message_filters:
query['NOT Message'] = []
query['Message'] = []
for filt in message_filters:
if filt[0] == '-':
query['NOT Message'].append(('LIKE', filt[1:]))
else:
query['Message'].append(('LIKE', filt))
filters.append(query)
wmi_sampler = self._get_wmi_sampler(
instance_key,
self.CLASS, self.EVENT_PROPERTIES,
filters=filters,
host=host, namespace=self.NAMESPACE,
username=username, password=password,
and_props=['Message']
)
try:
wmi_sampler.sample()
except TimeoutException:
self.log.warning(
u"[Win32EventLog] WMI query timed out."
u" class={wmi_class} - properties={wmi_properties} -"
u" filters={filters} - tags={tags}".format(
wmi_class=self.CLASS, wmi_properties=self.EVENT_PROPERTIES,
filters=filters, tags=instance_tags
)
)
else:
for ev in wmi_sampler:
                # for local events we don't need to specify a hostname
hostname = None if (host == "localhost" or host == ".") else host
log_ev = LogEvent(ev, hostname, instance_tags, notify,
self.init_config.get('tag_event_id', False))
# Since WQL only compares on the date and NOT the time, we have to
# do a secondary check to make sure events are after the last
# timestamp
if log_ev.is_after(last_ts):
self.event(log_ev.to_event_dict())
else:
self.log.debug('Skipping event after %s. ts=%s' % (last_ts, log_ev.timestamp))
# Update the last time checked
self.last_ts[instance_key] = datetime.utcnow()
def _dt_to_wmi(self, dt):
''' A wrapper around wmi.from_time to get a WMI-formatted time from a
time struct.
'''
return from_time(year=dt.year, month=dt.month, day=dt.day,
hours=dt.hour, minutes=dt.minute,
seconds=dt.second, microseconds=0, timezone=0)
class LogEvent(object):
def __init__(self, ev, hostname, tags, notify_list, tag_event_id):
self.event = ev
self.hostname = hostname
self.tags = self._tags(tags, ev.EventCode) if tag_event_id else tags
self.notify_list = notify_list
self.timestamp = self._wmi_to_ts(self.event['TimeGenerated'])
@property
def _msg_title(self):
return '{logfile}/{source}'.format(
logfile=self.event['Logfile'],
source=self.event['SourceName'])
@property
def _msg_text(self):
msg_text = ""
if 'Message' in self.event:
msg_text = "{message}\n".format(message=self.event['Message'])
elif 'InsertionStrings' in self.event:
msg_text = "\n".join([i_str for i_str in self.event['InsertionStrings']
if i_str.strip()])
if self.notify_list:
msg_text += "\n{notify_list}".format(
notify_list=' '.join([" @" + n for n in self.notify_list]))
return msg_text
@property
def _alert_type(self):
event_type = self.event['Type']
# Convert to a Datadog alert type
if event_type == 'Warning':
return 'warning'
elif event_type == 'Error':
return 'error'
return 'info'
@property
def _aggregation_key(self):
return self.event['SourceName']
def to_event_dict(self):
event_dict = {
'timestamp': self.timestamp,
'event_type': EVENT_TYPE,
'msg_title': self._msg_title,
'msg_text': self._msg_text.strip(),
'aggregation_key': self._aggregation_key,
'alert_type': self._alert_type,
'source_type_name': SOURCE_TYPE_NAME,
'tags': self.tags
}
if self.hostname:
event_dict['host'] = self.hostname
return event_dict
def is_after(self, ts):
''' Compare this event's timestamp to a give timestamp. '''
if self.timestamp >= int(calendar.timegm(ts.timetuple())):
return True
return False
def _wmi_to_ts(self, wmi_ts):
''' Convert a wmi formatted timestamp into an epoch.
'''
year, month, day, hour, minute, second, microsecond, tz = to_time(wmi_ts)
tz_delta = timedelta(minutes=int(tz))
if '+' in wmi_ts:
tz_delta = - tz_delta
dt = datetime(year=year, month=month, day=day, hour=hour, minute=minute,
second=second, microsecond=microsecond) + tz_delta
return int(calendar.timegm(dt.timetuple()))
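    # For reference: WMI datetimes use the CIM_DATETIME layout
    # "yyyymmddHHMMSS.mmmmmm+UUU", where UUU is the offset from UTC in
    # minutes. For example (value illustrative), "20160912123456.000000+060"
    # is 2016-09-12 12:34:56 at UTC+60 minutes, which _wmi_to_ts converts to
    # the epoch second for 2016-09-12 11:34:56 UTC.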
def _tags(self, tags, event_code):
''' Inject additional tags into the list already supplied to LogEvent.
'''
tags_list = []
if tags is not None:
tags_list += list(tags)
tags_list.append("event_id:{event_id}".format(event_id=event_code))
return tags_list
| {
"content_hash": "91784b26afac1caa9b6d0fd24847b5ba",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 98,
"avg_line_length": 35.265765765765764,
"alnum_prop": 0.540937539915698,
"repo_name": "mderomph-coolblue/dd-agent",
"id": "3264bcd5beaa6323bf972b1e57547a53e70d13c2",
"size": "7829",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "checks.d/win32_event_log.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "2389"
},
{
"name": "HTML",
"bytes": "8758"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "2051024"
},
{
"name": "Ruby",
"bytes": "98141"
},
{
"name": "Shell",
"bytes": "54709"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
} |
"""Detection Engine used for detection tasks."""
from collections import Counter
from collections import defaultdict
from edgetpu.basic.basic_engine import BasicEngine
import numpy as np
from PIL import Image
class EmbeddingEngine(BasicEngine):
"""Engine used to obtain embeddings from headless mobilenets."""
  def __init__(self, model_path):
    """Creates an EmbeddingEngine with the given model.
Args:
model_path: String, path to TF-Lite Flatbuffer file.
Raises:
ValueError: An error occurred when model output is invalid.
"""
BasicEngine.__init__(self, model_path)
output_tensors_sizes = self.get_all_output_tensors_sizes()
if output_tensors_sizes.size != 1:
raise ValueError(
          ('Embedding model should have only 1 output tensor! '
           'This model has {}.'.format(output_tensors_sizes.size)))
def DetectWithImage(self, img):
"""Calculates embedding from an image.
Args:
img: PIL image object.
Returns:
Embedding vector as np.float32
Raises:
RuntimeError: when model's input tensor format is invalid.
"""
input_tensor_shape = self.get_input_tensor_shape()
if (input_tensor_shape.size != 4 or input_tensor_shape[3] != 3 or
input_tensor_shape[0] != 1):
raise RuntimeError(
'Invalid input tensor shape! Expected: [1, height, width, 3]')
required_image_size = (input_tensor_shape[2], input_tensor_shape[1])
with img.resize(required_image_size, Image.NEAREST) as resized_img:
input_tensor = np.asarray(resized_img).flatten()
return self.RunInference(input_tensor)[1]
class KNNEmbeddingEngine(EmbeddingEngine):
"""Extends embedding engine to also provide kNearest Neighbor detection.
This class maintains an in-memory store of embeddings and provides
functions to find k nearest neighbors against a query emedding.
"""
  def __init__(self, model_path, kNN=3):
    """Creates a KNNEmbeddingEngine with the given model.
    Args:
      model_path: String, path to TF-Lite Flatbuffer file.
      kNN: Integer, number of nearest neighbors used when classifying.
    Raises:
      ValueError: An error occurred when model output is invalid.
    """
EmbeddingEngine.__init__(self, model_path)
self.clear()
self._kNN = kNN
def clear(self):
"""Clear the store: forgets all stored embeddings."""
self._labels = []
self._embedding_map = defaultdict(list)
self._embeddings = None
def addEmbedding(self, emb, label):
"""Add an embedding vector to the store."""
normal = emb/np.sqrt((emb**2).sum()) # Normalize the vector
self._embedding_map[label].append(normal) # Add to store, under "label"
# Expand labelled blocks of embeddings for when we have less than kNN
# examples. Otherwise blocks that have more examples unfairly win.
emb_blocks = []
self._labels = [] # We'll be reconstructing the list of labels
for label, embeds in self._embedding_map.items():
emb_block = np.stack(embeds)
if emb_block.shape[0] < self._kNN:
emb_block = np.pad(emb_block,
[(0,self._kNN - emb_block.shape[0]), (0,0)],
mode="reflect")
emb_blocks.append(emb_block)
self._labels.extend([label]*emb_block.shape[0])
self._embeddings = np.concatenate(emb_blocks, axis=0)
def kNNEmbedding(self, query_emb):
"""Returns the self._kNN nearest neighbors to a query embedding."""
# If we have nothing stored, the answer is None
if self._embeddings is None: return None
# Normalize query embedding
query_emb = query_emb/np.sqrt((query_emb**2).sum())
    # We want a cosine distance from query to each stored embedding. A matrix
# multiplication can do this in one step, resulting in a vector of
# distances.
dists = np.matmul(self._embeddings, query_emb)
# If we have less than self._kNN distances we can only return that many.
kNN = min(len(dists), self._kNN)
# Get the N largest cosine similarities (larger means closer).
n_argmax = np.argpartition(dists, -kNN)[-kNN:]
# Get the corresponding labels associated with each distance.
labels = [self._labels[i] for i in n_argmax]
# Return the most common label over all self._kNN nearest neighbors.
most_common_label = Counter(labels).most_common(1)[0][0]
return most_common_label
def exampleCount(self):
"""Just returns the size of the embedding store."""
return sum(len(v) for v in self._embedding_map.values())
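# A minimal sketch of the cosine-similarity kNN vote performed by
# KNNEmbeddingEngine.kNNEmbedding(), re-implemented standalone with numpy so
# the math can be checked without an Edge TPU. The embeddings and labels are
# made up; running this block directly assumes the `edgetpu` import at the
# top of this module resolves.
if __name__ == '__main__':
  store = np.array([[1.0, 0.0], [0.9, 0.1], [0.0, 1.0]], dtype=np.float32)
  store = store / np.linalg.norm(store, axis=1, keepdims=True)  # normalize rows
  labels = ['cat', 'cat', 'dog']
  query = np.array([0.8, 0.2], dtype=np.float32)
  query = query / np.linalg.norm(query)
  dists = np.matmul(store, query)  # cosine similarity to each stored embedding
  kNN = min(2, len(dists))
  nearest = np.argpartition(dists, -kNN)[-kNN:]  # indices of the kNN closest
  votes = [labels[i] for i in nearest]
  print(Counter(votes).most_common(1)[0][0])  # -> 'cat'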
| {
"content_hash": "7acbe909cce7bf304cb7d2a252c693a7",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 78,
"avg_line_length": 34.56153846153846,
"alnum_prop": 0.6708212775428444,
"repo_name": "google-coral/project-teachable",
"id": "03951e9ca94bf9318f1504d8a46622720d9accff",
"size": "5069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "embedding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25912"
},
{
"name": "Shell",
"bytes": "2817"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.contrib.auth import authenticate
from tiny_rest.models import Token
User = get_user_model()
class TestToken(TestCase):
def test_create_model(self):
user = User.objects.create_user('user', 'user@email.com', '123456')
token = Token.objects.create(user=user)
self.assertFalse(authenticate(token='invalid-key'))
self.assertTrue(authenticate(token=token.key))
self.assertEqual(authenticate(token=token.key), user)
| {
"content_hash": "a15c35434812902905188cc3374dc197",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 31.105263157894736,
"alnum_prop": 0.7258883248730964,
"repo_name": "allisson/django-tiny-rest",
"id": "6652633d468091ae45253432e96404a8095786ae",
"size": "615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tiny_rest/tests/test_auth_backends.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53979"
},
{
"name": "Shell",
"bytes": "105"
}
],
"symlink_target": ""
} |
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import mock
from cassandra import timestamps
import time
from threading import Thread, Lock
class _TimestampTestMixin(object):
@mock.patch('cassandra.timestamps.time')
def _call_and_check_results(self,
patched_time_module,
system_time_expected_stamp_pairs,
timestamp_generator=None):
"""
For each element in an iterable of (system_time, expected_timestamp)
pairs, call a :class:`cassandra.timestamps.MonotonicTimestampGenerator`
with system_times as the underlying time.time() result, then assert
that the result is expected_timestamp. Skips the check if
expected_timestamp is None.
"""
patched_time_module.time = mock.Mock()
system_times, expected_timestamps = zip(*system_time_expected_stamp_pairs)
patched_time_module.time.side_effect = system_times
tsg = timestamp_generator or timestamps.MonotonicTimestampGenerator()
for expected in expected_timestamps:
actual = tsg()
if expected is not None:
self.assertEqual(actual, expected)
# assert we patched timestamps.time.time correctly
with self.assertRaises(StopIteration):
tsg()
class TestTimestampGeneratorOutput(unittest.TestCase, _TimestampTestMixin):
"""
Mock time.time and test the output of MonotonicTimestampGenerator.__call__
given different patterns of changing results.
"""
def test_timestamps_during_and_after_same_system_time(self):
"""
Test that MonotonicTimestampGenerator's output increases by 1 when the
underlying system time is the same, then returns to normal when the
system time increases again.
@since 3.8.0
@expected_result Timestamps should increase monotonically over repeated system time.
@test_category timing
"""
self._call_and_check_results(
system_time_expected_stamp_pairs=(
(15.0, 15 * 1e6),
(15.0, 15 * 1e6 + 1),
(15.0, 15 * 1e6 + 2),
(15.01, 15.01 * 1e6))
)
def test_timestamps_during_and_after_backwards_system_time(self):
"""
Test that MonotonicTimestampGenerator's output increases by 1 when the
underlying system time goes backward, then returns to normal when the
system time increases again.
@since 3.8.0
@expected_result Timestamps should increase monotonically over system time going backwards.
@test_category timing
"""
self._call_and_check_results(
system_time_expected_stamp_pairs=(
(15.0, 15 * 1e6),
(13.0, 15 * 1e6 + 1),
(14.0, 15 * 1e6 + 2),
(13.5, 15 * 1e6 + 3),
(15.01, 15.01 * 1e6))
)
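# Illustrative sketch, not the driver's actual implementation: the monotonic
# behaviour asserted by the tests above reduces to "use the current clock
# reading in microseconds, but never repeat and never go backwards".
def _monotonic_rule_sketch(now_seconds, last_timestamp):
    return max(int(now_seconds * 1e6), last_timestamp + 1)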
class TestTimestampGeneratorLogging(unittest.TestCase):
def setUp(self):
self.log_patcher = mock.patch('cassandra.timestamps.log')
self.addCleanup(self.log_patcher.stop)
self.patched_timestamp_log = self.log_patcher.start()
def assertLastCallArgRegex(self, call, pattern):
last_warn_args, last_warn_kwargs = call
self.assertEqual(len(last_warn_args), 1)
self.assertEqual(len(last_warn_kwargs), 0)
self.assertRegexpMatches(
last_warn_args[0],
pattern,
)
def test_basic_log_content(self):
"""
Tests there are logs
@since 3.8.0
@jira_ticket PYTHON-676
@expected_result logs
@test_category timing
"""
tsg = timestamps.MonotonicTimestampGenerator(
warning_threshold=1e-6,
warning_interval=1e-6
)
        # The unit of _last_warn is seconds
tsg._last_warn = 12
tsg._next_timestamp(20, tsg.last)
self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0)
tsg._next_timestamp(16, tsg.last)
self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1)
self.assertLastCallArgRegex(
self.patched_timestamp_log.warn.call_args,
r'Clock skew detected:.*\b16\b.*\b4\b.*\b20\b'
)
def test_disable_logging(self):
"""
Tests there are no logs when there is a clock skew if logging is disabled
@since 3.8.0
@jira_ticket PYTHON-676
@expected_result no logs
@test_category timing
"""
no_warn_tsg = timestamps.MonotonicTimestampGenerator(warn_on_drift=False)
no_warn_tsg.last = 100
no_warn_tsg._next_timestamp(99, no_warn_tsg.last)
self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0)
def test_warning_threshold_respected_no_logging(self):
"""
Tests there are no logs if `warning_threshold` is not exceeded
@since 3.8.0
@jira_ticket PYTHON-676
@expected_result no logs
@test_category timing
"""
tsg = timestamps.MonotonicTimestampGenerator(
warning_threshold=2e-6,
)
tsg.last, tsg._last_warn = 100, 97
tsg._next_timestamp(98, tsg.last)
self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 0)
def test_warning_threshold_respected_logs(self):
"""
Tests there are logs if `warning_threshold` is exceeded
@since 3.8.0
@jira_ticket PYTHON-676
@expected_result logs
@test_category timing
"""
tsg = timestamps.MonotonicTimestampGenerator(
warning_threshold=1e-6,
warning_interval=1e-6
)
tsg.last, tsg._last_warn = 100, 97
tsg._next_timestamp(98, tsg.last)
self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1)
def test_warning_interval_respected_no_logging(self):
"""
Tests there is only one log in the interval `warning_interval`
@since 3.8.0
@jira_ticket PYTHON-676
@expected_result one log
@test_category timing
"""
tsg = timestamps.MonotonicTimestampGenerator(
warning_threshold=1e-6,
warning_interval=2e-6
)
tsg.last = 100
tsg._next_timestamp(70, tsg.last)
self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1)
tsg._next_timestamp(71, tsg.last)
self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1)
def test_warning_interval_respected_logs(self):
"""
Tests there are logs again if the
        clock skew happens after `warning_interval`
@since 3.8.0
@jira_ticket PYTHON-676
@expected_result logs
@test_category timing
"""
tsg = timestamps.MonotonicTimestampGenerator(
warning_interval=1e-6,
warning_threshold=1e-6,
)
tsg.last = 100
tsg._next_timestamp(70, tsg.last)
self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 1)
tsg._next_timestamp(72, tsg.last)
self.assertEqual(len(self.patched_timestamp_log.warn.call_args_list), 2)
class TestTimestampGeneratorMultipleThreads(unittest.TestCase):
def test_should_generate_incrementing_timestamps_for_all_threads(self):
"""
Tests when time is "stopped", values are assigned incrementally
@since 3.8.0
@jira_ticket PYTHON-676
@expected_result the returned values increase
@test_category timing
"""
lock = Lock()
def request_time():
for _ in range(timestamp_to_generate):
timestamp = tsg()
with lock:
generated_timestamps.append(timestamp)
tsg = timestamps.MonotonicTimestampGenerator()
fixed_time = 1
num_threads = 5
timestamp_to_generate = 1000
generated_timestamps = []
with mock.patch('time.time', new=mock.Mock(return_value=fixed_time)):
threads = []
for _ in range(num_threads):
threads.append(Thread(target=request_time))
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(len(generated_timestamps), num_threads * timestamp_to_generate)
for i, timestamp in enumerate(sorted(generated_timestamps)):
self.assertEqual(int(i + 1e6), timestamp)
| {
"content_hash": "c52a24f2d84a96aac29424fe23b61669",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 99,
"avg_line_length": 32.86363636363637,
"alnum_prop": 0.6038497003227293,
"repo_name": "coldeasy/python-driver",
"id": "50c0ba92ecb4d246fa218ed99b6433fd8a7631a6",
"size": "9256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_timestamps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28924"
},
{
"name": "PowerShell",
"bytes": "3817"
},
{
"name": "Python",
"bytes": "2238540"
}
],
"symlink_target": ""
} |
"""
AmazonScrape
Usage: amazon_scrape.py [--number=<count>] [--output=output.xlsx]
Select and scrape a given number of Amazon book listings.
"""
from docopt import docopt
from api_key import *
import requests
from bs4 import BeautifulSoup
import json
import time
import bottlenose
from openpyxl import Workbook
def random_amazon_link(number):
"""Use the site http://www.bookbookgoose.com/ to generate a random
amazon link. Grab the page and slice it apart for a link"""
r = requests.get(
'http://www.bookbookgoose.com/v1/get_books?n={0}'.format(number)
)
response = r.json()
books = []
for item in response:
book = {
'author':item[0],
'title':item[1],
'link':item[2],
}
books.append(book)
return books
def grab_amazon_data(link):
"""Grab data from an amazon link. Data to get:
1) Price (amazon price-watch for hardback)
2) Product score/user rating
3) Number of ratings
4) Score of first user rating
5) Date of first user rating
6) Shipping weight
7) Time since first user rating
8) Prime Product or not
return as dictionary of values
"""
amazon = bottlenose.Amazon(
AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY,
AWS_ASSOCIATE_TAG,
)
product = {}
response = amazon.ItemLookup(ItemId=find_amazon_id(link), ResponseGroup="Large")
soup = BeautifulSoup(response, "html.parser")
product['sales_rank'] = int(soup.salesrank.string)
product['title'] = soup.title.string
product['author'] = soup.author.string
product['binding'] = soup.binding.string
product['has_reviews'] = soup.hasreviews.string
product['reviews_url'] = soup.customerreviews.iframeurl.string
product['ship_weight'] = float(soup.find(units="hundredths-pounds").string)/100.00
product['price'] = soup.listprice.formattedprice.string
product['product_url'] = link
return product
def find_amazon_id(link):
"""Find amazon item id from a passed link
ONLY WORKS FOR BOOKS RIGHT NOW
sample book url:
http://www.amazon.com/Carbon-isotope-fractionation-trophic-transfer/dp/B000RR3CXS%3FSubscriptionId%3D1XJTRNMGKSD3T57YM002%26tag%3Dquasika-20%26linkCode%3Dxm2%26camp%3D2025%26creative%3D165953%26creativeASIN%3DB000RR3CXS
"""
return link.split('/dp/')[1].split('%3F')[0]
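# Illustrative example, not part of the original script: applied to the sample
# URL from the docstring above, the split isolates the ASIN that sits between
# '/dp/' and the first '%3F', e.g.
#   find_amazon_id('http://www.amazon.com/.../dp/B000RR3CXS%3FSubscriptionId%3D...')
#   # -> 'B000RR3CXS'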
def build_excel_doc(data,filename="output.xlsx"):
"""Take the data and output an excel file"""
print 'Building workbook....',
wb = Workbook(guess_types=True)
ws = wb.active
ws.title = 'Products'
    # fill in column headers
    for c, k in enumerate(data[0]):
        col = c + 1
        _ = ws.cell(column=col, row=1, value=k)
        # for each column fill in data down the line
        for r, v in enumerate(data):
            row = r + 2
            _ = ws.cell(column=col, row=row, value=v[k])
wb.save(filename)
print 'done!'
def main():
arguments = docopt(__doc__)
    count = int(arguments['--number'] or 10)
print 'Will grab ' + str(count) + ' links from Amazon'
output_data = []
while len(output_data) < count:
links = random_amazon_link(max(50,count))
for amazon_link in links:
if len(output_data) < count:
try:
data = grab_amazon_data(amazon_link['link'])
output_data.append(data)
print 'Successfully grabbed ' + data['title']
except:
print 'ERROR GRABBING #' + find_amazon_id(amazon_link['link'])
time.sleep(1.01)
    build_excel_doc(output_data, filename=arguments.get('--output') or "output.xlsx")
if __name__ == '__main__':
main() | {
"content_hash": "39dbdde1191ff1bf4bce381bc0e3a0c0",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 223,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.6178191489361702,
"repo_name": "markperdomo/amazon_scrape",
"id": "33a6152e69e53f593b70eca1faa4a6b6352b40dd",
"size": "3778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amazon_scrape.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3778"
}
],
"symlink_target": ""
} |
import collections
import sys
import mock
from six import moves
from cinderclient import exceptions
from cinderclient import utils
from cinderclient import base
from cinderclient.tests.unit import utils as test_utils
UUID = '8e8ec658-c7b0-4243-bdf8-6f7f2952c0d0'
class FakeResource(object):
NAME_ATTR = 'name'
def __init__(self, _id, properties):
self.id = _id
try:
self.name = properties['name']
except KeyError:
pass
class FakeManager(base.ManagerWithFind):
resource_class = FakeResource
resources = [
FakeResource('1234', {'name': 'entity_one'}),
FakeResource(UUID, {'name': 'entity_two'}),
FakeResource('5678', {'name': '9876'})
]
def get(self, resource_id):
for resource in self.resources:
if resource.id == str(resource_id):
return resource
raise exceptions.NotFound(resource_id)
def list(self, search_opts):
return self.resources
class FakeDisplayResource(object):
NAME_ATTR = 'display_name'
def __init__(self, _id, properties):
self.id = _id
try:
self.display_name = properties['display_name']
except KeyError:
pass
class FakeDisplayManager(FakeManager):
resource_class = FakeDisplayResource
resources = [
FakeDisplayResource('4242', {'display_name': 'entity_three'}),
]
class FindResourceTestCase(test_utils.TestCase):
def setUp(self):
super(FindResourceTestCase, self).setUp()
self.manager = FakeManager(None)
def test_find_none(self):
self.manager.find = mock.Mock(side_effect=self.manager.find)
self.assertRaises(exceptions.CommandError,
utils.find_resource,
self.manager,
'asdf')
self.assertEqual(2, self.manager.find.call_count)
def test_find_by_integer_id(self):
output = utils.find_resource(self.manager, 1234)
self.assertEqual(self.manager.get('1234'), output)
def test_find_by_str_id(self):
output = utils.find_resource(self.manager, '1234')
self.assertEqual(self.manager.get('1234'), output)
def test_find_by_uuid(self):
output = utils.find_resource(self.manager, UUID)
self.assertEqual(self.manager.get(UUID), output)
def test_find_by_str_name(self):
output = utils.find_resource(self.manager, 'entity_one')
self.assertEqual(self.manager.get('1234'), output)
def test_find_by_str_displayname(self):
display_manager = FakeDisplayManager(None)
output = utils.find_resource(display_manager, 'entity_three')
self.assertEqual(display_manager.get('4242'), output)
class CaptureStdout(object):
"""Context manager for capturing stdout from statements in its block."""
def __enter__(self):
self.real_stdout = sys.stdout
self.stringio = moves.StringIO()
sys.stdout = self.stringio
return self
def __exit__(self, *args):
sys.stdout = self.real_stdout
self.stringio.seek(0)
self.read = self.stringio.read
class PrintListTestCase(test_utils.TestCase):
def test_print_list_with_list(self):
Row = collections.namedtuple('Row', ['a', 'b'])
to_print = [Row(a=3, b=4), Row(a=1, b=2)]
with CaptureStdout() as cso:
utils.print_list(to_print, ['a', 'b'])
# Output should be sorted by the first key (a)
self.assertEqual("""\
+---+---+
| a | b |
+---+---+
| 1 | 2 |
| 3 | 4 |
+---+---+
""", cso.read())
def test_print_list_with_None_data(self):
Row = collections.namedtuple('Row', ['a', 'b'])
to_print = [Row(a=3, b=None), Row(a=1, b=2)]
with CaptureStdout() as cso:
utils.print_list(to_print, ['a', 'b'])
# Output should be sorted by the first key (a)
self.assertEqual("""\
+---+---+
| a | b |
+---+---+
| 1 | 2 |
| 3 | - |
+---+---+
""", cso.read())
def test_print_list_with_list_sortby(self):
Row = collections.namedtuple('Row', ['a', 'b'])
to_print = [Row(a=4, b=3), Row(a=2, b=1)]
with CaptureStdout() as cso:
utils.print_list(to_print, ['a', 'b'], sortby_index=1)
# Output should be sorted by the second key (b)
self.assertEqual("""\
+---+---+
| a | b |
+---+---+
| 2 | 1 |
| 4 | 3 |
+---+---+
""", cso.read())
def test_print_list_with_list_no_sort(self):
Row = collections.namedtuple('Row', ['a', 'b'])
to_print = [Row(a=3, b=4), Row(a=1, b=2)]
with CaptureStdout() as cso:
utils.print_list(to_print, ['a', 'b'], sortby_index=None)
# Output should be in the order given
self.assertEqual("""\
+---+---+
| a | b |
+---+---+
| 3 | 4 |
| 1 | 2 |
+---+---+
""", cso.read())
def test_print_list_with_generator(self):
Row = collections.namedtuple('Row', ['a', 'b'])
def gen_rows():
for row in [Row(a=1, b=2), Row(a=3, b=4)]:
yield row
with CaptureStdout() as cso:
utils.print_list(gen_rows(), ['a', 'b'])
self.assertEqual("""\
+---+---+
| a | b |
+---+---+
| 1 | 2 |
| 3 | 4 |
+---+---+
""", cso.read())
def test_print_list_with_return(self):
Row = collections.namedtuple('Row', ['a', 'b'])
to_print = [Row(a=3, b='a\r'), Row(a=1, b='c\rd')]
with CaptureStdout() as cso:
utils.print_list(to_print, ['a', 'b'])
# Output should be sorted by the first key (a)
self.assertEqual("""\
+---+-----+
| a | b |
+---+-----+
| 1 | c d |
| 3 | a |
+---+-----+
""", cso.read())
class PrintDictTestCase(test_utils.TestCase):
def test_print_dict_with_return(self):
d = {'a': 'A', 'b': 'B', 'c': 'C', 'd': 'test\rcarriage\n\rreturn'}
with CaptureStdout() as cso:
utils.print_dict(d)
self.assertEqual("""\
+----------+---------------+
| Property | Value |
+----------+---------------+
| a | A |
| b | B |
| c | C |
| d | test carriage |
| | return |
+----------+---------------+
""", cso.read())
| {
"content_hash": "7369af7b00e8e0a6737ca85dfb326e47",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 76,
"avg_line_length": 27.47136563876652,
"alnum_prop": 0.5423348300192431,
"repo_name": "scottdangelo/cinderclient-api-microversions",
"id": "2d2ebd1498b8129412c6f442205c81dbf19b310b",
"size": "6782",
"binary": false,
"copies": "2",
"ref": "refs/heads/cinderclient-api-microversions",
"path": "cinderclient/tests/unit/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "829037"
},
{
"name": "Shell",
"bytes": "9081"
}
],
"symlink_target": ""
} |
"""Script to parse Protein files
"""
import sys
from metadata_updates import update_metadata_data_list
def parse_file(project_id, bq_dataset, bucket_name, file_data, filename, outfilename, metadata, cloudsql_tables, config, logger):
logger.log_text('uduprocessor: Begin low-level processing {0}'.format(filename), severity='INFO')
sample_metadata_list = []
new_metadata = metadata.copy()
new_metadata['sample_barcode'] = 'low_level_data_barcode'
new_metadata['file_path'] = file_data['FILENAME']
sample_metadata_list.append(new_metadata)
update_metadata_data_list(config, cloudsql_tables['METADATA_DATA'], sample_metadata_list)
def get_column_mapping(columns):
column_map = {}
for column in columns:
if 'MAP_TO' in column.keys():
# pandas automatically replaces spaces with underscores, so we will too,
# then map them to provided column headers
column_map[column['NAME'].replace(' ', '_')] = column['MAP_TO']
return column_map
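# Illustrative example, not part of the original module, using a made-up column
# config: headers are matched after pandas' space-to-underscore normalisation,
# and columns without a MAP_TO entry are skipped.
#   columns = [{'NAME': 'Sample Barcode', 'MAP_TO': 'sample_barcode'},
#              {'NAME': 'Notes'}]
#   get_column_mapping(columns)  # -> {'Sample_Barcode': 'sample_barcode'}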
if __name__ == '__main__':
project_id = sys.argv[1]
bucket_name = sys.argv[2]
filename = sys.argv[3]
outfilename = sys.argv[4]
metadata = {
'AliquotBarcode':'AliquotBarcode',
'SampleBarcode':'SampleBarcode',
'CaseBarcode':'CaseBarcode',
'Study':'Study',
'SampleTypeLetterCode':'SampleTypeLetterCode',
'Platform':'Platform'
}
| {
"content_hash": "4ea4b4ea6f7a1cb95f11a51056fd238b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 129,
"avg_line_length": 35.4,
"alnum_prop": 0.6567796610169492,
"repo_name": "isb-cgc/User-Data-Processor",
"id": "fb5fb58bf588f36f49696f9de08e5cc2b026d40f",
"size": "2034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isb_cgc_user_data/user_gen/low_level_processing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "124684"
},
{
"name": "Shell",
"bytes": "12616"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from .responses import SecretsManagerResponse
url_bases = ["https?://secretsmanager.(.+).amazonaws.com"]
url_paths = {"{0}/$": SecretsManagerResponse.dispatch}
| {
"content_hash": "ec442ceb47a7c59146c0546c4dfd0b6a",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 58,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.7425742574257426,
"repo_name": "william-richard/moto",
"id": "57cbac0e401400289fbfcf56cb4302ff84cdaf50",
"size": "202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/secretsmanager/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "6637538"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
} |
from wptserve.utils import isomorphic_encode
def get_response(raw_headers, filter_value, filter_name):
result = b""
# raw_headers.raw_items() returns the (name, value) header pairs as
# tuples of strings. Convert them to bytes before comparing.
# TODO: Get access to the raw headers, so that whitespace between
# name, ":" and value can also be checked:
# https://github.com/web-platform-tests/wpt/issues/28756
for field in raw_headers.raw_items():
name = isomorphic_encode(field[0])
value = isomorphic_encode(field[1])
if filter_value:
if value == filter_value:
result += name + b","
elif name.lower() == filter_name:
result += name + b": " + value + b"\n"
return result
def main(request, response):
headers = []
if b"cors" in request.GET:
headers.append((b"Access-Control-Allow-Origin", b"*"))
headers.append((b"Access-Control-Allow-Credentials", b"true"))
headers.append((b"Access-Control-Allow-Methods", b"GET, POST, PUT, FOO"))
headers.append((b"Access-Control-Allow-Headers", b"x-test, x-foo"))
headers.append((
b"Access-Control-Expose-Headers",
b"x-request-method, x-request-content-type, x-request-query, x-request-content-length"))
headers.append((b"content-type", b"text/plain"))
filter_value = request.GET.first(b"filter_value", b"")
filter_name = request.GET.first(b"filter_name", b"").lower()
result = get_response(request.raw_headers, filter_value, filter_name)
return headers, result
| {
"content_hash": "aa53843f9bd86762b5f0b6c8864823de",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 100,
"avg_line_length": 44.416666666666664,
"alnum_prop": 0.6397748592870544,
"repo_name": "nwjs/chromium.src",
"id": "123d637134dc624f0e9f0b3ddfe35d07e62920a3",
"size": "1599",
"binary": false,
"copies": "21",
"ref": "refs/heads/nw70",
"path": "third_party/blink/web_tests/external/wpt/xhr/resources/inspect-headers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import time
from typing import TYPE_CHECKING, List, Optional, Union, Dict, Any
from decimal import Decimal
import attr
from .json_db import StoredObject
from .i18n import _
from .util import age, InvoiceError
from .lnaddr import lndecode, LnAddr
from . import constants
from .bitcoin import COIN, TOTAL_COIN_SUPPLY_LIMIT_IN_BTC
from .bitcoin import address_to_script
from .transaction import PartialTxOutput
from .crypto import sha256d
if TYPE_CHECKING:
from .paymentrequest import PaymentRequest
# convention: 'invoices' = outgoing , 'request' = incoming
# status of payment requests
PR_UNPAID = 0 # if onchain: invoice amt not reached by txs in mempool+chain. if LN: invoice not paid.
PR_EXPIRED = 1 # invoice is unpaid and expiry time reached
PR_UNKNOWN = 2 # e.g. invoice not found
PR_PAID = 3 # if onchain: paid and mined (1 conf). if LN: invoice is paid.
PR_INFLIGHT = 4 # only for LN. payment attempt in progress
PR_FAILED = 5 # only for LN. we attempted to pay it, but all attempts failed
PR_ROUTING = 6 # only for LN. *unused* atm.
PR_UNCONFIRMED = 7 # only onchain. invoice is satisfied but tx is not mined yet.
pr_color = {
PR_UNPAID: (.7, .7, .7, 1),
PR_PAID: (.2, .9, .2, 1),
PR_UNKNOWN: (.7, .7, .7, 1),
PR_EXPIRED: (.9, .2, .2, 1),
PR_INFLIGHT: (.9, .6, .3, 1),
PR_FAILED: (.9, .2, .2, 1),
PR_ROUTING: (.9, .6, .3, 1),
PR_UNCONFIRMED: (.9, .6, .3, 1),
}
pr_tooltips = {
PR_UNPAID:_('Unpaid'),
PR_PAID:_('Paid'),
PR_UNKNOWN:_('Unknown'),
PR_EXPIRED:_('Expired'),
PR_INFLIGHT:_('In progress'),
PR_FAILED:_('Failed'),
PR_ROUTING: _('Computing route...'),
PR_UNCONFIRMED: _('Unconfirmed'),
}
PR_DEFAULT_EXPIRATION_WHEN_CREATING = 24*60*60 # 1 day
pr_expiration_values = {
0: _('Never'),
10*60: _('10 minutes'),
60*60: _('1 hour'),
24*60*60: _('1 day'),
7*24*60*60: _('1 week'),
}
assert PR_DEFAULT_EXPIRATION_WHEN_CREATING in pr_expiration_values
def _decode_outputs(outputs) -> Optional[List[PartialTxOutput]]:
if outputs is None:
return None
ret = []
for output in outputs:
if not isinstance(output, PartialTxOutput):
output = PartialTxOutput.from_legacy_tuple(*output)
ret.append(output)
return ret
# hack: BOLT-11 is not really clear on what an expiry of 0 means.
# It probably interprets it as 0 seconds, so already expired...
# Our higher level invoices code however uses 0 for "never".
# Hence set some high expiration here
LN_EXPIRY_NEVER = 100 * 365 * 24 * 60 * 60 # 100 years
@attr.s
class Invoice(StoredObject):
# mandatory fields
amount_msat = attr.ib(kw_only=True) # type: Optional[Union[int, str]] # can be '!' or None
message = attr.ib(type=str, kw_only=True)
time = attr.ib(type=int, kw_only=True, validator=attr.validators.instance_of(int)) # timestamp of the invoice
exp = attr.ib(type=int, kw_only=True, validator=attr.validators.instance_of(int)) # expiration delay (relative). 0 means never
# optional fields.
# an request (incoming) can be satisfied onchain, using lightning or using a swap
# an invoice (outgoing) is constructed from a source: bip21, bip70, lnaddr
# onchain only
outputs = attr.ib(kw_only=True, converter=_decode_outputs) # type: Optional[List[PartialTxOutput]]
height = attr.ib(type=int, kw_only=True, validator=attr.validators.instance_of(int)) # only for receiving
bip70 = attr.ib(type=str, kw_only=True) # type: Optional[str]
#bip70_requestor = attr.ib(type=str, kw_only=True) # type: Optional[str]
# lightning only
lightning_invoice = attr.ib(type=str, kw_only=True) # type: Optional[str]
__lnaddr = None
def is_lightning(self):
return self.lightning_invoice is not None
def get_status_str(self, status):
status_str = pr_tooltips[status]
if status == PR_UNPAID:
if self.exp > 0 and self.exp != LN_EXPIRY_NEVER:
expiration = self.get_expiration_date()
status_str = _('Expires') + ' ' + age(expiration, include_seconds=True)
return status_str
def get_address(self) -> Optional[str]:
"""returns the first address, to be displayed in GUI"""
if self.is_lightning():
return self._lnaddr.get_fallback_address() or None
else:
return self.outputs[0].address
def get_outputs(self):
if self.is_lightning():
address = self.get_address()
amount = self.get_amount_sat()
if address and amount is not None:
outputs = [PartialTxOutput.from_address_and_value(address, int(amount))]
else:
outputs = []
else:
outputs = self.outputs
return outputs
def can_be_paid_onchain(self) -> bool:
if self.is_lightning():
return bool(self._lnaddr.get_fallback_address())
else:
return True
def get_expiration_date(self):
# 0 means never
return self.exp + self.time if self.exp else 0
def has_expired(self) -> bool:
exp = self.get_expiration_date()
return bool(exp) and exp < time.time()
def get_amount_msat(self) -> Union[int, str, None]:
return self.amount_msat
def get_time(self):
return self.time
def get_message(self):
return self.message
def get_amount_sat(self) -> Union[int, str, None]:
"""
Returns an integer satoshi amount, or '!' or None.
Callers who need msat precision should call get_amount_msat()
"""
amount_msat = self.amount_msat
if amount_msat in [None, "!"]:
return amount_msat
return int(amount_msat // 1000)
def get_bip21_URI(self, *, include_lightning: bool = False) -> Optional[str]:
from electrum_ltc.util import create_bip21_uri
addr = self.get_address()
amount = self.get_amount_sat()
if amount is not None:
amount = int(amount)
message = self.message
extra = {}
if self.time and self.exp:
extra['time'] = str(int(self.time))
extra['exp'] = str(int(self.exp))
lightning = self.lightning_invoice if include_lightning else None
if lightning:
extra['lightning'] = lightning
if not addr and lightning:
return "litecoin:?lightning="+lightning
if not addr and not lightning:
return None
uri = create_bip21_uri(addr, amount, message, extra_query_params=extra)
return str(uri)
@lightning_invoice.validator
def _validate_invoice_str(self, attribute, value):
if value is not None:
lndecode(value) # this checks the str can be decoded
@amount_msat.validator
def _validate_amount(self, attribute, value):
if value is None:
return
if isinstance(value, int):
if not (0 <= value <= TOTAL_COIN_SUPPLY_LIMIT_IN_BTC * COIN * 1000):
raise InvoiceError(f"amount is out-of-bounds: {value!r} msat")
elif isinstance(value, str):
if value != '!':
raise InvoiceError(f"unexpected amount: {value!r}")
else:
raise InvoiceError(f"unexpected amount: {value!r}")
@property
def _lnaddr(self) -> LnAddr:
if self.__lnaddr is None:
self.__lnaddr = lndecode(self.lightning_invoice)
return self.__lnaddr
@property
def rhash(self) -> str:
return self._lnaddr.paymenthash.hex()
@classmethod
def from_bech32(cls, invoice: str) -> 'Invoice':
"""Constructs Invoice object from BOLT-11 string.
Might raise InvoiceError.
"""
try:
lnaddr = lndecode(invoice)
except Exception as e:
raise InvoiceError(e) from e
amount_msat = lnaddr.get_amount_msat()
timestamp = lnaddr.date
exp_delay = lnaddr.get_expiry()
message = lnaddr.get_description()
return Invoice(
message=message,
amount_msat=amount_msat,
time=timestamp,
exp=exp_delay,
outputs=None,
bip70=None,
height=0,
lightning_invoice=invoice,
)
@classmethod
def from_bip70_payreq(cls, pr: 'PaymentRequest', *, height: int = 0) -> 'Invoice':
return Invoice(
amount_msat=pr.get_amount()*1000,
message=pr.get_memo(),
time=pr.get_time(),
exp=pr.get_expiration_date() - pr.get_time(),
outputs=pr.get_outputs(),
bip70=pr.raw.hex(),
height=height,
lightning_invoice=None,
)
def to_debug_json(self) -> Dict[str, Any]:
d = self.to_json()
d.update({
'pubkey': self._lnaddr.pubkey.serialize().hex(),
'amount_LTC': str(self._lnaddr.amount),
'rhash': self._lnaddr.paymenthash.hex(),
'description': self._lnaddr.get_description(),
'exp': self._lnaddr.get_expiry(),
'time': self._lnaddr.date,
# 'tags': str(lnaddr.tags),
})
return d
def get_id(self) -> str:
if self.is_lightning():
return self.rhash
else: # on-chain
return get_id_from_onchain_outputs(outputs=self.get_outputs(), timestamp=self.time)
def get_id_from_onchain_outputs(outputs: List[PartialTxOutput], *, timestamp: int) -> str:
outputs_str = "\n".join(f"{txout.scriptpubkey.hex()}, {txout.value}" for txout in outputs)
return sha256d(outputs_str + "%d" % timestamp).hex()[0:10]
| {
"content_hash": "6fb1a2e24e1a833707290bae64a93285",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 132,
"avg_line_length": 34.73571428571429,
"alnum_prop": 0.6009664815957229,
"repo_name": "pooler/electrum-ltc",
"id": "04d908cb1ee331ea00c2ce237ecd82e5a865af25",
"size": "9726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum_ltc/invoices.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "13024"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "NSIS",
"bytes": "7354"
},
{
"name": "Python",
"bytes": "5325268"
},
{
"name": "QML",
"bytes": "318745"
},
{
"name": "Ruby",
"bytes": "16856"
},
{
"name": "Shell",
"bytes": "105672"
},
{
"name": "kvlang",
"bytes": "70748"
}
],
"symlink_target": ""
} |
DATABASE = 'postgresql://postgres:postgres@localhost/bible'
| {
"content_hash": "f5516cb7b44bbefb15ae013b62271940",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 59,
"avg_line_length": 60,
"alnum_prop": 0.8,
"repo_name": "tulustul/BibleReader",
"id": "73cdb6260932e8fc6aa2af0b0af4e105186130dc",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/settings/dev.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18749"
},
{
"name": "HTML",
"bytes": "6186"
},
{
"name": "JavaScript",
"bytes": "29139"
},
{
"name": "Makefile",
"bytes": "276"
},
{
"name": "Python",
"bytes": "35945"
},
{
"name": "TypeScript",
"bytes": "15750"
}
],
"symlink_target": ""
} |
import random
def xor_encode(data, seed_key=None):
"""
Encode data using the XOR algorithm. This is not suitable for encryption
purposes and should only be used for light obfuscation. The key is
	prepended to the data as the first byte, as required by the
	:py:func:`.xor_decode` function.
:param str data: The data to encode.
:param int seed_key: The optional value to use as the for XOR key.
:return: The encoded data.
:rtype: str
"""
seed_key = (seed_key or random.randint(0, 255))
data = map(ord, data)
encoded_data = [seed_key]
last_key = seed_key
for b in data:
e = (b ^ last_key)
last_key = e
encoded_data.append(e)
return ''.join(map(chr, encoded_data))
def xor_decode(data):
"""
Decode data using the XOR algorithm. This is not suitable for encryption
purposes and should only be used for light obfuscation. This function
requires the key to be set as the first byte of *data* as done in the
:py:func:`.xor_encode` function.
:param str data: The data to decode.
:return: The decoded data.
:rtype: str
"""
data = list(map(ord, data))
last_key = data.pop(0)
decoded_data = []
for b in data:
d = (b ^ last_key)
last_key = b
decoded_data.append(d)
return ''.join(map(chr, decoded_data))
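# Illustrative round-trip, not part of the original module: the first byte of
# the encoded string carries the key, and decoding restores the original data.
#   encoded = xor_encode('secret message', seed_key=42)
#   ord(encoded[0])      # -> 42
#   xor_decode(encoded)  # -> 'secret message'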
| {
"content_hash": "a49a2aec8d7c2f6014e73fa2d2680abe",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 73,
"avg_line_length": 29.186046511627907,
"alnum_prop": 0.6972111553784861,
"repo_name": "xujun10110/king-phisher",
"id": "4e64ca2d62d01fb8eba66941f8e8a4a1d817c27e",
"size": "2827",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "king_phisher/xor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13650"
},
{
"name": "HTML",
"bytes": "6676"
},
{
"name": "JavaScript",
"bytes": "1328"
},
{
"name": "Mako",
"bytes": "574"
},
{
"name": "Python",
"bytes": "556460"
},
{
"name": "Ruby",
"bytes": "6757"
},
{
"name": "Shell",
"bytes": "4993"
}
],
"symlink_target": ""
} |
import mock
from heat.common import exception
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.keystone import role_assignments
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
RESOURCE_TYPE = 'OS::Keystone::DummyRoleAssignment'
keystone_role_assignment_template = {
'heat_template_version': '2013-05-23',
'resources': {
'test_role_assignment': {
'type': RESOURCE_TYPE,
'properties': {
'roles': [
{
'role': 'role_1',
'project': 'project_1',
},
{
'role': 'role_1',
'domain': 'domain_1'
}
]
}
}
}
}
class KeystoneRoleAssignmentTest(common.HeatTestCase):
def setUp(self):
super(KeystoneRoleAssignmentTest, self).setUp()
self.ctx = utils.dummy_context()
# For unit testing purpose. Register resource provider explicitly.
resource._register_class(RESOURCE_TYPE,
role_assignments.KeystoneRoleAssignment)
self.stack = stack.Stack(
self.ctx, 'test_stack_keystone',
template.Template(keystone_role_assignment_template)
)
self.test_role_assignment = self.stack['test_role_assignment']
# Mock client
self.keystoneclient = mock.MagicMock()
self.test_role_assignment.client = mock.MagicMock()
self.test_role_assignment.client.return_value = self.keystoneclient
self.roles = self.keystoneclient.client.roles
# Mock client plugin
def _side_effect(value):
return value
self.keystone_client_plugin = mock.MagicMock()
(self.keystone_client_plugin.get_domain_id.
side_effect) = _side_effect
(self.keystone_client_plugin.get_role_id.
side_effect) = _side_effect
(self.keystone_client_plugin.get_project_id.
side_effect) = _side_effect
self.test_role_assignment.client_plugin = mock.MagicMock()
(self.test_role_assignment.client_plugin.
return_value) = self.keystone_client_plugin
def test_resource_mapping_not_defined(self):
        # this resource is not planned to be supported in heat, so
        # resource_mapping is not to be defined in KeystoneRoleAssignment
        try:
            from ..resources.role_assignments import resource_mapping  # noqa
            self.fail("KeystoneRoleAssignment is not designed to be exposed "
                      "as a Heat resource")
except Exception:
pass
def test_properties_title(self):
property_title_map = {
role_assignments.KeystoneRoleAssignment.ROLES: 'roles'
}
for actual_title, expected_title in property_title_map.items():
self.assertEqual(
expected_title,
actual_title,
'KeystoneRoleAssignment PROPERTIES(%s) title modified.' %
actual_title)
def test_property_roles_validate_schema(self):
schema = (role_assignments.KeystoneRoleAssignment.
properties_schema[
role_assignments.KeystoneRoleAssignment.ROLES])
self.assertEqual(
True,
schema.update_allowed,
'update_allowed for property %s is modified' %
role_assignments.KeystoneRoleAssignment.ROLES)
self.assertEqual(properties.Schema.LIST,
schema.type,
'type for property %s is modified' %
role_assignments.KeystoneRoleAssignment.ROLES)
self.assertEqual('List of role assignments.',
schema.description,
'description for property %s is modified' %
role_assignments.KeystoneRoleAssignment.ROLES)
def test_role_assignment_handle_create_user(self):
# validate the properties
self.assertEqual([
{
'role': 'role_1',
'project': 'project_1',
'domain': None
},
{
'role': 'role_1',
'project': None,
'domain': 'domain_1'
}],
(self.test_role_assignment.properties.
get(role_assignments.KeystoneRoleAssignment.ROLES)))
self.test_role_assignment.handle_create(user_id='user_1',
group_id=None)
# validate role assignment creation
# role-user-domain
self.roles.grant.assert_any_call(
role='role_1',
user='user_1',
domain='domain_1')
# role-user-project
self.roles.grant.assert_any_call(
role='role_1',
user='user_1',
project='project_1')
def test_role_assignment_handle_create_group(self):
# validate the properties
self.assertEqual([
{
'role': 'role_1',
'project': 'project_1',
'domain': None
},
{
'role': 'role_1',
'project': None,
'domain': 'domain_1'
}],
(self.test_role_assignment.properties.
get(role_assignments.KeystoneRoleAssignment.ROLES)))
self.test_role_assignment.handle_create(user_id=None,
group_id='group_1')
# validate role assignment creation
# role-group-domain
self.roles.grant.assert_any_call(
role='role_1',
group='group_1',
domain='domain_1')
# role-group-project
self.roles.grant.assert_any_call(
role='role_1',
group='group_1',
project='project_1')
def test_role_assignment_handle_update_user(self):
self.test_role_assignment._stored_properties_data = {
'roles': [
{
'role': 'role_1',
'project': 'project_1'
},
{
'role': 'role_1',
'domain': 'domain_1'
}
]
}
prop_diff = {
role_assignments.KeystoneRoleAssignment.ROLES: [
{
'role': 'role_2',
'project': 'project_1'
},
{
'role': 'role_2',
'domain': 'domain_1'
}
]
}
self.test_role_assignment.handle_update(
user_id='user_1',
group_id=None,
prop_diff=prop_diff)
# Add role2-project1-domain1
# role-user-domain
self.roles.grant.assert_any_call(
role='role_2',
user='user_1',
domain='domain_1')
# role-user-project
self.roles.grant.assert_any_call(
role='role_2',
user='user_1',
project='project_1')
# Remove role1-project1-domain1
# role-user-domain
self.roles.revoke.assert_any_call(
role='role_1',
user='user_1',
domain='domain_1')
# role-user-project
self.roles.revoke.assert_any_call(
role='role_1',
user='user_1',
project='project_1')
def test_role_assignment_handle_update_group(self):
self.test_role_assignment._stored_properties_data = {
'roles': [
{
'role': 'role_1',
'project': 'project_1'
},
{
'role': 'role_1',
'domain': 'domain_1'
}
]
}
prop_diff = {
role_assignments.KeystoneRoleAssignment.ROLES: [
{
'role': 'role_2',
'project': 'project_1'
},
{
'role': 'role_2',
'domain': 'domain_1'
}
]
}
self.test_role_assignment.handle_update(
user_id=None,
group_id='group_1',
prop_diff=prop_diff)
# Add role2-project1-domain1
# role-group-domain
self.roles.grant.assert_any_call(
role='role_2',
group='group_1',
domain='domain_1')
# role-group-project
self.roles.grant.assert_any_call(
role='role_2',
group='group_1',
project='project_1')
# Remove role1-project1-domain1
# role-group-domain
self.roles.revoke.assert_any_call(
role='role_1',
group='group_1',
domain='domain_1')
# role-group-project
self.roles.revoke.assert_any_call(
role='role_1',
group='group_1',
project='project_1')
def test_role_assignment_handle_delete_user(self):
self.test_role_assignment._stored_properties_data = {
'roles': [
{
'role': 'role_1',
'project': 'project_1'
},
{
'role': 'role_1',
'domain': 'domain_1'
}
]
}
self.assertIsNone(self.test_role_assignment.handle_delete(
user_id='user_1',
group_id=None
))
# Remove role1-project1-domain1
# role-user-domain
self.roles.revoke.assert_any_call(
role='role_1',
user='user_1',
domain='domain_1')
# role-user-project
self.roles.revoke.assert_any_call(
role='role_1',
user='user_1',
project='project_1')
def test_role_assignment_handle_delete_group(self):
self.test_role_assignment._stored_properties_data = {
'roles': [
{
'role': 'role_1',
'project': 'project_1'
},
{
'role': 'role_1',
'domain': 'domain_1'
}
]
}
self.assertIsNone(self.test_role_assignment.handle_delete(
user_id=None,
group_id='group_1'
))
# Remove role1-project1-domain1
# role-group-domain
self.roles.revoke.assert_any_call(
role='role_1',
group='group_1',
domain='domain_1')
# role-group-project
self.roles.revoke.assert_any_call(
role='role_1',
group='group_1',
project='project_1')
def test_validate_1(self):
self.test_role_assignment.properties = mock.MagicMock()
# both project and domain are none
self.test_role_assignment.properties.get.return_value = [
dict(role='role1')]
self.assertRaises(exception.StackValidationFailed,
self.test_role_assignment.validate)
def test_validate_2(self):
self.test_role_assignment.properties = mock.MagicMock()
# both project and domain are not none
self.test_role_assignment.properties.get.return_value = [
dict(role='role1',
project='project1',
domain='domain1')
]
self.assertRaises(exception.ResourcePropertyConflict,
self.test_role_assignment.validate)
| {
"content_hash": "930bf1253504adbfb0b171b6f5326acf",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 78,
"avg_line_length": 31.368700265251988,
"alnum_prop": 0.49661762218839844,
"repo_name": "rh-s/heat",
"id": "f78236f5d8802b6d82b52278391d4e8b32f44e7a",
"size": "12401",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "heat/tests/keystone/test_role_assignments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6288599"
},
{
"name": "Shell",
"bytes": "32845"
}
],
"symlink_target": ""
} |
import six
from six.moves import urllib
from keystone.tests import unit
from keystone.token import provider
class TestRandomStrings(unit.BaseTestCase):
def test_strings_are_url_safe(self):
s = provider.random_urlsafe_str()
self.assertEqual(s, urllib.parse.quote_plus(s))
def test_strings_can_be_converted_to_bytes(self):
s = provider.random_urlsafe_str()
self.assertTrue(isinstance(s, six.string_types))
b = provider.random_urlsafe_str_to_bytes(s)
self.assertTrue(isinstance(b, bytes))
| {
"content_hash": "25636fe6fef921272a8555966663d0e5",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 56,
"avg_line_length": 30.38888888888889,
"alnum_prop": 0.7038391224862889,
"repo_name": "takeshineshiro/keystone",
"id": "be831484dedc63eae50e233ddb777cdbd9a06d19",
"size": "1093",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "keystone/tests/unit/token/test_provider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "3994869"
}
],
"symlink_target": ""
} |
import json
from unittest import TestCase
from nose.tools import assert_equal, assert_raises
from couchforms.geopoint import GeoPoint
from corehq.apps.es.es_query import HQESQuery
from corehq.apps.es.queries import geo_distance, match
class TestQueries(TestCase):
def assertHasQuery(self, es_query, desired_query):
generated = es_query._query
msg = "Expected to find query\n{}\nInstead found\n{}".format(
json.dumps(desired_query, indent=4),
json.dumps(generated, indent=4),
)
self.assertEqual(generated, desired_query, msg=msg)
def test_query(self):
query = HQESQuery('forms').set_query({"fancy_query": {"foo": "bar"}})
self.assertHasQuery(query, {"fancy_query": {"foo": "bar"}})
def test_null_query_string_queries(self):
query = HQESQuery('forms').search_string_query("")
self.assertHasQuery(query, {"match_all": {}})
query = HQESQuery('forms').search_string_query(None)
self.assertHasQuery(query, {"match_all": {}})
def test_basic_query_string_query(self):
query = HQESQuery('forms').search_string_query("foo")
self.assertHasQuery(query, {
"query_string": {
"query": "*foo*",
"default_operator": "AND",
"fields": None,
}
})
def test_query_with_fields(self):
default_fields = ['name', 'type', 'date']
query = HQESQuery('forms').search_string_query("foo", default_fields)
self.assertHasQuery(query, {
"query_string": {
"query": "*foo*",
"default_operator": "AND",
"fields": ['name', 'type', 'date'],
}
})
def test_complex_query_with_fields(self):
default_fields = ['name', 'type', 'date']
query = (HQESQuery('forms')
.search_string_query("name: foo", default_fields))
self.assertHasQuery(query, {
"simple_query_string": {
"query": "name: foo",
"default_operator": "AND",
"fields": None,
}
})
def test_match_raises_with_invalid_operator(self):
with self.assertRaises(ValueError):
match("cyrus", "pet_name", operator="And")
def test_valid_geo_distance():
assert_equal(
geo_distance('gps_location', GeoPoint(-33.1, 151.8), kilometers=100),
{
'geo_distance': {
'gps_location': {
'lat': -33.1,
'lon': 151.8
},
'distance': '100kilometers',
}
}
)
def test_invalid_geo_distance():
with assert_raises(ValueError):
geo_distance('gps_location', GeoPoint(-33.1, 151.8), smoots=100)
| {
"content_hash": "0e7a7585b011b21abf9ad58cc36bbe88",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 77,
"avg_line_length": 32.01136363636363,
"alnum_prop": 0.5470358537451189,
"repo_name": "dimagi/commcare-hq",
"id": "bbede1b5786ccdc3c049812f002d350a908f2d48",
"size": "2817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/es/tests/test_queries.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
import media
import fresh_tomatoes
"""Movie definition
media.Movie contains Title, Story Line, Poster Image,Trailer in Youtube
From Movie class: (movie_title, movie_storyline,
poster_image,trailer_youtube)"""
godfather = media.Movie('The Godfather',
'American mafia is born',
'http://bit.ly/1u2LOBu',
'https://www.youtube.com/watch?v=vjPmaneLadQ')
avatar = media.Movie('Avatar',
'A marine on alien planet',
'http://bit.ly/1F6nt8g',
'https://www.youtube.com/watch?v=cRdxXPV9GNQ')
trainspotting = media.Movie('Trainspotting',
'Crazy life',
'http://bit.ly/1Jzbujd',
'https://www.youtube.com/watch?v=Sl6O7sad9hI')
amelie = media.Movie('Amelie',
'Le fabuleux destin d Amelie Poulain',
'http://bit.ly/1xMfTfw',
'https://www.youtube.com/watch?v=B-uxeZaM-VM')
# We create an array of movies
movies = [godfather, avatar, trainspotting, amelie]
# Pass Movies array so it will be display in our web page by fresh_tomatoes
fresh_tomatoes.open_movies_page(movies)
| {
"content_hash": "da6c57549067b1fb83d9a1c999fff418",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 37.84848484848485,
"alnum_prop": 0.5708566853482786,
"repo_name": "spicyramen/movie",
"id": "9979758a50cb750fe092dfb7f8a30e64f42ee475",
"size": "1249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entertainment_center.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4750"
},
{
"name": "Python",
"bytes": "8198"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import os
import sys
import shutil
import unittest
import tempfile
import mock
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Container
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
try:
from libcloud.storage.drivers.local import LocalStorageDriver
from libcloud.storage.drivers.local import LockLocalStorage
from lockfile import LockTimeout
except ImportError:
print('lockfile library is not available, skipping local_storage tests...')
LocalStorageDriver = None
LockTimeout = None
class LocalTests(unittest.TestCase):
driver_type = LocalStorageDriver
@classmethod
def create_driver(self):
self.key = tempfile.mkdtemp()
return self.driver_type(self.key, None)
def setUp(self):
self.driver = self.create_driver()
def tearDown(self):
shutil.rmtree(self.key)
self.key = None
def make_tmp_file(self):
_, tmppath = tempfile.mkstemp()
with open(tmppath, 'w') as fp:
fp.write('blah' * 1024)
return tmppath
def remove_tmp_file(self, tmppath):
os.unlink(tmppath)
def test_list_containers_empty(self):
containers = self.driver.list_containers()
self.assertEqual(len(containers), 0)
def test_containers_success(self):
self.driver.create_container('test1')
self.driver.create_container('test2')
containers = self.driver.list_containers()
self.assertEqual(len(containers), 2)
container = containers[1]
self.assertTrue('creation_time' in container.extra)
self.assertTrue('modify_time' in container.extra)
self.assertTrue('access_time' in container.extra)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 0)
objects = container.list_objects()
self.assertEqual(len(objects), 0)
for container in containers:
self.driver.delete_container(container)
def test_objects_success(self):
tmppath = self.make_tmp_file()
tmpfile = open(tmppath)
container = self.driver.create_container('test3')
obj1 = container.upload_object(tmppath, 'object1')
obj2 = container.upload_object(tmppath, 'path/object2')
obj3 = container.upload_object(tmppath, 'path/to/object3')
obj4 = container.upload_object(tmppath, 'path/to/object4.ext')
obj5 = container.upload_object_via_stream(tmpfile, 'object5')
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 5)
for obj in objects:
self.assertNotEqual(obj.hash, None)
self.assertEqual(obj.size, 4096)
self.assertEqual(obj.container.name, 'test3')
self.assertTrue('creation_time' in obj.extra)
self.assertTrue('modify_time' in obj.extra)
self.assertTrue('access_time' in obj.extra)
obj1.delete()
obj2.delete()
objects = container.list_objects()
self.assertEqual(len(objects), 3)
container.delete_object(obj3)
container.delete_object(obj4)
container.delete_object(obj5)
objects = container.list_objects()
self.assertEqual(len(objects), 0)
container.delete()
tmpfile.close()
self.remove_tmp_file(tmppath)
def test_get_container_doesnt_exist(self):
try:
self.driver.get_container(container_name='container1')
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_container_success(self):
self.driver.create_container('test4')
container = self.driver.get_container(container_name='test4')
        self.assertEqual(container.name, 'test4')
container.delete()
def test_get_object_container_doesnt_exist(self):
try:
self.driver.get_object(container_name='test-inexistent',
object_name='test')
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_object_success(self):
tmppath = self.make_tmp_file()
container = self.driver.create_container('test5')
container.upload_object(tmppath, 'test')
obj = self.driver.get_object(container_name='test5',
object_name='test')
self.assertEqual(obj.name, 'test')
self.assertEqual(obj.container.name, 'test5')
self.assertEqual(obj.size, 4096)
self.assertNotEqual(obj.hash, None)
self.assertTrue('creation_time' in obj.extra)
self.assertTrue('modify_time' in obj.extra)
self.assertTrue('access_time' in obj.extra)
obj.delete()
container.delete()
self.remove_tmp_file(tmppath)
def test_create_container_invalid_name(self):
try:
self.driver.create_container(container_name='new/container')
except InvalidContainerNameError:
pass
else:
self.fail('Exception was not thrown')
def test_create_container_already_exists(self):
container = self.driver.create_container(
container_name='new-container')
try:
self.driver.create_container(container_name='new-container')
except ContainerAlreadyExistsError:
pass
else:
self.fail('Exception was not thrown')
# success
self.driver.delete_container(container)
def test_create_container_success(self):
name = 'new_container'
container = self.driver.create_container(container_name=name)
self.assertEqual(container.name, name)
self.driver.delete_container(container)
def test_delete_container_doesnt_exist(self):
container = Container(name='new_container', extra=None,
driver=self.driver)
try:
self.driver.delete_container(container=container)
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_delete_container_not_empty(self):
tmppath = self.make_tmp_file()
container = self.driver.create_container('test6')
obj = container.upload_object(tmppath, 'test')
try:
self.driver.delete_container(container=container)
except ContainerIsNotEmptyError:
pass
else:
self.fail('Exception was not thrown')
# success
obj.delete()
self.remove_tmp_file(tmppath)
self.assertTrue(self.driver.delete_container(container=container))
def test_delete_container_not_found(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
try:
self.driver.delete_container(container=container)
except ContainerDoesNotExistError:
pass
else:
self.fail('Container does not exist but an exception was not' +
'thrown')
def test_delete_container_success(self):
container = self.driver.create_container('test7')
self.assertTrue(self.driver.delete_container(container=container))
def test_download_object_success(self):
tmppath = self.make_tmp_file()
container = self.driver.create_container('test6')
obj = container.upload_object(tmppath, 'test')
destination_path = tmppath + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
obj.delete()
container.delete()
self.remove_tmp_file(tmppath)
os.unlink(destination_path)
def test_download_object_and_overwrite(self):
tmppath = self.make_tmp_file()
container = self.driver.create_container('test6')
obj = container.upload_object(tmppath, 'test')
destination_path = tmppath + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
try:
self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
except LibcloudError:
pass
else:
self.fail('Exception was not thrown')
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=True,
delete_on_failure=True)
self.assertTrue(result)
# success
obj.delete()
container.delete()
self.remove_tmp_file(tmppath)
os.unlink(destination_path)
def test_download_object_as_stream_success(self):
tmppath = self.make_tmp_file()
container = self.driver.create_container('test6')
obj = container.upload_object(tmppath, 'test')
stream = self.driver.download_object_as_stream(obj=obj,
chunk_size=1024)
self.assertTrue(hasattr(stream, '__iter__'))
data = ''
for buff in stream:
data += buff.decode('utf-8')
        self.assertEqual(len(data), 4096)
obj.delete()
container.delete()
self.remove_tmp_file(tmppath)
@mock.patch("lockfile.mkdirlockfile.MkdirLockFile.acquire",
mock.MagicMock(side_effect=LockTimeout))
def test_proper_lockfile_imports(self):
# LockLocalStorage was previously using an un-imported exception
# in its __enter__ method, so the following would raise a NameError.
lls = LockLocalStorage("blah")
self.assertRaises(LibcloudError, lls.__enter__)
if not LocalStorageDriver:
class LocalTests(unittest.TestCase): # NOQA
pass
if __name__ == '__main__':
sys.exit(unittest.main())
| {
"content_hash": "43ff4e4794b643de53501615e11a7e1a",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 79,
"avg_line_length": 34.896875,
"alnum_prop": 0.5895047909017641,
"repo_name": "Hybrid-Cloud/badam",
"id": "6e534dfb11cbf6787b20a2084b76aae0591a328c",
"size": "11962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "patches_tool/aws_patch/aws_deps/libcloud/test/storage/test_local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3817"
},
{
"name": "Python",
"bytes": "29372474"
},
{
"name": "Shell",
"bytes": "17334"
}
],
"symlink_target": ""
} |
import base64
import logging
from typing import Dict, List, Optional, Union
from django.conf import settings
from django.contrib.auth import get_user_model, logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import (ImproperlyConfigured, ObjectDoesNotExist,
PermissionDenied, ValidationError)
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.template.backends.django import Template
from django.template.exceptions import (TemplateDoesNotExist,
TemplateSyntaxError)
from django.template.loader import get_template
from django.urls import reverse
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.decorators import method_decorator
from django.utils.module_loading import import_string
from django.utils.translation import gettext as _
from django.views import View
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from saml2 import BINDING_HTTP_POST, BINDING_HTTP_REDIRECT
from saml2.authn_context import PASSWORD, AuthnBroker, authn_context_class_ref
from saml2.ident import NameID
from saml2.saml import NAMEID_FORMAT_UNSPECIFIED
from .error_views import error_cbv
from .idp import IDP
from .models import ServiceProvider
from .processors import BaseProcessor
from .utils import repr_saml, verify_request_signature
logger = logging.getLogger(__name__)
User = get_user_model()
def store_params_in_session(request: HttpRequest) -> None:
""" Gathers the SAML parameters from the HTTP request and store them in the session
"""
if request.method == 'POST':
# future TODO: parse also SOAP and PAOS format from POST
passed_data = request.POST
binding = BINDING_HTTP_POST
else:
passed_data = request.GET
binding = BINDING_HTTP_REDIRECT
try:
saml_request = passed_data['SAMLRequest']
except (KeyError, MultiValueDictKeyError) as e:
raise ValidationError(_('not a valid SAMLRequest: {}').format(repr(e)))
request.session['Binding'] = binding
request.session['SAMLRequest'] = saml_request
request.session['RelayState'] = passed_data.get('RelayState', '')
@never_cache
@csrf_exempt
@require_http_methods(["GET", "POST"])
def sso_entry(request: HttpRequest, *args, **kwargs) -> HttpResponse:
""" Entrypoint view for SSO. Store the saml info in the request session
and redirects to the login_process view.
"""
try:
store_params_in_session(request)
except ValidationError as e:
return error_cbv.handle_error(request, e, status_code=400)
logger.debug("SSO requested to IDP with binding {}".format(request.session['Binding']))
logger.debug("--- SAML request [\n{}] ---".format(repr_saml(request.session['SAMLRequest'], b64=True)))
return HttpResponseRedirect(reverse('djangosaml2idp:saml_login_process'))
def check_access(processor: BaseProcessor, request: HttpRequest) -> None:
""" Check if user has access to the service of this SP. Raises a PermissionDenied exception if not.
"""
if not processor.has_access(request):
raise PermissionDenied(_("You do not have access to this resource"))
def get_sp_config(sp_entity_id: str) -> ServiceProvider:
""" Get a dict with the configuration for a SP according to the SAML_IDP_SPCONFIG settings.
Raises an exception if no SP matching the given entity id can be found.
"""
try:
sp = ServiceProvider.objects.get(entity_id=sp_entity_id, active=True)
except ObjectDoesNotExist:
raise ImproperlyConfigured(_("No active Service Provider object matching the entity_id '{}' found").format(sp_entity_id))
return sp
def get_authn(req_info=None):
req_authn_context = req_info.message.requested_authn_context if req_info else PASSWORD
broker = AuthnBroker()
broker.add(authn_context_class_ref(req_authn_context), "")
return broker.get_authn_by_accr(req_authn_context)
def build_authn_response(user: User, authn, resp_args, service_provider: ServiceProvider) -> list: # type: ignore
""" pysaml2 server.Server.create_authn_response wrapper
"""
policy = resp_args.get('name_id_policy', None)
if policy is None:
name_id_format = NAMEID_FORMAT_UNSPECIFIED
else:
name_id_format = policy.format
idp_server = IDP.load()
idp_name_id_format_list = idp_server.config.getattr("name_id_format", "idp") or [NAMEID_FORMAT_UNSPECIFIED]
if name_id_format not in idp_name_id_format_list:
raise ImproperlyConfigured(_('SP requested a name_id_format that is not supported in the IDP: {}').format(name_id_format))
processor: BaseProcessor = service_provider.processor # type: ignore
user_id = processor.get_user_id(user, name_id_format, service_provider, idp_server.config)
name_id = NameID(format=name_id_format, sp_name_qualifier=service_provider.entity_id, text=user_id)
return idp_server.create_authn_response(
authn=authn,
identity=processor.create_identity(user, service_provider.attribute_mapping),
name_id=name_id,
userid=user_id,
sp_entity_id=service_provider.entity_id,
# Signing
sign_response=service_provider.sign_response,
sign_assertion=service_provider.sign_assertion,
sign_alg=service_provider.signing_algorithm,
digest_alg=service_provider.digest_algorithm,
# Encryption
encrypt_assertion=service_provider.encrypt_saml_responses,
encrypted_advice_attributes=service_provider.encrypt_saml_responses,
**resp_args
)
class IdPHandlerViewMixin:
""" Contains some methods used by multiple views """
def render_login_html_to_string(self, context=None, request=None, using=None):
""" Render the html response for the login action. Can be using a custom html template if set on the view. """
default_login_template_name = 'djangosaml2idp/login.html'
custom_login_template_name = getattr(self, 'login_html_template', None)
if custom_login_template_name:
template = self._fetch_custom_template(custom_login_template_name, default_login_template_name, using)
return template.render(context, request)
template = get_template(default_login_template_name, using=using)
return template.render(context, request)
@staticmethod
def _fetch_custom_template(custom_name: str, default_name: str, using: Optional[str] = None) -> Template:
""" Grabs the custom login template. Falls back to default if issues arise. """
try:
template = get_template(custom_name, using=using)
except (TemplateDoesNotExist, TemplateSyntaxError) as e:
logger.error(
'Specified template {} cannot be used due to: {}. Falling back to default login template {}'.format(
custom_name, str(e), default_name))
template = get_template(default_name, using=using)
return template
def create_html_response(self, request: HttpRequest, binding, authn_resp, destination, relay_state):
""" Login form for SSO
"""
if binding == BINDING_HTTP_POST:
context = {
"acs_url": destination,
"saml_response": base64.b64encode(str(authn_resp).encode()).decode(),
"relay_state": relay_state,
}
html_response = {
"data": self.render_login_html_to_string(context=context, request=request),
"type": "POST",
}
else:
idp_server = IDP.load()
http_args = idp_server.apply_binding(
binding=binding,
msg_str=authn_resp,
destination=destination,
relay_state=relay_state,
response=True)
logger.debug('http args are: %s' % http_args)
html_response = {
"data": http_args['headers'][0][1],
"type": "REDIRECT",
}
return html_response
def render_response(self, request: HttpRequest, html_response, processor: BaseProcessor = None) -> HttpResponse:
""" Return either a response as redirect to MultiFactorView or as html with self-submitting form to log in.
"""
if not processor:
# In case of SLO, where processor isn't relevant
if html_response['type'] == 'POST':
return HttpResponse(html_response['data'])
else:
return HttpResponseRedirect(html_response['data'])
request.session['saml_data'] = html_response
if processor.enable_multifactor(request.user):
logger.debug("Redirecting to process_multi_factor")
return HttpResponseRedirect(reverse('djangosaml2idp:saml_multi_factor'))
# No multifactor
logger.debug("Performing SAML redirect")
if html_response['type'] == 'POST':
return HttpResponse(html_response['data'])
else:
return HttpResponseRedirect(html_response['data'])
@method_decorator(never_cache, name='dispatch')
class LoginProcessView(LoginRequiredMixin, IdPHandlerViewMixin, View):
""" View which processes the actual SAML request and returns a self-submitting form with the SAML response.
The login_required decorator ensures the user authenticates first on the IdP using 'normal' ways.
"""
def get(self, request, *args, **kwargs):
binding = request.session.get('Binding', BINDING_HTTP_POST)
# TODO: would it be better to store SAML info in request objects?
# AuthBackend takes request obj as argument...
try:
idp_server = IDP.load()
# Parse incoming request
req_info = idp_server.parse_authn_request(request.session['SAMLRequest'], binding)
# check SAML request signature
try:
verify_request_signature(req_info)
except ValueError as excp:
return error_cbv.handle_error(request, exception=excp, status_code=400)
# Compile Response Arguments
resp_args = idp_server.response_args(req_info.message)
# Set SP and Processor
sp_entity_id = resp_args.pop('sp_entity_id')
service_provider = get_sp_config(sp_entity_id)
# Check if user has access
try:
# Check if user has access to SP
check_access(service_provider.processor, request)
except PermissionDenied as excp:
return error_cbv.handle_error(request, exception=excp, status_code=403)
# Construct SamlResponse message
authn_resp = build_authn_response(request.user, get_authn(), resp_args, service_provider)
except Exception as e:
return error_cbv.handle_error(request, exception=e, status_code=500)
html_response = self.create_html_response(
request,
binding=resp_args['binding'],
authn_resp=authn_resp,
destination=resp_args['destination'],
relay_state=request.session['RelayState'])
logger.debug("--- SAML Authn Response [\n{}] ---".format(repr_saml(str(authn_resp))))
return self.render_response(request, html_response, service_provider.processor)
@method_decorator(never_cache, name='dispatch')
class SSOInitView(LoginRequiredMixin, IdPHandlerViewMixin, View):
""" View used for IDP initialized login, doesn't handle any SAML authn request
"""
def post(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
return self.get(request, *args, **kwargs)
def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
request_data = request.POST or request.GET
passed_data: Dict[str, Union[str, List[str]]] = request_data.copy().dict()
try:
# get sp information from the parameters
sp_entity_id = str(passed_data['sp'])
service_provider = get_sp_config(sp_entity_id)
processor: BaseProcessor = service_provider.processor # type: ignore
except (KeyError, ImproperlyConfigured) as excp:
return error_cbv.handle_error(request, exception=excp, status_code=400)
try:
# Check if user has access to SP
check_access(processor, request)
except PermissionDenied as excp:
return error_cbv.handle_error(request, exception=excp, status_code=403)
idp_server = IDP.load()
binding_out, destination = idp_server.pick_binding(
service="assertion_consumer_service",
entity_id=sp_entity_id)
# Adding a few things that would have been added if this were SP Initiated
passed_data['destination'] = destination
passed_data['in_response_to'] = "IdP_Initiated_Login"
# Construct SamlResponse messages
authn_resp = build_authn_response(request.user, get_authn(), passed_data, service_provider)
html_response = self.create_html_response(request, binding_out, authn_resp, destination, passed_data.get('RelayState', ""))
return self.render_response(request, html_response, processor)
@method_decorator(never_cache, name='dispatch')
class ProcessMultiFactorView(LoginRequiredMixin, View):
""" This view is used in an optional step is to perform 'other' user validation, for example 2nd factor checks.
Override this view per the documentation if using this functionality to plug in your custom validation logic.
"""
def multifactor_is_valid(self, request: HttpRequest) -> bool:
""" The code here can do whatever it needs to validate your user (via request.user or elsewise).
It must return True for authentication to be considered a success.
"""
return True
def get(self, request: HttpRequest, *args, **kwargs):
if self.multifactor_is_valid(request):
logger.debug('MultiFactor succeeded for %s' % request.user)
html_response = request.session['saml_data']
if html_response['type'] == 'POST':
return HttpResponse(html_response['data'])
else:
return HttpResponseRedirect(html_response['data'])
logger.debug(_("MultiFactor failed; %s will not be able to log in") % request.user)
logout(request)
raise PermissionDenied(_("MultiFactor authentication factor failed"))
@method_decorator([never_cache, csrf_exempt], name='dispatch')
class LogoutProcessView(LoginRequiredMixin, IdPHandlerViewMixin, View):
""" View which processes the actual SAML Single Logout request
    The login_required decorator ensures the user authenticates first on the IdP using the 'normal' way.
"""
__service_name = 'Single LogOut'
def post(self, request: HttpRequest, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get(self, request: HttpRequest, *args, **kwargs):
logger.info("--- {} Service ---".format(self.__service_name))
        # do not assign the result to a variable that would overwrite the request object; if this fails, the bad-request response is raised naturally
store_params_in_session(request)
binding = request.session['Binding']
relay_state = request.session['RelayState']
logger.debug("--- {} requested [\n{}] to IDP ---".format(self.__service_name, binding))
idp_server = IDP.load()
# adapted from pysaml2 examples/idp2/idp_uwsgi.py
try:
req_info = idp_server.parse_logout_request(request.session['SAMLRequest'], binding)
except Exception as excp:
expc_msg = "{} Bad request: {}".format(self.__service_name, excp)
logger.error(expc_msg)
return error_cbv.handle_error(request, exception=expc_msg, status_code=400)
logger.debug("{} - local identifier: {} from {}".format(self.__service_name, req_info.message.name_id.text, req_info.message.name_id.sp_name_qualifier))
logger.debug("--- {} SAML request [\n{}] ---".format(self.__service_name, repr_saml(req_info.xmlstr, b64=False)))
# TODO
# check SAML request signature
try:
verify_request_signature(req_info)
except ValueError as excp:
return error_cbv.handle_error(request, exception=excp, status_code=400)
resp = idp_server.create_logout_response(req_info.message, [binding])
'''
# TODO: SOAP
# if binding == BINDING_SOAP:
# destination = ""
# response = False
# else:
# binding, destination = IDP.pick_binding(
# "single_logout_service", [binding], "spsso", req_info
# )
# response = True
# END TODO SOAP'''
try:
# hinfo returns request or response, it depends by request arg
hinfo = idp_server.apply_binding(binding, resp.__str__(), resp.destination, relay_state, response=True)
except Exception as excp:
logger.error("ServiceError: %s", excp)
            return error_cbv.handle_error(request, exception=excp, status_code=400)
logger.debug("--- {} Response [\n{}] ---".format(self.__service_name, repr_saml(resp.__str__().encode())))
logger.debug("--- binding: {} destination:{} relay_state:{} ---".format(binding, resp.destination, relay_state))
# TODO: double check username session and saml login request
# logout user from IDP
logout(request)
if hinfo['method'] == 'GET':
return HttpResponseRedirect(hinfo['headers'][0][1])
else:
html_response = self.create_html_response(
request,
binding=binding,
authn_resp=resp.__str__(),
destination=resp.destination,
relay_state=relay_state)
return self.render_response(request, html_response, None)
@never_cache
def get_multifactor(request: HttpRequest) -> HttpResponse:
if hasattr(settings, "SAML_IDP_MULTIFACTOR_VIEW"):
multifactor_class = import_string(getattr(settings, "SAML_IDP_MULTIFACTOR_VIEW"))
else:
multifactor_class = ProcessMultiFactorView
return multifactor_class.as_view()(request)
@never_cache
def metadata(request: HttpRequest) -> HttpResponse:
""" Returns an XML with the SAML 2.0 metadata for this Idp.
The metadata is constructed on-the-fly based on the config dict in the django settings.
"""
return HttpResponse(content=IDP.metadata().encode('utf-8'), content_type="text/xml; charset=utf8")
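# A hypothetical urls.py sketch showing how these views are typically wired up.
# Only the 'saml_login_process' and 'saml_multi_factor' route names are taken
# from the reverse() calls above; the URL paths and the remaining names are
# illustrative assumptions, not this package's actual URL configuration.
#
#   from django.urls import path
#   from . import views
#
#   app_name = 'djangosaml2idp'
#   urlpatterns = [
#       path('sso/post/', views.sso_entry, name='saml_login_post'),
#       path('sso/redirect/', views.sso_entry, name='saml_login_redirect'),
#       path('sso/init/', views.SSOInitView.as_view(), name='saml_idp_init'),
#       path('login/process/', views.LoginProcessView.as_view(), name='saml_login_process'),
#       path('login/process_multi_factor/', views.get_multifactor, name='saml_multi_factor'),
#       path('slo/', views.LogoutProcessView.as_view(), name='saml_logout_process'),
#       path('metadata/', views.metadata, name='saml_idp_metadata'),
#   ]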
| {
"content_hash": "c5ff7e2e6ac3a01ceb859021d4458efc",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 160,
"avg_line_length": 43.77262180974478,
"alnum_prop": 0.6556238736351108,
"repo_name": "OTA-Insight/djangosaml2idp",
"id": "401ca764c26f05d942d61b12ab7ae1b655075f8f",
"size": "18866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangosaml2idp/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "594"
},
{
"name": "HTML",
"bytes": "3052"
},
{
"name": "Makefile",
"bytes": "658"
},
{
"name": "Python",
"bytes": "148086"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
} |
import markdown
from markdown.blockprocessors import BlockProcessor
import re
from markdown import util
class AlignProcessor(BlockProcessor):
""" Process Align. """
def __init__(self, parser):
BlockProcessor.__init__(self, parser)
exprs = (("->", "right"), ("<-", "center"))
self.REStart = re.compile(r'(^|\n)' + re.escape('->'))
self._ending_re = [re.compile(re.escape(end_expr) + r'(\n|$)') for end_expr, _ in exprs]
self._kind_align = [kind_align for _, kind_align in exprs]
def test(self, parent, block):
return bool(self.REStart.search(block))
def run(self, parent, blocks):
FirstBlock = blocks[0]
m = self.REStart.search(FirstBlock)
if not m: # pragma: no cover
            # run() should only be fired if test() returned True, so this should never happen
# Do not raise an exception because exception should never be generated.
return False
StartBlock = (0, m.start(), m.end())
EndBlock = (-1, -1, -1)
content_align = "left"
for i in range(len(blocks)):
if i == 0:
txt = FirstBlock[m.end() + 1:]
dec = m.end() - m.start() + 1
else:
txt = blocks[i]
dec = 0
# Test all ending aligns
t_ends = ((i, re_end.search(txt)) for i, re_end in enumerate(self._ending_re))
# Catch only matching re
t_ends = list(filter(lambda e: e[1] is not None, t_ends))
if len(t_ends) > 0:
# retrieve first matching re
selected_align, mEnd = min(t_ends, key=lambda e: e[1].start())
EndBlock = (i, mEnd.start() + dec, mEnd.end() + dec)
content_align = self._kind_align[selected_align]
break
if EndBlock[0] < 0:
# Block not ended, do not transform
return False
# Split blocks into before/content aligned/ending
        # There should never be leading or trailing text because the regexes require that the expression starts/ends the
        # block. This is kept for safety: if the regexes are updated the code should still work.
Before = FirstBlock[:StartBlock[1]]
Content = []
After = blocks[EndBlock[0]][EndBlock[2]:]
for i in range(0, EndBlock[0] + 1):
blck = blocks.pop(0)
if i == StartBlock[0]:
startIndex = StartBlock[2]
else:
startIndex = 0
if i == EndBlock[0]:
endIndex = EndBlock[1]
else:
endIndex = len(blck)
Content.append(blck[startIndex: endIndex])
Content = "\n\n".join(Content)
if Before: # pragma: no cover
            # This should never occur because the regexes require that the expression starts the block.
# Do not raise an exception because exception should never be generated.
self.parser.parseBlocks(parent, [Before])
sibling = self.lastChild(parent)
if (sibling and
sibling.tag == "div" and
"align" in sibling.attrib and
sibling.attrib["align"] == content_align):
# If previous block is the same align content, merge it !
h = sibling
if h.text: # pragma: no cover
                # This should never occur because there should never be text content outside of block html elements.
                # This code comes from other markdown processors; it may still happen because of quirks in the element tree.
h.text += '\n'
else:
h = util.etree.SubElement(parent, 'div')
h.set("align", content_align)
self.parser.parseChunk(h, Content)
if After: # pragma: no cover
            # This should never occur because the regexes require that the expression ends the block.
# Do not raise an exception because exception should never be generated.
blocks.insert(0, After)
class AlignExtension(markdown.extensions.Extension):
"""Adds align extension to Markdown class."""
def extendMarkdown(self, md, md_globals):
"""Modifies inline patterns."""
md.registerExtension(self)
md.parser.blockprocessors.add('align', AlignProcessor(md.parser), '_begin')
def makeExtension(*args, **kwargs):
return AlignExtension(*args, **kwargs)
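# A minimal usage sketch (illustrative, not part of the original module): blocks
# wrapped as "-> ... ->" are right-aligned and "-> ... <-" are centered, per the
# (start, end) expression pairs defined in AlignProcessor above.
if __name__ == "__main__":  # pragma: no cover
    demo_text = "-> This paragraph is centered. <-\n\n-> This one is right-aligned. ->"
    # Expect two <div align="..."> wrappers around the rendered <p> elements.
    print(markdown.markdown(demo_text, extensions=[makeExtension()]))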
| {
"content_hash": "0aa561085b64ea04d6d22a33ed1255a4",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 119,
"avg_line_length": 38.1965811965812,
"alnum_prop": 0.5701499216827031,
"repo_name": "Situphen/Python-ZMarkdown",
"id": "3dd930a6358960ea87573be6b1ce528115821b6b",
"size": "4493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master-zds",
"path": "markdown/extensions/align.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "664195"
},
{
"name": "Makefile",
"bytes": "937"
},
{
"name": "Python",
"bytes": "359072"
},
{
"name": "Shell",
"bytes": "912"
}
],
"symlink_target": ""
} |
import os
from recaptcha_config import *
# NOTICE: Remove this in production
DEBUG = True
# change base directory to get templates correctly
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
UPLOADS_PATH = os.path.join(BASE_DIR, "uploads")
# General flask stuff
SECRET_KEY = "blahblahblah"
# CSRF_SESSION_KEY = "blahblahblah"
# CSRF_ENABLED = True
# SQLAlchemy stuff
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'database.sqlite')
# Recaptcha stuff
RECAPTCHA_PARAMETERS = {'hl': 'zh', 'render': 'explicit'}
RECAPTCHA_DATA_ATTRS = {'theme': 'dark'}
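# Hypothetical usage sketch (not part of this file): a Flask app would normally
# pick these settings up with app.config.from_object, for example:
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object('config')
#   # app.config['SQLALCHEMY_DATABASE_URI'] now points at database.sqlite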
| {
"content_hash": "189241804f21088e9e628e3e130092cd",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 82,
"avg_line_length": 20.821428571428573,
"alnum_prop": 0.7101200686106347,
"repo_name": "undeadpixel/overlapy_web",
"id": "1cb9dc023358dfe52c88e2d784614b78888b7be1",
"size": "583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4016"
},
{
"name": "HTML",
"bytes": "6325"
},
{
"name": "JavaScript",
"bytes": "304"
},
{
"name": "Python",
"bytes": "8552"
}
],
"symlink_target": ""
} |
from AbstractManager import AbstractManager
class ActionManager(AbstractManager):
def __init__(self):
self._itemList = [] #a list of all items
def addItem(self, newItem): #adds an item to the item list
for existingItem in self._itemList: #checks if binder already exists
if newItem.binder == existingItem.binder:
raise Exception() #if it does, raise exception
self._itemList.append(newItem) #if it doesn't, add item to list
def removeItem(self, item): #removes an item from the item list
if item in self._itemList: #checks if item is in item list
self._itemList.remove(item) #if it is, remove it
else:
raise Exception() #if it isn't, raise exception
def execute(self, binder): #executes function in item
found = False
		for item in self._itemList:					#checks each item for the specified binder
if item.binder == binder: #if it is, execute it
found = True
item.execute()
if found == False: #if it isn't, raise exception
raise Exception()
def itemList(self): #returns the item list
		return self._itemList | {
"content_hash": "c03955073d11c7d0340f782006663341",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 95,
"avg_line_length": 52.206896551724135,
"alnum_prop": 0.4953764861294584,
"repo_name": "paulydboy/Quad-Vision",
"id": "a21924d59c85c009ce176f4a9dc767dbcb7a1991",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DroneControl/ActionManager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18374"
}
],
"symlink_target": ""
} |
import sys, time
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
def scan(yourIP, targetIP, startPort, endPort):
# construct layers 3 and 4
lyrThree=IP()
lyrFour=TCP()
lyrThree.src = yourIP
lyrThree.dst = targetIP
if endPort == 0:
endPort = startPort
for x in range(startPort, endPort+1):
# set source-port,seq-number,flag respectively
# source port was RandShort(), but hardcode will get the job done
sourcePort = 49564
lyrFour.sport = sourcePort
lyrFour.seq = RandShort()
lyrFour.ack = 0
lyrFour.dport = x
lyrFour.flags = 'S'
# combine layers
packet = lyrThree/lyrFour
# send packet
response = sr1(packet,verbose=0)
        # 0x12 is the TCP flags value for SYN/ACK; check Wireshark if you don't believe me.
if response.getlayer(TCP).flags == 0x12:
# tear down 3 way HS, send RST flag
time.sleep(1)
ip = IP(dst=targetIP, src=yourIP)
tcp = TCP(ack=response.getlayer(TCP).seq + 1, seq=response.getlayer(TCP).ack, flags='R', sport=sourcePort, dport=x, window=0)
send(ip/tcp,verbose=0)
print "Port " + str(x) + " is open on " + targetIP
def setUp(yourIP, targetIP, ports):
if '-' in ports:
index = ports.find('-')
startPort = int(ports[0:index])
endPort = int(ports[index+1:len(ports)])
else:
startPort = int(ports)
endPort = 0
scan(yourIP, targetIP, startPort, endPort)
if len(sys.argv) != 4:
print "\nAuthor: B. Cone"
print " Usage: ./synScan.py <YourIP> <TargetIP> <Ports(0-65535)>\n"
sys.exit(0)
yourIP = sys.argv[1]
targetIP = sys.argv[2]
ports = sys.argv[3]
setUp(yourIP, targetIP, ports)
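# Illustrative invocations (IPs made up; scapy needs raw-socket privileges, so run as root):
#   sudo ./synScan.py 192.168.1.5 192.168.1.1 1-1024   (scan a port range)
#   sudo ./synScan.py 192.168.1.5 192.168.1.1 80       (scan a single port)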
| {
"content_hash": "4ca84c440ac78a55708f6cd156de6155",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 131,
"avg_line_length": 25.939393939393938,
"alnum_prop": 0.6477803738317757,
"repo_name": "bartcone/syn-stealth-portscanner",
"id": "d76807fc10d09a61ed12de0dc11a1d81f6d521e3",
"size": "1757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "syn-stealth.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1757"
}
],
"symlink_target": ""
} |
import time
import datetime
import os
from docplex.mp.model import Model
from ticdat import TicDatFactory, Progress, LogFile, Slicer, standard_main
# ------------------------ define the input schema --------------------------------
# There are three input tables, with 4 primary key fields and 4 data fields.
dataFactory = TicDatFactory (
sites = [['name'],['demand', 'center_status']],
distance = [['source', 'destination'],['distance']],
parameters = [["key"], ["value"]])
# add foreign key constraints
dataFactory.add_foreign_key("distance", "sites", ['source', 'name'])
dataFactory.add_foreign_key("distance", "sites", ['destination', 'name'])
# center_status is a flag field which can take one of two string values.
dataFactory.set_data_type("sites", "center_status", number_allowed=False,
strings_allowed=["Can Be Center", "Pure Demand Point"])
# The default type of non infinite, non negative works for distance
dataFactory.set_data_type("distance", "distance")
# ---------------------------------------------------------------------------------
# ------------------------ define the output schema -------------------------------
# There are three solution tables, with 4 primary key fields and 1
# data field amongst them.
solutionFactory = TicDatFactory(
openings = [['site'],[]],
assignments = [['site', 'assigned_to'],[]],
parameters = [["key"], ["value"]])
# ---------------------------------------------------------------------------------
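# ------------------------ build a tiny example input (illustrative) ---------------
# A tiny, hypothetical input built in code rather than read from a file; handy
# for smoke-testing the solve function below. The row-assignment style mirrors how
# solve() populates its solution TicDat; every value here is made up.
def build_tiny_example_input():
    dat = dataFactory.TicDat()
    dat.sites["depot"] = {"demand": 0, "center_status": "Can Be Center"}
    dat.sites["shop"] = {"demand": 10, "center_status": "Pure Demand Point"}
    dat.distance["shop", "depot"] = {"distance": 4.2}
    dat.parameters["Number of Centroids"] = {"value": 1}
    return dat
# ---------------------------------------------------------------------------------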
# ------------------------ create a solve function --------------------------------
def time_stamp() :
ts = time.time()
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def solve(dat, out, err, progress):
assert isinstance(progress, Progress)
assert isinstance(out, LogFile) and isinstance(err, LogFile)
assert dataFactory.good_tic_dat_object(dat)
assert not dataFactory.find_foreign_key_failures(dat)
assert not dataFactory.find_data_type_failures(dat)
out.write("COG output log\n%s\n\n"%time_stamp())
err.write("COG error log\n%s\n\n"%time_stamp())
def get_distance(x,y):
if (x,y) in dat.distance:
return dat.distance[x,y]["distance"]
if (y,x) in dat.distance:
return dat.distance[y,x]["distance"]
return float("inf")
def can_assign(x, y):
return dat.sites[y]["center_status"] == "Can Be Center" \
and get_distance(x,y)<float("inf")
unassignables = [n for n in dat.sites if not
any(can_assign(n,y) for y in dat.sites) and
dat.sites[n]["demand"] > 0]
if unassignables:
# Infeasibility detected. Generate an error table and return None
err.write("The following sites have demand, but can't be " +
"assigned to anything.\n")
err.log_table("Un-assignable Demand Points",
[["Site"]] + [[_] for _ in unassignables])
return
useless = [n for n in dat.sites if not any(can_assign(y,n) for y in dat.sites) and
dat.sites[n]["demand"] == 0]
if useless:
# Log in the error table as a warning, but can still try optimization.
err.write("The following sites have no demand, and can't serve as the " +
"center point for any assignments.\n")
err.log_table("Useless Sites", [["Site"]] + [[_] for _ in useless])
progress.numerical_progress("Feasibility Analysis" , 100)
m = Model("cog")
assign_vars = {(n, assigned_to) : m.binary_var(name = "%s_%s"%(n,assigned_to))
for n in dat.sites for assigned_to in dat.sites
if can_assign(n, assigned_to)}
open_vars = {n : m.binary_var(name = "open_%s"%n)
for n in dat.sites
if dat.sites[n]["center_status"] == "Can Be Center"}
if not open_vars:
err.write("Nothing can be a center!\n") # Infeasibility detected.
return
progress.numerical_progress("Core Model Creation", 50)
assign_slicer = Slicer(assign_vars)
for n, r in dat.sites.items():
if r["demand"] > 0:
m.add_constraint(m.sum(assign_vars[n, assign_to]
for _, assign_to in assign_slicer.slice(n, "*"))
== 1,
ctname = "must_assign_%s"%n)
crippledfordemo = "formulation" in dat.parameters and \
dat.parameters["formulation"]["value"] == "weak"
for assigned_to, r in dat.sites.items():
if r["center_status"] == "Can Be Center":
_assign_vars = [assign_vars[n, assigned_to]
for n,_ in assign_slicer.slice("*", assigned_to)]
if crippledfordemo:
m.add_constraint(m.sum(_assign_vars) <=
len(_assign_vars) * open_vars[assigned_to],
ctname="weak_force_open%s"%assigned_to)
else:
for var in _assign_vars :
m.add_constraint(var <= open_vars[assigned_to],
ctname = "strong_force_open_%s"%assigned_to)
number_of_centroids = dat.parameters["Number of Centroids"]["value"] \
if "Number of Centroids" in dat.parameters else 1
if number_of_centroids <= 0:
err.write("Need to specify a positive number of centroids\n") # Infeasibility detected.
return
m.add_constraint(m.sum(v for v in open_vars.values()) == number_of_centroids,
ctname= "numCentroids")
if "mipGap" in dat.parameters:
m.parameters.mip.tolerances.mipgap = dat.parameters["mipGap"]["value"]
progress.numerical_progress("Core Model Creation", 100)
m.minimize(m.sum(var * get_distance(n,assigned_to) * dat.sites[n]["demand"]
for (n, assigned_to),var in assign_vars.items()))
progress.add_cplex_listener("COG Optimization", m)
if m.solve():
progress.numerical_progress("Core Optimization", 100)
cplex_soln = m.solution
sln = solutionFactory.TicDat()
# see code trick http://ibm.co/2aQwKYG
if m.solve_details.status == 'optimal':
sln.parameters["Lower Bound"] = cplex_soln.get_objective_value()
else:
sln.parameters["Lower Bound"] = m.solve_details.get_best_bound()
sln.parameters["Upper Bound"] = cplex_soln.get_objective_value()
out.write('Upper Bound: %g\n' % sln.parameters["Upper Bound"]["value"])
out.write('Lower Bound: %g\n' % sln.parameters["Lower Bound"]["value"])
def almostone(x) :
return abs(x-1) < 0.0001
for (n, assigned_to), var in assign_vars.items() :
if almostone(cplex_soln.get_value(var)) :
sln.assignments[n,assigned_to] = {}
for n,var in open_vars.items() :
if almostone(cplex_soln.get_value(var)) :
sln.openings[n]={}
out.write('Number Centroids: %s\n' % len(sln.openings))
progress.numerical_progress("Full Cog Solve", 100)
return sln
# ---------------------------------------------------------------------------------
# ------------------------ provide stand-alone functionality ----------------------
def percent_error(lb, ub):
assert lb<=ub
return "%.2f"%(100.0 * (ub-lb) / ub) + "%"
# when run from the command line, will read/write json/xls/csv/db/mdb files
if __name__ == "__main__":
if os.path.exists("cog.stop"):
print "Removing the cog.stop file so that solve can proceed."
print "Add cog.stop whenever you want to stop the optimization"
os.remove("cog.stop")
class CogStopProgress(Progress):
def mip_progress(self, theme, lower_bound, upper_bound):
super(CogStopProgress, self).mip_progress(theme, lower_bound, upper_bound)
print "%s:%s:%s"%(theme.ljust(30), "Percent Error".ljust(20),
percent_error(lower_bound, upper_bound))
# return False (to stop optimization) if the cog.stop file exists
return not os.path.exists("cog.stop")
# creating a single argument version of solve to pass to standard_main
def _solve(dat):
# create local text files for logging
with LogFile("output.txt") as out :
with LogFile("error.txt") as err :
solution = solve(dat, out, err, CogStopProgress())
if solution :
print('\n\nUpper Bound : %g' % solution.parameters["Upper Bound"]["value"])
print('Lower Bound : %g' % solution.parameters["Lower Bound"]["value"])
print('Percent Error : %s' % percent_error(solution.parameters["Lower Bound"]["value"],
solution.parameters["Upper Bound"]["value"]))
return solution
else :
print('\nNo solution')
standard_main(dataFactory, solutionFactory, _solve)
# ---------------------------------------------------------------------------------
| {
"content_hash": "6ae2dee12ceba16f5d186725c54f725b",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 108,
"avg_line_length": 44.35096153846154,
"alnum_prop": 0.5482926829268293,
"repo_name": "opalytics/opalytics-ticdat",
"id": "9d44d4a1d50f614855a0b2c39ec3233bf232c01b",
"size": "10824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/expert_section/cplex/cog/cogmodel.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "AMPL",
"bytes": "2877"
},
{
"name": "Python",
"bytes": "660699"
}
],
"symlink_target": ""
} |
from qiita_client import QiitaCommand
from .trim import trim
__all__ = ['trim']
# Define the trim command
req_params = {'input': ('artifact', ['per_sample_FASTQ'])}
opt_params = {
# 3' adapter
'Fwd read adapter': ['string', 'GATCGGAAGAGCACACGTCTGAACTCCAGTCAC'],
# 3' adapter for rev
'Rev read adapter': ['string', 'GATCGGAAGAGCGTCGTGTAGGGAAAGGAGTGT'],
# 3' quality cutoff
'Trim low-quality bases': ['integer', '15'],
# min length after trimming
'Minimum trimmed read length': ['integer', '80'],
# drop pairs whose mates are filtered out
'Pair-end read required to match': ['choice:["any", "both"]', 'any'],
# maximum Ns to drop sequence
'Maximum number of N bases in a read to keep it': ['integer', '80'],
# trim Ns on end of read
'Trim Ns on ends of reads': ['boolean', True],
# Threads used
'Number of threads used': ['integer', '15'],
# NextSeq-specific quality trimming
'NextSeq-specific quality trimming': ['boolean', False],
}
outputs = {'Adapter trimmed files': 'per_sample_FASTQ'}
dflt_param_set = {
'KAPA HyperPlus with iTru': {
'Fwd read adapter': 'GATCGGAAGAGCACACGTCTGAACTCCAGTCAC',
'Rev read adapter': 'GATCGGAAGAGCGTCGTGTAGGGAAAGGAGTGT',
'Trim low-quality bases': 15,
'Minimum trimmed read length': 80,
'Pair-end read required to match': 'any',
'Maximum number of N bases in a read to keep it': 80,
'Trim Ns on ends of reads': True,
'NextSeq-specific quality trimming': False,
'Number of threads used': 15
}
}
trim_cmd = QiitaCommand(
'Atropos v1.1.24', "Sequence QC - adapter trimming", trim,
req_params, opt_params, outputs, dflt_param_set)
| {
"content_hash": "bdb0e2e29921b31d38fd27e13c44795a",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 73,
"avg_line_length": 37.58695652173913,
"alnum_prop": 0.6396761133603239,
"repo_name": "antgonza/qp-shotgun",
"id": "cf640d1a189028a4cc9052ed4294d2c967027426",
"size": "2080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qp_shogun/trim/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "91805"
},
{
"name": "Python",
"bytes": "95892"
}
],
"symlink_target": ""
} |
'''
ghmiles unit tests
:copyright: Copyright 2011 Barthelemy Dagenais
:license: BSD, see LICENSE for details
'''
import unittest
import ghmiles
class TestMilestonesModel(unittest.TestCase):
def test_key_label(self):
self.assertEqual(ghmiles.label_key('1.0'),'00001.00000')
self.assertEqual(ghmiles.label_key('12'),'00012')
self.assertEqual(ghmiles.label_key('v3.35.67e-234b'),
'v00003.00035.00067e-00234b')
def test_get_milestone_labels(self):
labels = list(
ghmiles.get_milestone_labels('bartdag/py4j',
ghmiles.MILESTONE_LABEL_V,
False))
self.assertTrue(len(labels) >= 7)
self.assertEqual('v0.1',labels[0])
self.assertEqual('v0.7',labels[6])
def test_get_intel_milestone_labels(self):
(project_labels, labels) = ghmiles.get_intel_milestone_labels('bartdag/py4j', False)
self.assertTrue(len(labels) > len(project_labels))
self.assertTrue(len(project_labels) >= 7)
self.assertEqual('v0.1',project_labels[0])
self.assertEqual('v0.7',project_labels[6])
def test_get_milestones(self):
milestones = ghmiles.get_milestones('bartdag/py4j',
ghmiles.MILESTONE_LABEL_V, False)
milestone1 = milestones.next()
self.assertEqual(milestone1.title, 'v0.1')
self.assertAlmostEqual(milestone1.progress, 100.0)
self.assertEqual(milestone1.total, 9)
self.assertEqual(milestone1.opened, 0)
issues_title = (issue.title for issue in milestone1.issues)
self.assertTrue(u'Write a getting started tutorial' in issues_title)
def test_get_milestones_from_labels(self):
milestones = list(ghmiles.get_milestones_from_labels('bartdag/py4j',
['v0.2','v0.4']))
self.assertEqual(milestones[0].total, 12)
self.assertEqual(milestones[1].total, 14)
def test_get_simple_html_page(self):
milestones = list(ghmiles.get_milestones_from_labels('bartdag/py4j',
['v0.2','v0.1']))
html = ghmiles.get_simple_html_page(milestones, 'Py4J')
self.assertTrue(html.startswith('<!DOCTYPE html PUBLIC'))
self.assertTrue(html.endswith('</html>'))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "beeec6f4e02958b0ecaefdb47e188b6a",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 92,
"avg_line_length": 37.61904761904762,
"alnum_prop": 0.6206751054852321,
"repo_name": "bartdag/ghmiles",
"id": "5806b3fd86e33af53e945c30d1704f241647c470",
"size": "2370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20204"
}
],
"symlink_target": ""
} |
from oauth import OAuth
class Fitbit(OAuth):
def __init__(self, consumer_key, consumer_secret):
self.request_token_url = "http://api.fitbit.com/oauth/request_token"
self.authorize_url = "http://www.fitbit.com/oauth/authorize"
self.access_token_url = "http://api.fitbit.com/oauth/access_token"
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
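# Hypothetical usage (the rest of the OAuth dance depends on the OAuth base
# class in oauth.py, which is not shown here):
#   fitbit = Fitbit('your-consumer-key', 'your-consumer-secret')
#   # fitbit.request_token_url, fitbit.authorize_url and fitbit.access_token_url
#   # now hold the three Fitbit OAuth endpoints defined above.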
| {
"content_hash": "9de84d28f79fdf5066071fb4d4a902da",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 70,
"avg_line_length": 34.45454545454545,
"alnum_prop": 0.7335092348284961,
"repo_name": "joushx/OAuth.py",
"id": "b9e4bbff867bebc652681e65d1486ef86e4426a0",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fitbit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6276"
}
],
"symlink_target": ""
} |
from decimal import Decimal
import logging
from django.core.management.base import LabelCommand
from corehq.apps.accounting.models import Currency
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.sms.test_backend import TestSMSBackend
from corehq.apps.smsbillables.models import SmsGatewayFee, SmsGatewayFeeCriteria
logger = logging.getLogger('accounting')
def bootstrap_test_gateway(orm):
default_currency = (orm['accounting.Currency'] if orm else Currency).get_default()
sms_gateway_fee_class = orm['smsbillables.SmsGatewayFee'] if orm else SmsGatewayFee
sms_gateway_fee_criteria_class = orm['smsbillables.SmsGatewayFeeCriteria'] if orm else SmsGatewayFeeCriteria
SmsGatewayFee.create_new(
TestSMSBackend.get_api_id(),
INCOMING,
Decimal('0.0'),
currency=default_currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
SmsGatewayFee.create_new(
TestSMSBackend.get_api_id(),
OUTGOING,
Decimal('0.0'),
currency=default_currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
logger.info("Updated Test gateway fees.")
class Command(LabelCommand):
help = "bootstrap Test SMS backend gateway fees"
args = ""
label = ""
def handle(self, *args, **options):
bootstrap_test_gateway(None)
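# Illustrative invocation: Django names management commands after their module,
# so this would typically be run as
#   python manage.py bootstrap_test_gateway
# which calls bootstrap_test_gateway(None) and creates the two zero-cost
# gateway fees defined above.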
| {
"content_hash": "5de8ad7af2fe71cfc44ac0912d50a91c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 112,
"avg_line_length": 30.595744680851062,
"alnum_prop": 0.7107093184979137,
"repo_name": "puttarajubr/commcare-hq",
"id": "6186968becdfbeba283324f2497bc3a6dc524ebc",
"size": "1438",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "corehq/apps/smsbillables/management/commands/bootstrap_test_gateway.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib.contenttypes.management import update_all_contenttypes
from django.contrib.contenttypes.models import ContentType
from django.db import models, migrations
def migrate_therapy(apps, schema_editor):
update_all_contenttypes(interactive=False)
ct = ContentType.objects.get_by_natural_key("services", "Therapy")
ct_wt = ContentType.objects.get_by_natural_key("services", "WorkTherapy")
ct_wtm = ContentType.objects.get_by_natural_key("services", "WorkTherapyMeeting")
ct_cw = ContentType.objects.get_by_natural_key("services", "CommunityWork")
Therapy = apps.get_model("services", "Therapy")
WorkTherapy = apps.get_model("services", "WorkTherapy")
WorkTherapyMeeting = apps.get_model("services", "WorkTherapyMeeting")
CommunityWork = apps.get_model("services", "CommunityWork")
count = 0
for service in WorkTherapy.objects.all():
if service.content_type_id == ct_wt.id:
s = Therapy()
s.work_therapy = True
s.encounter = service.encounter
s.title = service.title
s.created = service.created
s.modified = service.modified
s.content_type_id = ct.id
s.save()
service.delete()
count += 1
for service in WorkTherapyMeeting.objects.all():
if service.content_type_id == ct_wtm.id:
s = Therapy()
s.therapy_meeting = True
s.encounter = service.encounter
s.title = service.title
s.created = service.created
s.modified = service.modified
s.content_type_id = ct.id
s.save()
service.delete()
count += 1
for service in CommunityWork.objects.all():
if service.content_type_id == ct_cw.id:
s = Therapy()
s.community_work = True
s.encounter = service.encounter
s.title = service.title
s.created = service.created
s.modified = service.modified
s.content_type_id = ct.id
s.save()
service.delete()
count += 1
print 'Successfully migrated %d services of type Therapy' % count
def reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('services', '0028_auto_20180530_1159'),
]
operations = [
migrations.CreateModel(
name='Therapy',
fields=[
('service_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='services.Service')),
('work_therapy', models.BooleanField(default=False, verbose_name='a) Pracovn\xed terapie')),
('therapy_meeting', models.BooleanField(default=False, verbose_name='b) Sch\u016fzka pracovn\xed terapie')),
('community_work', models.BooleanField(default=False, verbose_name='c) Obecn\u011b prosp\u011b\u0161n\xe9 pr\xe1ce')),
],
options={
'verbose_name': 'Pracovn\xed terapie (samospr\xe1va)',
'verbose_name_plural': 'Pracovn\xed terapie (samospr\xe1va)',
},
bases=('services.service',),
),
migrations.RunPython(code=migrate_therapy, reverse_code=reverse),
]
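# Illustrative invocation (assuming a standard Django project layout): applying
# this migration with
#   python manage.py migrate services 0029_therapy
# runs migrate_therapy; rolling it back is effectively a no-op because
# reverse() does nothing.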
| {
"content_hash": "267e63bd12b0c3697723c0426a0f1f28",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 149,
"avg_line_length": 38.31818181818182,
"alnum_prop": 0.6079478054567022,
"repo_name": "fragaria/BorIS",
"id": "566209441a573594893d7d9e2e02a2323db74f93",
"size": "3396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boris/services/migrations/0029_therapy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "302491"
},
{
"name": "HTML",
"bytes": "148721"
},
{
"name": "JavaScript",
"bytes": "208867"
},
{
"name": "Python",
"bytes": "396225"
}
],
"symlink_target": ""
} |
"""Interface to GNU Privacy Guard (GnuPG)
!!! This was renamed to gpginterface.py.
Please refer to duplicity's README for the reason. !!!
gpginterface is a Python module to interface with GnuPG which is based on
GnuPGInterface by Frank J. Tobin.
It concentrates on interacting with GnuPG via filehandles,
providing access to control GnuPG via versatile and extensible means.
This module is based on GnuPG::Interface, a Perl module by the same author.
Normally, using this module will involve creating a
GnuPG object, setting some options in its 'options' data member
(which is of type Options), creating some pipes
to talk with GnuPG, and then calling the run() method, which will
connect those pipes to the GnuPG process. run() returns a
Process object, which contains the filehandles to talk to GnuPG with.
Example code:
>>> import gpginterface
>>>
>>> plaintext = "Three blind mice"
>>> passphrase = "This is the passphrase"
>>>
>>> gnupg = gpginterface.GnuPG()
>>> gnupg.options.armor = 1
>>> gnupg.options.meta_interactive = 0
>>> gnupg.options.extra_args.append('--no-secmem-warning')
>>>
>>> # Normally we might specify something in
>>> # gnupg.options.recipients, like
>>> # gnupg.options.recipients = [ '0xABCD1234', 'bob@foo.bar' ]
>>> # but since we're doing symmetric-only encryption, it's not needed.
>>> # If you are doing standard, public-key encryption, using
>>> # --encrypt, you will need to specify recipients before
>>> # calling gnupg.run()
>>>
>>> # First we'll encrypt the plaintext input symmetrically
>>> p1 = gnupg.run(['--symmetric'],
... create_fhs=['stdin', 'stdout', 'passphrase'])
>>>
>>> p1.handles['passphrase'].write(passphrase)
>>> p1.handles['passphrase'].close()
>>>
>>> p1.handles['stdin'].write(plaintext)
>>> p1.handles['stdin'].close()
>>>
>>> ciphertext = p1.handles['stdout'].read()
>>> p1.handles['stdout'].close()
>>>
>>> # process cleanup
>>> p1.wait()
>>>
>>> # Now we'll decrypt what we just encrypted,
>>> # using the convenience method to get the
>>> # passphrase to GnuPG
>>> gnupg.passphrase = passphrase
>>>
>>> p2 = gnupg.run(['--decrypt'], create_fhs=['stdin', 'stdout'])
>>>
>>> p2.handles['stdin'].write(ciphertext)
>>> p2.handles['stdin'].close()
>>>
>>> decrypted_plaintext = p2.handles['stdout'].read()
>>> p2.handles['stdout'].close()
>>>
>>> # process cleanup
>>> p2.wait()
>>>
>>> # Our decrypted plaintext:
>>> decrypted_plaintext
'Three blind mice'
>>>
>>> # ...and see it's the same as what we originally encrypted
>>> assert decrypted_plaintext == plaintext, \
"GnuPG decrypted output does not match original input"
>>>
>>>
>>> ##################################################
>>> # Now let's try using run()'s attach_fhs parameter
>>>
>>> # we're assuming we're running on a unix...
>>> input = open('/etc/motd')
>>>
>>> p1 = gnupg.run(['--symmetric'], create_fhs=['stdout'],
... attach_fhs={'stdin': input})
>>>
>>> # GnuPG will read the stdin from /etc/motd
>>> ciphertext = p1.handles['stdout'].read()
>>>
>>> # process cleanup
>>> p1.wait()
>>>
>>> # Now let's run the output through GnuPG
>>> # We'll write the output to a temporary file,
>>> import tempfile
>>> temp = tempfile.TemporaryFile()
>>>
>>> p2 = gnupg.run(['--decrypt'], create_fhs=['stdin'],
... attach_fhs={'stdout': temp})
>>>
>>> # give GnuPG our encrypted stuff from the first run
>>> p2.handles['stdin'].write(ciphertext)
>>> p2.handles['stdin'].close()
>>>
>>> # process cleanup
>>> p2.wait()
>>>
>>> # rewind the tempfile and see what GnuPG gave us
>>> temp.seek(0)
>>> decrypted_plaintext = temp.read()
>>>
>>> # compare what GnuPG decrypted with our original input
>>> input.seek(0)
>>> input_data = input.read()
>>>
>>> assert decrypted_plaintext == input_data, \
"GnuPG decrypted output does not match original input"
To do things like public-key encryption, simply do something
like:
gnupg.passphrase = 'My passphrase'
gnupg.options.recipients = [ 'bob@foobar.com' ]
gnupg.run( ['--sign', '--encrypt'], create_fhs=..., attach_fhs=...)
Here is an example of subclassing gpginterface.GnuPG,
so that it has an encrypt_string() method that returns
ciphertext.
>>> import gpginterface
>>>
>>> class MyGnuPG(gpginterface.GnuPG):
...
... def __init__(self):
... gpginterface.GnuPG.__init__(self)
... self.setup_my_options()
...
... def setup_my_options(self):
... self.options.armor = 1
... self.options.meta_interactive = 0
... self.options.extra_args.append('--no-secmem-warning')
...
... def encrypt_string(self, string, recipients):
... gnupg.options.recipients = recipients # a list!
...
... proc = gnupg.run(['--encrypt'], create_fhs=['stdin', 'stdout'])
...
... proc.handles['stdin'].write(string)
... proc.handles['stdin'].close()
...
... output = proc.handles['stdout'].read()
... proc.handles['stdout'].close()
...
... proc.wait()
... return output
...
>>> gnupg = MyGnuPG()
>>> ciphertext = gnupg.encrypt_string("The secret", ['0x260C4FA3'])
>>>
>>> # just a small sanity test here for doctest
>>> import types
>>> assert isinstance(ciphertext, types.StringType), \
"What GnuPG gave back is not a string!"
Here is an example of generating a key:
>>> import gpginterface
>>> gnupg = gpginterface.GnuPG()
>>> gnupg.options.meta_interactive = 0
>>>
>>> # We will be creative and use the logger filehandle to capture
>>> # what GnuPG says this time, instead of stderr; no stdout to listen to,
>>> # but we capture logger to suppress the dry-run command.
>>> # We also have to capture stdout since otherwise doctest complains;
>>> # Normally you can let stdout through when generating a key.
>>>
>>> proc = gnupg.run(['--gen-key'], create_fhs=['stdin', 'stdout',
... 'logger'])
>>>
>>> proc.handles['stdin'].write('''Key-Type: DSA
... Key-Length: 1024
... # We are only testing syntax this time, so dry-run
... %dry-run
... Subkey-Type: ELG-E
... Subkey-Length: 1024
... Name-Real: Joe Tester
... Name-Comment: with stupid passphrase
... Name-Email: joe@foo.bar
... Expire-Date: 2y
... Passphrase: abc
... %pubring foo.pub
... %secring foo.sec
... ''')
>>>
>>> proc.handles['stdin'].close()
>>>
>>> report = proc.handles['logger'].read()
>>> proc.handles['logger'].close()
>>>
>>> proc.wait()
COPYRIGHT:
Copyright (C) 2001 Frank J. Tobin, ftobin@neverending.org
LICENSE:
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
or see http://www.gnu.org/copyleft/lesser.html
"""
import os
import sys
import fcntl
from duplicity import log
try:
import threading
except ImportError:
import dummy_threading #@UnusedImport
log.Warn(_("Threading not available -- zombie processes may appear"))
__author__ = "Frank J. Tobin, ftobin@neverending.org"
__version__ = "0.3.2"
__revision__ = "$Id: GnuPGInterface.py,v 1.6 2009/06/06 17:35:19 loafman Exp $"
# "standard" filehandles attached to processes
_stds = [ 'stdin', 'stdout', 'stderr' ]
# the permissions each type of fh needs to be opened with
_fd_modes = { 'stdin': 'w',
'stdout': 'r',
'stderr': 'r',
'passphrase': 'w',
'command': 'w',
'logger': 'r',
'status': 'r'
}
# correlation between handle names and the arguments we'll pass
_fd_options = { 'passphrase': '--passphrase-fd',
'logger': '--logger-fd',
'status': '--status-fd',
'command': '--command-fd' }
class GnuPG:
"""Class instances represent GnuPG.
Instance attributes of a GnuPG object are:
* call -- string to call GnuPG with. Defaults to "gpg"
* passphrase -- Since it is a common operation
to pass in a passphrase to GnuPG,
and working with the passphrase filehandle mechanism directly
can be mundane, if set, the passphrase attribute
works in a special manner. If the passphrase attribute is set,
and no passphrase file object is sent in to run(),
    then the GnuPG instance will take care of sending the passphrase to
    the GnuPG executable instead of having the user send it in manually.
* options -- Object of type gpginterface.Options.
Attribute-setting in options determines
the command-line options used when calling GnuPG.
"""
def __init__(self):
self.call = 'gpg'
self.passphrase = None
self.options = Options()
def run(self, gnupg_commands, args=None, create_fhs=None, attach_fhs=None):
"""Calls GnuPG with the list of string commands gnupg_commands,
complete with prefixing dashes.
For example, gnupg_commands could be
'["--sign", "--encrypt"]'
Returns a gpginterface.Process object.
args is an optional list of GnuPG command arguments (not options),
such as keyID's to export, filenames to process, etc.
create_fhs is an optional list of GnuPG filehandle
names that will be set as keys of the returned Process object's
'handles' attribute. The generated filehandles can be used
to communicate with GnuPG via standard input, standard output,
the status-fd, passphrase-fd, etc.
Valid GnuPG filehandle names are:
* stdin
* stdout
* stderr
* status
          * passphrase
* command
* logger
The purpose of each filehandle is described in the GnuPG
documentation.
attach_fhs is an optional dictionary with GnuPG filehandle
names mapping to opened files. GnuPG will read or write
to the file accordingly. For example, if 'my_file' is an
opened file and 'attach_fhs[stdin] is my_file', then GnuPG
will read its standard input from my_file. This is useful
if you want GnuPG to read/write to/from an existing file.
For instance:
f = open("encrypted.gpg")
gnupg.run(["--decrypt"], attach_fhs={'stdin': f})
Using attach_fhs also helps avoid system buffering
issues that can arise when using create_fhs, which
can cause the process to deadlock.
If not mentioned in create_fhs or attach_fhs,
GnuPG filehandles which are a std* (stdin, stdout, stderr)
are defaulted to the running process' version of handle.
Otherwise, that type of handle is simply not used when calling GnuPG.
For example, if you do not care about getting data from GnuPG's
status filehandle, simply do not specify it.
run() returns a Process() object which has a 'handles'
which is a dictionary mapping from the handle name
(such as 'stdin' or 'stdout') to the respective
newly-created FileObject connected to the running GnuPG process.
For instance, if the call was
process = gnupg.run(["--decrypt"], stdin=1)
after run returns 'process.handles["stdin"]'
is a FileObject connected to GnuPG's standard input,
and can be written to.
"""
if args == None: args = []
if create_fhs == None: create_fhs = []
if attach_fhs == None: attach_fhs = {}
for std in _stds:
if not attach_fhs.has_key(std) \
and std not in create_fhs:
attach_fhs.setdefault(std, getattr(sys, std))
handle_passphrase = 0
if self.passphrase != None \
and not attach_fhs.has_key('passphrase') \
and 'passphrase' not in create_fhs:
handle_passphrase = 1
create_fhs.append('passphrase')
process = self._attach_fork_exec(gnupg_commands, args,
create_fhs, attach_fhs)
if handle_passphrase:
passphrase_fh = process.handles['passphrase']
passphrase_fh.write( self.passphrase )
passphrase_fh.close()
del process.handles['passphrase']
return process
def _attach_fork_exec(self, gnupg_commands, args, create_fhs, attach_fhs):
"""This is like run(), but without the passphrase-helping
(note that run() calls this)."""
process = Process()
for fh_name in create_fhs + attach_fhs.keys():
if not _fd_modes.has_key(fh_name):
raise KeyError, \
"unrecognized filehandle name '%s'; must be one of %s" \
% (fh_name, _fd_modes.keys())
for fh_name in create_fhs:
# make sure the user doesn't specify a filehandle
# to be created *and* attached
if attach_fhs.has_key(fh_name):
raise ValueError, \
"cannot have filehandle '%s' in both create_fhs and attach_fhs" \
% fh_name
pipe = os.pipe()
            # fix by drt@un.bewaff.net noting
            # that pipes are unidirectional on some systems,
            # so we have to 'turn the pipe around'
            # if we are writing
if _fd_modes[fh_name] == 'w': pipe = (pipe[1], pipe[0])
process._pipes[fh_name] = Pipe(pipe[0], pipe[1], 0)
for fh_name, fh in attach_fhs.items():
process._pipes[fh_name] = Pipe(fh.fileno(), fh.fileno(), 1)
process.pid = os.fork()
if process.pid != 0:
# start a threaded_waitpid on the child
process.thread = threading.Thread(target=threaded_waitpid,
name="wait%d" % process.pid,
args=(process,))
process.thread.start()
if process.pid == 0: self._as_child(process, gnupg_commands, args)
return self._as_parent(process)
def _as_parent(self, process):
"""Stuff run after forking in parent"""
for k, p in process._pipes.items():
if not p.direct:
os.close(p.child)
process.handles[k] = os.fdopen(p.parent, _fd_modes[k])
# user doesn't need these
del process._pipes
return process
def _as_child(self, process, gnupg_commands, args):
"""Stuff run after forking in child"""
# child
for std in _stds:
p = process._pipes[std]
os.dup2( p.child, getattr(sys, "__%s__" % std).fileno() )
for k, p in process._pipes.items():
if p.direct and k not in _stds:
# we want the fh to stay open after execing
fcntl.fcntl( p.child, fcntl.F_SETFD, 0 )
fd_args = []
for k, p in process._pipes.items():
# set command-line options for non-standard fds
if k not in _stds:
fd_args.extend([ _fd_options[k], "%d" % p.child ])
if not p.direct: os.close(p.parent)
command = [ self.call ] + fd_args + self.options.get_args() \
+ gnupg_commands + args
os.execvp( command[0], command )
class Pipe:
"""simple struct holding stuff about pipes we use"""
def __init__(self, parent, child, direct):
self.parent = parent
self.child = child
self.direct = direct
class Options:
"""Objects of this class encompass options passed to GnuPG.
This class is responsible for determining command-line arguments
which are based on options. It can be said that a GnuPG
object has-a Options object in its options attribute.
Attributes which correlate directly to GnuPG options:
Each option here defaults to false or None, and is described in
GnuPG documentation.
Booleans (set these attributes to booleans)
* armor
* no_greeting
* no_verbose
* quiet
* batch
* always_trust
* rfc1991
* openpgp
* force_v3_sigs
* no_options
    * textmode
    * verbose
Strings (set these attributes to strings)
* homedir
* default_key
* comment
* compress_algo
* options
Lists (set these attributes to lists)
* recipients (***NOTE*** plural of 'recipient')
    * encrypt_to
    * hidden_recipients
Meta options
Meta options are options provided by this module that do
not correlate directly to any GnuPG option by name,
but are rather bundle of options used to accomplish
a specific goal, such as obtaining compatibility with PGP 5.
The actual arguments each of these reflects may change with time. Each
defaults to false unless otherwise specified.
meta_pgp_5_compatible -- If true, arguments are generated to try
to be compatible with PGP 5.x.
meta_pgp_2_compatible -- If true, arguments are generated to try
to be compatible with PGP 2.x.
meta_interactive -- If false, arguments are generated to try to
help the using program use GnuPG in a non-interactive
environment, such as CGI scripts. Default is true.
extra_args -- Extra option arguments may be passed in
via the attribute extra_args, a list.
>>> import gpginterface
>>>
>>> gnupg = gpginterface.GnuPG()
>>> gnupg.options.armor = 1
>>> gnupg.options.recipients = ['Alice', 'Bob']
>>> gnupg.options.extra_args = ['--no-secmem-warning']
>>>
>>> # no need for users to call this normally; just for show here
>>> gnupg.options.get_args()
['--armor', '--recipient', 'Alice', '--recipient', 'Bob', '--no-secmem-warning']
"""
def __init__(self):
# booleans
self.armor = 0
self.no_greeting = 0
self.verbose = 0
self.no_verbose = 0
self.quiet = 0
self.batch = 0
self.always_trust = 0
self.rfc1991 = 0
self.openpgp = 0
self.force_v3_sigs = 0
self.no_options = 0
self.textmode = 0
# meta-option booleans
self.meta_pgp_5_compatible = 0
self.meta_pgp_2_compatible = 0
self.meta_interactive = 1
# strings
self.homedir = None
self.default_key = None
self.comment = None
self.compress_algo = None
self.options = None
# lists
self.encrypt_to = []
self.recipients = []
self.hidden_recipients = []
# miscellaneous arguments
self.extra_args = []
def get_args( self ):
"""Generate a list of GnuPG arguments based upon attributes."""
return self.get_meta_args() + self.get_standard_args() + self.extra_args
def get_standard_args( self ):
"""Generate a list of standard, non-meta or extra arguments"""
args = []
if self.homedir != None: args.extend( [ '--homedir', self.homedir ] )
if self.options != None: args.extend( [ '--options', self.options ] )
if self.comment != None: args.extend( [ '--comment', self.comment ] )
if self.compress_algo != None: args.extend( [ '--compress-algo', self.compress_algo ] )
if self.default_key != None: args.extend( [ '--default-key', self.default_key ] )
if self.no_options: args.append( '--no-options' )
if self.armor: args.append( '--armor' )
if self.textmode: args.append( '--textmode' )
if self.no_greeting: args.append( '--no-greeting' )
if self.verbose: args.append( '--verbose' )
if self.no_verbose: args.append( '--no-verbose' )
if self.quiet: args.append( '--quiet' )
if self.batch: args.append( '--batch' )
if self.always_trust: args.append( '--always-trust' )
if self.force_v3_sigs: args.append( '--force-v3-sigs' )
if self.rfc1991: args.append( '--rfc1991' )
if self.openpgp: args.append( '--openpgp' )
for r in self.recipients: args.extend( [ '--recipient', r ] )
for r in self.hidden_recipients: args.extend( [ '--hidden-recipient', r ] )
for r in self.encrypt_to: args.extend( [ '--encrypt-to', r ] )
return args
def get_meta_args( self ):
"""Get a list of generated meta-arguments"""
args = []
if self.meta_pgp_5_compatible: args.extend( [ '--compress-algo', '1',
'--force-v3-sigs'
] )
if self.meta_pgp_2_compatible: args.append( '--rfc1991' )
if not self.meta_interactive: args.extend( [ '--batch', '--no-tty' ] )
return args
class Process:
"""Objects of this class encompass properties of a GnuPG
process spawned by GnuPG.run().
# gnupg is a GnuPG object
process = gnupg.run( [ '--decrypt' ], stdout = 1 )
out = process.handles['stdout'].read()
...
os.waitpid( process.pid, 0 )
Data Attributes
handles -- This is a map of filehandle-names to
the file handles, if any, that were requested via run() and hence
are connected to the running GnuPG process. Valid names
of this map are only those handles that were requested.
pid -- The PID of the spawned GnuPG process.
    Useful to know, since one should call
os.waitpid() to clean up the process, especially
if multiple calls are made to run().
"""
def __init__(self):
self._pipes = {}
self.handles = {}
self.pid = None
self._waited = None
self.thread = None
self.returned = None
def wait(self):
"""
Wait on threaded_waitpid to exit and examine results.
Will raise an IOError if the process exits non-zero.
"""
if self.returned == None:
self.thread.join()
if self.returned != 0:
raise IOError, "GnuPG exited non-zero, with code %d" % (self.returned >> 8)
def threaded_waitpid(process):
"""
When started as a thread with the Process object, thread
will execute an immediate waitpid() against the process
pid and will collect the process termination info. This
will allow us to reap child processes as soon as possible,
thus freeing resources quickly.
"""
try:
process.returned = os.waitpid(process.pid, 0)[1]
except:
log.Debug(_("GPG process %d terminated before wait()") % process.pid)
process.returned = 0
def _run_doctests():
import doctest, gpginterface #@UnresolvedImport
    return doctest.testmod(gpginterface)
# deprecated
GnuPGInterface = GnuPG
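# Illustrative usage sketch (not part of the original module): a minimal example
# of how the GnuPG, Options and Process classes above fit together for a simple
# symmetric encryption call. The file names and passphrase are placeholders and
# a working `gpg` binary on the PATH is assumed.
def _usage_example(plain_path='plain.txt', cipher_path='cipher.gpg'):
    """Encrypt plain_path to cipher_path with a hard-coded passphrase."""
    gnupg = GnuPG()
    gnupg.passphrase = 'example-passphrase'
    gnupg.options.armor = 1
    gnupg.options.meta_interactive = 0
    plain = open(plain_path, 'r')
    cipher = open(cipher_path, 'w')
    # attach_fhs wires existing file objects to GnuPG's stdin/stdout; the
    # passphrase filehandle is created and fed automatically by run().
    process = gnupg.run(['--symmetric'],
                        attach_fhs={'stdin': plain, 'stdout': cipher})
    process.wait()
    plain.close()
    cipher.close()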
if __name__ == '__main__':
_run_doctests()
| {
"content_hash": "3b4d9c6df624b22eec0c682dab4256c8",
"timestamp": "",
"source": "github",
"line_count": 689,
"max_line_length": 95,
"avg_line_length": 33.61828737300436,
"alnum_prop": 0.6080386823813841,
"repo_name": "yasoob/PythonRSSReader",
"id": "351ccd5115aa8e306ee6fd4d38bac1f3308c36ee",
"size": "23163",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/dist-packages/duplicity/gpginterface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "58615"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "HTML",
"bytes": "1638"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "22979347"
},
{
"name": "Shell",
"bytes": "5224"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import frappe, unittest
from frappe.model.db_query import DatabaseQuery
from frappe.desk.reportview import get_filters_cond
class TestReportview(unittest.TestCase):
def test_basic(self):
self.assertTrue({"name":"DocType"} in DatabaseQuery("DocType").execute(limit_page_length=None))
def test_fields(self):
self.assertTrue({"name":"DocType", "issingle":0} \
in DatabaseQuery("DocType").execute(fields=["name", "issingle"], limit_page_length=None))
def test_filters_1(self):
self.assertFalse({"name":"DocType"} \
in DatabaseQuery("DocType").execute(filters=[["DocType", "name", "like", "J%"]]))
def test_filters_2(self):
self.assertFalse({"name":"DocType"} \
in DatabaseQuery("DocType").execute(filters=[{"name": ["like", "J%"]}]))
def test_filters_3(self):
self.assertFalse({"name":"DocType"} \
in DatabaseQuery("DocType").execute(filters={"name": ["like", "J%"]}))
def test_filters_4(self):
self.assertTrue({"name":"DocField"} \
in DatabaseQuery("DocType").execute(filters={"name": "DocField"}))
def test_or_filters(self):
data = DatabaseQuery("DocField").execute(
filters={"parent": "DocType"}, fields=["fieldname", "fieldtype"],
or_filters=[{"fieldtype":"Table"}, {"fieldtype":"Select"}])
self.assertTrue({"fieldtype":"Table", "fieldname":"fields"} in data)
self.assertTrue({"fieldtype":"Select", "fieldname":"document_type"} in data)
self.assertFalse({"fieldtype":"Check", "fieldname":"issingle"} in data)
def test_between_filters(self):
""" test case to check between filter for date fields """
frappe.db.sql("delete from tabEvent")
# create events to test the between operator filter
todays_event = create_event()
event = create_event(starts_on="2016-07-06 12:00:00")
		# if no values are passed in the filter then today's event should be returned
data = DatabaseQuery("Event").execute(
filters={"starts_on": ["between", None]}, fields=["name"])
self.assertTrue({ "name": todays_event.name } in data)
self.assertTrue({ "name": event.name } not in data)
# if both from and to_date values are passed
data = DatabaseQuery("Event").execute(
filters={"starts_on": ["between", ["2016-07-05 12:00:00", "2016-07-07 12:00:00"]]},
fields=["name"])
self.assertTrue({ "name": event.name } in data)
self.assertTrue({ "name": todays_event.name } not in data)
# if only one value is passed in the filter
data = DatabaseQuery("Event").execute(
filters={"starts_on": ["between", ["2016-07-05 12:00:00"]]},
fields=["name"])
self.assertTrue({ "name": todays_event.name } in data)
self.assertTrue({ "name": event.name } in data)
def test_ignore_permissions_for_get_filters_cond(self):
frappe.set_user('test1@example.com')
self.assertRaises(frappe.PermissionError, get_filters_cond, 'DocType', dict(istable=1), [])
self.assertTrue(get_filters_cond('DocType', dict(istable=1), [], ignore_permissions=True))
frappe.set_user('Administrator')
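# Illustrative only (not part of the original test suite): the three accepted
# shapes of the "between" filter exercised above, collected in one place. With
# no values the filter falls back to today's records; a single value filters
# from that date onward; two values bound the range on both sides.
def _between_filter_examples():
	"""Return sample filter dicts for DatabaseQuery("Event").execute(...)."""
	return [
		{"starts_on": ["between", None]},
		{"starts_on": ["between", ["2016-07-05 12:00:00"]]},
		{"starts_on": ["between", ["2016-07-05 12:00:00", "2016-07-07 12:00:00"]]},
	]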
def create_event(subject="_Test Event", starts_on=None):
""" create a test event """
from frappe.utils import get_datetime
event = frappe.get_doc({
"doctype": "Event",
"subject": subject,
"event_type": "Public",
"starts_on": get_datetime(starts_on),
}).insert(ignore_permissions=True)
return event | {
"content_hash": "a58299d0d2fc492a21548d90cbf3f63f",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 97,
"avg_line_length": 36.68888888888889,
"alnum_prop": 0.6856450635978195,
"repo_name": "rmehta/frappe",
"id": "5451ce263b130b848ffe2d09e5e5d9ee81e78784",
"size": "3402",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/tests/test_db_query.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "385819"
},
{
"name": "HTML",
"bytes": "207086"
},
{
"name": "JavaScript",
"bytes": "1664500"
},
{
"name": "Python",
"bytes": "1861322"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
"""A server that runs FFmpeg on files in Google Cloud Storage.
"""
from concurrent import futures
import logging
import os
import signal
import subprocess
import sys
import tempfile
import threading
import time
from typing import Iterator
from typing import List
from google.protobuf.duration_pb2 import Duration
import grpc
from worker.ffmpeg_worker_pb2 import ExitStatus
from worker.ffmpeg_worker_pb2 import FFmpegRequest
from worker.ffmpeg_worker_pb2 import FFmpegResponse
from worker.ffmpeg_worker_pb2 import ResourceUsage
from worker import ffmpeg_worker_pb2_grpc
MOUNT_POINT = '/buckets/'
_LOGGER = logging.getLogger(__name__)
_ABORT_EVENT = threading.Event()
_GRACE_PERIOD = 20
class FFmpegServicer(ffmpeg_worker_pb2_grpc.FFmpegServicer): # pylint: disable=too-few-public-methods
"""Implements FFmpeg service"""
    def transcode(self, request: FFmpegRequest, context) -> Iterator[FFmpegResponse]:
"""Runs ffmpeg according to the request's specification.
Args:
request: The FFmpeg request.
context: The gRPC context.
Yields:
            An FFmpegResponse carrying a line of ffmpeg's output; the final
            message instead carries the process exit status.
"""
_LOGGER.info('Starting transcode.')
cancel_event = threading.Event()
def handle_cancel():
_LOGGER.debug('Termination callback called.')
cancel_event.set()
context.add_callback(handle_cancel)
process = Process(['ffmpeg', *request.ffmpeg_arguments])
if cancel_event.is_set():
_LOGGER.info('Stopping transcode due to cancellation.')
return
if _ABORT_EVENT.is_set():
_LOGGER.info('Stopping transcode due to SIGTERM.')
context.abort(grpc.StatusCode.UNAVAILABLE, 'Request was killed with SIGTERM.')
return
for stdout_data in process:
if cancel_event.is_set():
_LOGGER.info('Killing ffmpeg process due to cancellation.')
process.terminate()
return
if _ABORT_EVENT.is_set():
_LOGGER.info('Killing ffmpeg process due to SIGTERM.')
process.terminate()
break
yield FFmpegResponse(log_line=stdout_data)
yield FFmpegResponse(exit_status=ExitStatus(
exit_code=process.returncode,
real_time=_time_to_duration(process.real_time),
resource_usage=ResourceUsage(ru_utime=process.rusage.ru_utime,
ru_stime=process.rusage.ru_stime,
ru_maxrss=process.rusage.ru_maxrss,
ru_ixrss=process.rusage.ru_ixrss,
ru_idrss=process.rusage.ru_idrss,
ru_isrss=process.rusage.ru_isrss,
ru_minflt=process.rusage.ru_minflt,
ru_majflt=process.rusage.ru_majflt,
ru_nswap=process.rusage.ru_nswap,
ru_inblock=process.rusage.ru_inblock,
ru_oublock=process.rusage.ru_oublock,
ru_msgsnd=process.rusage.ru_msgsnd,
ru_msgrcv=process.rusage.ru_msgrcv,
ru_nsignals=process.rusage.ru_nsignals,
ru_nvcsw=process.rusage.ru_nvcsw,
ru_nivcsw=process.rusage.ru_nivcsw)))
_LOGGER.info('Finished transcode.')
class Process:
"""
Wrapper class around subprocess.Popen class.
This class records the resource usage of the terminated process.
"""
def __init__(self, args):
self._start_time = None
self._args = args
self._subprocess = None
self.returncode = None
self.rusage = None
self.real_time = None
def __iter__(self):
self._start_time = time.time()
self._subprocess = subprocess.Popen(self._args,
env={'LD_LIBRARY_PATH': '/usr/grte/v4/lib64/'},
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
bufsize=1)
yield from self._subprocess.stdout
self.wait()
def terminate(self):
"""Terminates the process with a SIGTERM signal."""
if self._subprocess is None: # process has not been created yet
return
self._subprocess.terminate()
self.wait()
def wait(self):
"""Waits for the process to finish and collects exit status information."""
_, self.returncode, self.rusage = os.wait4(self._subprocess.pid, 0)
self.real_time = time.time() - self._start_time
def serve():
"""Starts the gRPC server"""
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
ffmpeg_worker_pb2_grpc.add_FFmpegServicer_to_server(FFmpegServicer(),
server)
server.add_insecure_port('[::]:8080')
def _sigterm_handler(*_):
        _LOGGER.warning('Received SIGTERM. Terminating...')
_ABORT_EVENT.set()
server.stop(_GRACE_PERIOD)
signal.signal(signal.SIGTERM, _sigterm_handler)
server.start()
server.wait_for_termination()
def _time_to_duration(seconds: float) -> Duration:
duration = Duration()
duration.FromNanoseconds(int(seconds * 10**9))
return duration
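# Illustrative client sketch (not part of the original worker): how a caller
# might stream responses from the transcode RPC above. The stub class name
# FFmpegStub, the target address and the media file names are assumptions
# based on the generated gRPC code; adjust them to the actual proto definitions.
def _example_client(target: str = 'localhost:8080') -> None:
    channel = grpc.insecure_channel(target)
    stub = ffmpeg_worker_pb2_grpc.FFmpegStub(channel)
    request = FFmpegRequest(ffmpeg_arguments=['-i', 'in.mp4', 'out.webm'])
    for response in stub.transcode(request):
        # Each message carries either a log line or the final exit status
        # (exit_status is a message field, so HasField applies).
        if response.HasField('exit_status'):
            print('exit code:', response.exit_status.exit_code)
        else:
            print(response.log_line, end='')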
if __name__ == '__main__':
os.chdir(MOUNT_POINT)
logging.basicConfig(level=logging.INFO)
serve()
| {
"content_hash": "f6d26ca2951333ec56a70333d6c1d258",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 102,
"avg_line_length": 37.42948717948718,
"alnum_prop": 0.5632813837985956,
"repo_name": "googleinterns/ffmpeg-on-cloud",
"id": "9de73247b1d3ad9e90d3cd658259c5d4016db1f7",
"size": "6495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "worker/ffmpeg_worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1876"
},
{
"name": "Python",
"bytes": "22876"
},
{
"name": "Shell",
"bytes": "393"
}
],
"symlink_target": ""
} |
import unittest
from provider.base import BaseProvider
from nose.tools import eq_, ok_
class TestBaseProvider(unittest.TestCase):
def test_base_init(self):
base = BaseProvider('client_id', 'client_secret', 'authorize_url', 'access_token_url', 'base_url', 'name', 'redirect_uri', state='test')
eq_(base.settings['client_id'], 'client_id')
eq_(base.settings['client_secret'], 'client_secret')
eq_(base.settings['authorize_url'], 'authorize_url')
eq_(base.settings['access_token_url'], 'access_token_url')
eq_(base.settings['base_url'], 'base_url')
eq_(base.settings['name'], 'name')
eq_(base.settings['redirect_uri'], 'redirect_uri')
eq_(base.settings['state'], 'test')
def test_base_init_no_state(self):
base = BaseProvider('client_id', 'client_secret', 'authorize_url', 'access_token_url', 'base_url', 'name', 'redirect_uri')
ok_(base.settings['state'] is None)
def test_do_no_mutate_state(self):
base = BaseProvider('client_id', 'client_secret', 'authorize_url', 'access_token_url', 'base_url', 'name', 'redirect_uri', state='test')
eq_(base.get_state(), 'test')
def test_mutate_state(self):
base = BaseProvider('client_id', 'client_secret', 'authorize_url', 'access_token_url', 'base_url', 'name', 'redirect_uri')
ok_(base.get_state() is not None)
ok_(base.settings['state'] is not None)
def test_get_user_info(self):
base = BaseProvider('client_id', 'client_secret', 'authorize_url', 'access_token_url', 'base_url', 'name', 'redirect_uri')
self.assertRaises(NotImplementedError, base.get_user_info, 'test')
def test_get_user(self):
base = BaseProvider('client_id', 'client_secret', 'authorize_url', 'access_token_url', 'base_url', 'name', 'redirect_uri')
self.assertRaises(NotImplementedError, base.get_user)
def test_get_email(self):
base = BaseProvider('client_id', 'client_secret', 'authorize_url', 'access_token_url', 'base_url', 'name', 'redirect_uri')
self.assertRaises(NotImplementedError, base.get_email) | {
"content_hash": "9d165b5ad4d21f3a569c5691e11d0fda",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 144,
"avg_line_length": 46.95652173913044,
"alnum_prop": 0.6314814814814815,
"repo_name": "marinewater/pyramid-social-auth",
"id": "cd3cd7d2051662bf52fbf257db9f007596f1fe7d",
"size": "2160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "provider/tests/test_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11701"
}
],
"symlink_target": ""
} |
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions as fxn
### data structures ###
### called/local plotting parameters ###
ps = fxn.pseasons
sl = fxn.gp_seasonlabels
fs = 24
fssml = 16
fw = fxn.gp_fluweeks
wklab = fxn.gp_weeklabels
### functions ###
### data files ###
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/My_Work/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
### program ###
# import data
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
# plot values
retrozOR = [d_classifzOR[s][0] for s in ps]
earlyzOR = [d_classifzOR[s][1] for s in ps]
peakweek = [fxn.peak_flu_week_index(d_incid53ls[s]) for s in ps]
print peakweek
# mean retro zOR vs peak timing
plt.plot(peakweek, retrozOR, marker = 'o', color = 'black', linestyle = 'None')
for s, x, y in zip(sl, peakweek, retrozOR):
plt.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
plt.ylabel('Mean Retrospective zOR', fontsize=fs)
plt.xlabel('Peak Week', fontsize=fs)
plt.xticks(range(fw)[::5], wklab[:fw:5], fontsize=fssml)
plt.yticks(fontsize=fssml)
plt.show()
# mean early warning zOR vs peak timing
plt.plot(peakweek, earlyzOR, marker = 'o', color = 'black', linestyle = 'None')
for s, x, y in zip(sl, peakweek, earlyzOR):
plt.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
plt.ylabel('Mean Early Warning zOR', fontsize=fs)
plt.xlabel('Peak Week', fontsize=fs)
plt.xticks(range(fw)[::5], wklab[:fw:5], fontsize=fssml)
plt.yticks(fontsize=fssml)
plt.show()
| {
"content_hash": "0d1bc868510534034a9359deb5079ed5",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 176,
"avg_line_length": 38.72727272727273,
"alnum_prop": 0.7057902973395931,
"repo_name": "eclee25/flu-SDI-exploratory-age",
"id": "9d4c482de28fc2226779902ba93d20019100e5a9",
"size": "3038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/create_fluseverity_figs/Supp_zOR_peaktime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1262523"
},
{
"name": "R",
"bytes": "301185"
}
],
"symlink_target": ""
} |
import os
import re
from unittest import mock
from xmlrpc import client as xmlrpc_client
from flexget.plugins.clients.rtorrent import RTorrent
torrent_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'private.torrent')
torrent_url = 'file:///%s' % torrent_file
torrent_info_hash = '09977FE761B8D293AD8A929CCAF2E9322D525A6C'
with open(torrent_file, 'rb') as tor_file:
torrent_raw = tor_file.read()
@mock.patch('flexget.plugins.clients.rtorrent.xmlrpc_client.ServerProxy')
class TestRTorrentClient:
def test_load(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.execute.throw.return_value = 0
mocked_proxy.load.raw_start.return_value = 0
client = RTorrent('http://localhost/RPC2')
resp = client.load(
torrent_raw,
fields={'priority': 3, 'directory': '/data/downloads', 'custom1': 'testing'},
start=True,
mkdir=True,
)
assert resp == 0
# Ensure mkdir was called
mocked_proxy.execute.throw.assert_called_with('', 'mkdir', '-p', '/data/downloads')
# Ensure load was called
assert mocked_proxy.load.raw_start.called
called_args = mocked_proxy.load.raw_start.call_args_list[0][0]
assert len(called_args) == 5
assert '' == called_args[0]
assert xmlrpc_client.Binary(torrent_raw) in called_args
fields = [p for p in called_args[2:]]
assert len(fields) == 3
# TODO: check the note in clients/rtorrent.py about this escaping.
        # The client should be fixed to work consistently on all python versions
# Calling re.escape here is a workaround so test works on python 3.7 and older versions
assert ('d.directory.set=' + re.escape('/data/downloads')) in fields
assert 'd.custom1.set=testing' in fields
assert 'd.priority.set=3' in fields
def test_torrent(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.system.multicall.return_value = [
['/data/downloads'],
['private.torrent'],
[torrent_info_hash],
['test_custom1'],
[123456],
]
client = RTorrent('http://localhost/RPC2')
torrent = client.torrent(
torrent_info_hash, fields=['custom1', 'down_rate']
) # Required fields should be added
assert isinstance(torrent, dict)
assert torrent.get('base_path') == '/data/downloads'
assert torrent.get('hash') == torrent_info_hash
assert torrent.get('custom1') == 'test_custom1'
assert torrent.get('name') == 'private.torrent'
assert torrent.get('down_rate') == 123456
assert mocked_proxy.system.multicall.called_with(
(
[
{'params': (torrent_info_hash,), 'methodName': 'd.base_path'},
{'params': (torrent_info_hash,), 'methodName': 'd.name'},
{'params': (torrent_info_hash,), 'methodName': 'd.hash'},
{'params': (torrent_info_hash,), 'methodName': 'd.custom1'},
{'params': (torrent_info_hash,), 'methodName': 'd.down.rate'},
]
)
)
def test_torrents(self, mocked_proxy):
mocked_proxy = mocked_proxy()
hash1 = '09977FE761AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
hash2 = '09977FE761BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB'
mocked_proxy.d.multicall.return_value = (
['/data/downloads', 'private.torrent', hash1, 'test_custom1'],
['/data/downloads', 'private.torrent', hash2, 'test_custom2'],
)
client = RTorrent('http://localhost/RPC2')
torrents = client.torrents(fields=['custom1']) # Required fields should be added
assert isinstance(torrents, list)
for torrent in torrents:
assert torrent.get('base_path') == '/data/downloads'
assert torrent.get('name') == 'private.torrent'
if torrent.get('hash') == hash1:
assert torrent.get('custom1') == 'test_custom1'
elif torrent.get('hash') == hash2:
assert torrent.get('custom1') == 'test_custom2'
else:
assert False, 'Invalid hash returned'
assert mocked_proxy.system.multicall.called_with(
(['main', 'd.directory_base=', 'd.name=', 'd.hash=', 'd.custom1='],)
)
def test_update(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.system.multicall.return_value = [[0]]
client = RTorrent('http://localhost/RPC2')
update_fields = {
'custom1': 'test_custom1',
'directory_base': '/data/downloads',
'priority': 3,
}
resp = client.update(torrent_info_hash, fields=update_fields)
assert resp == 0
assert mocked_proxy.system.multicall.called_with(
(
[
{
'params': (torrent_info_hash, '/data/downloads'),
'methodName': 'd.directory_base',
},
{'params': (torrent_info_hash, 'test_custom1'), 'methodName': 'd.custom1'},
{'params': (torrent_info_hash, '/data/downloads'), 'methodName': 'd.custom1'},
]
)
)
def test_delete(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.d.erase.return_value = 0
client = RTorrent('http://localhost/RPC2')
resp = client.delete(torrent_info_hash)
assert resp == 0
assert mocked_proxy.d.erase.called_with((torrent_info_hash,))
def test_move(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.system.multicall.return_value = [
['private.torrent'],
[torrent_info_hash],
['/data/downloads'],
]
mocked_proxy.move.return_value = 0
mocked_proxy.d.directory.set.return_value = 0
mocked_proxy.execute.throw.return_value = 0
client = RTorrent('http://localhost/RPC2')
client.move(torrent_info_hash, '/new/folder')
mocked_proxy.execute.throw.assert_has_calls(
[
mock.call('', 'mkdir', '-p', '/new/folder'),
mock.call('', 'mv', '-u', '/data/downloads', '/new/folder'),
]
)
def test_start(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.d.start.return_value = 0
client = RTorrent('http://localhost/RPC2')
resp = client.start(torrent_info_hash)
assert resp == 0
assert mocked_proxy.d.start.called_with((torrent_info_hash,))
def test_stop(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.d.close.return_value = 0
mocked_proxy.d.stop.return_value = 0
client = RTorrent('http://localhost/RPC2')
resp = client.stop(torrent_info_hash)
assert resp == 0
assert mocked_proxy.d.stop.called_with((torrent_info_hash,))
assert mocked_proxy.d.close.called_with((torrent_info_hash,))
@mock.patch('flexget.plugins.clients.rtorrent.RTorrent')
class TestRTorrentOutputPlugin:
config = (
"""
tasks:
test_add_torrent:
accept_all: yes
mock:
- {title: 'test', url: '"""
+ torrent_url
+ """'}
rtorrent:
action: add
start: yes
mkdir: yes
uri: http://localhost/SCGI
priority: high
path: /data/downloads
custom1: test_custom1
test_add_torrent_set:
accept_all: yes
set:
path: /data/downloads
custom1: test_custom1
priority: low
custom2: test_custom2
mock:
- {title: 'test', url: '"""
+ torrent_url
+ """'}
rtorrent:
action: add
start: no
mkdir: no
uri: http://localhost/SCGI
test_update:
accept_all: yes
set:
path: /data/downloads
priority: low
mock:
- {title: 'test', url: '"""
+ torrent_url
+ """', 'torrent_info_hash': '09977FE761B8D293AD8A929CCAF2E9322D525A6C'}
rtorrent:
action: update
uri: http://localhost/SCGI
custom1: test_custom1
test_update_path:
accept_all: yes
mock:
- {title: 'test', url: '"""
+ torrent_url
+ """', 'torrent_info_hash': '09977FE761B8D293AD8A929CCAF2E9322D525A6C'}
rtorrent:
action: update
custom1: test_custom1
uri: http://localhost/SCGI
path: /new/path
test_delete:
accept_all: yes
mock:
- {title: 'test', url: '"""
+ torrent_url
+ """', 'torrent_info_hash': '09977FE761B8D293AD8A929CCAF2E9322D525A6C'}
rtorrent:
action: delete
uri: http://localhost/SCGI
custom1: test_custom1
"""
)
def test_add(self, mocked_client, execute_task):
mocked_client = mocked_client()
mocked_client.load.return_value = 0
mocked_client.version = [0, 9, 4]
mocked_client.torrent.side_effect = [False, {'hash': torrent_info_hash}]
execute_task('test_add_torrent')
mocked_client.load.assert_called_with(
torrent_raw,
fields={'priority': 3, 'directory': '/data/downloads', 'custom1': 'test_custom1'},
start=True,
mkdir=True,
)
def test_add_set(self, mocked_client, execute_task):
mocked_client = mocked_client()
mocked_client.load.return_value = 0
mocked_client.version = [0, 9, 4]
mocked_client.torrent.side_effect = [False, {'hash': torrent_info_hash}]
execute_task('test_add_torrent_set')
mocked_client.load.assert_called_with(
torrent_raw,
fields={
'priority': 1,
'directory': '/data/downloads',
'custom1': 'test_custom1',
'custom2': 'test_custom2',
},
start=False,
mkdir=False,
)
def test_update(self, mocked_client, execute_task):
mocked_client = mocked_client()
mocked_client.version = [0, 9, 4]
mocked_client.update.return_value = 0
# ntpath complains on windows if base_path is a MagicMock
mocked_client.torrent.side_effect = [False, {'base_path': ''}]
execute_task('test_update')
mocked_client.update.assert_called_with(
torrent_info_hash, {'priority': 1, 'custom1': 'test_custom1'}
)
def test_update_path(self, mocked_client, execute_task):
mocked_client = mocked_client()
mocked_client.version = [0, 9, 4]
mocked_client.update.return_value = 0
mocked_client.move.return_value = 0
mocked_client.torrent.return_value = {'base_path': '/some/path'}
execute_task('test_update_path')
mocked_client.update.assert_called_with(torrent_info_hash, {'custom1': 'test_custom1'})
mocked_client.move.assert_called_with(torrent_info_hash, '/new/path')
def test_delete(self, mocked_client, execute_task):
mocked_client = mocked_client()
mocked_client.load.return_value = 0
mocked_client.version = [0, 9, 4]
mocked_client.delete.return_value = 0
execute_task('test_delete')
mocked_client.delete.assert_called_with(torrent_info_hash)
@mock.patch('flexget.plugins.clients.rtorrent.RTorrent')
class TestRTorrentInputPlugin:
config = """
tasks:
test_input:
accept_all: yes
from_rtorrent:
uri: http://localhost/RPC2
view: complete
fields:
- custom1
- custom3
- down_rate
"""
def test_input(self, mocked_client, execute_task):
mocked_client = mocked_client()
mocked_client.version = [0, 9, 4]
mocked_torrent = {
'name': 'private.torrent',
'hash': torrent_info_hash,
'base_path': '/data/downloads/private',
'custom1': 'test_custom1',
'custom3': 'test_custom3',
'down_rate': 123456,
}
mocked_client.torrents.return_value = [mocked_torrent, mocked_torrent]
task = execute_task('test_input')
mocked_client.torrents.assert_called_with(
'complete', fields=['custom1', 'custom3', 'down_rate']
)
assert len(task.all_entries) == 2
for entry in task.entries:
assert entry['url'] == 'http://localhost/RPC2/%s' % torrent_info_hash
assert entry['name'] == 'private.torrent'
assert entry['torrent_info_hash'] == torrent_info_hash
assert entry['path'] == '/data/downloads/private'
assert entry['custom1'] == 'test_custom1'
assert entry['custom3'] == 'test_custom3'
| {
"content_hash": "b82ae2aff7830b89b557ba4ca42ba028",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 98,
"avg_line_length": 34.15601023017903,
"alnum_prop": 0.5508798202920254,
"repo_name": "crawln45/Flexget",
"id": "38b0dae6b6394d6d14e9213b8076f95d8d004abe",
"size": "13355",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/tests/test_rtorrent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1233"
},
{
"name": "HTML",
"bytes": "82565"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3761134"
},
{
"name": "SCSS",
"bytes": "11875"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1586"
}
],
"symlink_target": ""
} |
import sys, os
from glob import glob
# Install setuptools if it isn't available:
try:
import setuptools
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from distutils.command.install import INSTALL_SCHEMES
from distutils.command.install_headers import install_headers
from setuptools import find_packages
from setuptools import setup
NAME = 'neurokernel-antenna'
VERSION = '0.1'
AUTHOR = 'Chung-Heng Yeh'
AUTHOR_EMAIL = 'chungheng.yeh@gmail.com'
URL = 'https://github.com/neurokernel/antenna/'
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL
DESCRIPTION = 'Fly Antenna on Neurokernel'
LONG_DESCRIPTION = 'An open source software for emulating the fly antenna in the Neurokernel environment'
DOWNLOAD_URL = URL
LICENSE = 'BSD'
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development']
NAMESPACE_PACKAGES = ['neurokernel-antenna']
# Explicitly switch to parent directory of setup.py in case it
# is run from elsewhere:
os.chdir(os.path.dirname(os.path.realpath(__file__)))
PACKAGES = find_packages()
if __name__ == "__main__":
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
# This enables the installation of neurokernel/__init__.py as a data
# file:
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
setup(
name = NAME,
version = VERSION,
author = AUTHOR,
author_email = AUTHOR_EMAIL,
license = LICENSE,
classifiers = CLASSIFIERS,
description = DESCRIPTION,
long_description = LONG_DESCRIPTION,
url = URL,
maintainer = MAINTAINER,
maintainer_email = MAINTAINER_EMAIL,
namespace_packages = NAMESPACE_PACKAGES,
packages = PACKAGES,
# Force installation of __init__.py in namespace package:
data_files = [('neurokernel', ['antenna/__init__.py',
'antenna/antenna/LPU/neurons/cuda/*.cu'])],
        include_package_data = True,
        install_requires=[
'neurokernel >= 0.1'
]
)
| {
"content_hash": "fb8d781cf173a592d199790aa711c74e",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 107,
"avg_line_length": 32.48051948051948,
"alnum_prop": 0.6313474610155938,
"repo_name": "neurokernel/antenna",
"id": "b3557e78f4458537b642b8d9ecc61746c564f3f8",
"size": "2524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cuda",
"bytes": "4542"
},
{
"name": "Python",
"bytes": "61237"
}
],
"symlink_target": ""
} |
"""
Openstack Heat
--------------
Due to the strange nature of the OpenStack compatibility layer, some values
that should be integers fail to validate and need to be represented as
strings. For this reason, we duplicate the AWS::AutoScaling::AutoScalingGroup
and change these types.
"""
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# Copyright (c) 2014, Andy Botting <andy.botting@theguardian.com>
# All rights reserved.
#
# See LICENSE file for full license.
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
from troposphere import AWSObject
from troposphere.validators import integer
# ----------------------------------------------------------------------------
# Class: AWSAutoScalingGroup
# ----------------------------------------------------------------------------
class AWSAutoScalingGroup(AWSObject):
"""Fix issues with OpenStack compatability layer.
Due to the strange nature of the OpenStack compatability layer, some
values that should be integers fail to validate and need to be
represented as strings. For this reason, we duplicate the
AWS::AutoScaling::AutoScalingGroup and change these types.
"""
resource_type = "AWS::AutoScaling::AutoScalingGroup"
props = {
'AvailabilityZones': (list, True),
'Cooldown': (integer, False),
'DesiredCapacity': (basestring, False),
'HealthCheckGracePeriod': (integer, False),
'HealthCheckType': (basestring, False),
'LaunchConfigurationName': (basestring, True),
'LoadBalancerNames': (list, False),
'MaxSize': (basestring, True),
'MinSize': (basestring, True),
'Tags': (list, False),
'VPCZoneIdentifier': (list, False),
}
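# Illustrative only (not part of the original module): a minimal sketch showing
# how the class above is used, with MinSize/MaxSize/DesiredCapacity passed as
# strings rather than integers so the OpenStack layer accepts them. The title,
# zone and sizes below are arbitrary example values.
def _example_scaling_group():
    return AWSAutoScalingGroup(
        'ExampleGroup',
        AvailabilityZones=['nova'],
        LaunchConfigurationName='example-launch-config',
        MinSize='1',
        MaxSize='3',
        DesiredCapacity='2',
    )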
| {
"content_hash": "e545c4cbf562b1d703ec54d5465b003a",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 78,
"avg_line_length": 35.26923076923077,
"alnum_prop": 0.5752453653217012,
"repo_name": "ikben/troposphere",
"id": "1b3bc80c78c8ffbf0f3e9c0c56a5142c620d1b1f",
"size": "1858",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "troposphere/openstack/heat.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1555"
},
{
"name": "Python",
"bytes": "790849"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
'''
Created on Feb 2, 2011
@author: bolme
'''
import pyvision as pv
import cv
def surf(im,mask=None,extended=False,hessianThreshold=500, nOctaves=3, nOctaveLayers=4):
'''
Keypoints contain a
0: center point
1 sign of laplacian (-1,0,+1)
2 scale - diameter or radius
3 angle
4 response value
Descriptors contain 64 floating point numbers
@param im: image to extract features from.
@type im: pv.Image
@param mask: a mask that controls where features are extracted from.
@type mask: OpenCV 8bit image
@return: (keypoints,descriptors)
'''
cvim = im.asOpenCVBW()
keypoints,descriptors = cv.ExtractSURF(cvim,mask,cv.CreateMemStorage(),(int(extended),hessianThreshold,nOctaves,nOctaveLayers))
return keypoints,descriptors | {
"content_hash": "e32ad84c46b47d37d2b25ced7fceea81",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 131,
"avg_line_length": 27.82758620689655,
"alnum_prop": 0.6877323420074349,
"repo_name": "tigerking/pyvision",
"id": "bc73434d7448dfb109cfd9590b41870c2d260ebe",
"size": "807",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/pyvision/other/surf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1260818"
},
{
"name": "R",
"bytes": "1487"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
} |
"""Deployment tests for ipmi driver.
These require an actual HIL setup with a real node, and are
somewhat particular to the MOC's development environment. They may be
difficult to run in other contexts.
"""
from hil.test_common import config_testsuite, fresh_database, \
fail_on_log_warnings, with_request_context, site_layout, server_init
from hil.model import Node
from hil import config, api
import pytest
@pytest.fixture
def configure():
"""Configure HIL"""
config_testsuite()
config.load_extensions()
fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings)
fresh_database = pytest.fixture(fresh_database)
server_init = pytest.fixture(server_init)
with_request_context = pytest.yield_fixture(with_request_context)
site_layout = pytest.fixture(site_layout)
pytestmark = pytest.mark.usefixtures('configure',
'server_init',
'fresh_database',
'with_request_context',
'site_layout')
class TestIpmi():
""" Test IPMI driver calls using functions included in the IPMI driver. """
def collect_nodes(self):
"""Collects nodes in the free list."""
free_nodes = Node.query.filter_by(project_id=None).all()
return free_nodes
def test_node_power_cycle(self):
"""Test power cycling nodes."""
nodes = self.collect_nodes()
for node in nodes:
api.node_power_cycle(node.label)
def test_node_power_force(self):
"""Test power cycling nodes, with force=True."""
nodes = self.collect_nodes()
for node in nodes:
api.node_power_cycle(node.label, True)
def test_node_power_off(self):
"""Test shutting down nodes properly"""
nodes = self.collect_nodes()
for node in nodes:
api.node_power_off(node.label)
def test_node_set_bootdev(self):
"""Test setting the boot device."""
nodes = self.collect_nodes()
for node in nodes:
# change a node's bootdevice to a valid boot device
api.node_set_bootdev(node.label, 'pxe')
api.node_set_bootdev(node.label, 'disk')
api.node_set_bootdev(node.label, 'none')
# set the bootdevice to something invalid
with pytest.raises(api.BadArgumentError):
api.node_set_bootdev(node.label, 'invalid-device')
# register a node with erroneous ipmi details to raise OBMError
# XXX: In theory, this could actually be a real node; we should take
# some measure to ensure this never collides with something actually
# in our test setup.
api.node_register('node-99-z4qa63', obm={
"type": "http://schema.massopencloud.org/haas/v0/obm/ipmi",
"host": "ipmihost",
"user": "root",
"password": "tapeworm"})
with pytest.raises(api.OBMError):
api.node_set_bootdev('node-99-z4qa63', 'none')
| {
"content_hash": "86ad9abf63d3df6e310700ee55eddf9b",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 36.141176470588235,
"alnum_prop": 0.6123046875,
"repo_name": "SahilTikale/haas",
"id": "3a66a6679eefd3f48cf9cad41e6bf48e3d3a9849",
"size": "3072",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/deployment/ipmi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "451"
},
{
"name": "Python",
"bytes": "357764"
}
],
"symlink_target": ""
} |
"""
A helper script used to create files for new linter
"""
from __future__ import print_function
import logging
import re
import sys
logging.basicConfig(
level=logging.DEBUG,
format='%(levelname)-8s %(message)s',
)
def add_linter(linter_id, linter_name):
"""
:type linter_id int
:type linter_name str
"""
logger = logging.getLogger('add_linter')
# normalize values
linter_id_fmt = '{:04d}'.format(linter_id)
linter_name = re.sub(r'[^a-z]+', '-', linter_name.strip().lower())
logger.info("Creating a new linter: %s - %s ...", linter_id_fmt, linter_name)
# /sql directory
sql_name = 'sql/{}-{}'.format(linter_id_fmt, linter_name.replace('_', '-'))
logger.info("Add SQL schema and log files (%s) ...", sql_name)
with open(sql_name + '.sql', mode='wt', encoding='utf-8') as file_name:
# 0002_not_used_indices
table_name = '{}_{}'.format(linter_id_fmt, linter_name.replace('-', '_'))
file_name.writelines([
'-- Report ...\n',
'--\n',
'-- https://github.com/macbre/index-digest/issues/{}\n'.format(linter_id),
'DROP TABLE IF EXISTS `{}`;\n'.format(table_name),
'CREATE TABLE `{}` (\n'.format(table_name),
'-- \n',
');\n',
])
logger.info('... %s created', file_name.name)
with open(sql_name + '-log', mode='wt', encoding='utf-8') as file_name:
file_name.writelines([
'-- \n',
])
logger.info('... %s created', file_name.name)
# /indexdigest/linters directory
linter_name = linter_name.replace('-', '_')
logger.info("Add a Python code for %s linter ...", linter_name)
with open('indexdigest/linters/linter_{}_{}.py'.
format(linter_id_fmt, linter_name), mode='wt', encoding='utf-8') as file_name:
file_name.writelines([
'"""\n',
'This linter checks for ...\n',
'"""\n',
'from collections import defaultdict\n',
'\n',
'from indexdigest.utils import LinterEntry, explain_queries\n',
'\n',
'\n',
'def check_{}(database, queries):\n'.format(linter_name),
' """\n',
' :type database indexdigest.database.Database\n',
' :type queries list[str]\n',
' :rtype: list[LinterEntry]\n',
' """\n',
' yield LinterEntry(linter_type=\'{}\', table_name=table_name,\n'.
format(linter_name),
' message=\'"{}" ...\'.\n',
' format("foo"),\n',
' context={"foo": str("bar")})\n',
])
logger.info('... %s created', file_name.name)
logger.info("Add a test ...")
with open('indexdigest/test/linters/test_{}_{}.py'.format(linter_id_fmt, linter_name),
mode='wt', encoding='utf-8') \
as file_name:
file_name.writelines([
'from __future__ import print_function\n',
'\n',
'from unittest import TestCase\n',
'\n',
'from indexdigest.linters.linter_{0}_{1} import check_{1}\n'.
format(linter_id_fmt, linter_name),
'from indexdigest.test import DatabaseTestMixin, read_queries_from_log\n',
'\n',
'\n',
'class TestLinter(TestCase, DatabaseTestMixin):\n',
'\n',
' def test_{}(self):\n'.format(linter_name),
' pass\n',
])
logger.info('... %s created', file_name.name)
def main():
"""
usage: add_linter 89 empty_tables
"""
try:
linter_id = int(sys.argv[1])
linter_name = str(sys.argv[2])
add_linter(linter_id, linter_name)
except IndexError:
print('Usage: add_linter 89 empty_tables')
sys.exit(1)
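# Note (not in the original file): main() is never invoked here, which suggests
# the script is installed as a console entry point elsewhere. If direct
# execution is also wanted, the conventional guard would be:
if __name__ == '__main__':
    main()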
| {
"content_hash": "2461cf1aedb8b3a443b737c7d0d591b1",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 92,
"avg_line_length": 32.54545454545455,
"alnum_prop": 0.5083798882681564,
"repo_name": "macbre/index-digest",
"id": "c66ebfb20ab57308e4705781e2f3ecdfab27b557",
"size": "3938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indexdigest/cli/add_linter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "858"
},
{
"name": "Makefile",
"bytes": "1000"
},
{
"name": "Python",
"bytes": "154363"
},
{
"name": "Shell",
"bytes": "333"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.HomeView.as_view(), name='home'),
]
| {
"content_hash": "58278a5e0f9dadcdf7be4adb76cb8deb",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 54,
"avg_line_length": 18.285714285714285,
"alnum_prop": 0.65625,
"repo_name": "Candihub/pixel",
"id": "ed42f576f77e4e6883466ecef759cd083985394c",
"size": "128",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/core/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "15017"
},
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "58864"
},
{
"name": "JavaScript",
"bytes": "1180"
},
{
"name": "Makefile",
"bytes": "4184"
},
{
"name": "Python",
"bytes": "414705"
},
{
"name": "R",
"bytes": "3817"
},
{
"name": "Shell",
"bytes": "2928"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import patterns, include, url
from django.conf import settings
urlpatterns = patterns('',
(r'^', include('shared.apps.base.urls')),
(r'^account/', include('shared.apps.account.urls')),
(r'^client/', include('myresource.apps.client.urls')),
(r'^oauth2/', include('myresource.apps.oauth2.urls')),
(r'^api/', include('myresource.apps.api.urls')),
)
| {
"content_hash": "12af48f1194840adeef44583ac2118b3",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 60,
"avg_line_length": 39.8,
"alnum_prop": 0.6708542713567839,
"repo_name": "pingali/aadhaar-oauth2-server",
"id": "2fe9618f078adfb0d2ea4408be8cfb964fe45364",
"size": "398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myresource/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "54980"
},
{
"name": "Python",
"bytes": "78518"
}
],
"symlink_target": ""
} |
"""Module containing the various stages that a builder runs."""
import json
import logging
import os
from chromite.cbuildbot import commands
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import cbuildbot_run
from chromite.cbuildbot.stages import artifact_stages
from chromite.lib import cros_build_lib
from chromite.lib import gs
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import timeout_util
class InvalidTestConditionException(Exception):
"""Raised when pre-conditions for a test aren't met."""
class SignerTestStage(artifact_stages.ArchivingStage):
"""Run signer related tests."""
option_name = 'tests'
config_name = 'signer_tests'
# If the signer tests take longer than 30 minutes, abort. They usually take
# five minutes to run.
SIGNER_TEST_TIMEOUT = 1800
def PerformStage(self):
if not self.archive_stage.WaitForRecoveryImage():
raise InvalidTestConditionException('Missing recovery image.')
with timeout_util.Timeout(self.SIGNER_TEST_TIMEOUT):
commands.RunSignerTests(self._build_root, self._current_board)
class SignerResultsTimeout(failures_lib.StepFailure):
"""The signer did not produce any results inside the expected time."""
class SignerFailure(failures_lib.StepFailure):
"""The signer returned an error result."""
class MissingInstructionException(failures_lib.StepFailure):
"""We didn't receive the list of signing instructions PushImage uploaded."""
class MalformedResultsException(failures_lib.StepFailure):
"""The Signer results aren't formatted as we expect."""
class PaygenSigningRequirementsError(failures_lib.StepFailure):
"""Paygen stage can't run if signing failed."""
class PaygenCrostoolsNotAvailableError(failures_lib.StepFailure):
"""Paygen stage can't run if signing failed."""
class PaygenNoPaygenConfigForBoard(failures_lib.StepFailure):
"""Paygen can't run with a release.conf config for the board."""
class PaygenStage(artifact_stages.ArchivingStage):
"""Stage that generates release payloads.
If this stage is created with a 'channels' argument, it can run
  independently. Otherwise, it's dependent on values queued up by
the ArchiveStage (push_image).
"""
option_name = 'paygen'
config_name = 'paygen'
# Poll for new results every 30 seconds.
SIGNING_PERIOD = 30
# Timeout for PushImage to finish uploading images. 2 hours in seconds.
PUSHIMAGE_TIMEOUT = 2 * 60 * 60
# Timeout for the signing process. 2 hours in seconds.
SIGNING_TIMEOUT = 2 * 60 * 60
FINISHED = 'finished'
def __init__(self, builder_run, board, archive_stage, channels=None,
**kwargs):
"""Init that accepts the channels argument, if present.
Args:
builder_run: See builder_run on ArchivingStage.
board: See board on ArchivingStage.
archive_stage: See archive_stage on ArchivingStage.
channels: Explicit list of channels to generate payloads for.
If empty, will instead wait on values from push_image.
Channels is normally None in release builds, and normally set
for trybot 'payloads' builds.
"""
super(PaygenStage, self).__init__(builder_run, board, archive_stage,
**kwargs)
self.signing_results = {}
self.channels = channels
def _HandleStageException(self, exc_info):
"""Override and don't set status to FAIL but FORGIVEN instead."""
exc_type, exc_value, _exc_tb = exc_info
# If Paygen fails to find anything needed in release.conf, treat it
# as a warning, not a failure. This is common during new board bring up.
if issubclass(exc_type, PaygenNoPaygenConfigForBoard):
return self._HandleExceptionAsWarning(exc_info)
# If the exception is a TestLabFailure that means we couldn't schedule the
# test. We don't fail the build for that. We do the CompoundFailure dance,
# because that's how we'll get failures from background processes returned
# to us.
if (issubclass(exc_type, failures_lib.TestLabFailure) or
(issubclass(exc_type, failures_lib.CompoundFailure) and
exc_value.MatchesFailureType(failures_lib.TestLabFailure))):
return self._HandleExceptionAsWarning(exc_info)
return super(PaygenStage, self)._HandleStageException(exc_info)
def _JsonFromUrl(self, gs_ctx, url):
"""Fetch a GS Url, and parse it as Json.
Args:
gs_ctx: GS Context.
url: Url to fetch and parse.
Returns:
None if the Url doesn't exist.
Parsed Json structure if it did.
Raises:
MalformedResultsException if it failed to parse.
"""
try:
signer_txt = gs_ctx.Cat(url).output
except gs.GSNoSuchKey:
return None
try:
return json.loads(signer_txt)
except ValueError:
# We should never see malformed Json, even for intermediate statuses.
raise MalformedResultsException(signer_txt)
def _SigningStatusFromJson(self, signer_json):
"""Extract a signing status from a signer result Json DOM.
Args:
signer_json: The parsed json status from a signer operation.
Returns:
string with a simple status: 'passed', 'failed', 'downloading', etc,
or '' if the json doesn't contain a status.
"""
return (signer_json or {}).get('status', {}).get('status', '')
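  # Illustrative only: from the accessor chain above, a signer result document
  # is expected to look roughly like
  #   {"status": {"status": "passed", ...}, ...}
  # where the nested value is one of 'passed', 'failed', 'downloading', etc.
  # Everything else in the document is ignored by this stage.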
def _CheckForResults(self, gs_ctx, instruction_urls_per_channel,
channel_notifier):
"""timeout_util.WaitForSuccess func to check a list of signer results.
Args:
gs_ctx: Google Storage Context.
instruction_urls_per_channel: Urls of the signer result files
we're expecting.
channel_notifier: BackgroundTaskRunner into which we push channels for
processing.
Returns:
      True if results for all expected signer instructions have been collected.
"""
COMPLETED_STATUS = ('passed', 'failed')
# Assume we are done, then try to prove otherwise.
results_completed = True
for channel in instruction_urls_per_channel.keys():
self.signing_results.setdefault(channel, {})
if (len(self.signing_results[channel]) ==
len(instruction_urls_per_channel[channel])):
continue
for url in instruction_urls_per_channel[channel]:
# Convert from instructions URL to instructions result URL.
url += '.json'
# We already have a result for this URL.
if url in self.signing_results[channel]:
continue
signer_json = self._JsonFromUrl(gs_ctx, url)
if self._SigningStatusFromJson(signer_json) in COMPLETED_STATUS:
# If we find a completed result, remember it.
self.signing_results[channel][url] = signer_json
# If we don't have full results for this channel, we aren't done
# waiting.
if (len(self.signing_results[channel]) !=
len(instruction_urls_per_channel[channel])):
results_completed = False
continue
# If we reach here, the channel has just been completed for the first
# time.
# If all results 'passed' the channel was successfully signed.
channel_success = True
for signer_result in self.signing_results[channel].values():
if self._SigningStatusFromJson(signer_result) != 'passed':
channel_success = False
# If we successfully completed the channel, inform paygen.
if channel_success:
channel_notifier(channel)
return results_completed
def _WaitForPushImage(self):
"""Block until push_image data is ready.
Returns:
Push_image results, expected to be of the form:
{ 'channel': ['gs://instruction_uri1', 'gs://signer_instruction_uri2'] }
Raises:
MissingInstructionException: If push_image sent us an error, or timed out.
"""
try:
instruction_urls_per_channel = self.board_runattrs.GetParallel(
'instruction_urls_per_channel', timeout=self.PUSHIMAGE_TIMEOUT)
except cbuildbot_run.AttrTimeoutError:
instruction_urls_per_channel = None
# A value of None signals an error, either in PushImage, or a timeout.
if instruction_urls_per_channel is None:
raise MissingInstructionException('PushImage results not available.')
return instruction_urls_per_channel
def _WaitForSigningResults(self,
instruction_urls_per_channel,
channel_notifier):
"""Do the work of waiting for signer results and logging them.
Args:
instruction_urls_per_channel: push_image data (see _WaitForPushImage).
channel_notifier: BackgroundTaskRunner into which we push channels for
processing.
Raises:
ValueError: If the signer result isn't valid json.
RunCommandError: If we are unable to download signer results.
"""
gs_ctx = gs.GSContext(dry_run=self._run.debug)
try:
cros_build_lib.Info('Waiting for signer results.')
timeout_util.WaitForReturnTrue(
self._CheckForResults,
func_args=(gs_ctx, instruction_urls_per_channel, channel_notifier),
timeout=self.SIGNING_TIMEOUT, period=self.SIGNING_PERIOD)
except timeout_util.TimeoutError:
msg = 'Image signing timed out.'
cros_build_lib.Error(msg)
cros_build_lib.PrintBuildbotStepText(msg)
raise SignerResultsTimeout(msg)
# Log all signer results, then handle any signing failures.
failures = []
for url_results in self.signing_results.values():
for url, signer_result in url_results.iteritems():
result_description = os.path.basename(url)
cros_build_lib.PrintBuildbotStepText(result_description)
cros_build_lib.Info('Received results for: %s', result_description)
cros_build_lib.Info(json.dumps(signer_result, indent=4))
status = self._SigningStatusFromJson(signer_result)
if status != 'passed':
failures.append(result_description)
cros_build_lib.Error('Signing failed for: %s', result_description)
if failures:
cros_build_lib.Error('Failure summary:')
for failure in failures:
cros_build_lib.Error(' %s', failure)
raise SignerFailure(failures)
def PerformStage(self):
"""Do the work of generating our release payloads."""
# Convert to release tools naming for boards.
board = self._current_board.replace('_', '-')
version = self._run.attrs.release_tag
assert version, "We can't generate payloads without a release_tag."
logging.info("Generating payloads for: %s, %s", board, version)
# Test to see if the current board has a Paygen configuration. We do
    # this here, not in the sub-process, so we don't have to pass back a
# failure reason.
try:
from crostools.lib import paygen_build_lib
paygen_build_lib.ValidateBoardConfig(board)
except paygen_build_lib.BoardNotConfigured:
raise PaygenNoPaygenConfigForBoard(
'No release.conf entry was found for board %s. Get a TPM to fix.' %
board)
except ImportError:
raise PaygenCrostoolsNotAvailableError()
with parallel.BackgroundTaskRunner(self._RunPaygenInProcess) as per_channel:
def channel_notifier(channel):
per_channel.put((channel, board, version, self._run.debug,
self._run.config.paygen_skip_testing,
self._run.config.paygen_skip_delta_payloads))
if self.channels:
logging.info("Using explicit channels: %s", self.channels)
# If we have an explicit list of channels, use it.
for channel in self.channels:
channel_notifier(channel)
else:
instruction_urls_per_channel = self._WaitForPushImage()
self._WaitForSigningResults(instruction_urls_per_channel,
channel_notifier)
def _RunPaygenInProcess(self, channel, board, version, debug,
skip_test_payloads, skip_delta_payloads):
"""Helper for PaygenStage that invokes payload generation.
This method is intended to be safe to invoke inside a process.
Args:
channel: Channel of payloads to generate ('stable', 'beta', etc)
board: Board of payloads to generate ('x86-mario', 'x86-alex-he', etc)
version: Version of payloads to generate.
debug: Flag telling if this is a real run, or a test run.
skip_test_payloads: Skip generating test payloads, and auto tests.
skip_delta_payloads: Skip generating delta payloads.
"""
# TODO(dgarrett): Remove when crbug.com/341152 is fixed.
# These modules are imported here because they aren't always available at
# cbuildbot startup.
# pylint: disable=F0401
try:
from crostools.lib import gspaths
from crostools.lib import paygen_build_lib
except ImportError:
# We can't generate payloads without crostools.
raise PaygenCrostoolsNotAvailableError()
# Convert to release tools naming for channels.
if not channel.endswith('-channel'):
channel += '-channel'
with osutils.TempDir(sudo_rm=True) as tempdir:
# Create the definition of the build to generate payloads for.
build = gspaths.Build(channel=channel,
board=board,
version=version)
try:
# Generate the payloads.
self._PrintLoudly('Starting %s, %s, %s' % (channel, version, board))
paygen_build_lib.CreatePayloads(build,
work_dir=tempdir,
dry_run=debug,
run_parallel=True,
run_on_builder=True,
skip_delta_payloads=skip_delta_payloads,
skip_test_payloads=skip_test_payloads,
skip_autotest=skip_test_payloads)
except (paygen_build_lib.BuildFinished,
paygen_build_lib.BuildLocked,
paygen_build_lib.BuildSkip) as e:
# These errors are normal if it's possible for another process to
# work on the same build. This process could be a Paygen server, or
# another builder (perhaps by a trybot generating payloads on request).
#
# This means the build was finished by the other process, is already
# being processed (so the build is locked), or that it's been marked
# to skip (probably done manually).
cros_build_lib.Info('Paygen skipped because: %s', e)
| {
"content_hash": "16717dec8ace793ac8f8f0b0c9e5d4b6",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 80,
"avg_line_length": 37.71907216494845,
"alnum_prop": 0.6649812094294499,
"repo_name": "bpsinc-native/src_third_party_chromite",
"id": "245c5c42b8beb25b49f2d072165f8e499420928d",
"size": "14805",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cbuildbot/stages/release_stages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85"
},
{
"name": "HTML",
"bytes": "2661"
},
{
"name": "Python",
"bytes": "3534807"
},
{
"name": "Shell",
"bytes": "24031"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django_jinja import views
from .views import BasicTestView
from .views import I18nTestView, I18nTestViewDTL
from .views import StreamingTestView
from .views import PipelineTestView
from .views import CreateTestView, DeleteTestView, DetailTestView, UpdateTestView
from .views import ListTestView
from .views import ArchiveIndexTestView, YearArchiveTestView, MonthArchiveTestView, WeekArchiveTestView, DayArchiveTestView, TodayArchiveTestView, DateDetailTestView
urlpatterns = [
url(r"^test1/$", BasicTestView.as_view(), name="test-1"),
url(r"^test1/(?P<data>\d+)/$", BasicTestView.as_view(), name="test-1"),
url(r"^test-i18n/$", I18nTestView.as_view(), name="i18n-test"),
url(r"^test-i18n-dtl/$", I18nTestViewDTL.as_view(), name="i18n-dtl-test"),
url(r"^test-pipeline/$", PipelineTestView.as_view(), name="pipeline-test"),
url(r"^test/404$", views.PageNotFound.as_view(), name="page-404"),
url(r"^test/403$", views.PermissionDenied.as_view(), name="page-403"),
url(r"^test/500$", views.ServerError.as_view(), name="page-500"),
url(r"^test-streaming/$", StreamingTestView.as_view(), name='streaming-test'),
url(r"^testmodel/$", ListTestView.as_view()),
url(r"^testmodel/create$", CreateTestView.as_view()),
url(r"^testmodel/(?P<pk>\d+)/delete$", DeleteTestView.as_view()),
url(r"^testmodel/(?P<pk>\d+)/detail$", DetailTestView.as_view()),
url(r"^testmodel/(?P<pk>\d+)/update$", UpdateTestView.as_view()),
url(r"^testmodel/archive/$", ArchiveIndexTestView.as_view()),
url(r"^testmodel/archive/(?P<year>\d{4})/$", YearArchiveTestView.as_view()),
url(r"^testmodel/archive/(?P<year>\d{4})/week/(?P<week>\d+)/$", WeekArchiveTestView.as_view()),
url(r"^testmodel/archive/(?P<year>\d{4})/(?P<month>[\w-]+)/$", MonthArchiveTestView.as_view()),
url(r"^testmodel/archive/(?P<year>\d{4})/(?P<month>[\w-]+)/(?P<day>\d+)/$", DayArchiveTestView.as_view()),
url(r"^testmodel/archive/today/$", TodayArchiveTestView.as_view()),
url(r"^testmodel/archive/(?P<year>\d{4})/(?P<month>[\w-]+)/(?P<day>\d+)/(?P<pk>\d+)$", DateDetailTestView.as_view())
]
| {
"content_hash": "a5d43ba41ea5d7dc2e341acce9674511",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 165,
"avg_line_length": 60.083333333333336,
"alnum_prop": 0.680998613037448,
"repo_name": "akx/django-jinja",
"id": "4552800e073ad267d2da0ca84db556a5ae85135e",
"size": "2163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/testapp/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "26"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "JavaScript",
"bytes": "28"
},
{
"name": "Python",
"bytes": "65813"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
} |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class TransferSweepGetRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'sweep_id': (str,), # noqa: E501
'client_id': (str,), # noqa: E501
'secret': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'sweep_id': 'sweep_id', # noqa: E501
'client_id': 'client_id', # noqa: E501
'secret': 'secret', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, sweep_id, *args, **kwargs): # noqa: E501
"""TransferSweepGetRequest - a model defined in OpenAPI
Args:
sweep_id (str): Plaid’s unique identifier for a sweep.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
client_id (str): Your Plaid API `client_id`. The `client_id` is required and may be provided either in the `PLAID-CLIENT-ID` header or as part of a request body.. [optional] # noqa: E501
secret (str): Your Plaid API `secret`. The `secret` is required and may be provided either in the `PLAID-SECRET` header or as part of a request body.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.sweep_id = sweep_id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
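# Illustrative usage sketch (values below are hypothetical, not taken from the
# Plaid documentation embedded in this file): sweep_id is the only required
# constructor argument; client_id and secret may be passed as keyword arguments
# when they are not supplied via request headers.
#
#   request = TransferSweepGetRequest(
#       sweep_id="sweep-id-example",      # hypothetical identifier
#       client_id="client-id-example",    # optional
#       secret="secret-example",          # optional
#   )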
| {
"content_hash": "9ddcc2344e14de6ac80a82a4688c28f1",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 199,
"avg_line_length": 40.93063583815029,
"alnum_prop": 0.5592430447676882,
"repo_name": "plaid/plaid-python",
"id": "356a2c78637ecbbc57144b9dcb09d78d4aa5f3c6",
"size": "7083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaid/model/transfer_sweep_get_request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "622"
},
{
"name": "Mustache",
"bytes": "125163"
},
{
"name": "Python",
"bytes": "9342874"
}
],
"symlink_target": ""
} |
"""
test_djangocms-instagram
------------
Tests for `djangocms-instagram` models module.
"""
import os
import shutil
from django.test import TestCase
from djangocms_instagram import models
class TestDjangoCMS_Instagram(TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass | {
"content_hash": "f2c9a1f7dfd83786d0d45323ab075238",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 46,
"avg_line_length": 14.08,
"alnum_prop": 0.6704545454545454,
"repo_name": "mishbahr/djangocms-instagram",
"id": "6c2830f5b3048ab2752f56b13d8f00efb30f9ae6",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "646"
},
{
"name": "HTML",
"bytes": "5492"
},
{
"name": "JavaScript",
"bytes": "22398"
},
{
"name": "Makefile",
"bytes": "1269"
},
{
"name": "Python",
"bytes": "38300"
}
],
"symlink_target": ""
} |
from datetime import datetime
from time import sleep
from django import db
from django.core.management import BaseCommand
import pytz
from psycopg2._psycopg import InterfaceError
from dimagi.utils.logging import notify_exception
from pillow_retry.api import process_pillow_retry
from pillow_retry.models import PillowError
from corehq.apps.change_feed.producer import ChangeProducer
from corehq.sql_db.util import handle_connection_failure
BATCH_SIZE = 10000
producer = ChangeProducer(auto_flush=False)
class PillowRetryEnqueuingOperation(BaseCommand):
help = "Runs the Pillow Retry Queue"
def handle(self, **options):
while True:
try:
num_processed = self.process_queue()
except Exception:
num_processed = 0
                notify_exception(None, message="Could not process pillow retry queue")
sleep_time = 10 if num_processed < BATCH_SIZE else 0
sleep(sleep_time)
@handle_connection_failure()
def process_queue(self):
utcnow = datetime.utcnow()
errors = self.get_items_to_be_processed(utcnow)
for error in errors:
process_pillow_retry(error, producer=producer)
producer.flush()
return len(errors)
def get_items_to_be_processed(self, utcnow):
        # Fetch the batch of errors that are currently due (at most BATCH_SIZE of them)
utcnow = utcnow.replace(tzinfo=pytz.UTC)
try:
return self._get_items(utcnow)
except InterfaceError:
db.connection.close()
return self._get_items(utcnow)
def _get_items(self, utcnow):
errors = PillowError.get_errors_to_process(utcnow=utcnow, limit=BATCH_SIZE)
return list(errors)
class Command(PillowRetryEnqueuingOperation):
pass
| {
"content_hash": "ef067c4164c2a7f2cd0c86fae1a04621",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 84,
"avg_line_length": 30.423728813559322,
"alnum_prop": 0.6763231197771588,
"repo_name": "dimagi/commcare-hq",
"id": "ea63f5c2a9379ce2215b2260f1b23c2f25f79ddb",
"size": "1795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/ex-submodules/pillow_retry/management/commands/run_pillow_retry_queue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
no_cache = 1
base_template_path = "templates/www/desk.html"
import os, re
import frappe
from frappe import _
import frappe.sessions
def get_context(context):
if frappe.session.user == "Guest":
frappe.throw(_("Log in to access this page."), frappe.PermissionError)
elif frappe.db.get_value("User", frappe.session.user, "user_type") == "Website User":
frappe.throw(_("You are not permitted to access this page."), frappe.PermissionError)
hooks = frappe.get_hooks()
try:
boot = frappe.sessions.get()
except Exception as e:
boot = frappe._dict(status='failed', error = str(e))
print(frappe.get_traceback())
# this needs commit
csrf_token = frappe.sessions.get_csrf_token()
frappe.db.commit()
boot_json = frappe.as_json(boot)
# remove script tags from boot
boot_json = re.sub("\<script\>[^<]*\</script\>", "", boot_json)
context.update({
"no_cache": 1,
"build_version": get_build_version(),
"include_js": hooks["app_include_js"],
"include_css": hooks["app_include_css"],
"sounds": hooks["sounds"],
"boot": boot if context.get("for_mobile") else boot_json,
"csrf_token": csrf_token,
"google_analytics_id": frappe.conf.get("google_analytics_id"),
"google_analytics_anonymize_ip": frappe.conf.get("google_analytics_anonymize_ip"),
"mixpanel_id": frappe.conf.get("mixpanel_id")
})
return context
@frappe.whitelist()
def get_desk_assets(build_version):
"""Get desk assets to be loaded for mobile app"""
data = get_context({"for_mobile": True})
assets = [{"type": "js", "data": ""}, {"type": "css", "data": ""}]
if build_version != data["build_version"]:
# new build, send assets
for path in data["include_js"]:
# assets path shouldn't start with /
# as it points to different location altogether
if path.startswith('/assets/'):
path = path.replace('/assets/', 'assets/')
try:
with open(os.path.join(frappe.local.sites_path, path) ,"r") as f:
assets[0]["data"] = assets[0]["data"] + "\n" + frappe.safe_decode(f.read(), "utf-8")
except IOError:
pass
for path in data["include_css"]:
if path.startswith('/assets/'):
path = path.replace('/assets/', 'assets/')
try:
with open(os.path.join(frappe.local.sites_path, path) ,"r") as f:
assets[1]["data"] = assets[1]["data"] + "\n" + frappe.safe_decode(f.read(), "utf-8")
except IOError:
pass
return {
"build_version": data["build_version"],
"boot": data["boot"],
"assets": assets
}
def get_build_version():
try:
return str(os.path.getmtime(os.path.join(frappe.local.sites_path, '.build')))
except OSError:
# .build can sometimes not exist
# this is not a major problem so send fallback
return frappe.utils.random_string(8)
| {
"content_hash": "9630122aa2c64777a5dbe2d798a0889a",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 89,
"avg_line_length": 30.95505617977528,
"alnum_prop": 0.6631578947368421,
"repo_name": "adityahase/frappe",
"id": "c6bce850a52d1b4b23e2f6b8c0aa35c22f2d6484",
"size": "2856",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/www/desk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "288806"
},
{
"name": "HTML",
"bytes": "209164"
},
{
"name": "JavaScript",
"bytes": "2350450"
},
{
"name": "Less",
"bytes": "160693"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3035663"
},
{
"name": "SCSS",
"bytes": "45340"
},
{
"name": "Shell",
"bytes": "517"
},
{
"name": "Vue",
"bytes": "73943"
}
],
"symlink_target": ""
} |
import socket
import uuid
from oslo.config import cfg
from heat.openstack.common import context
from heat.openstack.common.gettextutils import _ # noqa
from heat.openstack.common import importutils
from heat.openstack.common import jsonutils
from heat.openstack.common import log as logging
from heat.openstack.common import timeutils
LOG = logging.getLogger(__name__)
notifier_opts = [
cfg.MultiStrOpt('notification_driver',
default=[],
help='Driver or drivers to handle sending notifications'),
cfg.StrOpt('default_notification_level',
default='INFO',
help='Default notification level for outgoing notifications'),
cfg.StrOpt('default_publisher_id',
default=None,
help='Default publisher_id for outgoing notifications'),
]
CONF = cfg.CONF
CONF.register_opts(notifier_opts)
WARN = 'WARN'
INFO = 'INFO'
ERROR = 'ERROR'
CRITICAL = 'CRITICAL'
DEBUG = 'DEBUG'
log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
class BadPriorityException(Exception):
pass
def notify_decorator(name, fn):
"""Decorator for notify which is used from utils.monkey_patch().
:param name: name of the function
:param function: - object of the function
:returns: function -- decorated function
"""
def wrapped_func(*args, **kwarg):
body = {}
body['args'] = []
body['kwarg'] = {}
for arg in args:
body['args'].append(arg)
for key in kwarg:
body['kwarg'][key] = kwarg[key]
ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
notify(ctxt,
CONF.default_publisher_id or socket.gethostname(),
name,
CONF.default_notification_level,
body)
return fn(*args, **kwarg)
return wrapped_func
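# Illustrative sketch (names below are hypothetical, not taken from this module):
# monkey_patch() would typically rebind a callable through this decorator, e.g.
#   some_api.create = notify_decorator('some_api.create', some_api.create)
# after which every call to some_api.create() also emits a notification whose
# payload records the positional and keyword arguments of the call.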
def publisher_id(service, host=None):
if not host:
try:
host = CONF.host
except AttributeError:
host = CONF.default_publisher_id or socket.gethostname()
return "%s.%s" % (service, host)
def notify(context, publisher_id, event_type, priority, payload):
"""Sends a notification using the specified driver
:param publisher_id: the source worker_type.host of the message
:param event_type: the literal type of event (ex. Instance Creation)
:param priority: patterned after the enumeration of Python logging
levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
:param payload: A python dictionary of attributes
Outgoing message format includes the above parameters, and appends the
following:
message_id
a UUID representing the id for this notification
timestamp
the GMT timestamp the notification was sent at
The composite message will be constructed as a dictionary of the above
attributes, which will then be sent via the transport mechanism defined
by the driver.
Message example::
{'message_id': str(uuid.uuid4()),
'publisher_id': 'compute.host1',
'timestamp': timeutils.utcnow(),
'priority': 'WARN',
'event_type': 'compute.create_instance',
'payload': {'instance_id': 12, ... }}
"""
if priority not in log_levels:
raise BadPriorityException(
_('%s not in valid priorities') % priority)
# Ensure everything is JSON serializable.
payload = jsonutils.to_primitive(payload, convert_instances=True)
msg = dict(message_id=str(uuid.uuid4()),
publisher_id=publisher_id,
event_type=event_type,
priority=priority,
payload=payload,
timestamp=str(timeutils.utcnow()))
for driver in _get_drivers():
try:
driver.notify(context, msg)
except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to "
"send to notification system. "
"Payload=%(payload)s")
% dict(e=e, payload=payload))
_drivers = None
def _get_drivers():
"""Instantiate, cache, and return drivers based on the CONF."""
global _drivers
if _drivers is None:
_drivers = {}
for notification_driver in CONF.notification_driver:
try:
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
except ImportError:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
return _drivers.values()
def _reset_drivers():
"""Used by unit tests to reset the drivers."""
global _drivers
_drivers = None
| {
"content_hash": "0ebeac1ad4f4bba1724b9ce0f22a92e6",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 79,
"avg_line_length": 30.79746835443038,
"alnum_prop": 0.6095355528154541,
"repo_name": "varunarya10/heat",
"id": "cd1e7b4bf98b2b4e4e64ffcfed8398969b0208d0",
"size": "5503",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "heat/openstack/common/notifier/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2793404"
},
{
"name": "Shell",
"bytes": "21618"
}
],
"symlink_target": ""
} |
import logging
import os
from . import config
class DummyMPLogHandler(logging.Handler):
"""DummyMP logging handler to allow multiprocess logging.
This class is a custom logging handler to allow spawned processes
(from :py:mod:`multiprocessing`) to log without any issues. This
works by intercepting emitted log records, and sending them via
queue to the master process. The master process will process each
record and call :py:meth:`logging.Logger.handle` to emit the
logging record at the master process level.
Note that this class can be used as a general multiprocess logging
handler simply by removing the int_pid attribute.
Attributes:
queue (:py:class:`multiprocessing.Queue`): The Queue object to
forward logging records to.
int_pid (int): The internal PID used to reference the process.
"""
def __init__(self, int_pid, queue):
"""Initializes DummyMPLogHandler with the inputted internal PID
and Queue object."""
logging.Handler.__init__(self)
self.queue = queue
self.int_pid = int_pid
def emit(self, record):
"""Method override to forward logging records to the internal
Queue object."""
try:
# Format: [ [queueMsgID, PID, internal PID], record ]
self.queue.put([[config.DUMMYMP_LOG_ID, os.getpid(), self.int_pid], record])
except:
# Something went wrong...
self.handleError(record)
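# Illustrative usage sketch (the master-side queue-draining loop is assumed and
# not shown in this module): a spawned process attaches the handler so that its
# log records travel back to the master process.
#
#   import logging, multiprocessing
#   queue = multiprocessing.Queue()
#   logger = logging.getLogger()
#   logger.addHandler(DummyMPLogHandler(int_pid=0, queue=queue))
#   logger.warning("forwarded to the master process")
#   # The master process reads [[DUMMYMP_LOG_ID, pid, int_pid], record] items
#   # from the queue and calls its own logger.handle(record) on each one.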
| {
"content_hash": "1291968c22ee1236717d352de47f763b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 88,
"avg_line_length": 38.125,
"alnum_prop": 0.6537704918032787,
"repo_name": "alberthdev/dummymp",
"id": "b4c3f4d7fddd01281843294be1c460e479fcf0a5",
"size": "2322",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dummymp/loghandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46211"
}
],
"symlink_target": ""
} |
from framework import auth
from website import settings
from osf.models import Contributor
from addons.osfstorage.models import Region
from website.filters import profile_image_url
from osf.utils.permissions import READ
from osf.utils import workflows
from api.waffle.utils import storage_i18n_flag_active
def get_profile_image_url(user, size=settings.PROFILE_IMAGE_MEDIUM):
return profile_image_url(settings.PROFILE_IMAGE_PROVIDER,
user,
use_ssl=True,
size=size)
def serialize_user(user, node=None, admin=False, full=False, is_profile=False, include_node_counts=False):
"""
Return a dictionary representation of a registered user.
:param User user: A User object
:param bool full: Include complete user properties
"""
contrib = None
if isinstance(user, Contributor):
contrib = user
user = contrib.user
fullname = user.display_full_name(node=node)
ret = {
'id': str(user._id),
'registered': user.is_registered,
'surname': user.family_name,
'fullname': fullname,
'shortname': fullname if len(fullname) < 50 else fullname[:23] + '...' + fullname[-23:],
'profile_image_url': user.profile_image_url(size=settings.PROFILE_IMAGE_MEDIUM),
'active': user.is_active,
}
if node is not None:
if admin:
flags = {
'visible': False,
'permission': READ,
}
else:
if not contrib:
try:
contrib = node.contributor_set.get(user=user)
except Contributor.NotFoundError:
contrib = None
is_contributor_obj = isinstance(contrib, Contributor)
flags = {
'visible': contrib.visible if is_contributor_obj else node.contributor_set.filter(user=user, visible=True).exists(),
'permission': contrib.permission if is_contributor_obj else None
}
ret.update(flags)
if user.is_registered:
ret.update({
'url': user.url,
'absolute_url': user.absolute_url,
'display_absolute_url': user.display_absolute_url,
'date_registered': user.date_registered.strftime('%Y-%m-%d'),
})
if full:
# Add emails
if is_profile:
ret['emails'] = [
{
'address': each,
'primary': each.strip().lower() == user.username.strip().lower(),
'confirmed': True,
} for each in user.emails.values_list('address', flat=True)
] + [
{
'address': each,
'primary': each.strip().lower() == user.username.strip().lower(),
'confirmed': False
}
for each in user.get_unconfirmed_emails_exclude_external_identity()
]
if user.is_merged:
merger = user.merged_by
merged_by = {
'id': str(merger._primary_key),
'url': merger.url,
'absolute_url': merger.absolute_url
}
else:
merged_by = None
default_region = user.get_addon('osfstorage').default_region
available_regions = [region for region in Region.objects.all().values('_id', 'name')]
ret.update({
'activity_points': user.get_activity_points(),
'profile_image_url': user.profile_image_url(size=settings.PROFILE_IMAGE_LARGE),
'is_merged': user.is_merged,
'available_regions': available_regions,
'storage_flag_is_active': storage_i18n_flag_active(),
'default_region': {'name': default_region.name, '_id': default_region._id},
'merged_by': merged_by,
})
if include_node_counts:
projects = user.nodes.exclude(is_deleted=True).filter(type='osf.node').get_roots()
ret.update({
'number_projects': projects.count(),
'number_public_projects': projects.filter(is_public=True).count(),
})
return ret
def serialize_contributors(contribs, node, **kwargs):
return [
serialize_user(contrib, node, **kwargs)
for contrib in contribs
]
def serialize_visible_contributors(node):
# This is optimized when node has .include('contributor__user__guids')
return [
serialize_user(c, node) for c in node.contributor_set.all() if c.visible
]
def add_contributor_json(user, current_user=None, node=None):
"""
Generate a dictionary representation of a user, optionally including # projects shared with `current_user`
:param User user: The user object to serialize
:param User current_user : The user object for a different user, to calculate number of projects in common
:return dict: A dict representing the serialized user data
"""
# get shared projects
if current_user:
n_projects_in_common = current_user.n_projects_in_common(user)
else:
n_projects_in_common = 0
current_employment = None
education = None
if user.jobs:
current_employment = user.jobs[0]['institution']
if user.schools:
education = user.schools[0]['institution']
contributor_json = {
'fullname': user.fullname,
'email': user.email,
'id': user._primary_key,
'employment': current_employment,
'education': education,
'n_projects_in_common': n_projects_in_common,
'registered': user.is_registered,
'active': user.is_active,
'profile_image_url': user.profile_image_url(size=settings.PROFILE_IMAGE_MEDIUM),
'profile_url': user.profile_url
}
if node:
contributor_info = user.contributor_set.get(node=node.parent_node)
contributor_json['permission'] = contributor_info.permission
contributor_json['visible'] = contributor_info.visible
return contributor_json
def serialize_unregistered(fullname, email):
"""Serializes an unregistered user."""
user = auth.get_user(email=email)
if user is None:
serialized = {
'fullname': fullname,
'id': None,
'registered': False,
'active': False,
'profile_image_url': profile_image_url(settings.PROFILE_IMAGE_PROVIDER,
email,
use_ssl=True,
size=settings.PROFILE_IMAGE_MEDIUM),
'email': email,
}
else:
serialized = add_contributor_json(user)
serialized['fullname'] = fullname
serialized['email'] = email
return serialized
def serialize_access_requests(node):
"""Serialize access requests for a node"""
return [
{
'user': serialize_user(access_request.creator),
'comment': access_request.comment,
'id': access_request._id
} for access_request in node.requests.filter(
request_type=workflows.RequestTypes.ACCESS.value,
machine_state=workflows.DefaultStates.PENDING.value
).select_related('creator')
]
| {
"content_hash": "c5ca7468a26a948198712dffe06e7e84",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 132,
"avg_line_length": 35.88780487804878,
"alnum_prop": 0.5755063205110779,
"repo_name": "mfraezz/osf.io",
"id": "92c3d5846205f90567ba1df804e9001765b11352",
"size": "7381",
"binary": false,
"copies": "8",
"ref": "refs/heads/develop",
"path": "website/profile/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92846"
},
{
"name": "Dockerfile",
"bytes": "5868"
},
{
"name": "HTML",
"bytes": "341209"
},
{
"name": "JavaScript",
"bytes": "1787097"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "684258"
},
{
"name": "Python",
"bytes": "11879565"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
"""
Seismic: 2D finite difference simulation of scalar wave propagation.
Difraction example in cylindrical wedge model. Based on:
R. M. Alford, K. R. Kelly and D. M. Boore -
Accuracy of finite-difference modeling of the acoustic wave equation.
Geophysics 1974
"""
import numpy as np
from matplotlib import animation
from fatiando.seismic import wavefd
from fatiando.vis import mpl
# Set the parameters of the finite difference grid
shape = (200, 200)
ds = 100. # spacing
area = [0, shape[0] * ds, 0, shape[1] * ds]
# Build the velocity model: a quarter-plane wedge of zero velocity embedded in a 6000 m/s medium
velocity = np.zeros(shape) + 6000.
velocity[100:, 100:] = 0.
fc = 15.
simulation = wavefd.Scalar(velocity, (ds, ds))
simulation.add_point_source((125, 75), -1*wavefd.Gauss(1., fc))
duration = 2.6
maxit = int(duration / simulation.dt)
maxt = duration
# This part makes an animation using matplotlibs animation API
background = (velocity - 6000) * 10 ** -3
fig = mpl.figure(figsize=(8, 6))
mpl.subplots_adjust(right=0.98, left=0.11, hspace=0.5, top=0.93)
mpl.subplot2grid((4, 3), (0, 0), colspan=3, rowspan=3)
wavefield = mpl.imshow(np.zeros_like(velocity), extent=area,
cmap=mpl.cm.gray_r, vmin=-0.05, vmax=0.05)
mpl.points([75*ds, 125*ds], '^b', size=8) # seismometer position
mpl.ylim(area[2:][::-1])
mpl.xlabel('x (km)')
mpl.ylabel('z (km)')
mpl.m2km()
mpl.subplot2grid((4, 3), (3, 0), colspan=3)
seismogram1, = mpl.plot([], [], '-k')
mpl.xlim(0, duration)
mpl.ylim(-0.05, 0.05)
mpl.ylabel('Amplitude')
mpl.xlabel('Time (s)')
times = np.linspace(0, maxt, maxit)
# Run the simulation up front and record the seismogram at the receiver position
simulation.run(maxit)
seismogram = simulation[:, 125, 75] # (time, z and x) shape
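# This function updates the plot at every frame of the animation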
def animate(i):
u = simulation[i]
seismogram1.set_data(times[:i], seismogram[:i])
wavefield.set_array(background[::-1] + u[::-1])
return wavefield, seismogram1
anim = animation.FuncAnimation(
fig, animate, frames=maxit, interval=1)
mpl.show()
| {
"content_hash": "a89b3af8bf9e335159174a8355bfecf4",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 69,
"avg_line_length": 32.68333333333333,
"alnum_prop": 0.6884242733299337,
"repo_name": "eusoubrasileiro/fatiando",
"id": "2640c125cfecb9019e1880158030d063a86501ad",
"size": "1961",
"binary": false,
"copies": "2",
"ref": "refs/heads/sim-class-improvements",
"path": "cookbook/seismic_wavefd_scalar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5213509"
},
{
"name": "Makefile",
"bytes": "7884"
},
{
"name": "Python",
"bytes": "946895"
},
{
"name": "Shell",
"bytes": "5112"
}
],
"symlink_target": ""
} |
def matrix_challenge(arr):
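    """Return True when the bottom-right cell is reachable from the top-left cell
    by moving up/down/left/right through '1' cells only. Otherwise search again
    from the bottom-right cell and count the '0' cells adjacent to both visited
    regions (single cells that could be flipped to join the two components);
    return that count, or "not possible" if no such cell exists.
    """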
for i, row in enumerate(arr):
arr[i] = [c for c in row]
rows = len(arr)
cols = len(arr[0])
possible, visited_start = search((0, 0), (rows-1, cols-1), arr)
if possible:
return True
_, visited_end = search((rows-1, cols-1), (0, 0), arr)
neighbors_start = get_all_neighbors(visited_start, arr)
neighbors_end = get_all_neighbors(visited_end, arr)
num_intersect = len(neighbors_start.intersection(neighbors_end))
if num_intersect == 0:
return "not possible"
else:
return num_intersect
def get_all_neighbors(vertices, arr):
neighbors = set()
for v in vertices:
neighbors = neighbors.union(get_neighbors(v, arr, "0"))
return neighbors
def search(start, target, arr):
stack = [start]
visited = {start}
while stack:
v = stack.pop()
if v == target:
return True, None
for n in get_neighbors(v, arr, "1"):
if n in visited:
continue
stack.append(n)
visited.add(n)
return False, visited
def get_neighbors(v, arr, val):
rows = len(arr)
cols = len(arr[0])
i, j = v
neighbors = []
neighbors.extend(valid(i-1, j, rows, cols, arr, val))
neighbors.extend(valid(i+1, j, rows, cols, arr, val))
neighbors.extend(valid(i, j-1, rows, cols, arr, val))
neighbors.extend(valid(i, j+1, rows, cols, arr, val))
return neighbors
def valid(i, j, rows, cols, arr, val):
result = []
if i >= 0 and j >= 0 and i < rows and j < cols and arr[i][j] == val:
result = [(i, j)]
return result
print(matrix_challenge(["11100", "10011", "10101", "10011"])) | {
"content_hash": "96b1b475604afa4f7a32149b6ee63cb9",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 70,
"avg_line_length": 24.671875,
"alnum_prop": 0.6238125395820139,
"repo_name": "saisankargochhayat/algo_quest",
"id": "0bbabde336976eba66ca34b140ac6ddafcb7a1bf",
"size": "1670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Company-Based/amazon/matrix_challenge.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "405"
},
{
"name": "C++",
"bytes": "9149"
},
{
"name": "HTML",
"bytes": "1679"
},
{
"name": "Java",
"bytes": "3648"
},
{
"name": "JavaScript",
"bytes": "786"
},
{
"name": "Python",
"bytes": "248621"
},
{
"name": "Ruby",
"bytes": "2761"
},
{
"name": "Shell",
"bytes": "610"
}
],
"symlink_target": ""
} |
import pytest
from tg import tmpl_context as c
from bson import ObjectId
from allura.lib.helpers import set_context
from allura.lib.exceptions import NoSuchProjectError, NoSuchNeighborhoodError
from allura.tests.unit import WithDatabase
from allura.tests.unit import patches
from allura.tests.unit.factories import (create_project,
create_app_config,
create_neighborhood)
class TestWhenProjectIsFoundAndAppIsNot(WithDatabase):
def setup_method(self, method):
super().setup_method(method)
self.myproject = create_project('myproject')
set_context('myproject', neighborhood=self.myproject.neighborhood)
def test_that_it_sets_the_project(self):
assert c.project is self.myproject
def test_that_it_sets_the_app_to_none(self):
assert c.app is None, c.app
class TestWhenProjectIsFoundInNeighborhood(WithDatabase):
def setup_method(self, method):
super().setup_method(method)
self.myproject = create_project('myproject')
set_context('myproject', neighborhood=self.myproject.neighborhood)
def test_that_it_sets_the_project(self):
assert c.project is self.myproject
def test_that_it_sets_the_app_to_none(self):
assert c.app is None
class TestWhenAppIsFoundByID(WithDatabase):
patches = [patches.project_app_loading_patch]
def setup_method(self, method):
super().setup_method(method)
self.myproject = create_project('myproject')
self.app_config = create_app_config(self.myproject, 'my_mounted_app')
set_context('myproject', app_config_id=self.app_config._id,
neighborhood=self.myproject.neighborhood)
def test_that_it_sets_the_app(self):
assert c.app is self.fake_app
def test_that_it_gets_the_app_by_its_app_config(self):
self.project_app_instance_function.assert_called_with(self.app_config)
class TestWhenAppIsFoundByMountPoint(WithDatabase):
patches = [patches.project_app_loading_patch]
def setup_method(self, method):
super().setup_method(method)
self.myproject = create_project('myproject')
self.app_config = create_app_config(self.myproject, 'my_mounted_app')
set_context('myproject', mount_point='my_mounted_app',
neighborhood=self.myproject.neighborhood)
def test_that_it_sets_the_app(self):
assert c.app is self.fake_app
def test_that_it_gets_the_app_by_its_mount_point(self):
self.project_app_instance_function.assert_called_with(
'my_mounted_app')
class TestWhenProjectIsNotFound(WithDatabase):
def test_that_it_raises_an_exception(self):
nbhd = create_neighborhood()
pytest.raises(NoSuchProjectError,
set_context,
'myproject',
neighborhood=nbhd)
def test_proper_exception_when_id_lookup(self):
create_neighborhood()
pytest.raises(NoSuchProjectError,
set_context,
ObjectId(),
neighborhood=None)
class TestWhenNeighborhoodIsNotFound(WithDatabase):
def test_that_it_raises_an_exception(self):
pytest.raises(NoSuchNeighborhoodError,
set_context,
'myproject',
neighborhood='myneighborhood')
| {
"content_hash": "2e68a5e65244366d53008d88693ac84b",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 78,
"avg_line_length": 34.33,
"alnum_prop": 0.6536556947276435,
"repo_name": "apache/allura",
"id": "171cf4425626059b81ca6254c69ea5ecf24fc566",
"size": "4303",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Allura/allura/tests/unit/test_helpers/test_set_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6142"
},
{
"name": "CSS",
"bytes": "181457"
},
{
"name": "Dockerfile",
"bytes": "4748"
},
{
"name": "HTML",
"bytes": "867332"
},
{
"name": "JavaScript",
"bytes": "1191836"
},
{
"name": "Makefile",
"bytes": "6248"
},
{
"name": "Python",
"bytes": "4499987"
},
{
"name": "RAML",
"bytes": "27600"
},
{
"name": "Roff",
"bytes": "41"
},
{
"name": "Ruby",
"bytes": "1280"
},
{
"name": "SCSS",
"bytes": "27742"
},
{
"name": "Shell",
"bytes": "131207"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
""" This file implements linking semantics common to all unixes. On unix, static
libraries must be specified in a fixed order on the linker command line. Generators
declared there store information about the order and use it properly.
"""
import builtin
from b2.build import generators, type
from b2.util.utility import *
from b2.util import set, sequence
class UnixLinkingGenerator (builtin.LinkingGenerator):
def __init__ (self, id, composing, source_types, target_types, requirements):
builtin.LinkingGenerator.__init__ (self, id, composing, source_types, target_types, requirements)
def run (self, project, name, prop_set, sources):
result = builtin.LinkingGenerator.run (self, project, name, prop_set, sources)
if result:
set_library_order (project.manager (), sources, prop_set, result [1])
return result
def generated_targets (self, sources, prop_set, project, name):
sources2 = []
libraries = []
for l in sources:
if type.is_derived (l.type (), 'LIB'):
libraries.append (l)
else:
sources2.append (l)
sources = sources2 + order_libraries (libraries)
return builtin.LinkingGenerator.generated_targets (self, sources, prop_set, project, name)
class UnixArchiveGenerator (builtin.ArchiveGenerator):
def __init__ (self, id, composing, source_types, target_types_and_names, requirements):
builtin.ArchiveGenerator.__init__ (self, id, composing, source_types, target_types_and_names, requirements)
def run (self, project, name, prop_set, sources):
result = builtin.ArchiveGenerator.run(self, project, name, prop_set, sources)
set_library_order(project.manager(), sources, prop_set, result)
return result
class UnixSearchedLibGenerator (builtin.SearchedLibGenerator):
def __init__ (self):
builtin.SearchedLibGenerator.__init__ (self)
def optional_properties (self):
return self.requirements ()
def run (self, project, name, prop_set, sources):
        result = builtin.SearchedLibGenerator.run (self, project, name, prop_set, sources)
        set_library_order (project.manager (), sources, prop_set, result)
return result
class UnixPrebuiltLibGenerator (generators.Generator):
def __init__ (self, id, composing, source_types, target_types_and_names, requirements):
generators.Generator.__init__ (self, id, composing, source_types, target_types_and_names, requirements)
def run (self, project, name, prop_set, sources):
f = prop_set.get ('<file>')
set_library_order_aux (f, sources)
return f + sources
### # The derived toolset must specify their own rules and actions.
# FIXME: restore?
# action.register ('unix.prebuilt', None, None)
generators.register (UnixPrebuiltLibGenerator ('unix.prebuilt', False, [], ['LIB'], ['<file>', '<toolset>unix']))
### # Declare generators
### generators.register [ new UnixLinkingGenerator unix.link : LIB OBJ : EXE
### : <toolset>unix ] ;
generators.register (UnixArchiveGenerator ('unix.archive', True, ['OBJ'], ['STATIC_LIB'], ['<toolset>unix']))
### generators.register [ new UnixLinkingGenerator unix.link.dll : LIB OBJ : SHARED_LIB
### : <toolset>unix ] ;
###
### generators.register [ new UnixSearchedLibGenerator
### unix.SearchedLibGenerator : : SEARCHED_LIB : <toolset>unix ] ;
###
###
### # The derived toolset must specify their own actions.
### actions link {
### }
###
### actions link.dll {
### }
def unix_archive (manager, targets, sources, properties):
pass
# FIXME: restore?
#action.register ('unix.archive', unix_archive, [''])
### actions searched-lib-generator {
### }
###
### actions prebuilt {
### }
from b2.util.order import Order
__order = Order ()
def set_library_order_aux (from_libs, to_libs):
for f in from_libs:
for t in to_libs:
if f != t:
__order.add_pair (f, t)
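# Illustrative note (library names below are hypothetical): add_pair (f, t)
# records that f must appear before t on the final link command line. For
# example, if liba.a (a library created here) uses symbols from libb.a (a
# library it links against), then
#   set_library_order_aux ([liba], [libb])
# makes order_libraries emit liba ahead of libb, as Unix linkers require.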
def set_library_order (manager, sources, prop_set, result):
used_libraries = []
deps = prop_set.dependency ()
sources.extend(d.value() for d in deps)
sources = sequence.unique(sources)
for l in sources:
if l.type () and type.is_derived (l.type (), 'LIB'):
used_libraries.append (l)
created_libraries = []
for l in result:
if l.type () and type.is_derived (l.type (), 'LIB'):
created_libraries.append (l)
created_libraries = set.difference (created_libraries, used_libraries)
set_library_order_aux (created_libraries, used_libraries)
def order_libraries (libraries):
return __order.order (libraries)
| {
"content_hash": "ff91d605695fa4cc801f9a51c349d6c4",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 115,
"avg_line_length": 32.28472222222222,
"alnum_prop": 0.6588513658851366,
"repo_name": "bureau14/qdb-benchmark",
"id": "681a872027a249aab2274fa039d30bebd64a8ff6",
"size": "4867",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "thirdparty/boost/tools/build/src/tools/unix.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "422"
},
{
"name": "C++",
"bytes": "175377"
},
{
"name": "CMake",
"bytes": "9783"
},
{
"name": "CSS",
"bytes": "2470"
},
{
"name": "HTML",
"bytes": "792"
},
{
"name": "JavaScript",
"bytes": "29642"
},
{
"name": "Shell",
"bytes": "1475"
}
],
"symlink_target": ""
} |
import sys
class Bracket:
def __init__(self, bracket_type, position):
self.bracket_type = bracket_type
self.position = position
def Match(self, c):
if self.bracket_type == '[' and c == ']':
return True
if self.bracket_type == '{' and c == '}':
return True
if self.bracket_type == '(' and c == ')':
return True
return False
if __name__ == "__main__":
text = sys.stdin.read()
    opening_brackets_stack = []
    answer = "Success"
    for i, next in enumerate(text):
        if next == '(' or next == '[' or next == '{':
            # Process opening bracket: remember it together with its 1-based position.
            opening_brackets_stack.append(Bracket(next, i + 1))
        if next == ')' or next == ']' or next == '}':
            # Process closing bracket: it must match the most recently opened bracket.
            if not opening_brackets_stack or not opening_brackets_stack.pop().Match(next):
                answer = i + 1
                break
    if answer == "Success" and opening_brackets_stack:
        # All closing brackets matched, but some opening brackets were never closed.
        answer = opening_brackets_stack[0].position
    # Printing answer: position of the first unmatched bracket (1-based), or "Success".
    print(answer)
| {
"content_hash": "963b35230800eb00c706ac32058251a9",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 59,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.5105882352941177,
"repo_name": "xunilrj/sandbox",
"id": "f9fca1867d5584a573f8cd0aa4252271e50f53fc",
"size": "861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courses/coursera-sandiego-algorithms/data-structures/assignment001/check_brackets_in_code/check_brackets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "235"
},
{
"name": "ASP.NET",
"bytes": "110"
},
{
"name": "Assembly",
"bytes": "28409"
},
{
"name": "Asymptote",
"bytes": "22978"
},
{
"name": "C",
"bytes": "1022035"
},
{
"name": "C#",
"bytes": "474510"
},
{
"name": "C++",
"bytes": "33387716"
},
{
"name": "CMake",
"bytes": "1288737"
},
{
"name": "CSS",
"bytes": "49690"
},
{
"name": "Common Lisp",
"bytes": "858"
},
{
"name": "Coq",
"bytes": "6200"
},
{
"name": "Dockerfile",
"bytes": "2912"
},
{
"name": "Elixir",
"bytes": "34"
},
{
"name": "Erlang",
"bytes": "8204"
},
{
"name": "F#",
"bytes": "33187"
},
{
"name": "Fortran",
"bytes": "20472"
},
{
"name": "GDB",
"bytes": "701"
},
{
"name": "GLSL",
"bytes": "7478"
},
{
"name": "Go",
"bytes": "8971"
},
{
"name": "HTML",
"bytes": "6469462"
},
{
"name": "Handlebars",
"bytes": "8236"
},
{
"name": "Haskell",
"bytes": "18581"
},
{
"name": "Java",
"bytes": "120539"
},
{
"name": "JavaScript",
"bytes": "5055335"
},
{
"name": "Jupyter Notebook",
"bytes": "1849172"
},
{
"name": "LLVM",
"bytes": "43431"
},
{
"name": "MATLAB",
"bytes": "462980"
},
{
"name": "Makefile",
"bytes": "1622666"
},
{
"name": "Objective-C",
"bytes": "2001"
},
{
"name": "PostScript",
"bytes": "45490"
},
{
"name": "PowerShell",
"bytes": "192867"
},
{
"name": "Python",
"bytes": "726138"
},
{
"name": "R",
"bytes": "31364"
},
{
"name": "Roff",
"bytes": "5700"
},
{
"name": "Ruby",
"bytes": "5865"
},
{
"name": "Rust",
"bytes": "797104"
},
{
"name": "Sage",
"bytes": "654"
},
{
"name": "Scala",
"bytes": "42383"
},
{
"name": "Shell",
"bytes": "154039"
},
{
"name": "TLA",
"bytes": "16779"
},
{
"name": "TSQL",
"bytes": "3412"
},
{
"name": "TeX",
"bytes": "6989202"
},
{
"name": "TypeScript",
"bytes": "8845"
},
{
"name": "Visual Basic .NET",
"bytes": "1090"
},
{
"name": "WebAssembly",
"bytes": "70321"
},
{
"name": "q",
"bytes": "13889"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1Initializer(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None):
"""
V1Initializer - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str'
}
self.attribute_map = {
'name': 'name'
}
self._name = name
@property
def name(self):
"""
Gets the name of this V1Initializer.
name of the process that is responsible for initializing this object.
:return: The name of this V1Initializer.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1Initializer.
name of the process that is responsible for initializing this object.
:param name: The name of this V1Initializer.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1Initializer):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
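# Illustrative usage sketch (the name value is hypothetical):
#   initializer = V1Initializer(name="example.initializer.io")
#   initializer.to_dict()   # -> {'name': 'example.initializer.io'}
# Assigning None through the `name` property setter raises ValueError.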
| {
"content_hash": "f5fcdf57bcb398ef037c92d08efee3ba",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 105,
"avg_line_length": 26.700854700854702,
"alnum_prop": 0.5220870678617158,
"repo_name": "sebgoa/client-python",
"id": "3190a2e77777eabf093cadcf529b94135b672577",
"size": "3141",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_initializer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5855378"
},
{
"name": "Shell",
"bytes": "16387"
}
],
"symlink_target": ""
} |
from .base import CodeGenerator
| {
"content_hash": "5d35f02dc0f0d3f1e098ee018d2d6008",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.84375,
"repo_name": "tclose/PyPe9",
"id": "73f307b9bf53708ff2e6b8b407d13cc78112a4d5",
"size": "32",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pype9/simulate/neuron/code_gen/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1575"
},
{
"name": "Python",
"bytes": "383807"
},
{
"name": "Shell",
"bytes": "4546"
}
],
"symlink_target": ""
} |
import os
import logging
import slugify
from ovbpclient.models import oteams as oteams_models
from ovbpclient.json import json_dump
def sanitize_name(name):
# https://newbedev.com/regular-expression-for-valid-filename
return slugify.slugify(name, regex_pattern=r"[^\w\-.\s]")
logger = logging.getLogger(__name__)
def download_organization(organization: "oteams_models.Organization", target_dir_path):
if not os.path.isdir(target_dir_path):
os.mkdir(target_dir_path)
if len(os.listdir(target_dir_path)) > 0:
raise ValueError("Target directory is not empty, can't download.")
for project in organization.list_all_projects():
logger.info(f"Downloading project {project.name}.")
download_project(project, os.path.join(target_dir_path, sanitize_name(project.name)))
def download_project(project: "oteams_models.Project", target_dir_path):
if not os.path.isdir(target_dir_path):
os.mkdir(target_dir_path)
if len(os.listdir(target_dir_path)) > 0:
raise ValueError("Target directory is not empty, can't download.")
# gates
logger.info("Downloading gates.")
gates_path = os.path.join(target_dir_path, "1-gates")
os.mkdir(gates_path)
for gate in project.list_all_gates():
data = gate.data.copy()
if gate.base_feeder is not None:
base_feeder = gate.get_base_feeder()
data["base_feeder"] = base_feeder.data
child_feeder = base_feeder.get_child()
if child_feeder is not None:
data["base_feeder"]["child_data"] = child_feeder.data.copy()
json_dump(data, os.path.join(gates_path, f"{sanitize_name(gate.name)}.json"), indent=4)
# importers
logger.info("Downloading importers.")
importers_path = os.path.join(target_dir_path, "2-importers")
os.mkdir(importers_path)
for importer in project.list_all_importers():
json_dump(importer.data, os.path.join(importers_path, f"{sanitize_name(importer.name)}.json"), indent=4)
# cleaners
logger.info("Downloading cleaners.")
cleaners_path = os.path.join(target_dir_path, "3-cleaners")
os.mkdir(cleaners_path)
for cleaner in project.list_all_cleaners():
cleaner_dir_path = os.path.join(cleaners_path, sanitize_name(cleaner.name))
os.mkdir(cleaner_dir_path)
json_dump(cleaner.data, os.path.join(cleaner_dir_path, "#cleaner.json"), indent=4)
for unitcleaner in cleaner.list_all_unitcleaners():
json_dump(
unitcleaner.data,
os.path.join(cleaner_dir_path, f"{sanitize_name(unitcleaner.name)}.json"), indent=4)
# analyses
logger.info("Downloading analyses.")
analyses_path = os.path.join(target_dir_path, "4-analyses")
os.mkdir(analyses_path)
for analysis in project.list_all_analyses():
# download analysisconfig
analysis.reload()
# prepare data
data = analysis.data.copy()
# inputs
data["inputs"] = [i.data.copy() for i in analysis.list_all_analysis_inputs()]
# outputs
data["outputs"] = [o.data.copy() for o in analysis.list_all_analysis_outputs()]
json_dump(data, os.path.join(analyses_path, f"{sanitize_name(analysis.name)}.json"), indent=4)
| {
"content_hash": "6b6f38f9233e0b20541d0cbb76124926",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 112,
"avg_line_length": 38.61176470588235,
"alnum_prop": 0.6587446678854357,
"repo_name": "openergy/openergy",
"id": "4bc10d985418dd7b73704c9617f589cd42e68bf5",
"size": "3282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ovbpclient/tools/download.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131846"
}
],
"symlink_target": ""
} |
import urllib
import urllib2
import re
#loosely based on http://stackoverflow.com/questions/16011497/youtube-stream-fmt-stream-map-quality
def getYoutubeMovie(url):
try:
conn = urllib2.urlopen(url)
encoding = conn.headers.getparam('charset')
content = conn.read().decode(encoding)
#get available streams
s = re.findall(r'"url_encoded_fmt_stream_map": ?"([^"]+)"', content)
print s
if s and len(s):
s = s[0].split(',')
values = {}
for stream in s:
stream = stream.replace('\\u0026', '&')
stream = urllib2.parse_keqv_list(stream.split('&'))
values[stream.get('itag') or "0"] = stream
itags = values.keys()
			# keep the highest itags first so the better-quality streams are preferred below
			itags = sorted(itags, reverse=True)
print itags
link = None
for itag in itags:
z = values[itag]
if itag == '84' or itag == '82' or itag == '38' or itag == '37' or itag == '22' or itag == '18':
try:
link = urllib.unquote(z['url'] + '&signature=%s' % z['sig'])
except:
link = urllib.unquote(z['url'])
return link
except Exception as e:
print e
return None
| {
"content_hash": "a9ff449fa4f3e89ffc746153aa83cb09",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 100,
"avg_line_length": 28.97222222222222,
"alnum_prop": 0.6241610738255033,
"repo_name": "strainu/plugins.video.cinepub",
"id": "8e3fa49a3e0c78559494309b645529caa2a088c4",
"size": "1043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/lib/youtube.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85561"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
import sys
sys.path.append("..")
import paddle
import paddle.fluid as fluid
from op_test import OpTest
paddle.enable_static()
class TestSizeOp(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "size"
self.config()
input = np.zeros(self.shape, dtype=self.dtype)
self.inputs = {'Input': input}
self.outputs = {'Out': np.array([np.size(input)], dtype=np.int64)}
def config(self):
self.shape = [1, 2]
self.dtype = np.int32
def test_check_output(self):
self.check_output_with_place(self.place)
def set_npu(self):
self.__class__.use_npu = True
class TestSizeOp1(TestSizeOp):
def config(self):
self.shape = [2]
self.dtype = np.float64
class TestSizeOp2(TestSizeOp):
def config(self):
self.shape = [2, 3]
self.dtype = np.float32
class TestSizeOp3(TestSizeOp):
def config(self):
self.shape = [2, 3, 100]
self.dtype = np.float16
class TestSizeOp4(TestSizeOp):
def config(self):
self.shape = [2**10]
self.dtype = np.bool_
class TestSizeOp5(TestSizeOp):
def config(self):
self.shape = [7, 8, 9, 10]
self.dtype = np.int64
class TestSizeOp6(TestSizeOp):
def config(self):
self.shape = []
self.dtype = np.int64
class TestSizeAPI(unittest.TestCase):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
def set_npu(self):
self.__class__.use_npu = True
def test_size_static(self):
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
shape1 = [2, 1, 4, 5]
shape2 = [1, 4, 5]
x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1')
x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2')
input_1 = np.random.random(shape1).astype("int32")
input_2 = np.random.random(shape2).astype("int32")
out_1 = paddle.fluid.layers.size(x_1)
out_2 = paddle.fluid.layers.size(x_2)
exe = paddle.static.Executor(place=self.place)
res_1, res_2 = exe.run(
feed={
"x_1": input_1,
"x_2": input_2,
},
fetch_list=[out_1, out_2],
)
assert np.array_equal(
res_1, np.array([np.size(input_1)]).astype("int64")
)
assert np.array_equal(
res_2, np.array([np.size(input_2)]).astype("int64")
)
def test_size_imperative(self):
paddle.disable_static(self.place)
input_1 = np.random.random([2, 1, 4, 5]).astype("int32")
input_2 = np.random.random([1, 4, 5]).astype("int32")
x_1 = paddle.to_tensor(input_1)
x_2 = paddle.to_tensor(input_2)
out_1 = paddle.fluid.layers.size(x_1)
out_2 = paddle.fluid.layers.size(x_2)
assert np.array_equal(out_1.numpy().item(0), np.size(input_1))
assert np.array_equal(out_2.numpy().item(0), np.size(input_2))
paddle.enable_static()
def test_error(self):
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
def test_x_type():
shape = [1, 4, 5]
input_1 = np.random.random(shape).astype("int32")
out_1 = paddle.fluid.layers.size(input_1)
self.assertRaises(TypeError, test_x_type)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "0a0dc4355106cc0b447d7c0bbaf4fbd6",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 76,
"avg_line_length": 28.34090909090909,
"alnum_prop": 0.5565356856455493,
"repo_name": "luotao1/Paddle",
"id": "1e768a5dd185a4d0acb8970f26a47f58a3f7075e",
"size": "4354",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
from rh_aligner.stitching.match_sift_features_and_filter_cv2 import match_single_sift_features_and_filter, match_multiple_sift_features_and_filter
import os
import time
import argparse
import re
def wait_after_file(filename, timeout_seconds):
if timeout_seconds > 0:
cur_time = time.time()
mod_time = os.path.getmtime(filename)
end_wait_time = mod_time + timeout_seconds
while cur_time < end_wait_time:
print "Waiting for file: {}".format(filename)
cur_time = time.time()
mod_time = os.path.getmtime(filename)
end_wait_time = mod_time + timeout_seconds
if cur_time < end_wait_time:
time.sleep(end_wait_time - cur_time)
def main():
# Command line parser
parser = argparse.ArgumentParser(description='Iterates over the tilespecs in a file, computing matches for each overlapping tile.')
parser.add_argument('tiles_file', metavar='tiles_file', type=str,
help='the json file of tilespecs')
parser.add_argument('features_file1', metavar='features_file1', type=str,
help='a file that contains the features json file of the first tile (if a single pair is matched) or a list of features json files (if multiple pairs are matched)')
parser.add_argument('features_file2', metavar='features_file2', type=str,
help='a file that contains the features json file of the second tile (if a single pair is matched) or a list of features json files (if multiple pairs are matched)')
parser.add_argument('--index_pairs', metavar='index_pairs', type=str, nargs='+',
help='a colon separated indices of the tiles in the tilespec file that correspond to the feature files that need to be matched. The format is [mfov1_index]_[tile_index]:[mfov2_index]_[tile_index]')
parser.add_argument('-o', '--output_file', type=str,
help='an output file name where the correspondent_spec file will be (if a single pair is matched, default: ./matched_sifts.json) or a list of output files (if multiple pairs are matched, default: ./matched_siftsX.json)',
default='./matched_sifts.json')
parser.add_argument('-c', '--conf_file_name', type=str,
help='the configuration file with the parameters for each step of the alignment process in json format (uses default parameters, if not supplied)',
default=None)
parser.add_argument('-w', '--wait_time', type=int,
help='the time to wait since the last modification date of the features_file (default: None)',
default=0)
parser.add_argument('-t', '--threads_num', type=int,
help='the number of threads (processes) to use (default: 1)',
default=1)
args = parser.parse_args()
print("args:", args)
if len(args.index_pairs) == 1 and not args.features_file1.endswith('.txt'):
wait_after_file(args.features_file1, args.wait_time)
wait_after_file(args.features_file2, args.wait_time)
m = re.match('([0-9]+)_([0-9]+):([0-9]+)_([0-9]+)', args.index_pairs[0])
index_pair = (int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)))
match_single_sift_features_and_filter(args.tiles_file, args.features_file1, args.features_file2,
args.output_file, index_pair, conf_fname=args.conf_file_name)
else: # More than one pair
with open(args.features_file1, 'r') as f_fnames:
features_files_lst1 = [fname.strip() for fname in f_fnames.readlines()]
with open(args.features_file2, 'r') as f_fnames:
features_files_lst2 = [fname.strip() for fname in f_fnames.readlines()]
for feature_file in zip(features_files_lst1, features_files_lst2):
wait_after_file(feature_file[0], args.wait_time)
wait_after_file(feature_file[1], args.wait_time)
with open(args.output_file, 'r') as f_fnames:
output_files_lst = [fname.strip() for fname in f_fnames.readlines()]
index_pairs = []
for index_pair in args.index_pairs:
m = re.match('([0-9]+)_([0-9]+):([0-9]+)_([0-9]+)', index_pair)
index_pairs.append( (int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))) )
match_multiple_sift_features_and_filter(args.tiles_file, features_files_lst1, features_files_lst2,
output_files_lst, index_pairs, conf_fname=args.conf_file_name,
processes_num=args.threads_num)
print("Done.")
if __name__ == '__main__':
main()
| {
"content_hash": "a182ccec4454bfdbdfdc71aef3d28919",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 244,
"avg_line_length": 59.27160493827161,
"alnum_prop": 0.6104978129556342,
"repo_name": "Rhoana/rh_aligner",
"id": "60ba45cb3b29b08310dc0bc66230e9a8bd2f709b",
"size": "4801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/wrappers/match_sift_features_and_filter_cv2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "117"
},
{
"name": "HTML",
"bytes": "1795"
},
{
"name": "JavaScript",
"bytes": "608374"
},
{
"name": "Python",
"bytes": "861406"
},
{
"name": "Shell",
"bytes": "110"
}
],
"symlink_target": ""
} |
import imp
from .authorization import Authorization
from .card import Card
from .combatant import Combatant
from .combatant_authorization import CombatantAuthorization
from .combatant_warrant import CombatantWarrant
from .discipline import Discipline
from .marshal import Marshal
from .privacy_acceptance import PrivacyAcceptance
from .user_discipline_role import UserDisciplineRole
from .user_global_role import UserGlobalRole
from .waiver import Waiver, WaiverReminder
| {
"content_hash": "e44cc56c6330bb4e7ec5702c8fda3e8b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 36.30769230769231,
"alnum_prop": 0.8580508474576272,
"repo_name": "lrt512/emol",
"id": "4eefee49cd888026b91e6c436f03069e54a3acd3",
"size": "472",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "emol/cards/models/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5404"
},
{
"name": "HTML",
"bytes": "36437"
},
{
"name": "JavaScript",
"bytes": "31682"
},
{
"name": "Less",
"bytes": "5352"
},
{
"name": "Python",
"bytes": "153090"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2014 Cuble Desarrollo S.L.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import unicode_literals
from django.shortcuts import render, get_object_or_404
from django.views.generic import View
from blog.models import Post, Tag
from projects.forms import BudgetForm
from projects.models import Project
class ProjectsListView(View):
"""
List of posts.
"""
@staticmethod
def get(request):
"""
@param request:
@return:
"""
projects = Project.objects.requested_objects(request)
return render(request, "projects/list.html", {"projects": projects})
class ProjectsTagListView(View):
"""
List of posts.
"""
@staticmethod
def get(request, slug):
"""
@param request:
@return:
"""
tag = get_object_or_404(Tag, slug=slug)
projects = Project.objects.requested_objects(request, queryset=tag.projects.all())
return render(request, "projects/list.html", {"projects": projects, "tag": tag})
class ProjectDetailsView(View):
"""
List of posts.
"""
@staticmethod
def get(request, slug):
"""
@param request:
@param slug:
@return:
"""
project = get_object_or_404(Project, slug=slug)
return render(request, "projects/details.html", {"project": project})
class NewBudget(View):
"""
View for showing and handling budget form.
"""
def get(self, request):
"""
@param request:
@return:
"""
budget_form = BudgetForm()
return render(request, "projects/budgets/create.html", {"budget_form": budget_form})
| {
"content_hash": "e1aad2591e129f21ee839aa874b82c1b",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 92,
"avg_line_length": 28.526315789473685,
"alnum_prop": 0.6785977859778598,
"repo_name": "cubledesarrollo/cubledotes",
"id": "e60adb83c6fb12e012b385e9045692b1fd31c172",
"size": "2734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cuble/projects/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83773"
},
{
"name": "CoffeeScript",
"bytes": "1175"
},
{
"name": "JavaScript",
"bytes": "15618"
},
{
"name": "Python",
"bytes": "242692"
}
],
"symlink_target": ""
} |
import unittest
import warnings
from unittest import mock
from airflow.utils.log.logging_mixin import StreamLogWriter, set_context
class TestLoggingMixin(unittest.TestCase):
def setUp(self):
warnings.filterwarnings(action='always')
def test_set_context(self):
handler1 = mock.MagicMock()
handler2 = mock.MagicMock()
parent = mock.MagicMock()
parent.propagate = False
parent.handlers = [
handler1,
]
log = mock.MagicMock()
log.handlers = [
handler2,
]
log.parent = parent
log.propagate = True
value = "test"
set_context(log, value)
handler1.set_context.assert_called_once_with(value)
handler2.set_context.assert_called_once_with(value)
def tearDown(self):
warnings.resetwarnings()
class TestStreamLogWriter(unittest.TestCase):
def test_write(self):
logger = mock.MagicMock()
logger.log = mock.MagicMock()
log = StreamLogWriter(logger, 1)
msg = "test_message"
log.write(msg)
assert log._buffer == msg
log.write(" \n")
logger.log.assert_called_once_with(1, msg)
assert log._buffer == ""
def test_flush(self):
logger = mock.MagicMock()
logger.log = mock.MagicMock()
log = StreamLogWriter(logger, 1)
msg = "test_message"
log.write(msg)
assert log._buffer == msg
log.flush()
logger.log.assert_called_once_with(1, msg)
assert log._buffer == ""
def test_isatty(self):
logger = mock.MagicMock()
logger.log = mock.MagicMock()
log = StreamLogWriter(logger, 1)
assert not log.isatty()
def test_encoding(self):
logger = mock.MagicMock()
logger.log = mock.MagicMock()
log = StreamLogWriter(logger, 1)
assert log.encoding is None
def test_iobase_compatibility(self):
log = StreamLogWriter(None, 1)
assert not log.closed
# has no specific effect
log.close()
| {
"content_hash": "9f68563c5130a445b77e1f66a3b42b56",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 72,
"avg_line_length": 23.59550561797753,
"alnum_prop": 0.5923809523809523,
"repo_name": "dhuang/incubator-airflow",
"id": "7d19b9d5632a0b49e603e9b36079f55b72b4fc6c",
"size": "2888",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "tests/utils/test_logging_mixin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
} |
import base64
import copy
import glob
import json
import logging
import os
import socket
import sys
import threading
import time
import traceback
import urllib2
import urlparse
import yaml
DEFAULT_REGISTRY_DIR = '/opt/spinnaker-monitoring/registry'
# pylint: disable=invalid-name
_cached_registry_catalog = None
_cached_registry_timestamp = None
def get_source_catalog(options):
"""Returns a dictionary of metric source name to configuration document.
Args:
options: [dict] Specifies where the catalog is.
If 'registry_dir' is specified use that.
Otherwise default to the DEFAULT_REGISTRY_DIR
Returns:
Dictionary keyed by the root name of the config file in the registry
directory whose value is the dictionary of the YAML file content.
"""
registry_dir = options.get('registry_dir') or DEFAULT_REGISTRY_DIR
global _cached_registry_catalog
global _cached_registry_timestamp
try:
timestamp = os.path.getmtime(registry_dir)
except OSError as err:
logging.error(err)
return _cached_registry_catalog or {}
if _cached_registry_timestamp == timestamp:
return _cached_registry_catalog
logging.info('Updating catalog from %s at %ld', registry_dir, timestamp)
catalog = {}
for source in glob.glob(os.path.join(registry_dir, '*.yml')):
name = os.path.splitext(os.path.basename(source))[0]
logging.info('loading %s', source)
with open(source) as stream:
doc = yaml.safe_load(stream)
url = doc.get('metrics_url')
if url is None:
logging.error('%s is missing "metrics_url"', source)
continue
doc['metrics_url'] = url if isinstance(url, list) else [url]
catalog[name] = doc
_cached_registry_catalog = catalog
_cached_registry_timestamp = timestamp
return catalog
def __foreach_metric_tag_binding(
service, metric_name, metric_data, service_data,
visitor, visitor_pos_args, visitor_kwargs):
for metric_instance in metric_data['values']:
visitor(service, metric_name, metric_instance, metric_data, service_data,
*visitor_pos_args, **visitor_kwargs)
def foreach_metric_in_service_map(
service_map, visitor, *visitor_pos_args, **visitor_kwargs):
for service, service_metric_list in service_map.items():
if not service_metric_list:
continue
for service_metrics in service_metric_list:
for metric_name, metric_data in service_metrics['metrics'].items():
__foreach_metric_tag_binding(
service, metric_name, metric_data, service_metrics,
visitor, visitor_pos_args, visitor_kwargs)
def normalize_name_and_tags(name, metric_instance, metric_metadata):
tags = metric_instance.get('tags', None)
if not tags:
return name, None # signal this metric had no tags so we can ignore it.
is_timer = metric_metadata['kind'] == 'Timer'
if is_timer:
tags = list(tags)
for index, tag in enumerate(tags):
if tag['key'] == 'statistic':
name = name + '__{0}'.format(tag['value'])
del tags[index]
break
return name, tags
class SpectatorClient(object):
"""Helper class for pulling data from Spectator servers."""
@staticmethod
def add_standard_parser_arguments(parser):
parser.add_argument('--prototype_path', default='',
help='Optional filter to restrict metrics of interest.')
parser.add_argument(
'--log_metric_diff',
default=False, action='store_true',
help='Keep track of the last set of metrics/bindings were'
' and show the differences with the current metric/bindings.'
' This is to show a change in what metrics are available, not'
' the values of the metrics themselves.')
parser.add_argument('--registry_dir', default=None,
help='The directory containing the *.yml files specifying'
' each of the URLs to collect metrics from.')
def __init__(self, options):
self.__prototype = None
self.__default_scan_params = {'tagNameRegex': '.+'}
self.__previous_scan_lock = threading.Lock()
self.__previous_scan = {} if options.get('log_metric_diff') else None
if options['prototype_path']:
# pylint: disable=invalid-name
with open(options['prototype_path']) as fd:
self.__prototype = json.JSONDecoder().decode(fd.read())
def __log_scan_diff(self, host, port, metrics):
"""Diff this scan with the previous one for debugging purposes."""
if self.__previous_scan is None:
return
key = '{0}:{1}'.format(host, port)
with self.__previous_scan_lock:
previous_metrics = self.__previous_scan.get(key, {})
self.__previous_scan[key] = copy.deepcopy(metrics)
if not previous_metrics:
return
previous_keys = set(previous_metrics.keys())
keys = set(metrics.keys())
new_keys = keys.difference(previous_keys)
same_keys = keys.intersection(previous_keys)
lost_keys = previous_keys.difference(keys)
lines = []
if lost_keys:
lines.append('Stopped metrics for:\n - {0}\n'
.format('\n - '.join(lost_keys)))
if new_keys:
lines.append('Started metrics for:\n - {0}\n'
.format('\n - '.join(new_keys)))
def normalize_tags(tag_list):
result = set([])
for item in sorted(tag_list):
result.add('{0}={1}'.format(item['key'], item['value']))
return ', '.join(result)
for check_key in same_keys:
tag_sets = set(
[normalize_tags(item.get('tags', []))
for item in metrics[check_key].get('values', [])])
prev_tag_sets = set(
[normalize_tags(item.get('tags', []))
for item in previous_metrics[check_key].get('values', [])])
added_tags = tag_sets.difference(prev_tag_sets)
lost_tags = prev_tag_sets.difference(tag_sets)
if added_tags:
lines.append('"{0}" started data points for\n - {1}\n'
.format(check_key, '\n - '.join(added_tags)))
if lost_tags:
lines.append('"{0}" stopped data points for\n - {1}\n'
.format(check_key, '\n - '.join(lost_tags)))
if lines:
logging.info('==== DIFF %s ===\n%s\n', key, '\n'.join(lines))
def create_request(self, url, authorization):
"""Helper function to create a request to facilitate testing.
Wrapper around creating a Request because Request does not implement
equals so it's difficult to test directly.
Args:
url: [string] The url for the request.
authorization: [string] None or the base64 encoded authorization string.
Returns:
urllib2.Request instance
"""
request = urllib2.Request(url)
if authorization:
request.add_header('Authorization', 'Basic %s' % authorization)
return request
def collect_metrics(self, base_url, params=None):
"""Return JSON metrics from the given server."""
info = urlparse.urlsplit(base_url)
host = info.hostname
port = info.port or 80
netloc = host
if info.port:
netloc += ':{0}'.format(info.port)
base_url = '{scheme}://{netloc}{path}'.format(
scheme=info.scheme, netloc=netloc, path=info.path)
authorization = None
if info.username or info.password:
authorization = base64.encodestring(
'%s:%s' % (info.username, info.password)).replace('\n', '')
query = '?' + info.query if info.query else ''
sep = '&' if info.query else '?'
query_params = dict(self.__default_scan_params)
if params is None:
params = {}
keys_to_copy = [key
for key in ['tagNameRegex', 'tagValueRegex',
'meterNameRegex']
if key in params]
for key in keys_to_copy:
query_params[key] = params[key]
for key, value in query_params.items():
query += sep + key + "=" + urllib2.quote(value)
sep = "&"
url = '{base_url}{query}'.format(base_url=base_url, query=query)
response = urllib2.urlopen(self.create_request(url, authorization))
all_metrics = json.JSONDecoder(encoding='utf-8').decode(response.read())
try:
self.__log_scan_diff(host, port + 1012, all_metrics.get('metrics', {}))
except:
extype, exvalue, ignore_tb = sys.exc_info()
logging.error(traceback.format_exception_only(extype, exvalue))
# Record how many data values we collected.
    # Add success tag so we have a tag and don't get filtered out.
num_metrics = 0
for metric_data in all_metrics.get('metrics', {}).values():
num_metrics += len(metric_data.get('values', []))
all_metrics['__port'] = port
all_metrics['__host'] = (socket.getfqdn()
if host in ['localhost', '127.0.0.1', None, '']
else host)
all_metrics['metrics']['spectator.datapoints'] = {
'kind': 'Gauge',
'values': [{
'tags': [{'key': 'success', 'value': "true"}],
'values': [{'v': num_metrics, 't': int(time.time() * 1000)}]
}]
}
return (self.filter_metrics(all_metrics, self.__prototype)
if self.__prototype else all_metrics)
def filter_metrics(self, instance, prototype):
"""Filter metrics entries in |instance| to those that match |prototype|.
Only the names and tags are checked. The instance must contain a
tag binding found in the prototype, but may also contain additional tags.
The prototype is the same format as the json of the metrics returned.
"""
filtered = {}
metrics = instance.get('metrics') or {}
for key, expect in prototype.get('metrics', {}).items():
got = metrics.get(key)
if not got:
continue
expect_values = expect.get('values')
if not expect_values:
filtered[key] = got
continue
expect_tags = [elem.get('tags') for elem in expect_values]
# Clone the dict because we are going to modify it to remove values
      # we don't care about
keep_values = []
def have_tags(expect_tags, got_tags):
for wanted_set in expect_tags:
# pylint: disable=invalid-name
ok = True
for want in wanted_set:
if want not in got_tags:
ok = False
break
if ok:
return True
return expect_tags == []
for got_value in got.get('values', []):
got_tags = got_value.get('tags')
if have_tags(expect_tags, got_tags):
keep_values.append(got_value)
if not keep_values:
continue
keep = dict(got)
keep['values'] = keep_values
filtered[key] = keep
result = dict(instance)
result['metrics'] = filtered
return result
def scan_by_service(self, service_catalog, params=None):
result = {}
start = time.time()
service_time = {service: 0 for service in service_catalog.keys()}
result = {service: None for service in service_catalog.keys()}
threads = {}
def timed_collect(self, service, url_endpoints):
now = time.time()
endpoint_data_list = []
for service_url in url_endpoints:
try:
endpoint_data_list.append(self.collect_metrics(
service_url, params=params))
except IOError as ioex:
logging.getLogger(__name__).error(
'%s failed %s with %s',
service, service_url, ioex)
result[service] = endpoint_data_list
service_time[service] = int((time.time() - now) * 1000)
for service, config in service_catalog.items():
threads[service] = threading.Thread(
target=timed_collect,
args=(self, service, config['metrics_url']))
threads[service].start()
for service in service_catalog.keys():
threads[service].join()
logging.info('Collection times %d (ms): %s',
(time.time() - start) * 1000, service_time)
return result
def scan_by_type(self, service_catalog, params=None):
service_map = self.scan_by_service(service_catalog, params=params)
return self.service_map_to_type_map(service_map)
@staticmethod
def ingest_metrics(service, response_data, type_map):
"""Add JSON |metric_data| from |service| name and add to |type_map|"""
metric_data = response_data.get('metrics', {})
for key, value in metric_data.items():
if key in type_map:
have = type_map[key].get(service, [])
have.append(value)
type_map[key][service] = have
else:
type_map[key] = {service: [value]}
@staticmethod
def service_map_to_type_map(service_map):
type_map = {}
for service, got_from_each_endpoint in service_map.items():
for got in got_from_each_endpoint or []:
SpectatorClient.ingest_metrics(service, got, type_map)
return type_map
| {
"content_hash": "b60fe5c338a24493400abfbe2e6219cb",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 82,
"avg_line_length": 34.17553191489362,
"alnum_prop": 0.6211673151750973,
"repo_name": "okoye/spinnaker-monitoring",
"id": "5dd2254a1dac15489e8d717bd2a2f4c4f1bf041d",
"size": "13519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spinnaker-monitoring-daemon/spinnaker-monitoring/spectator_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "169934"
},
{
"name": "Shell",
"bytes": "5496"
}
],
"symlink_target": ""
} |
import numpy as np
from Orange.data import Table, Domain, ContinuousVariable, DiscreteVariable
from Orange.widgets.data.owoutliers import OWOutliers
from Orange.widgets.tests.base import WidgetTest
class TestOWOutliers(WidgetTest):
def setUp(self):
self.widget = self.create_widget(OWOutliers)
self.iris = Table("iris")
def test_data(self):
"""Check widget's data and the output with data on the input"""
self.send_signal("Data", self.iris)
self.assertEqual(self.widget.data, self.iris)
self.assertEqual(len(self.get_output("Inliers")), 76)
self.assertEqual(len(self.get_output("Outliers")), 74)
self.send_signal("Data", None)
self.assertEqual(self.widget.data, None)
self.assertIsNone(self.get_output("Data"))
def test_multiclass(self):
"""Check widget for multiclass dataset"""
attrs = [ContinuousVariable("c1"), ContinuousVariable("c2")]
class_vars = [DiscreteVariable("cls", ["a", "b", "c"]),
DiscreteVariable("cls", ["aa", "bb", "cc"])]
domain = Domain(attrs, class_vars)
X = np.arange(12).reshape(6, 2)
Y = np.array([[0, 1], [2, 1], [0, 2], [1, 1], [1, 2], [2, 0]])
data = Table(domain, X, Y)
self.send_signal("Data", data)
self.assertTrue(self.widget.Error.multiclass_error.is_shown())
self.send_signal("Data", None)
self.assertFalse(self.widget.Error.multiclass_error.is_shown())
| {
"content_hash": "0cf9d3133f784febb9b15c0285df5955",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 42.714285714285715,
"alnum_prop": 0.6280936454849498,
"repo_name": "cheral/orange3",
"id": "2783d97a930cbbded80859fd23a538ead6ec19c2",
"size": "1595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Orange/widgets/data/tests/test_owoutliers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12023"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20217"
},
{
"name": "Python",
"bytes": "4139574"
},
{
"name": "Shell",
"bytes": "47441"
}
],
"symlink_target": ""
} |
cubes = [x**3 for x in range(10)]
odd_cubes1 = filter(lambda cube: cube % 2, cubes)
odd_cubes2 = (cube for cube in cubes if cube % 2)
| {
"content_hash": "72c5bfd89b59f4d942c50de6893ab30d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 49,
"avg_line_length": 33.75,
"alnum_prop": 0.6592592592592592,
"repo_name": "mkhuthir/learnPython",
"id": "14386402e91c04adb23f07aa68d5f8bec8c64084",
"size": "135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Book_learning-python-r1.1/ch5/gen.filter.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7706"
}
],
"symlink_target": ""
} |
""" Utility functions for sparse matrix module
"""
from __future__ import division, print_function, absolute_import
__all__ = ['upcast','getdtype','isscalarlike','isintlike',
'isshape','issequence','isdense']
import numpy as np
# keep this list syncronized with sparsetools
#supported_dtypes = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
# 'int64', 'uint64', 'float32', 'float64',
# 'complex64', 'complex128']
supported_dtypes = ['int8','uint8','short','ushort','intc','uintc',
'longlong','ulonglong','single','double','longdouble',
'csingle','cdouble','clongdouble']
supported_dtypes = [np.typeDict[x] for x in supported_dtypes]
_upcast_memo = {}
def upcast(*args):
"""Returns the nearest supported sparse dtype for the
combination of one or more types.
upcast(t0, t1, ..., tn) -> T where T is a supported dtype
Examples
--------
>>> upcast('int32')
<type 'numpy.int32'>
>>> upcast('bool')
<type 'numpy.int8'>
>>> upcast('int32','float32')
<type 'numpy.float64'>
>>> upcast('bool',complex,float)
<type 'numpy.complex128'>
"""
t = _upcast_memo.get(hash(args))
if t is not None:
return t
upcast = np.find_common_type(args, [])
for t in supported_dtypes:
if np.can_cast(upcast, t):
_upcast_memo[hash(args)] = t
return t
raise TypeError('no supported conversion for types: %r' % (args,))
def upcast_char(*args):
"""Same as `upcast` but taking dtype.char as input (faster)."""
t = _upcast_memo.get(args)
if t is not None:
return t
t = upcast(*map(np.dtype, args))
_upcast_memo[args] = t
return t
def to_native(A):
return np.asarray(A,dtype=A.dtype.newbyteorder('native'))
def getdtype(dtype, a=None, default=None):
"""Function used to simplify argument processing. If 'dtype' is not
specified (is None), returns a.dtype; otherwise returns a np.dtype
object created from the specified dtype argument. If 'dtype' and 'a'
are both None, construct a data type out of the 'default' parameter.
Furthermore, 'dtype' must be in 'allowed' set.
"""
#TODO is this really what we want?
canCast = True
if dtype is None:
try:
newdtype = a.dtype
except AttributeError:
if default is not None:
newdtype = np.dtype(default)
canCast = False
else:
raise TypeError("could not interpret data type")
else:
newdtype = np.dtype(dtype)
return newdtype
def isscalarlike(x):
"""Is x either a scalar, an array scalar, or a 0-dim array?"""
return np.isscalar(x) or (isdense(x) and x.ndim == 0)
def isintlike(x):
"""Is x appropriate as an index into a sparse matrix? Returns True
if it can be cast safely to a machine int.
"""
if issequence(x):
return False
else:
try:
if int(x) == x:
return True
else:
return False
except TypeError:
return False
def isshape(x):
"""Is x a valid 2-tuple of dimensions?
"""
try:
# Assume it's a tuple of matrix dimensions (M, N)
(M, N) = x
except:
return False
else:
if isintlike(M) and isintlike(N):
if np.rank(M) == 0 and np.rank(N) == 0:
return True
return False
def issequence(t):
return isinstance(t, (list, tuple))\
or (isinstance(t, np.ndarray) and (t.ndim == 1))
def isdense(x):
return isinstance(x, np.ndarray)
| {
"content_hash": "9673be684f5aa886097510e866f14813",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 75,
"avg_line_length": 27.93076923076923,
"alnum_prop": 0.5877168824015423,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "7898fb8d6f23e1b3a2febcc7efb2df85c4538725",
"size": "3631",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/scipy/sparse/sputils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |