text (string, lengths 4 to 1.02M) | meta (dict)
---|---
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1",
manifest={
"Featurestore",
},
)
class Featurestore(proto.Message):
r"""Vertex AI Feature Store provides a centralized repository for
organizing, storing, and serving ML features. The Featurestore
is a top-level container for your features and their values.
Attributes:
name (str):
Output only. Name of the Featurestore. Format:
``projects/{project}/locations/{location}/featurestores/{featurestore}``
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Featurestore
was created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Featurestore
was last updated.
etag (str):
Optional. Used to perform consistent
read-modify-write updates. If not set, a blind
"overwrite" update happens.
labels (Mapping[str, str]):
Optional. The labels with user-defined
metadata to organize your Featurestore.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
on and examples of labels. No more than 64 user
labels can be associated with one Featurestore
(system labels are excluded).
System reserved label keys are prefixed with
"aiplatform.googleapis.com/" and are immutable.
online_serving_config (google.cloud.aiplatform_v1beta1.types.Featurestore.OnlineServingConfig):
Optional. Config for online storage resources. This field
must not be set together with
``OnlineStoreReplicationConfig``. If both are unset, the
featurestore will not have an online store and cannot be
used for online serving.
state (google.cloud.aiplatform_v1beta1.types.Featurestore.State):
Output only. State of the featurestore.
encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
Optional. Customer-managed encryption key
spec for data storage. If set, both of the
online and offline data storage will be secured
by this key.
"""
class State(proto.Enum):
r"""Possible states a featurestore can have."""
STATE_UNSPECIFIED = 0
STABLE = 1
UPDATING = 2
class OnlineServingConfig(proto.Message):
r"""OnlineServingConfig specifies the details for provisioning
online serving resources.
Attributes:
fixed_node_count (int):
The number of nodes for the online store. The
number of nodes doesn't scale automatically, but
you can manually update the number of nodes. If
set to 0, the featurestore will not have an
online store and cannot be used for online
serving.
scaling (google.cloud.aiplatform_v1beta1.types.Featurestore.OnlineServingConfig.Scaling):
Online serving scaling configuration. Only one of
``fixed_node_count`` and ``scaling`` can be set. Setting one
will reset the other.
"""
class Scaling(proto.Message):
r"""Online serving scaling configuration. If min_node_count and
max_node_count are set to the same value, the cluster will be
configured with a fixed number of nodes (no auto-scaling).
Attributes:
min_node_count (int):
Required. The minimum number of nodes to
scale down to. Must be greater than or equal to
1.
max_node_count (int):
The maximum number of nodes to scale up to. Must be greater
than min_node_count, and less than or equal to 10 times
min_node_count.
"""
min_node_count = proto.Field(
proto.INT32,
number=1,
)
max_node_count = proto.Field(
proto.INT32,
number=2,
)
fixed_node_count = proto.Field(
proto.INT32,
number=2,
)
scaling = proto.Field(
proto.MESSAGE,
number=4,
message="Featurestore.OnlineServingConfig.Scaling",
)
name = proto.Field(
proto.STRING,
number=1,
)
create_time = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
update_time = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
etag = proto.Field(
proto.STRING,
number=5,
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=6,
)
online_serving_config = proto.Field(
proto.MESSAGE,
number=7,
message=OnlineServingConfig,
)
state = proto.Field(
proto.ENUM,
number=8,
enum=State,
)
encryption_spec = proto.Field(
proto.MESSAGE,
number=10,
message=gca_encryption_spec.EncryptionSpec,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "306039c779f6bd76844278e5033d3c07",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 103,
"avg_line_length": 35.895705521472394,
"alnum_prop": 0.5957955904973509,
"repo_name": "googleapis/python-aiplatform",
"id": "aa7b45e938d4dd7c88259f08a50e3b37dcad72cc",
"size": "6451",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/aiplatform_v1beta1/types/featurestore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
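A minimal usage sketch for the generated ``Featurestore`` message defined above, assuming the ``google-cloud-aiplatform`` package that ships this module is installed; the label value is a placeholder.

from google.cloud.aiplatform_v1beta1.types import featurestore

fs = featurestore.Featurestore(
    labels={"team": "ml-platform"},
    online_serving_config=featurestore.Featurestore.OnlineServingConfig(
        # Only one of fixed_node_count and scaling may be set.
        scaling=featurestore.Featurestore.OnlineServingConfig.Scaling(
            min_node_count=1,
            max_node_count=5,
        ),
    ),
)
print(fs.online_serving_config.scaling.max_node_count)  # 5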
# -*- coding: utf-8 -*-
""" Task 703 """
def average_vectors(first_idx, second_idx, clusters):
""" rozw """
return [(a+b)/2 for a, b in \
zip(clusters[first_idx].vec, clusters[second_idx].vec)] | {
"content_hash": "fa2b7008f29e8e89f22be201971de392",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 59,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.5845410628019324,
"repo_name": "katzoo/amu",
"id": "9d1b55bd32cbcf3750ae4422ec821299a980218c",
"size": "209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isi/cluster/Task703.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Bison",
"bytes": "12146"
},
{
"name": "Java",
"bytes": "78450"
},
{
"name": "Perl",
"bytes": "2597"
},
{
"name": "Python",
"bytes": "26282"
}
],
"symlink_target": ""
} |
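A quick sketch of how ``average_vectors`` above can be exercised; the ``Cluster`` class here is a hypothetical stand-in, since the function only relies on each cluster exposing a ``vec`` attribute.

class Cluster(object):
    def __init__(self, vec):
        self.vec = vec

clusters = [Cluster([1.0, 2.0]), Cluster([3.0, 6.0])]
print(average_vectors(0, 1, clusters))  # [2.0, 4.0]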
import datetime
print('now :', datetime.datetime.now())
print('today :', datetime.datetime.today())
print('utcnow:', datetime.datetime.utcnow())
d = datetime.datetime.now()
FIELDS = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond']
for f in FIELDS:
print('%15s : %s' % (f, getattr(d, f)))
t = datetime.time(1, 2, 3)
print('t:', t)
d = datetime.date.today()
print('d:', d)
dt = datetime.datetime.combine(d, t)
print('dt:', dt)
| {
"content_hash": "5fb28c44d4d32e054a106e5b3f0b1d5a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 76,
"avg_line_length": 26.647058823529413,
"alnum_prop": 0.6291390728476821,
"repo_name": "eroicaleo/ThePythonStandardLibraryByExample",
"id": "8122cedb3f1bb2014b8e76d2afd65fb2e01aa552",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ch04DatesAndTimes/4.2Datetime/Datetimes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34587"
}
],
"symlink_target": ""
} |
from django.forms import ModelForm
from django.utils import translation
from hvad.admin import TranslatableModelAdminMixin
from hvad.forms import translatable_inlineformset_factory, translationformset_factory
from hvad.test_utils.testcase import HvadTestCase
from hvad.test_utils.project.app.models import Normal, Related
from hvad.test_utils.fixtures import NormalFixture
from hvad.test_utils.data import NORMAL
from hvad.test_utils.forms import FormData
class TestBasicInline(HvadTestCase):
def setUp(self):
with translation.override("en"):
self.object = Normal.objects.language().create(shared_field="test", translated_field="translated test")
self.request = self.request_factory.post('/url/')
def test_create_fields_inline(self):
with translation.override("en"):
# Fixtures (should eventually be shared with other tests)
translate_mixin = TranslatableModelAdminMixin()
formset = translatable_inlineformset_factory(translate_mixin._language(self.request),
Normal, Related)(#self.request.POST,
instance=self.object)
self.assertTrue("normal" in formset.forms[0].fields)
self.assertTrue("translated" in formset.forms[0].fields)
self.assertTrue("translated_to_translated" in formset.forms[0].fields)
self.assertFalse("language_code" in formset.forms[0].fields)
class TestTranslationsInline(HvadTestCase, NormalFixture):
normal_count = 1
def test_render_formset(self):
instance = Normal.objects.language('en').get(pk=self.normal_id[1])
with self.assertNumQueries(1):
Formset = translationformset_factory(Normal, extra=1, exclude=[])
formset = Formset(instance=instance)
self.assertEqual(len(formset.forms), 3)
self.assertIn('translated_field', formset.forms[0].fields)
self.assertIn('language_code', formset.forms[0].fields)
self.assertIn('DELETE', formset.forms[0].fields)
self.assertIn('id', formset.forms[0].fields)
self.assertNotIn('master', formset.forms[0].fields)
self.assertEqual(formset.forms[0].initial['language_code'], 'en')
self.assertEqual(formset.forms[0].initial['translated_field'],
NORMAL[1].translated_field['en'])
self.assertEqual(formset.forms[1].initial['language_code'], 'ja')
self.assertEqual(formset.forms[1].initial['translated_field'],
NORMAL[1].translated_field['ja'])
self.assertEqual(formset.forms[2].initial, {})
with self.assertNumQueries(1):
class Form(ModelForm):
class Meta:
fields = ('translated_field',)
Formset = translationformset_factory(Normal, form=Form, extra=1, exclude=[])
formset = Formset(instance=instance)
self.assertIn('translated_field', formset.forms[0].fields)
self.assertIn('language_code', formset.forms[0].fields)
self.assertIn('DELETE', formset.forms[0].fields)
self.assertIn('id', formset.forms[0].fields)
self.assertNotIn('master', formset.forms[0].fields)
def test_create_translations(self):
instance = Normal.objects.untranslated().get(pk=self.normal_id[1])
Formset = translationformset_factory(Normal, extra=1, exclude=[])
initial = Formset(instance=instance)
data = FormData(initial)
data.set_formset_field(initial, 2, 'language_code', 'de')
data.set_formset_field(initial, 2, 'translated_field', 'Deutsch')
formset = Formset(data=data, instance=instance)
formset.save()
obj = Normal.objects.language('de').get(pk=instance.pk)
self.assertEqual(obj.translated_field, 'Deutsch')
self.assertEqual(obj.translations.count(), 3)
def test_delete_translations(self):
instance = Normal.objects.language('en').get(pk=self.normal_id[1])
Formset = translationformset_factory(Normal, extra=1, exclude=[])
# Delete one of the two translations
initial = Formset(instance=instance)
data = FormData(initial)
data.set_formset_field(initial, 0, 'DELETE', 'DELETE')
formset = Formset(data=data, instance=instance)
self.assertTrue(formset.is_valid())
formset.save()
self.assertCountEqual(instance.get_available_languages(), ('ja',))
# Try to delete the other translation - should fail
initial = Formset(instance=instance)
data = FormData(initial)
data.set_formset_field(initial, 0, 'DELETE', 'DELETE')
formset = Formset(data=data, instance=instance)
self.assertFalse(formset.is_valid())
def test_mixed_update_translations(self):
instance = Normal.objects.language('en').get(pk=self.normal_id[1])
Formset = translationformset_factory(Normal, extra=1, exclude=[])
initial = Formset(instance=instance)
data = FormData(initial)
data.set_formset_field(initial, 0, 'DELETE', 'DELETE')
data.set_formset_field(initial, 1, 'translated_field', 'updated_ja')
data.set_formset_field(initial, 2, 'language_code', 'de')
data.set_formset_field(initial, 2, 'translated_field', 'Deutsch')
formset = Formset(data=data, instance=instance)
self.assertTrue(formset.is_valid())
formset.save()
self.assertCountEqual(instance.get_available_languages(), ('ja', 'de'))
obj = Normal.objects.language('ja').get(pk=instance.pk)
self.assertEqual(obj.shared_field, NORMAL[1].shared_field)
self.assertEqual(obj.translated_field, 'updated_ja')
obj = Normal.objects.language('de').get(pk=instance.pk)
self.assertEqual(obj.shared_field, NORMAL[1].shared_field)
self.assertEqual(obj.translated_field, 'Deutsch')
| {
"content_hash": "6ad11f087ee36b1cd76186d1033267da",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 115,
"avg_line_length": 46.57692307692308,
"alnum_prop": 0.6439306358381502,
"repo_name": "philippeowagner/django-hvad",
"id": "956fec7b4edfdf0b4556f31040186947a5cfc486",
"size": "6079",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "hvad/tests/forms_inline.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "13766"
},
{
"name": "Python",
"bytes": "445037"
}
],
"symlink_target": ""
} |
from utils import parse_args, create_experiment_dirs, calculate_flops, show_parameters
from model import ShuffleNet
from train import Train
from data_loader import DataLoader
from summarizer import Summarizer
import tensorflow as tf
def main():
# Parse the JSON arguments
config_args = parse_args()
# Create the experiment directories
_, config_args.summary_dir, config_args.checkpoint_dir = create_experiment_dirs(config_args.experiment_dir)
# Reset the default Tensorflow graph
tf.reset_default_graph()
# Tensorflow specific configuration
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# Data loading
# The batch size is equal to 1 when testing to simulate the real experiment.
data_batch_size = config_args.batch_size if config_args.train_or_test == "train" else 1
data = DataLoader(data_batch_size, config_args.shuffle)
print("Loading Data...")
config_args.img_height, config_args.img_width, config_args.num_channels, \
config_args.train_data_size, config_args.test_data_size = data.load_data()
print("Data loaded\n\n")
# Model creation
print("Building the model...")
model = ShuffleNet(config_args)
print("Model is built successfully\n\n")
# Parameters visualization
show_parameters()
# Summarizer creation
summarizer = Summarizer(sess, config_args.summary_dir)
# Train class
trainer = Train(sess, model, data, summarizer)
if config_args.train_or_test == 'train':
try:
# print("FLOPs for batch size = " + str(config_args.batch_size) + "\n")
# calculate_flops()
print("Training...")
trainer.train()
print("Training Finished\n\n")
except KeyboardInterrupt:
trainer.save_model()
elif config_args.train_or_test == 'test':
# print("FLOPs for single inference \n")
# calculate_flops()
# This can be 'val' or 'test' or even 'train' according to the needs.
print("Testing...")
trainer.test('val')
print("Testing Finished\n\n")
else:
raise ValueError("Train or Test options only are allowed")
if __name__ == '__main__':
main()
| {
"content_hash": "4a47f7ff05ef41f7a4d6a6c08132e61e",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 111,
"avg_line_length": 33.11594202898551,
"alnum_prop": 0.6603938730853391,
"repo_name": "MG2033/ShuffleNet",
"id": "84453b3f4053a5f35b87db3a8a599a8976fe187f",
"size": "2285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46143"
}
],
"symlink_target": ""
} |
import google.api_core.grpc_helpers
from google.cloud.tasks_v2beta3.proto import cloudtasks_pb2_grpc
class CloudTasksGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.tasks.v2beta3 CloudTasks API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
def __init__(self,
channel=None,
credentials=None,
address='cloudtasks.googleapis.com:443'):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments are mutually '
'exclusive.', )
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
'cloud_tasks_stub': cloudtasks_pb2_grpc.CloudTasksStub(channel),
}
@classmethod
def create_channel(cls,
address='cloudtasks.googleapis.com:443',
credentials=None):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address,
credentials=credentials,
scopes=cls._OAUTH_SCOPES,
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def list_queues(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists queues.
Queues are returned in lexicographical order.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].ListQueues
@property
def get_queue(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets a queue.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].GetQueue
@property
def create_queue(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates a queue.
Queues created with this method allow tasks to live for a maximum of 31
days. After a task is 31 days old, the task will be deleted regardless
of whether it was dispatched or not.
WARNING: Using this method may have unintended side effects if you are
using an App Engine ``queue.yaml`` or ``queue.xml`` file to manage your
queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__ before
using this method.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].CreateQueue
@property
def update_queue(self):
"""Return the gRPC stub for {$apiMethod.name}.
Updates a queue.
This method creates the queue if it does not exist and updates the queue
if it does exist.
Queues created with this method allow tasks to live for a maximum of 31
days. After a task is 31 days old, the task will be deleted regardless
of whether it was dispatched or not.
WARNING: Using this method may have unintended side effects if you are
using an App Engine ``queue.yaml`` or ``queue.xml`` file to manage your
queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__ before
using this method.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].UpdateQueue
@property
def delete_queue(self):
"""Return the gRPC stub for {$apiMethod.name}.
Deletes a queue.
This command will delete the queue even if it has tasks in it.
Note: If you delete a queue, a queue with the same name can't be created
for 7 days.
WARNING: Using this method may have unintended side effects if you are
using an App Engine ``queue.yaml`` or ``queue.xml`` file to manage your
queues. Read `Overview of Queue Management and
queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__ before
using this method.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].DeleteQueue
@property
def purge_queue(self):
"""Return the gRPC stub for {$apiMethod.name}.
Purges a queue by deleting all of its tasks.
All tasks created before this method is called are permanently deleted.
Purge operations can take up to one minute to take effect. Tasks
might be dispatched before the purge takes effect. A purge is irreversible.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].PurgeQueue
@property
def pause_queue(self):
"""Return the gRPC stub for {$apiMethod.name}.
Pauses the queue.
If a queue is paused then the system will stop dispatching tasks until
the queue is resumed via ``ResumeQueue``. Tasks can still be added when
the queue is paused. A queue is paused if its ``state`` is ``PAUSED``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].PauseQueue
@property
def resume_queue(self):
"""Return the gRPC stub for {$apiMethod.name}.
Resume a queue.
This method resumes a queue after it has been ``PAUSED`` or
``DISABLED``. The state of a queue is stored in the queue's ``state``;
after calling this method it will be set to ``RUNNING``.
WARNING: Resuming many high-QPS queues at the same time can lead to
target overloading. If you are resuming high-QPS queues, follow the
500/50/5 pattern described in `Managing Cloud Tasks Scaling
Risks <https://cloud.google.com/tasks/docs/manage-cloud-task-scaling>`__.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].ResumeQueue
@property
def get_iam_policy(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets the access control policy for a ``Queue``. Returns an empty policy
if the resource exists and does not have a policy set.
Authorization requires the following `Google
IAM <https://cloud.google.com/iam>`__ permission on the specified
resource parent:
- ``cloudtasks.queues.getIamPolicy``
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].GetIamPolicy
@property
def set_iam_policy(self):
"""Return the gRPC stub for {$apiMethod.name}.
Sets the access control policy for a ``Queue``. Replaces any existing
policy.
Note: The Cloud Console does not check queue-level IAM permissions yet.
Project-level permissions are required to use the Cloud Console.
Authorization requires the following `Google
IAM <https://cloud.google.com/iam>`__ permission on the specified
resource parent:
- ``cloudtasks.queues.setIamPolicy``
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].SetIamPolicy
@property
def test_iam_permissions(self):
"""Return the gRPC stub for {$apiMethod.name}.
Returns permissions that a caller has on a ``Queue``. If the resource
does not exist, this will return an empty set of permissions, not a
``NOT_FOUND`` error.
Note: This operation is designed to be used for building
permission-aware UIs and command-line tools, not for authorization
checking. This operation may "fail open" without warning.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].TestIamPermissions
@property
def list_tasks(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists the tasks in a queue.
By default, only the ``BASIC`` view is retrieved due to performance
considerations; ``response_view`` controls the subset of information
which is returned.
The tasks may be returned in any order. The ordering may change at any
time.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].ListTasks
@property
def get_task(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets a task.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].GetTask
@property
def create_task(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates a task and adds it to a queue.
Tasks cannot be updated after creation; there is no UpdateTask command.
- For ``App Engine queues``, the maximum task size is 100KB.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].CreateTask
@property
def delete_task(self):
"""Return the gRPC stub for {$apiMethod.name}.
Deletes a task.
A task can be deleted if it is scheduled or dispatched. A task
cannot be deleted if it has executed successfully or permanently
failed.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].DeleteTask
@property
def run_task(self):
"""Return the gRPC stub for {$apiMethod.name}.
Forces a task to run now.
When this method is called, Cloud Tasks will dispatch the task, even if
the task is already running, the queue has reached its ``RateLimits`` or
is ``PAUSED``.
This command is meant to be used for manual debugging. For example,
``RunTask`` can be used to retry a failed task after a fix has been made
or to manually force a task to be dispatched now.
The dispatched task is returned. That is, the task that is returned
contains the ``status`` after the task is dispatched but before the task
is received by its target.
If Cloud Tasks receives a successful response from the task's target,
then the task will be deleted; otherwise the task's ``schedule_time``
will be reset to the time that ``RunTask`` was called plus the retry
delay specified in the queue's ``RetryConfig``.
``RunTask`` returns ``NOT_FOUND`` when it is called on a task that has
already succeeded or permanently failed.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cloud_tasks_stub'].RunTask
| {
"content_hash": "b7453ca089e5c836410fc048affac199",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 83,
"avg_line_length": 36.255528255528255,
"alnum_prop": 0.6248305773922472,
"repo_name": "jonparrott/gcloud-python",
"id": "6efcb26ec89d32e567bc32552fd28861ebeb4ea1",
"size": "15358",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tasks/google/cloud/tasks_v2beta3/gapic/transports/cloud_tasks_grpc_transport.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "3459300"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
} |
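A minimal sketch of the channel handling documented in the transport above, assuming the ``google-cloud-tasks`` (v2beta3 generation) package is installed and Application Default Credentials are available; passing both ``channel`` and ``credentials`` raises ``ValueError``, as enforced in ``__init__``.

# Let the transport build its own secure channel from ambient credentials.
transport = CloudTasksGrpcTransport()
channel = transport.channel            # the underlying grpc.Channel
list_queues = transport.list_queues    # bound CloudTasksStub method

# Or hand in a pre-built channel (mutually exclusive with credentials).
shared_channel = CloudTasksGrpcTransport.create_channel(
    address='cloudtasks.googleapis.com:443')
transport2 = CloudTasksGrpcTransport(channel=shared_channel)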
from sentinels import NOTHING
__all__ = [
"TypesRegistry",
"get_registry"
]
class TypesRegistry:
def __init__(self):
self._mapping = {}
self._back = {}
def register(self, type_, alias):
assert isinstance(type_, type)
assert isinstance(alias, type)
self._mapping[type_] = alias
self._back[alias] = type_
def get_alias(self, type_, default=NOTHING):
if default is NOTHING:
return self._mapping[type_]
else:
return self._mapping.get(type_, default)
def get_type(self, alias, default=NOTHING):
if default is NOTHING:
return self._back[alias]
else:
return self._back.get(alias, default)
_registry = TypesRegistry()
def get_registry():
return _registry
| {
"content_hash": "460290a7c9a9d459ad62fbd3d243a9df",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 52,
"avg_line_length": 21.42105263157895,
"alnum_prop": 0.5773955773955773,
"repo_name": "Evgenus/versioned-data",
"id": "afdb042ff9590f84ee79cb282d8cd0d7a84d359d",
"size": "814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "versioned/types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "572"
},
{
"name": "Python",
"bytes": "7439"
}
],
"symlink_target": ""
} |
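A short sketch of the bidirectional mapping kept by ``TypesRegistry`` above; ``Point`` and ``PointAlias`` are hypothetical types used purely for illustration.

class Point:
    pass

class PointAlias:
    pass

registry = get_registry()
registry.register(Point, PointAlias)
assert registry.get_alias(Point) is PointAlias
assert registry.get_type(PointAlias) is Point
assert registry.get_alias(str, default=None) is None  # a default avoids KeyError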
from urllib2 import urlopen, build_opener, install_opener
from urllib2 import Request, HTTPSHandler
from urllib2 import URLError, HTTPError
from urllib import urlencode
import random
import datetime
import time
import uuid
import hashlib
import socket
def generate_uuid(basedata = None):
""" Provides a _random_ UUID with no input, or a UUID4-format MD5 checksum of any input data provided """
if basedata is None:
return str(uuid.uuid4())
elif isinstance(basedata, basestring):
checksum = hashlib.md5(basedata).hexdigest()
return '%8s-%4s-%4s-%4s-%12s' % (checksum[0:8], checksum[8:12], checksum[12:16], checksum[16:20], checksum[20:32])
class Time(datetime.datetime):
""" Wrappers and convenience methods for processing various time representations """
@classmethod
def from_unix(cls, seconds, milliseconds = 0):
""" Produce a full |datetime.datetime| object from a Unix timestamp """
base = list(time.gmtime(seconds))[0:6]
base.append(milliseconds * 1000) # microseconds
return cls(* base)
@classmethod
def to_unix(cls, timestamp):
""" Wrapper over time module to produce Unix epoch time as a float """
if not isinstance(timestamp, datetime.datetime):
raise TypeError, 'Time.to_unix expects a datetime object'
base = time.mktime(timestamp.timetuple())
return base
@classmethod
def milliseconds_offset(cls, timestamp, now = None):
""" Offset time (in milliseconds) from a |datetime.datetime| object to now """
if isinstance(timestamp, (int, float)):
base = timestamp
else:
base = cls.to_unix(timestamp)
base = base + (timestamp.microsecond / 1000000.0)  # float division keeps sub-second precision on Python 2
if now is None:
now = time.time()
return (now - base) * 1000
class HTTPRequest(object):
""" URL Construction and request handling abstraction.
This is not intended to be used outside this module.
Automates mapping of persistent state (i.e. query parameters)
onto transient datasets for each query.
"""
endpoint = 'https://www.google-analytics.com/collect'
@staticmethod
def debug():
""" Activate debugging on urllib2 """
handler = HTTPSHandler(debuglevel = 1)
opener = build_opener(handler)
install_opener(opener)
# Store properties for all requests
def __init__(self, user_agent = None, *args, **opts):
self.user_agent = user_agent or 'Analytics Pros - Universal Analytics (Python)'
@classmethod
def fixUTF8(cls, data): # Ensure proper encoding for UA's servers...
""" Convert all strings to UTF-8 """
for key in data:
if isinstance(data[ key ], basestring):
data[ key ] = data[ key ].encode('utf-8')
return data
# Apply stored properties to the given dataset & POST to the configured endpoint
def send(self, data):
request = Request(
self.endpoint + '?' + urlencode(self.fixUTF8(data)),
headers = {
'User-Agent': self.user_agent
}
)
self.open(request)
def open(self, request):
try:
return urlopen(request)
except HTTPError as e:
return False
except URLError as e:
self.cache_request(request)
return False
def cache_request(self, request):
# TODO: implement a proper caching mechanism here for re-transmitting hits
# record = (Time.now(), request.get_full_url(), request.get_data(), request.headers)
pass
class HTTPPost(HTTPRequest):
# Apply stored properties to the given dataset & POST to the configured endpoint
def send(self, data):
request = Request(
self.endpoint,
data = urlencode(self.fixUTF8(data)),
headers = {
'User-Agent': self.user_agent
}
)
self.open(request)
class Tracker(object):
""" Primary tracking interface for Universal Analytics """
params = None
parameter_alias = {}
valid_hittypes = ('pageview', 'event', 'social', 'screenview', 'transaction', 'item', 'exception', 'timing')
@classmethod
def alias(cls, typemap, base, *names):
""" Declare an alternate (humane) name for a measurement protocol parameter """
cls.parameter_alias[ base ] = (typemap, base)
for i in names:
cls.parameter_alias[ i ] = (typemap, base)
@classmethod
def coerceParameter(cls, name, value = None):
if isinstance(name, basestring) and name[0] == '&':
return name[1:], str(value)
elif name in cls.parameter_alias:
typecast, param_name = cls.parameter_alias.get(name)
return param_name, typecast(value)
else:
raise KeyError, 'Parameter "{0}" is not recognized'.format(name)
def payload(self, data):
for key, value in data.iteritems():
try:
yield self.coerceParameter(key, value)
except KeyError:
continue
option_sequence = {
'pageview': [ (basestring, 'dp') ],
'event': [ (basestring, 'ec'), (basestring, 'ea'), (basestring, 'el'), (int, 'ev') ],
'social': [ (basestring, 'sn'), (basestring, 'sa'), (basestring, 'st') ],
'timing': [ (basestring, 'utc'), (basestring, 'utv'), (basestring, 'utt'), (basestring, 'utl') ]
}
@classmethod
def consume_options(cls, data, hittype, args):
""" Interpret sequential arguments related to known hittypes based on declared structures """
opt_position = 0
data[ 't' ] = hittype # integrate hit type parameter
if hittype in cls.option_sequence:
for expected_type, optname in cls.option_sequence[ hittype ]:
if opt_position < len(args) and isinstance(args[opt_position], expected_type):
data[ optname ] = args[ opt_position ]
opt_position += 1
@classmethod
def hittime(cls, timestamp = None, age = None, milliseconds = None):
""" Returns an integer represeting the milliseconds offset for a given hit (relative to now) """
if isinstance(timestamp, (int, float)):
return int(Time.milliseconds_offset(Time.from_unix(timestamp, milliseconds = milliseconds)))
if isinstance(timestamp, datetime.datetime):
return int(Time.milliseconds_offset(timestamp))
if isinstance(age, (int, float)):
return int(age * 1000) + (milliseconds or 0)
@property
def account(self):
return self.params.get('tid', None)
def __init__(self, account, name = None, client_id = None, hash_client_id = False, user_id = None, user_agent = None, use_post = True):
if use_post is False:
self.http = HTTPRequest(user_agent = user_agent)
else:
self.http = HTTPPost(user_agent = user_agent)
self.params = { 'v': 1, 'tid': account }
if client_id is None:
client_id = generate_uuid()
self.params[ 'cid' ] = client_id
self.hash_client_id = hash_client_id
if user_id is not None:
self.params[ 'uid' ] = user_id
def set_timestamp(self, data):
""" Interpret time-related options, apply queue-time parameter as needed """
if 'hittime' in data: # an absolute timestamp
data['qt'] = self.hittime(timestamp = data.pop('hittime', None))
if 'hitage' in data: # a relative age (in seconds)
data['qt'] = self.hittime(age = data.pop('hitage', None))
def send(self, hittype, *args, **data):
""" Transmit HTTP requests to Google Analytics using the measurement protocol """
if hittype not in self.valid_hittypes:
raise KeyError('Unsupported Universal Analytics Hit Type: {0}'.format(repr(hittype)))
self.set_timestamp(data)
self.consume_options(data, hittype, args)
for item in args: # process dictionary-object arguments of transient data
if isinstance(item, dict):
for key, val in self.payload(item):
data[ key ] = val
for k, v in self.params.iteritems(): # update only absent parameters
if k not in data:
data[ k ] = v
data = dict(self.payload(data))
if self.hash_client_id:
data[ 'cid' ] = generate_uuid(data[ 'cid' ])
# Transmit the hit to Google...
self.http.send(data)
# Setting persistent attributes of the session/hit/etc (inc. custom dimensions/metrics)
def set(self, name, value = None):
if isinstance(name, dict):
for key, value in name.iteritems():
try:
param, value = self.coerceParameter(key, value)
self.params[param] = value
except KeyError:
pass
elif isinstance(name, basestring):
try:
param, value = self.coerceParameter(name, value)
self.params[param] = value
except KeyError:
pass
def __getitem__(self, name):
param, value = self.coerceParameter(name, None)
return self.params.get(param, None)
def __setitem__(self, name, value):
param, value = self.coerceParameter(name, value)
self.params[param] = value
def __delitem__(self, name):
param, value = self.coerceParameter(name, None)
if param in self.params:
del self.params[param]
def safe_unicode(obj):
""" Safe convertion to the Unicode string version of the object """
try:
return unicode(obj)
except UnicodeDecodeError:
return obj.decode('utf-8')
# Declaring name mappings for Measurement Protocol parameters
MAX_CUSTOM_DEFINITIONS = 200
MAX_EC_LISTS = 11 # 1-based index
MAX_EC_PRODUCTS = 11 # 1-based index
MAX_EC_PROMOTIONS = 11 # 1-based index
Tracker.alias(int, 'v', 'protocol-version')
Tracker.alias(safe_unicode, 'cid', 'client-id', 'clientId', 'clientid')
Tracker.alias(safe_unicode, 'tid', 'trackingId', 'account')
Tracker.alias(safe_unicode, 'uid', 'user-id', 'userId', 'userid')
Tracker.alias(safe_unicode, 'uip', 'user-ip', 'userIp', 'ipaddr')
Tracker.alias(safe_unicode, 'ua', 'userAgent', 'userAgentOverride', 'user-agent')
Tracker.alias(safe_unicode, 'dp', 'page', 'path')
Tracker.alias(safe_unicode, 'dt', 'title', 'pagetitle', 'pageTitle', 'page-title')
Tracker.alias(safe_unicode, 'dl', 'location')
Tracker.alias(safe_unicode, 'dh', 'hostname')
Tracker.alias(safe_unicode, 'sc', 'sessioncontrol', 'session-control', 'sessionControl')
Tracker.alias(safe_unicode, 'dr', 'referrer', 'referer')
Tracker.alias(int, 'qt', 'queueTime', 'queue-time')
Tracker.alias(safe_unicode, 't', 'hitType', 'hittype')
Tracker.alias(int, 'aip', 'anonymizeIp', 'anonIp', 'anonymize-ip')
# Campaign attribution
Tracker.alias(safe_unicode, 'cn', 'campaign', 'campaignName', 'campaign-name')
Tracker.alias(safe_unicode, 'cs', 'source', 'campaignSource', 'campaign-source')
Tracker.alias(safe_unicode, 'cm', 'medium', 'campaignMedium', 'campaign-medium')
Tracker.alias(safe_unicode, 'ck', 'keyword', 'campaignKeyword', 'campaign-keyword')
Tracker.alias(safe_unicode, 'cc', 'content', 'campaignContent', 'campaign-content')
Tracker.alias(safe_unicode, 'ci', 'campaignId', 'campaignID', 'campaign-id')
# Technical specs
Tracker.alias(safe_unicode, 'sr', 'screenResolution', 'screen-resolution', 'resolution')
Tracker.alias(safe_unicode, 'vp', 'viewport', 'viewportSize', 'viewport-size')
Tracker.alias(safe_unicode, 'de', 'encoding', 'documentEncoding', 'document-encoding')
Tracker.alias(int, 'sd', 'colors', 'screenColors', 'screen-colors')
Tracker.alias(safe_unicode, 'ul', 'language', 'user-language', 'userLanguage')
# Mobile app
Tracker.alias(safe_unicode, 'an', 'appName', 'app-name', 'app')
Tracker.alias(safe_unicode, 'cd', 'contentDescription', 'screenName', 'screen-name', 'content-description')
Tracker.alias(safe_unicode, 'av', 'appVersion', 'app-version', 'version')
Tracker.alias(safe_unicode, 'aid', 'appID', 'appId', 'application-id', 'app-id', 'applicationId')
Tracker.alias(safe_unicode, 'aiid', 'appInstallerId', 'app-installer-id')
# Ecommerce
Tracker.alias(safe_unicode, 'ta', 'affiliation', 'transactionAffiliation', 'transaction-affiliation')
Tracker.alias(safe_unicode, 'ti', 'transaction', 'transactionId', 'transaction-id')
Tracker.alias(float, 'tr', 'revenue', 'transactionRevenue', 'transaction-revenue')
Tracker.alias(float, 'ts', 'shipping', 'transactionShipping', 'transaction-shipping')
Tracker.alias(float, 'tt', 'tax', 'transactionTax', 'transaction-tax')
Tracker.alias(safe_unicode, 'cu', 'currency', 'transactionCurrency', 'transaction-currency') # Currency code, e.g. USD, EUR
Tracker.alias(safe_unicode, 'in', 'item-name', 'itemName')
Tracker.alias(float, 'ip', 'item-price', 'itemPrice')
Tracker.alias(float, 'iq', 'item-quantity', 'itemQuantity')
Tracker.alias(safe_unicode, 'ic', 'item-code', 'sku', 'itemCode')
Tracker.alias(safe_unicode, 'iv', 'item-variation', 'item-category', 'itemCategory', 'itemVariation')
# Events
Tracker.alias(safe_unicode, 'ec', 'event-category', 'eventCategory', 'category')
Tracker.alias(safe_unicode, 'ea', 'event-action', 'eventAction', 'action')
Tracker.alias(safe_unicode, 'el', 'event-label', 'eventLabel', 'label')
Tracker.alias(int, 'ev', 'event-value', 'eventValue', 'value')
Tracker.alias(int, 'ni', 'noninteractive', 'nonInteractive', 'noninteraction', 'nonInteraction')
# Social
Tracker.alias(safe_unicode, 'sa', 'social-action', 'socialAction')
Tracker.alias(safe_unicode, 'sn', 'social-network', 'socialNetwork')
Tracker.alias(safe_unicode, 'st', 'social-target', 'socialTarget')
# Exceptions
Tracker.alias(safe_unicode, 'exd', 'exception-description', 'exceptionDescription', 'exDescription')
Tracker.alias(int, 'exf', 'exception-fatal', 'exceptionFatal', 'exFatal')
# User Timing
Tracker.alias(safe_unicode, 'utc', 'timingCategory', 'timing-category')
Tracker.alias(safe_unicode, 'utv', 'timingVariable', 'timing-variable')
Tracker.alias(float, 'utt', 'time', 'timingTime', 'timing-time')
Tracker.alias(safe_unicode, 'utl', 'timingLabel', 'timing-label')
Tracker.alias(float, 'dns', 'timingDNS', 'timing-dns')
Tracker.alias(float, 'pdt', 'timingPageLoad', 'timing-page-load')
Tracker.alias(float, 'rrt', 'timingRedirect', 'timing-redirect')
Tracker.alias(safe_unicode, 'tcp', 'timingTCPConnect', 'timing-tcp-connect')
Tracker.alias(safe_unicode, 'srt', 'timingServerResponse', 'timing-server-response')
# Custom dimensions and metrics
for i in range(0,200):
Tracker.alias(safe_unicode, 'cd{0}'.format(i), 'dimension{0}'.format(i))
Tracker.alias(int, 'cm{0}'.format(i), 'metric{0}'.format(i))
# Enhanced Ecommerce
Tracker.alias(str, 'pa') # Product action
Tracker.alias(str, 'tcc') # Coupon code
Tracker.alias(unicode, 'pal') # Product action list
Tracker.alias(int, 'cos') # Checkout step
Tracker.alias(str, 'col') # Checkout step option
Tracker.alias(str, 'promoa') # Promotion action
for product_index in range(1, MAX_EC_PRODUCTS):
Tracker.alias(str, 'pr{0}id'.format(product_index)) # Product SKU
Tracker.alias(unicode, 'pr{0}nm'.format(product_index)) # Product name
Tracker.alias(unicode, 'pr{0}br'.format(product_index)) # Product brand
Tracker.alias(unicode, 'pr{0}ca'.format(product_index)) # Product category
Tracker.alias(unicode, 'pr{0}va'.format(product_index)) # Product variant
Tracker.alias(str, 'pr{0}pr'.format(product_index)) # Product price
Tracker.alias(int, 'pr{0}qt'.format(product_index)) # Product quantity
Tracker.alias(str, 'pr{0}cc'.format(product_index)) # Product coupon code
Tracker.alias(int, 'pr{0}ps'.format(product_index)) # Product position
for custom_index in range(MAX_CUSTOM_DEFINITIONS):
Tracker.alias(str, 'pr{0}cd{1}'.format(product_index, custom_index)) # Product custom dimension
Tracker.alias(int, 'pr{0}cm{1}'.format(product_index, custom_index)) # Product custom metric
for list_index in range(1, MAX_EC_LISTS):
Tracker.alias(str, 'il{0}pi{1}id'.format(list_index, product_index)) # Product impression SKU
Tracker.alias(unicode, 'il{0}pi{1}nm'.format(list_index, product_index)) # Product impression name
Tracker.alias(unicode, 'il{0}pi{1}br'.format(list_index, product_index)) # Product impression brand
Tracker.alias(unicode, 'il{0}pi{1}ca'.format(list_index, product_index)) # Product impression category
Tracker.alias(unicode, 'il{0}pi{1}va'.format(list_index, product_index)) # Product impression variant
Tracker.alias(int, 'il{0}pi{1}ps'.format(list_index, product_index)) # Product impression position
Tracker.alias(int, 'il{0}pi{1}pr'.format(list_index, product_index)) # Product impression price
for custom_index in range(MAX_CUSTOM_DEFINITIONS):
Tracker.alias(str, 'il{0}pi{1}cd{2}'.format(list_index, product_index, custom_index)) # Product impression custom dimension
Tracker.alias(int, 'il{0}pi{1}cm{2}'.format(list_index, product_index, custom_index)) # Product impression custom metric
for list_index in range(1, MAX_EC_LISTS):
Tracker.alias(unicode, 'il{0}nm'.format(list_index)) # Product impression list name
for promotion_index in range(1, MAX_EC_PROMOTIONS):
Tracker.alias(str, 'promo{0}id'.format(promotion_index)) # Promotion ID
Tracker.alias(unicode, 'promo{0}nm'.format(promotion_index)) # Promotion name
Tracker.alias(str, 'promo{0}cr'.format(promotion_index)) # Promotion creative
Tracker.alias(str, 'promo{0}ps'.format(promotion_index)) # Promotion position
# Shortcut for creating trackers
def create(account, *args, **kwargs):
return Tracker(account, *args, **kwargs)
# vim: set nowrap tabstop=4 shiftwidth=4 softtabstop=0 expandtab textwidth=0 filetype=python foldmethod=indent foldcolumn=4
| {
"content_hash": "f90cd5a6344b17f174b9875e859a68b1",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 139,
"avg_line_length": 40.9191011235955,
"alnum_prop": 0.6478664396726893,
"repo_name": "1987yama3/power-analytics.appspot.com",
"id": "9306d642938922e9fbf9462505948b47751a0ab5",
"size": "18661",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "appengine/vendor/UniversalAnalytics/Tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4540"
},
{
"name": "JavaScript",
"bytes": "61694"
},
{
"name": "Python",
"bytes": "131360"
},
{
"name": "Smarty",
"bytes": "176819"
}
],
"symlink_target": ""
} |
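A brief sketch of the alias-driven parameter mapping above (the module targets Python 2, so the sketch does too); the property ID and client ID are placeholders, and each ``send`` call issues a real HTTP POST to the Measurement Protocol endpoint.

tracker = create('UA-XXXXX-Y', client_id='example-client')
tracker.set('userLanguage', 'en-us')        # persisted on every later hit as 'ul'
tracker.send('pageview', '/home', title='Home page')           # positional arg -> 'dp', alias -> 'dt'
tracker.send('event', 'video', 'play', 'intro', 42, hitage=3)  # ec/ea/el/ev plus queue time 'qt'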
from django.urls import path
from blog import views
urlpatterns = [
path('categories/', views.get_categories, name='get_categories'),
path('posts/', views.get_posts, name='get_posts'),
path('post/<int:post_id>', views.get_post, name='get_post'),
]
| {
"content_hash": "69dc29602da3c4c5801f3020ce4b5b42",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 69,
"avg_line_length": 29.11111111111111,
"alnum_prop": 0.6755725190839694,
"repo_name": "Roba-VRSTU/VRSTU",
"id": "4f64110130c74542913bfe99bc10831dcbe2ef3d",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "blog/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3428"
},
{
"name": "PHP",
"bytes": "80024"
},
{
"name": "Vue",
"bytes": "563"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
from rest_framework.routers import DefaultRouter
from djangocms_rest_api.views import PageViewSet, PlaceHolderViewSet, PluginViewSet
router = DefaultRouter()
router.register(r'pages', PageViewSet, 'page')
router.register(r'placeholders', PlaceHolderViewSet, 'placeholder')
router.register(r'plugins', PluginViewSet, 'plugin')
urlpatterns = router.urls
| {
"content_hash": "5413f550fca9da3a26f9698af84c44cb",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 83,
"avg_line_length": 38.90909090909091,
"alnum_prop": 0.8084112149532711,
"repo_name": "divio/djangocms-rest-api",
"id": "0dbac5a99ba857cadb23977c772610258d8a683e",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangocms_rest_api/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "74"
},
{
"name": "HTML",
"bytes": "4624"
},
{
"name": "JavaScript",
"bytes": "212"
},
{
"name": "Python",
"bytes": "47186"
}
],
"symlink_target": ""
} |
import mimetypes
import os
import socket
from collections import defaultdict
from swift import gettext_ as _
from random import shuffle
from time import time
import functools
import sys
from eventlet import Timeout
import six
from swift import __canonical_version__ as swift_version
from swift.common import constraints
from swift.common.storage_policy import POLICIES
from swift.common.ring import Ring
from swift.common.utils import cache_from_env, get_logger, \
get_remote_client, split_path, config_true_value, generate_trans_id, \
affinity_key_function, affinity_locality_predicate, list_from_csv, \
register_swift_info, readconf
from swift.common.constraints import check_utf8, valid_api_version
from swift.proxy.controllers import AccountController, ContainerController, \
ObjectControllerRouter, InfoController
from swift.proxy.controllers.base import get_container_info, NodeIter, \
DEFAULT_RECHECK_CONTAINER_EXISTENCE, DEFAULT_RECHECK_ACCOUNT_EXISTENCE
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
HTTPMethodNotAllowed, HTTPNotFound, HTTPPreconditionFailed, \
HTTPServerError, HTTPException, Request, HTTPServiceUnavailable
from swift.common.exceptions import APIVersionError
# List of entry points for mandatory middlewares.
#
# Fields:
#
# "name" (required) is the entry point name from setup.py.
#
# "after_fn" (optional) a function that takes a PipelineWrapper object as its
# single argument and returns a list of middlewares that this middleware
# should come after. Any middlewares in the returned list that are not present
# in the pipeline will be ignored, so you can safely name optional middlewares
# to come after. For example, ["catch_errors", "bulk"] would install this
# middleware after catch_errors and bulk if both were present, but if bulk
# were absent, would just install it after catch_errors.
required_filters = [
{'name': 'catch_errors'},
{'name': 'gatekeeper',
'after_fn': lambda pipe: (['catch_errors']
if pipe.startswith('catch_errors')
else [])},
{'name': 'dlo', 'after_fn': lambda _junk: [
'copy', 'staticweb', 'tempauth', 'keystoneauth',
'catch_errors', 'gatekeeper', 'proxy_logging']},
{'name': 'versioned_writes', 'after_fn': lambda _junk: [
'slo', 'dlo', 'copy', 'staticweb', 'tempauth',
'keystoneauth', 'catch_errors', 'gatekeeper', 'proxy_logging']},
# Put copy before dlo, slo and versioned_writes
{'name': 'copy', 'after_fn': lambda _junk: [
'staticweb', 'tempauth', 'keystoneauth',
'catch_errors', 'gatekeeper', 'proxy_logging']}]
def _label_for_policy(policy):
if policy is not None:
return 'policy %s (%s)' % (policy.idx, policy.name)
return '(default)'
class ProxyOverrideOptions(object):
"""
Encapsulates proxy server options that may be overridden e.g. for
policy specific configurations.
:param conf: the proxy-server config dict.
:param override_conf: a dict of overriding configuration options.
"""
def __init__(self, base_conf, override_conf):
def get(key, default):
return override_conf.get(key, base_conf.get(key, default))
self.sorting_method = get('sorting_method', 'shuffle').lower()
self.read_affinity = get('read_affinity', '')
try:
self.read_affinity_sort_key = affinity_key_function(
self.read_affinity)
except ValueError as err:
# make the message a little more useful
raise ValueError("Invalid read_affinity value: %r (%s)" %
(self.read_affinity, err.message))
self.write_affinity = get('write_affinity', '')
try:
self.write_affinity_is_local_fn \
= affinity_locality_predicate(self.write_affinity)
except ValueError as err:
# make the message a little more useful
raise ValueError("Invalid write_affinity value: %r (%s)" %
(self.write_affinity, err.message))
self.write_affinity_node_count = get(
'write_affinity_node_count', '2 * replicas').lower()
value = self.write_affinity_node_count.split()
if len(value) == 1:
wanc_value = int(value[0])
self.write_affinity_node_count_fn = lambda replicas: wanc_value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
wanc_value = int(value[0])
self.write_affinity_node_count_fn = \
lambda replicas: wanc_value * replicas
else:
raise ValueError(
'Invalid write_affinity_node_count value: %r' %
(' '.join(value)))
def __repr__(self):
return '%s({}, {%s})' % (self.__class__.__name__, ', '.join(
'%r: %r' % (k, getattr(self, k)) for k in (
'sorting_method',
'read_affinity',
'write_affinity',
'write_affinity_node_count')))
def __eq__(self, other):
if not isinstance(other, ProxyOverrideOptions):
return False
return all(getattr(self, k) == getattr(other, k) for k in (
'sorting_method',
'read_affinity',
'write_affinity',
'write_affinity_node_count'))
class Application(object):
"""WSGI application for the proxy server."""
def __init__(self, conf, memcache=None, logger=None, account_ring=None,
container_ring=None):
if conf is None:
conf = {}
if logger is None:
self.logger = get_logger(conf, log_route='proxy-server')
else:
self.logger = logger
self._override_options = self._load_per_policy_config(conf)
self.sorts_by_timing = any(pc.sorting_method == 'timing'
for pc in self._override_options.values())
self._error_limiting = {}
swift_dir = conf.get('swift_dir', '/etc/swift')
self.swift_dir = swift_dir
self.node_timeout = float(conf.get('node_timeout', 10))
self.recoverable_node_timeout = float(
conf.get('recoverable_node_timeout', self.node_timeout))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60))
self.put_queue_depth = int(conf.get('put_queue_depth', 10))
self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
self.trans_id_suffix = conf.get('trans_id_suffix', '')
self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
self.error_suppression_interval = \
int(conf.get('error_suppression_interval', 60))
self.error_suppression_limit = \
int(conf.get('error_suppression_limit', 10))
self.recheck_container_existence = \
int(conf.get('recheck_container_existence',
DEFAULT_RECHECK_CONTAINER_EXISTENCE))
self.recheck_account_existence = \
int(conf.get('recheck_account_existence',
DEFAULT_RECHECK_ACCOUNT_EXISTENCE))
self.allow_account_management = \
config_true_value(conf.get('allow_account_management', 'no'))
self.container_ring = container_ring or Ring(swift_dir,
ring_name='container')
self.account_ring = account_ring or Ring(swift_dir,
ring_name='account')
# ensure rings are loaded for all configured storage policies
for policy in POLICIES:
policy.load_ring(swift_dir)
self.obj_controller_router = ObjectControllerRouter()
self.memcache = memcache
mimetypes.init(mimetypes.knownfiles +
[os.path.join(swift_dir, 'mime.types')])
self.account_autocreate = \
config_true_value(conf.get('account_autocreate', 'no'))
self.auto_create_account_prefix = (
conf.get('auto_create_account_prefix') or '.')
self.expiring_objects_account = self.auto_create_account_prefix + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
self.max_containers_per_account = \
int(conf.get('max_containers_per_account') or 0)
self.max_containers_whitelist = [
a.strip()
for a in conf.get('max_containers_whitelist', '').split(',')
if a.strip()]
self.deny_host_headers = [
host.strip() for host in
conf.get('deny_host_headers', '').split(',') if host.strip()]
self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
self.cors_allow_origin = [
a.strip()
for a in conf.get('cors_allow_origin', '').split(',')
if a.strip()]
self.cors_expose_headers = [
a.strip()
for a in conf.get('cors_expose_headers', '').split(',')
if a.strip()]
self.strict_cors_mode = config_true_value(
conf.get('strict_cors_mode', 't'))
self.node_timings = {}
self.timing_expiry = int(conf.get('timing_expiry', 300))
self.concurrent_gets = \
config_true_value(conf.get('concurrent_gets'))
self.concurrency_timeout = float(conf.get('concurrency_timeout',
self.conn_timeout))
value = conf.get('request_node_count', '2 * replicas').lower().split()
if len(value) == 1:
rnc_value = int(value[0])
self.request_node_count = lambda replicas: rnc_value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
rnc_value = int(value[0])
self.request_node_count = lambda replicas: rnc_value * replicas
else:
raise ValueError(
'Invalid request_node_count value: %r' % ' '.join(value))
# swift_owner_headers are stripped by the account and container
# controllers; we should extend header stripping to object controller
# when a privileged object header is implemented.
swift_owner_headers = conf.get(
'swift_owner_headers',
'x-container-read, x-container-write, '
'x-container-sync-key, x-container-sync-to, '
'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
'x-account-access-control')
self.swift_owner_headers = [
name.strip().title()
for name in swift_owner_headers.split(',') if name.strip()]
# Initialization was successful, so now apply the client chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
# NOTE WELL: This is a class setting, so until we get set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
socket._fileobject.default_bufsize = self.client_chunk_size
self.expose_info = config_true_value(
conf.get('expose_info', 'yes'))
self.disallowed_sections = list_from_csv(
conf.get('disallowed_sections', 'swift.valid_api_versions'))
self.admin_key = conf.get('admin_key', None)
register_swift_info(
version=swift_version,
strict_cors_mode=self.strict_cors_mode,
policies=POLICIES.get_policy_info(),
allow_account_management=self.allow_account_management,
account_autocreate=self.account_autocreate,
**constraints.EFFECTIVE_CONSTRAINTS)
def _make_policy_override(self, policy, conf, override_conf):
label_for_policy = _label_for_policy(policy)
try:
override = ProxyOverrideOptions(conf, override_conf)
self.logger.debug("Loaded override config for %s: %r" %
(label_for_policy, override))
return override
except ValueError as err:
raise ValueError(err.message + ' for %s' % label_for_policy)
def _load_per_policy_config(self, conf):
"""
Loads per-policy config override values from proxy server conf file.
:param conf: the proxy server local conf dict
:return: a dict mapping :class:`BaseStoragePolicy` to an instance of
:class:`ProxyOverrideOptions` that has policy-specific config
attributes
"""
# the default options will be used when looking up a policy that had no
# override options
default_options = self._make_policy_override(None, conf, {})
overrides = defaultdict(lambda: default_options)
# force None key to be set in the defaultdict so that it is found when
# iterating over items in check_config
overrides[None] = default_options
for index, override_conf in conf.get('policy_config', {}).items():
try:
index = int(index)
except ValueError:
# require policies to be referenced by index; using index *or*
# name isn't possible because names such as "3" are allowed
raise ValueError(
'Override config must refer to policy index: %r' % index)
try:
policy = POLICIES[index]
except KeyError:
raise ValueError(
"No policy found for override config, index: %s" % index)
override = self._make_policy_override(policy, conf, override_conf)
overrides[policy] = override
return overrides
def get_policy_options(self, policy):
"""
Return policy specific options.
:param policy: an instance of :class:`BaseStoragePolicy`
:return: an instance of :class:`ProxyOverrideOptions`
"""
return self._override_options[policy]
def check_config(self):
"""
Check the configuration for possible errors
"""
for policy, options in self._override_options.items():
if options.read_affinity and options.sorting_method != 'affinity':
self.logger.warning(
_("sorting_method is set to '%(method)s', not 'affinity'; "
"%(label)s read_affinity setting will have no effect."),
{'label': _label_for_policy(policy),
'method': options.sorting_method})
def get_object_ring(self, policy_idx):
"""
Get the ring object to use to handle a request based on its policy.
:param policy_idx: policy index as defined in swift.conf
:returns: appropriate ring object
"""
return POLICIES.get_object_ring(policy_idx, self.swift_dir)
def get_controller(self, req):
"""
Get the controller to handle a request.
:param req: the request
:returns: tuple of (controller class, path dictionary)
:raises: ValueError (thrown by split_path) if given invalid path
"""
if req.path == '/info':
d = dict(version=None,
expose_info=self.expose_info,
disallowed_sections=self.disallowed_sections,
admin_key=self.admin_key)
return InfoController, d
version, account, container, obj = split_path(req.path, 1, 4, True)
d = dict(version=version,
account_name=account,
container_name=container,
object_name=obj)
if account and not valid_api_version(version):
raise APIVersionError('Invalid path')
if obj and container and account:
info = get_container_info(req.environ, self)
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
info['storage_policy'])
policy = POLICIES.get_by_index(policy_index)
if not policy:
# This indicates that a new policy has been created,
# with rings, deployed, released (i.e. deprecated =
# False), used by a client to create a container via
# another proxy that was restarted after the policy
# was released, and is now cached - all before this
# worker was HUPed to stop accepting new
# connections. There should never be an "unknown"
# index - but when there is - it's probably operator
# error and hopefully temporary.
raise HTTPServiceUnavailable('Unknown Storage Policy')
return self.obj_controller_router[policy], d
elif container and account:
return ContainerController, d
elif account and not container and not obj:
return AccountController, d
return None, d
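    # Illustrative routing for the method above (paths are assumed examples):
    #   /info                   -> InfoController
    #   /v1/AUTH_acct           -> AccountController
    #   /v1/AUTH_acct/cont      -> ContainerController
    #   /v1/AUTH_acct/cont/obj  -> object controller selected by the
    #                              container's storage policy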
def __call__(self, env, start_response):
"""
WSGI entry point.
Wraps env in swob.Request object and passes it down.
:param env: WSGI environment dictionary
:param start_response: WSGI callable
"""
try:
if self.memcache is None:
self.memcache = cache_from_env(env, True)
req = self.update_request(Request(env))
return self.handle_request(req)(env, start_response)
except UnicodeError:
err = HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
return err(env, start_response)
except (Exception, Timeout):
start_response('500 Server Error',
[('Content-Type', 'text/plain')])
return ['Internal server error.\n']
def update_request(self, req):
if 'x-storage-token' in req.headers and \
'x-auth-token' not in req.headers:
req.headers['x-auth-token'] = req.headers['x-storage-token']
return req
def handle_request(self, req):
"""
Entry point for proxy server.
Should return a WSGI-style callable (such as swob.Response).
:param req: swob.Request object
"""
try:
self.logger.set_statsd_prefix('proxy-server')
if req.content_length and req.content_length < 0:
self.logger.increment('errors')
return HTTPBadRequest(request=req,
body='Invalid Content-Length')
try:
if not check_utf8(req.path_info):
self.logger.increment('errors')
return HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
except UnicodeError:
self.logger.increment('errors')
return HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
try:
controller, path_parts = self.get_controller(req)
p = req.path_info
if isinstance(p, six.text_type):
p = p.encode('utf-8')
except APIVersionError:
self.logger.increment('errors')
return HTTPBadRequest(request=req)
except ValueError:
self.logger.increment('errors')
return HTTPNotFound(request=req)
if not controller:
self.logger.increment('errors')
return HTTPPreconditionFailed(request=req, body='Bad URL')
if self.deny_host_headers and \
req.host.split(':')[0] in self.deny_host_headers:
return HTTPForbidden(request=req, body='Invalid host header')
self.logger.set_statsd_prefix('proxy-server.' +
controller.server_type.lower())
controller = controller(self, **path_parts)
if 'swift.trans_id' not in req.environ:
# if this wasn't set by an earlier middleware, set it now
trans_id_suffix = self.trans_id_suffix
trans_id_extra = req.headers.get('x-trans-id-extra')
if trans_id_extra:
trans_id_suffix += '-' + trans_id_extra[:32]
trans_id = generate_trans_id(trans_id_suffix)
req.environ['swift.trans_id'] = trans_id
self.logger.txn_id = trans_id
req.headers['x-trans-id'] = req.environ['swift.trans_id']
controller.trans_id = req.environ['swift.trans_id']
self.logger.client_ip = get_remote_client(req)
if req.method not in controller.allowed_methods:
return HTTPMethodNotAllowed(request=req, headers={
'Allow': ', '.join(controller.allowed_methods)})
handler = getattr(controller, req.method)
old_authorize = None
if 'swift.authorize' in req.environ:
# We call authorize before the handler, always. If authorized,
                # we remove the swift.authorize hook so it isn't ever called
# again. If not authorized, we return the denial unless the
# controller's method indicates it'd like to gather more
# information and try again later.
resp = req.environ['swift.authorize'](req)
if not resp:
# No resp means authorized, no delayed recheck required.
old_authorize = req.environ['swift.authorize']
else:
# Response indicates denial, but we might delay the denial
# and recheck later. If not delayed, return the error now.
if not getattr(handler, 'delay_denial', None):
return resp
# Save off original request method (GET, POST, etc.) in case it
# gets mutated during handling. This way logging can display the
# method the client actually sent.
req.environ.setdefault('swift.orig_req_method', req.method)
try:
if old_authorize:
req.environ.pop('swift.authorize', None)
return handler(req)
finally:
if old_authorize:
req.environ['swift.authorize'] = old_authorize
except HTTPException as error_response:
return error_response
except (Exception, Timeout):
self.logger.exception(_('ERROR Unhandled exception in request'))
return HTTPServerError(request=req)
def sort_nodes(self, nodes, policy=None):
"""
Sorts nodes in-place (and returns the sorted list) according to
the configured strategy. The default "sorting" is to randomly
shuffle the nodes. If the "timing" strategy is chosen, the nodes
are sorted according to the stored timing data.
:param nodes: a list of nodes
:param policy: an instance of :class:`BaseStoragePolicy`
"""
# In the case of timing sorting, shuffling ensures that close timings
# (ie within the rounding resolution) won't prefer one over another.
# Python's sort is stable (http://wiki.python.org/moin/HowTo/Sorting/)
shuffle(nodes)
policy_options = self.get_policy_options(policy)
if policy_options.sorting_method == 'timing':
now = time()
def key_func(node):
timing, expires = self.node_timings.get(node['ip'], (-1.0, 0))
return timing if expires > now else -1.0
nodes.sort(key=key_func)
elif policy_options.sorting_method == 'affinity':
nodes.sort(key=policy_options.read_affinity_sort_key)
return nodes
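    # A minimal sketch of the 'timing' strategy above, with assumed data: if
    # self.node_timings is {'10.0.0.1': (0.005, exp), '10.0.0.2': (0.020, exp)}
    # and both entries are unexpired, the node on 10.0.0.1 sorts first; nodes
    # with no (or expired) timing data key to -1.0 and sort ahead of both.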
def set_node_timing(self, node, timing):
if not self.sorts_by_timing:
return
now = time()
timing = round(timing, 3) # sort timings to the millisecond
self.node_timings[node['ip']] = (timing, now + self.timing_expiry)
def _error_limit_node_key(self, node):
return "{ip}:{port}/{device}".format(**node)
def error_limited(self, node):
"""
Check if the node is currently error limited.
:param node: dictionary of node to check
:returns: True if error limited, False otherwise
"""
now = time()
node_key = self._error_limit_node_key(node)
error_stats = self._error_limiting.get(node_key)
if error_stats is None or 'errors' not in error_stats:
return False
if 'last_error' in error_stats and error_stats['last_error'] < \
now - self.error_suppression_interval:
self._error_limiting.pop(node_key, None)
return False
limited = error_stats['errors'] > self.error_suppression_limit
if limited:
self.logger.debug(
_('Node error limited %(ip)s:%(port)s (%(device)s)'), node)
return limited
def error_limit(self, node, msg):
"""
Mark a node as error limited. This immediately pretends the
node received enough errors to trigger error suppression. Use
this for errors like Insufficient Storage. For other errors
use :func:`error_occurred`.
:param node: dictionary of node to error limit
:param msg: error message
"""
node_key = self._error_limit_node_key(node)
error_stats = self._error_limiting.setdefault(node_key, {})
error_stats['errors'] = self.error_suppression_limit + 1
error_stats['last_error'] = time()
self.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'),
{'msg': msg, 'ip': node['ip'],
'port': node['port'], 'device': node['device']})
def _incr_node_errors(self, node):
node_key = self._error_limit_node_key(node)
error_stats = self._error_limiting.setdefault(node_key, {})
error_stats['errors'] = error_stats.get('errors', 0) + 1
error_stats['last_error'] = time()
def error_occurred(self, node, msg):
"""
        Handle logging and accounting of node errors.
:param node: dictionary of node to handle errors for
:param msg: error message
"""
self._incr_node_errors(node)
self.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'),
{'msg': msg.decode('utf-8'), 'ip': node['ip'],
'port': node['port'], 'device': node['device']})
def iter_nodes(self, ring, partition, node_iter=None, policy=None):
return NodeIter(self, ring, partition, node_iter=node_iter,
policy=policy)
def exception_occurred(self, node, typ, additional_info,
**kwargs):
"""
Handle logging of generic exceptions.
:param node: dictionary of node to log the error for
:param typ: server type
:param additional_info: additional information to log
"""
self._incr_node_errors(node)
if 'level' in kwargs:
log = functools.partial(self.logger.log, kwargs.pop('level'))
if 'exc_info' not in kwargs:
kwargs['exc_info'] = sys.exc_info()
else:
log = self.logger.exception
log(_('ERROR with %(type)s server %(ip)s:%(port)s/%(device)s'
' re: %(info)s'),
{'type': typ, 'ip': node['ip'],
'port': node['port'], 'device': node['device'],
'info': additional_info.decode('utf-8')},
**kwargs)
def modify_wsgi_pipeline(self, pipe):
"""
Called during WSGI pipeline creation. Modifies the WSGI pipeline
context to ensure that mandatory middleware is present in the pipeline.
:param pipe: A PipelineWrapper object
"""
pipeline_was_modified = False
for filter_spec in reversed(required_filters):
filter_name = filter_spec['name']
if filter_name not in pipe:
afters = filter_spec.get('after_fn', lambda _junk: [])(pipe)
insert_at = 0
for after in afters:
try:
insert_at = max(insert_at, pipe.index(after) + 1)
except ValueError: # not in pipeline; ignore it
pass
self.logger.info(
_('Adding required filter %(filter_name)s to pipeline at '
'position %(insert_at)d'),
{'filter_name': filter_name, 'insert_at': insert_at})
ctx = pipe.create_filter(filter_name)
pipe.insert_filter(ctx, index=insert_at)
pipeline_was_modified = True
if pipeline_was_modified:
self.logger.info(_("Pipeline was modified. "
"New pipeline is \"%s\"."), pipe)
else:
self.logger.debug(_("Pipeline is \"%s\""), pipe)
def parse_per_policy_config(conf):
"""
Search the config file for any per-policy config sections and load those
sections to a dict mapping policy reference (name or index) to policy
options.
:param conf: the proxy server conf dict
:return: a dict mapping policy reference -> dict of policy options
:raises ValueError: if a policy config section has an invalid name
"""
policy_config = {}
all_conf = readconf(conf['__file__'])
policy_section_prefix = conf['__name__'] + ':policy:'
for section, options in all_conf.items():
if not section.startswith(policy_section_prefix):
continue
policy_ref = section[len(policy_section_prefix):]
policy_config[policy_ref] = options
return policy_config
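# For illustration only (assumed conf content): with conf['__name__'] set to
# 'proxy-server', a section like
#   [proxy-server:policy:0]
#   sorting_method = affinity
# yields {'0': {'sorting_method': 'affinity'}}; the reference ('0') is turned
# into a policy index later, in Application._load_per_policy_config.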
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI proxy apps."""
conf = global_conf.copy()
conf.update(local_conf)
# Do this here so that the use of conf['__file__'] and conf['__name__'] is
# isolated from the Application. This also simplifies tests that construct
# an Application instance directly.
conf['policy_config'] = parse_per_policy_config(conf)
app = Application(conf)
app.check_config()
return app
| {
"content_hash": "485d5883674f17996ad655a4721b590b",
"timestamp": "",
"source": "github",
"line_count": 700,
"max_line_length": 79,
"avg_line_length": 44.36857142857143,
"alnum_prop": 0.5832635713825746,
"repo_name": "redbo/swift",
"id": "ba0fc58beab3708dfccc5e90a4d276c553b3d715",
"size": "31653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swift/proxy/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2719304"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
import os
import json
from logging import getLogger
from django.conf import settings
from django.db import models
from django.db.models.signals import post_delete
from django.db.models.query import Q
from django.utils.translation import ugettext_lazy as _
from django.utils.datastructures import MultiValueDict
from django.contrib.auth.models import User
from django.dispatch import receiver
from ..pypi_metadata.models import Classifier, ClassifierSerializer
from ..pypi_metadata.models import DistributionType
from ..pypi_metadata.models import PythonVersion
from ..pypi_metadata.models import PlatformName
log = getLogger(__name__)
class ConfigurationManager(models.Manager):
def latest(self):
try:
return super(ConfigurationManager, self).latest()
except Configuration.DoesNotExist:
configuration = Configuration()
configuration.save()
return configuration
class Configuration(models.Model):
'''Stores the configuration of this site. As a rule, the most
recent configuration is always used, and past configurations
are kept for reference and for rollback.
'''
objects = ConfigurationManager()
timestamp = models.DateTimeField(auto_now_add=True)
allow_version_overwrite = models.BooleanField(default=False)
upload_directory = models.CharField(max_length=256, default='dists',
                                        help_text='Directory relative to MEDIA_ROOT in which user uploads are kept')
class Meta:
ordering = ('-timestamp', )
verbose_name = _(u'Configuration')
verbose_name_plural = _(u'Configurations')
get_latest_by = 'timestamp'
class PackageInfoField(models.Field):
description = u'Python Package Information Field'
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
kwargs['editable'] = False
super(PackageInfoField,self).__init__(*args, **kwargs)
def to_python(self, value):
if isinstance(value, basestring):
if value:
return MultiValueDict(json.loads(value))
else:
return MultiValueDict()
if isinstance(value, dict):
return MultiValueDict(value)
if isinstance(value, MultiValueDict):
return value
raise ValueError('Unexpected value encountered when converting data to python')
def get_prep_value(self, value):
if isinstance(value, MultiValueDict):
return json.dumps(dict(value.iterlists()), default = ClassifierSerializer)
if isinstance(value, dict):
return json.dumps(value)
if isinstance(value, basestring) or value is None:
return value
raise ValueError('Unexpected value encountered when preparing for database')
def get_internal_type(self):
return 'TextField'
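# Rough round-trip sketch for PackageInfoField (values are assumed examples):
#   get_prep_value(MultiValueDict({'classifier': ['Framework :: Django']}))
#       -> '{"classifier": ["Framework :: Django"]}'   (JSON text in the DB)
#   to_python('{"classifier": ["Framework :: Django"]}')
#       -> MultiValueDict whose getlist('classifier') is ['Framework :: Django']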
class Package(models.Model):
name = models.CharField(max_length=255, unique=True, primary_key=True,
editable=False)
auto_hide = models.BooleanField(default=True, blank=False)
allow_comments = models.BooleanField(default=True, blank=False)
owners = models.ManyToManyField(User, blank=True,
related_name="packages_owned")
maintainers = models.ManyToManyField(User, blank=True,
related_name="packages_maintained")
class Meta:
verbose_name = _(u"package")
verbose_name_plural = _(u"packages")
get_latest_by = "releases__latest"
ordering = ['name',]
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('djangopypi2-package', (), {'package_name': self.name})
@property
def latest(self):
try:
return self.releases.latest()
except Release.DoesNotExist:
return None
def get_release(self, version):
"""Return the release object for version, or None"""
try:
return self.releases.get(version=version)
except Release.DoesNotExist:
return None
@staticmethod
def simple_search(query = ""):
return Package.objects.filter(Q(name__icontains=query) | Q(releases__package_info__icontains=query)).distinct()
@staticmethod
def advanced_search(name = "", summary = "", description = "", classifier = None, keyword = None):
classifier = classifier if classifier is not None else set()
keyword = keyword if keyword is not None else set()
qset = Package.objects.all()
if name:
qset = qset.filter(name__icontains = name)
# manual filtering
evaled = False
if summary:
if not evaled:
qset = list(qset)
evaled = True
qset = filter(lambda x: all(y in x.latest.summary.lower() for y in summary.lower().split()), qset)
if description:
if not evaled:
qset = list(qset)
evaled = True
qset = filter(lambda x: all(y in x.latest.description.lower() for y in description.lower().split()), qset)
if classifier:
classifier = set(unicode(x) for x in classifier)
if not evaled:
qset = list(qset)
evaled = True
qset = filter(lambda x: set(x.latest.classifiers) & classifier == classifier, qset)
if keyword:
keyword = set(kword.lower() for kword in keyword)
if not evaled:
qset = list(qset)
evaled = True
qset = filter(lambda x: set(y.lower() for y in x.latest.keywords) & keyword == keyword, qset)
if not evaled:
result = list(qset)
else:
result = qset
return result
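    # Usage sketch with assumed arguments: advanced_search(name='requests',
    # classifier={'Framework :: Django'}) narrows by name__icontains in the
    # database, then filters the evaluated queryset in Python so only packages
    # whose latest release carries every requested classifier remain.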
class Release(models.Model):
package = models.ForeignKey(Package, related_name="releases", editable=False)
version = models.CharField(max_length=128, editable=False)
metadata_version = models.CharField(max_length=64, default='1.0')
package_info = PackageInfoField(blank=False)
hidden = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True, editable=False)
class Meta:
verbose_name = _(u"release")
verbose_name_plural = _(u"releases")
unique_together = ("package", "version")
get_latest_by = 'created'
ordering = ['-created']
def __unicode__(self):
return self.release_name
@property
def release_name(self):
return u"%s-%s" % (self.package.name, self.version)
@property
def summary(self):
return self.package_info.get('summary', u'')
@property
def author(self):
return self.package_info.get('author', u'')
@property
def home_page(self):
return self.package_info.get('home_page', u'')
@property
def license(self):
return self.package_info.get('license', u'')
@property
def description(self):
return self.package_info.get('description', u'')
@property
def classifiers(self):
return self.package_info.getlist('classifier')
@property
def keywords(self):
# return keywords as set
keywords = self.package_info.getlist('keywords')
if keywords:
return set(self.package_info.getlist('keywords')[0].split())
else:
return set()
@models.permalink
def get_absolute_url(self):
return ('djangopypi2-release', (), {'package_name': self.package.name,
'version': self.version})
@staticmethod
def simple_search(name = "", summary = ""):
return Release.objects.filter(Q(package__name__icontains=name) | Q(package_info__icontains=summary)).distinct()
def distribution_upload_path(instance, filename):
configuration = Configuration.objects.latest()
return os.path.join(str(configuration.upload_directory), filename)
class Distribution(models.Model):
release = models.ForeignKey(Release, related_name="distributions",
editable=False)
content = models.FileField(upload_to=distribution_upload_path)
md5_digest = models.CharField(max_length=32, blank=True, editable=False)
filetype = models.ForeignKey(DistributionType, related_name='distributions')
pyversion = models.ForeignKey(PythonVersion, related_name='distributions', null=True,
help_text='Python version, or None for any version of Python')
platform = models.ForeignKey(PlatformName, related_name='distributions', null=True,
help_text='Platform name or None if platform agnostic')
comment = models.CharField(max_length=255, blank=True)
signature = models.TextField(blank=True)
created = models.DateTimeField(auto_now_add=True, editable=False)
uploader = models.ForeignKey(User, related_name='distributions_uploaded',
editable=False, blank=True, null=True)
@property
def filename(self):
return os.path.basename(self.content.name)
@property
def display_filetype(self):
return self.filetype.name
@property
def path(self):
return self.content.name
def get_absolute_url(self):
return "%s#md5=%s" % (self.content.url, self.md5_digest)
class Meta:
verbose_name = _(u"distribution")
verbose_name_plural = _(u"distributions")
unique_together = ("release", "filetype", "pyversion", "platform")
def __unicode__(self):
return self.filename
@receiver(post_delete, sender=Distribution)
def handle_media_delete(instance, **kwargs):
path = os.path.join(settings.MEDIA_ROOT, instance.path)
log.info("Deleting file {}".format(path))
os.remove(path)
class Review(models.Model):
release = models.ForeignKey(Release, related_name="reviews")
rating = models.PositiveSmallIntegerField(blank=True)
comment = models.TextField(blank=True)
class Meta:
verbose_name = _(u'release review')
verbose_name_plural = _(u'release reviews')
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^djangopypi2\.apps\.pypi_frontend\.models\.PackageInfoField"])
except ImportError:
pass
| {
"content_hash": "300fff55c58e16023d38df67bd7d69d7",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 119,
"avg_line_length": 35.9375,
"alnum_prop": 0.6382608695652174,
"repo_name": "pitrho/djangopypi2",
"id": "6eb068837264de94f9d8e63bcc80ef6a79ea6160",
"size": "10350",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djangopypi2/apps/pypi_packages/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1294"
},
{
"name": "HTML",
"bytes": "32146"
},
{
"name": "Python",
"bytes": "140711"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = """
module: na_ontap_net_subnet
short_description: NetApp ONTAP Create, delete, modify network subnets.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.8'
author: Storage Engineering (@Albinpopote) <ansible@black-perl.fr>
description:
- Create, modify, destroy the network subnet
options:
state:
description:
- Whether the specified network interface group should exist or not.
choices: ['present', 'absent']
default: present
broadcast_domain:
description:
- Specify the required broadcast_domain name for the subnet.
- A broadcast domain can not be modified after the subnet has been created
required: true
name:
description:
- Specify the subnet name.
required: true
from_name:
description:
- Name of the subnet to be renamed
gateway:
description:
- Specify the gateway for the default route of the subnet.
ipspace:
description:
- Specify the ipspace for the subnet.
- The default value for this parameter is the default IPspace, named 'Default'.
ip_ranges:
description:
- Specify the list of IP address ranges associated with the subnet.
subnet:
description:
- Specify the subnet (ip and mask).
required: true
"""
EXAMPLES = """
- name: create subnet
na_ontap_net_subnet:
state: present
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
subnet: 10.10.10.0/24
name: subnet-adm
ip_ranges: [ '10.10.10.30-10.10.10.40', '10.10.10.51' ]
gateway: 10.10.10.254
ipspace: Default
broadcast_domain: Default
- name: delete subnet
na_ontap_net_subnet:
state: absent
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
name: subnet-adm
ipspace: Default
- name: rename subnet
na_ontap_net_subnet:
state: present
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
name: subnet-adm-new
from_name: subnet-adm
ipspace: Default
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapSubnet(object):
"""
Create, Modifies and Destroys a subnet
"""
def __init__(self):
"""
Initialize the ONTAP Subnet class
"""
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
name=dict(required=True, type='str'),
from_name=dict(required=False, type='str'),
broadcast_domain=dict(required=False, type='str'),
gateway=dict(required=False, type='str'),
ip_ranges=dict(required=False, type=list),
ipspace=dict(required=False, type='str'),
subnet=dict(required=False, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
return
def get_subnet(self, name=None):
"""
Return details about the subnet
:param:
name : Name of the subnet
:return: Details about the subnet. None if not found.
:rtype: dict
"""
if name is None:
name = self.parameters.get('name')
subnet_iter = netapp_utils.zapi.NaElement('net-subnet-get-iter')
subnet_info = netapp_utils.zapi.NaElement('net-subnet-info')
subnet_info.add_new_child('subnet-name', name)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(subnet_info)
subnet_iter.add_child_elem(query)
result = self.server.invoke_successfully(subnet_iter, True)
return_value = None
# check if query returns the expected subnet
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) == 1:
subnet_attributes = result.get_child_by_name('attributes-list').get_child_by_name('net-subnet-info')
broadcast_domain = subnet_attributes.get_child_content('broadcast-domain')
gateway = subnet_attributes.get_child_content('gateway')
ipspace = subnet_attributes.get_child_content('ipspace')
subnet = subnet_attributes.get_child_content('subnet')
name = subnet_attributes.get_child_content('subnet-name')
ip_ranges = []
range_obj = subnet_attributes.get_child_by_name('ip-ranges').get_children()
for elem in range_obj:
ip_ranges.append(elem.get_content())
return_value = {
'name': name,
'broadcast_domain': broadcast_domain,
'gateway': gateway,
'ip_ranges': ip_ranges,
'ipspace': ipspace,
'subnet': subnet
}
return return_value
def create_subnet(self):
"""
Creates a new subnet
"""
options = {'subnet-name': self.parameters.get('name'),
'broadcast-domain': self.parameters.get('broadcast_domain'),
'subnet': self.parameters.get('subnet')}
subnet_create = netapp_utils.zapi.NaElement.create_node_with_children(
'net-subnet-create', **options)
if self.parameters.get('gateway'):
subnet_create.add_new_child('gateway', self.parameters.get('gateway'))
if self.parameters.get('ip_ranges'):
subnet_ips = netapp_utils.zapi.NaElement('ip-ranges')
subnet_create.add_child_elem(subnet_ips)
for ip_range in self.parameters.get('ip_ranges'):
subnet_ips.add_new_child('ip-range', ip_range)
if self.parameters.get('ipspace'):
subnet_create.add_new_child('ipspace', self.parameters.get('ipspace'))
try:
self.server.invoke_successfully(subnet_create, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
exception=traceback.format_exc())
def delete_subnet(self):
"""
Deletes a subnet
"""
subnet_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'net-subnet-destroy', **{'subnet-name': self.parameters.get('name')})
try:
self.server.invoke_successfully(subnet_delete, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
exception=traceback.format_exc())
def modify_subnet(self):
"""
Modifies a subnet
"""
options = {'subnet-name': self.parameters.get('name')}
subnet_modify = netapp_utils.zapi.NaElement.create_node_with_children(
'net-subnet-modify', **options)
if self.parameters.get('gateway'):
subnet_modify.add_new_child('gateway', self.parameters.get('gateway'))
if self.parameters.get('ip_ranges'):
subnet_ips = netapp_utils.zapi.NaElement('ip-ranges')
subnet_modify.add_child_elem(subnet_ips)
for ip_range in self.parameters.get('ip_ranges'):
subnet_ips.add_new_child('ip-range', ip_range)
if self.parameters.get('ipspace'):
subnet_modify.add_new_child('ipspace', self.parameters.get('ipspace'))
if self.parameters.get('subnet'):
subnet_modify.add_new_child('subnet', self.parameters.get('subnet'))
try:
self.server.invoke_successfully(subnet_modify, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error modifying subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
exception=traceback.format_exc())
def rename_subnet(self):
"""
        Rename a subnet
"""
options = {'subnet-name': self.parameters.get('from_name'),
'new-name': self.parameters.get('name')}
subnet_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'net-subnet-rename', **options)
if self.parameters.get('ipspace'):
subnet_rename.add_new_child('ipspace', self.parameters.get('ipspace'))
try:
self.server.invoke_successfully(subnet_rename, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error renaming subnet %s: %s' % (self.parameters.get('name'), to_native(error)),
exception=traceback.format_exc())
def apply(self):
'''Apply action to subnet'''
current = self.get_subnet()
cd_action, rename = None, None
if self.parameters.get('from_name'):
rename = self.na_helper.is_rename_action(self.get_subnet(self.parameters.get('from_name')), current)
if rename is False:
self.module.fail_json(msg="Error renaming: subnet %s does not exist" %
self.parameters.get('from_name'))
else:
cd_action = self.na_helper.get_cd_action(current, self.parameters)
modify = self.na_helper.get_modified_attributes(current, self.parameters)
for attribute in modify:
if attribute in ['broadcast_domain']:
self.module.fail_json(msg='Error modifying subnet %s: cannot modify broadcast_domain parameter.' % self.parameters.get('name'))
if self.na_helper.changed:
if self.module.check_mode:
pass
else:
if rename:
self.rename_subnet()
                # If rename is True, cd_action is None but modify could be True
if cd_action == 'create':
for attribute in ['subnet', 'broadcast_domain']:
if not self.parameters.get(attribute):
self.module.fail_json(msg='Error - missing required arguments: %s.' % attribute)
self.create_subnet()
elif cd_action == 'delete':
self.delete_subnet()
elif modify:
self.modify_subnet()
self.module.exit_json(changed=self.na_helper.changed)
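    # Rough idempotency sketch under assumed inputs: if get_subnet() finds no
    # existing 'subnet-adm' and state is 'present', the NetAppModule helper is
    # expected to report cd_action == 'create', so apply() checks that 'subnet'
    # and 'broadcast_domain' were supplied and calls create_subnet(); on a
    # second run the subnet is found unchanged and no ZAPI call is issued.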
def main():
"""
    Creates the NetApp ONTAP Net Subnet object and runs the correct play task
"""
subnet_obj = NetAppOntapSubnet()
subnet_obj.apply()
if __name__ == '__main__':
main()
| {
"content_hash": "51854a8972e39040b2377880d94b92e6",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 143,
"avg_line_length": 36.608150470219435,
"alnum_prop": 0.5941085802363418,
"repo_name": "SergeyCherepanov/ansible",
"id": "d38ceaa45c2d6d9f0601572913999d93312f1730",
"size": "11791",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/storage/netapp/na_ontap_net_subnet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
"""vol
volume control in the shell for your mac
Usage:
vol (in|out|alert) <volume>
vol mute
vol unmute
vol load <profile>
vol info
vol (-h | --help)
vol --version
Options:
-h --help Show this screen.
--version Show version.
"""
import ConfigParser
import os
import subprocess
import sys
from docopt import docopt
from . import __version__ as version
def asrun(ascript):
"Run the given AppleScript and return the standard output and error."
## shamelessly stolen from
## http://www.leancrew.com/all-this/2013/03/combining-python-and-applescript/
osa = subprocess.Popen(['osascript', '-'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
return osa.communicate(ascript)[0]
def setvolume(device, amount):
"Set the volume to 'amount' on 'device' "
if device == 'out':
device = 'output'
if device == 'in':
device = 'input'
cmd = 'set volume {0} volume {1}'.format(device, amount)
return asrun(cmd)
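# For example, setvolume('out', 50) builds and runs the AppleScript command
# 'set volume output volume 50' (the 50 is just an assumed value).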
def main():
"Run the main programm."
args = docopt(__doc__, version='vol version '+ version)
if args['load'] and args['<profile>']:
home = os.path.expanduser("~")
cfile = os.path.join(home, '.vol')
try:
cfg = ConfigParser.RawConfigParser()
cfg.read(cfile)
profile = args['<profile>']
if cfg.has_section(profile):
for o in cfg.options(profile):
if o == 'out' or o == 'in' or o == 'alert':
setvolume(o, cfg.get(profile, o))
elif o == 'mute':
if cfg.getboolean(profile, o):
asrun('set volume output muted true')
else:
asrun('set volume output muted false')
else:
                        raise ValueError('unknown option {0} in profile {1}'.format(o, profile))
except Exception, e:
print "Error: {0} in {1} does not exist or is malformed".format(args['<profile>'], cfile)
elif args['info']:
print asrun('get volume settings')
elif args['mute']:
asrun('set volume output muted true')
elif args['unmute']:
asrun('set volume output muted false')
elif args['out']:
setvolume('out', args['<volume>'])
elif args['in']:
setvolume('in', args['<volume>'])
elif args['alert']:
setvolume('alert', args['<volume>'])
sys.exit(0)
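# A hypothetical ~/.vol profile that the 'load' branch above would accept
# (section name = profile name; only the out/in/alert/mute options are read):
#   [work]
#   out = 40
#   alert = 60
#   mute = false
# 'vol load work' would then set the output and alert volumes and unmute.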
| {
"content_hash": "f2dbab0d9feaf1c483f10810f73de16f",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 101,
"avg_line_length": 23.552380952380954,
"alnum_prop": 0.5458956732713304,
"repo_name": "bmaeser/vol",
"id": "4189c1032a14966b832afa69a8795f5adce6462c",
"size": "2520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vol/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3645"
}
],
"symlink_target": ""
} |
"""
Django settings for latch_demo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import abspath, basename, dirname, join, normpath
from sys import path
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SITE_ROOT = dirname(DJANGO_ROOT)
path.append(DJANGO_ROOT)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+0xk1zk82@e1_^fvo0)474cqc60ee@#xp6of=6fqn*2*rs%@89'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'latch',
'django_extensions',
'werkzeug',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'latch_demo.urls'
WSGI_APPLICATION = 'latch_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
AUTH_PROFILE_MODULE = 'latch.UserProfile'
AUTHENTICATION_BACKENDS = (
'latch.auth_backend.LatchAuthBackend',
)
LOGIN_REDIRECT_URL = '/admin/'
TEMPLATE_DIRS = (
os.path.join(DJANGO_ROOT, 'latch_demo/templates'),
)
| {
"content_hash": "059256ce24b7c73f65b33ff5a9d6902d",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 71,
"avg_line_length": 24.52,
"alnum_prop": 0.7210440456769984,
"repo_name": "rootedcon/django-latch",
"id": "da435d5c563cee2c4aed6562a8033265c43ed119",
"size": "2452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "latch-demo/latch_demo/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23069"
}
],
"symlink_target": ""
} |
"""The tests the cover command line platform."""
import logging
import pytest
from homeassistant import setup
from homeassistant.components.cover import ATTR_POSITION, ATTR_TILT_POSITION, DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_CLOSE_COVER_TILT,
SERVICE_OPEN_COVER,
SERVICE_OPEN_COVER_TILT,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_STOP_COVER,
SERVICE_TOGGLE,
SERVICE_TOGGLE_COVER_TILT,
STATE_CLOSED,
STATE_OFF,
STATE_ON,
STATE_OPEN,
STATE_UNAVAILABLE,
)
from tests.common import assert_setup_component, async_mock_service
_LOGGER = logging.getLogger(__name__)
ENTITY_COVER = "cover.test_template_cover"
@pytest.fixture(name="calls")
def calls_fixture(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_template_state_text(hass, calls):
"""Test the state text of a template."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"value_template": "{{ states.cover.test_state.state }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.async_set("cover.test_state", STATE_OPEN)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.state == STATE_OPEN
state = hass.states.async_set("cover.test_state", STATE_CLOSED)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.state == STATE_CLOSED
async def test_template_state_boolean(hass, calls):
"""Test the value_template attribute."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"value_template": "{{ 1 == 1 }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.state == STATE_OPEN
async def test_template_position(hass, calls):
"""Test the position_template attribute."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"position_template": "{{ states.cover.test.attributes.position }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.async_set("cover.test", STATE_CLOSED)
await hass.async_block_till_done()
entity = hass.states.get("cover.test")
attrs = {}
attrs["position"] = 42
hass.states.async_set(entity.entity_id, entity.state, attributes=attrs)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_position") == 42.0
assert state.state == STATE_OPEN
state = hass.states.async_set("cover.test", STATE_OPEN)
await hass.async_block_till_done()
entity = hass.states.get("cover.test")
attrs["position"] = 0.0
hass.states.async_set(entity.entity_id, entity.state, attributes=attrs)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_position") == 0.0
assert state.state == STATE_CLOSED
async def test_template_tilt(hass, calls):
"""Test the tilt_template attribute."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"value_template": "{{ 1 == 1 }}",
"tilt_template": "{{ 42 }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_tilt_position") == 42.0
async def test_template_out_of_bounds(hass, calls):
"""Test template out-of-bounds condition."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"position_template": "{{ -1 }}",
"tilt_template": "{{ 110 }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_tilt_position") is None
assert state.attributes.get("current_position") is None
async def test_template_mutex(hass, calls):
"""Test that only value or position template can be used."""
with assert_setup_component(0, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"value_template": "{{ 1 == 1 }}",
"position_template": "{{ 42 }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
"icon_template": "{% if states.cover.test_state.state %}"
"mdi:check"
"{% endif %}",
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all() == []
async def test_template_open_or_position(hass, caplog):
"""Test that at least one of open_cover or set_position is used."""
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {"test_template_cover": {"value_template": "{{ 1 == 1 }}"}},
}
},
)
await hass.async_block_till_done()
assert hass.states.async_all() == []
assert "Invalid config for [cover.template]" in caplog.text
async def test_template_open_and_close(hass, calls):
"""Test that if open_cover is specified, close_cover is too."""
with assert_setup_component(0, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"value_template": "{{ 1 == 1 }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all() == []
async def test_template_non_numeric(hass, calls):
"""Test that tilt_template values are numeric."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"position_template": "{{ on }}",
"tilt_template": "{% if states.cover.test_state.state %}"
"on"
"{% else %}"
"off"
"{% endif %}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_tilt_position") is None
assert state.attributes.get("current_position") is None
async def test_open_action(hass, calls):
"""Test the open_cover command."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"position_template": "{{ 0 }}",
"open_cover": {"service": "test.automation"},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.state == STATE_CLOSED
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_close_stop_action(hass, calls):
"""Test the close-cover and stop_cover commands."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"position_template": "{{ 100 }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {"service": "test.automation"},
"stop_cover": {"service": "test.automation"},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.state == STATE_OPEN
await hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
assert len(calls) == 2
async def test_set_position(hass, calls):
"""Test the set_position command."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"input_number",
{"input_number": {"test": {"min": "0", "max": "100", "initial": "42"}}},
)
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"position_template": "{{ states.input_number.test.state | int }}",
"set_cover_position": {
"service": "input_number.set_value",
"entity_id": "input_number.test",
"data_template": {"value": "{{ position }}"},
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.async_set("input_number.test", 42)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.state == STATE_OPEN
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_position") == 100.0
await hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_position") == 0.0
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_position") == 100.0
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_position") == 0.0
await hass.services.async_call(
DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: ENTITY_COVER, ATTR_POSITION: 25},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_position") == 25.0
async def test_set_tilt_position(hass, calls):
"""Test the set_tilt_position command."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"position_template": "{{ 100 }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
"set_cover_tilt_position": {"service": "test.automation"},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN,
SERVICE_SET_COVER_TILT_POSITION,
{ATTR_ENTITY_ID: ENTITY_COVER, ATTR_TILT_POSITION: 42},
blocking=True,
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_open_tilt_action(hass, calls):
"""Test the open_cover_tilt command."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"position_template": "{{ 100 }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
"set_cover_tilt_position": {"service": "test.automation"},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER_TILT, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_close_tilt_action(hass, calls):
"""Test the close_cover_tilt command."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"position_template": "{{ 100 }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
"set_cover_tilt_position": {"service": "test.automation"},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER_TILT, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_set_position_optimistic(hass, calls):
"""Test optimistic position mode."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"set_cover_position": {"service": "test.automation"}
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_position") is None
await hass.services.async_call(
DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: ENTITY_COVER, ATTR_POSITION: 42},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_position") == 42.0
await hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.state == STATE_CLOSED
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.state == STATE_OPEN
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.state == STATE_CLOSED
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.state == STATE_OPEN
async def test_set_tilt_position_optimistic(hass, calls):
"""Test the optimistic tilt_position mode."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"position_template": "{{ 100 }}",
"set_cover_position": {"service": "test.automation"},
"set_cover_tilt_position": {"service": "test.automation"},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_tilt_position") is None
await hass.services.async_call(
DOMAIN,
SERVICE_SET_COVER_TILT_POSITION,
{ATTR_ENTITY_ID: ENTITY_COVER, ATTR_TILT_POSITION: 42},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_tilt_position") == 42.0
await hass.services.async_call(
DOMAIN, SERVICE_CLOSE_COVER_TILT, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_tilt_position") == 0.0
await hass.services.async_call(
DOMAIN, SERVICE_OPEN_COVER_TILT, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_tilt_position") == 100.0
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE_COVER_TILT, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_tilt_position") == 0.0
await hass.services.async_call(
DOMAIN, SERVICE_TOGGLE_COVER_TILT, {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True
)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("current_tilt_position") == 100.0
async def test_icon_template(hass, calls):
"""Test icon template."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"value_template": "{{ states.cover.test_state.state }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
"icon_template": "{% if states.cover.test_state.state %}"
"mdi:check"
"{% endif %}",
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("icon") == ""
    hass.states.async_set("cover.test_state", STATE_OPEN)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes["icon"] == "mdi:check"
async def test_entity_picture_template(hass, calls):
"""Test icon template."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"value_template": "{{ states.cover.test_state.state }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
"entity_picture_template": "{% if states.cover.test_state.state %}"
"/local/cover.png"
"{% endif %}",
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("entity_picture") == ""
    hass.states.async_set("cover.test_state", STATE_OPEN)
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes["entity_picture"] == "/local/cover.png"
async def test_availability_template(hass, calls):
"""Test availability template."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"value_template": "open",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
"availability_template": "{{ is_state('availability_state.state','on') }}",
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
hass.states.async_set("availability_state.state", STATE_OFF)
await hass.async_block_till_done()
assert hass.states.get("cover.test_template_cover").state == STATE_UNAVAILABLE
hass.states.async_set("availability_state.state", STATE_ON)
await hass.async_block_till_done()
assert hass.states.get("cover.test_template_cover").state != STATE_UNAVAILABLE
async def test_availability_without_availability_template(hass, calls):
"""Test that component is available if there is no."""
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"value_template": "open",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.state != STATE_UNAVAILABLE
async def test_invalid_availability_template_keeps_component_available(hass, caplog):
"""Test that an invalid availability keeps the device available."""
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"availability_template": "{{ x - 12 }}",
"value_template": "open",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.get("cover.test_template_cover") != STATE_UNAVAILABLE
assert ("UndefinedError: 'x' is undefined") in caplog.text
async def test_device_class(hass, calls):
"""Test device class."""
with assert_setup_component(1, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"value_template": "{{ states.cover.test_state.state }}",
"device_class": "door",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert state.attributes.get("device_class") == "door"
async def test_invalid_device_class(hass, calls):
"""Test device class."""
with assert_setup_component(0, "cover"):
assert await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover": {
"value_template": "{{ states.cover.test_state.state }}",
"device_class": "barnacle_bill",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
}
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("cover.test_template_cover")
assert not state
async def test_unique_id(hass):
"""Test unique_id option only creates one cover per id."""
await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"test_template_cover_01": {
"unique_id": "not-so-unique-anymore",
"value_template": "{{ true }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
},
"test_template_cover_02": {
"unique_id": "not-so-unique-anymore",
"value_template": "{{ false }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
},
},
},
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
async def test_state_gets_lowercased(hass):
"""Test True/False is lowercased."""
hass.states.async_set("binary_sensor.garage_door_sensor", "off")
await setup.async_setup_component(
hass,
"cover",
{
"cover": {
"platform": "template",
"covers": {
"garage_door": {
"friendly_name": "Garage Door",
"value_template": "{{ is_state('binary_sensor.garage_door_sensor', 'off') }}",
"open_cover": {
"service": "cover.open_cover",
"entity_id": "cover.test_state",
},
"close_cover": {
"service": "cover.close_cover",
"entity_id": "cover.test_state",
},
},
},
},
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("cover.garage_door").state == STATE_OPEN
hass.states.async_set("binary_sensor.garage_door_sensor", "on")
await hass.async_block_till_done()
assert hass.states.get("cover.garage_door").state == STATE_CLOSED
| {
"content_hash": "9d6b82dc59cf399341f44abb5cd79a51",
"timestamp": "",
"source": "github",
"line_count": 1122,
"max_line_length": 103,
"avg_line_length": 34.79144385026738,
"alnum_prop": 0.4603699149503023,
"repo_name": "tchellomello/home-assistant",
"id": "5deb540782c05d7569707fe5faeac8763d5cbb6c",
"size": "39036",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/template/test_cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
} |
import unittest
from quickbooks import utils
class UtilsTests(unittest.TestCase):
def test_build_where_clause(self):
where_clause = utils.build_where_clause(field1=1,
field2="Someone's Company")
self.assertTrue("field1 = 1" in where_clause)
self.assertTrue("field2 = 'Someone\\\'s Company'" in where_clause)
def test_build_where_clause_unicode(self):
where_clause = utils.build_where_clause(field1=u"Test 1",
field2=u"Someone's Company")
self.assertTrue("field1 = 'Test 1'" in where_clause)
self.assertTrue("field2 = 'Someone\\\'s Company'" in where_clause)
def test_build_choose_clause_integers(self):
where_clause = utils.build_choose_clause(choices=[1, 2],
field="field1")
self.assertEqual(where_clause, "field1 in (1, 2)")
def test_build_choose_clause_strings(self):
where_clause = utils.build_choose_clause(choices=["val1", "val2"],
field="field1")
self.assertEqual(where_clause, "field1 in ('val1', 'val2')")
def test_build_choose_clause_quoted_value(self):
where_clause = utils.build_choose_clause(choices=["val1",
"Someone's Company"],
field="field1")
self.assertEqual(where_clause, "field1 in ('val1', 'Someone\\\'s Company')")
def test_build_choose_clause_unicode(self):
where_clause = utils.build_choose_clause(choices=[u"Test - & % $", u"Another Test"],
field="field1")
self.assertEqual(where_clause, "field1 in ('Test - & % $', 'Another Test')")
def test_build_choose_clause_unicode_escaped(self):
where_clause = utils.build_choose_clause(choices=[u"Test - & % $", u"Another's Test"],
field="field1")
self.assertEqual(where_clause, "field1 in ('Test - & % $', 'Another\\\'s Test')")
| {
"content_hash": "4e10103c0d42884eeaf2c28a3250c6be",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 94,
"avg_line_length": 44.204081632653065,
"alnum_prop": 0.53601108033241,
"repo_name": "ZachGoldberg/python-quickbooks",
"id": "11afad58746e9779f5ea5ef6973065487bea6041",
"size": "2166",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "222448"
}
],
"symlink_target": ""
} |
from shutit_module import ShutItModule
import base64
class openshift_secrets(ShutItModule):
def build(self, shutit):
shutit.send('cd /tmp/openshift_vm')
shutit.login(command='vagrant ssh')
        shutit.login(command='sudo su -', password='vagrant', note='Become root (there is a problem logging in as admin with the vagrant user)')
shutit.send('oc login -u user2 -p anystringwilldo')
shutit.send('oc project user2')
# Chapter 11 image pull secrets
# SECRETS
# Chapter 10 Secrets
# TODO
shutit.send_file('secret.json','''{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "mysecret-secrets"
},
"namespace": "user2",
"data": {
"username": "''' + base64.b64encode('myusername') + '''"
}
}''')
shutit.send('oc create -f secret.json')
shutit.send('''cat > docker.cfg << END
{
"https://index.docker.io/v1/": {
"auth": "W1pIWxdOaRoXYp6YXJka",
"email": "ian.miell@gmail.com"
}
}
END''',note='create a secret docker.cfg')
# TODO use these
shutit.logout()
shutit.logout()
return True
def module():
return openshift_secrets(
'shutit.openshift_vm.openshift_vm.openshift_secrets', 1418326706.006,
description='',
maintainer='',
delivery_methods=['bash'],
depends=['shutit.openshift_vm.openshift_vm.openshift_vm']
)
| {
"content_hash": "7d5eed28cac5b60c57f7d2b7adf7327f",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 135,
"avg_line_length": 24.71153846153846,
"alnum_prop": 0.669260700389105,
"repo_name": "ianmiell/shutit-openshift-vm",
"id": "6372bcac12bfe77367883f6f448f8f17b1b33d06",
"size": "1285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "secrets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32204"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import sys
from functools import partial
from . import converters, exceptions, filters, setters, validators
from ._config import get_run_validators, set_run_validators
from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types
from ._make import (
NOTHING,
Attribute,
Factory,
attrib,
attrs,
fields,
fields_dict,
make_class,
validate,
)
from ._version_info import VersionInfo
__version__ = "20.3.0"
__version_info__ = VersionInfo._from_version_string(__version__)
__title__ = "attrs"
__description__ = "Classes Without Boilerplate"
__url__ = "https://www.attrs.org/"
__uri__ = __url__
__doc__ = __description__ + " <" + __uri__ + ">"
__author__ = "Hynek Schlawack"
__email__ = "hs@ox.cx"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2015 Hynek Schlawack"
s = attributes = attrs
ib = attr = attrib
dataclass = partial(attrs, auto_attribs=True) # happy Easter ;)
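# Illustrative example (not from the attrs source): with auto_attribs=True,
# type-annotated class attributes become attrs attributes, so
#
#     @dataclass
#     class Point:
#         x: int = 0
#         y: int = 0
#
# is roughly equivalent to decorating Point with @attrs(auto_attribs=True).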
__all__ = [
"Attribute",
"Factory",
"NOTHING",
"asdict",
"assoc",
"astuple",
"attr",
"attrib",
"attributes",
"attrs",
"converters",
"evolve",
"exceptions",
"fields",
"fields_dict",
"filters",
"get_run_validators",
"has",
"ib",
"make_class",
"resolve_types",
"s",
"set_run_validators",
"setters",
"validate",
"validators",
]
if sys.version_info[:2] >= (3, 6):
from ._next_gen import define, field, frozen, mutable
    __all__.extend(("define", "field", "frozen", "mutable"))
| {
"content_hash": "135ac1a386a2b7c78539a322749882f1",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 70,
"avg_line_length": 20.63157894736842,
"alnum_prop": 0.6045918367346939,
"repo_name": "TeamSPoon/logicmoo_workspace",
"id": "bf329cad5c88580c3e1e89ed9196dd58bdf7fe3b",
"size": "1568",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "packs_web/butterfly/lib/python3.7/site-packages/attr/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "342"
},
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "1"
},
{
"name": "CSS",
"bytes": "126627"
},
{
"name": "HTML",
"bytes": "839172"
},
{
"name": "Java",
"bytes": "11116"
},
{
"name": "JavaScript",
"bytes": "238700"
},
{
"name": "PHP",
"bytes": "42253"
},
{
"name": "Perl 6",
"bytes": "23"
},
{
"name": "Prolog",
"bytes": "440882"
},
{
"name": "PureBasic",
"bytes": "1334"
},
{
"name": "Rich Text Format",
"bytes": "3436542"
},
{
"name": "Roff",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "61603"
},
{
"name": "TeX",
"bytes": "99504"
}
],
"symlink_target": ""
} |
import threading
import os
import sublime, sublime_plugin
from .libs.upyun import UpYun, ED_AUTO
import hashlib
from .local_settings import *
def upload_file(bucket_name, folder_path, filename, fp):
"""
    bucket_name: name of the UPYUN bucket (storage space)
    folder_path: absolute path of the target subdirectory, e.g. /wenyouxi/; defaults to / (i.e. no subdirectory)
    filename: name to store the file under in that subdirectory, e.g. tmp.db
    fp: file pointer; the file must be opened in 'rb' mode
"""
u = UpYun(bucket_name, OPERATOR_NAME, OPERATOR_PASSWORD, endpoint=ED_AUTO)
try:
# create folder
u.mkdir(folder_path)
except:
pass
upload_path = os.path.join(folder_path, filename)
u.put(upload_path, fp, checksum=True)
return u.getinfo(upload_path), upload_path
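# Illustrative usage (not part of the original plugin; the local path and
# filename are made-up placeholders, and UPYUN_BUCKET plus the operator
# credentials are assumed to come from local_settings):
#
#     with open('/tmp/example.db', 'rb') as fp:
#         info, path = upload_file(UPYUN_BUCKET, '/upload/', 'example.db', fp)
#     # `info` is whatever UpYun.getinfo() reports for the uploaded file and
#     # `path` is the bucket-relative location, here '/upload/example.db'.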
class AsyncUploadThread(threading.Thread):
def __init__(self, filepath, callback):
self.filepath = filepath
self.callback = callback
self.result = None
threading.Thread.__init__(self)
def run(self):
if not self.filepath:
return self.callback(None)
try:
filename, file_ext = os.path.splitext(os.path.basename(self.filepath))
upload_filename = ''.join([hashlib.sha1(self.filepath.encode('utf8')).hexdigest(), file_ext])
with open(self.filepath, 'rb') as fp:
info, url = upload_file(UPYUN_BUCKET, '/upload/', upload_filename, fp)
if info:
return self.callback(url)
else:
return self.callback(None)
except Exception as e:
print(e)
return self.callback(None)
# Extends TextCommand so that run() receives a View to modify.
class UploadUpyunCommand(sublime_plugin.TextCommand):
@staticmethod
def async_upload_callback(result):
if result:
sublime.message_dialog(''.join(['File upload success: ',
'http://{bucket}.b0.upaiyun.com'.format(bucket=UPYUN_BUCKET), result]))
else:
sublime.message_dialog('Upload failed, please retry.')
def run(self, edit):
sublime.status_message('Uploading file to UPYUN...')
filepath = self.view.file_name()
new_thread = AsyncUploadThread(filepath=filepath, callback=self.async_upload_callback)
new_thread.start()
| {
"content_hash": "94476fc1e72b2ed9a6a840880ad44459",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 115,
"avg_line_length": 31.15277777777778,
"alnum_prop": 0.6188140882746321,
"repo_name": "ritksm/upyun-upload-sublime",
"id": "d02251218254732c9699ea35916028d3c5830182",
"size": "2401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12356"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('home', '0021_faqpage_tags'),
]
operations = [
migrations.CreateModel(
name='FAQPageTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'abstract': False,
},
),
migrations.AlterField(
model_name='faqpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='home.FAQPageTag', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='faqpagetag',
name='content_object',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='home.FAQPage'),
),
migrations.AddField(
model_name='faqpagetag',
name='tag',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='home_faqpagetag_items', to='taggit.Tag'),
),
]
| {
"content_hash": "593893df85681695b63ac576f078a66e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 191,
"avg_line_length": 35.170731707317074,
"alnum_prop": 0.6033287101248266,
"repo_name": "aapris/tilajakamo",
"id": "297c4323ae315f444f214e219f751f8e8619832d",
"size": "1514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tilajakamoweb/home/migrations/0022_auto_20160209_0541.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23885"
},
{
"name": "HTML",
"bytes": "94930"
},
{
"name": "JavaScript",
"bytes": "8528"
},
{
"name": "Python",
"bytes": "117027"
}
],
"symlink_target": ""
} |
from networking_cisco import backwards_compatibility as bc
from networking_cisco.backwards_compatibility import constants as p_const
from networking_cisco.backwards_compatibility import ml2_api as api
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
constants as const)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import type_nexus_vxlan
from neutron.tests.unit import testlib_api
VNI_RANGES = [(100, 102), (200, 202)]
VNI_RANGE_LOW_INVALID = [str(type_nexus_vxlan.MIN_NEXUS_VNI - 1) + ':' +
str(p_const.MAX_VXLAN_VNI)]
VNI_RANGE_HIGH_INVALID = [str(type_nexus_vxlan.MIN_NEXUS_VNI) + ':' +
str(p_const.MAX_VXLAN_VNI + 1)]
MCAST_GROUP_RANGES = ['224.0.0.1:224.0.0.2', '224.0.1.1:224.0.1.2']
class NexusVxlanTypeTest(testlib_api.SqlTestCase):
def setUp(self):
super(NexusVxlanTypeTest, self).setUp()
self.driver = type_nexus_vxlan.NexusVxlanTypeDriver()
self.driver.conf_mcast_ranges = MCAST_GROUP_RANGES
self.driver.tunnel_ranges = VNI_RANGES
self.driver.sync_allocations()
self.context = bc.get_db_ref(bc.get_context())
def vni_in_range(self, vni):
# SegmentTypeDriver.allocate_partially_specified_segment allocates
# a random VNI from the range
return any(lower <= vni <= upper for (lower, upper) in VNI_RANGES)
def test_allocate_tenant_segment(self):
segment = self.driver.allocate_tenant_segment(self.context)
self.assertEqual(segment[api.NETWORK_TYPE], const.TYPE_NEXUS_VXLAN)
self.assertEqual(segment[api.PHYSICAL_NETWORK], '224.0.0.1')
self.assertTrue(self.vni_in_range(segment[api.SEGMENTATION_ID]))
def test_allocate_shared_mcast_group(self):
segments = []
for i in range(0, 6):
segments.append(self.driver.allocate_tenant_segment(
self.context))
self.assertEqual(segments[0][api.NETWORK_TYPE],
const.TYPE_NEXUS_VXLAN)
self.assertEqual(segments[0][api.PHYSICAL_NETWORK], '224.0.0.1')
self.assertTrue(self.vni_in_range(segments[0][api.SEGMENTATION_ID]))
self.assertEqual(segments[-1][api.NETWORK_TYPE],
const.TYPE_NEXUS_VXLAN)
self.assertEqual(segments[-1][api.PHYSICAL_NETWORK], '224.0.0.1')
self.assertTrue(self.vni_in_range(segments[-1][api.SEGMENTATION_ID]))
self.assertNotEqual(segments[0], segments[-1])
def test_reserve_provider_segment_full_specs(self):
segment = {api.NETWORK_TYPE: const.TYPE_NEXUS_VXLAN,
api.PHYSICAL_NETWORK: '224.0.0.1',
api.SEGMENTATION_ID: '5000'}
result = self.driver.reserve_provider_segment(self.context, segment)
alloc = self.driver.get_allocation(
self.context,
result[api.SEGMENTATION_ID])
mcast_group = self.driver._get_mcast_group_for_vni(
self.context,
alloc.vxlan_vni)
self.assertTrue(alloc.allocated)
self.assertEqual(alloc.vxlan_vni, 5000)
self.assertEqual(mcast_group, '224.0.0.1')
def test_reserve_provider_segment_partial_specs(self):
segment = {api.NETWORK_TYPE: const.TYPE_NEXUS_VXLAN,
api.PHYSICAL_NETWORK: '224.0.0.1'}
result = self.driver.reserve_provider_segment(self.context, segment)
alloc = self.driver.get_allocation(
self.context,
result[api.SEGMENTATION_ID])
mcast_group = self.driver._get_mcast_group_for_vni(
self.context,
alloc.vxlan_vni)
self.assertTrue(alloc.allocated)
self.assertTrue(self.vni_in_range(alloc.vxlan_vni))
self.assertEqual(mcast_group, '224.0.0.1')
def test_invalid_vni_ranges(self):
for invalid_vni_range in [VNI_RANGE_LOW_INVALID,
VNI_RANGE_HIGH_INVALID]:
type_nexus_vxlan.cfg.CONF.set_override('vni_ranges',
invalid_vni_range,
'ml2_type_nexus_vxlan')
self.assertRaises(SystemExit, self.driver._verify_vni_ranges)
| {
"content_hash": "534bb2bbeea3cacfa33b10c804ea57cd",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 77,
"avg_line_length": 46.252747252747255,
"alnum_prop": 0.6284153005464481,
"repo_name": "Tehsmash/networking-cisco",
"id": "d15a1a8ed71c623a511306cf7c61d4e77d54da07",
"size": "4853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_cisco/tests/unit/ml2/drivers/cisco/nexus/test_type_nexus_vxlan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "3715465"
},
{
"name": "Shell",
"bytes": "35749"
}
],
"symlink_target": ""
} |
import numpy
from chainer.functions.normalization._standardize import _standardize
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product([
[
{'ch_out': 1},
{'ch_out': 5},
],
[
{'size': 10},
{'size': 20},
],
[
{'dtype': numpy.float64},
{'dtype': numpy.float32},
{'dtype': numpy.float16},
],
[
# same (str): flag whether input elems are same values.
        # 'no'   : all elems are randomly chosen,
# 'equal': all elems are equal,
# 'near' : all elems are (randomly-chosen small values
# + same value).
{'eps': 1e-5, 'same': 'no'},
{'eps': 1e-1, 'same': 'no'},
{'eps': 1e-1, 'same': 'equal'},
{'eps': 1e-1, 'same': 'near'},
],
]))
@testing.backend.inject_backend_tests(
None,
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
)
class TestStandardize(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 5e-3, 'rtol': 2e-3})
self.check_backward_options.update({'atol': 5e-3, 'rtol': 1e-2})
self.check_double_backward_options.update({
'atol': 5e-3, 'rtol': 1e-2})
if self.same in ('equal', 'near'):
self.check_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2, 'eps': 1e-4})
self.skip_double_backward_test = True
def generate_inputs(self):
shape = self.ch_out, self.size
if self.same in ('equal', 'near'):
# Make self.x have same values
x = numpy.ones(shape, self.dtype)
x *= numpy.random.uniform(-1, 1)
if self.same == 'near':
# Make self.x have slightly different values
zero_scale = 10. ** numpy.random.randint(-16, -3)
x *= 1. + numpy.random.uniform(-zero_scale, zero_scale, shape)
else:
x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return _standardize(x, self.eps),
def forward_expected(self, inputs):
x, = inputs
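        # Reference standardization computed directly in NumPy: subtract the
        # per-row mean and divide by (standard deviation + eps); this is the
        # behaviour _standardize is expected to match.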
mu = numpy.mean(x, axis=1, keepdims=True)
x_mu = x - mu
var = numpy.mean(numpy.square(x_mu), axis=1, keepdims=True)
std = numpy.sqrt(var, dtype=x.dtype) + x.dtype.type(self.eps)
return utils.force_array(x_mu / std, dtype=self.dtype),
testing.run_module(__name__, __file__)
| {
"content_hash": "b7540ed5356834c0190a8ba83522be33",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 31.467391304347824,
"alnum_prop": 0.5284974093264249,
"repo_name": "okuta/chainer",
"id": "55d14c6bd4c6d9a73d259b494b5f77ad0b467b05",
"size": "2895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/functions_tests/normalization_tests/test_standardize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1548487"
},
{
"name": "CMake",
"bytes": "51604"
},
{
"name": "Cuda",
"bytes": "128377"
},
{
"name": "Dockerfile",
"bytes": "1457"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5851909"
},
{
"name": "Shell",
"bytes": "41045"
}
],
"symlink_target": ""
} |
from ado.model import Model
from ado.project import Project
from ado.recipe import Recipe
class Portfolio(Model):
FIELDS = {
"alias" : "text",
"archived_at" : "timestamp",
"created_at" : "timestamp",
"description" : "text",
"name" : "text"
}
def projects(self, conn=None):
if not conn:
conn = self.conn
sql = "select * from %s where portfolio_id = ? and archived_at IS NULL" % Project.table_name()
return [Project.load(conn, row) for row in conn.execute(sql, [self.id])]
def recipes(self, conn=None):
if not conn:
conn = self.conn
sql = "select * from %s where portfolio_id = ?" % Recipe.table_name()
return [Recipe.load(conn, row) for row in conn.execute(sql, [self.id])]
def display_line(self):
return "Portfolio %02d) %s: %s" % (self.id, self.name, self.description)
def show(self):
show_text = []
show_text.append("Portfolio %s: %s" % (self.id, self.name))
if self.description:
show_text.append(self.description)
return "\n".join(show_text)
| {
"content_hash": "2cc8337964388721123920939c0d41e7",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 102,
"avg_line_length": 33.470588235294116,
"alnum_prop": 0.5790861159929701,
"repo_name": "ananelson/ado",
"id": "7db5343335f7de707c8e48c1a9782002edba0a96",
"size": "1138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ado/portfolio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "161173"
},
{
"name": "Python",
"bytes": "65110"
},
{
"name": "R",
"bytes": "426"
},
{
"name": "Shell",
"bytes": "5951"
}
],
"symlink_target": ""
} |
import datetime
import uuid
import freezegun
from keystone.common import controller
from keystone.common import resource_options
from keystone.common import sql
from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.identity.backends import base
from keystone.identity.backends import resource_options as iro
from keystone.identity.backends import sql_model as model
from keystone.tests.unit import test_backend_sql
CONF = keystone.conf.CONF
class UserResourceOptionTests(test_backend_sql.SqlTests):
def setUp(self):
super(UserResourceOptionTests, self).setUp()
# RESET STATE OF REGISTRY OPTIONS
self.addCleanup(iro.register_user_options)
self.addCleanup(iro.USER_OPTIONS_REGISTRY._registered_options.clear)
self.option1 = resource_options.ResourceOption('opt1', 'option1')
self.option2 = resource_options.ResourceOption('opt2', 'option2')
self.cleanup_instance('option1', 'option2')
iro.USER_OPTIONS_REGISTRY._registered_options.clear()
iro.USER_OPTIONS_REGISTRY.register_option(self.option1)
iro.USER_OPTIONS_REGISTRY.register_option(self.option2)
def test_user_set_option_in_resource_option(self):
user = self._create_user(self._get_user_dict())
opt_value = uuid.uuid4().hex
user['options'][self.option1.option_name] = opt_value
new_ref = self.identity_api.update_user(user['id'], user)
self.assertEqual(opt_value,
new_ref['options'][self.option1.option_name])
raw_ref = self._get_user_ref(user['id'])
self.assertIn(self.option1.option_id, raw_ref._resource_option_mapper)
self.assertEqual(
opt_value,
raw_ref._resource_option_mapper[
self.option1.option_id].option_value)
api_get_ref = self.identity_api.get_user(user['id'])
# Ensure options are properly set in a .get_user call.
self.assertEqual(opt_value,
api_get_ref['options'][self.option1.option_name])
def test_user_add_update_delete_option_in_resource_option(self):
user = self._create_user(self._get_user_dict())
opt_value = uuid.uuid4().hex
new_opt_value = uuid.uuid4().hex
# Update user to add the new value option
user['options'][self.option1.option_name] = opt_value
new_ref = self.identity_api.update_user(user['id'], user)
self.assertEqual(opt_value,
new_ref['options'][self.option1.option_name])
# Update the option Value and confirm it is updated
user['options'][self.option1.option_name] = new_opt_value
new_ref = self.identity_api.update_user(user['id'], user)
self.assertEqual(new_opt_value,
new_ref['options'][self.option1.option_name])
# Set the option value to None, meaning delete the option
user['options'][self.option1.option_name] = None
new_ref = self.identity_api.update_user(user['id'], user)
self.assertNotIn(self.option1.option_name, new_ref['options'])
def test_user_add_delete_resource_option_existing_option_values(self):
user = self._create_user(self._get_user_dict())
opt_value = uuid.uuid4().hex
opt2_value = uuid.uuid4().hex
# Update user to add the new value option
user['options'][self.option1.option_name] = opt_value
new_ref = self.identity_api.update_user(user['id'], user)
self.assertEqual(opt_value,
new_ref['options'][self.option1.option_name])
# Update the option value for option 2 and confirm it is updated and
# option1's value remains the same. Option 1 is not specified in the
# updated user ref.
del user['options'][self.option1.option_name]
user['options'][self.option2.option_name] = opt2_value
new_ref = self.identity_api.update_user(user['id'], user)
self.assertEqual(opt_value,
new_ref['options'][self.option1.option_name])
self.assertEqual(opt2_value,
new_ref['options'][self.option2.option_name])
raw_ref = self._get_user_ref(user['id'])
self.assertEqual(
opt_value,
raw_ref._resource_option_mapper[
self.option1.option_id].option_value)
self.assertEqual(
opt2_value,
raw_ref._resource_option_mapper[
self.option2.option_id].option_value)
# Set the option value to None, meaning delete the option, ensure
# option 2 still remains and has the right value
user['options'][self.option1.option_name] = None
new_ref = self.identity_api.update_user(user['id'], user)
self.assertNotIn(self.option1.option_name, new_ref['options'])
self.assertEqual(opt2_value,
new_ref['options'][self.option2.option_name])
raw_ref = self._get_user_ref(user['id'])
self.assertNotIn(raw_ref._resource_option_mapper,
self.option1.option_id)
self.assertEqual(
opt2_value,
raw_ref._resource_option_mapper[
self.option2.option_id].option_value)
def test_unregistered_resource_option_deleted(self):
user = self._create_user(self._get_user_dict())
opt_value = uuid.uuid4().hex
opt2_value = uuid.uuid4().hex
# Update user to add the new value option
user['options'][self.option1.option_name] = opt_value
new_ref = self.identity_api.update_user(user['id'], user)
self.assertEqual(opt_value,
new_ref['options'][self.option1.option_name])
# Update the option value for option 2 and confirm it is updated and
# option1's value remains the same. Option 1 is not specified in the
# updated user ref.
del user['options'][self.option1.option_name]
user['options'][self.option2.option_name] = opt2_value
new_ref = self.identity_api.update_user(user['id'], user)
self.assertEqual(opt_value,
new_ref['options'][self.option1.option_name])
self.assertEqual(opt2_value,
new_ref['options'][self.option2.option_name])
raw_ref = self._get_user_ref(user['id'])
self.assertEqual(
opt_value,
raw_ref._resource_option_mapper[
self.option1.option_id].option_value)
self.assertEqual(
opt2_value,
raw_ref._resource_option_mapper[
self.option2.option_id].option_value)
# clear registered options and only re-register option1, update user
# and confirm option2 is gone from the ref and returned dict
iro.USER_OPTIONS_REGISTRY._registered_options.clear()
iro.USER_OPTIONS_REGISTRY.register_option(self.option1)
user['name'] = uuid.uuid4().hex
new_ref = self.identity_api.update_user(user['id'], user)
self.assertNotIn(self.option2.option_name, new_ref['options'])
self.assertEqual(opt_value,
new_ref['options'][self.option1.option_name])
raw_ref = self._get_user_ref(user['id'])
self.assertNotIn(raw_ref._resource_option_mapper,
self.option2.option_id)
self.assertEqual(
opt_value,
raw_ref._resource_option_mapper[
self.option1.option_id].option_value)
def _get_user_ref(self, user_id):
with sql.session_for_read() as session:
return session.query(model.User).get(user_id)
def _create_user(self, user_dict):
user_dict['id'] = uuid.uuid4().hex
user_dict = utils.hash_user_password(user_dict)
with sql.session_for_write() as session:
user_ref = model.User.from_dict(user_dict)
session.add(user_ref)
return base.filter_user(user_ref.to_dict())
def _get_user_dict(self):
user = {
'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'enabled': True,
'password': uuid.uuid4().hex
}
return user
class DisableInactiveUserTests(test_backend_sql.SqlTests):
def setUp(self):
super(DisableInactiveUserTests, self).setUp()
self.password = uuid.uuid4().hex
self.user_dict = self._get_user_dict(self.password)
self.max_inactive_days = 90
self.config_fixture.config(
group='security_compliance',
disable_user_account_days_inactive=self.max_inactive_days)
def test_authenticate_user_disabled_due_to_inactivity(self):
# create user and set last_active_at beyond the max
last_active_at = (
datetime.datetime.utcnow() -
datetime.timedelta(days=self.max_inactive_days + 1))
user = self._create_user(self.user_dict, last_active_at.date())
self.assertRaises(exception.UserDisabled,
self.identity_api.authenticate,
self.make_request(),
user_id=user['id'],
password=self.password)
# verify that the user is actually disabled
user = self.identity_api.get_user(user['id'])
self.assertFalse(user['enabled'])
# set the user to enabled and authenticate
user['enabled'] = True
self.identity_api.update_user(user['id'], user)
user = self.identity_api.authenticate(self.make_request(),
user_id=user['id'],
password=self.password)
self.assertTrue(user['enabled'])
def test_authenticate_user_not_disabled_due_to_inactivity(self):
# create user and set last_active_at just below the max
last_active_at = (
datetime.datetime.utcnow() -
datetime.timedelta(days=self.max_inactive_days - 1)).date()
user = self._create_user(self.user_dict, last_active_at)
user = self.identity_api.authenticate(self.make_request(),
user_id=user['id'],
password=self.password)
self.assertTrue(user['enabled'])
def test_get_user_disabled_due_to_inactivity(self):
user = self.identity_api.create_user(self.user_dict)
# set last_active_at just beyond the max
last_active_at = (
datetime.datetime.utcnow() -
datetime.timedelta(self.max_inactive_days + 1)).date()
self._update_user_last_active_at(user['id'], last_active_at)
# get user and verify that the user is actually disabled
user = self.identity_api.get_user(user['id'])
self.assertFalse(user['enabled'])
# set enabled and test
user['enabled'] = True
self.identity_api.update_user(user['id'], user)
user = self.identity_api.get_user(user['id'])
self.assertTrue(user['enabled'])
def test_get_user_not_disabled_due_to_inactivity(self):
user = self.identity_api.create_user(self.user_dict)
self.assertTrue(user['enabled'])
# set last_active_at just below the max
last_active_at = (
datetime.datetime.utcnow() -
datetime.timedelta(self.max_inactive_days - 1)).date()
self._update_user_last_active_at(user['id'], last_active_at)
# get user and verify that the user is still enabled
user = self.identity_api.get_user(user['id'])
self.assertTrue(user['enabled'])
def test_enabled_after_create_update_user(self):
self.config_fixture.config(group='security_compliance',
disable_user_account_days_inactive=90)
# create user without enabled; assert enabled
del self.user_dict['enabled']
user = self.identity_api.create_user(self.user_dict)
user_ref = self._get_user_ref(user['id'])
self.assertTrue(user_ref.enabled)
now = datetime.datetime.utcnow().date()
self.assertGreaterEqual(now, user_ref.last_active_at)
# set enabled and test
user['enabled'] = True
self.identity_api.update_user(user['id'], user)
user_ref = self._get_user_ref(user['id'])
self.assertTrue(user_ref.enabled)
# set disabled and test
user['enabled'] = False
self.identity_api.update_user(user['id'], user)
user_ref = self._get_user_ref(user['id'])
self.assertFalse(user_ref.enabled)
# re-enable user and test
user['enabled'] = True
self.identity_api.update_user(user['id'], user)
user_ref = self._get_user_ref(user['id'])
self.assertTrue(user_ref.enabled)
def _get_user_dict(self, password):
user = {
'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'enabled': True,
'password': password
}
return user
def _get_user_ref(self, user_id):
with sql.session_for_read() as session:
return session.query(model.User).get(user_id)
def _create_user(self, user_dict, last_active_at):
user_dict['id'] = uuid.uuid4().hex
user_dict = utils.hash_user_password(user_dict)
with sql.session_for_write() as session:
user_ref = model.User.from_dict(user_dict)
user_ref.last_active_at = last_active_at
session.add(user_ref)
return base.filter_user(user_ref.to_dict())
def _update_user_last_active_at(self, user_id, last_active_at):
with sql.session_for_write() as session:
user_ref = session.query(model.User).get(user_id)
user_ref.last_active_at = last_active_at
return user_ref
class PasswordHistoryValidationTests(test_backend_sql.SqlTests):
def setUp(self):
super(PasswordHistoryValidationTests, self).setUp()
self.max_cnt = 3
self.config_fixture.config(group='security_compliance',
unique_last_password_count=self.max_cnt)
def test_validate_password_history_with_invalid_password(self):
password = uuid.uuid4().hex
user = self._create_user(password)
# Attempt to change to the same password
self.assertRaises(exception.PasswordValidationError,
self.identity_api.change_password,
self.make_request(),
user_id=user['id'],
original_password=password,
new_password=password)
# Attempt to change to a unique password
new_password = uuid.uuid4().hex
self.assertValidChangePassword(user['id'], password, new_password)
# Attempt to change back to the initial password
self.assertRaises(exception.PasswordValidationError,
self.identity_api.change_password,
self.make_request(),
user_id=user['id'],
original_password=new_password,
new_password=password)
def test_validate_password_history_with_valid_password(self):
passwords = [uuid.uuid4().hex, uuid.uuid4().hex, uuid.uuid4().hex,
uuid.uuid4().hex]
user = self._create_user(passwords[0])
self.assertValidChangePassword(user['id'], passwords[0], passwords[1])
self.assertValidChangePassword(user['id'], passwords[1], passwords[2])
self.assertValidChangePassword(user['id'], passwords[2], passwords[3])
# Now you should be able to change the password to match the initial
# password because the password history only contains password elements
# 1, 2, 3
self.assertValidChangePassword(user['id'], passwords[3], passwords[0])
def test_validate_password_history_but_start_with_password_none(self):
passwords = [uuid.uuid4().hex, uuid.uuid4().hex]
# Create user and confirm password is None
user = self._create_user(None)
user_ref = self._get_user_ref(user['id'])
self.assertIsNone(user_ref.password)
# Admin password reset
user['password'] = passwords[0]
self.identity_api.update_user(user['id'], user)
# Self-service change password
self.assertValidChangePassword(user['id'], passwords[0], passwords[1])
# Attempt to update with a previous password
self.assertRaises(exception.PasswordValidationError,
self.identity_api.change_password,
self.make_request(),
user_id=user['id'],
original_password=passwords[1],
new_password=passwords[0])
def test_disable_password_history_and_repeat_same_password(self):
self.config_fixture.config(group='security_compliance',
unique_last_password_count=1)
password = uuid.uuid4().hex
user = self._create_user(password)
# Repeatedly change password with the same password
self.assertValidChangePassword(user['id'], password, password)
self.assertValidChangePassword(user['id'], password, password)
def test_admin_password_reset_is_not_validated_by_password_history(self):
passwords = [uuid.uuid4().hex, uuid.uuid4().hex]
user = self._create_user(passwords[0])
# Attempt to change password to a unique password
user['password'] = passwords[1]
self.identity_api.update_user(user['id'], user)
self.identity_api.authenticate(self.make_request(),
user_id=user['id'],
password=passwords[1])
# Attempt to change password with the same password
user['password'] = passwords[1]
self.identity_api.update_user(user['id'], user)
self.identity_api.authenticate(self.make_request(),
user_id=user['id'],
password=passwords[1])
# Attempt to change password with the initial password
user['password'] = passwords[0]
self.identity_api.update_user(user['id'], user)
self.identity_api.authenticate(self.make_request(),
user_id=user['id'],
password=passwords[0])
def test_truncate_passwords(self):
user = self._create_user(uuid.uuid4().hex)
self._add_passwords_to_history(user, n=4)
user_ref = self._get_user_ref(user['id'])
self.assertEqual(
len(user_ref.local_user.passwords), (self.max_cnt + 1))
def test_truncate_passwords_when_max_is_default(self):
self.max_cnt = 1
expected_length = self.max_cnt + 1
self.config_fixture.config(group='security_compliance',
unique_last_password_count=self.max_cnt)
user = self._create_user(uuid.uuid4().hex)
self._add_passwords_to_history(user, n=4)
user_ref = self._get_user_ref(user['id'])
self.assertEqual(len(user_ref.local_user.passwords), expected_length)
# Start with multiple passwords and then change max_cnt to one
self.max_cnt = 4
self.config_fixture.config(group='security_compliance',
unique_last_password_count=self.max_cnt)
self._add_passwords_to_history(user, n=self.max_cnt)
user_ref = self._get_user_ref(user['id'])
self.assertEqual(
len(user_ref.local_user.passwords), (self.max_cnt + 1))
self.max_cnt = 1
self.config_fixture.config(group='security_compliance',
unique_last_password_count=self.max_cnt)
self._add_passwords_to_history(user, n=1)
user_ref = self._get_user_ref(user['id'])
self.assertEqual(len(user_ref.local_user.passwords), expected_length)
def test_truncate_passwords_when_max_is_default_and_no_password(self):
expected_length = 1
self.max_cnt = 1
self.config_fixture.config(group='security_compliance',
unique_last_password_count=self.max_cnt)
user = {
'name': uuid.uuid4().hex,
'domain_id': 'default',
'enabled': True,
}
user = self.identity_api.create_user(user)
self._add_passwords_to_history(user, n=1)
user_ref = self._get_user_ref(user['id'])
self.assertEqual(len(user_ref.local_user.passwords), expected_length)
def _create_user(self, password):
user = {
'name': uuid.uuid4().hex,
'domain_id': 'default',
'enabled': True,
'password': password
}
return self.identity_api.create_user(user)
def assertValidChangePassword(self, user_id, password, new_password):
self.identity_api.change_password(self.make_request(),
user_id=user_id,
original_password=password,
new_password=new_password)
self.identity_api.authenticate(self.make_request(),
user_id=user_id,
password=new_password)
def _add_passwords_to_history(self, user, n):
for _ in range(n):
user['password'] = uuid.uuid4().hex
self.identity_api.update_user(user['id'], user)
def _get_user_ref(self, user_id):
with sql.session_for_read() as session:
return self.identity_api._get_user(session, user_id)
class LockingOutUserTests(test_backend_sql.SqlTests):
def setUp(self):
super(LockingOutUserTests, self).setUp()
self.config_fixture.config(
group='security_compliance',
lockout_failure_attempts=6)
self.config_fixture.config(
group='security_compliance',
lockout_duration=5)
# create user
self.password = uuid.uuid4().hex
user_dict = {
'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'enabled': True,
'password': self.password
}
self.user = self.identity_api.create_user(user_dict)
def test_locking_out_user_after_max_failed_attempts(self):
# authenticate with wrong password
self.assertRaises(AssertionError,
self.identity_api.authenticate,
self.make_request(),
user_id=self.user['id'],
password=uuid.uuid4().hex)
# authenticate with correct password
self.identity_api.authenticate(self.make_request(),
user_id=self.user['id'],
password=self.password)
# test locking out user after max failed attempts
self._fail_auth_repeatedly(self.user['id'])
self.assertRaises(exception.AccountLocked,
self.identity_api.authenticate,
self.make_request(),
user_id=self.user['id'],
password=uuid.uuid4().hex)
def test_lock_out_for_ignored_user(self):
# mark the user as exempt from failed password attempts
# ignore user and reset password, password not expired
self.user['options'][iro.IGNORE_LOCKOUT_ATTEMPT_OPT.option_name] = True
self.identity_api.update_user(self.user['id'], self.user)
# fail authentication repeatedly the max number of times
self._fail_auth_repeatedly(self.user['id'])
# authenticate with wrong password, account should not be locked
self.assertRaises(AssertionError,
self.identity_api.authenticate,
self.make_request(),
user_id=self.user['id'],
password=uuid.uuid4().hex)
# authenticate with correct password, account should not be locked
self.identity_api.authenticate(self.make_request(),
user_id=self.user['id'],
password=self.password)
def test_set_enabled_unlocks_user(self):
# lockout user
self._fail_auth_repeatedly(self.user['id'])
self.assertRaises(exception.AccountLocked,
self.identity_api.authenticate,
self.make_request(),
user_id=self.user['id'],
password=uuid.uuid4().hex)
# set enabled, user should be unlocked
self.user['enabled'] = True
self.identity_api.update_user(self.user['id'], self.user)
user_ret = self.identity_api.authenticate(self.make_request(),
user_id=self.user['id'],
password=self.password)
self.assertTrue(user_ret['enabled'])
def test_lockout_duration(self):
# freeze time
with freezegun.freeze_time(datetime.datetime.utcnow()) as frozen_time:
# lockout user
self._fail_auth_repeatedly(self.user['id'])
self.assertRaises(exception.AccountLocked,
self.identity_api.authenticate,
self.make_request(),
user_id=self.user['id'],
password=uuid.uuid4().hex)
# freeze time past the duration, user should be unlocked and failed
# auth count should get reset
frozen_time.tick(delta=datetime.timedelta(
seconds=CONF.security_compliance.lockout_duration + 1))
self.identity_api.authenticate(self.make_request(),
user_id=self.user['id'],
password=self.password)
# test failed auth count was reset by authenticating with the wrong
# password, should raise an assertion error and not account locked
self.assertRaises(AssertionError,
self.identity_api.authenticate,
self.make_request(),
user_id=self.user['id'],
password=uuid.uuid4().hex)
def test_lockout_duration_failed_auth_cnt_resets(self):
# freeze time
with freezegun.freeze_time(datetime.datetime.utcnow()) as frozen_time:
# lockout user
self._fail_auth_repeatedly(self.user['id'])
self.assertRaises(exception.AccountLocked,
self.identity_api.authenticate,
self.make_request(),
user_id=self.user['id'],
password=uuid.uuid4().hex)
# freeze time past the duration, failed_auth_cnt should reset
frozen_time.tick(delta=datetime.timedelta(
seconds=CONF.security_compliance.lockout_duration + 1))
# repeat failed auth the max times
self._fail_auth_repeatedly(self.user['id'])
# test user account is locked
self.assertRaises(exception.AccountLocked,
self.identity_api.authenticate,
self.make_request(),
user_id=self.user['id'],
password=uuid.uuid4().hex)
def _fail_auth_repeatedly(self, user_id):
wrong_password = uuid.uuid4().hex
for _ in range(CONF.security_compliance.lockout_failure_attempts):
self.assertRaises(AssertionError,
self.identity_api.authenticate,
self.make_request(),
user_id=user_id,
password=wrong_password)
class PasswordExpiresValidationTests(test_backend_sql.SqlTests):
def setUp(self):
super(PasswordExpiresValidationTests, self).setUp()
self.password = uuid.uuid4().hex
self.user_dict = self._get_test_user_dict(self.password)
self.config_fixture.config(
group='security_compliance',
password_expires_days=90)
def test_authenticate_with_expired_password(self):
# set password created_at so that the password will expire
password_created_at = (
datetime.datetime.utcnow() -
datetime.timedelta(
days=CONF.security_compliance.password_expires_days + 1)
)
user = self._create_user(self.user_dict, password_created_at)
# test password is expired
self.assertRaises(exception.PasswordExpired,
self.identity_api.authenticate,
self.make_request(),
user_id=user['id'],
password=self.password)
def test_authenticate_with_expired_password_v2(self):
# set password created_at so that the password will expire
password_created_at = (
datetime.datetime.utcnow() -
datetime.timedelta(
days=CONF.security_compliance.password_expires_days + 1)
)
user = self._create_user(self.user_dict, password_created_at)
# test password_expires_at is not returned for v2
user = controller.V2Controller.v3_to_v2_user(user)
self.assertNotIn('password_expires_at', user)
# test password is expired
self.assertRaises(exception.PasswordExpired,
self.identity_api.authenticate,
self.make_request(),
user_id=user['id'],
password=self.password)
def test_authenticate_with_non_expired_password(self):
# set password created_at so that the password will not expire
password_created_at = (
datetime.datetime.utcnow() -
datetime.timedelta(
days=CONF.security_compliance.password_expires_days - 1)
)
user = self._create_user(self.user_dict, password_created_at)
# test password is not expired
self.identity_api.authenticate(self.make_request(),
user_id=user['id'],
password=self.password)
def test_authenticate_with_expired_password_for_ignore_user_option(self):
# set user to have the 'ignore_password_expiry' option set to False
self.user_dict.setdefault('options', {})[
iro.IGNORE_PASSWORD_EXPIRY_OPT.option_name] = False
# set password created_at so that the password will expire
password_created_at = (
datetime.datetime.utcnow() -
datetime.timedelta(
days=CONF.security_compliance.password_expires_days + 1)
)
user = self._create_user(self.user_dict, password_created_at)
self.assertRaises(exception.PasswordExpired,
self.identity_api.authenticate,
self.make_request(),
user_id=user['id'],
password=self.password)
# update user to explicitly have the expiry option to True
user['options'][
iro.IGNORE_PASSWORD_EXPIRY_OPT.option_name] = True
user = self.identity_api.update_user(user['id'],
user)
# test password is not expired due to ignore option
self.identity_api.authenticate(self.make_request(),
user_id=user['id'],
password=self.password)
def _get_test_user_dict(self, password):
test_user_dict = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'enabled': True,
'password': password
}
return test_user_dict
def _create_user(self, user_dict, password_created_at):
# Bypass business logic and go straight for the identity driver
# (SQL in this case)
driver = self.identity_api.driver
driver.create_user(user_dict['id'], user_dict)
with sql.session_for_write() as session:
user_ref = session.query(model.User).get(user_dict['id'])
user_ref.password_ref.created_at = password_created_at
user_ref.password_ref.expires_at = (
user_ref._get_password_expires_at(password_created_at))
return base.filter_user(user_ref.to_dict())
class MinimumPasswordAgeTests(test_backend_sql.SqlTests):
def setUp(self):
super(MinimumPasswordAgeTests, self).setUp()
self.config_fixture.config(
group='security_compliance',
minimum_password_age=1)
self.initial_password = uuid.uuid4().hex
self.user = self._create_new_user(self.initial_password)
def test_user_cannot_change_password_before_min_age(self):
# user can change password after create
new_password = uuid.uuid4().hex
self.assertValidChangePassword(self.user['id'], self.initial_password,
new_password)
# user cannot change password before min age
self.assertRaises(exception.PasswordAgeValidationError,
self.identity_api.change_password,
self.make_request(),
user_id=self.user['id'],
original_password=new_password,
new_password=uuid.uuid4().hex)
def test_user_can_change_password_after_min_age(self):
# user can change password after create
new_password = uuid.uuid4().hex
self.assertValidChangePassword(self.user['id'], self.initial_password,
new_password)
# set password_created_at so that the min password age has past
password_created_at = (
datetime.datetime.utcnow() -
datetime.timedelta(
days=CONF.security_compliance.minimum_password_age + 1))
self._update_password_created_at(self.user['id'], password_created_at)
# user can change their password after min password age has past
self.assertValidChangePassword(self.user['id'], new_password,
uuid.uuid4().hex)
def test_user_can_change_password_after_admin_reset(self):
# user can change password after create
new_password = uuid.uuid4().hex
self.assertValidChangePassword(self.user['id'], self.initial_password,
new_password)
# user cannot change password before min age
self.assertRaises(exception.PasswordAgeValidationError,
self.identity_api.change_password,
self.make_request(),
user_id=self.user['id'],
original_password=new_password,
new_password=uuid.uuid4().hex)
# admin reset
new_password = uuid.uuid4().hex
self.user['password'] = new_password
self.identity_api.update_user(self.user['id'], self.user)
# user can change password after admin reset
self.assertValidChangePassword(self.user['id'], new_password,
uuid.uuid4().hex)
def assertValidChangePassword(self, user_id, password, new_password):
self.identity_api.change_password(self.make_request(),
user_id=user_id,
original_password=password,
new_password=new_password)
self.identity_api.authenticate(self.make_request(),
user_id=user_id,
password=new_password)
def _create_new_user(self, password):
user = {
'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'enabled': True,
'password': password
}
return self.identity_api.create_user(user)
def _update_password_created_at(self, user_id, password_create_at):
        # A User instance has a password_ref attribute that is used in
        # authentication and always points to the most recently created
        # password; the ordering of passwords is determined by the
        # `created_at` field. Rewriting `created_at` here would otherwise make
        # password_ref stop pointing at the latest password, so every password
        # except the latest gets a `created_at` slightly earlier than the
        # latest password's.
with sql.session_for_write() as session:
user_ref = session.query(model.User).get(user_id)
latest_password = user_ref.password_ref
slightly_less = datetime.timedelta(minutes=1)
for password_ref in user_ref.local_user.passwords:
password_ref.created_at = password_create_at - slightly_less
latest_password.created_at = password_create_at
class ChangePasswordRequiredAfterFirstUse(test_backend_sql.SqlTests):
def _create_user(self, password, change_password_upon_first_use):
self.config_fixture.config(
group='security_compliance',
change_password_upon_first_use=change_password_upon_first_use)
user_dict = {
'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'enabled': True,
'password': password
}
return self.identity_api.create_user(user_dict)
def assertPasswordIsExpired(self, user_id, password):
self.assertRaises(exception.PasswordExpired,
self.identity_api.authenticate,
self.make_request(),
user_id=user_id,
password=password)
def assertPasswordIsNotExpired(self, user_id, password):
self.identity_api.authenticate(self.make_request(),
user_id=user_id,
password=password)
def test_password_expired_after_create(self):
# create user, password expired
initial_password = uuid.uuid4().hex
user = self._create_user(initial_password, True)
self.assertPasswordIsExpired(user['id'], initial_password)
# change password (self-service), password not expired
new_password = uuid.uuid4().hex
self.identity_api.change_password(self.make_request(),
user['id'],
initial_password,
new_password)
self.assertPasswordIsNotExpired(user['id'], new_password)
def test_password_expired_after_reset(self):
# create user with feature disabled, password not expired
initial_password = uuid.uuid4().hex
user = self._create_user(initial_password, False)
self.assertPasswordIsNotExpired(user['id'], initial_password)
# enable change_password_upon_first_use
self.config_fixture.config(
group='security_compliance',
change_password_upon_first_use=True)
# admin reset, password expired
admin_password = uuid.uuid4().hex
user['password'] = admin_password
self.identity_api.update_user(user['id'], user)
self.assertPasswordIsExpired(user['id'], admin_password)
# change password (self-service), password not expired
new_password = uuid.uuid4().hex
self.identity_api.change_password(self.make_request(),
user['id'],
admin_password,
new_password)
self.assertPasswordIsNotExpired(user['id'], new_password)
def test_password_not_expired_when_feature_disabled(self):
# create user with feature disabled
initial_password = uuid.uuid4().hex
user = self._create_user(initial_password, False)
self.assertPasswordIsNotExpired(user['id'], initial_password)
# admin reset
admin_password = uuid.uuid4().hex
user['password'] = admin_password
self.identity_api.update_user(user['id'], user)
self.assertPasswordIsNotExpired(user['id'], admin_password)
def test_password_not_expired_for_ignore_user(self):
# create user with feature disabled, password not expired
initial_password = uuid.uuid4().hex
user = self._create_user(initial_password, False)
self.assertPasswordIsNotExpired(user['id'], initial_password)
# enable change_password_upon_first_use
self.config_fixture.config(
group='security_compliance',
change_password_upon_first_use=True)
# ignore user and reset password, password not expired
user['options'][iro.IGNORE_CHANGE_PASSWORD_OPT.option_name] = True
admin_password = uuid.uuid4().hex
user['password'] = admin_password
self.identity_api.update_user(user['id'], user)
self.assertPasswordIsNotExpired(user['id'], admin_password)
# set ignore user to false and reset password, password is expired
user['options'][iro.IGNORE_CHANGE_PASSWORD_OPT.option_name] = False
admin_password = uuid.uuid4().hex
user['password'] = admin_password
self.identity_api.update_user(user['id'], user)
self.assertPasswordIsExpired(user['id'], admin_password)
| {
"content_hash": "38021a553cce16028bdc879fcc54a0b3",
"timestamp": "",
"source": "github",
"line_count": 903,
"max_line_length": 79,
"avg_line_length": 46.863787375415285,
"alnum_prop": 0.5812656552767144,
"repo_name": "ilay09/keystone",
"id": "da0a9d593fedd67446a5daa7edd59fbe902f8d3a",
"size": "42864",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/tests/unit/identity/test_backend_sql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "5000747"
},
{
"name": "Shell",
"bytes": "7522"
}
],
"symlink_target": ""
} |
import collections
import copy
import os
import ddt
import mock
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder import utils
from cinder.volume.drivers import remotefs
@ddt.ddt
class RemoteFsSnapDriverTestCase(test.TestCase):
_FAKE_MNT_POINT = '/mnt/fake_hash'
def setUp(self):
super(RemoteFsSnapDriverTestCase, self).setUp()
self._driver = remotefs.RemoteFSSnapDriver()
self._driver._remotefsclient = mock.Mock()
self._driver._execute = mock.Mock()
self._driver._delete = mock.Mock()
self.context = context.get_admin_context()
self._fake_volume = fake_volume.fake_volume_obj(
self.context, provider_location='fake_share')
self._fake_volume_path = os.path.join(self._FAKE_MNT_POINT,
self._fake_volume.name)
self._fake_snapshot = fake_snapshot.fake_snapshot_obj(self.context)
self._fake_snapshot_path = (self._fake_volume_path + '.' +
self._fake_snapshot.id)
self._fake_snapshot.volume = self._fake_volume
def _test_delete_snapshot(self, volume_in_use=False,
stale_snapshot=False,
is_active_image=True):
# If the snapshot is not the active image, it is guaranteed that
# another snapshot exists having it as backing file.
fake_snapshot_name = os.path.basename(self._fake_snapshot_path)
fake_info = {'active': fake_snapshot_name,
self._fake_snapshot.id: fake_snapshot_name}
fake_snap_img_info = mock.Mock()
fake_base_img_info = mock.Mock()
if stale_snapshot:
fake_snap_img_info.backing_file = None
else:
fake_snap_img_info.backing_file = self._fake_volume.name
fake_snap_img_info.file_format = 'qcow2'
fake_base_img_info.backing_file = None
fake_base_img_info.file_format = 'raw'
self._driver._local_path_volume_info = mock.Mock(
return_value=mock.sentinel.fake_info_path)
self._driver._qemu_img_info = mock.Mock(
side_effect=[fake_snap_img_info, fake_base_img_info])
self._driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
self._driver._read_info_file = mock.Mock()
self._driver._write_info_file = mock.Mock()
self._driver._img_commit = mock.Mock()
self._driver._rebase_img = mock.Mock()
self._driver._ensure_share_writable = mock.Mock()
self._driver._delete_stale_snapshot = mock.Mock()
self._driver._delete_snapshot_online = mock.Mock()
expected_info = {
'active': fake_snapshot_name,
self._fake_snapshot.id: fake_snapshot_name
}
if volume_in_use:
self._fake_snapshot.volume.status = 'in-use'
self._driver._read_info_file.return_value = fake_info
self._driver._delete_snapshot(self._fake_snapshot)
if stale_snapshot:
self._driver._delete_stale_snapshot.assert_called_once_with(
self._fake_snapshot)
else:
expected_online_delete_info = {
'active_file': fake_snapshot_name,
'snapshot_file': fake_snapshot_name,
'base_file': self._fake_volume.name,
'base_id': None,
'new_base_file': None
}
self._driver._delete_snapshot_online.assert_called_once_with(
self.context, self._fake_snapshot,
expected_online_delete_info)
elif is_active_image:
self._driver._read_info_file.return_value = fake_info
self._driver._delete_snapshot(self._fake_snapshot)
self._driver._img_commit.assert_called_once_with(
self._fake_snapshot_path)
self.assertNotIn(self._fake_snapshot.id, fake_info)
self._driver._write_info_file.assert_called_once_with(
mock.sentinel.fake_info_path, fake_info)
else:
fake_upper_snap_id = 'fake_upper_snap_id'
fake_upper_snap_path = (
self._fake_volume_path + '-snapshot' + fake_upper_snap_id)
fake_upper_snap_name = os.path.basename(fake_upper_snap_path)
fake_backing_chain = [
{'filename': fake_upper_snap_name,
'backing-filename': fake_snapshot_name},
{'filename': fake_snapshot_name,
'backing-filename': self._fake_volume.name},
{'filename': self._fake_volume.name,
'backing-filename': None}]
fake_info[fake_upper_snap_id] = fake_upper_snap_name
fake_info[self._fake_snapshot.id] = fake_snapshot_name
fake_info['active'] = fake_upper_snap_name
expected_info = copy.deepcopy(fake_info)
del expected_info[self._fake_snapshot.id]
self._driver._read_info_file.return_value = fake_info
self._driver._get_backing_chain_for_path = mock.Mock(
return_value=fake_backing_chain)
self._driver._delete_snapshot(self._fake_snapshot)
self._driver._img_commit.assert_called_once_with(
self._fake_snapshot_path)
self._driver._rebase_img.assert_called_once_with(
fake_upper_snap_path, self._fake_volume.name,
fake_base_img_info.file_format)
self._driver._write_info_file.assert_called_once_with(
mock.sentinel.fake_info_path, expected_info)
def test_delete_snapshot_when_active_file(self):
self._test_delete_snapshot()
def test_delete_snapshot_in_use(self):
self._test_delete_snapshot(volume_in_use=True)
def test_delete_snapshot_in_use_stale_snapshot(self):
self._test_delete_snapshot(volume_in_use=True,
stale_snapshot=True)
def test_delete_snapshot_with_one_upper_file(self):
self._test_delete_snapshot(is_active_image=False)
def test_delete_stale_snapshot(self):
fake_snapshot_name = os.path.basename(self._fake_snapshot_path)
fake_snap_info = {
'active': self._fake_volume.name,
self._fake_snapshot.id: fake_snapshot_name
}
expected_info = {'active': self._fake_volume.name}
self._driver._local_path_volume_info = mock.Mock(
return_value=mock.sentinel.fake_info_path)
self._driver._read_info_file = mock.Mock(
return_value=fake_snap_info)
self._driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
self._driver._write_info_file = mock.Mock()
self._driver._delete_stale_snapshot(self._fake_snapshot)
self._driver._delete.assert_called_once_with(self._fake_snapshot_path)
self._driver._write_info_file.assert_called_once_with(
mock.sentinel.fake_info_path, expected_info)
def test_do_create_snapshot(self):
self._driver._local_volume_dir = mock.Mock(
return_value=self._fake_volume_path)
fake_backing_path = os.path.join(
self._driver._local_volume_dir(),
self._fake_volume.name)
self._driver._execute = mock.Mock()
self._driver._set_rw_permissions = mock.Mock()
self._driver._qemu_img_info = mock.Mock(
return_value=mock.Mock(file_format=mock.sentinel.backing_fmt))
self._driver._do_create_snapshot(self._fake_snapshot,
self._fake_volume.name,
self._fake_snapshot_path)
command1 = ['qemu-img', 'create', '-f', 'qcow2', '-o',
'backing_file=%s,backing_fmt=%s' %
(fake_backing_path,
mock.sentinel.backing_fmt),
self._fake_snapshot_path,
"%dG" % self._fake_volume.size]
command2 = ['qemu-img', 'rebase', '-u',
'-b', self._fake_volume.name,
'-F', mock.sentinel.backing_fmt,
self._fake_snapshot_path]
self._driver._execute.assert_any_call(*command1, run_as_root=True)
self._driver._execute.assert_any_call(*command2, run_as_root=True)
def _test_create_snapshot(self, volume_in_use=False):
fake_snapshot_info = {}
fake_snapshot_file_name = os.path.basename(self._fake_snapshot_path)
self._driver._local_path_volume_info = mock.Mock(
return_value=mock.sentinel.fake_info_path)
self._driver._read_info_file = mock.Mock(
return_value=fake_snapshot_info)
self._driver._do_create_snapshot = mock.Mock()
self._driver._create_snapshot_online = mock.Mock()
self._driver._write_info_file = mock.Mock()
self._driver.get_active_image_from_info = mock.Mock(
return_value=self._fake_volume.name)
self._driver._get_new_snap_path = mock.Mock(
return_value=self._fake_snapshot_path)
expected_snapshot_info = {
'active': fake_snapshot_file_name,
self._fake_snapshot.id: fake_snapshot_file_name
}
if volume_in_use:
self._fake_snapshot.volume.status = 'in-use'
expected_method_called = '_create_snapshot_online'
else:
self._fake_snapshot.volume.status = 'available'
expected_method_called = '_do_create_snapshot'
self._driver._create_snapshot(self._fake_snapshot)
fake_method = getattr(self._driver, expected_method_called)
fake_method.assert_called_with(
self._fake_snapshot, self._fake_volume.name,
self._fake_snapshot_path)
self._driver._write_info_file.assert_called_with(
mock.sentinel.fake_info_path,
expected_snapshot_info)
def test_create_snapshot_volume_available(self):
self._test_create_snapshot()
def test_create_snapshot_volume_in_use(self):
self._test_create_snapshot(volume_in_use=True)
def test_create_snapshot_invalid_volume(self):
self._fake_snapshot.volume.status = 'error'
self.assertRaises(exception.InvalidVolume,
self._driver._create_snapshot,
self._fake_snapshot)
@mock.patch('cinder.db.snapshot_get')
@mock.patch('time.sleep')
def test_create_snapshot_online_with_concurrent_delete(
self, mock_sleep, mock_snapshot_get):
self._driver._nova = mock.Mock()
# Test what happens when progress is so slow that someone
# decides to delete the snapshot while the last known status is
# "creating".
mock_snapshot_get.side_effect = [
{'status': 'creating', 'progress': '42%'},
{'status': 'creating', 'progress': '45%'},
{'status': 'deleting'},
]
with mock.patch.object(self._driver, '_do_create_snapshot') as \
mock_do_create_snapshot:
self.assertRaises(exception.RemoteFSConcurrentRequest,
self._driver._create_snapshot_online,
self._fake_snapshot,
self._fake_volume.name,
self._fake_snapshot_path)
mock_do_create_snapshot.assert_called_once_with(
self._fake_snapshot, self._fake_volume.name,
self._fake_snapshot_path)
self.assertEqual([mock.call(1), mock.call(1)],
mock_sleep.call_args_list)
self.assertEqual(3, mock_snapshot_get.call_count)
mock_snapshot_get.assert_called_with(self._fake_snapshot._context,
self._fake_snapshot.id)
@mock.patch.object(utils, 'synchronized')
def _locked_volume_operation_test_helper(self, mock_synchronized, func,
expected_exception=False,
*args, **kwargs):
def mock_decorator(*args, **kwargs):
def mock_inner(f):
return f
return mock_inner
mock_synchronized.side_effect = mock_decorator
expected_lock = '%s-%s' % (self._driver.driver_prefix,
self._fake_volume.id)
if expected_exception:
self.assertRaises(expected_exception, func,
self._driver,
*args, **kwargs)
else:
ret_val = func(self._driver, *args, **kwargs)
mock_synchronized.assert_called_with(expected_lock,
external=False)
self.assertEqual(mock.sentinel.ret_val, ret_val)
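    # Editorial note (comment added for clarity, not in the original test):
    # the helper above replaces utils.synchronized with a pass-through
    # decorator, so the decorated function runs without taking a real lock
    # while the expected lock name ('<driver_prefix>-<volume id>') can still
    # be asserted against the mock.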
def test_locked_volume_id_operation(self):
mock_volume = mock.Mock()
mock_volume.id = self._fake_volume.id
@remotefs.locked_volume_id_operation
def synchronized_func(inst, volume):
return mock.sentinel.ret_val
self._locked_volume_operation_test_helper(func=synchronized_func,
volume=mock_volume)
def test_locked_volume_id_snapshot_operation(self):
mock_snapshot = mock.Mock()
mock_snapshot.volume.id = self._fake_volume.id
@remotefs.locked_volume_id_operation
def synchronized_func(inst, snapshot):
return mock.sentinel.ret_val
self._locked_volume_operation_test_helper(func=synchronized_func,
snapshot=mock_snapshot)
def test_locked_volume_id_operation_exception(self):
@remotefs.locked_volume_id_operation
def synchronized_func(inst):
return mock.sentinel.ret_val
self._locked_volume_operation_test_helper(
func=synchronized_func,
expected_exception=exception.VolumeBackendAPIException)
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch('os.path.basename')
def _test_qemu_img_info(self, mock_basename,
mock_qemu_img_info, backing_file, basedir,
valid_backing_file=True):
fake_vol_name = 'fake_vol_name'
mock_info = mock_qemu_img_info.return_value
mock_info.image = mock.sentinel.image_path
mock_info.backing_file = backing_file
self._driver._VALID_IMAGE_EXTENSIONS = ['vhd', 'vhdx', 'raw', 'qcow2']
mock_basename.side_effect = [mock.sentinel.image_basename,
mock.sentinel.backing_file_basename]
if valid_backing_file:
img_info = self._driver._qemu_img_info_base(
mock.sentinel.image_path, fake_vol_name, basedir)
self.assertEqual(mock_info, img_info)
self.assertEqual(mock.sentinel.image_basename,
mock_info.image)
expected_basename_calls = [mock.call(mock.sentinel.image_path)]
if backing_file:
self.assertEqual(mock.sentinel.backing_file_basename,
mock_info.backing_file)
expected_basename_calls.append(mock.call(backing_file))
mock_basename.assert_has_calls(expected_basename_calls)
else:
self.assertRaises(exception.RemoteFSException,
self._driver._qemu_img_info_base,
mock.sentinel.image_path,
fake_vol_name, basedir)
mock_qemu_img_info.assert_called_with(mock.sentinel.image_path)
@ddt.data([None, '/fake_basedir'],
['/fake_basedir/cb2016/fake_vol_name', '/fake_basedir'],
['/fake_basedir/cb2016/fake_vol_name.vhd', '/fake_basedir'],
['/fake_basedir/cb2016/fake_vol_name.404f-404',
'/fake_basedir'],
['/fake_basedir/cb2016/fake_vol_name.tmp-snap-404f-404',
'/fake_basedir'])
@ddt.unpack
def test_qemu_img_info_valid_backing_file(self, backing_file, basedir):
self._test_qemu_img_info(backing_file=backing_file,
basedir=basedir)
@ddt.data(['/other_random_path', '/fake_basedir'],
['/other_basedir/cb2016/fake_vol_name', '/fake_basedir'],
['/fake_basedir/invalid_hash/fake_vol_name', '/fake_basedir'],
['/fake_basedir/cb2016/invalid_vol_name', '/fake_basedir'],
['/fake_basedir/cb2016/fake_vol_name.info', '/fake_basedir'],
['/fake_basedir/cb2016/fake_vol_name-random-suffix',
'/fake_basedir'],
['/fake_basedir/cb2016/fake_vol_name.invalidext',
'/fake_basedir'])
@ddt.unpack
def test_qemu_img_info_invalid_backing_file(self, backing_file, basedir):
self._test_qemu_img_info(backing_file=backing_file,
basedir=basedir,
valid_backing_file=False)
def test_create_cloned_volume(self):
drv = self._driver
with mock.patch.object(drv, '_create_snapshot') as \
mock_create_snapshot,\
mock.patch.object(drv, '_delete_snapshot') as \
mock_delete_snapshot,\
mock.patch.object(drv, '_copy_volume_from_snapshot') as \
mock_copy_volume_from_snapshot:
volume = fake_volume.fake_volume_obj(self.context)
src_vref_id = '375e32b2-804a-49f2-b282-85d1d5a5b9e1'
src_vref = fake_volume.fake_volume_obj(
self.context,
id=src_vref_id,
name='volume-%s' % src_vref_id)
vol_attrs = ['provider_location', 'size', 'id', 'name', 'status',
'volume_type', 'metadata']
Volume = collections.namedtuple('Volume', vol_attrs)
snap_attrs = ['volume_name', 'volume_size', 'name',
'volume_id', 'id', 'volume']
Snapshot = collections.namedtuple('Snapshot', snap_attrs)
volume_ref = Volume(id=volume.id,
name=volume.name,
status=volume.status,
provider_location=volume.provider_location,
size=volume.size,
volume_type=volume.volume_type,
metadata=volume.metadata)
snap_ref = Snapshot(volume_name=volume.name,
name='clone-snap-%s' % src_vref.id,
volume_size=src_vref.size,
volume_id=src_vref.id,
id='tmp-snap-%s' % src_vref.id,
volume=src_vref)
drv.create_cloned_volume(volume, src_vref)
mock_create_snapshot.assert_called_once_with(snap_ref)
mock_copy_volume_from_snapshot.assert_called_once_with(
snap_ref, volume_ref, volume['size'])
self.assertTrue(mock_delete_snapshot.called)
| {
"content_hash": "c263f9d534129a75ca4ff85cd9d55e29",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 78,
"avg_line_length": 42.94273127753304,
"alnum_prop": 0.5647825194911776,
"repo_name": "cloudbase/cinder",
"id": "03c403640d063cc42a5a747a89599af3f5026c02",
"size": "20113",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/volume/drivers/test_remotefs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17586629"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
} |
'''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from ..data_structures.sarray_builder import SArrayBuilder
import unittest
import array
import datetime as dt
from ..util.timezone import GMT
class SArrayBuilderTest(unittest.TestCase):
def __test_equal(self, _sarray, _data, _type):
self.assertEqual(_sarray.dtype(), _type)
self.assertEqual(len(_sarray), len(_data))
self.assertSequenceEqual(list(_sarray.head(_sarray.size())), _data)
def __test_append(self, sb, data, dtype):
for i in data:
sb.append(i)
self.assertEquals(sb.get_type(), dtype)
sa = sb.close()
self.__test_equal(sa, data, dtype)
def __test_append_multiple(self, sb, data, dtype):
sb.append_multiple(data)
self.assertEquals(sb.get_type(), dtype)
sa = sb.close()
self.__test_equal(sa, data, dtype)
def test_basic(self):
data_to_test = [([1,-1,None,2],int),
([i for i in range(20000)], int),
([None, 1.0, -1.0, 2.3],float),
(["hi", None, "hello", "None"],str),
([dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(0.0)),None],dt.datetime),
([["hi",1],None,["hi",2,3],["hello"]],list),
([array.array('d',[1.0,2.0]),array.array('d',[3.0,4.0]),None],array.array),
([{'a':1,'b':2},{'c':3,'d':4},None],dict),
]
for i in data_to_test:
sb = SArrayBuilder(i[1])
self.__test_append(sb, i[0], i[1])
sb = SArrayBuilder(i[1])
self.__test_append_multiple(sb, i[0], i[1])
def test_history(self):
sb = SArrayBuilder(int, history_size=10)
sb.append_multiple((i for i in range(8)))
hist = sb.read_history(3)
self.assertEquals(hist,[5,6,7])
hist = sb.read_history(20)
self.assertEquals(hist, [i for i in range(8)])
hist = sb.read_history()
self.assertEquals(hist, [i for i in range(8)])
sb.append_multiple((i for i in range(5)))
hist = sb.read_history(10)
self.assertEquals(hist, [3,4,5,6,7,0,1,2,3,4])
sb.append(50)
hist = sb.read_history(10)
self.assertEquals(hist, [4,5,6,7,0,1,2,3,4,50])
hist = sb.read_history(-1)
self.assertEquals(hist, [])
hist = sb.read_history(0)
self.assertEquals(hist, [])
sa = sb.close()
self.__test_equal(sa,[i for i in range(8)] + [i for i in range(5)] + [50],int)
def test_segments(self):
sb = SArrayBuilder(int, num_segments=4)
sb.append_multiple((i for i in range(20,30)), segment=2)
sb.append_multiple((i for i in range(10,20)), segment=1)
sb.append_multiple((i for i in range(30,40)), segment=3)
sb.append_multiple((i for i in range(0,10)), segment=0)
hist = sb.read_history(3, segment=0)
self.assertSequenceEqual(hist, [7,8,9])
hist = sb.read_history(3, segment=1)
self.assertSequenceEqual(hist, [17,18,19])
hist = sb.read_history(3, segment=2)
self.assertSequenceEqual(hist, [27,28,29])
hist = sb.read_history(3, segment=3)
self.assertSequenceEqual(hist, [37,38,39])
with self.assertRaises(RuntimeError):
sb.read_history(3, segment=99)
sa = sb.close()
self.__test_equal(sa, range(40), int)
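# Editorial sketch (usage inferred from the tests above, not taken from the
# SFrame documentation): a minimal SArrayBuilder round trip would look like
#
#     sb = SArrayBuilder(int)
#     sb.append(1)
#     sb.append_multiple([2, 3])
#     sb.read_history(2)   # -> [2, 3], most recent values last
#     sa = sb.close()      # SArray of dtype int containing [1, 2, 3]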
| {
"content_hash": "6798955527836b92e88a7b7fe357fb0e",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 108,
"avg_line_length": 36.59,
"alnum_prop": 0.552063405301995,
"repo_name": "TobyRoseman/SFrame",
"id": "0a3309cbe3dbd69a58bf7a4cd954353a1060b12e",
"size": "3659",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "oss_src/unity/python/sframe/test/test_sarray_builder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "142942"
},
{
"name": "C++",
"bytes": "11674775"
},
{
"name": "CMake",
"bytes": "104941"
},
{
"name": "CSS",
"bytes": "127000"
},
{
"name": "HTML",
"bytes": "24407"
},
{
"name": "JavaScript",
"bytes": "20909"
},
{
"name": "Makefile",
"bytes": "9614"
},
{
"name": "Perl",
"bytes": "9663"
},
{
"name": "Python",
"bytes": "2225333"
},
{
"name": "R",
"bytes": "537"
},
{
"name": "Scala",
"bytes": "5232"
},
{
"name": "Shell",
"bytes": "53145"
},
{
"name": "Smarty",
"bytes": "966"
},
{
"name": "XSLT",
"bytes": "74068"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2017 by Benjamin Manns
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Benjamin Manns
This code shows you how to use the hydrological signatures. They can also be implemented in the objective function of a spotpy setup (see the sketch at the end of this file).
"""
from pprint import pprint
import spotpy.signatures as sig
from spotpy.examples.spot_setup_hymod import spot_setup
print("INFO: For this example you need the folder >hymod< in the examples folder")
spot_setup = spot_setup()
parameterset = spot_setup.parameters()["random"]
simulation = spot_setup.simulation(parameterset)
observation = spot_setup.evaluation()
# Examples for easy copy & paste
print(sig.getMeanFlow(simulation, observation, mode="get_signature"))
print(sig.getMeanFlow(simulation, observation, mode="get_raw_data"))
print(sig.getMeanFlow(simulation, observation, mode="calc_Dev"))
print(sig.getMedianFlow(simulation, observation, mode="get_signature"))
print(sig.getMedianFlow(simulation, observation, mode="get_raw_data"))
print(sig.getMedianFlow(simulation, observation, mode="calc_Dev"))
print(sig.getSkewness(simulation, observation, mode="get_signature"))
print(sig.getSkewness(simulation, observation, mode="get_raw_data"))
print(sig.getSkewness(simulation, observation, mode="calc_Dev"))
print(sig.getCoeffVariation(simulation, observation, mode="get_signature"))
print(sig.getCoeffVariation(simulation, observation, mode="get_raw_data"))
print(sig.getCoeffVariation(simulation, observation, mode="calc_Dev"))
print(sig.getQ001(simulation, observation, mode="get_signature"))
print(sig.getQ001(simulation, observation, mode="get_raw_data"))
print(sig.getQ001(simulation, observation, mode="calc_Dev"))
print(sig.getQ01(simulation, observation, mode="get_signature"))
print(sig.getQ01(simulation, observation, mode="get_raw_data"))
print(sig.getQ01(simulation, observation, mode="calc_Dev"))
print(sig.getQ1(simulation, observation, mode="get_signature"))
print(sig.getQ1(simulation, observation, mode="get_raw_data"))
print(sig.getQ1(simulation, observation, mode="calc_Dev"))
print(sig.getQ5(simulation, observation, mode="get_signature"))
print(sig.getQ5(simulation, observation, mode="get_raw_data"))
print(sig.getQ5(simulation, observation, mode="calc_Dev"))
print(sig.getQ10(simulation, observation, mode="get_signature"))
print(sig.getQ10(simulation, observation, mode="get_raw_data"))
print(sig.getQ10(simulation, observation, mode="calc_Dev"))
print(sig.getQ20(simulation, observation, mode="get_signature"))
print(sig.getQ20(simulation, observation, mode="get_raw_data"))
print(sig.getQ20(simulation, observation, mode="calc_Dev"))
print(sig.getQ85(simulation, observation, mode="get_signature"))
print(sig.getQ85(simulation, observation, mode="get_raw_data"))
print(sig.getQ85(simulation, observation, mode="calc_Dev"))
print(sig.getQ95(simulation, observation, mode="get_signature"))
print(sig.getQ95(simulation, observation, mode="get_raw_data"))
print(sig.getQ95(simulation, observation, mode="calc_Dev"))
print(sig.getQ99(simulation, observation, mode="get_signature"))
print(sig.getQ99(simulation, observation, mode="get_raw_data"))
print(sig.getQ99(simulation, observation, mode="calc_Dev"))
print(sig.getSlopeFDC(simulation, observation, mode="get_signature"))
print(sig.getSlopeFDC(simulation, observation, mode="get_raw_data"))
print(sig.getSlopeFDC(simulation, observation, mode="calc_Dev"))
try:
import matplotlib.pyplot as plt
import pandas as pd
    timespanlen = len(simulation)
ddd = pd.date_range("2015-01-01 11:00", freq="5min", periods=timespanlen)
dd_daily = pd.date_range("2015-05-01", periods=timespanlen)
print(
sig.getAverageFloodOverflowPerSection(
simulation,
observation,
mode="get_signature",
datetime_series=dd_daily,
threshold_value=1,
)
)
print(
sig.getAverageFloodOverflowPerSection(
simulation,
observation,
mode="get_raw_data",
datetime_series=dd_daily,
threshold_value=1,
)
)
print(
sig.getAverageFloodOverflowPerSection(
simulation,
observation,
mode="calc_Dev",
datetime_series=dd_daily,
threshold_value=1,
)
)
print(
sig.getAverageFloodFrequencyPerSection(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=1,
mode="get_signature",
)
)
print(
sig.getAverageFloodFrequencyPerSection(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=1,
mode="get_raw_data",
)
)
    # If you want to plot the raw data, simply do:
vals = sig.getAverageFloodFrequencyPerSection(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=1,
mode="get_raw_data",
)
vals.plot()
plt.show()
print(
sig.getAverageFloodFrequencyPerSection(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=1,
mode="calc_Dev",
)
)
print(
sig.getAverageFloodDuration(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=3,
mode="get_signature",
)
)
print(
sig.getAverageFloodDuration(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=3,
mode="get_raw_data",
)
)
print(
sig.getAverageFloodDuration(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=3,
mode="calc_Dev",
)
)
print(
sig.getAverageBaseflowUnderflowPerSection(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=4,
mode="get_signature",
)
)
print(
sig.getAverageBaseflowUnderflowPerSection(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=4,
mode="get_raw_data",
)
)
print(
sig.getAverageBaseflowUnderflowPerSection(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=4,
mode="calc_Dev",
)
)
print(
sig.getAverageBaseflowFrequencyPerSection(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=3,
mode="get_signature",
)
)
print(
sig.getAverageBaseflowFrequencyPerSection(
simulation,
observation,
datetime_series=ddd,
threshold_value=5,
mode="get_raw_data",
)
)
print(
sig.getAverageBaseflowFrequencyPerSection(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=3,
mode="calc_Dev",
)
)
print(
sig.getAverageBaseflowDuration(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=3,
mode="get_signature",
)
)
print(
sig.getAverageBaseflowDuration(
simulation,
observation,
datetime_series=ddd,
threshold_value=5,
mode="get_raw_data",
)
)
print(
sig.getAverageBaseflowDuration(
simulation,
observation,
datetime_series=dd_daily,
threshold_value=3,
mode="calc_Dev",
)
)
print(
sig.getFloodFrequency(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
threshold_value=3,
mode="get_signature",
)
)
print(
sig.getFloodFrequency(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
threshold_value=3,
mode="get_raw_data",
)
)
print(
sig.getFloodFrequency(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
threshold_value=3,
mode="calc_Dev",
)
)
print(
sig.getBaseflowFrequency(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
threshold_value=3,
mode="get_signature",
)
)
print(
sig.getBaseflowFrequency(
simulation,
observation,
datetime_series=pd.date_range(
"2015-05-01", freq="5min", periods=timespanlen
),
threshold_value=3,
mode="get_raw_data",
)
)
print(
sig.getBaseflowFrequency(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
threshold_value=3,
mode="calc_Dev",
)
)
print(
sig.getLowFlowVar(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
mode="get_signature",
)
)
print(
sig.getLowFlowVar(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
mode="get_raw_data",
)
)
print(
sig.getLowFlowVar(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
mode="calc_Dev",
)
)
print(
sig.getHighFlowVar(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
mode="get_signature",
)
)
print(
sig.getHighFlowVar(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
mode="get_raw_data",
)
)
print(
sig.getHighFlowVar(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
mode="calc_Dev",
)
)
print(
sig.getBaseflowIndex(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
mode="get_signature",
)
)
print(
sig.getBaseflowIndex(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
mode="get_raw_data",
)
)
print(
sig.getBaseflowIndex(
simulation,
observation,
datetime_series=pd.date_range("2015-05-01", periods=timespanlen),
mode="calc_Dev",
)
)
except ImportError:
print("Please install Pandas to use these signature functions")
| {
"content_hash": "596c5800ea85ad3b7ed20a1fd402f4c9",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 119,
"avg_line_length": 28.621890547263682,
"alnum_prop": 0.5885624891361029,
"repo_name": "thouska/spotpy",
"id": "9ff1614edf6615462c1b8e04af8b3a41dfb2c931",
"size": "11530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/tutorial_signatures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1032"
},
{
"name": "Cython",
"bytes": "4110"
},
{
"name": "Makefile",
"bytes": "433"
},
{
"name": "Python",
"bytes": "690905"
},
{
"name": "Shell",
"bytes": "304"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django import VERSION as DJANGO_VERSION
from django.middleware.csrf import CsrfViewMiddleware
from django.middleware.csrf import _sanitize_token as sanitize_csrf_token
if DJANGO_VERSION < (1, 10):
from django.middleware.csrf import _get_new_csrf_key as generate_csrf_token
else:
from django.middleware.csrf import _get_new_csrf_token as generate_csrf_token
from yepes.conf import settings
class CsrfTokenMiddleware(CsrfViewMiddleware):
"""
Middleware that ensures that all views have a correct ``csrf_token``
available to ``RequestContext``, but without the CSRF protection that
``CsrfViewMiddleware`` enforces.
Very useful when you need to render forms targeting a view with CSRF
protection.
"""
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
try:
cookie_token = request.COOKIES[settings.CSRF_COOKIE_NAME]
except KeyError:
# Generate token and store it in the request, so it's
# available to the view.
request.META['CSRF_COOKIE'] = generate_csrf_token()
else:
csrf_token = sanitize_csrf_token(cookie_token)
if csrf_token != cookie_token:
# Cookie token needed to be replaced; the cookie
# needs to be reset.
request.csrf_cookie_needs_reset = True
# Use same token next time.
request.META['CSRF_COOKIE'] = csrf_token
return self._accept(request)
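# Editorial sketch (assumption, not taken from the yepes documentation): this
# middleware would typically be listed in the project's settings in place of
# Django's CsrfViewMiddleware, e.g.
#
#     MIDDLEWARE_CLASSES = [
#         ...
#         'yepes.middleware.csrf_token.CsrfTokenMiddleware',
#         ...
#     ]
#
# (or MIDDLEWARE on Django >= 1.10). The dotted path is inferred from this
# file's location and may differ in the released package.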
| {
"content_hash": "189506ec144d0208d6e22fdfd147ac9e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 81,
"avg_line_length": 36.2,
"alnum_prop": 0.6660527931246163,
"repo_name": "samuelmaudo/yepes",
"id": "f321cbbcda0d091766c7a6adcd0ca94ef6a32b4e",
"size": "1653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yepes/middleware/csrf_token.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1485"
},
{
"name": "CSS",
"bytes": "2805"
},
{
"name": "HTML",
"bytes": "18543"
},
{
"name": "JavaScript",
"bytes": "56039"
},
{
"name": "Python",
"bytes": "2415982"
}
],
"symlink_target": ""
} |
class User:
def __init__(self, name, email):
self.name = name
self.email = email
| {
"content_hash": "1607ea87d54bd0fdf5a2a1e7281191e1",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 36,
"avg_line_length": 25.25,
"alnum_prop": 0.5445544554455446,
"repo_name": "tiangolo/uwsgi-nginx-flask-docker",
"id": "5389428ce4f08eb33f2e85c38a9bad0bd6fc37f5",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example-flask-package-python3.8/app/app/models/user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7974"
},
{
"name": "HTML",
"bytes": "251"
},
{
"name": "Python",
"bytes": "26900"
},
{
"name": "Shell",
"bytes": "3933"
}
],
"symlink_target": ""
} |
import unittest
import utils
from testbin import TestBin
class TestBinOsprocci(TestBin, unittest.TestCase):
def setUp(self):
self.bin = 'osprocci'
def tearDown(self):
pass
| {
"content_hash": "a0556519501bc951ca812fb3c847496e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 50,
"avg_line_length": 18.09090909090909,
"alnum_prop": 0.6934673366834171,
"repo_name": "ow2-mirrors/compatibleone",
"id": "e02d0a8d859cfd8a101839a389ee693fa70047a3",
"size": "228",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "testsuite/basic/osprocci.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8939064"
},
{
"name": "C++",
"bytes": "12433"
},
{
"name": "Java",
"bytes": "158731"
},
{
"name": "Objective-C",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "751"
},
{
"name": "Python",
"bytes": "157987"
},
{
"name": "Shell",
"bytes": "67378"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.translation import pgettext_lazy
from ...account.models import User
from ...core.utils import get_paginator_items
from ..emails import (
send_promote_customer_to_staff_email, send_set_password_staff_email)
from ..views import staff_member_required
from .filters import StaffFilter
from .forms import StaffForm
from .utils import remove_staff_member
@staff_member_required
@permission_required('account.manage_staff')
def staff_list(request):
staff_members = User.objects.filter(is_staff=True).prefetch_related(
'default_billing_address').order_by('email')
staff_filter = StaffFilter(request.GET, queryset=staff_members)
staff_members = get_paginator_items(
staff_filter.qs, settings.DASHBOARD_PAGINATE_BY,
request.GET.get('page'))
ctx = {
'staff': staff_members, 'filter_set': staff_filter,
'is_empty': not staff_filter.queryset.exists()}
return TemplateResponse(request, 'dashboard/staff/list.html', ctx)
@staff_member_required
@permission_required('account.manage_staff')
def staff_details(request, pk):
queryset = User.objects.filter(is_staff=True)
staff_member = get_object_or_404(queryset, pk=pk)
form = StaffForm(
request.POST or None, instance=staff_member, user=request.user)
if form.is_valid():
form.save()
msg = pgettext_lazy(
'Dashboard message', 'Updated staff member %s') % (staff_member,)
messages.success(request, msg)
return redirect('dashboard:staff-list')
ctx = {'staff_member': staff_member, 'form': form}
return TemplateResponse(request, 'dashboard/staff/detail.html', ctx)
@staff_member_required
@permission_required('account.manage_staff')
def staff_create(request):
try:
staff = User.objects.get(email=request.POST.get('email'))
created = False
except User.DoesNotExist:
staff = User()
created = True
form = StaffForm(
request.POST or None,
instance=staff, user=request.user, initial={'is_staff': True})
if form.is_valid():
form.save()
msg = pgettext_lazy(
'Dashboard message', 'Added staff member %s') % (staff,)
messages.success(request, msg)
if created:
send_set_password_staff_email.delay(staff.pk)
else:
send_promote_customer_to_staff_email.delay(staff.pk)
return redirect('dashboard:staff-list')
ctx = {'form': form}
return TemplateResponse(request, 'dashboard/staff/detail.html', ctx)
@staff_member_required
@permission_required('account.manage_staff')
def staff_delete(request, pk):
queryset = User.objects.prefetch_related('orders')
staff = get_object_or_404(queryset, pk=pk)
if request.method == 'POST':
remove_staff_member(staff)
msg = pgettext_lazy(
'Dashboard message', 'Removed staff member %s') % (staff,)
messages.success(request, msg)
return redirect('dashboard:staff-list')
ctx = {'staff': staff, 'orders': staff.orders.count()}
return TemplateResponse(
request, 'dashboard/staff/modal/confirm_delete.html', ctx)
| {
"content_hash": "233832e774796447f0d9743c7eeb9bd1",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 77,
"avg_line_length": 38.1123595505618,
"alnum_prop": 0.6860259433962265,
"repo_name": "UITools/saleor",
"id": "b6b156088893b3207c372a1ae008dd3bea905320",
"size": "3392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/dashboard/staff/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96006"
},
{
"name": "Dockerfile",
"bytes": "1859"
},
{
"name": "HTML",
"bytes": "556961"
},
{
"name": "JavaScript",
"bytes": "64679"
},
{
"name": "Python",
"bytes": "2316144"
},
{
"name": "Shell",
"bytes": "1265"
},
{
"name": "TypeScript",
"bytes": "2526265"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: khmer-project@idyll.org
#
# Tests for the ReadParser and Read classes.
import khmer
from khmer import ReadParser
from . import khmer_tst_utils as utils
from nose.plugins.attrib import attr
from functools import reduce
def test_read_properties():
# Note: Using a data file with only one read.
rparser = ReadParser(utils.get_test_data("single-read.fq"))
# Check the properties of all one reads in data set.
for read in rparser:
assert read.name == "895:1:1:1246:14654 1:N:0:NNNNN"
assert read.sequence == "CAGGCGCCCACCACCGTGCCCTCCAACCTGATGGT"
assert read.annotations == ""
assert read.quality == """][aaX__aa[`ZUZ[NONNFNNNNNO_____^RQ_"""
def test_with_default_arguments():
read_names = []
# Note: Using a data file where read names are just integers on [0,99).
rparser = ReadParser(utils.get_test_data("random-20-a.fa"))
for read in rparser:
read_names.append(int(read.name))
# "Derandomize".
read_names.sort()
# Each read number should match the corresponding name.
for m, n in enumerate(read_names):
assert m == n
def test_num_reads():
"""Test ReadParser.num_reads"""
reads_count = 0
rparser = ReadParser(utils.get_test_data("100-reads.fq.gz"))
for _ in rparser:
reads_count += 1
assert reads_count == 100
assert rparser.num_reads == 100
@attr('multithread')
def test_num_reads_threads():
"""Test threadsaftey of ReadParser's read counting"""
import threading
def count_reads(rparser):
for _ in rparser:
pass
n_threads = 4
threads = []
rparser = ReadParser(utils.get_test_data("100-reads.fq.gz"))
for _ in range(n_threads):
thr = threading.Thread(target=count_reads, args=[rparser, ])
threads.append(thr)
thr.start()
for thr in threads:
thr.join()
assert rparser.num_reads == 100
def test_num_reads_truncated():
n_reads = 0
rparser = ReadParser(utils.get_test_data("truncated.fq"))
try:
for read in rparser:
n_reads += 1
except ValueError as err:
assert "Sequence is empty" in str(err), str(err)
assert rparser.num_reads == 1, "%d valid reads in file, got %d" % (
n_reads, rparser.num_reads)
def test_gzip_decompression():
reads_count = 0
rparser = ReadParser(utils.get_test_data("100-reads.fq.gz"))
for read in rparser:
reads_count += 1
assert 100 == reads_count
def test_gzip_decompression_truncated():
rparser = ReadParser(utils.get_test_data("100-reads.fq.truncated.gz"))
try:
for read in rparser:
pass
assert 0, "this should fail"
except OSError as err:
print(str(err))
def test_gzip_decompression_truncated_pairiter():
rparser = ReadParser(utils.get_test_data("100-reads.fq.truncated.gz"))
try:
for read in rparser.iter_read_pairs():
pass
assert 0, "this should fail"
except OSError as err:
print(str(err))
except ValueError as err:
print(str(err))
def test_bzip2_decompression():
reads_count = 0
rparser = ReadParser(utils.get_test_data("100-reads.fq.bz2"))
for read in rparser:
reads_count += 1
assert 100 == reads_count
def test_bzip2_decompression_truncated():
rparser = ReadParser(utils.get_test_data("100-reads.fq.truncated.bz2"))
try:
for read in rparser:
pass
assert 0, "this should fail"
except OSError as err:
print(str(err))
except ValueError as err:
print(str(err))
def test_bzip2_decompression_truncated_pairiter():
rparser = ReadParser(utils.get_test_data("100-reads.fq.truncated.bz2"))
try:
for read in rparser.iter_read_pairs():
pass
assert 0, "this should fail"
except OSError as err:
print(str(err))
except ValueError as err:
print(str(err))
def test_badbzip2():
try:
rparser = ReadParser(utils.get_test_data("test-empty.fa.bz2"))
for read in rparser:
pass
assert 0, "this should fail"
except OSError as err:
print(str(err))
except ValueError as err:
print(str(err))
@attr('multithread')
def test_with_multiple_threads(testfile="test-reads.fq.bz2"):
import operator
import threading
reads_count_1thr = 0
rparser = ReadParser(utils.get_test_data(testfile))
for read in rparser:
reads_count_1thr += 1
def count_reads(rparser, counters, tnum):
counters[tnum] = reduce(operator.add, (1 for read in rparser))
N_THREADS = 4
threads = []
reads_counts_per_thread = [0] * N_THREADS
rparser = ReadParser(utils.get_test_data(testfile))
for tnum in range(N_THREADS):
t = \
threading.Thread(
target=count_reads,
args=[rparser, reads_counts_per_thread, tnum]
)
threads.append(t)
t.start()
for t in threads:
t.join()
assert reads_count_1thr == sum(reads_counts_per_thread), \
reads_counts_per_thread
@attr('multithread')
def test_with_multiple_threads_big():
test_with_multiple_threads(testfile="test-large.fa")
@attr('multithread')
def test_old_illumina_pair_mating():
import threading
rparser = ReadParser(utils.get_test_data("test-reads.fa"))
def thread_1_runtime(rparser):
for read in rparser:
pass
def thread_2_runtime(rparser):
for readnum, read in enumerate(rparser):
if 0 == readnum:
pass
t1 = threading.Thread(target=thread_1_runtime, args=[rparser])
t2 = threading.Thread(target=thread_2_runtime, args=[rparser])
t1.start()
t2.start()
t1.join()
t2.join()
@attr('multithread')
def test_casava_1_8_pair_mating():
import threading
# Note: This file, when used in conjunction with a 64 KiB per-thread
# prefetch buffer, tests the paired read mating logic with the
# Casava >= 1.8 read name format.
rparser = ReadParser(utils.get_test_data("test-reads.fq.bz2"))
def thread_1_runtime(rparser):
for read in rparser:
pass
def thread_2_runtime(rparser):
for readnum, read in enumerate(rparser):
if 0 == readnum:
pass
# assert "895:1:1:1761:13189 2:N:0:NNNNN" == read.name, read.name
t1 = threading.Thread(target=thread_1_runtime, args=[rparser])
t2 = threading.Thread(target=thread_2_runtime, args=[rparser])
t1.start()
t2.start()
t1.join()
t2.join()
def test_read_truncated():
rparser = ReadParser(utils.get_test_data("truncated.fq"))
try:
for read in rparser:
pass
assert 0, "No exception raised on a truncated file"
except ValueError as err:
assert "Sequence is empty" in str(err), str(err)
def test_iterator_identities():
rparser = \
ReadParser(utils.get_test_data("test-abund-read-paired.fa"))
assert rparser is rparser.__iter__()
assert rparser is rparser.iter_reads()
@attr('known_failing')
def test_read_pair_iterator_in_error_mode():
assert 0
rparser = \
ReadParser(utils.get_test_data("test-abund-read-paired.fa"))
    # If it walks like an iterator and quacks like an iterator...
rpi = rparser.iter_read_pairs()
assert "__iter__" in dir(rpi)
assert "next" in dir(rpi)
# Are the alleged pairs actually pairs?
read_pairs_1 = []
for read_1, read_2 in rpi:
read_pairs_1.append([read_1, read_2])
assert read_1.name[: 19] == read_2.name[: 19]
# Reload parser.
# Note: No 'rewind' or 'reset' capability at the time of this writing.
rparser = \
ReadParser(utils.get_test_data("test-abund-read-paired.fa"))
# Ensure that error mode is the default mode.
read_pairs_2 = []
for read_1, read_2 \
in rparser.iter_read_pairs(ReadParser.PAIR_MODE_ERROR_ON_UNPAIRED):
read_pairs_2.append([read_1, read_2])
matches = \
list(map(
lambda rp1, rp2: rp1[0].name == rp2[0].name,
read_pairs_1, read_pairs_2
))
assert all(matches) # Assert ALL the matches. :-]
def test_read_pair_iterator_in_error_mode_xfail():
rparser = \
ReadParser(utils.get_test_data("test-abund-read-impaired.fa"))
failed = True
try:
for rpair in rparser.iter_read_pairs():
pass
failed = False
except ValueError as exc:
assert "Invalid read pair" in str(exc), str(exc)
assert failed
@attr('known_failing')
def test_read_pair_iterator_in_ignore_mode():
assert 0
rparser = \
ReadParser(utils.get_test_data("test-abund-read-impaired.fa"))
read_pairs = []
for read_1, read_2 \
in rparser.iter_read_pairs(ReadParser.PAIR_MODE_IGNORE_UNPAIRED):
read_pairs.append([read_1, read_2])
assert read_1.name[: 19] == read_2.name[: 19]
assert 2 == len(read_pairs)
def test_constructor():
# Note: Using a data file with only one read.
try:
rparser = ReadParser(utils.get_test_data("single-read.fq"), "a")
assert 0, ("ReadParser's constructor shouldn't accept a character for "
"the number of threads")
except TypeError as err:
print(str(err))
try:
rparser = ReadParser("non-existent-file-name")
assert 0, "ReadParser shouldn't accept a non-existant file name"
except ValueError as err:
print(str(err))
except OSError as err:
print(str(err))
def test_iternext():
try:
rparser = ReadParser(utils.get_test_data("fakelump.fa.stoptags.txt"))
read_pairs = []
for read_1, read_2 in rparser.iter_read_pairs():
            read_pairs.append((read_1, read_2))
assert 0, "Shouldn't be able to iterate over non FASTA file"
except OSError as err:
print(str(err))
except ValueError as err:
print(str(err))
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
| {
"content_hash": "fb50b5e97985e758c04a90e549fca2fb",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 79,
"avg_line_length": 26.955844155844154,
"alnum_prop": 0.6215070341106186,
"repo_name": "Winterflower/khmer",
"id": "d60d8bd580caca332eabed8d104123f81317c90d",
"size": "10378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_read_parsers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "487200"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Groff",
"bytes": "8291"
},
{
"name": "Makefile",
"bytes": "15435"
},
{
"name": "Python",
"bytes": "710742"
},
{
"name": "Shell",
"bytes": "4469"
}
],
"symlink_target": ""
} |
"""Evaluation functionality for GLO NeRF Model."""
from typing import Any, Iterator
from absl import logging
import einops
from flax import jax_utils
import gin
import jax
import jax.numpy as jnp
from jax3d.projects.generative.nerf import camera as jax_camera
from jax3d.projects.generative.nerf import metrics
from jax3d.projects.generative.nerf import visualization
from jax3d.projects.generative.nerf.glo_nerf import models
import numpy as np
_MAX_ANIMATION_RESOLUTION = 128
_EVAL_BATCHES_PER_CHECKPOINT = 400
EVAL_IDS_PER_BATCH = 16
RENDER_RESULT_TO_LABEL = {
"linear_rgb": "Volumetric Reconstruction",
"bg_gamma_rgb": "Background Reconstruction",
"fg_gamma_rgb": "Foreground Volumetric Reconstruction",
"gamma_rgb": "Volumetric Reconstruction (gamma)",
"shading_radiance": "Surface Reconstruction (no exposure correction)",
"shading_linear_rgb": "Surface Reconstruction",
"shading_gamma_rgb": "Surface Reconstruction (gamma)",
"alpha": "Volumetric Alpha",
"depth": "Expected Depth",
"analytic_normal": "Analytic Normal at Expected Depth",
"predicted_normal": "Predicted Normal at Expected Depth",
"uv": "UV Coordinates",
}
def _pad_and_center_image(image, width, height):
"""Pads image(s) to desired width and height, while centering.
Args:
image: Input image(s) to pad. Assumed to be in [..., H, W, C] format.
width: Output image width
height: Output image height
Returns:
    A Tensor of the same dimensionality as the input (preserves any batching
    dimensions), but with images padded to the desired width and height.
"""
assert len(image.shape) >= 3
batch_dimensions = len(image.shape) - 3
width_pad = width - image.shape[-2]
assert width_pad >= 0
height_pad = height - image.shape[-3]
assert height_pad >= 0
# Center image in the padding
image = jnp.pad(image, ((0, 0),) * batch_dimensions + (
(height_pad // 2, height_pad - (height_pad // 2)),
(width_pad // 2, width_pad - (width_pad // 2)),
(0, 0),
))
return image
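# Editorial example (shapes chosen for illustration, not taken from the
# original module): padding a batch of three 80x100 RGB renders up to a 96x128
# summary resolution keeps the batch dimension and centers each image:
#
#     padded = _pad_and_center_image(jnp.zeros((3, 80, 100, 3)), 128, 96)
#     assert padded.shape == (3, 96, 128, 3)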
def render_id_view_grid(image_renderer, summary_data, model_parameters, step):
"""Render images of each summary identity from multiple viewpoints.
  Returns a composite image grid with views as rows and identities as columns
  for all render results.
Args:
image_renderer: Renderer function from Model.create_image_renderer.
summary_data: Summary data from load_summary_data(). Images are assumed to
have dimension [identity, view, height, width, channels].
model_parameters: Model parameters from checkpoint.
step: Training step (needed to set scheduled values correctly).
Returns:
A dictionary containing gamma-encoded RGB images visualizing the rendered
outputs of the model for each identity and view.
"""
summary_images = summary_data["image"]
id_count = summary_images.shape[0]
view_count = summary_images.shape[1]
# summary_data is padded to the maximum image resolution from
# tf.Dataset.padded_batch() in data.load_summary_data().
max_width = summary_images.shape[3]
max_height = summary_images.shape[2]
logging.info("Rendering %d ID X %d view image grid (%dx%d resolution each).",
id_count, view_count, max_width, max_height)
# Render an image for each view in each identity
multi_id_multi_view_images = {}
for id_idx in range(id_count):
multi_view_images = {}
for view_idx in range(view_count):
single_view_latents = summary_data["latents"][id_idx, view_idx]
logging.info("Rendering identity:%d view_subindex:%d", id_idx, view_idx)
# pylint: disable=cell-var-from-loop
camera = jax.tree_map(lambda t: t[id_idx, view_idx],
summary_data["camera"])
inputs = models.ModelInputs(latent_tokens=single_view_latents)
render_results = image_renderer(
camera, inputs, model_parameters=model_parameters, step=step)
rgb_results = visualization.convert_results_to_rgb(render_results)
for name in rgb_results:
image = _pad_and_center_image(rgb_results[name], max_width, max_height)
multi_view_images.setdefault(name, [])
multi_view_images[name].append(image)
for name in multi_view_images:
multi_id_multi_view_images.setdefault(name, [])
# Concatenate views along H axis (0)
multi_id_multi_view_images[name].append(
np.concatenate(multi_view_images[name], axis=0))
image_grids = {}
for name in multi_id_multi_view_images:
# Concatenate IDs along W axis (1)
image_grids[name] = np.concatenate(multi_id_multi_view_images[name], axis=1)
return image_grids
@gin.configurable(allowlist=["apply_mask"])
def compute_batch_psnr(model_parameters: models.ModelParameters,
latents: np.ndarray,
data: ...,
step: int,
apply_mask: bool = False) -> float:
"""Computes the reconstruction PSNR for a batch of data.
Args:
model_parameters: Model parameters from checkpoint.
latents: ConditionVariables object of all latents required for model.
data: A batch of data to evaluate.
step: Training step (needed to set scheduled values correctly).
apply_mask: Use masked data for PSNR.
Returns:
The computed scalar PSNR value.
"""
# Combine identity and view dimensions as the model does not use them.
def flatten_views(t):
return einops.rearrange(t, "V I ... -> (V I) ...")
data_flat = jax.tree_map(flatten_views, data)
latents = jax.tree_map(flatten_views, latents)
origins, directions = jax.vmap(jax_camera.pixels_to_rays)(
data_flat["camera"], data_flat["pixel_coordinates"])
rays = jnp.concatenate([origins, directions], axis=-1)
# Use a constant RNG as randomness is not required
rng = jax.random.PRNGKey(step)
inputs = models.ModelInputs(latent_tokens=latents)
render = models.Model().apply(
model_parameters, inputs, rays, rng=rng, step=step)
pred = render["gamma_rgb"]
gt = data_flat["gamma_rgb"]
if apply_mask:
pred *= data_flat["weight"]
gt *= data_flat["weight"]
psnr = metrics.psnr(pred, gt)
return psnr
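# Editorial note (assumption about the helper, not verified against jax3d):
# metrics.psnr is expected to follow the usual convention for images in
# [0, 1], PSNR = -10 * log10(mean((pred - gt) ** 2)), so larger values mean a
# closer reconstruction.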
def compute_eval_psnr(model_parameters: models.ModelParameters,
latent_table: np.ndarray, data_iterator: Iterator[Any],
psnr_function: ..., step: int) -> float:
"""Computes eval PSNR for one loaded checkpoint.
Args:
model_parameters: Model parameters from checkpoint.
latent_table: ConditionVariables object from checkpoint.
data_iterator: An iterator yielding batches of data to evaluate.
psnr_function: A pre-pmap'ed copy of 'compute_batch_psnr'.
step: Training step (needed to set scheduled values correctly).
Returns:
The computed scalar PSNR value.
"""
model_parameters_replicated = jax_utils.replicate(model_parameters)
step_replicated = jax_utils.replicate(step)
mean_psnr = 0.0
for i in range(_EVAL_BATCHES_PER_CHECKPOINT):
logging.info("Computing PSNR of Eval data - batch %d/%d", i + 1,
_EVAL_BATCHES_PER_CHECKPOINT)
batch = next(data_iterator)
batch_latents = latent_table[batch["identity_index"]]
batch_psnr = psnr_function(model_parameters_replicated, batch_latents,
batch, step_replicated)
mean_psnr += jnp.mean(batch_psnr)
return mean_psnr / _EVAL_BATCHES_PER_CHECKPOINT
| {
"content_hash": "58c9fc1dd26b999815c00ee34070f86a",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 80,
"avg_line_length": 36.19512195121951,
"alnum_prop": 0.6824797843665769,
"repo_name": "google-research/jax3d",
"id": "9d54fb6169590bf397aaf6d42ca5dbc3af0a5691",
"size": "8003",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jax3d/projects/generative/nerf/glo_nerf/eval.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "47972"
},
{
"name": "Python",
"bytes": "1239944"
}
],
"symlink_target": ""
} |
from implicittest import *
def check(a, b):
if a != b:
raise RuntimeError(str(a) + " does not equal " + str(b))
def is_new_style_class(cls):
return hasattr(cls, "__class__")
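# Note (added for clarity): the hasattr check above acts as a new-style-class
# test; under Python 2 a classic ("old-style") class object has no __class__
# attribute, while new-style classes do. The template-class test below uses it
# to decide whether static methods can be called on the class itself or only
# on an instance.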
#### Class ####
# No implicit conversion
check(1, A(1).get())
check(2, A(1.0).get())
check(3, A(B()).get())
check(4, A("hello").get())
try:
check(3, A(None).get())
raise RuntimeError
except ValueError:
# ValueError: invalid null reference in method 'new_A', argument 1 of type 'B const &'
    # Arguably A(char *) should be chosen, but there is a known bug with None
    # passed to methods overloaded by value, reference and pointer parameters
    # of different types, where pointer overloads ought to be given a slightly
    # higher precedence.
pass
check(1, get(1))
check(2, get(1.0))
check(3, get(B()))
# Explicit constructor:
try:
check(4, get("hello"))
raise RuntimeError
except TypeError:
pass
#### Template Class ####
# No implicit conversion
check(1, A_int(1).get())
check(2, A_int(1.0).get())
check(3, A_int(B()).get())
check(4, A_int("hello").get())
if is_new_style_class(A_int):
A_int_static = A_int
else:
A_int_static = A_int(0)
check(1, A_int_static.sget(1))
check(2, A_int_static.sget(1.0))
check(3, A_int_static.sget(B()))
# explicit constructor:
try:
check(4, A_int_static.sget("hello"))
raise RuntimeError
except TypeError:
pass
#### Global variable assignment ####
cvar.foo = Foo(1)
check(cvar.foo.ii, 1)
cvar.foo = 1
check(cvar.foo.ii, 1)
cvar.foo = 1.0
check(cvar.foo.ii, 2)
cvar.foo = Foo("hello")
check(cvar.foo.ii, 3)
# explicit constructor:
try:
cvar.foo = "hello"
raise RuntimeError
except TypeError:
pass
#### Member variable assignment ####
# Note: also needs naturalvar
b = Bar()
check(b.f.ii, 0)
b.f = Foo("hello")
check(b.f.ii, 3)
b.f = 1
check(b.f.ii, 1)
b.f = 1.0
check(b.f.ii, 2)
# explicit constructor:
try:
b.f = "hello"
raise RuntimeError
except TypeError:
pass
#### Class testing None ####
# No implicit conversion
check(1, AA(1).get())
check(2, AA(1.0).get())
check(3, AA(B()).get())
check(3, AA(None).get())
check(4, AA("hello").get())
check(5, AA(BB()).get())
check(1, get_AA_val(1))
check(2, get_AA_val(1.0))
check(3, get_AA_val(B()))
check(3, get_AA_val(None))
check(5, get_AA_val(BB()))
# Explicit constructor:
try:
check(4, get_AA_val("hello"))
raise RuntimeError
except TypeError:
pass
check(1, get_AA_ref(1))
check(2, get_AA_ref(1.0))
check(3, get_AA_ref(B()))
check(3, get_AA_ref(None))
check(5, get_AA_ref(BB()))
# Explicit constructor:
try:
check(4, get_AA_ref("hello"))
raise RuntimeError
except TypeError:
pass
### overloading priority test ###
ccc = CCC(B())
check(ccc.checkvalue, 10)
check(ccc.xx(123), 11)
check(ccc.yy(123, 123), 111)
| {
"content_hash": "f3c5b69612944632fc5d6074578548e4",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 116,
"avg_line_length": 19.58450704225352,
"alnum_prop": 0.6339446242358864,
"repo_name": "DEKHTIARJonathan/BilletterieUTC",
"id": "4646d08c0059bff49dde0419c2254bc01ecc63b8",
"size": "2781",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "badgingServer/Install/swigwin-3.0.7/Examples/test-suite/python/implicittest_runme.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "505"
},
{
"name": "C",
"bytes": "1489570"
},
{
"name": "C#",
"bytes": "323243"
},
{
"name": "C++",
"bytes": "2646678"
},
{
"name": "CSS",
"bytes": "1309792"
},
{
"name": "Common Lisp",
"bytes": "13780"
},
{
"name": "D",
"bytes": "260374"
},
{
"name": "DIGITAL Command Language",
"bytes": "16078"
},
{
"name": "Forth",
"bytes": "2411"
},
{
"name": "Go",
"bytes": "95670"
},
{
"name": "Groff",
"bytes": "17548"
},
{
"name": "HTML",
"bytes": "8474268"
},
{
"name": "Java",
"bytes": "517584"
},
{
"name": "JavaScript",
"bytes": "1574272"
},
{
"name": "Limbo",
"bytes": "2902"
},
{
"name": "Lua",
"bytes": "103853"
},
{
"name": "M",
"bytes": "58261"
},
{
"name": "Makefile",
"bytes": "193313"
},
{
"name": "Mathematica",
"bytes": "113"
},
{
"name": "Matlab",
"bytes": "49071"
},
{
"name": "Mercury",
"bytes": "4136"
},
{
"name": "OCaml",
"bytes": "25948"
},
{
"name": "Objective-C",
"bytes": "9721"
},
{
"name": "PHP",
"bytes": "336290"
},
{
"name": "Perl",
"bytes": "140021"
},
{
"name": "Perl6",
"bytes": "6403"
},
{
"name": "Pike",
"bytes": "6601"
},
{
"name": "Python",
"bytes": "271706"
},
{
"name": "R",
"bytes": "6053"
},
{
"name": "Ruby",
"bytes": "129514"
},
{
"name": "SQLPL",
"bytes": "10237"
},
{
"name": "Scheme",
"bytes": "81765"
},
{
"name": "Scilab",
"bytes": "84725"
},
{
"name": "Shell",
"bytes": "86284"
},
{
"name": "Standard ML",
"bytes": "2587"
},
{
"name": "Tcl",
"bytes": "38028"
},
{
"name": "Yacc",
"bytes": "211262"
}
],
"symlink_target": ""
} |
import sys
import os
import pyjswidgets
import pyjswaddons
sys.path += [os.path.dirname(pyjswidgets.__file__),
os.path.dirname(pyjswaddons.__file__), ]
from pyjs.runners import RunnerManager
#TODO: very ugly to self-import and setattr(self) ... remove ASAP!
import pyjd
pyjdversion = '0.9'
_manager = RunnerManager()
_manager.set_conf()
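# The loop below copies every runner configuration value onto the pyjd module
# object itself (hence the self-import above), so callers can simply read
# pyjd.<option>. dict.iteritems() keeps this Python 2 only.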
for key, value in _manager._conf.iteritems():
setattr(pyjd, key, value)
_manager.set_runner()
#TODO: perm delete ASAP unless someone claims use; disable for now
sys.path += [os.path.dirname(__file__)]
add_setup_callback = _manager.add_setup_listener
setup = _manager.setup
run = _manager.run
| {
"content_hash": "1f48f553aa9de71a123354f858a25aeb",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 21.866666666666667,
"alnum_prop": 0.711890243902439,
"repo_name": "Hasimir/pyjs",
"id": "4a51466d3649065831ac008538018c312a5c1f36",
"size": "656",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pyjd/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4640"
},
{
"name": "Groff",
"bytes": "6633"
},
{
"name": "HTML",
"bytes": "10106"
},
{
"name": "JavaScript",
"bytes": "63385"
},
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "5515375"
},
{
"name": "Shell",
"bytes": "4264"
}
],
"symlink_target": ""
} |
"""
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
#         Arthur Mensch <arthur.mensch@m4x.org>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.extmath import row_norms
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
Returns
-------
w : ndarray, shape (n_features,)
Coefficient vector without the intercept weight (w[-1]) if the
intercept should be fit. Unchanged otherwise.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Unchanged.
yz : float
y * np.dot(X, w).
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
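# Illustrative sketch (not part of scikit-learn): shows how _intercept_dot
# splits an appended intercept term off the coefficient vector.
def _intercept_dot_sketch():
    X = np.array([[1., 2.], [3., 4.]])
    w = np.array([0.5, -0.25, 1.0])  # last entry acts as the intercept
    y = np.array([1., -1.])
    w_, c, yz = _intercept_dot(w, X, y)
    # yz == y * (X.dot(w_) + c), with c == 1.0 taken from w[-1].
    return np.allclose(yz, y * (X.dot(w_) + c)) and c == 1.0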
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
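# Illustrative sketch (not part of scikit-learn): the analytic gradient from
# _logistic_loss_and_grad should agree with a central finite difference of
# _logistic_loss on random data.
def _logistic_grad_check_sketch(eps=1e-6):
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.where(rng.randn(20) > 0, 1., -1.)
    w = rng.randn(3)
    _, grad = _logistic_loss_and_grad(w, X, y, alpha=1.0)
    approx = np.array([
        (_logistic_loss(w + eps * e, X, y, 1.0) -
         _logistic_loss(w - eps * e, X, y, 1.0)) / (2 * eps)
        for e in np.eye(3)])
    return np.allclose(grad, approx, atol=1e-4)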
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
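# Illustrative sketch (not part of scikit-learn): the Hessian-vector product
# Hs(s) returned by _logistic_grad_hess should match a central finite
# difference of the gradient along the direction s.
def _logistic_hessp_check_sketch(eps=1e-6):
    rng = np.random.RandomState(1)
    X = rng.randn(15, 4)
    y = np.where(rng.randn(15) > 0, 1., -1.)
    w, s = rng.randn(4), rng.randn(4)
    _, Hs = _logistic_grad_hess(w, X, y, alpha=0.5)
    grad_at = lambda v: _logistic_loss_and_grad(v, X, y, 0.5)[1]
    approx = (grad_at(w + eps * s) - grad_at(w - eps * s)) / (2 * eps)
    return np.allclose(Hs(s), approx, atol=1e-4)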
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
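# Illustrative sketch (not part of scikit-learn): _multinomial_loss returns
# class probabilities that sum to one per sample and a positive cross-entropy
# loss when alpha == 0.
def _multinomial_loss_sketch():
    rng = np.random.RandomState(2)
    X = rng.randn(10, 3)
    Y = np.eye(4)[rng.randint(4, size=10)]  # one-hot labels, 4 classes
    w = rng.randn(4 * 3)                    # no intercept columns
    loss, p, _ = _multinomial_loss(w, X, Y, 0., np.ones(10))
    return np.allclose(p.sum(axis=1), 1.0) and loss > 0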
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag', 'saga']:
        raise ValueError("Logistic Regression supports only liblinear,"
                         " newton-cg, lbfgs, sag and saga solvers, got %s"
                         % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver not in ['liblinear', 'saga']:
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if solver != 'liblinear':
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs',
        'newton-cg', 'sag' and 'saga' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
    # To do an OvR fit, we need to mask the labels first. For the
    # multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver not in ['sag', 'saga']:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F')
if coef is not None:
        # it must work both when the bias term is included and when it is not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
            except KeyError:
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
if penalty == 'l1':
alpha = 0.
beta = 1. / C
else:
alpha = 1. / C
beta = 0.
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, alpha,
beta, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag,
is_saga=(solver == 'saga'))
else:
            raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
                             "'newton-cg', 'sag', 'saga'}, got '%s' instead"
                             % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
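# Illustrative sketch (not part of scikit-learn): fitting a small binary
# problem along a short grid of C values yields one coefficient vector
# (plus intercept) per C.
def _logistic_regression_path_sketch():
    rng = np.random.RandomState(3)
    X = rng.randn(50, 5)
    y = (X[:, 0] + 0.1 * rng.randn(50) > 0).astype(int)
    coefs, Cs, n_iter = logistic_regression_path(
        X, y, Cs=[0.1, 1.0, 10.0], fit_intercept=True, solver='lbfgs')
    return len(coefs) == 3 and coefs[0].shape == (X.shape[1] + 1,)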
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Does not work for
liblinear solver.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
        raise ValueError("multi_class should be either multinomial or ovr, "
                         "got %s" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
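# Illustrative sketch (not part of scikit-learn): scoring a single C value on
# a held-out split returns one accuracy score in [0, 1].
def _log_reg_scoring_path_sketch():
    rng = np.random.RandomState(5)
    X = rng.randn(40, 3)
    y = (X[:, 0] > 0).astype(int)
    train, test = np.arange(30), np.arange(30, 40)
    coefs, Cs, scores, n_iter = _log_reg_scoring_path(
        X, y, train, test, pos_class=1, Cs=[1.0], fit_intercept=True)
    return scores.shape == (1,) and 0.0 <= scores[0] <= 1.0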
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the cross-
entropy loss if the 'multi_class' option is set to 'multinomial'.
    (Currently the 'multinomial' option is supported only by the 'lbfgs',
    'sag', 'saga' and 'newton-cg' solvers.)
    This class implements regularized logistic regression using the
    'liblinear' library and the 'newton-cg', 'sag', 'saga' and 'lbfgs'
    solvers. It can handle
both dense and sparse input. Use C-ordered arrays or CSR matrices
containing 64-bit floats for optimal performance; any other input format
will be converted (and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation. The 'liblinear' solver supports both L1 and L2
regularization, with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2', default: 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
.. versionadded:: 0.19
l1 penalty with SAGA solver (allowing 'multinomial' + L1)
dual : bool, default: False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, default: 1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default: None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'*
max_iter : int, default: 100
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, default: None
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'},
default: 'liblinear'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty, whereas
'liblinear' and 'saga' handle L1 penalty.
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can
preprocess the data with a scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
tol : float, default: 1e-4
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}, default: 'ovr'
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Does not work for liblinear
solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
verbose : int, default: 0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default: False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.
n_jobs : int, default: 1
Number of CPU cores used when parallelizing over classes
        if multi_class='ovr'.
If given a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape(1,) when the problem is binary.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
        number of iterations across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
Minimizing Finite Sums with the Stochastic Average Gradient
https://hal.inria.fr/hal-00860051/document
SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if self.solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if self.solver in ['sag', 'saga']:
backend = 'threading'
else:
backend = 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
penalty=self.penalty,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for class_, warm_start_coef_ in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function
        and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
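# Illustrative sketch (not part of scikit-learn): a minimal end-to-end use of
# the estimator on a toy binary problem; predicted probabilities sum to one.
def _logistic_regression_sketch():
    rng = np.random.RandomState(4)
    X = rng.randn(60, 2)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = LogisticRegression(C=1.0, solver='lbfgs').fit(X, y)
    return np.allclose(clf.predict_proba(X).sum(axis=1), 1.0)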
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path, i.e. the initial coefficients of the
    present fit are guessed to be the coefficients obtained after
    convergence in the previous fit, so it is supposed to be faster for
    high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across
    all folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
    strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
        See the :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
scoring : string, callable, or None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is 'accuracy'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'},
default: 'liblinear'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty, whereas
'liblinear' and 'saga' handle L1 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can preprocess the data
with a scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
        'sag', 'saga' and 'lbfgs' solvers.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape(1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
        given is 'multinomial' then the same scores are repeated across
        all classes, since the multinomial loss is shared by all of them.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
class_weight = self.class_weight
# Encode for string labels
label_encoder = LabelEncoder().fit(y)
y = label_encoder.transform(y)
if isinstance(class_weight, dict):
class_weight = dict((label_encoder.transform([cls])[0], v)
for cls, v in class_weight.items())
# The original class labels
classes = self.classes_ = label_encoder.classes_
encoded_labels = label_encoder.transform(label_encoder.classes_)
if self.solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
# Use the label encoded classes
n_classes = len(encoded_labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
encoded_labels = encoded_labels[1:]
classes = classes[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
if self.multi_class == 'multinomial':
iter_encoded_labels = iter_classes = [None]
else:
iter_encoded_labels = encoded_labels
iter_classes = classes
# compute the class weights for the entire dataset y
if class_weight == "balanced":
class_weight = compute_class_weight(class_weight,
np.arange(len(self.classes_)),
y)
class_weight = dict(enumerate(class_weight))
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if self.solver in ['sag', 'saga']:
backend = 'threading'
else:
backend = 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_encoded_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
# Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(classes, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(classes, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, (cls, encoded_label) in enumerate(
zip(iter_classes, iter_encoded_labels)):
if self.multi_class == 'ovr':
# The scores_ / coefs_paths_ dicts have unencoded class
# labels as their keys
scores = self.scores_[cls]
coefs_paths = self.coefs_paths_[cls]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
# Note that y is label encoded and hence pos_class must be
# the encoded label / None (for 'multinomial')
w, _, _ = logistic_regression_path(
X, y, pos_class=encoded_label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
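# --- Hedged usage sketch (not part of the original module) ---
# A minimal fit of the cross-validated estimator defined above, assuming it is
# the public ``sklearn.linear_model.LogisticRegressionCV``. The toy dataset and
# hyperparameter values are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegressionCV

    X_demo, y_demo = make_classification(n_samples=200, n_features=10,
                                         random_state=0)
    clf = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs', refit=True,
                               random_state=0).fit(X_demo, y_demo)
    # In the binary case the dicts are keyed by the positive class label only;
    # each scores_ entry has shape (n_folds, len(Cs_)).
    print(clf.C_, clf.scores_[1].shape)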
| {
"content_hash": "abd02dab7653baf7c6a5229ca3639eb7",
"timestamp": "",
"source": "github",
"line_count": 1757,
"max_line_length": 79,
"avg_line_length": 39.135458167330675,
"alnum_prop": 0.6006893442503745,
"repo_name": "jakobworldpeace/scikit-learn",
"id": "196d7f697d0e8a1efde71b3860555e94a0600112",
"size": "68761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/linear_model/logistic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451977"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7051964"
},
{
"name": "Shell",
"bytes": "19484"
}
],
"symlink_target": ""
} |
import smtplib
from constants import SMTPSecureConnection
from models import User
from models import SystemOption
from models import EmailJob
# Import the email modules we'll need
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import Encoders
from os.path import basename
def create_email_job(db_session, logger, message, username):
system_option = SystemOption.get(db_session)
if not system_option.enable_email_notify:
return
user = get_user(db_session, username)
if user is None:
logger.error('mailer: Unable to locate user "%s"' % username)
return
email_job = EmailJob(recipients=user.email, message=message, created_by=username)
db_session.add(email_job)
db_session.commit()
def sendmail(logger, server, server_port, sender, recipients, message, use_authentication, username, password,
secure_connection, file_attachments=None):
"""
:param recipients: String containing email recipient(s), separated by comma
:param file_attachments: String containing absolute file paths for file attachment(s), separated by comma
"""
try:
msg = MIMEMultipart('alternative')
msg['Subject'] = 'Notification from CSM Server'
msg['From'] = sender
msg['To'] = recipients
part = MIMEText(message, 'html')
msg.attach(part)
if use_authentication:
s = None
if secure_connection == SMTPSecureConnection.SSL:
s = smtplib.SMTP_SSL(server, int(server_port))
elif secure_connection == SMTPSecureConnection.TLS:
s = smtplib.SMTP(server, int(server_port))
s.starttls()
s.login(username, password)
else:
if server_port is None or len(server_port) == 0:
s = smtplib.SMTP(server)
else:
s = smtplib.SMTP(server, int(server_port))
if file_attachments:
for file_attachment in file_attachments.split(","):
try:
part = MIMEBase('application', "octet-stream")
part.set_payload(open(file_attachment, "rb").read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % basename(file_attachment))
msg.attach(part)
except:
logger.exception('sendmail: Unable to attach %s', basename(file_attachment))
s.sendmail(sender, recipients.split(","), msg.as_string())
s.close()
return True
except:
logger.exception('sendmail() hit exception')
return False
def get_user(db_session, username):
return db_session.query(User).filter(User.username == username).first()
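# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how sendmail() above might be invoked. The SMTP host, credentials
# and addresses are placeholders, and the standard logging module stands in for
# the logger the CSM server normally supplies.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    sent = sendmail(
        logger=logging.getLogger(__name__),
        server='smtp.example.com',
        server_port='587',
        sender='csm@example.com',
        recipients='admin@example.com,ops@example.com',
        message='<p>Install job completed.</p>',
        use_authentication=True,
        username='csm',
        password='secret',
        secure_connection=SMTPSecureConnection.TLS,
        file_attachments=None,
    )
    print('sent' if sent else 'failed')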
| {
"content_hash": "7439c65c6c5a0b5a3f67920f0eb70db5",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 115,
"avg_line_length": 35.305882352941175,
"alnum_prop": 0.6034655114961679,
"repo_name": "smjurcak/csm",
"id": "117401cef360eb22388c9f51c27a42ccec779b42",
"size": "4516",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "csmserver/mailer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "84140"
},
{
"name": "HTML",
"bytes": "618824"
},
{
"name": "JavaScript",
"bytes": "572667"
},
{
"name": "Python",
"bytes": "978958"
},
{
"name": "Shell",
"bytes": "3584"
}
],
"symlink_target": ""
} |
import logging
import uuid
from typing import List, Optional
from flask_appbuilder import const as c
from flask_appbuilder.models.sqla import Base
from flask_appbuilder.models.sqla.interface import SQLAInterface
from sqlalchemy import and_, func, literal
from sqlalchemy.orm.exc import MultipleResultsFound
from werkzeug.security import generate_password_hash
from airflow.compat import sqlalchemy as sqla_compat
from airflow.www.fab_security.manager import BaseSecurityManager
from airflow.www.fab_security.sqla.models import (
Action,
Permission,
RegisterUser,
Resource,
Role,
User,
assoc_permission_role,
)
log = logging.getLogger(__name__)
class SecurityManager(BaseSecurityManager):
"""
Responsible for authentication, registering security views,
role and permission auto management.
If you want to change anything just inherit and override, then
pass your own security manager to AppBuilder.
"""
user_model = User
""" Override to set your own User Model """
role_model = Role
""" Override to set your own Role Model """
action_model = Action
resource_model = Resource
permission_model = Permission
registeruser_model = RegisterUser
def __init__(self, appbuilder):
"""
Class constructor
:param appbuilder:
F.A.B AppBuilder main object
"""
super().__init__(appbuilder)
user_datamodel = SQLAInterface(self.user_model)
if self.auth_type == c.AUTH_DB:
self.userdbmodelview.datamodel = user_datamodel
elif self.auth_type == c.AUTH_LDAP:
self.userldapmodelview.datamodel = user_datamodel
elif self.auth_type == c.AUTH_OID:
self.useroidmodelview.datamodel = user_datamodel
elif self.auth_type == c.AUTH_OAUTH:
self.useroauthmodelview.datamodel = user_datamodel
elif self.auth_type == c.AUTH_REMOTE_USER:
self.userremoteusermodelview.datamodel = user_datamodel
if self.userstatschartview:
self.userstatschartview.datamodel = user_datamodel
if self.auth_user_registration:
self.registerusermodelview.datamodel = SQLAInterface(self.registeruser_model)
self.rolemodelview.datamodel = SQLAInterface(self.role_model)
self.actionmodelview.datamodel = SQLAInterface(self.action_model)
self.resourcemodelview.datamodel = SQLAInterface(self.resource_model)
self.permissionmodelview.datamodel = SQLAInterface(self.permission_model)
self.create_db()
@property
def get_session(self):
return self.appbuilder.get_session
def register_views(self):
super().register_views()
def create_db(self):
try:
engine = self.get_session.get_bind(mapper=None, clause=None)
inspector = sqla_compat.inspect(engine)
if "ab_user" not in inspector.get_table_names():
log.info(c.LOGMSG_INF_SEC_NO_DB)
Base.metadata.create_all(engine)
log.info(c.LOGMSG_INF_SEC_ADD_DB)
super().create_db()
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_CREATE_DB.format(str(e)))
exit(1)
def find_register_user(self, registration_hash):
return (
self.get_session.query(self.registeruser_model)
.filter(self.registeruser_model.registration_hash == registration_hash)
.scalar()
)
def add_register_user(self, username, first_name, last_name, email, password="", hashed_password=""):
"""
Add a registration request for the user.
:rtype : RegisterUser
"""
register_user = self.registeruser_model()
register_user.username = username
register_user.email = email
register_user.first_name = first_name
register_user.last_name = last_name
if hashed_password:
register_user.password = hashed_password
else:
register_user.password = generate_password_hash(password)
register_user.registration_hash = str(uuid.uuid1())
try:
self.get_session.add(register_user)
self.get_session.commit()
return register_user
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_REGISTER_USER.format(str(e)))
self.appbuilder.get_session.rollback()
return None
def del_register_user(self, register_user):
"""
Deletes registration object from database
:param register_user: RegisterUser object to delete
"""
try:
self.get_session.delete(register_user)
self.get_session.commit()
return True
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_REGISTER_USER.format(str(e)))
self.get_session.rollback()
return False
def find_user(self, username=None, email=None):
"""Finds user by username or email"""
if username:
try:
if self.auth_username_ci:
return (
self.get_session.query(self.user_model)
.filter(func.lower(self.user_model.username) == func.lower(username))
.one_or_none()
)
else:
return (
self.get_session.query(self.user_model)
.filter(self.user_model.username == username)
.one_or_none()
)
except MultipleResultsFound:
log.error("Multiple results found for user %s", username)
return None
elif email:
try:
return self.get_session.query(self.user_model).filter_by(email=email).one_or_none()
except MultipleResultsFound:
log.error("Multiple results found for user with email %s", email)
return None
def get_all_users(self):
return self.get_session.query(self.user_model).all()
def add_user(
self,
username,
first_name,
last_name,
email,
role,
password="",
hashed_password="",
):
"""Generic function to create user"""
try:
user = self.user_model()
user.first_name = first_name
user.last_name = last_name
user.username = username
user.email = email
user.active = True
user.roles = role if isinstance(role, list) else [role]
if hashed_password:
user.password = hashed_password
else:
user.password = generate_password_hash(password)
self.get_session.add(user)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_ADD_USER.format(username))
return user
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_USER.format(str(e)))
self.get_session.rollback()
return False
def count_users(self):
return self.get_session.query(func.count(self.user_model.id)).scalar()
def update_user(self, user):
try:
self.get_session.merge(user)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_UPD_USER.format(user))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_UPD_USER.format(str(e)))
self.get_session.rollback()
return False
def get_user_by_id(self, pk):
return self.get_session.query(self.user_model).get(pk)
def add_role(self, name: str) -> Optional[Role]:
role = self.find_role(name)
if role is None:
try:
role = self.role_model()
role.name = name
self.get_session.add(role)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_ADD_ROLE.format(name))
return role
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_ROLE.format(str(e)))
self.get_session.rollback()
return role
def update_role(self, role_id, name: str) -> Optional[Role]:
role = self.get_session.query(self.role_model).get(role_id)
if not role:
return None
try:
role.name = name
self.get_session.merge(role)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_UPD_ROLE.format(role))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_UPD_ROLE.format(str(e)))
self.get_session.rollback()
return None
return role
def find_role(self, name):
return self.get_session.query(self.role_model).filter_by(name=name).one_or_none()
def get_all_roles(self):
return self.get_session.query(self.role_model).all()
def get_public_role(self):
return self.get_session.query(self.role_model).filter_by(name=self.auth_role_public).one_or_none()
def get_action(self, name: str) -> Action:
"""
Gets an existing action record.
:param name: name
:return: Action record, if it exists
:rtype: Action
"""
return self.get_session.query(self.action_model).filter_by(name=name).one_or_none()
def permission_exists_in_one_or_more_roles(
self, resource_name: str, action_name: str, role_ids: List[int]
) -> bool:
"""
Method to efficiently check if a certain permission exists
on a list of role ids. This is used by `has_access`
:param resource_name: The view's name to check if exists on one of the roles
:param action_name: The permission name to check if exists
:param role_ids: a list of Role ids
:return: Boolean
"""
q = (
self.appbuilder.get_session.query(self.permission_model)
.join(
assoc_permission_role,
and_(self.permission_model.id == assoc_permission_role.c.permission_view_id),
)
.join(self.role_model)
.join(self.action_model)
.join(self.resource_model)
.filter(
self.resource_model.name == resource_name,
self.action_model.name == action_name,
self.role_model.id.in_(role_ids),
)
.exists()
)
# Special case for MSSQL/Oracle (works on PG and MySQL > 8)
if self.appbuilder.get_session.bind.dialect.name in ("mssql", "oracle"):
return self.appbuilder.get_session.query(literal(True)).filter(q).scalar()
return self.appbuilder.get_session.query(q).scalar()
def filter_roles_by_perm_with_action(self, action_name: str, role_ids: List[int]):
"""Find roles with permission"""
return (
self.appbuilder.get_session.query(self.permission_model)
.join(
assoc_permission_role,
and_(self.permission_model.id == assoc_permission_role.c.permission_view_id),
)
.join(self.role_model)
.join(self.action_model)
.join(self.resource_model)
.filter(
self.action_model.name == action_name,
self.role_model.id.in_(role_ids),
)
).all()
def create_action(self, name):
"""
Adds an action to the backend, model action
:param name:
name of the action: 'can_add','can_edit' etc...
"""
action = self.get_action(name)
if action is None:
try:
action = self.action_model()
action.name = name
self.get_session.add(action)
self.get_session.commit()
return action
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMISSION.format(str(e)))
self.get_session.rollback()
return action
def delete_action(self, name: str) -> bool:
"""
Deletes a permission action.
:param name: Name of action to delete (e.g. can_read).
:return: Whether or not delete was successful.
:rtype: bool
"""
action = self.get_action(name)
if not action:
log.warning(c.LOGMSG_WAR_SEC_DEL_PERMISSION.format(name))
return False
try:
perms = (
self.get_session.query(self.permission_model)
.filter(self.permission_model.action == action)
.all()
)
if perms:
log.warning(c.LOGMSG_WAR_SEC_DEL_PERM_PVM.format(action, perms))
return False
self.get_session.delete(action)
self.get_session.commit()
return True
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_PERMISSION.format(str(e)))
self.get_session.rollback()
return False
def get_resource(self, name: str) -> Resource:
"""
Returns a resource record by name, if it exists.
:param name: Name of resource
:return: Resource record
:rtype: Resource
"""
return self.get_session.query(self.resource_model).filter_by(name=name).one_or_none()
def get_all_resources(self) -> List[Resource]:
"""
Gets all existing resource records.
:return: List of all resources
:rtype: List[Resource]
"""
return self.get_session.query(self.resource_model).all()
def create_resource(self, name) -> Resource:
"""
Create a resource with the given name.
:param name: The name of the resource to be created.
:return: The FAB resource created.
:rtype: Resource
"""
resource = self.get_resource(name)
if resource is None:
try:
resource = self.resource_model()
resource.name = name
self.get_session.add(resource)
self.get_session.commit()
return resource
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_VIEWMENU.format(str(e)))
self.get_session.rollback()
return resource
def delete_resource(self, name: str) -> bool:
"""
Deletes a Resource from the backend
:param name:
name of the resource
"""
resource = self.get_resource(name)
if not resource:
log.warning(c.LOGMSG_WAR_SEC_DEL_VIEWMENU.format(name))
return False
try:
perms = (
self.get_session.query(self.permission_model)
.filter(self.permission_model.resource == resource)
.all()
)
if perms:
log.warning(c.LOGMSG_WAR_SEC_DEL_VIEWMENU_PVM.format(resource, perms))
return False
self.get_session.delete(resource)
self.get_session.commit()
return True
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_PERMISSION.format(str(e)))
self.get_session.rollback()
return False
"""
----------------------
PERMISSION VIEW MENU
----------------------
"""
def get_permission(self, action_name: str, resource_name: str) -> Optional[Permission]:
"""
Gets a permission made with the given action->resource pair, if the permission already exists.
:param action_name: Name of action
:param resource_name: Name of resource
:return: The existing permission
:rtype: Permission
"""
action = self.get_action(action_name)
resource = self.get_resource(resource_name)
if action and resource:
return (
self.get_session.query(self.permission_model)
.filter_by(action=action, resource=resource)
.one_or_none()
)
return None
def get_resource_permissions(self, resource: Resource) -> Permission:
"""
Retrieve permission pairs associated with a specific resource object.
:param resource: Object representing a single resource.
:return: Action objects representing resource->action pair
:rtype: Permission
"""
return self.get_session.query(self.permission_model).filter_by(resource_id=resource.id).all()
def create_permission(self, action_name, resource_name) -> Optional[Permission]:
"""
Adds a permission on a resource to the backend
:param action_name:
name of the action to add: 'can_add','can_edit' etc...
:param resource_name:
name of the resource to add
"""
if not (action_name and resource_name):
return None
perm = self.get_permission(action_name, resource_name)
if perm:
return perm
resource = self.create_resource(resource_name)
action = self.create_action(action_name)
perm = self.permission_model()
perm.resource_id, perm.action_id = resource.id, action.id
try:
self.get_session.add(perm)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_ADD_PERMVIEW.format(str(perm)))
return perm
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMVIEW.format(str(e)))
self.get_session.rollback()
return None
def delete_permission(self, action_name: str, resource_name: str) -> None:
"""
Deletes the permission linking an action->resource pair. Doesn't delete the
underlying action or resource.
:param action_name: Name of existing action
:param resource_name: Name of existing resource
:return: None
:rtype: None
"""
if not (action_name and resource_name):
return
perm = self.get_permission(action_name, resource_name)
if not perm:
return
roles = (
self.get_session.query(self.role_model).filter(self.role_model.permissions.contains(perm)).first()
)
if roles:
log.warning(c.LOGMSG_WAR_SEC_DEL_PERMVIEW.format(resource_name, action_name, roles))
return
try:
# delete permission on resource
self.get_session.delete(perm)
self.get_session.commit()
# if no other permission still references this action, delete the action too
if not self.get_session.query(self.permission_model).filter_by(action=perm.action).all():
self.delete_action(perm.action.name)
log.info(c.LOGMSG_INF_SEC_DEL_PERMVIEW.format(action_name, resource_name))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_PERMVIEW.format(str(e)))
self.get_session.rollback()
def perms_include_action(self, perms, action_name):
for perm in perms:
if perm.action and perm.action.name == action_name:
return True
return False
def add_permission_to_role(self, role: Role, permission: Permission) -> None:
"""
Add an existing permission pair to a role.
:param role: The role about to get a new permission.
:param permission: The permission pair to add to a role.
:return: None
:rtype: None
"""
if permission and permission not in role.permissions:
try:
role.permissions.append(permission)
self.get_session.merge(role)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_ADD_PERMROLE.format(str(permission), role.name))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMROLE.format(str(e)))
self.get_session.rollback()
def remove_permission_from_role(self, role: Role, permission: Permission) -> None:
"""
Remove a permission pair from a role.
:param role: User role containing permissions.
:param permission: Object representing resource-> action pair
"""
if permission in role.permissions:
try:
role.permissions.remove(permission)
self.get_session.merge(role)
self.get_session.commit()
log.info(c.LOGMSG_INF_SEC_DEL_PERMROLE.format(str(permission), role.name))
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_PERMROLE.format(str(e)))
self.get_session.rollback()
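# --- Hedged usage sketch (not part of the original module) ---
# Shows how the helpers above combine to grant a permission. In a running
# Airflow webserver you would normally use the already-constructed
# ``appbuilder.sm`` rather than instantiating SecurityManager yourself; the
# resource and role names below are placeholders.
def grant_can_read_on_resource(appbuilder, resource_name="DAGs", role_name="Viewer"):
    sm = appbuilder.sm if hasattr(appbuilder, "sm") else SecurityManager(appbuilder)
    perm = sm.create_permission("can_read", resource_name)  # idempotent: reuses existing rows
    role = sm.add_role(role_name)  # returns the existing role if already present
    sm.add_permission_to_role(role, perm)
    return sm.permission_exists_in_one_or_more_roles(resource_name, "can_read", [role.id])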
| {
"content_hash": "24d7a432ebac3dc18c5cc78e537fb4c3",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 110,
"avg_line_length": 36.49036777583187,
"alnum_prop": 0.5783739681320791,
"repo_name": "danielvdende/incubator-airflow",
"id": "8ee1d900f6ce09a876d5401bd1fbc78c5e14ec0c",
"size": "21622",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/www/fab_security/sqla/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
from hyperstream.stream import StreamInstance, StreamMetaInstance
from hyperstream.tool import MultiOutputTool
class AssetSplitter(MultiOutputTool):
def __init__(self, element=None):
"""
Special tool to extract data from the asset channel
:param element: The element to extract
"""
super(AssetSplitter, self).__init__(element=element)
def _execute(self, source, splitting_stream, interval, meta_data_id, output_plate_values):
for timestamp, data in source.window(interval, force_calculation=True):
if self.element in data:
for key, value in data[self.element].items():
yield StreamMetaInstance(StreamInstance(timestamp=timestamp, value=value), (meta_data_id, key))
| {
"content_hash": "30950b3e6c9600e2dfee51f6be57586c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 115,
"avg_line_length": 45.411764705882355,
"alnum_prop": 0.6813471502590673,
"repo_name": "IRC-SPHERE/HyperStream",
"id": "e1a7778aabb43e5c314edd643f5f1b7215b39adb",
"size": "1917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyperstream/tools/asset_splitter/2016-11-24_v0.1.0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24331"
},
{
"name": "HTML",
"bytes": "16016"
},
{
"name": "JavaScript",
"bytes": "94024"
},
{
"name": "Jupyter Notebook",
"bytes": "60569"
},
{
"name": "Makefile",
"bytes": "7617"
},
{
"name": "Python",
"bytes": "742564"
},
{
"name": "Shell",
"bytes": "1300"
}
],
"symlink_target": ""
} |
import pytest
from run_dtox import run
class TestNoToxIni:
def test_dtox_empty_params(self):
r = run(expect_error=True)
assert r.returncode != 0
assert r.stdout == "using CODE_DIR=/code\n"
assert r.stderr == "ERROR: toxini file 'tox.ini' not found\n"
assert r.files_after == {}
def test_dtox_dot_dir_param(self):
r = run(".", expect_error=True)
assert r.returncode != 0
assert r.stdout == ""
assert r.stderr == "Error: Working directory must be absolute: .\n"
assert r.files_after == {}
@pytest.mark.parametrize("code_dir",
["/code",
"/this/dir/does/not/exist/yet",
"/root/dir"])
def test_dtox_abs_dir_param(self, code_dir):
r = run(code_dir, expect_error=True)
assert r.returncode != 0
assert r.stdout == "using CODE_DIR={}\n".format(code_dir)
assert r.stderr == "ERROR: toxini file 'tox.ini' not found\n"
assert r.files_after == {}
| {
"content_hash": "38cd3e32e29460d947e1042bd9bf2b83",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 75,
"avg_line_length": 29.72222222222222,
"alnum_prop": 0.5429906542056074,
"repo_name": "realcundo/docker-tox",
"id": "26d5a5ed9fbfab4577f16e0951014e97447740c6",
"size": "1070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_no_ini.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "4262"
}
],
"symlink_target": ""
} |
import json
import math
import os
import numpy
import rasterio
from affine import Affine
from rasterio import features
from rasterio.warp import transform_geom
def rasterize_geojson(geojson, template_path, out_path, crs=None, save_geojson=False):
"""
Creates transition spatial multipliers from a GeoJSON dictionary or list of dictionaries.
:param geojson: GeoJSON-formatted dictionary.
:param template_path: Path to the template raster to constrain the shapes to.
:param out_path: Path to the outputted raster with burned shapes into it.
:param crs: CRS of the input geometry. Default is EPSG:4326.
:param save_geojson: If True, save a copy of the rasterized GeoJSON next to the file.
"""
if crs is None:
crs = {'init': 'EPSG:4326'}
# Handle single geometries as well as lists
if type(geojson) is dict:
geojson = [geojson]
if save_geojson:
ext = '.{}'.format(out_path.split('.')[-1])
json_path = out_path.replace(ext, '.json')
with open(json_path, 'w') as f:
json.dump(geojson, f)
with rasterio.open(template_path, 'r') as template:
if not os.path.exists(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
with rasterio.open(out_path, 'w', **template.meta.copy()) as dest:
image = features.rasterize(
((transform_geom(crs, template.crs, g), 255.0) # Todo, should value be 255 or 100?
for g in [f['geometry'] for f in geojson]),
out_shape=template.shape,
transform=template.transform,
dtype='float64'
)
image[numpy.where(image == 0)] = 1.0 # Where a polygon wasn't drawn, set multiplier to 1.0
dest.write(image, 1)
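# --- Hedged usage sketch (not part of the original module) ---
# Burns one GeoJSON feature into a spatial-multiplier raster aligned to a
# template. The paths and coordinates below are placeholders, not real data.
def example_rasterize():
    feature = {
        "type": "Feature",
        "geometry": {
            "type": "Polygon",
            "coordinates": [[[-120.0, 39.0], [-120.0, 39.1],
                             [-119.9, 39.1], [-119.9, 39.0], [-120.0, 39.0]]],
        },
    }
    # Cells covered by the polygon get 255.0; everything else stays at 1.0.
    rasterize_geojson(feature, "templates/stateclass.tif",
                      "outputs/multiplier.tif", save_geojson=True)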
""" Functions here are directly copied from rasterstats """
def rowcol(x, y, affine, op=math.floor):
""" Get row/col for a x/y """
r = int(op((y - affine.f) / affine.e))
c = int(op((x - affine.c) / affine.a))
return r, c
def bounds_window(bounds, affine):
""" Create a full cover rasterio-style window """
w, s, e, n = bounds
row_start, col_start = rowcol(w, n, affine)
row_stop, col_stop = rowcol(e, s, affine, op=math.ceil)
return (row_start, row_stop), (col_start, col_stop)
def window_bounds(window, affine):
""" Calculate the window bounds from a given Affine. """
(row_start, row_stop), (col_start, col_stop) = window
w, s = (col_start, row_stop) * affine
e, n = (col_stop, row_start) * affine
return w, s, e, n
def rasterize_geom(geom, affine, shape):
""" Rasterize geometry to a matching affine and shape. """
geoms = [(geom, 1)]
rv_array = features.rasterize(
geoms,
out_shape=shape,
transform=affine,
fill=0,
dtype='uint8'
)
return rv_array.astype(bool)
def read_window(raster, affine, bounds, band=1):
""" Read a window of data from a raster and return the window data, shape and Affine. """
# Calculate the window
win = bounds_window(bounds, affine)
# Calculate the new window's Affine transformation
c, _, _, f = window_bounds(win, affine) # c ~ west, f ~ north
a, b, _, d, e, _, _, _, _ = tuple(affine)
new_affine = Affine(a, b, c, d, e, f)
# Read from the GDAL source
new_array = raster.read(band, window=win, boundless=True)
return new_array, new_array.shape, new_affine
def zonal_stats(feature, raster, band=1, f_crs=None):
"""
A stripped down version of rasterstats.zonal_stats.
This circumvents issues with shapely to prevent errors with GEOS setup.
Feature is assumed to be geographic (WGS84) unless a separate CRS is specified.
:param feature: A dictionary that adheres to the __geo_interface__ for a Feature.
:param raster: The path to a rasterio-readable dataset.
:param band: The band to perform the zonal statistics on.
:param f_crs: The feature's coordinate reference system. Default is geographic (WGS84).
"""
if f_crs is None:
f_crs = {'init': 'EPSG:4326'}
with rasterio.open(raster, 'r') as src:
# Get the overall raster affine
src_affine = src.transform
# What's the nodata value?
nodata = src.nodata
# Project geometry to src CRS
geom = transform_geom(f_crs, src.crs, feature['geometry'])
# Get bounds
geom_bounds = features.bounds(geom)
# Get the source data from the bounds
fsrc, shape, affine = read_window(src, src_affine, geom_bounds, band=band)
# Get the rasterized geometry with similar affine and shape
rv_array = rasterize_geom(geom, affine=affine, shape=shape)
# Get a nodata mask
isnodata = (fsrc == nodata)
# Add a nan mask
if numpy.issubdtype(fsrc.dtype, float) and numpy.isnan(fsrc.min()):
isnodata = (isnodata | numpy.isnan(fsrc))
# Create the masked array
masked = numpy.ma.MaskedArray(fsrc, mask=(isnodata | ~rv_array))
# Calculate pixel counts
if masked.compressed().size == 0:
feature_stats = {}
else:
keys, counts = numpy.unique(masked.compressed(), return_counts=True)
feature_stats = dict(
dict(
zip(
[numpy.asscalar(k) for k in keys],
[numpy.asscalar(c) for c in counts]
)
)
)
return feature_stats, masked
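# --- Hedged usage sketch (not part of the original module) ---
# Counts raster pixel values inside a GeoJSON feature using the stripped-down
# zonal_stats() above. The raster path is a placeholder.
def example_zonal_stats(feature):
    # Returns ({pixel_value: count, ...}, masked_array); an empty dict means
    # the feature did not overlap any valid (non-nodata) pixels.
    stats, masked = zonal_stats(feature, "templates/stateclass.tif", band=1)
    return stats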
| {
"content_hash": "e72e208b4a5280b73ad787f9a35fcb5a",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 105,
"avg_line_length": 33.84146341463415,
"alnum_prop": 0.6113513513513513,
"repo_name": "consbio/landscapesim",
"id": "fa3e87bde4eff3b211dd7579f41c33d54a4a1c61",
"size": "5550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "landscapesim/common/geojson.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "30150"
},
{
"name": "HTML",
"bytes": "44914"
},
{
"name": "JavaScript",
"bytes": "116793"
},
{
"name": "Python",
"bytes": "206671"
}
],
"symlink_target": ""
} |
from nose.tools import *
from antwort.lexer import AntwortLexer
from antwort.parser import AntwortParser
def assert_not_none(obj):
assert_not_equal(obj, None)
mute = True
# call tests with -vs
def log(fn):
if mute:
return fn
def w(*args, **kwargs):
s = args[0]
print(s.next())
return fn(*args, **kwargs)
return w
AntwortParser.match = log(AntwortParser.match)
def test_question_with_checkboxes():
'Case: Matches a question with a checkbox'
# Careful: We need the newline at the end!
input_file = ("1. Geschlecht (gender)\n"
"[ ] Männlich (m)\n"
"[ ] Weiblich (w)\n"
"[ ] Andere (o)\n")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
parser.question()
def test_question_with_checkboxes_and_explanation():
'Case: Matches a question with checkboxes and optional explanatory text'
# Careful: We need this newline at the end!
input_file = ("1. Geschlecht (gender)\n"
"Sag mir, wie du dich fühlst!\n"
"[ ] Männlich (m)\n"
"[ ] Weiblich (w)\n"
"[ ] Andere (o)\n")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
parser.question()
def test_two_questions_with_checkboxes():
'Case: Matches two questions with checkboxes'
input_file = ("1. Geschlecht (gender)\n"
"[ ] Männlich (m)\n"
"[ ] Weiblich (w)\n"
"\n"
"2. Student (student)\n"
"[ ] Ja (y)\n"
"[ ] Nein (n)\n")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
parser.questions()
def test_case_question_with_checkboxes():
'Case: Question with checkboxes'
input_file = """1. Geschlecht (gender)
Sag mir, wie du dich fühlst!
[ ] Männlich (m)
[ ] Weiblich (w)
[ ] Andere (o)
"""
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
parser.question()
def test_case_question_with_radio_buttons():
'Case: Question with radios'
input_file = """1. Geschlecht (gender)
Sag mir, wie du dich fühlst!
( ) Männlich (m)
( ) Weiblich (w)
( ) Andere (o)
"""
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
parser.question()
def test_question_with_dropdown():
'Case: Question with dropdown'
input_file = """1. Wo wohnst du? (city)
Bitte gib deinen jetzigen Wohnort an!
[
Heidelberg (HD)
Mannheim (Ma)
]
"""
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
parser.question()
def test_question_with_input_field():
'Case: Question with input_field'
input_file = """1. Wo wohnst du? (city)
Bitte gib deinen jetzigen Wohnort an!
[__Wohnort__]
"""
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
parser.question()
def test_question_with_scale():
'Case: Question with scale'
input_file = """1. Wie glücklich bist du an deinem Wohnort? (happy)
{ Sau Geil (1) -- Geil (2) -- Nicht So Geil (3) }
"""
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
expression = parser.question()
assert_not_none(expression.header)
assert_not_none(expression.options)
def test_question_head():
'Matches a question head: 1. Alter (age)'
input_file = ("1. Alter (age)\n")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.question_head()
assert_equal(expression.number.value, 1)
assert_equal(expression.variable.label.text, 'Alter')
assert_equal(expression.variable.value.name, 'age')
assert_equal(expression.explanation, None)
def test_question_head_with_number():
'''Matches a question head that contains a number:
1. Hast du im Jahr 2013 schon einmal mitgemacht? (follow_up)'''
input_file = (
"1. Hast du im Jahr 2013 schon einmal mitgemacht? (follow_up)\n")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.question_head()
assert_equal(expression.number.value, 1)
assert_equal(expression.variable.label.text,
'Hast du im Jahr 2013 schon einmal mitgemacht?')
def test_question_head_with_asterisk():
'Matches a question head with asterisk: 1. Alter (age) *'
input_file = ("1. Alter (age) *\n")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.question_head()
assert_equal(expression.number.value, 1)
assert_equal(expression.variable.label.text, 'Alter')
assert_equal(expression.variable.value.name, 'age')
assert_equal(expression.required, True)
assert_equal(expression.explanation, None)
def test_question_head_with_explanation():
'Matches a question head with explanation: 1. Alter (age) \ Wie alt bist du?'
input_file = ("1. Alter (age)\n"
"That explains many\n")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.question_head()
assert_equal(expression.number.value, 1)
assert_equal(expression.variable.label.text, 'Alter')
assert_equal(expression.variable.value.name, 'age')
assert_equal(expression.required, False)
assert_equal(expression.explanation.text, 'That explains many')
def test_option_checkboxes():
'Matches checkbox list'
input_file = ("[ ] Männlich (m)\n"
"[ ] Weiblich (w)\n"
"[ ] Andere (o)\n")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.checkboxes()
assert_equal(expression.checkboxes[0].variable.label.text, 'Männlich')
assert_equal(expression.checkboxes[0].variable.value.name, 'm')
assert_equal(expression.checkboxes[1].variable.label.text, 'Weiblich')
assert_equal(expression.checkboxes[1].variable.value.name, 'w')
assert_equal(expression.checkboxes[2].variable.label.text, 'Andere')
assert_equal(expression.checkboxes[2].variable.value.name, 'o')
def test_option_radios():
'Matches radio button list'
input_file = ("() Männlich (m)\n"
"() Weiblich (w)\n"
"() Andere (o)\n")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.radio_buttons()
assert_equal(expression.radios[0].variable.label.text, 'Männlich')
assert_equal(expression.radios[0].variable.value.name, 'm')
assert_equal(expression.radios[1].variable.label.text, 'Weiblich')
assert_equal(expression.radios[1].variable.value.name, 'w')
assert_equal(expression.radios[2].variable.label.text, 'Andere')
assert_equal(expression.radios[2].variable.value.name, 'o')
def test_option_radio():
'Matches a radio button: '
input_file = ("( ) Männlich (m)")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.radio()
assert_equal(expression.variable.label.text, 'Männlich')
assert_equal(expression.variable.value.name, 'm')
def test_option_checkbox():
'Matches a checkbox: '
input_file = ("[ ] Männlich (m)")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.checkbox()
assert_equal(expression.variable.label.text, 'Männlich')
assert_equal(expression.variable.value.name, 'm')
def test_input_field():
'Matches an input field: [__test__] \ [__ __]'
input_file = ("[__ placeholder __]\n"
"[_________________]")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
expression = parser.input_field()
assert_equal(expression.placeholder.placeholder, 'placeholder')
assert_equal(expression.placeholder.length, 2 + len('placeholder') + 2)
assert_equal(expression.lines, 2)
def test_field():
'Matches a field placeholder: [__test__]'
input_file = "[__ placeholder __]"
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.field()
assert_equal(expression.placeholder, 'placeholder')
assert_equal(expression.length, 2 + len('placeholder') + 2)
def test_input_field_with_range():
'Matches a field with range: [__test__] (1 - 5)'
input_file = "[__ placeholder __] (1 - 999)"
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
expression = parser.input_field()
assert_equal(expression.type, 'number')
assert_equal(expression.range.min, 1)
assert_equal(expression.range.max, 999)
def test_matrix():
'Matches a matrix (Scale and List of Items)'
input_file = """{ Sau Geil (1) -- Geil (2) -- Nicht So Geil (3) }
[
Mannheim (MA)
Heidelberg (HD)
]
"""
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
expression = parser.options()
assert_equal(expression.scale.steps[0].label.text, 'Sau Geil')
assert_equal(expression.list.elements[0].variable.label.text, 'Mannheim')
def test_scale():
'Matches a scale: { Sehr gut (1) -- Gut (2) }'
input_file = ("{ Sehr gut (1) -- Gut (2) }")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.scale()
assert_equal(expression.steps[0].label.text, 'Sehr gut')
assert_equal(expression.steps[0].value.value, 1)
assert_equal(expression.steps[1].label.text, 'Gut')
assert_equal(expression.steps[1].value.value, 2)
def test_list():
'Matches a list of items: [ A (a) \ B (b) ...]'
input_file = ("[\n"
"Sehr gut (sg)\n"
"Gut (g)\n"
"]\n")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.list()
assert_equal(expression.elements[0].variable.label.text, 'Sehr gut')
assert_equal(expression.elements[0].variable.value.name, 'sg')
assert_equal(expression.elements[1].variable.label.text, 'Gut')
assert_equal(expression.elements[1].variable.value.name, 'g')
def test_element():
'Matches an element in a list'
input_file = "Sehr gut (sg)\n"
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.element()
assert_equal(expression.variable.label.text, 'Sehr gut')
assert_equal(expression.variable.value.name, 'sg')
def test_string_variable():
'Matches a string variable: Hauptschule (hs)'
input_file = ("Hauptschule (hs)")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.string_variable()
assert_equal(expression.label.text, 'Hauptschule')
assert_equal(expression.value.name, 'hs')
def test_number_variable():
'Matches a number variable: Verhandlungssicher (5)'
input_file = ("Verhandlungssicher (5)")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.number_variable()
assert_equal(expression.label.text, 'Verhandlungssicher')
assert_equal(expression.value.value, 5)
def test_numbering():
'Matches a numbering: 123.'
input_file = ("123.")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.numbering()
assert_equal(expression.value, 123)
def test_identifier():
'Matches an identifier: (m)'
input_file = ("(m)")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.identifier()
assert_equal(expression.name, 'm')
def test_range():
'Matches a range: (1 - 2)'
input_file = ("(1 - 2)")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.range()
assert_equal(expression.min, 1)
assert_equal(expression.max, 2)
def test_identifier_with_type_constraint():
'Matches an identifier with type constraint: (m)'
input_file = ("(m)")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 1)
expression = parser.identifier()
assert_equal(expression.name, 'm')
def test_identifier_with_underscores():
'Matches an identifier with underscores: (years_active)'
input_file = ("(years_active)")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
expression = parser.identifier()
assert_equal(expression.name, 'years_active')
def test_value():
'Matches a value: (12)'
input_file = ("(12)")
lexer = AntwortLexer(input_file)
parser = AntwortParser(lexer, 2)
expression = parser.number_value()
assert_equal(expression.value, 12)
| {
"content_hash": "cf2851918257685193c6c0fda2b3e6ee",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 81,
"avg_line_length": 32.224489795918366,
"alnum_prop": 0.6401994933502216,
"repo_name": "cessor/antwort",
"id": "2784ad760cd5d749099dd5dc1ccad528e74309ed",
"size": "12667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/test_antwortparser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42726"
}
],
"symlink_target": ""
} |
import datetime
from django.utils import six
from django.utils.six.moves.urllib import parse
from django.views.generic import ListView, DetailView
from django.http import Http404
from django.shortcuts import redirect, get_object_or_404
from django.contrib.syndication.views import Feed
from django.core import urlresolvers
from widgy.utils import build_url
from widgy.templatetags.widgy_tags import render_root
from widgy.models import Node
from widgy.contrib.form_builder.views import HandleFormMixin
from .models import Blog, BlogLayout, Tag
from .site import site
from .utils import date_list_to_archive_list
class RedirectGetHandleFormMixin(HandleFormMixin):
"""
A HandleFormMixin that redirects GET requests carrying a `?from=...`
parameter back to that URL.
"""
def get(self, request, *args, **kwargs):
from_ = request.GET.get('from')
if from_:
return redirect(from_)
else:
return super(RedirectGetHandleFormMixin, self).get(request, *args, **kwargs)
class BlogRenderer(object):
def __init__(self, blog, url_kwargs, request):
self.blog = blog
self.url_kwargs = url_kwargs
root_node_pk = url_kwargs.get('root_node_pk')
if root_node_pk:
self.root_node = get_object_or_404(Node, pk=root_node_pk)
else:
self.root_node = blog._meta.get_field('content').get_render_node(blog, {'request': request})
if not self.root_node:
self.root_node = (blog.content.head and blog.content.head.root_node) or blog.content.working_copy
self.content = self.root_node.content
@property
def title(self):
return self.content.title
def get_absolute_url(self):
return self.blog.get_absolute_url_with_layout(self.content)
@property
def slug(self):
return self.content.slug
@property
def date(self):
return self.blog.date
def render(self):
return render_root({'root_node_override': self.root_node}, self.blog, 'content')
@property
def has_incorrect_slug(self):
try:
return self.slug != self.url_kwargs['slug']
except KeyError:
# preview/form submit, slug doesn't matter
return False
class BlogQuerysetMixin(object):
model = BlogLayout
def get_queryset(self):
return self.get_published_blogs()
def get_published_blogs(self):
return self.model.objects.select_related('image').published().order_by('-date')
def get_archive_years(self, qs):
return date_list_to_archive_list(qs.values_list('date', flat=True).order_by('-date'))
def get_context_data(self, **kwargs):
data = super(BlogQuerysetMixin, self).get_context_data(**kwargs)
data['blog_archive'] = self.get_archive_years(self.get_published_blogs())
return data
class BlogListView(BlogQuerysetMixin, ListView):
context_object_name = 'blog_list'
template_name = 'widgy/widgy_blog/blog_list.html'
paginate_by = 10
def get_canonical_url(self):
"""Determine whether to send a canonical url for the blog list.
A blog list view without any query should be the same as a blog
list view with query `page=1`. The `page=1` view should have a canonical
link to the simpler URL.
"""
if self.request.GET.get('page') == '1':
querystring = self.request.GET.copy()
del querystring['page']
return parse.urlunsplit(('', '', self.request.path, querystring.urlencode(), ''))
else:
return None
def get_neighbor_pages(self):
querystring = self.request.GET.copy()
paginator = self.get_paginator(self.get_queryset(), self.paginate_by)
page = self.get_current_page(paginator)
prev_page = None
next_page = None
if page.has_previous():
prev_page = page.previous_page_number()
if page.has_next():
next_page = page.next_page_number()
return {'prev': prev_page, 'next': next_page}
def get_neighbor_rel_links(self):
neighbor_pages = self.get_neighbor_pages()
querystring = self.request.GET.copy()
prev_link = None
next_link = None
if neighbor_pages['prev']:
if neighbor_pages['prev'] == 1:
# Link to the canonical url
del querystring['page']
else:
querystring['page'] = neighbor_pages['prev']
prev_link = build_url(self.request.path, querystring)
if neighbor_pages['next']:
querystring['page'] = neighbor_pages['next']
next_link = build_url(self.request.path, querystring)
return {'prev_link': prev_link, 'next_link': next_link}
def get_current_page(self, paginator):
"""Return the current page number.
Taken from paginate_queryset in ListView."""
page = self.kwargs.get(self.page_kwarg) or self.request.GET.get(self.page_kwarg) or 1
try:
page_num = int(page)
except ValueError:
if page == 'last':
page_num = paginator.num_pages
else:
raise Http404("Page is not 'last', nor can it be converted to an int.")
return paginator.page(page_num)
def get_context_data(self, **kwargs):
kwargs = super(BlogListView, self).get_context_data(**kwargs)
kwargs['tags'] = Tag.objects.all()
kwargs['canonical_url'] = self.get_canonical_url()
kwargs.update(self.get_neighbor_rel_links())
return kwargs
class TagView(BlogListView):
def get_queryset(self):
self.tag = get_object_or_404(Tag, slug=self.kwargs['tag'])
return super(TagView, self).get_queryset().filter(tags=self.tag)
def get_context_data(self, **kwargs):
kwargs = super(TagView, self).get_context_data(**kwargs)
kwargs['current_tag'] = self.tag
return kwargs
class BlogYearArchiveView(BlogListView):
def get_queryset(self):
qs = super(BlogYearArchiveView, self).get_queryset()
return qs.filter(date__year=self.kwargs['year'])
def get_context_data(self, **kwargs):
kwargs = super(BlogYearArchiveView, self).get_context_data(**kwargs)
year = int(self.kwargs['year'])
kwargs['archive_date'] = datetime.date(year, 1, 1)
return kwargs
class BlogMonthArchiveView(BlogListView):
def get_queryset(self):
qs = super(BlogMonthArchiveView, self).get_queryset()
return qs.filter(
date__year=self.kwargs['year'],
date__month=self.kwargs['month']
)
def get_context_data(self, **kwargs):
kwargs = super(BlogMonthArchiveView, self).get_context_data(**kwargs)
year = int(self.kwargs['year'])
month = int(self.kwargs['month'])
kwargs['archive_date'] = datetime.date(year, month, 1)
return kwargs
class BlogDetailView(BlogQuerysetMixin, RedirectGetHandleFormMixin, DetailView):
context_object_name = 'blog'
template_name = 'widgy/widgy_blog/blog_detail.html'
site = site
owner_class = Blog
def get_object(self):
try:
# this method is called more than once
return self.object
except AttributeError:
pass
self.root_node_pk = self.kwargs.get('root_node_pk')
qs = self.owner_class.objects.filter(
pk=self.kwargs['pk']
).select_related(
'content__head__root_node',
)
if self.root_node_pk:
self.site.authorize_view(self.request, self)
else:
qs = qs.filter(
content__commits__root_node__content_id__in=self.get_queryset()
)
blog = get_object_or_404(qs)
return BlogRenderer(blog, self.kwargs, self.request)
def dispatch(self, request, *args, **kwargs):
self.object = blog = self.get_object()
if blog.has_incorrect_slug:
return redirect(blog)
return super(BlogDetailView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs = super(BlogDetailView, self).get_context_data(**kwargs)
kwargs['object'] = self.object
if hasattr(self, 'form_node'):
self.object.root_node = self.form_node.get_root()
# BlogRenderer calculates and fetches this
kwargs['root_node_override'] = self.object.root_node
return kwargs
list = BlogListView.as_view()
year_archive = BlogYearArchiveView.as_view()
month_archive = BlogMonthArchiveView.as_view()
detail = BlogDetailView.as_view()
tag = TagView.as_view()
class RssFeed(Feed):
title = "Blog Feed"
link = urlresolvers.reverse_lazy('blog_list')
model = BlogLayout
def get_object(self, request, tag=None):
if tag is not None:
return get_object_or_404(Tag, slug=tag)
else:
return None
def items(self, obj=None):
qs = self.model.objects.published()
if obj is not None:
qs = qs.filter(tags=obj)
return qs
def item_title(self, item):
return item.title
def item_pubdate(self, item):
# Instead of this, BlogLayout.date should be a datetime.
return datetime.datetime.combine(item.date, datetime.time())
def item_author_name(self, item):
return item.author.get_full_name()
def item_description(self, item):
return item.description
feed = RssFeed()
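# --- Hedged usage sketch (not part of the original module) ---
# One way a project urls.py could wire up the view callables defined above.
# The 'blog_list' name matches what RssFeed reverses; the remaining names and
# URL patterns are assumptions for illustration (Django 1.x style url()).
from django.conf.urls import url

example_urlpatterns = [
    url(r'^$', list, name='blog_list'),
    url(r'^(?P<year>\d{4})/$', year_archive, name='blog_year_archive'),
    url(r'^(?P<year>\d{4})/(?P<month>\d{1,2})/$', month_archive,
        name='blog_month_archive'),
    url(r'^tag/(?P<tag>[\w-]+)/$', tag, name='blog_tag'),
    url(r'^feed/(?:(?P<tag>[\w-]+)/)?$', feed, name='blog_feed'),
    # BlogDetailView expects both pk and slug in the URL kwargs.
    url(r'^(?P<pk>\d+)-(?P<slug>[\w-]+)/$', detail, name='blog_detail'),
]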
| {
"content_hash": "5414fe440b26f0413cd0b39c87522cad",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 113,
"avg_line_length": 32.33673469387755,
"alnum_prop": 0.6229094351530451,
"repo_name": "fusionbox/django-widgy-blog",
"id": "0d09911dbbf047792c36af8565bb6256551f058f",
"size": "9507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "widgy_blog/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "1903"
},
{
"name": "Python",
"bytes": "67436"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.urls import reverse
from django.db import models as django_db_models
from django.shortcuts import get_object_or_404
from django.utils.translation import get_language
from rest_framework import serializers as rest_serializers
from rest_framework import serializers as rest_fields
from .models import Theme, RecordSource, TargetPortal, FileType, Attachment, Label
class TranslatedModelSerializer(rest_serializers.ModelSerializer):
def get_field(self, model_field):
kwargs = {}
if issubclass(model_field.__class__,
(django_db_models.CharField,
django_db_models.TextField)):
if model_field.null:
kwargs['allow_none'] = True
kwargs['max_length'] = getattr(model_field, 'max_length')
return rest_fields.CharField(**kwargs)
return super().get_field(model_field)
class PictogramSerializerMixin(rest_serializers.ModelSerializer):
pictogram = rest_serializers.ReadOnlyField(source='get_pictogram_url')
class PicturesSerializerMixin(rest_serializers.ModelSerializer):
thumbnail = rest_serializers.ReadOnlyField(source='serializable_thumbnail')
pictures = rest_serializers.ReadOnlyField(source='serializable_pictures')
videos = rest_serializers.ReadOnlyField(source='serializable_videos')
files = rest_serializers.ReadOnlyField(source='serializable_files')
class Meta:
fields = ('thumbnail', 'pictures', 'videos', 'files')
class BasePublishableSerializerMixin(rest_serializers.ModelSerializer):
class Meta:
fields = ('published', 'published_status', 'publication_date')
class PublishableSerializerMixin(BasePublishableSerializerMixin):
printable = rest_serializers.SerializerMethodField('get_printable_url')
filelist_url = rest_serializers.SerializerMethodField()
def get_printable_url(self, obj):
if settings.ONLY_EXTERNAL_PUBLIC_PDF:
file_type = get_object_or_404(FileType, type="Topoguide")
if not Attachment.objects.attachments_for_object_only_type(obj, file_type).exists():
return None
appname = obj._meta.app_label
modelname = obj._meta.model_name
return reverse('%s:%s_printable' % (appname, modelname),
kwargs={'lang': get_language(), 'pk': obj.pk, 'slug': obj.slug})
def get_filelist_url(self, obj):
appname = obj._meta.app_label
modelname = obj._meta.model_name
return reverse('get_attachments', kwargs={'app_label': appname,
'model_name': modelname,
'pk': obj.pk})
class Meta:
fields = ('name', 'slug', 'map_image_url', 'filelist_url', 'printable') + \
BasePublishableSerializerMixin.Meta.fields
class ThemeSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
class Meta:
model = Theme
fields = ('id', 'pictogram', 'label')
class RecordSourceSerializer(PictogramSerializerMixin, rest_serializers.ModelSerializer):
class Meta:
model = RecordSource
fields = ('name', 'website', 'pictogram')
class TargetPortalSerializer(rest_serializers.ModelSerializer):
class Meta:
model = TargetPortal
fields = ('name', 'website')
class LabelSerializer(PictogramSerializerMixin, TranslatedModelSerializer):
filter_rando = rest_serializers.ReadOnlyField(source='filter')
class Meta:
model = Label
fields = ('id', 'pictogram', 'name', 'advice', 'filter_rando')
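# --- Hedged usage sketch (not part of the original module) ---
# Typical use of the serializers above from a view or shell. The example output
# in the comment is illustrative; ThemeSerializer only returns the 'id',
# 'pictogram' and 'label' fields declared in its Meta.
def example_theme_payloads():
    # e.g. [{'id': 1, 'pictogram': '/media/upload/theme-lake.svg', 'label': 'Lake'}, ...]
    return ThemeSerializer(Theme.objects.all(), many=True).data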
| {
"content_hash": "3bc9eacc707d391b3e7dee5434a6b30c",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 96,
"avg_line_length": 38.680851063829785,
"alnum_prop": 0.672992299229923,
"repo_name": "makinacorpus/Geotrek",
"id": "dd76f5f9637d2f7feab97438180fbd538727ab22",
"size": "3636",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geotrek/common/serializers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "30638"
},
{
"name": "HTML",
"bytes": "141008"
},
{
"name": "JavaScript",
"bytes": "184508"
},
{
"name": "Makefile",
"bytes": "4170"
},
{
"name": "PLpgSQL",
"bytes": "85546"
},
{
"name": "Python",
"bytes": "2768434"
},
{
"name": "Shell",
"bytes": "18090"
}
],
"symlink_target": ""
} |
import sys
sys.path.insert(0,'..')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'dev' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'cghr2a5@x&ftmpsd-rjwzd3k3k08zeng1^+o=)ee)!v_--bwj0'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'synagg',
)
| {
"content_hash": "846194aa94f79af93f44271336d7ec2e",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 108,
"avg_line_length": 34.74074074074074,
"alnum_prop": 0.7043354655294953,
"repo_name": "justquick/django-synagg",
"id": "1bb3c94ebd337efab6027e9159c0a5d0f76aa9b7",
"size": "2853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8850"
}
],
"symlink_target": ""
} |
import unittest
from mock import Mock, patch
from mangrove.errors.MangroveException import DataObjectNotFound
from mangrove.form_model.form_submission import FormSubmission
from mangrove.form_model.field import HierarchyField, GeoCodeField, ShortCodeField
from mangrove.form_model.form_model import LOCATION_TYPE_FIELD_NAME
from mangrove.datastore.database import DatabaseManager
from mangrove.datastore.entity import Entity
from mangrove.form_model.field import TextField
from mangrove.form_model.form_model import FormModel
from mangrove.utils.test_utils.dummy_location_tree import DummyLocationTree
from mangrove.utils.types import is_empty
from mangrove.transport.work_flow import ActivityReportWorkFlow, RegistrationWorkFlow
from mangrove.transport.contract.response import create_response_from_form_submission
class TestRegistrationWorkFlow(unittest.TestCase):
def setUp(self):
self.dbm = Mock(spec=DatabaseManager)
self.form_model_mock = Mock(spec=FormModel)
self.form_model_mock.get_field_by_name = self._location_field
self.get_entity_count = patch('mangrove.transport.work_flow.get_entity_count_for_type', new=dummy_get_entity_count_for_type,spec=True)
self.get_by_short_code_include_voided = patch('mangrove.transport.work_flow.get_by_short_code_include_voided', new=dummy_get_by_short_code_include_voided)
self.get_by_short_code_include_voided.start()
self.get_entity_count.start()
    def tearDown(self):
        self.get_entity_count.stop()
        self.get_by_short_code_include_voided.stop()
def test_should_generate_default_code_if_short_code_is_empty(self):
registration_work_flow = RegistrationWorkFlow(self.dbm, self.form_model_mock, DummyLocationTree())
self.form_model_mock.get_short_code = Mock(return_value=None)
self.form_model_mock.entity_type=['clinic']
self.form_model_mock.entity_questions = [ShortCodeField(name="entity question", code="s", label="bar")]
values = registration_work_flow.process({'t': 'clinic', 'l':'pune'})
self.assertEqual({'s': 'cli1', 't': 'clinic', 'l': ['pune', 'mh', 'india']}, values)
def test_should_set_location_data(self):
self._generate_short_code_if_empty_patch = patch('mangrove.transport.work_flow.RegistrationWorkFlow._generate_short_code_if_empty')
self._generate_short_code_if_empty_mock = self._generate_short_code_if_empty_patch.start()
self.process_submission_patch = patch('mangrove.form_model.location.Location.process_submission')
self.process_submission_mock = self.process_submission_patch.start()
values = ['a','b']
RegistrationWorkFlow(self.dbm, self.form_model_mock, DummyLocationTree()).process(values)
self.assertEquals(1, self.process_submission_mock.call_count)
self._generate_short_code_if_empty_patch.stop()
self.process_submission_patch.stop()
def _location_field(self,*args,**kwargs):
name = kwargs.get('name')
if name is LOCATION_TYPE_FIELD_NAME:
location_field = Mock(spec=HierarchyField)
location_field.code='l'
return location_field
geo_code_field=Mock(spec=GeoCodeField)
geo_code_field.code='g'
return geo_code_field
def dummy_get_by_short_code_include_voided(dbm,short_code,entity_type):
raise DataObjectNotFound("Entity","Not found",short_code)
def dummy_get_entity_count_for_type(dbm, entity_type):
return 0
def dummy_get_location_hierarchy(foo):
return [u'arantany']
class TestResponse(unittest.TestCase):
def test_should_initialize_response(self):
response = create_response_from_form_submission(reporters=None, form_submission=None)
self.assertFalse(response.success)
self.assertTrue(is_empty(response.errors))
self.assertTrue(is_empty(response.reporters))
def test_should_initialize_response_with_reporters(self):
reporters=[1]
response = create_response_from_form_submission(reporters=reporters, form_submission=None)
self.assertEquals(reporters,response.reporters)
def test_should_initialize_response_from_form_submission(self):
form_submission_mock = Mock(spec=FormSubmission)
form_submission_mock.saved=True
form_submission_mock.errors=[]
expected_data_record_id = 123
form_submission_mock.data_record_id= expected_data_record_id
expected_short_code = 456
form_submission_mock.short_code= expected_short_code
        expected_cleaned_data = {'a': 1}
        form_submission_mock.cleaned_data= expected_cleaned_data
form_submission_mock.is_registration=False
expected_entity_type = 'entity_type'
form_submission_mock.entity_type= expected_entity_type
expected_form_code = '1'
form_model_mock = Mock()
form_model_mock.form_code = expected_form_code
form_submission_mock.form_model = form_model_mock
response = create_response_from_form_submission(reporters=None, form_submission=form_submission_mock)
self.assertTrue(response.success)
self.assertTrue(is_empty(response.errors))
self.assertTrue(is_empty(response.reporters))
self.assertTrue(response.success)
self.assertEquals(expected_data_record_id,response.datarecord_id)
self.assertEquals(expected_short_code,response.short_code)
        self.assertEquals(expected_cleaned_data,response.processed_data)
self.assertFalse(response.is_registration)
self.assertEquals(expected_entity_type,response.entity_type)
self.assertEquals(expected_entity_type,response.entity_type)
self.assertEquals(expected_form_code,response.form_code)
| {
"content_hash": "d5b55826f7aa9fcb809b21a1f5d1dc6c",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 162,
"avg_line_length": 48.666666666666664,
"alnum_prop": 0.7207586933614331,
"repo_name": "ICT4H/dcs-mangrove",
"id": "d9eb14d83d0e1b746e23adb86f0c44751c223e19",
"size": "5694",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "mangrove/transport/tests/test_facade.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "700265"
}
],
"symlink_target": ""
} |
from pbr import version
version_info = version.VersionInfo('mistral')
version_string = version_info.version_string
| {
"content_hash": "3497ae5720b5faeee2be9395101ff128",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 29,
"alnum_prop": 0.8017241379310345,
"repo_name": "dmitryilyin/mistral",
"id": "f3abddf6d1f51b17f6d508e5adcf618dfbb4e3f7",
"size": "750",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mistral/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
MIT License
Copyright (c) 2016 Jesse Hogan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from diff_match_patch import diff_match_patch
from entities import entity
from pdb import set_trace; B=set_trace
# TODO Write test
class diff(entity):
def __init__(self, data1, data2=None):
# If data2 is None, data1 is assumed to be a text-based version of the
# patch
self._data1 = data1
self._data2 = data2
self._ps = None
self._dmp = None
@property
def _diff_match_patch(self):
if not self._dmp:
self._dmp = diff_match_patch()
        return self._dmp
@property
def _patches(self):
        if self._ps is None:
dmp = self._diff_match_patch
if self._data2:
diffs = dmp.diff_main(self._data1, self._data2)
dmp.diff_cleanupSemantic(diffs)
self._ps = dmp.patch_make(diffs)
else:
self._ps = dmp.patch_fromText(self._data1)
return self._ps
def applyto(self, data):
dmp = self._diff_match_patch
return dmp.patch_apply(self._patches, data)[0]
def __str__(self):
dmp = self._diff_match_patch
return dmp.patch_toText(self._patches)
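# A minimal, hedged usage sketch (not part of the original module); the
# function name and the sample strings below are illustrative only.
def _example_diff_roundtrip():
    before = "the quick brown fox"
    after = "the quick red fox"
    d = diff(before, after)          # build a patch from two texts
    patch_text = str(d)              # serialize the patch to text
    restored = diff(patch_text)      # rebuild the patch from its text form
    assert restored.applyto(before) == after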
| {
"content_hash": "bf125702b96b5b1c7db07db3395f90c1",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 78,
"avg_line_length": 35.63492063492063,
"alnum_prop": 0.6770601336302895,
"repo_name": "jhogan/epiphany-py",
"id": "5984ca994d9556edd5ca6dd232ad11eedc3c8510",
"size": "2280",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "diff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182769"
}
],
"symlink_target": ""
} |
import re
from datetime import datetime
from typing import Any, Dict, List
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.actions import check_send_stream_message
from zerver.lib.exceptions import JsonableError
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.models import Client, UserProfile
SUBJECT_TEMPLATE = "{service_url}"
def send_message_for_event(event: Dict[str, Any], user_profile: UserProfile,
client: Client, stream: str) -> None:
event_type = get_event_type(event)
subject = SUBJECT_TEMPLATE.format(service_url=event['check']['url'])
body = EVENT_TYPE_BODY_MAPPER[event_type](event)
check_send_stream_message(user_profile, client, stream, subject, body)
def get_body_for_up_event(event: Dict[str, Any]) -> str:
body = "Service is `up`"
event_downtime = event['downtime']
if event_downtime['started_at']:
body = "{} again".format(body)
string_date = get_time_string_based_on_duration(event_downtime['duration'])
if string_date:
body = "{} after {}".format(body, string_date)
return "{}.".format(body)
def get_time_string_based_on_duration(duration: int) -> str:
    days, remainder = divmod(duration, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
string_date = ''
string_date += add_time_part_to_string_date_if_needed(days, 'day')
string_date += add_time_part_to_string_date_if_needed(hours, 'hour')
string_date += add_time_part_to_string_date_if_needed(minutes, 'minute')
string_date += add_time_part_to_string_date_if_needed(seconds, 'second')
return string_date.rstrip()
def add_time_part_to_string_date_if_needed(value: int, text_name: str) -> str:
if value == 1:
return "1 {} ".format(text_name)
if value > 1:
return "{} {}s ".format(value, text_name)
return ''
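# Hedged worked example (not part of the original module): for a downtime of
# 90061 seconds, the divmod chain above yields 1 day, 1 hour, 1 minute and
# 1 second, so get_time_string_based_on_duration(90061) returns
# '1 day 1 hour 1 minute 1 second' (the trailing space is stripped).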
def get_body_for_down_event(event: Dict[str, Any]) -> str:
return "Service is `down`. It returned a {} error at {}.".format(
event['downtime']['error'],
event['downtime']['started_at'].replace('T', ' ').replace('Z', ' UTC'))
@api_key_only_webhook_view('Updown')
@has_request_variables
def api_updown_webhook(request, user_profile,
payload=REQ(argument_type='body'),
stream=REQ(default='updown')):
# type: (HttpRequest, UserProfile, List[Dict[str, Any]], str) -> HttpResponse
for event in payload:
send_message_for_event(event, user_profile, request.client, stream)
return json_success()
EVENT_TYPE_BODY_MAPPER = {
'up': get_body_for_up_event,
'down': get_body_for_down_event
}
def get_event_type(event: Dict[str, Any]) -> str:
event_type_match = re.match('check.(.*)', event['event'])
if event_type_match:
event_type = event_type_match.group(1)
if event_type in EVENT_TYPE_BODY_MAPPER:
return event_type
raise JsonableError(_('Unsupported Updown event type: %s') % (event['event'],))
| {
"content_hash": "31b11961cdc76046dd8e2d6a8a4da931",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 83,
"avg_line_length": 40.620253164556964,
"alnum_prop": 0.6625116858834528,
"repo_name": "mahim97/zulip",
"id": "a1b89221528e8e224134303b7c4bc1a3683cff73",
"size": "3247",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zerver/webhooks/updown/view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "299188"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "542463"
},
{
"name": "JavaScript",
"bytes": "1605569"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3510480"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
} |
import os
import shutil
from b3.project_filesystems import ProjectFilesystemBase
class DefaultProjectFilesystem(ProjectFilesystemBase):
"""This class incapsulates file system utils relative to the project root."""
def __init__(self, config):
ProjectFilesystemBase.__init__(self, config)
def read_file(self, path, must_exist=True):
"""Reads the file and returns its content. Raises IOError if file doesn't
exist and must_exist is True, otherwise returns None.
"""
if not self.exists(path) or not self.is_file(path):
if must_exist:
raise IOError()
else:
return None
with open(path, "r") as f:
return f.read()
def write_to_file(self, path, data):
with open(path, "w") as f:
f.write(data)
def is_file(self, path):
return os.path.isfile(path)
def is_dir(self, path):
"""Returns True if path is an existing directory."""
return os.path.isdir(path)
def remove_dirs(self, path):
if self.exists(path):
shutil.rmtree(path)
def make_dirs(self, path, mode=0777):
os.makedirs(path, mode)
def exists(self, path):
return os.path.exists(self.resolve(path))
def resolve(self, path):
if os.path.isabs(path) and os.path.exists(path):
return path
return os.path.abspath(os.path.join(self._root_dir, path))
def get_bin_dir(self):
return self._bin_dir
def get_root_dir(self):
return self._root_dir
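# Hedged usage sketch (not part of the original module); the `config` object
# and the 'notes/todo.txt' path are assumptions for illustration only:
#
#   fs = DefaultProjectFilesystem(config)
#   path = fs.resolve('notes/todo.txt')
#   fs.write_to_file(path, 'ship it')
#   fs.read_file(path)                             # -> 'ship it'
#   fs.read_file('missing.txt', must_exist=False)  # -> None instead of IOError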
| {
"content_hash": "38d947cbf74516c1c2aa61bebd6fc8b9",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 26.072727272727274,
"alnum_prop": 0.6659693165969317,
"repo_name": "robionica/b3",
"id": "de319b0e0ffe3924329c7831824534e3de6abd97",
"size": "2057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/b3/project_filesystems/default_project_filesystem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "143712"
},
{
"name": "Shell",
"bytes": "274"
}
],
"symlink_target": ""
} |
import sys
sys.stderr.buffer.write(b"\xe2" * 2048)
sys.stderr.buffer.write("foo bar baz".encode("utf8"))
sys.stderr.flush()
sys.exit(1)
| {
"content_hash": "50e9bc6e7db79aec0ea23c17ff660f52",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 53,
"avg_line_length": 19.714285714285715,
"alnum_prop": 0.7101449275362319,
"repo_name": "JoelMarcey/buck",
"id": "2a53103e6e996b8a9c0534b78d13f1c02926fd5b",
"size": "162",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "test/com/facebook/buck/testutil/endtoend/testdata/cli/bad_utf8/out.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "579"
},
{
"name": "Batchfile",
"bytes": "2093"
},
{
"name": "C",
"bytes": "255521"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "10992"
},
{
"name": "CSS",
"bytes": "54863"
},
{
"name": "D",
"bytes": "1017"
},
{
"name": "Go",
"bytes": "16819"
},
{
"name": "Groovy",
"bytes": "3362"
},
{
"name": "HTML",
"bytes": "6115"
},
{
"name": "Haskell",
"bytes": "895"
},
{
"name": "IDL",
"bytes": "385"
},
{
"name": "Java",
"bytes": "19430296"
},
{
"name": "JavaScript",
"bytes": "932672"
},
{
"name": "Kotlin",
"bytes": "2079"
},
{
"name": "Lex",
"bytes": "2731"
},
{
"name": "Makefile",
"bytes": "1816"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "4384"
},
{
"name": "Objective-C",
"bytes": "138150"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "244"
},
{
"name": "Prolog",
"bytes": "858"
},
{
"name": "Python",
"bytes": "1786899"
},
{
"name": "Roff",
"bytes": "1109"
},
{
"name": "Rust",
"bytes": "3618"
},
{
"name": "Scala",
"bytes": "4906"
},
{
"name": "Shell",
"bytes": "49876"
},
{
"name": "Smalltalk",
"bytes": "3355"
},
{
"name": "Standard ML",
"bytes": "15"
},
{
"name": "Swift",
"bytes": "6897"
},
{
"name": "Thrift",
"bytes": "26256"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
} |
from instr.base import TemperatureController
class Sci9700(TemperatureController):
""" Scientific Instruments Model 9700"""
def __init__(self, rsrc=None, timeout_sec=5, reset=True):
idn = 'Scientific Instruments,9700'
self._channel = 'A'
super().__init__(rsrc, idn, timeout_sec, reset)
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
if value not in ['A', 'B']:
raise ValueError
self._channel = value
def read_heater(self):
if self._debug_mode:
return 1234.56789
else:
tmp = self.q('HTR?').split()
return float(tmp[1])
def read_system_status(self):
"""
Set point temp, heater, controlmode, heater alarm status, control type,
zone number
"""
if self._debug_mode:
return 'STA 020.000,00.00,1,0,1,1,2'
else:
tmp = self.q('STA?')
return tmp
def read_temp(self):
if self._debug_mode:
return 1234.56789
else:
tmp = self.q('T{}?'.format(self.channel)).split()
return float(tmp[1])
def set_temp(self, temp):
"""
:type temp: float
"""
if self._debug_mode:
pass
else:
self.w('SET {}'.format(temp))
if __name__ == '__main__':
import visa
rm = visa.ResourceManager()
sci_rsrc = rm.open_resource('GPIB0::1::INSTR')
sci = Sci9700(sci_rsrc)
sci.channel = 'A'
tmp = sci.read_temp()
tmp = sci.read_system_status()
sci.set_temp(200)
| {
"content_hash": "33c6e0127dd9e04073121db6c0577f9f",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 79,
"avg_line_length": 25.09090909090909,
"alnum_prop": 0.5320048309178744,
"repo_name": "wataash/Instr",
"id": "4b63e9df5820e7888a7c787896fb86107b387d5b",
"size": "1658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instr/sci9700.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "43577"
},
{
"name": "Python",
"bytes": "65761"
}
],
"symlink_target": ""
} |
from hwt.hdl.constants import Time
from hwt.hdl.types.bits import Bits
from hwt.interfaces.intf_map import IntfMap
from hwt.interfaces.std import BramPort_withoutClk, VldSynced, RegCntrl, \
VectSignal, Signal
from hwt.interfaces.utils import addClkRstn, propagateClkRstn
from hwt.simulator.simTestCase import SimTestCase
from hwt.synthesizer.param import Param
from hwt.synthesizer.unit import Unit
from hwtLib.abstract.discoverAddressSpace import AddressSpaceProbe
from hwtLib.amba.axi4Lite import Axi4Lite
from hwtLib.amba.axiLite_comp.endpoint import AxiLiteEndpoint
from hwtLib.amba.axiLite_comp.endpoint_test import addrGetter
from hwtLib.amba.axiLite_comp.sim.memSpaceMaster import AxiLiteMemSpaceMaster
from hwtLib.amba.constants import RESP_OKAY
class Loop(Unit):
"""
Simple loop for any interface
"""
def __init__(self, interfaceCls):
self.interfaceCls = interfaceCls
super(Loop, self).__init__()
def _config(self):
self.interfaceCls._config(self)
def _declr(self):
with self._paramsShared():
self.din = self.interfaceCls()
self.dout = self.interfaceCls()._m()
def _impl(self):
self.dout(self.din)
class SigLoop(Unit):
def _config(self):
self.DATA_WIDTH = Param(32)
def _declr(self):
self.din = VectSignal(self.DATA_WIDTH)
self.dout = VectSignal(self.DATA_WIDTH)._m()
def _impl(self):
self.dout(self.din)
class TestUnittWithChilds(Unit):
"""
Container of AxiLiteEndpoint constructed by fromInterfaceMap
"""
def _config(self):
self.ADDR_WIDTH = Param(32)
self.DATA_WIDTH = Param(32)
def _declr(self):
addClkRstn(self)
with self._paramsShared():
self.bus = Axi4Lite()
self.signalLoop = SigLoop()
self.signalIn = VectSignal(self.DATA_WIDTH)
self.regCntrlLoop = Loop(RegCntrl)
self.regCntrlOut = RegCntrl()._m()
self.vldSyncedLoop = Loop(VldSynced)
self.vldSyncedOut = VldSynced()._m()
with self._paramsShared(exclude=({"ADDR_WIDTH"}, set())):
self.bramLoop = Loop(BramPort_withoutClk)
self.bramLoop.ADDR_WIDTH = 2
self.bramOut = BramPort_withoutClk()._m()
self.bramOut.ADDR_WIDTH = 2
def _impl(self):
self.signalLoop.din(self.signalIn)
self.regCntrlOut(self.regCntrlLoop.dout)
self.vldSyncedOut(self.vldSyncedLoop.dout)
self.bramOut(self.bramLoop.dout)
def configEp(ep):
ep._updateParamsFrom(self)
rltSig10 = self._sig("sig", Bits(self.DATA_WIDTH), def_val=10)
interfaceMap = IntfMap([
(rltSig10, "rltSig10"),
(self.signalLoop.dout, "signal"),
(self.regCntrlLoop.din, "regCntrl"),
(self.vldSyncedLoop.din, "vldSynced"),
(self.bramLoop.din, "bram"),
(Bits(self.DATA_WIDTH), None),
])
axiLiteConv = AxiLiteEndpoint.fromInterfaceMap(interfaceMap)
axiLiteConv._updateParamsFrom(self)
self.conv = axiLiteConv
axiLiteConv.connectByInterfaceMap(interfaceMap)
axiLiteConv.bus(self.bus)
axiLiteConv.decoded.vldSynced.din(None)
propagateClkRstn(self)
TestUnittWithChilds_add_space_str = """\
struct {
<Bits, 32bits> rltSig10 // start:0x0(bit) 0x0(byte)
<Bits, 32bits> signal // start:0x20(bit) 0x4(byte)
<Bits, 32bits> regCntrl // start:0x40(bit) 0x8(byte)
<Bits, 32bits> vldSynced // start:0x60(bit) 0xc(byte)
<Bits, 32bits>[4] bram // start:0x80(bit) 0x10(byte)
//<Bits, 32bits> empty space // start:0x100(bit) 0x20(byte)
}"""
class TestUnittWithArr(Unit):
"""
Container of AxiLiteEndpoint constructed by fromInterfaceMap
"""
def _config(self):
self.ADDR_WIDTH = Param(32)
self.DATA_WIDTH = Param(32)
def _declr(self):
addClkRstn(self)
with self._paramsShared():
self.bus = Axi4Lite()
self.regCntrlLoop0 = Loop(RegCntrl)
self.regCntrlOut0 = RegCntrl()._m()
self.regCntrlLoop1 = Loop(RegCntrl)
self.regCntrlOut1 = RegCntrl()._m()
self.regCntrlLoop2 = Loop(RegCntrl)
self.regCntrlOut2 = RegCntrl()._m()
def _impl(self):
self.regCntrlOut0(self.regCntrlLoop0.dout)
self.regCntrlOut1(self.regCntrlLoop1.dout)
self.regCntrlOut2(self.regCntrlLoop2.dout)
def configEp(ep):
ep._updateParamsFrom(self)
interfaceMap = IntfMap([
([self.regCntrlLoop0.din,
self.regCntrlLoop1.din,
self.regCntrlLoop2.din,
], "regCntrl"),
])
axiLiteConv = AxiLiteEndpoint.fromInterfaceMap(interfaceMap)
axiLiteConv._updateParamsFrom(self)
self.conv = axiLiteConv
axiLiteConv.connectByInterfaceMap(interfaceMap)
axiLiteConv.bus(self.bus)
propagateClkRstn(self)
TestUnittWithArr_addr_space_str = """\
struct {
<Bits, 32bits>[3] regCntrl // start:0x0(bit) 0x0(byte)
}"""
class AxiLiteEndpoint_fromInterfaceTC(SimTestCase):
def tearDown(self):
self.rmSim()
SimTestCase.tearDown(self)
def mkRegisterMap(self, u):
self.addrProbe = AddressSpaceProbe(u.bus, addrGetter)
self.regs = AxiLiteMemSpaceMaster(u.bus, self.addrProbe.discovered)
def randomizeAll(self):
u = self.u
for intf in u._interfaces:
            if intf not in (u.clk, u.rst_n, u.bus)\
and not isinstance(intf, (BramPort_withoutClk, VldSynced, Signal)):
self.randomize(intf)
self.randomize(u.bus.ar)
self.randomize(u.bus.aw)
self.randomize(u.bus.r)
self.randomize(u.bus.w)
self.randomize(u.bus.b)
def mySetUp(self, data_width=32):
u = self.u = TestUnittWithChilds()
self.DATA_WIDTH = data_width
u.DATA_WIDTH = self.DATA_WIDTH
self.compileSimAndStart(self.u, onAfterToRtl=self.mkRegisterMap)
return u
def test_nop(self):
u = self.mySetUp(32)
self.randomizeAll()
self.runSim(100 * Time.ns)
self.assertEmpty(u.bus._ag.r.data)
self.assertEmpty(u.bus._ag.b.data)
self.assertEmpty(u.regCntrlOut._ag.dout)
self.assertEmpty(u.vldSyncedOut._ag.data)
self.assertEqual(u.bramOut._ag.mem, {})
def test_read(self):
u = self.mySetUp(32)
MAGIC = 100
r = self.regs
r.rltSig10.read()
u.signalIn._ag.data.append(MAGIC)
r.signal.read()
u.regCntrlOut._ag.din.extend([MAGIC + 1])
r.regCntrl.read()
r.vldSynced.read()
for i in range(4):
u.bramOut._ag.mem[i] = MAGIC + 2 + i
r.bram[i].read()
self.randomizeAll()
self.runSim(600 * Time.ns)
self.assertValSequenceEqual(
u.bus.r._ag.data, [
(10, RESP_OKAY),
(MAGIC, RESP_OKAY),
(MAGIC + 1, RESP_OKAY),
(None, RESP_OKAY), ] + [
(MAGIC + 2 + i, RESP_OKAY)
for i in range(4)
])
def test_write(self):
u = self.mySetUp(32)
MAGIC = 100
r = self.regs
r.regCntrl.write(MAGIC)
r.vldSynced.write(MAGIC + 1)
for i in range(4):
r.bram[i].write(MAGIC + 2 + i)
self.randomizeAll()
self.runSim(800 * Time.ns)
self.assertValSequenceEqual(u.regCntrlOut._ag.dout,
[MAGIC, ])
self.assertValSequenceEqual(u.vldSyncedOut._ag.data,
[MAGIC + 1, ])
self.assertValSequenceEqual(u.bus.b._ag.data, [RESP_OKAY for _ in range(6)])
def test_registerMap(self):
self.mySetUp(32)
s = self.addrProbe.discovered.__repr__(withAddr=0, expandStructs=True)
self.assertEqual(s, TestUnittWithChilds_add_space_str)
class AxiLiteEndpoint_fromInterface_arr_TC(AxiLiteEndpoint_fromInterfaceTC):
def mySetUp(self, data_width=32):
u = self.u = TestUnittWithArr()
self.DATA_WIDTH = data_width
u.DATA_WIDTH = self.DATA_WIDTH
self.compileSimAndStart(self.u, onAfterToRtl=self.mkRegisterMap)
return u
def test_nop(self):
u = self.mySetUp(32)
self.randomizeAll()
self.runSim(100 * Time.ns)
self.assertEmpty(u.bus._ag.r.data)
self.assertEmpty(u.bus._ag.b.data)
self.assertEmpty(u.regCntrlOut0._ag.dout)
self.assertEmpty(u.regCntrlOut1._ag.dout)
self.assertEmpty(u.regCntrlOut2._ag.dout)
def test_read(self):
u = self.mySetUp(32)
MAGIC = 100
r = self.regs
u.regCntrlOut0._ag.din.extend([MAGIC])
r.regCntrl[0].read()
u.regCntrlOut1._ag.din.extend([MAGIC + 1])
r.regCntrl[1].read()
u.regCntrlOut2._ag.din.extend([MAGIC + 2])
r.regCntrl[2].read()
self.randomizeAll()
self.runSim(600 * Time.ns)
self.assertValSequenceEqual(
u.bus.r._ag.data, [
(MAGIC, RESP_OKAY),
(MAGIC + 1, RESP_OKAY),
(MAGIC + 2, RESP_OKAY)
])
def test_write(self):
u = self.mySetUp(32)
MAGIC = 100
r = self.regs
for i in range(3):
r.regCntrl[i].write(MAGIC + i)
self.randomizeAll()
self.runSim(800 * Time.ns)
for i in range(3):
intf = getattr(u, f"regCntrlOut{i:d}")
self.assertValSequenceEqual(intf._ag.dout,
[MAGIC + i, ])
self.assertValSequenceEqual(
u.bus.b._ag.data,
[RESP_OKAY for _ in range(3)])
def test_registerMap(self):
self.mySetUp(32)
s = self.addrProbe.discovered.__repr__(withAddr=0, expandStructs=True)
self.assertEqual(s, TestUnittWithArr_addr_space_str)
if __name__ == "__main__":
import unittest
suite = unittest.TestSuite()
# suite.addTest(AxiLiteEndpoint_fromInterface_arr_TC('test_read'))
suite.addTest(unittest.makeSuite(AxiLiteEndpoint_fromInterfaceTC))
suite.addTest(unittest.makeSuite(AxiLiteEndpoint_fromInterface_arr_TC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
| {
"content_hash": "d112b5d22b17aae4117f0c3c8629eaee",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 87,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.597608799617408,
"repo_name": "Nic30/hwtLib",
"id": "fd239fdb02c942ced41565fca27c132cc02d5ee6",
"size": "10503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hwtLib/amba/axiLite_comp/endpoint_fromInterfaces_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "41560"
},
{
"name": "Python",
"bytes": "2523349"
},
{
"name": "VHDL",
"bytes": "117346"
},
{
"name": "Verilog",
"bytes": "36444"
}
],
"symlink_target": ""
} |
__author__ = 'tspycher'
import mongoengine
class Aerodrome(mongoengine.Document):
code = mongoengine.StringField(max_length=4, required=True, primary_key=True, unique=True)
name = mongoengine.StringField(required=True)
msl = mongoengine.IntField(default=0)
def __str__(self):
return "%s - %s" % (self.code, self.name)
| {
"content_hash": "d151d7caee0698af7cf25a6ecee000f7",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 94,
"avg_line_length": 31.363636363636363,
"alnum_prop": 0.6898550724637681,
"repo_name": "tspycher/pyaeromanager",
"id": "8d6206ab6b63c1d7f9b8814ae035be78dc5b98d0",
"size": "345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/documents/aerodrome/aerodrome.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40030"
}
],
"symlink_target": ""
} |
"""Control flow statements: loops, conditionals, etc.
Note: most of these operators accept pairs of get_state/set_state functions, to
capture mutations that the corresponding code blocks might make. These
mutations only need to be captured when staging the control flow, and they just
work when reverting to Python behavior.
__Examples__
```
while cond:
self.x += i
```
When the functionalized version is executed as a Python loop, it just works:
```
def loop_body():
self.x += i # works as expected for Python loops
```
But it won't work for TF loops:
```
def loop_body():
self.x += i # self.x has the wrong value!
```
get_state/set_state allow piping the mutations through the loop variables as
well, in effect changing the loop body:
```
def loop_body(self_x):
self.x = self_x # self.x now has the proper value
self.x += i # the original block
self_x = self.x # write self.x back into the loop vars
return self_x
self_x = tf.while_loop(...)
self.x = self_x # the result is not properly captured
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import traceback
import numpy as np
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.autograph.operators import special_values
from tensorflow.python.autograph.utils import ag_logging
from tensorflow.python.autograph.utils import compat_util
from tensorflow.python.autograph.utils import misc
from tensorflow.python.autograph.utils import tensors
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.experimental.ops import take_while_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import nest
# TODO(b/145618471): Remove this dependency.
# Lazy import to work around circular dependencies
input_lib = lazy_loader.LazyLoader(
'input_lib', globals(),
'tensorflow.python.distribute.input_lib')
PYTHON_MAX_ITERATIONS = 100000000 # Fails in about one minute for empty loops.
WARN_INEFFICIENT_UNROLL = True
INEFFICIENT_UNROLL_MIN_ITERATIONS = 3000
INEFFICIENT_UNROLL_MIN_OPS = 1
# TODO(mdan): Use the custom operator pattern instead of type dispatch.
# An example of this pattern is found in the implementation of distributed
# datasets. Before it can be used though, we need to standardize the interface.
# TODO(mdan): Use existing symbol names rather than carrying them separately.
def _disallow_undefs_into_loop(*values):
"""Ensures that all values in the state are defined when entering a loop."""
undefined = tuple(filter(special_values.is_undefined, values))
if undefined:
raise ValueError(
'{} must be defined before the loop.'.format(
','.join(s.symbol_name for s in undefined)))
for value in values:
if special_values.is_undefined_return(value):
# Assumption: the loop will only capture the variable which tracks the
# return value if the loop contained a return statement.
# TODO(mdan): This should be checked at the place where return occurs.
raise ValueError(
'return statements are not supported within a TensorFlow loop.')
def _is_subshape(left, right):
"""Returns True if left shape is at least as specific as right shape."""
# TODO(mdan): This code should be in TensorShape.
# Note: this is not the same as TensorShape.is_compatible_with, which is
# symmetric.
# This code also duplicates _ShapeLessThanOrEqual from control_flow_ops.py.
if right.dims is None:
return True
if left.ndims != right.ndims:
return False
for ldim, rdim in zip(left.dims, right.dims):
if rdim.value is not None and ldim.value != rdim.value:
return False
return True
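# Hedged examples of the asymmetry noted above (not part of the original
# module; `tensor_shape` refers to tensorflow.python.framework.tensor_shape):
#   _is_subshape(tensor_shape.TensorShape([2, 3]),
#                tensor_shape.TensorShape([None, 3]))  # -> True
#   _is_subshape(tensor_shape.TensorShape([None, 3]),
#                tensor_shape.TensorShape([2, 3]))     # -> False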
# TODO(mdan): Remove these verifications once TF ops can properly report names.
def _verify_single_loop_var(
name, check_shape, init, entry, exit_, shape_invariant):
"""Verifies whether the initial, entry and exit values are consistent."""
if isinstance(init, (bool, int, float, str, np.ndarray)):
init = ops.convert_to_tensor_v2(init)
if isinstance(entry, (bool, int, float, str, np.ndarray)):
entry = ops.convert_to_tensor_v2(entry)
if isinstance(exit_, (bool, int, float, str)):
exit_ = ops.convert_to_tensor_v2(exit_)
if (not tensor_util.is_tensor(entry) or
not tensor_util.is_tensor(exit_)):
return
# TODO(mdan): Properly account for CompositeTensors.
if (not hasattr(entry, 'dtype') or
not hasattr(exit_, 'dtype')):
return
if (not hasattr(entry, 'shape') or
not hasattr(exit_, 'shape')):
return
if entry.dtype != exit_.dtype:
raise TypeError(
'"{}" has dtype {} before the loop, but dtype {} after one'
' iteration. TensorFlow control flow requires it stays the'
' same.'.format(
name,
entry.dtype.name,
exit_.dtype.name,
))
if check_shape:
exit_shape = exit_.shape
if shape_invariant is None:
entry_shape = entry.shape
if not _is_subshape(exit_shape, entry_shape):
raise ValueError(
'"{}" has shape {} before the loop, but shape {} after one'
' iteration. Use tf.autograph.experimental.set_loop_options to set'
' shape invariants.'.format(name, entry_shape, exit_shape))
else:
init_shape = init.shape
if not _is_subshape(init_shape, shape_invariant):
raise ValueError(
'"{}" has shape {} before the loop, which does not conform with'
' the shape invariant {}.'.format(name, init_shape,
shape_invariant))
if not _is_subshape(exit_shape, shape_invariant):
raise ValueError(
'"{}" has shape {} after one iteration, which does not conform with'
' the shape invariant {}.'.format(
name, exit_shape, shape_invariant))
def _verify_tf_loop_vars(init_vars,
iter_entry_vars,
iter_exit_vars,
symbol_names,
opts,
check_shapes=True):
"""Verifies loop variables for consistency."""
if check_shapes and 'shape_invariants' in opts:
shape_invariants = opts['shape_invariants']
else:
shape_invariants = nest.map_structure(lambda _: None, iter_entry_vars)
assert len(symbol_names) == len(shape_invariants)
assert len(symbol_names) == len(init_vars)
assert len(symbol_names) == len(iter_entry_vars)
assert len(symbol_names) == len(iter_exit_vars)
for i in range(len(symbol_names)):
name = symbol_names[i]
init = init_vars[i]
entry = iter_entry_vars[i]
exit_ = iter_exit_vars[i]
invariant = shape_invariants[i]
try:
nest.assert_same_structure(init, entry, expand_composites=True)
nest.assert_same_structure(entry, exit_, expand_composites=True)
except (ValueError, TypeError) as e:
raise TypeError('"{}" does not have the same nested structure after one'
' iteration.\n\n{}'.format(name, e))
if invariant is not None:
try:
nest.assert_same_structure(init, invariant, expand_composites=False)
except (ValueError, TypeError) as e:
raise TypeError('"{}" does not have the same nested structure as its'
' corresponding shape invariant.\n\n{}'.format(name, e))
nest.map_structure(
functools.partial(_verify_single_loop_var, name, check_shapes), init,
entry, exit_, invariant)
def _verify_single_cond_var(name, body_var, orelse_var):
"""Verifies whether body_var and orelse_var are consistent."""
if isinstance(body_var, (bool, int, float, str)):
body_var = ops.convert_to_tensor_v2(body_var)
if isinstance(orelse_var, (bool, int, float, str)):
orelse_var = ops.convert_to_tensor_v2(orelse_var)
if (not tensor_util.is_tensor(body_var) or
not tensor_util.is_tensor(orelse_var)):
return
# TODO(mdan): Properly account for CompositeTensors.
if (not hasattr(body_var, 'dtype') or
not hasattr(orelse_var, 'dtype')):
return
if body_var.dtype != orelse_var.dtype:
raise TypeError(
'"{}" has dtype {} in the TRUE branch, but dtype={} in the FALSE'
' branch. TensorFlow control flow requires that they are the'
' same.'.format(name, body_var.dtype.name,
orelse_var.dtype.name))
def _verify_tf_cond_vars(body_vars, orelse_vars, symbol_names):
"""Verifies variables manipulated by a conditional for consistency."""
basic_body_vars, composite_body_vars = body_vars
basic_orelse_vars, composite_orelse_vars = orelse_vars
assert isinstance(composite_body_vars, tuple)
assert isinstance(composite_orelse_vars, tuple)
# TODO(kkimlabs): Make this more consistent.
# The basic outputs should always be a tuple.
if not isinstance(basic_body_vars, tuple):
basic_body_vars = (basic_body_vars,)
if not isinstance(basic_orelse_vars, tuple):
basic_orelse_vars = (basic_orelse_vars,)
body_vars = basic_body_vars + composite_body_vars
orelse_vars = basic_orelse_vars + composite_orelse_vars
named_vars = zip(symbol_names, body_vars, orelse_vars)
for name, body_var, orelse_var in named_vars:
try:
nest.assert_same_structure(
body_var, orelse_var, expand_composites=True)
except (ValueError, TypeError) as e:
raise TypeError(
'"{}" does not have the same nested structure in the TRUE and FALSE'
' branches.\n\n{}'.format(name, str(e)))
nest.map_structure(
functools.partial(_verify_single_cond_var, name), body_var, orelse_var)
def for_stmt(iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Functional form of a for statement.
The loop operates on a state, which includes all symbols that are
variant across loop iterations, excluding the variables local to the loop.
For example, given the loop below that calculates the geometric and
  arithmetic means of some numbers:
```
geo_mean = 1
arith_mean = 0
for i in range(n):
a = numbers[i]
geo_mean *= a
arith_mean += a
```
The state is represented by the variables geo_mean and arith_mean. The
`extra_test`, `body`, `get_state` and `set_state` functions must bind to the
original `geo_mean` and `arith_mean` symbols, using `nonlocal`.
Args:
iter_: The entity being iterated over.
extra_test: Callable with the state as arguments, and boolean return type.
An additional loop condition.
body: Callable with the iterate and the state as arguments, and state as
return type. The actual loop body.
get_state: Additional callable which can capture additional state (such as
the values of composite symbols). This is only useful when staging the
loop.
set_state: Additional callable which save values captured by get_state back
into the Python environment. This is only useful when staging the loop.
symbol_names: Tuple containing names of the loop variables returned by
get_state.
opts: Optional dict of extra loop parameters.
Returns:
Tuple containing the final state.
"""
if tensor_util.is_tensor(iter_):
if tensors.is_range_tensor(iter_):
_tf_range_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts)
else:
_known_len_tf_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts)
elif isinstance(iter_, dataset_ops.DatasetV2):
_tf_dataset_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts)
elif isinstance(iter_, iterator_ops.OwnedIterator):
_tf_iterator_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts)
elif isinstance(iter_, ragged_tensor.RaggedTensor):
_tf_ragged_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts)
elif isinstance(iter_, input_lib.DistributedIterator):
raise NotImplementedError(
'distributed iterators not supported yet, use the distributed dataset'
' directly')
# TODO(mdan): Resolve the private access issue.
elif isinstance(iter_, input_lib._IterableInput): # pylint:disable=protected-access
_tf_distributed_iterable_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts)
else:
_py_for_stmt(iter_, extra_test, body, None, None)
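# A minimal, hedged sketch (not part of the original module) of the
# geo_mean/arith_mean loop from the for_stmt docstring, lowered by hand to the
# functional form. It exercises only the plain-Python dispatch path; the
# function and variable names below are illustrative only.
def _example_for_stmt_usage(numbers, n):
  geo_mean = 1.0
  arith_mean = 0.0
  def get_state():
    return geo_mean, arith_mean
  def set_state(loop_vars):
    nonlocal geo_mean, arith_mean
    geo_mean, arith_mean = loop_vars
  def body(i):
    nonlocal geo_mean, arith_mean
    a = numbers[i]
    geo_mean *= a
    arith_mean += a
  for_stmt(range(n), None, body, get_state, set_state,
           ('geo_mean', 'arith_mean'), {})
  return geo_mean, arith_mean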
def _py_for_stmt(iter_, extra_test, body, get_state, set_state):
"""Overload of for_stmt that executes a Python for loop."""
del get_state, set_state
if __debug__:
checker = _PythonLoopChecker()
before_iteration = checker.before_iteration
after_iteration = checker.after_iteration
before_iteration()
original_body = body
def protected_body(protected_iter):
original_body(protected_iter)
after_iteration()
before_iteration()
body = protected_body
if extra_test is not None:
if extra_test():
for target in iter_:
body(target)
if not extra_test():
break
else:
for target in iter_:
body(target)
def _known_len_tf_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Overload of for_stmt that iterates over TF entities that admit a length."""
n = py_builtins.len_(iter_)
# TODO(b/117628877): Revisit performance once XLA has the necessary support.
# Note: using a TensorArray creates an extra copy, but can calculate
# gradients more efficiently than StridedSlice.
ta = tensor_array_ops.TensorArray(iter_.dtype, size=n)
iter_ = ta.unstack(iter_)
iterate_index = compat_util.BasicRef(0)
def aug_get_state():
return (iterate_index.value,) + get_state()
def aug_set_state(aug_loop_vars):
    # TODO(mdan): Use starred assignment once we can switch to Py3-only syntax.
iterate_index.value, loop_vars = aug_loop_vars[0], aug_loop_vars[1:]
# The iteration index is not "output" by the for loop. If the iterate
# is used outside the loop, it will appear in the loop vars separately.
set_state(loop_vars)
def aug_body():
body(iter_.read(iterate_index.value))
iterate_index.value += 1
def aug_test():
main_test = iterate_index.value < n
if extra_test is not None:
return control_flow_ops.cond(main_test, extra_test, lambda: False)
return main_test
opts['maximum_iterations'] = n
_tf_while_stmt(
aug_test,
aug_body,
aug_get_state,
aug_set_state,
('<internal iterate>',) + symbol_names,
opts,
)
def _tf_ragged_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Overload of for_stmt that iterates over TF ragged tensors."""
init_vars = get_state()
_disallow_undefs_into_loop(*init_vars)
# TODO(mdan): Move this into len()? Requires eager support.
if iter_.shape and iter_.shape[0] is not None:
n = iter_.shape[0]
else:
n = iter_.row_lengths()[0]
iterate_index = compat_util.BasicRef(0)
def aug_get_state():
return (iterate_index.value,) + get_state()
def aug_set_state(aug_loop_vars):
    # TODO(mdan): Use starred assignment once we can switch to Py3-only syntax.
iterate_index.value, loop_vars = aug_loop_vars[0], aug_loop_vars[1:]
# The iteration index is not "output" by the for loop. If the iterate
# is used outside the loop, it will appear in the loop vars separately.
set_state(loop_vars)
def aug_body():
body(iter_[iterate_index.value])
iterate_index.value += 1
def aug_test():
main_test = iterate_index.value < n
if extra_test is not None:
return control_flow_ops.cond(main_test, extra_test, lambda: False)
return main_test
opts['maximum_iterations'] = n
_tf_while_stmt(
aug_test,
aug_body,
aug_get_state,
aug_set_state,
('<internal iterate>',) + symbol_names,
opts)
def _tf_range_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Overload of for_stmt that iterates over a TF range (and elides it)."""
start, limit, delta = iter_.op.inputs
iterate = compat_util.BasicRef(start)
def aug_get_state():
return (iterate.value,) + get_state()
def aug_set_state(aug_loop_vars):
    # TODO(mdan): Use starred assignment once we can switch to Py3-only syntax.
iterate.value, loop_vars = aug_loop_vars[0], aug_loop_vars[1:]
# The iteration index is not "output" by the for loop. If the iterate
# is used outside the loop, it will appear in the loop vars separately.
set_state(loop_vars)
def aug_body():
body(iterate.value)
iterate.value += delta
def aug_test():
main_test = math_ops.logical_or(
math_ops.logical_and(delta >= 0, iterate.value < limit),
math_ops.logical_and(delta < 0, iterate.value > limit))
if extra_test is not None:
return control_flow_ops.cond(main_test, extra_test, lambda: False)
return main_test
opts['maximum_iterations'] = math_ops.cast(
misc.get_range_len(start, limit, delta), dtypes.int32)
_tf_while_stmt(
aug_test,
aug_body,
aug_get_state,
aug_set_state,
('<internal iterate>',) + symbol_names,
opts)
def _tf_iterator_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Overload of for_stmt that iterates over TF Iterators. See for_loop."""
symbol_names = ('<internal has_next>',) + symbol_names
has_next = compat_util.BasicRef(True)
def aug_get_state():
return (has_next.value,) + get_state()
def aug_set_state(aug_loop_vars):
    # TODO(mdan): Use starred assignment once we can switch to Py3-only syntax.
has_next.value, loop_vars = aug_loop_vars[0], aug_loop_vars[1:]
set_state(loop_vars)
init_vars = aug_get_state()
_disallow_undefs_into_loop(*init_vars)
def aug_body():
"""Main body passed to _tf_while_stmt."""
opt_iterate = iterator_ops.get_next_as_optional(iter_)
has_next.value = opt_iterate.has_value()
loop_vars = aug_get_state() # updated by set_state() in _tf_while_loop.
def main_path():
body(opt_iterate.get_value())
new_loop_vars = aug_get_state()
# Note: this verification duplicates the one performed in tf_while_stmt,
# but needs to be done earlier to prevent the tf.cond from blowing up
# first.
_verify_tf_loop_vars(
init_vars, loop_vars, new_loop_vars, symbol_names, opts)
return new_loop_vars
def noop_path():
return loop_vars
# TODO(mdan): If tf.while_loop supported Optional, this could be avoided.
# Calling set_state so that get_state() _tf_while_loop sees the conditional
# tensors.
aug_set_state(
control_flow_ops.cond(has_next.value, main_path, noop_path))
def aug_test():
# This value takes a complicated path to get here:
# prev_iteration_body -> get_state -> tf.while_loop (as loop var)
# -> current_iteration_body -> set_state -> has_next.value
main_test = has_next.value
if extra_test is not None:
return control_flow_ops.cond(main_test, extra_test, lambda: False)
return main_test
_tf_while_stmt(
aug_test,
aug_body,
aug_get_state,
aug_set_state,
symbol_names,
opts)
def _general_purpose_scan(ds, init_state, body):
"""Variant of Dataset.scan with semantics of general-purpose computation."""
# Datasets are typically intended for data preprocessing. However, in
# autograph loops they usually appear as general-purpose computations (for
# example, a custom training loop). These two use cases require significantly
# different optimization policies, the most important of which is the device
# placement. The flag override for use_default_device below instructs the
# runtime to treat the computation as general-purpose, rather than data
# preprocessing.
# TODO(mdan): s/use_default_device/specialize_for_input_pipeline.
# TODO(mdan): Don't use private symbols.
return scan_ops._ScanDataset(ds, init_state, body, use_default_device=False) # pylint:disable=protected-access
def _tf_dataset_for_stmt(
ds, extra_test, body, get_state, set_state, symbol_names, opts):
"""Overload of _dataset_for_stmt with early stopping. See for_stmt."""
# Note: This is easier to follow with the insight that the computations in
# a dataset pipeline are transposed (aka fused).
# For example, given a pipeline input -> scan -> take_while -> reduce,
# and a dataset with input [1, 2, 3], the computations occur in the following
# order:
# reduce(take_while(scan(1)))
# reduce(take_while(scan(2)))
# reduce(take_while(scan(3)))
init_vars = get_state()
_disallow_undefs_into_loop(*init_vars)
# Workaround for Dataset.reduce not allowing empty state tensors - create
# a dummy state variable that remains unused.
# TODO(mdan): reduce should allow and match empty structures.
if not init_vars:
init_vars = (constant_op.constant(0),)
symbol_names = ('<internal dummy>',)
def dummy_set_state(unused_dummy):
pass
def dummy_get_state():
return (constant_op.constant(0),)
get_state, set_state = dummy_get_state, dummy_set_state
def scan_body(scan_state, scan_inputs):
"""Main body of the Dataset.scan."""
loop_vars, iterate = scan_state, scan_inputs
set_state(loop_vars)
def main_path():
body(iterate)
new_loop_vars = get_state()
_verify_tf_loop_vars(
init_vars, loop_vars, new_loop_vars, symbol_names, opts,
check_shapes=False)
return new_loop_vars
if extra_test is not None:
extra_cond = extra_test()
new_loop_vars = control_flow_ops.cond(
extra_cond, main_path, lambda: loop_vars)
else:
# TODO(mdan): the optimizer should be able to remove an invariant cond?
extra_cond = (constant_op.constant(True),) # dummy value, unused
new_loop_vars = main_path()
scan_outputs = new_loop_vars, extra_cond
new_scan_state = new_loop_vars
return new_scan_state, scan_outputs
def take_while_predicate(unused_loop_vars, extra_cond):
return extra_cond
def reduce_body(unused_reduce_state, scan_outputs):
output_loop_vars, unused_extra_cond = scan_outputs
new_reduce_state = output_loop_vars
return new_reduce_state
ds = _general_purpose_scan(ds, init_vars, scan_body)
if extra_test is not None:
ds = ds.apply(take_while_ops.take_while(take_while_predicate))
final_loop_vars = ds.reduce(init_vars, reduce_body)
set_state(final_loop_vars)
def _tf_distributed_iterable_for_stmt(
iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Overload of for_stmt that iterates over TF distributed datasets."""
if extra_test is not None:
raise NotImplementedError(
'break and return statements are not yet supported in '
'for ... in distributed input loops.')
init_vars = get_state()
  _disallow_undefs_into_loop(*init_vars)
if 'shape_invariants' in opts:
opts['shape_invariants'] = _shape_invariants_mapping_to_positional_list(
opts['shape_invariants'], init_vars)
def reduce_body(loop_vars, iterate):
set_state(loop_vars)
body(iterate)
new_loop_vars = get_state()
_verify_tf_loop_vars(
init_vars, loop_vars, new_loop_vars, symbol_names, opts)
return new_loop_vars
set_state(iter_.reduce(init_vars, reduce_body))
def while_stmt(test, body, get_state, set_state, symbol_names, opts):
"""Functional form of a while statement.
The loop operates on a so-called state, which includes all symbols that are
variant across loop iterations. In what follows we refer to state as either
a tuple of entities that represent an actual state, or a list of arguments
of the corresponding types.
Args:
test: Callable with the state as arguments, and boolean return type. The
loop condition.
body: Callable with the state as arguments, and state as return type. The
actual loop body.
get_state: Additional callable which can capture additional state (such as
the values of composite symbols). This is only useful when staging the
loop.
set_state: Additional callable which save values captured by get_state back
into the Python environment. This is only useful when staging the loop.
symbol_names: Tuple containing the names of all loop variables.
opts: Optional dict of extra loop parameters.
Returns:
Tuple containing the final state.
"""
# Evaluate the initial test once in order to do the dispatch. The evaluation
# is isolated to minimize unwanted side effects.
# TODO(mdan): Do a full iteration - some state types might lower to Tensor.
with func_graph.FuncGraph('tmp').as_default():
init_test = test()
# TensorFlow: Multiple evaluations are acceptable in this case, so we're fine
# with the re-evaluation of `test` that `_tf_while_stmt` will make.
if tensors.is_dense_tensor(init_test):
_tf_while_stmt(test, body, get_state, set_state, symbol_names, opts)
return
# Normal Python: We already consumed one evaluation of `test`; consistently,
# unroll one iteration before dispatching to a normal loop.
# TODO(mdan): Push the "init_test" value via opts into _py_while_stmt?
if not init_test:
return
body()
_py_while_stmt(test, body, get_state, set_state, opts)
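# A minimal, hedged sketch (not part of the original module) of the functional
# form of `while x > 0: x -= 1`, exercising only the plain-Python dispatch
# path. The function name below is illustrative only.
def _example_while_stmt_usage(x):
  def test():
    return x > 0
  def body():
    nonlocal x
    x -= 1
  def get_state():
    return (x,)
  def set_state(loop_vars):
    nonlocal x
    x, = loop_vars
  while_stmt(test, body, get_state, set_state, ('x',), {})
  return x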
class _PythonLoopChecker(object):
"""Verifies Python loops for TF-specific limits."""
__slots__ = (
'iterations',
'check_inefficient_unroll',
'check_op_count_after_iteration',
'ops_before_iteration',
)
def __init__(self):
self.iterations = 1
self.check_inefficient_unroll = WARN_INEFFICIENT_UNROLL
# Triggered when we decided to test the op counts.
self.check_op_count_after_iteration = False
def _get_ops(self):
return ops.get_default_graph().get_operations()
def _check_unroll_limits(self):
if self.iterations > PYTHON_MAX_ITERATIONS:
raise ValueError('iteration limit exceeded')
def _stop_checking_inefficient_unroll(self):
self.check_inefficient_unroll = False
self.check_op_count_after_iteration = False
self.ops_before_iteration = None
  def _verify_inefficient_unroll(self):
"""Checks for possibly-inefficient creation of ops in a Python loop."""
assert self.ops_before_iteration is not None
ops_after_iteration = self._get_ops()
new_ops = tuple(
op for op in ops_after_iteration if op not in self.ops_before_iteration)
if len(new_ops) < INEFFICIENT_UNROLL_MIN_OPS:
return False
ag_logging.warn(
'Large unrolled loop detected. Did you mean to use a TF loop?'
' The following ops were created after iteration %s: %s'
'\nSee'
' https://github.com/tensorflow/tensorflow/blob/master/'
'tensorflow/python/autograph/g3doc/reference/common_errors.md'
'#warning-large-unrolled-loop-detected'
'\n'
'Location:'
'\n%s'
'', self.iterations, new_ops, '\n'.join(traceback.format_stack()))
return True
def before_iteration(self):
"""Called before each iteration in a Python loop."""
if (self.check_inefficient_unroll and
self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS):
self.ops_before_iteration = self._get_ops()
self.check_op_count_after_iteration = True
def after_iteration(self):
"""Called after each iteration in a Python loop."""
self.iterations += 1
self._check_unroll_limits()
if self.check_op_count_after_iteration:
      did_warn = self._verify_inefficient_unroll()
if did_warn:
self._stop_checking_inefficient_unroll() # Only warn once.
elif self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS + 3:
# Once deciding to check the op counts, only do it for a few iterations.
self._stop_checking_inefficient_unroll()
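# A hypothetical illustration of the checker's intended call pattern (mirroring
# `_py_while_stmt` below): `before_iteration`/`after_iteration` bracket each
# Python loop iteration so that the unroll limits are enforced.
def _example_loop_checker(num_iterations=3):
  checker = _PythonLoopChecker()
  for _ in range(num_iterations):
    checker.before_iteration()
    # ... the unrolled loop body would execute here ...
    checker.after_iteration()
  return checker.iterations  # The counter starts at 1, so this is 1 + num_iterations.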
def _py_while_stmt(test, body, get_state, set_state, opts):
"""Overload of while_stmt that executes a Python while loop."""
del opts, get_state, set_state
if __debug__:
checker = _PythonLoopChecker()
before_iteration = checker.before_iteration
after_iteration = checker.after_iteration
before_iteration()
original_body = body
def protected_body():
original_body()
after_iteration()
before_iteration()
body = protected_body
while test():
body()
def _shape_invariants_mapping_to_positional_list(mapping, keys):
# The keys are not expected to be hashable.
mapping = {id(k): (k, v) for k, v in mapping}
result = []
for k in keys:
map_key, map_val = mapping.get(id(k), (None, None))
result.append(map_val if map_key is k else None)
return tuple(result)
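# A small, hypothetical demonstration of the identity-based matching performed
# by `_shape_invariants_mapping_to_positional_list`: entries are matched to
# `keys` by object identity, and keys without a mapping yield None.
def _example_shape_invariants_mapping():
  a, b = object(), object()
  mapping = [(a, 'shape_for_a')]
  return _shape_invariants_mapping_to_positional_list(mapping, [a, b])
  # -> ('shape_for_a', None)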
def _tf_while_stmt(test, body, get_state, set_state, symbol_names, opts):
"""Overload of while_stmt that stages a TF while_stmt."""
init_vars = get_state()
_disallow_undefs_into_loop(*init_vars)
def aug_test(*loop_vars):
set_state(loop_vars)
return test()
def aug_body(*loop_vars):
set_state(loop_vars)
body()
new_loop_vars = get_state()
_verify_tf_loop_vars(
init_vars, loop_vars, new_loop_vars, symbol_names, opts)
return new_loop_vars
# Non-v2 while_loop unpacks the results when there is only one return value.
# This enforces consistency across versions.
opts['return_same_structure'] = True
if 'shape_invariants' in opts:
opts['shape_invariants'] = _shape_invariants_mapping_to_positional_list(
opts['shape_invariants'], init_vars)
final_loop_vars = control_flow_ops.while_loop(
aug_test, aug_body, init_vars, **opts)
set_state(final_loop_vars)
def if_stmt(cond,
body,
orelse,
get_state,
set_state,
basic_symbol_names,
composite_symbol_names):
"""Functional form of an if statement.
Args:
cond: Boolean.
body: Callable with no arguments, and outputs of the positive (if) branch as
return type.
orelse: Callable with no arguments, and outputs of the negative (else)
branch as return type.
get_state: Function that returns a tuple containing the values of all
composite symbols modified within the conditional. This allows access to
state that branches may mutate through side effects. This function is not
needed and should not be called when dispatching to code matching Python's
default semantics. This is useful for checkpointing to avoid unintended
side-effects when staging requires evaluating all code-paths.
    set_state: Function to set the values of all composite symbols modified
      within the conditional. This is the complement to get_state, used to
      restore checkpointed values. The single argument is a tuple containing
      values for each composite symbol that may be modified in a branch of the
      conditional. This is usually the result of a call to get_state.
basic_symbol_names: Tuple containing basic loop var names.
composite_symbol_names: Tuple containing composite loop var names.
Returns:
Tuple containing the statement outputs.
"""
# Note: tf.cond doesn't support SparseTensor.
if tensors.is_dense_tensor(cond):
return tf_if_stmt(cond, body, orelse, get_state, set_state,
basic_symbol_names, composite_symbol_names)
else:
return _py_if_stmt(cond, body, orelse)
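# A hypothetical call to the functional `if_stmt` with a plain Python boolean,
# which dispatches to `_py_if_stmt`. The state callables are no-ops because
# this made-up example has no composite symbols.
def _example_if_stmt_usage(flag):
  return if_stmt(
      cond=flag,
      body=lambda: 'taken',
      orelse=lambda: 'not taken',
      get_state=lambda: (),
      set_state=lambda state: None,
      basic_symbol_names=(),
      composite_symbol_names=())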
def tf_if_stmt(cond, body, orelse, get_state, set_state, basic_symbol_names,
composite_symbol_names):
"""Overload of if_stmt that stages a TF cond."""
body = _wrap_disallow_undefs_from_cond(body, branch_name='if')
orelse = _wrap_disallow_undefs_from_cond(orelse, branch_name='else')
body = _isolate_state(body, get_state, set_state)
orelse = _isolate_state(orelse, get_state, set_state)
# `state` currently includes the values of any composite symbols (e.g. `a.b`)
# composites modified by the loop. `final_vars` includes the values of basic
# symbols (e.g. `a`) which cannot be passed by reference and must be returned.
# See _isolate_state.
# TODO(mdan): We should minimize calls to get/set_state.
body_branch = 0
orelse_branch = 1
result = [None, None]
def error_checking_body():
result[body_branch] = body()
if result[orelse_branch] is not None:
_verify_tf_cond_vars(result[body_branch], result[orelse_branch],
basic_symbol_names + composite_symbol_names)
return result[body_branch]
def error_checking_orelse():
result[orelse_branch] = orelse()
if result[body_branch] is not None:
_verify_tf_cond_vars(result[body_branch], result[orelse_branch],
basic_symbol_names + composite_symbol_names)
return result[orelse_branch]
final_vars, final_state = control_flow_ops.cond(cond, error_checking_body,
error_checking_orelse)
set_state(final_state)
return final_vars
def _isolate_state(func, get_state, set_state):
"""Wraps func to (best-effort) isolate state mutations that func may do.
The simplest example of state mutation is mutation of variables (via e.g.
attributes), or modification of globals.
This allows us to more safely execute this function without worrying about
side effects when the function wasn't normally expected to execute. For
example, staging requires that the function is executed ahead of time, and
we need to ensure its effects are not observed during normal execution.
Args:
func: () -> Any
get_state: () -> Any, returns the current state
set_state: (Any) -> None, resets the state to the specified values.
Typically the result of an earlier call to `get_state`.
Returns:
Tuple[Any, Any], where the first element is the return value of `func`,
and the second is the final state values.
"""
def wrapper():
init_state = get_state()
new_vars = func()
# TODO(mdan): These should be copies, lest set_state might affect them.
new_state = get_state()
set_state(init_state)
return new_vars, new_state
return wrapper
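# A hypothetical use of `_isolate_state`: the wrapped function's mutation of
# `holder` is observed, reported as the new state, and then rolled back, so
# calling the wrapper leaves the visible state unchanged.
def _example_isolate_state():
  holder = {'x': 0}
  def func():
    holder['x'] = 1
    return 'result'
  wrapped = _isolate_state(
      func,
      get_state=lambda: (holder['x'],),
      set_state=lambda state: holder.update(x=state[0]))
  ret, new_state = wrapped()  # ret == 'result', new_state == (1,)
  assert holder['x'] == 0  # The mutation was rolled back.
  return ret, new_state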
def _wrap_disallow_undefs_from_cond(func, branch_name):
"""Wraps conditional branch to disallow returning undefined symbols."""
def wrapper():
"""Calls function and raises an error if undefined symbols are returned."""
results = func()
if isinstance(results, tuple):
results_tuple = results
else:
results_tuple = results,
undefined = tuple(filter(special_values.is_undefined, results_tuple))
if undefined:
raise ValueError(
'The following symbols must also be initialized in the {} branch: {}.'
' Alternatively, you may initialize them before the if'
' statement.'.format(branch_name,
tuple(s.symbol_name for s in undefined)))
for result in results_tuple:
if special_values.is_undefined_return(result):
raise ValueError(
'A value must also be returned from the {} branch. If a value is '
'returned from one branch of a conditional a value must be '
'returned from all branches.'.format(branch_name))
return results
return wrapper
def _py_if_stmt(cond, body, orelse):
"""Overload of if_stmt that executes a Python if statement."""
return body() if cond else orelse()
compat_util.deprecated_py2_support(__name__)
| {
"content_hash": "7b200ffc41982868203a0f5c0b3dc19b",
"timestamp": "",
"source": "github",
"line_count": 1017,
"max_line_length": 113,
"avg_line_length": 35.45132743362832,
"alnum_prop": 0.6781771786764298,
"repo_name": "jhseu/tensorflow",
"id": "c75356eaa3f3b6443e282103a135239fcc5d0787",
"size": "36743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/autograph/operators/control_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "27480"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "875455"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "80051513"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112748"
},
{
"name": "Go",
"bytes": "1853641"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1729057"
},
{
"name": "Makefile",
"bytes": "62498"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "304661"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "19515"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "36791185"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "56741"
},
{
"name": "Shell",
"bytes": "685877"
},
{
"name": "Smarty",
"bytes": "35147"
},
{
"name": "Starlark",
"bytes": "3504187"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import os
import sys
from distutils.dir_util import mkpath
from nn.layer import DOUBLEMAX
from util.common import JTensor
from util.common import JavaValue
from util.common import callBigDlFunc
from util.common import callJavaFunc
from util.common import get_spark_context
if sys.version >= '3':
long = int
unicode = str
class MaxIteration(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
MaxIteration is a trigger that triggers an action when training reaches
the number of iterations specified by "max".
Usually used as end_trigger when creating an Optimizer.
>>> maxIteration = MaxIteration(20)
creating: createMaxIteration
"""
def __init__(self, max, bigdl_type="float"):
"""
Create a MaxIteration trigger.
        :param max: the maximum number of iterations
"""
JavaValue.__init__(self, None, bigdl_type, max)
class MaxEpoch(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
MaxEpoch is a trigger that triggers an action when training reaches
the number of epochs specified by "max_epoch".
Usually used as end_trigger when creating an Optimizer.
>>> maxEpoch = MaxEpoch(2)
creating: createMaxEpoch
"""
def __init__(self, max_epoch, bigdl_type="float"):
"""
Create a MaxEpoch trigger.
        :param max_epoch: the maximum number of epochs
"""
JavaValue.__init__(self, None, bigdl_type, max_epoch)
class EveryEpoch(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
    EveryEpoch is a trigger that triggers an action when each epoch finishes.
    It can be used as a trigger in set_validation and set_checkpoint in Optimizer,
and also in TrainSummary.set_summary_trigger.
>>> everyEpoch = EveryEpoch()
creating: createEveryEpoch
"""
def __init__(self, bigdl_type="float"):
"""
Create a EveryEpoch trigger.
"""
JavaValue.__init__(self, None, bigdl_type)
class SeveralIteration(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
SeveralIteration is a trigger that triggers an action every "n"
iterations.
    It can be used as a trigger in set_validation and set_checkpoint in Optimizer,
and also in TrainSummary.set_summary_trigger.
    >>> severalIteration = SeveralIteration(2)
creating: createSeveralIteration
"""
def __init__(self, interval, bigdl_type="float"):
"""
Create a SeveralIteration trigger.
        :param interval: the "n" such that an action is triggered
every "n" iterations
"""
JavaValue.__init__(self, None, bigdl_type, interval)
class Poly(JavaValue):
"""
A learning rate decay policy, where the effective learning rate
follows a polynomial decay, to be zero by the max_iteration.
    Calculation: base_lr * (1 - iter/max_iteration) ^ power
    :param power: the polynomial power (decay exponent)
    :param max_iteration: the iteration at which the learning rate decays to zero
>>> poly = Poly(0.5, 2)
creating: createPoly
"""
def __init__(self, power, max_iteration, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, power, max_iteration)
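# A hypothetical helper (for illustration only) that evaluates the polynomial
# decay formula documented above: base_lr * (1 - iter/max_iteration) ** power.
def _example_poly_lr(base_lr, power, max_iteration, cur_iteration):
    return base_lr * (1.0 - float(cur_iteration) / max_iteration) ** power
# e.g. _example_poly_lr(0.1, 0.5, 2, 1) ~= 0.0707, and the rate reaches 0.0 at iteration 2.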
class Step(JavaValue):
"""
A learning rate decay policy, where the effective learning rate is
calculated as base_lr * gamma ^ (floor(iter / step_size))
    :param step_size: the number of iterations between learning rate drops
    :param gamma: the multiplicative decay factor
>>> step = Step(2, 0.3)
creating: createStep
"""
def __init__(self, step_size, gamma, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, step_size, gamma)
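# A hypothetical helper (for illustration only) that evaluates the step decay
# formula documented above: base_lr * gamma ** floor(iter / step_size).
def _example_step_lr(base_lr, step_size, gamma, cur_iteration):
    return base_lr * gamma ** (cur_iteration // step_size)
# e.g. with base_lr=0.1, step_size=2, gamma=0.3: iterations 0-1 -> 0.1,
# iterations 2-3 -> 0.03, iterations 4-5 -> 0.009.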
class Default(JavaValue):
"""
    The default learning rate schedule: for iteration n, the effective
    learning rate is calculated as base_lr / (1 + n * learning_rate_decay).
    It takes no extra parameters.
    >>> default = Default()
creating: createDefault
"""
def __init__(self, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type)
class SGD(JavaValue):
"""
A plain implementation of SGD
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param weightdecay weight decay
:param momentum momentum
:param dampening dampening for momentum
:param nesterov enables Nesterov momentum
:param learningrates 1D tensor of individual learning rates
:param weightdecays 1D tensor of individual weight decays
>>> sgd = SGD()
creating: createDefault
creating: createSGD
"""
def __init__(self,
learningrate=1e-3,
learningrate_decay=0.0,
weightdecay=0.0,
momentum=0.0,
dampening=DOUBLEMAX,
nesterov=False,
leaningrate_schedule=None,
learningrates=None,
weightdecays=None,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, learningrate, learningrate_decay, weightdecay,
momentum, dampening, nesterov,
leaningrate_schedule if (leaningrate_schedule) else Default(),
JTensor.from_ndarray(learningrates), JTensor.from_ndarray(weightdecays))
class Adagrad(JavaValue):
"""
An implementation of Adagrad. See the original paper:
http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param weightdecay weight decay
>>> adagrad = Adagrad()
creating: createAdagrad
"""
def __init__(self,
learningrate=1e-3,
learningrate_decay=0.0,
weightdecay=0.0,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, learningrate, learningrate_decay, weightdecay)
class LBFGS(JavaValue):
"""
This implementation of L-BFGS relies on a user-provided line
search function (state.lineSearch). If this function is not
provided, then a simple learningRate is used to produce fixed
size steps. Fixed size steps are much less costly than line
searches, and can be useful for stochastic problems.
The learning rate is used even when a line search is provided.
This is also useful for large-scale stochastic problems, where
opfunc is a noisy approximation of f(x). In that case, the learning
rate allows a reduction of confidence in the step size.
:param max_iter Maximum number of iterations allowed
:param max_eval Maximum number of function evaluations
:param tolfun Termination tolerance on the first-order optimality
:param tolx Termination tol on progress in terms of func/param changes
:param ncorrection
:param learningrate
:param verbose
:param linesearch A line search function
:param linesearch_options If no line search provided, then a fixed step size is used
>>> lbfgs = LBFGS()
creating: createLBFGS
"""
def __init__(self,
max_iter=20,
max_eval=DOUBLEMAX,
tolfun=1e-5,
tolx=1e-9,
ncorrection=100,
learningrate=1.0,
verbose=False,
linesearch=None,
linesearch_options=None,
bigdl_type="float"):
if linesearch or linesearch_options:
raise ValueError('linesearch and linesearch_options must be None in LBFGS')
JavaValue.__init__(self, None, bigdl_type, max_iter, max_eval, tolfun, tolx,
ncorrection, learningrate, verbose, linesearch, linesearch_options)
class Adadelta(JavaValue):
"""
Adadelta implementation for SGD: http://arxiv.org/abs/1212.5701
:param decayrate interpolation parameter rho
:param epsilon for numerical stability
    >>> adadelta = Adadelta()
creating: createAdadelta
"""
def __init__(self,
decayrate = 0.9,
epsilon = 1e-10,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, decayrate, epsilon)
class Adam(JavaValue):
"""
An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
    >>> adam = Adam()
creating: createAdam
"""
def __init__(self,
learningrate = 1e-3,
learningrate_decay = 0.0,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 1e-8,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, learningrate, learningrate_decay,
beta1, beta2, epsilon)
class Adamax(JavaValue):
"""
An implementation of Adamax http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
    >>> adamax = Adamax()
creating: createAdamax
"""
def __init__(self,
learningrate = 0.002,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 1e-38,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, learningrate, beta1, beta2, epsilon)
class RMSprop(JavaValue):
"""
An implementation of RMSprop
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param decayrate decay rate, also called rho
:param epsilon for numerical stability
    >>> rmsprop = RMSprop()
creating: createRMSprop
"""
def __init__(self,
learningrate = 1e-2,
learningrate_decay = 0.0,
decayrate = 0.99,
epsilon = 1e-8,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, learningrate, learningrate_decay, decayrate, epsilon)
class MultiStep(JavaValue):
"""
    Similar to Step, but allows non-uniform steps defined by step_sizes.
    :param step_sizes the series of step sizes used for lr decay
:param gamma coefficient of decay
>>> step = MultiStep([2, 5], 0.3)
creating: createMultiStep
"""
def __init__(self, step_sizes, gamma, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, step_sizes, gamma)
class Optimizer(JavaValue):
"""
    An optimizer is, in general, used to minimize any function with respect
    to a set of parameters. In the case of training a neural network,
an optimizer tries to minimize the loss of the neural net with
respect to its weights/biases, over the training set.
"""
def __init__(self,
model,
training_rdd,
criterion,
end_trigger,
batch_size,
optim_method=None,
bigdl_type="float"):
"""
Create an optimizer.
:param model: the neural net model
        :param training_rdd: the training dataset
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
:param end_trigger: when to end the optimization
:param batch_size: training batch size
"""
JavaValue.__init__(self, None, bigdl_type, model.value,
training_rdd, criterion,
optim_method if optim_method else SGD(), end_trigger, batch_size)
def set_validation(self, batch_size, val_rdd, trigger, val_method=["Top1Accuracy"]):
"""
Configure validation settings.
:param batch_size: validation batch size
:param val_rdd: validation dataset
:param trigger: validation interval
:param val_method: the ValidationMethod to use,
e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
"""
callBigDlFunc(self.bigdl_type, "setValidation", self.value, batch_size,
trigger, val_rdd, val_method)
def set_model(self, model):
"""
Set model.
:param model: new model
"""
self.value.setModel(model.value)
def set_checkpoint(self, checkpoint_trigger,
checkpoint_path, isOverWrite=True):
"""
Configure checkpoint settings.
:param checkpoint_trigger: the interval to write snapshots
:param checkpoint_path: the path to write snapshots into
:param isOverWrite: whether to overwrite existing snapshots in path.
default is True
"""
if not os.path.exists(checkpoint_path):
mkpath(checkpoint_path)
callBigDlFunc(self.bigdl_type, "setCheckPoint", self.value,
checkpoint_trigger, checkpoint_path, isOverWrite)
    # returns a trained Model
def optimize(self):
"""
Do an optimization.
"""
jmodel = callJavaFunc(get_spark_context(), self.value.optimize)
from nn.layer import Model
return Model.of(jmodel)
def set_train_summary(self, summary):
"""
Set train summary. A TrainSummary object contains information
        necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of TrainSummary.
:param summary: a TrainSummary object
"""
callBigDlFunc(self.bigdl_type, "setTrainSummary", self.value,
summary)
return self
def set_val_summary(self, summary):
"""
Set validation summary. A ValidationSummary object contains information
        necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of ValidationSummary.
:param summary: a ValidationSummary object
"""
callBigDlFunc(self.bigdl_type, "setValSummary", self.value,
summary)
return self
def prepare_input(self):
"""
        Load input. Notebook users can call this method to separate data loading
        from optimizer creation.
"""
print("Loading input ...")
self.value.prepareInput()
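# A hypothetical end-to-end wiring of the Optimizer API documented above.
# `model`, `train_rdd`, `val_rdd` and `criterion` are assumed to be created
# elsewhere (e.g. a model and a loss from nn.layer); the numbers and paths
# below are placeholders for illustration only.
def _example_optimizer_setup(model, train_rdd, val_rdd, criterion):
    optimizer = Optimizer(model=model,
                          training_rdd=train_rdd,
                          criterion=criterion,
                          end_trigger=MaxEpoch(2),
                          batch_size=32,
                          optim_method=SGD(learningrate=0.01))
    optimizer.set_validation(batch_size=32, val_rdd=val_rdd,
                             trigger=EveryEpoch(), val_method=["Top1Accuracy"])
    optimizer.set_checkpoint(EveryEpoch(), "/tmp/bigdl_checkpoints")
    return optimizer.optimize()  # returns a trained Model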
class TrainSummary(JavaValue):
"""
A logging facility which allows user to trace how indicators (e.g.
learning rate, training loss, throughput, etc.) change with iterations/time
in an optimization process. TrainSummary is for training indicators only
(check ValidationSummary for validation indicators). It contains necessary
information for the optimizer to know where to store the logs, how to
    retrieve the logs, and so on. The logs are written in a TensorFlow-compatible
    format so that they can be visualized directly using TensorBoard. The logs
    can also be retrieved as ndarrays and visualized with Python libraries
    such as matplotlib (in a notebook, etc.).
    Use optimizer.set_train_summary to enable the train logger.
"""
def __init__(self, log_dir, app_name, bigdl_type="float"):
"""
Create a TrainSummary. Logs will be saved to log_dir/app_name/train.
:param log_dir: the root dir to store the logs
:param app_name: the application name
"""
JavaValue.__init__(self, None, bigdl_type, log_dir, app_name)
def read_scalar(self, tag):
"""
Retrieve train logs by type. Return an array of records in the format
(step,value,wallClockTime). - "Step" is the iteration count by default.
:param tag: the type of the logs, Supported tags are: "LearningRate",
"Loss", "Throughput"
"""
return callBigDlFunc(self.bigdl_type, "summaryReadScalar", self.value,
tag)
def set_summary_trigger(self, name, trigger):
"""
Set the interval of recording for each indicator.
        :param name: tag name. Supported tag names are "LearningRate", "Loss",
        "Throughput", "Parameters". "Parameters" is an umbrella tag that
        includes weight, bias, gradWeight, gradBias, and some running status
        (e.g. runningMean and runningVar in BatchNormalization). If you
        don't set any triggers, Loss and Throughput are recorded by default
        in each iteration, while LearningRate and Parameters are *NOT*,
        because recording parameters may introduce substantial overhead when
        the model is very big, and LearningRate is not a public attribute of
        all OptimMethods.
:param trigger: trigger
"""
return callBigDlFunc(self.bigdl_type, "summarySetTrigger", self.value,
name, trigger)
class ValidationSummary(JavaValue):
"""
A logging facility which allows user to trace how indicators (e.g.
    validation loss, top1 accuracy, top5 accuracy, etc.) change with
iterations/time in an optimization process. ValidationSummary is for
validation indicators only (check TrainSummary for train indicators).
It contains necessary information for the optimizer to know where to
    store the logs, how to retrieve the logs, and so on. The logs are
    written in a TensorFlow-compatible format so that they can be visualized
    directly using TensorBoard. The logs can also be retrieved as ndarrays
    and visualized with Python libraries such as matplotlib
    (in a notebook, etc.).
    Use optimizer.set_val_summary to enable the validation logger.
"""
def __init__(self, log_dir, app_name, bigdl_type="float"):
"""
Create a ValidationSummary. Logs will be saved to
        log_dir/app_name/validation. By default, all ValidationMethods set into
        the optimizer will be recorded and the recording interval is the same
        as the trigger of the ValidationMethod in the optimizer.
:param log_dir: the root dir to store the logs
:param app_name: the application name
"""
JavaValue.__init__(self, None, bigdl_type, log_dir, app_name)
def read_scalar(self, tag):
"""
Retrieve validation logs by type. Return an array of records in the
format (step,value,wallClockTime). - "Step" is the iteration count
by default.
:param tag: the type of the logs. The tag should match the name of
        the ValidationMethod set into the optimizer, e.g.
        "Loss", "Top1Accuracy" or "Top5Accuracy".
"""
return callBigDlFunc(self.bigdl_type, "summaryReadScalar", self.value,
tag)
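# A hypothetical sketch of the summary workflow described above: attach the
# loggers to an optimizer, run training, then pull the recorded scalars (each
# record is (step, value, wallClockTime)). The log directory and app name are
# placeholders for illustration only.
def _example_read_summaries(optimizer):
    train_summary = TrainSummary(log_dir="/tmp/bigdl_logs", app_name="demo")
    val_summary = ValidationSummary(log_dir="/tmp/bigdl_logs", app_name="demo")
    optimizer.set_train_summary(train_summary)
    optimizer.set_val_summary(val_summary)
    optimizer.optimize()
    loss = train_summary.read_scalar("Loss")
    top1 = val_summary.read_scalar("Top1Accuracy")
    return loss, top1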
def _test():
import doctest
from pyspark import SparkContext
from optim import optimizer
from util.common import init_engine
from util.common import create_spark_conf
globs = optimizer.__dict__.copy()
sc = SparkContext(master="local[4]", appName="test optimizer",
conf=create_spark_conf())
init_engine()
globs['sc'] = sc
(failure_count, test_count) = doctest.testmod(globs=globs,
optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| {
"content_hash": "f53cd1a3458b44773a77e06fd304f964",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 104,
"avg_line_length": 36.896103896103895,
"alnum_prop": 0.6310152360838739,
"repo_name": "psyyz10/BigDL",
"id": "9e015c62ce15e0744241a5f08a0a6367d6a35222",
"size": "20475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyspark/dl/optim/optimizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "6829"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Python",
"bytes": "297805"
},
{
"name": "RobotFramework",
"bytes": "10583"
},
{
"name": "Scala",
"bytes": "3065914"
},
{
"name": "Shell",
"bytes": "26076"
}
],
"symlink_target": ""
} |
import sys
sys.path.append('../pinance')
from pytz import timezone
from datetime import datetime, timedelta
from __init__ import Pinance
symbol = "AMD"
stock = Pinance(symbol)
# Stock
stock.get_quotes()
print(stock.quotes_data)
# News
stock.get_news()
print(stock.news_data)
# Option
stock.get_options('2017-05-05', 'P', 10)
print(stock.options_data)
| {
"content_hash": "c050efeb1a672412269e717460bb744c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 40,
"avg_line_length": 16.272727272727273,
"alnum_prop": 0.7262569832402235,
"repo_name": "neberej/pinance",
"id": "ceb0740b04be5f0f9d60f8273dd95483362c0da2",
"size": "360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_pinance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10287"
}
],
"symlink_target": ""
} |
from flask import request, session, abort, current_app, redirect, url_for
from werkzeug import generate_password_hash
import database
from datetime import datetime
import pprint
from time import strptime
### About ###
def update_about():
about = {
"sitename": request.form["sitename"],
"description": request.form["description"]
}
database.update_about(about)
return "", 200
def extension(filename):
return filename.rsplit(".", 1)[1]
def allowed_file(filename):
return "." in filename and extension(filename) in ["jpg", "gif", "png"]
def update_site_avatar():
f = request.files["file"]
if f and allowed_file(f.filename):
path = "/static/avatars/site." + extension(f.filename)
database.update_site_avatar(path)
f.save("." + path)
return "", 200
### Users ###
def add_user():
user = {
"email": request.form["email"],
"username": request.form["username"],
"password_hash": generate_password_hash(request.form["password"]),
"permissions": request.form["permissions"]
}
database.add_user(user)
return redirect(url_for("admin_users"))
def update_user():
user = {
"email": request.form["email"],
"username": request.form["username"],
"password_hash": generate_password_hash(request.form["password"]),
"permissions": request.form["permissions"],
"username-prev": request.form["username-prev"]
}
database.update_user(user)
return redirect(url_for("admin_users"))
def delete_user():
user = {
"username": request.form["username"]
}
if user["username"] == session["username"]:
abort(409)
database.delete_user(user)
return redirect(url_for("admin_users"))
### Categories ###
def add_category():
category = {
"name": request.form["name"]
}
database.add_category(category)
return redirect(url_for("admin_categories"))
def update_category():
category = {
"name": request.form["name"],
"name-prev": request.form["name-prev"]
}
database.update_category(category)
return redirect(url_for("admin_categories"))
def delete_category():
category = {
"name": request.form["name"]
}
database.delete_category(category)
return redirect(url_for("admin_categories"))
### Posts ###
def update_post():
post = {
"author": session["username"],
"creation_date": request.form["date"],
"category": database.category_id(request.form["category"]),
"title": request.form["title"],
"body": request.form["body"],
"id": request.form["id"]
}
try:
strptime(post["creation_date"], "%Y-%m-%d %H:%M:%S")
except ValueError:
abort(500)
database.update_post(post)
return redirect(url_for("admin_posts"))
def delete_post():
post = {
"id": request.form["id"]
}
database.delete_post(post)
return redirect(url_for("admin_posts"))
| {
"content_hash": "e652fff9dfe744b68619f96797e2b518",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 75,
"avg_line_length": 24.29032258064516,
"alnum_prop": 0.6092297476759628,
"repo_name": "thebetabox/orthrus",
"id": "d6020fa88777abeece63f018eb14b5d5e2d1841d",
"size": "3012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "93595"
},
{
"name": "HTML",
"bytes": "31184"
},
{
"name": "JavaScript",
"bytes": "164687"
},
{
"name": "Python",
"bytes": "32258"
}
],
"symlink_target": ""
} |
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404
from wagtail.admin.forms.pages import PageViewRestrictionForm
from wagtail.admin.modal_workflow import render_modal_workflow
from wagtail.core.models import Page, PageViewRestriction
def set_privacy(request, page_id):
page = get_object_or_404(Page, id=page_id)
page_perms = page.permissions_for_user(request.user)
if not page_perms.can_set_view_restrictions():
raise PermissionDenied
# fetch restriction records in depth order so that ancestors appear first
restrictions = page.get_view_restrictions().order_by('page__depth')
if restrictions:
restriction = restrictions[0]
restriction_exists_on_ancestor = (restriction.page != page)
else:
restriction = None
restriction_exists_on_ancestor = False
if request.method == 'POST':
form = PageViewRestrictionForm(request.POST, instance=restriction)
if form.is_valid() and not restriction_exists_on_ancestor:
if form.cleaned_data['restriction_type'] == PageViewRestriction.NONE:
# remove any existing restriction
if restriction:
restriction.delete(user=request.user)
else:
restriction = form.save(commit=False)
restriction.page = page
restriction.save(user=request.user)
# Save the groups many-to-many field
form.save_m2m()
return render_modal_workflow(
request, None, None,
None, json_data={
'step': 'set_privacy_done',
'is_public': (form.cleaned_data['restriction_type'] == 'none')
}
)
else: # request is a GET
if not restriction_exists_on_ancestor:
if restriction:
form = PageViewRestrictionForm(instance=restriction)
else:
# no current view restrictions on this page
form = PageViewRestrictionForm(initial={
'restriction_type': 'none'
})
if restriction_exists_on_ancestor:
# display a message indicating that there is a restriction at ancestor level -
# do not provide the form for setting up new restrictions
return render_modal_workflow(
request, 'wagtailadmin/page_privacy/ancestor_privacy.html', None,
{
'page_with_restriction': restriction.page,
}
)
else:
# no restriction set at ancestor level - can set restrictions here
return render_modal_workflow(
request, 'wagtailadmin/page_privacy/set_privacy.html', None, {
'page': page,
'form': form,
}, json_data={'step': 'set_privacy'}
)
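# A hypothetical URL entry for exposing the modal view above; the path and the
# URL name are placeholders for illustration, not Wagtail's actual admin URLconf.
#
#   from django.urls import path
#   urlpatterns = [
#       path('pages/<int:page_id>/privacy/', set_privacy, name='set_privacy'),
#   ]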
| {
"content_hash": "533e5b0836e4ea88458d964c2456cb38",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 86,
"avg_line_length": 40.15277777777778,
"alnum_prop": 0.6046350743687305,
"repo_name": "takeflight/wagtail",
"id": "2efc9b44dbe1ae337aec57c2d2e734a9d28c9bc3",
"size": "2891",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "wagtail/admin/views/page_privacy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "181889"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "367981"
},
{
"name": "JavaScript",
"bytes": "255453"
},
{
"name": "Makefile",
"bytes": "738"
},
{
"name": "Python",
"bytes": "3459754"
},
{
"name": "Shell",
"bytes": "7868"
}
],
"symlink_target": ""
} |
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import unittest
import pyxb.binding.datatypes as xsd
class Test_anyURI (unittest.TestCase):
def testRange (self):
self.fail("Datatype anyURI test not implemented")
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "42095047d05424bcd06a037453b9958e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 57,
"avg_line_length": 25.23076923076923,
"alnum_prop": 0.6646341463414634,
"repo_name": "pabigot/pyxb",
"id": "9af8f81449d624462c85acfd3277e044365deac6",
"size": "352",
"binary": false,
"copies": "5",
"ref": "refs/heads/next",
"path": "tests/datatypes/totest-anyURI.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1927697"
},
{
"name": "Shell",
"bytes": "20792"
}
],
"symlink_target": ""
} |
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TestModelSaving(test.TestCase):
def test_sequential_model_saving(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
new_model = keras.models.load_model(fname)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# test that new updates are the same with both models
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
new_model.train_on_batch(x, y)
out = model.predict(x)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_2(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
# test with custom optimizer, loss
class CustomOp(keras.optimizers.RMSprop):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(
fname,
custom_objects={'CustomOp': CustomOp,
'custom_loss': custom_loss})
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_functional_model_saving(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
inputs = keras.layers.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_without_compilation(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
def test_saving_right_after_compilation(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
model.model._make_train_function()
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
class TestSequential(test.TestCase):
"""Most Sequential model API tests are covered in `training_test.py`.
"""
def test_sequential_pop(self):
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.compile(loss='mse', optimizer='sgd')
x = np.random.random((batch_size, input_dim))
y = np.random.random((batch_size, num_classes))
model.fit(x, y, epochs=1)
model.pop()
self.assertEqual(len(model.layers), 1)
self.assertEqual(model.output_shape, (None, num_hidden))
model.compile(loss='mse', optimizer='sgd')
y = np.random.random((batch_size, num_hidden))
model.fit(x, y, epochs=1)
if __name__ == '__main__':
test.main()
| {
"content_hash": "16d91e0bad4a37d747afe9b97aa75d16",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 76,
"avg_line_length": 31.106145251396647,
"alnum_prop": 0.6287715517241379,
"repo_name": "zycdragonball/tensorflow",
"id": "99fd6e1cbe1bbcb6494a06bf26d06edd03f4507b",
"size": "6257",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/keras/python/keras/models_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "194748"
},
{
"name": "C++",
"bytes": "27102301"
},
{
"name": "CMake",
"bytes": "176532"
},
{
"name": "Go",
"bytes": "913136"
},
{
"name": "Java",
"bytes": "323804"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37293"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Protocol Buffer",
"bytes": "249933"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "23523053"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "336620"
}
],
"symlink_target": ""
} |
from django_jinja import library
from ..jinja import SendinBlueExtension
library.extension(SendinBlueExtension)
| {
"content_hash": "41c8b7220d27be0e4b678a5d33c9ba7b",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 40,
"avg_line_length": 28.5,
"alnum_prop": 0.8508771929824561,
"repo_name": "apihackers/wagtail-sendinblue",
"id": "20e8be3b28cfd53f1bb979278f237f5d3016731d",
"size": "178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sendinblue/templatetags/jinja.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4107"
},
{
"name": "HTML",
"bytes": "10873"
},
{
"name": "JavaScript",
"bytes": "203"
},
{
"name": "PHP",
"bytes": "6296"
},
{
"name": "Python",
"bytes": "151193"
}
],
"symlink_target": ""
} |
import uuid
from contracts import Contract, Context, fields, timezone
from contracts.exceptions import ValidationError
from contracts.utils import missing
from datetime import datetime, date
from unittest import TestCase
class BaseTestCase(TestCase):
def _dump_equal(self, field, input_value, expected_value):
self.assertEqual(expected_value, field.dump(input_value, Context()))
def _dump_raises(self, field, input_value, expected_failure):
with self.assertRaises(Exception) as exc_info:
field.dump(input_value, Context())
self.assertEqual(expected_failure, str(exc_info.exception))
def _load_equal(self, field, input_value, expected_value):
self.assertEqual(expected_value, field.load(input_value, Context()))
def _load_raises(self, field, input_value, expected_failure):
with self.assertRaises(Exception) as exc_info:
field.load(input_value, Context())
if isinstance(exc_info.exception, ValidationError):
self.assertEqual(expected_failure, exc_info.exception.messages)
else:
self.assertEqual(expected_failure, str(exc_info.exception))
class TestField(BaseTestCase):
"""
Valid and invalid values for `Field`.
"""
def test_default(self):
field = fields.Field(default=123)
self._load_equal(field, missing, 123)
def test_callable_default(self):
field = fields.Field(default=lambda: 123)
self._load_equal(field, missing, 123)
def test_bypass_default_on_loading(self):
field = fields.Field(default=123)
self._load_equal(field, 456, 456)
def test_required(self):
field = fields.Field(required=True)
self._load_raises(field, missing, ['This field is required.'])
self._load_equal(field, 'abc', 'abc')
def test_non_required(self):
field = fields.Field(required=False)
self._load_equal(field, missing, missing)
self._load_equal(field, 'abc', 'abc')
def test_allow_none(self):
field = fields.Field(allow_none=True)
self._load_equal(field, None, None)
def test_disallow_none(self):
field = fields.Field(allow_none=False)
self._load_raises(field, None, ['This field may not be null.'])
def test_bind(self):
class Parent(Contract):
pass
field = fields.Field()
field.bind('field1', Parent)
self.assertEqual(field.dump_to, 'field1')
self.assertEqual(field.load_from, 'field1')
self.assertEqual(field.name, 'field1')
self.assertEqual(field.parent, Parent)
def test_bind_with_invalid_name(self):
class Parent(Contract):
pass
field = fields.Field()
self.assertRaises(ValueError, field.bind, None, Parent)
self.assertRaises(ValueError, field.bind, '', Parent)
def test_bind_with_invalid_parent(self):
class InvalidContract:
pass
field = fields.Field()
self.assertRaises(ValueError, field.bind, 'field1', None)
self.assertRaises(ValueError, field.bind, 'field1', '')
self.assertRaises(ValueError, field.bind, 'field1', 1)
self.assertRaises(ValueError, field.bind, 'field1', InvalidContract)
def test_validator(self):
def validator(value):
pass
field = fields.Field(validators=[validator])
self._load_equal(field, 123, 123)
def test_validator_returning_true(self):
def validator(value):
return True
field = fields.Field(validators=[validator])
self._load_equal(field, 123, 123)
def test_validator_returning_false(self):
def validator(value):
return False
field = fields.Field(validators=[validator])
self._load_raises(field, 123, ['Invalid value.'])
def test_validator_raising_error(self):
def validator(value):
raise ValueError()
field = fields.Field(validators=[validator])
self.assertRaises(ValueError, field.load, 123, None)
def test_null_error_message(self):
field = fields.Field()
with self.assertRaises(ValidationError) as e:
field._fail('null')
self.assertEqual(e.exception.messages, ['This field may not be null.'])
def test_not_found_error_message(self):
field = fields.Field()
with self.assertRaises(AssertionError) as e:
field._fail('not_found')
def test_custom_error_message(self):
field = fields.Field(error_messages={'custom': 'custom fail'})
with self.assertRaises(ValidationError) as e:
field._fail('custom')
self.assertEqual(e.exception.messages, ['custom fail'])
def test_dict_error_message(self):
field = fields.Field(error_messages={'invalid': {'message': 'error message', 'code': 123}})
with self.assertRaises(ValidationError) as e:
field._fail('invalid')
self.assertEqual(e.exception.messages, [{'message': 'error message', 'code': 123}])
class TestBoolean(BaseTestCase):
"""
Valid and invalid values for `Boolean`.
"""
def test_valid_inputs(self):
field = fields.Boolean()
for value in ('True', 'true', 'TRUE', '1', 1, True):
self._load_equal(field, value, True)
for value in ('False', 'false', 'FALSE', '0', 0, False):
self._load_equal(field, value, False)
def test_invalid_inputs(self):
self._load_raises(fields.Boolean(), 'foo', ['"foo" is not a valid boolean.'])
self._load_raises(fields.Boolean(), [], ['"[]" is not a valid boolean.'])
def test_valid_outputs(self):
field = fields.Boolean()
for value in ('True', 'true', 'TRUE', '1', 'other', 1, True):
self._dump_equal(field, value, True)
for value in ('False', 'false', 'FALSE', '0', 0, False):
self._dump_equal(field, value, False)
def test_invalid_outputs(self):
field = fields.Boolean()
self._dump_raises(field, [], "unhashable type: 'list'")
self._dump_raises(field, {}, "unhashable type: 'dict'")
class TestDate(BaseTestCase):
"""
Valid and invalid values for `Date`.
"""
def test_valid_inputs(self):
field = fields.Date()
self._load_equal(field, '2001-01', date(2001, 1, 1))
self._load_equal(field, '2001-01-20', date(2001, 1, 20))
self._load_equal(field, '20010120', date(2001, 1, 20))
self._load_equal(field, '2001-01-20T01:00:00', date(2001, 1, 20))
self._load_equal(field, date(2001, 1, 20), date(2001, 1, 20))
self._load_equal(field, datetime(2001, 1, 20, 12, 00), date(2001, 1, 20))
def test_invalid_inputs(self):
field = fields.Date()
self._load_raises(field, '', ['Date has wrong format.'])
self._load_raises(field, 'abc', ['Date has wrong format.'])
self._load_raises(field, '2001-13-01', ['Date has wrong format.'])
self._load_raises(field, '2001-01-32', ['Date has wrong format.'])
self._load_raises(field, 20010120, ['Date has wrong format.'])
def test_valid_outputs(self):
field = fields.Date()
self._dump_equal(field, date(2001, 1, 20), '2001-01-20')
self._dump_equal(field, datetime(2001, 1, 20, 12, 00), '2001-01-20')
def test_invalid_outputs(self):
field = fields.Date()
self._dump_raises(field, '2001-01-20', "'str' object has no attribute 'isoformat'")
self._dump_raises(field, 'abc', "'str' object has no attribute 'isoformat'")
self._dump_raises(field, 1, "'int' object has no attribute 'isoformat'")
class TestDateTime(BaseTestCase):
"""
Valid and invalid values for `DateTime`.
"""
def test_valid_inputs(self):
field = fields.DateTime()
self._load_equal(field, '2001-01-01', datetime(2001, 1, 1))
self._load_equal(field, '2001-01-01 13:00', datetime(2001, 1, 1, 13, 00))
self._load_equal(field, '2001-01-01T13:00:01', datetime(2001, 1, 1, 13, 0, 1))
self._load_equal(field, '2001-01-01T13:00:01.001', datetime(2001, 1, 1, 13, 0, 1, 1000))
self._load_equal(field, '2001-01-01T13:00Z', datetime(2001, 1, 1, 13, 00))
self._load_equal(field, '2001-01-01T13:00+00:00', datetime(2001, 1, 1, 13, 00))
self._load_equal(field, datetime(2001, 1, 1, 13, 00), datetime(2001, 1, 1, 13, 00))
self._load_equal(field, datetime(2001, 1, 1, 13, 00, tzinfo=timezone.utc), datetime(2001, 1, 1, 13, 00))
def test_valid_inputs_with_default_timezone(self):
field = fields.DateTime(default_timezone=timezone.utc)
self._load_equal(field, '2001-01-01', datetime(2001, 1, 1, tzinfo=timezone.utc))
self._load_equal(field, '2001-01-01 13:00', datetime(2001, 1, 1, 13, 00, tzinfo=timezone.utc))
self._load_equal(field, '2001-01-01T13:00', datetime(2001, 1, 1, 13, 00, tzinfo=timezone.utc))
self._load_equal(field, '2001-01-01T13:00Z', datetime(2001, 1, 1, 13, 00, tzinfo=timezone.utc))
self._load_equal(field, '2001-01-01T13:00+00:00', datetime(2001, 1, 1, 13, 00, tzinfo=timezone.utc))
self._load_equal(field, datetime(2001, 1, 1, 13, 00), datetime(2001, 1, 1, 13, 00, tzinfo=timezone.utc))
self._load_equal(field, datetime(2001, 1, 1, 13, 00, tzinfo=timezone.utc), datetime(2001, 1, 1, 13, 00, tzinfo=timezone.utc))
def test_invalid_inputs(self):
field = fields.DateTime()
self._load_raises(field, '', ['Datetime has wrong format.'])
self._load_raises(field, 'abc', ['Datetime has wrong format.'])
self._load_raises(field, '2001-13-01', ['Datetime has wrong format.'])
self._load_raises(field, '2001-01-32', ['Datetime has wrong format.'])
# self._load_raises(field, '2001-01-01T99:00', ['Datetime has wrong format.'])
self._load_raises(field, 20010120, ['Datetime has wrong format.'])
self._load_raises(field, date(2001, 1, 1), ['Expected a datetime but got a date.'])
def test_valid_outputs(self):
field = fields.DateTime()
self._dump_equal(field, datetime(2001, 1, 1, 13, 00), '2001-01-01T13:00:00')
self._dump_equal(field, datetime(2001, 1, 1, 13, 00, tzinfo=timezone.utc), '2001-01-01T13:00:00+00:00')
def test_invalid_outputs(self):
field = fields.DateTime()
self._dump_raises(field, '2001-01-01T13:00:00', "'str' object has no attribute 'isoformat'")
self._dump_raises(field, 123, "'int' object has no attribute 'isoformat'")
class TestFloat(BaseTestCase):
"""
Valid and invalid values for `Float`.
"""
def test_valid_inputs(self):
field = fields.Float()
self._load_equal(field, '1', 1.0)
self._load_equal(field, '0', 0.0)
self._load_equal(field, 1, 1.0)
self._load_equal(field, 0, 0.0)
self._load_equal(field, 1.0, 1.0)
self._load_equal(field, 0.0, 0.0)
def test_invalid_inputs(self):
field = fields.Float()
self._load_raises(field, 'abc', ['A valid number is required.'])
def test_valid_outputs(self):
field = fields.Float()
self._dump_equal(field, '1', 1.0)
self._dump_equal(field, '0', 0.0)
self._dump_equal(field, 1, 1.0)
self._dump_equal(field, 0, 0.0)
self._dump_equal(field, 1, 1.0)
self._dump_equal(field, 0, 0.0)
def test_invalid_outputs(self):
field = fields.Float()
self._dump_raises(field, 'abc', "could not convert string to float: 'abc'")
self._dump_raises(field, [], "float() argument must be a string or a number, not 'list'")
class TestMinMaxFloat(BaseTestCase):
"""
Valid and invalid values for `Float`.
"""
def test_valid_inputs(self):
field = fields.Float(min_value=1, max_value=3)
self._load_equal(field, 1.0, 1.0)
self._load_equal(field, 3.0, 3.0)
def test_invalid_inputs(self):
field = fields.Float(min_value=1, max_value=3)
self._load_raises(field, 0.9, ['Ensure this value is greater than or equal to 1.'])
self._load_raises(field, 3.1, ['Ensure this value is less than or equal to 3.'])
def test_valid_outputs(self):
field = fields.Float(min_value=1, max_value=3)
self._dump_equal(field, 0.0, 0.0)
self._dump_equal(field, 4.0, 4.0)
class TestFunction(BaseTestCase):
"""
Valid and invalid values for `Function`.
"""
def test_dump_func(self):
def dump_func(value, context):
return value
field = fields.Function(dump_func=dump_func)
self._dump_equal(field, 'value', 'value')
def test_load_func(self):
def load_func(value, context):
return value
field = fields.Function(load_func=load_func)
self._load_equal(field, 'value', 'value')
def test_without_func(self):
field = fields.Function()
self._load_equal(field, 'value', missing)
self._dump_equal(field, 'value', missing)
def test_func_not_callable(self):
self.assertRaises(ValueError, fields.Function, dump_func='dump_func')
def test_func_with_wrong_parameters(self):
def func(value):
pass
field = fields.Function(dump_func=func, load_func=func)
self._dump_raises(field, 'value', 'func() takes 1 positional argument but 2 were given')
self._load_raises(field, 'value', 'func() takes 1 positional argument but 2 were given')
def test_dump_func_passed_is_dump_only(self):
def func(value, context):
pass
field = fields.Function(dump_func=func)
self.assertEqual(field.dump_only, True)
self.assertEqual(field.load_only, False)
def test_load_func_passed_is_load_only(self):
def func(value, context):
pass
field = fields.Function(load_func=func)
self.assertEqual(field.dump_only, False)
self.assertEqual(field.load_only, True)
class TestInteger(BaseTestCase):
"""
Valid and invalid values for `Integer`.
"""
def test_valid_inputs(self):
field = fields.Integer()
self._load_equal(field, '1', 1)
self._load_equal(field, '0', 0)
self._load_equal(field, 1, 1)
self._load_equal(field, 0, 0)
self._load_equal(field, 1.0, 1)
self._load_equal(field, 0.0, 0)
def test_invalid_inputs(self):
field = fields.Integer()
self._load_raises(field, 'abc', ['A valid integer is required.'])
self._load_raises(field, '1.0', ['A valid integer is required.'])
def test_valid_outputs(self):
field = fields.Integer()
self._dump_equal(field, 1, 1)
self._dump_equal(field, 1.0, 1)
self._dump_equal(field, '1', 1)
def test_invalid_outputs(self):
field = fields.Integer()
self._dump_raises(field, 'abc', "invalid literal for int() with base 10: 'abc'")
self._dump_raises(field, [], "int() argument must be a string or a number, not 'list'")
class TestMinMaxInteger(BaseTestCase):
def test_valid_inputs(self):
field = fields.Integer(min_value=1, max_value=3)
self._load_equal(field, 1, 1)
self._load_equal(field, 3, 3)
def test_invalid_inputs(self):
field = fields.Integer(min_value=1, max_value=3)
self._load_raises(field, 0, ['Must be at least 1.'])
self._load_raises(field, 4, ['Must be at most 3.'])
def test_valid_outputs(self):
field = fields.Integer(min_value=1, max_value=3)
self._dump_equal(field, 0, 0)
self._dump_equal(field, 2, 2)
self._dump_equal(field, 4, 4)
class TestListField(BaseTestCase):
"""
Values for `List` with Integer as child.
"""
def test_valid_inputs(self):
field = fields.List(fields.Integer())
self._load_equal(field, [], [])
self._load_equal(field, [1, 2, 3], [1, 2, 3])
self._load_equal(field, ['1', '2', '3'], [1, 2, 3])
self._load_equal(field, {1, 2}, [1, 2])
self._load_equal(field, (1, 2), [1, 2])
def test_invalid_inputs(self):
field = fields.List(fields.Integer())
self._load_raises(field, 'not a list', ['Not a valid list.'])
self._load_raises(field, [1, 2, 'error'], [{2: ['A valid integer is required.']}])
def test_valid_outputs(self):
field = fields.List(fields.Integer())
self._dump_equal(field, [], [])
self._dump_equal(field, [1, 2, 3], [1, 2, 3])
self._dump_equal(field, ['1', '2', '3'], [1, 2, 3])
self._dump_equal(field, {1, 2, 3}, [1, 2, 3])
self._dump_equal(field, ('1', '2', '3'), [1, 2, 3])
def test_disallow_empty(self):
field = fields.List(fields.Integer(), allow_empty=False)
self._load_raises(field, [], ['This list may not be empty.'])
class TestMethod(BaseTestCase):
"""
Valid and invalid values for `Method`.
"""
def test_dump_method(self):
class MyContract(Contract):
def dump_method(self, value, context):
return value
field = fields.Method(dump_method_name='dump_method')
field.bind('field', MyContract)
self._dump_equal(field, 'value', 'value')
def test_load_method(self):
class MyContract(Contract):
def load_method(self, value, context):
return value
field = fields.Method(load_method_name='load_method')
field.bind('field', MyContract)
self._load_equal(field, 'value', 'value')
def test_without_method(self):
field = fields.Method()
self._dump_equal(field, 'value', missing)
self._load_equal(field, 'value', missing)
def test_method_not_callable(self):
class MyContract(Contract):
dump_method = 'attribute'
field = fields.Method(dump_method_name='dump_method')
self.assertRaises(ValueError, field.bind, 'field', MyContract)
def test_method_missing(self):
class MyContract(Contract):
dump_method = 'attribute'
field = fields.Method(load_method_name='not_found')
self.assertRaises(ValueError, field.bind, 'field', MyContract)
def test_dump_method_passed_is_dump_only(self):
field = fields.Method(dump_method_name='method_name')
self.assertEqual(field.dump_only, True)
self.assertEqual(field.load_only, False)
def test_load_method_passed_is_load_only(self):
field = fields.Method(load_method_name='method_name')
self.assertEqual(field.dump_only, False)
self.assertEqual(field.load_only, True)
class TestString(BaseTestCase):
"""
Valid and invalid values for `String`.
"""
def test_valid_inputs(self):
field = fields.String()
self._load_equal(field, 1, '1')
self._load_equal(field, 'abc', 'abc')
self._load_equal(field, ' abc ', ' abc ')
def test_invalid_inputs(self):
field = fields.String()
self._load_raises(field, '', ['This field may not be blank.'])
def test_valid_outputs(self):
field = fields.String()
self._dump_equal(field, 1, '1')
self._dump_equal(field, 1.0, '1.0')
self._dump_equal(field, 'abc', 'abc')
def test_trim_whitespace(self):
field = fields.String(trim_whitespace=True)
self._load_equal(field, ' abc ', 'abc')
def test_trim_whitespace_with_space_value(self):
field = fields.String(trim_whitespace=True)
self._load_raises(field, ' ', ['This field may not be blank.'])
def test_allow_blank(self):
field = fields.String(allow_blank=True)
self._load_equal(field, '', '')
def test_allow_none_with_empty_value(self):
field = fields.String(allow_none=True)
self._load_equal(field, '', None)
class TestMinMaxString(BaseTestCase):
"""
Valid and invalid values for `String` with min and max limits.
"""
def test_valid_inputs(self):
field = fields.String(min_length=2, max_length=4)
self._load_equal(field, 12, '12')
self._load_equal(field, 1.0, '1.0')
self._load_equal(field, 'ab', 'ab')
self._load_equal(field, 'abcd', 'abcd')
def test_invalid_inputs(self):
field = fields.String(min_length=2, max_length=4)
self._load_raises(field, '1', ['Shorter than minimum length 2.'])
self._load_raises(field, 'abcde', ['Longer than maximum length 4.'])
def test_valid_outputs(self):
field = fields.String(min_length=1, max_length=3)
self._dump_equal(field, '', '')
self._dump_equal(field, '12345', '12345')
class TestUUID(BaseTestCase):
"""
Valid and invalid values for `UUID`.
"""
def test_valid_inputs(self):
field = fields.UUID()
self._load_equal(field, '825d7aeb-05a9-45b5-a5b7-05df87923cda', uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'))
self._load_equal(field, '825d7aeb05a945b5a5b705df87923cda', uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'))
self._load_equal(field, uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'), uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'))
def test_invalid_inputs(self):
field = fields.UUID()
self._load_raises(field, '825d7aeb-05a9-45b5-a5b7', ['"825d7aeb-05a9-45b5-a5b7" is not a valid UUID.'])
self._load_raises(field, (1, 2, 3), ['"(1, 2, 3)" is not a valid UUID.'])
self._load_raises(field, 123, ['"123" is not a valid UUID.'])
def test_valid_outputs(self):
field = fields.UUID()
self._dump_equal(field, uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'), '825d7aeb-05a9-45b5-a5b7-05df87923cda')
def test_invalid_outputs(self):
field = fields.UUID()
self._dump_raises(field, '825d7aeb-05a9-45b5-a5b7-05df87923cda', "'str' object has no attribute 'int'")
self._dump_raises(field, [], "'list' object has no attribute 'int'")
def test_hex_verbose_format(self):
field = fields.UUID(dump_format='hex_verbose')
self._dump_equal(field, uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'), '825d7aeb-05a9-45b5-a5b7-05df87923cda')
def test_hex_format(self):
field = fields.UUID(dump_format='hex')
self._dump_equal(field, uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'), '825d7aeb05a945b5a5b705df87923cda')
def test_int_format(self):
field = fields.UUID(dump_format='int')
self._dump_equal(field, uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'), 173285016134224701509569922458251836634)
def test_invalid_format(self):
self.assertRaises(ValueError, fields.UUID, dump_format='invalid')
| {
"content_hash": "50feea08b5f4e1b781b576d4d08cf07a",
"timestamp": "",
"source": "github",
"line_count": 599,
"max_line_length": 133,
"avg_line_length": 37.93823038397329,
"alnum_prop": 0.6137293729372937,
"repo_name": "viniciuschiele/contracts",
"id": "bcabce3cfa9b6b77335623206a1d32bb32ebf4d1",
"size": "22725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77369"
},
{
"name": "Shell",
"bytes": "358"
}
],
"symlink_target": ""
} |
import mock
from ironicclient.common import cliutils
from ironicclient.tests.unit import utils
import ironicclient.v1.driver as v1_driver
import ironicclient.v1.driver_shell as d_shell
class DriverShellTest(utils.BaseTestCase):
def setUp(self):
super(DriverShellTest, self).setUp()
client_mock = mock.MagicMock()
driver_mock = mock.MagicMock(spec=v1_driver.DriverManager)
client_mock.driver = driver_mock
self.client_mock = client_mock
def test_driver_show(self):
actual = {}
fake_print_dict = lambda data, *args, **kwargs: actual.update(data)
with mock.patch.object(cliutils, 'print_dict', fake_print_dict):
driver = object()
d_shell._print_driver_show(driver)
exp = ['hosts', 'name']
act = actual.keys()
self.assertEqual(sorted(exp), sorted(act))
def test_do_driver_vendor_passthru_with_args(self):
client_mock = self.client_mock
args = mock.MagicMock()
args.driver_name = 'driver_name'
args.http_method = 'POST'
args.method = 'method'
args.arguments = [['arg1=val1', 'arg2=val2']]
d_shell.do_driver_vendor_passthru(client_mock, args)
client_mock.driver.vendor_passthru.assert_called_once_with(
args.driver_name, args.method, http_method=args.http_method,
args={'arg1': 'val1', 'arg2': 'val2'})
def test_do_driver_vendor_passthru_without_args(self):
client_mock = self.client_mock
args = mock.MagicMock()
args.driver_name = 'driver_name'
args.http_method = 'POST'
args.method = 'method'
args.arguments = [[]]
d_shell.do_driver_vendor_passthru(client_mock, args)
client_mock.driver.vendor_passthru.assert_called_once_with(
args.driver_name, args.method, args={},
http_method=args.http_method)
def test_do_driver_properties(self):
client_mock = self.client_mock
args = mock.MagicMock()
args.driver_name = 'driver_name'
args.json = False
d_shell.do_driver_properties(client_mock, args)
client_mock.driver.properties.assert_called_once_with("driver_name")
@mock.patch('ironicclient.common.cliutils.print_dict')
def test_do_driver_properties_with_wrap_default(self, mock_print_dict):
client_mock = self.client_mock
client_mock.driver.properties.return_value = {
'foo': 'bar',
'baz': 'qux'}
args = mock.MagicMock()
args.driver_name = 'driver_name'
args.wrap = 0
args.json = False
d_shell.do_driver_properties(client_mock, args)
mock_print_dict.assert_called_with(
{'foo': 'bar', 'baz': 'qux'},
dict_value='Description',
json_flag=False,
wrap=0)
@mock.patch('ironicclient.common.cliutils.print_dict')
def test_do_driver_properties_with_wrap(self, mock_print_dict):
client_mock = self.client_mock
client_mock.driver.properties.return_value = {
'foo': 'bar',
'baz': 'qux'}
args = mock.MagicMock()
args.driver_name = 'driver_name'
args.wrap = 80
args.json = False
d_shell.do_driver_properties(client_mock, args)
mock_print_dict.assert_called_with(
{'foo': 'bar', 'baz': 'qux'},
dict_value='Description',
json_flag=False,
wrap=80)
@mock.patch('ironicclient.common.cliutils.print_dict')
def _test_do_driver_raid_logical_disk(self, print_dict_mock, wrap=0):
cli_mock = self.client_mock
cli_mock.driver.raid_logical_disk_properties.return_value = {
'foo': 'bar'}
args = mock.MagicMock()
args.driver_name = 'driver_name'
args.wrap = wrap
d_shell.do_driver_raid_logical_disk_properties(cli_mock, args)
cli_mock.driver.raid_logical_disk_properties.assert_called_once_with(
"driver_name")
print_dict_mock.assert_called_with(
{'foo': 'bar'},
dict_value='Description',
wrap=wrap)
def test_do_driver_raid_logical_disk_default_wrap(self):
self._test_do_driver_raid_logical_disk()
def test_do_driver_raid_logical_disk_with_wrap(self):
self._test_do_driver_raid_logical_disk(wrap=80)
def test_do_driver_show(self):
client_mock = self.client_mock
args = mock.MagicMock()
args.driver_name = 'fake'
args.json = False
d_shell.do_driver_show(client_mock, args)
client_mock.driver.get.assert_called_once_with('fake')
def test_do_driver_list(self):
client_mock = self.client_mock
args = mock.MagicMock()
d_shell.do_driver_list(client_mock, args)
client_mock.driver.list.assert_called_once_with()
def test_do_driver_get_vendor_passthru_methods(self):
client_mock = mock.MagicMock()
args = mock.MagicMock()
args.driver_name = 'fake'
d_shell.do_driver_get_vendor_passthru_methods(client_mock, args)
mock_method = client_mock.driver.get_vendor_passthru_methods
mock_method.assert_called_once_with('fake')
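# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite, kept as a comment). Outside
# of unit tests the same shell helpers operate on a real client; the
# get_client() arguments below are assumptions based on the usual
# python-ironicclient entry point, and the endpoint/token are placeholders.
#
#   from ironicclient import client as ironic_client
#
#   ironic = ironic_client.get_client(
#       1, os_auth_token='<token>', ironic_url='http://127.0.0.1:6385')
#   print(ironic.driver.list())
#   print(ironic.driver.properties('fake'))
# ---------------------------------------------------------------------------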
| {
"content_hash": "390d00803e63d3e790360041a092cf6a",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 77,
"avg_line_length": 36.361111111111114,
"alnum_prop": 0.6140183346065698,
"repo_name": "NaohiroTamura/python-ironicclient",
"id": "5f9a0f5f69edb4c056fdbd11a777edeab608f647",
"size": "5860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironicclient/tests/unit/v1/test_driver_shell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "923850"
},
{
"name": "Shell",
"bytes": "1765"
}
],
"symlink_target": ""
} |
__version__ = "4.0.0-dev"
from pymemcache.client.base import Client # noqa
from pymemcache.client.base import PooledClient # noqa
from pymemcache.client.hash import HashClient # noqa
from pymemcache.client.base import KeepaliveOpts # noqa
from pymemcache.exceptions import MemcacheError # noqa
from pymemcache.exceptions import MemcacheClientError # noqa
from pymemcache.exceptions import MemcacheUnknownCommandError # noqa
from pymemcache.exceptions import MemcacheIllegalInputError # noqa
from pymemcache.exceptions import MemcacheServerError # noqa
from pymemcache.exceptions import MemcacheUnknownError # noqa
from pymemcache.exceptions import MemcacheUnexpectedCloseError # noqa
| {
"content_hash": "3b3fc3e14a0e27b188532bbf4dc0dfa4",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 70,
"avg_line_length": 49.785714285714285,
"alnum_prop": 0.830703012912482,
"repo_name": "sontek/pymemcache",
"id": "436fe2620357ed35d1c636aa7029389500358386",
"size": "697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymemcache/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "214818"
},
{
"name": "Shell",
"bytes": "328"
}
],
"symlink_target": ""
} |
"""Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <denis-alexander.engemann@inria.fr>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils.validation import check_is_fitted
from abc import ABCMeta, abstractmethod
class _BasePCA(TransformerMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self)
X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = np.dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return np.dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return np.dot(X, self.components_) + self.mean_
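# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of this module, kept as a comment so the
# module stays import-safe). It exercises the API above through the concrete
# PCA subclass; the array shapes and the random seed are illustrative.
#
#   import numpy as np
#   from sklearn.decomposition import PCA
#
#   rng = np.random.RandomState(0)
#   X = rng.randn(100, 5)
#   pca = PCA(n_components=3, whiten=True).fit(X)
#   X_t = pca.transform(X)                  # project onto the components
#   X_back = pca.inverse_transform(X_t)     # approximate reconstruction
#   cov = pca.get_covariance()              # components_.T * S**2 * components_ + noise
#   prec = pca.get_precision()              # inverse of cov via the inversion lemma
#   assert np.allclose(cov.dot(prec), np.eye(5))
# ---------------------------------------------------------------------------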
| {
"content_hash": "c6e1661b37d169d502f23137046cdd6c",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 79,
"avg_line_length": 34.82993197278912,
"alnum_prop": 0.5986328125,
"repo_name": "ryfeus/lambda-packs",
"id": "b944d23d3388d2cc9525f0cb99b7a88636b2a25d",
"size": "5120",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Sklearn_x86/source/sklearn/decomposition/_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class StorageAccountCredentialsOperations(object):
"""StorageAccountCredentialsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2020_09_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_data_box_edge_device(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.StorageAccountCredentialList"]
"""Gets all the storage account credentials in a Data Box Edge/Data Box Gateway device.
Gets all the storage account credentials in a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountCredentialList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2020_09_01_preview.models.StorageAccountCredentialList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountCredentialList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('StorageAccountCredentialList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials'} # type: ignore
def get(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.StorageAccountCredential"
"""Gets the properties of the specified storage account credential.
:param device_name: The device name.
:type device_name: str
:param name: The storage account credential name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountCredential, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2020_09_01_preview.models.StorageAccountCredential
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountCredential"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountCredential', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
def _create_or_update_initial(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
storage_account_credential, # type: "_models.StorageAccountCredential"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.StorageAccountCredential"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.StorageAccountCredential"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(storage_account_credential, 'StorageAccountCredential')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountCredential', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
def begin_create_or_update(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
storage_account_credential, # type: "_models.StorageAccountCredential"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.StorageAccountCredential"]
"""Creates or updates the storage account credential.
:param device_name: The device name.
:type device_name: str
:param name: The storage account credential name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param storage_account_credential: The storage account credential.
:type storage_account_credential: ~azure.mgmt.databoxedge.v2020_09_01_preview.models.StorageAccountCredential
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either StorageAccountCredential or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.databoxedge.v2020_09_01_preview.models.StorageAccountCredential]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountCredential"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
storage_account_credential=storage_account_credential,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('StorageAccountCredential', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
def _delete_initial(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
def begin_delete(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the storage account credential.
:param device_name: The device name.
:type device_name: str
:param name: The storage account credential name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
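# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, kept as a comment). The client
# class name, the storage_account_credentials attribute and all resource
# names below are assumptions based on the usual azure-mgmt conventions; the
# generated operations class above is normally reached through such a client
# rather than instantiated directly.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.databoxedge import DataBoxEdgeManagementClient
#
#   client = DataBoxEdgeManagementClient(DefaultAzureCredential(),
#                                        "<subscription-id>")
#   ops = client.storage_account_credentials
#
#   for credential in ops.list_by_data_box_edge_device("<device>",
#                                                       "<resource-group>"):
#       print(credential.name)
#
#   poller = ops.begin_delete("<device>", "<credential-name>",
#                             "<resource-group>")
#   poller.result()   # block until the long-running delete completes
# ---------------------------------------------------------------------------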
| {
"content_hash": "f831a94024f5866aa04a0b64ce2e48af",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 231,
"avg_line_length": 49.210161662817555,
"alnum_prop": 0.6444058569551342,
"repo_name": "Azure/azure-sdk-for-python",
"id": "5f47caaaf3684c99aecc79297099adb09e8ab281",
"size": "21775",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2020_09_01_preview/operations/_storage_account_credentials_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from boto.s3.storage import S3Storage
class Command(BaseCommand):
def handle(self, *args, **kwargs):
s3 = S3Storage(bucket='another-bucket', key='another-key',
secret='another-secret', location='EU')
# ...
print('Done. I guess.')
| {
"content_hash": "830fb5c0e285935dfa7022b489d22f43",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 66,
"avg_line_length": 30.636363636363637,
"alnum_prop": 0.6231454005934718,
"repo_name": "tweekmonster/moult",
"id": "6546df92045ad858834d0c400e4500b39d7db077",
"size": "337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/data/scripts/project/django_project/testapp/management/commands/s3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77888"
},
{
"name": "Shell",
"bytes": "159"
}
],
"symlink_target": ""
} |
"""
These are unit tests that validate how qdrouterd behaves with
different combinations of command line arguments, in order to
ensure it won't break and cause a bad experience for users.
"""
import os
import signal
from subprocess import PIPE, STDOUT
from system_test import TestCase, Qdrouterd, main_module, Process, wait_port
from system_test import unittest
class CommandLineTest(TestCase):
"""
System tests for command line arguments parsing
"""
testport = 0
testname = ""
@classmethod
def setUpClass(cls):
"""Uses a default config for testing"""
super(CommandLineTest, cls).setUpClass()
cls.name = "test-router-1"
CommandLineTest.testname = cls.name
CommandLineTest.testport = cls.tester.get_port()
cls.config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': CommandLineTest.name}),
('listener', {'port': CommandLineTest.testport}),
('log', {'module': 'DEFAULT', 'enable': 'trace+', 'includeSource': 'true', 'outputFile': os.getcwd() + "/" + CommandLineTest.name + '.log'})
])
def run_qdrouterd_as_daemon(self, config_file_name, pid_file_name):
"""
Runs qdrouterd as a daemon, using the provided config_file_name
        in order to ensure the router is able to load it, whether it is
        given as a full or relative path.
:param config_file_name: The configuration file name to be written
:param pid_file_name: PID file name (must be full path)
:return:
"""
pipe = self.popen(
[os.path.join(os.environ.get('BUILD_DIR'), 'router', 'qdrouterd'), '-d',
'-I', os.path.join(os.environ.get('SOURCE_DIR'), 'python'),
'-c', self.config.write(config_file_name), '-P', pid_file_name],
stdout=PIPE, stderr=STDOUT, expect=Process.EXIT_OK,
universal_newlines=True)
out = pipe.communicate()[0]
wait_port(CommandLineTest.testport)
try:
pipe.teardown()
# kill qdrouterd running as a daemon
with open(pid_file_name, 'r') as pidfile:
for line in pidfile:
os.kill(int(line), signal.SIGTERM)
pidfile.close()
except OSError as ex:
raise Exception("%s\n%s" % (ex, out))
def test_01_config_relative_path(self):
"""
Starts qdrouterd as daemon, enforcing a config file name with
relative path.
"""
try:
self.run_qdrouterd_as_daemon("test-router", os.getcwd() + '/test.pid')
except OSError as ex:
self.fail(ex)
class CommandLineTest2(TestCase):
"""
System tests for command line arguments parsing
"""
testport = 0
testname = ""
@classmethod
def setUpClass(cls):
"""Uses a default config for testing"""
super(CommandLineTest2, cls).setUpClass()
cls.name = "test-router-2"
CommandLineTest2.testname = cls.name
CommandLineTest2.testport = cls.tester.get_port()
# output has been deprecated. We are using it here to test backward compatibility.
cls.config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': CommandLineTest2.testname}),
('listener', {'port': CommandLineTest2.testport}),
('log', {'module': 'DEFAULT', 'enable': 'trace+', 'includeSource': 'true', 'output': os.getcwd() + "/" + CommandLineTest2.name + '.log'})
])
def run_qdrouterd_as_daemon(self, config_file_name, pid_file_name):
"""
Runs qdrouterd as a daemon, using the provided config_file_name
        in order to ensure the router is able to load it, whether it is
        given as a full or relative path.
:param config_file_name: The configuration file name to be written
:param pid_file_name: PID file name (must be full path)
:return:
"""
pipe = self.popen(
[os.path.join(os.environ.get('BUILD_DIR'), 'router', 'qdrouterd'), '-d',
'-I', os.path.join(os.environ.get('SOURCE_DIR'), 'python'),
'-c', self.config.write(config_file_name), '-P', pid_file_name],
stdout=PIPE, stderr=STDOUT, expect=Process.EXIT_OK,
universal_newlines=True)
out = pipe.communicate()[0]
wait_port(CommandLineTest2.testport)
try:
pipe.teardown()
# kill qdrouterd running as a daemon
with open(pid_file_name, 'r') as pidfile:
for line in pidfile:
os.kill(int(line), signal.SIGTERM)
pidfile.close()
except OSError as ex:
raise Exception("%s\n%s" % (ex, out))
def test_02_config_full_path(self):
"""
Starts qdrouterd as daemon, enforcing a config file name with
full path.
"""
try:
self.run_qdrouterd_as_daemon(os.getcwd() + "/test-router-2.conf",
pid_file_name=os.getcwd() + '/test.pid')
except OSError as ex:
self.fail(ex)
if __name__ == '__main__':
unittest.main(main_module())
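# ---------------------------------------------------------------------------
# Stand-alone sketch (illustrative only, kept as a comment) of the daemon
# start/stop cycle that the tests above automate. The executable and the
# paths are placeholders; -d, -c and -P are the same flags exercised by
# run_qdrouterd_as_daemon().
#
#   import os, signal, subprocess
#
#   subprocess.check_call(['qdrouterd', '-d',
#                          '-c', 'qdrouterd.conf',
#                          '-P', '/tmp/qdrouterd.pid'])
#   with open('/tmp/qdrouterd.pid') as pidfile:
#       os.kill(int(pidfile.read().strip()), signal.SIGTERM)
# ---------------------------------------------------------------------------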
| {
"content_hash": "1232e3fbcad369392fbf1346dd2821e6",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 152,
"avg_line_length": 36.41549295774648,
"alnum_prop": 0.5846064590988204,
"repo_name": "ted-ross/qpid-dispatch",
"id": "5165046c26f7a0b121411022566164f83b950639",
"size": "5961",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "tests/system_tests_cmdline_parsing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2866080"
},
{
"name": "C++",
"bytes": "368846"
},
{
"name": "CMake",
"bytes": "56491"
},
{
"name": "CSS",
"bytes": "49129"
},
{
"name": "Dockerfile",
"bytes": "3323"
},
{
"name": "HTML",
"bytes": "2320"
},
{
"name": "JavaScript",
"bytes": "733506"
},
{
"name": "Python",
"bytes": "2827063"
},
{
"name": "Shell",
"bytes": "34107"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask_restful import Api, Resource, request, abort
import redis
import nlevel
app = Flask(__name__)
api = Api(app)
r = redis.Redis()
def flat_dict(d):
return {k: v[0] for k, v in dict(d).items()}
class NodeListResource(Resource):
def get(self, key=None):
if key:
return nlevel.nodes(r, key)
else:
return nlevel.roots(r)
def post(self, key=None):
info = flat_dict(request.form)
if not info:
abort(400, message="failed to create node with empty info")
node_key = nlevel.node(r, info, parent=key)
return nlevel.info(r, node_key)
class NodeInfoResource(Resource):
def get(self, key):
return nlevel.info(r, key)
api.add_resource(NodeListResource,
'/api/v1/nodes',
'/api/v1/nodes/<string:key>/nodes')
api.add_resource(NodeInfoResource,
'/api/v1/nodes/<string:key>')
if __name__ == '__main__':
app.run(debug=True)
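# ---------------------------------------------------------------------------
# Client-side sketch (illustrative only, kept as a comment). With a local
# Redis instance available and the service running on Flask's default port,
# the endpoints above can be exercised with the requests package; the form
# field below is arbitrary, since any non-empty form is accepted as node info.
#
#   import requests
#
#   base = 'http://127.0.0.1:5000/api/v1'
#   requests.post(base + '/nodes', data={'name': 'root'})   # create a root node
#   print(requests.get(base + '/nodes').json())             # list root nodes
# ---------------------------------------------------------------------------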
| {
"content_hash": "88c55fb53368acb82d7966875a0f7768",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 71,
"avg_line_length": 22,
"alnum_prop": 0.6121212121212121,
"repo_name": "benwilber/nlevel",
"id": "61ec3651d2a2bea3f2b7c1d21a48adf0ada9c0fa",
"size": "991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restapi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2821"
}
],
"symlink_target": ""
} |
import six
from distutils.core import Extension, setup
from distutils.sysconfig import get_config_vars
ehata_module = Extension('ehata_its',
sources = ['its/ExtendedHata.cpp',
'its/FindHorizons.cpp',
'its/FindQuantile.cpp',
'its/FineRollingHillyTerrainCorectionFactor.cpp',
'its/GeneralSlopeCorrectionFactor.cpp',
'its/IsolatedRidgeCorrectionFactor.cpp',
'its/LeastSquares.cpp',
'its/MedianBasicPropLoss.cpp',
'its/MedianRollingHillyTerrainCorrectionFactor.cpp',
'its/MixedPathCorrectionFactor.cpp',
'its/PreprocessTerrainPath.cpp',
'ehata_its_py.cpp'],
extra_compile_args=['-D_hypot=hypot'])
# Remove the "-Wstrict-prototypes" compiler option (not valid for C++).
cfg_vars = get_config_vars()
for key, value in cfg_vars.items():
if isinstance(value, six.string_types):
cfg_vars[key] = value.replace("-Wstrict-prototypes", "")
setup(name = 'ehata_its',
version = '1.0',
description = 'ITS eHata propagation model',
ext_modules = [ehata_module])
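# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, kept as a comment). After compiling the
# extension in place (python setup.py build_ext --inplace), it is imported
# under the module name declared above; the functions it exposes are defined
# in ehata_its_py.cpp and are not assumed here.
#
#   import ehata_its
# ---------------------------------------------------------------------------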
| {
"content_hash": "3570596ff2fa9e304b6d40d3afa6c809",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 88,
"avg_line_length": 47.3,
"alnum_prop": 0.5116279069767442,
"repo_name": "Wireless-Innovation-Forum/Spectrum-Access-System",
"id": "0cba090063793388cec7792ede22b5e735e3487e",
"size": "2219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/harness/reference_models/propagation/ehata/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "10978"
},
{
"name": "C++",
"bytes": "130297"
},
{
"name": "Makefile",
"bytes": "869"
},
{
"name": "PowerShell",
"bytes": "11931"
},
{
"name": "Python",
"bytes": "1836478"
},
{
"name": "Shell",
"bytes": "46984"
}
],
"symlink_target": ""
} |
default_app_config = 'inline_images.startup.InlineImageConfig'
| {
"content_hash": "e01c52e93c5a81490dfb6475eeda6b06",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 62,
"avg_line_length": 63,
"alnum_prop": 0.8253968253968254,
"repo_name": "PrincessTeruko/TsunArt",
"id": "696d1805be07db0b46fa1b95742edc8f26a831eb",
"size": "63",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inline_images/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23615"
},
{
"name": "HTML",
"bytes": "34809"
},
{
"name": "JavaScript",
"bytes": "47538"
},
{
"name": "Python",
"bytes": "114688"
},
{
"name": "Shell",
"bytes": "1392"
}
],
"symlink_target": ""
} |
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import copy
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import socket
import sys
import traceback
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import importutils
import six
from six import moves
_PY26 = sys.version_info[0:2] == (2, 6)
from networking_mlnx.openstack.common._i18n import _
from networking_mlnx.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error.')
]
DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
'oslo.messaging=INFO', 'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN',
"keystonemiddleware=WARN", "routes.middleware=WARN",
"stevedore=WARN"]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=DEFAULT_LOG_LEVELS,
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message.'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message.'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
def list_opts():
"""Entry point for oslo.config-generator."""
return [(None, copy.deepcopy(common_cli_opts)),
(None, copy.deepcopy(logging_cli_opts)),
(None, copy.deepcopy(generic_log_opts)),
(None, copy.deepcopy(log_opts)),
]
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
def isEnabledFor(self, level):
if _PY26:
            # This method was added in Python 2.7; replicate its logic here
            # so that Python 2.6 has the same capability.
return self.logger.isEnabledFor(level)
else:
return super(BaseLoggerAdapter, self).isEnabledFor(level)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
if six.PY3:
# In Python 3, the code fails because the 'manager' attribute
# cannot be found when using a LoggerAdapter as the
# underlying logger. Work around this issue.
self._logger.manager = self._logger.logger.manager
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
self._deprecated_messages_sent = dict()
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
"""Call this method when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(jecarey): If msg is not unicode, coerce it into unicode
# before it can get to the python logging and
# possibly cause string encoding trouble
if not isinstance(msg, six.text_type):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra['instance'] = instance_extra
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {'exc_info': (exc_type, value, tb)}
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except (moves.configparser.Error, KeyError) as exc:
raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf(product_name, version)
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string=None,
default_log_levels=None):
# Just in case the caller is not setting the
# default_log_level. This is insurance because
# we introduced the default_log_level parameter
    # later in a backwards-incompatible change
if default_log_levels is not None:
cfg.set_defaults(
log_opts,
default_log_levels=default_log_levels)
if logging_context_format_string is not None:
cfg.set_defaults(
log_opts,
logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
def format(self, record):
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
msg = logging.handlers.SysLogHandler.format(self, record)
msg = self.binary_name + ' ' + msg
return msg
def _setup_logging_from_conf(project, version):
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not logpath:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"oslo.messaging.notify.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(project=project,
version=version,
datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = logging.getLogger(mod)
# NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
# to integer code.
if sys.version_info < (2, 7):
level = logging.getLevelName(level_name)
logger.setLevel(level)
else:
logger.setLevel(level_name)
if CONF.use_syslog:
try:
facility = _find_facility_from_conf()
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(address='/dev/log',
facility=facility)
else:
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
except socket.error:
log_root.error('Unable to add syslog handler. Verify that syslog '
'is running.')
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg.rstrip())
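# Illustrative sketch (added for clarity, not part of the original module):
# WritableLogger lets a logger stand in for a file-like object, for example
# as the access-log stream of a WSGI server. The logger name is an assumption.
def _example_writable_stream():
    stream = WritableLogger(getLogger('wsgi'), logging.INFO)
    stream.write('GET /healthcheck HTTP/1.1 200\n')
    return stream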
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(jecarey): If msg is not unicode, coerce it into unicode
# before it can get to the python logging and
# possibly cause string encoding trouble
if not isinstance(record.msg, six.text_type):
record.msg = six.text_type(record.msg)
# store project info
record.project = self.project
record.version = self.version
# store request info
context = getattr(local.store, 'context', None)
if context:
d = _dictify_context(context)
for k, v in d.items():
setattr(record, k, v)
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
fmt = CONF.logging_context_format_string
else:
fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
fmt += " " + CONF.logging_debug_format_suffix
if sys.version_info < (3, 2):
self._fmt = fmt
else:
self._style = logging.PercentStyle(fmt)
self._fmt = self._style._fmt
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
| {
"content_hash": "4a11e9905c742b419505480a92553b62",
"timestamp": "",
"source": "github",
"line_count": 701,
"max_line_length": 78,
"avg_line_length": 36.63623395149786,
"alnum_prop": 0.589401136983101,
"repo_name": "moshele/old-networking-mlnx",
"id": "d2d7d67d3991b72529f40f0da02fb12e717bbeb4",
"size": "26453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_mlnx/openstack/common/log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "187680"
}
],
"symlink_target": ""
} |
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
class MetadataNamespacesTest(base.BaseV2ImageTest):
"""Test the Metadata definition Namespaces basic functionality"""
@test.idempotent_id('319b765e-7f3d-4b3d-8b37-3ca3876ee768')
def test_basic_metadata_definition_namespaces(self):
# get the available resource types and use one resource_type
body = self.client.list_resource_types()
resource_name = body['resource_types'][0]['name']
name = [{'name': resource_name}]
namespace_name = data_utils.rand_name('namespace')
# create the metadef namespace
body = self.client.create_namespace(namespace=namespace_name,
visibility='public',
description='Tempest',
display_name=namespace_name,
resource_type_associations=name,
protected=True)
self.addCleanup(self._cleanup_namespace, namespace_name)
# get namespace details
body = self.client.show_namespace(namespace_name)
self.assertEqual(namespace_name, body['namespace'])
self.assertEqual('public', body['visibility'])
# unable to delete protected namespace
self.assertRaises(lib_exc.Forbidden, self.client.delete_namespace,
namespace_name)
# update the visibility to private and protected to False
body = self.client.update_namespace(namespace=namespace_name,
description='Tempest',
visibility='private',
display_name=namespace_name,
protected=False)
self.assertEqual('private', body['visibility'])
self.assertEqual(False, body['protected'])
# now able to delete the non-protected namespace
self.client.delete_namespace(namespace_name)
def _cleanup_namespace(self, namespace_name):
        # this is used to clean up the resources
try:
body = self.client.show_namespace(namespace_name)
self.assertEqual(namespace_name, body['namespace'])
body = self.client.update_namespace(namespace=namespace_name,
description='Tempest',
visibility='private',
display_name=namespace_name,
protected=False)
self.client.delete_namespace(namespace_name)
except lib_exc.NotFound:
pass
| {
"content_hash": "c634def02cf85720dcbe58e5688515cb",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 52.345454545454544,
"alnum_prop": 0.5560958666203543,
"repo_name": "bigswitch/tempest",
"id": "de8299e41e25c85c0db477fb5ce1abfde0bfe4a0",
"size": "3508",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tempest/api/image/v2/test_images_metadefs_namespaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3376324"
},
{
"name": "Shell",
"bytes": "7858"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import gtest_test_utils
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABLED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
'HasDeathTest.Test1',
'HasDeathTest.Test2',
] + PARAM_TESTS
param_tests_present = None
# Utilities.
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a test program and returns its exit code and a list of tests run."""
stdout_file = os.popen(command, 'r')
tests_run = []
test_case = ''
test = ''
for line in stdout_file:
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run += [test_case + '.' + test]
exit_code = stdout_file.close()
return (tests_run, exit_code)
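# Illustrative sketch (added for clarity, not part of the original script):
# the two equivalent ways a user selects tests, via the GTEST_FILTER
# environment variable and via the --gtest_filter command line flag.
def ExampleRunWithFilter():
  SetEnvVar(FILTER_ENV_VAR, 'FooTest.*')
  env_selected = Run(COMMAND)[0]
  SetEnvVar(FILTER_ENV_VAR, None)
  flag_selected = Run('%s --%s=FooTest.*' % (COMMAND, FILTER_FLAG))[0]
  return env_selected, flag_selected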
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = os.environ.copy()
os.environ.update(extra_env)
return function(*args, **kwargs)
finally:
for key in extra_env.iterkeys():
if key in original_env:
os.environ[key] = original_env[key]
else:
del os.environ[key]
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, Run, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests GTEST_FILTER env variable or --gtest_filter flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using GTEST_FILTER.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the GTEST_FILTER environment variable. However,
# we can still test the case when the variable is not supplied (i.e.,
# gtest_filter is None).
# pylint: disable-msg=C6403
if not IS_WINDOWS or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = Run(COMMAND)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using --gtest_filter.
if gtest_filter is None:
command = COMMAND
else:
command = '%s --%s=%s' % (COMMAND, FILTER_FLAG, gtest_filter)
tests_run = Run(command)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
command=COMMAND, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
command: A command to invoke the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the GTEST_FILTER environment variable. However,
# we can still test the case when the variable is not supplied (i.e.,
# gtest_filter is None).
# pylint: disable-msg=C6403
if not IS_WINDOWS or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, command)
if check_exit_0:
self.assert_(exit_code is None)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
    command = '%s --%s' % (COMMAND, ALSO_RUN_DISABLED_TESTS_FLAG)
if gtest_filter is not None:
command = '%s --%s=%s' % (command, FILTER_FLAG, gtest_filter)
tests_run = Run(command)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
'\n'.join(os.popen(COMMAND, 'r').readlines())) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
'HasDeathTest.Test1',
'HasDeathTest.Test2', ] + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-HasDeathTest.Test1', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
'HasDeathTest.Test2',
] + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:HasDeathTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:HasDeathTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
command = '%s --%s=%s' % (COMMAND, FILTER_FLAG, '*One')
tests_run = Run(command)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
stdout_file = InvokeWithModifiedEnv(extra_env, os.popen, COMMAND, 'r')
try:
stdout_file.readlines()
finally:
stdout_file.close()
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with --gtest_list_tests."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
stdout_file = InvokeWithModifiedEnv(extra_env, os.popen,
'%s --gtest_list_tests' % COMMAND, 'r')
try:
stdout_file.readlines()
finally:
stdout_file.close()
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for command in (COMMAND + ' --gtest_death_test_style=threadsafe',
COMMAND + ' --gtest_death_test_style=fast'):
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, command=command)
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, command=command)
if __name__ == '__main__':
gtest_test_utils.Main()
| {
"content_hash": "1047211dfd814b27e56887d4cf1ef4f2",
"timestamp": "",
"source": "github",
"line_count": 592,
"max_line_length": 80,
"avg_line_length": 32.47804054054054,
"alnum_prop": 0.6568887501950382,
"repo_name": "cp16net/virgo-base",
"id": "4e9556b773886a11aa12daaaa7cef29489a12285",
"size": "19227",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "deps/breakpad/src/testing/gtest/test/gtest_filter_unittest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "129112"
},
{
"name": "C++",
"bytes": "3567"
},
{
"name": "DOT",
"bytes": "745"
},
{
"name": "Lua",
"bytes": "71075"
},
{
"name": "Perl",
"bytes": "2365"
},
{
"name": "Python",
"bytes": "48186"
},
{
"name": "Shell",
"bytes": "2152"
}
],
"symlink_target": ""
} |
class Dataset(list):
info = {}
def __init__(self, data=[]):
super(Dataset, self).__init__(data)
class LazyDataset(Dataset):
def __init__(self, lazy_functions):
super(LazyDataset, self).__init__()
self.lazy_functions = lazy_functions
def __iter__(self):
return self.lazy_functions['__iter__']()
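# Illustrative sketch (added for clarity, not part of the original module):
# a LazyDataset whose iteration is deferred to a callable supplied at
# construction time, so nothing is materialized until the data is walked.
def _example_lazy_dataset():
    lazy = LazyDataset({'__iter__': lambda: iter(range(3))})
    return list(lazy)  # -> [0, 1, 2]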
| {
"content_hash": "e6f497618803a467aa3d2c2e32a4b28d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 48,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.5722543352601156,
"repo_name": "SMART-Lab/mldata",
"id": "914a3c137001c4f08f3fb8b8cf6bd784fbfbda33",
"size": "372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mldata/dataset.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4177"
}
],
"symlink_target": ""
} |
"""
The acssum module contains a function `acssum` that calls the ACSSUM executable.
Use this function to facilitate batch runs of ACSSUM.
Examples
--------
>>> from acstools import acssum
>>> acssum.acssum('*flt.fits', 'combined_image.fits')
For usage help, use ``exe_args=['--help']``
"""
# STDLIB
import os
import subprocess # nosec
__taskname__ = "acssum"
__version__ = "1.0"
__vdate__ = "18-Dec-2012"
__all__ = ['acssum']
def acssum(input, output, exec_path='', time_stamps=False, verbose=False,
quiet=False, exe_args=None):
r"""
Run the acssum.e executable as from the shell.
Parameters
----------
input : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*flt.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
output : str
Output filename.
If `output` is '' and `input` is '\*_asn.fits',
`output` will be automatically set to '\*_sfl.fits'.
Otherwise, it is an error not to provide a specific `output`.
exec_path : str, optional
The complete path to ACSSUM executable.
If not given, run ACSSUM given by 'acssum.e'.
time_stamps : bool, optional
Set to True to turn on the printing of time stamps.
verbose : bool, optional
Set to True for verbose output.
quiet : bool, optional
Set to True for quiet output.
exe_args : list, optional
Arbitrary arguments passed to underlying executable call.
        Note: Implementation uses subprocess.check_call and whitespace is not
permitted. E.g. use exe_args=['--nThreads', '1']
"""
from stsci.tools import parseinput # Optional package dependency
if exec_path:
if not os.path.exists(exec_path):
raise OSError('Executable not found: ' + exec_path)
call_list = [exec_path]
else:
call_list = ['acssum.e']
# Parse input to get list of filenames to process.
# acssum.e only takes 'file1,file2,...'
infiles, dummy_out = parseinput.parseinput(input)
call_list.append(','.join(infiles))
call_list.append(output)
if time_stamps:
call_list.append('-t')
if verbose:
call_list.append('-v')
if quiet:
call_list.append('-q')
if exe_args:
call_list.extend(exe_args)
subprocess.check_call(call_list) # nosec
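# Illustrative sketch (added for clarity, not part of the original module):
# a call equivalent to running ``acssum.e jc*flt.fits combined.fits -t -v``
# from the shell; the input pattern and output filename are assumptions.
def _example_acssum_call():
    acssum('jc*flt.fits', 'combined.fits', time_stamps=True, verbose=True)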
| {
"content_hash": "d765847fa7d900400618a719b9b8920c",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 80,
"avg_line_length": 27.08695652173913,
"alnum_prop": 0.615569823434992,
"repo_name": "jhunkeler/acstools",
"id": "569f65de075ad9d190c2859214723cc8d955017a",
"size": "2492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acstools/acssum.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "177302"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from kitsune.sumo.models import ModelBase, LocaleField
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import auto_delete_files
class Media(ModelBase):
"""Generic model for media"""
title = models.CharField(max_length=255, db_index=True)
created = models.DateTimeField(default=datetime.now, db_index=True)
updated = models.DateTimeField(default=datetime.now, db_index=True)
updated_by = models.ForeignKey(User, null=True)
description = models.TextField(max_length=10000)
locale = LocaleField(default=settings.GALLERY_DEFAULT_LANGUAGE,
db_index=True)
is_draft = models.NullBooleanField(default=None, null=True, editable=False)
class Meta(object):
abstract = True
ordering = ['-created']
unique_together = (('locale', 'title'), ('is_draft', 'creator'))
def __unicode__(self):
return '[%s] %s' % (self.locale, self.title)
@auto_delete_files
class Image(Media):
creator = models.ForeignKey(User, related_name='gallery_images')
file = models.ImageField(upload_to=settings.GALLERY_IMAGE_PATH,
max_length=settings.MAX_FILEPATH_LENGTH)
thumbnail = models.ImageField(
upload_to=settings.GALLERY_IMAGE_THUMBNAIL_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
def get_absolute_url(self):
return reverse('gallery.media', args=['image', self.id])
def thumbnail_url_if_set(self):
"""Returns self.thumbnail, if set, else self.file"""
return self.thumbnail.url if self.thumbnail else self.file.url
@property
def documents(self):
"""Get the documents that include this image."""
from kitsune.wiki.models import Document
return Document.objects.filter(documentimage__image=self)
@auto_delete_files
class Video(Media):
creator = models.ForeignKey(User, related_name='gallery_videos')
webm = models.FileField(upload_to=settings.GALLERY_VIDEO_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
ogv = models.FileField(upload_to=settings.GALLERY_VIDEO_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
flv = models.FileField(upload_to=settings.GALLERY_VIDEO_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
poster = models.ImageField(upload_to=settings.GALLERY_VIDEO_THUMBNAIL_PATH,
max_length=settings.MAX_FILEPATH_LENGTH,
null=True)
thumbnail = models.ImageField(
upload_to=settings.GALLERY_VIDEO_THUMBNAIL_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
def get_absolute_url(self):
return reverse('gallery.media', args=['video', self.id])
def thumbnail_url_if_set(self):
"""Returns self.thumbnail.url, if set, else default thumbnail URL"""
progress_url = settings.GALLERY_VIDEO_THUMBNAIL_PROGRESS_URL
return self.thumbnail.url if self.thumbnail else progress_url
| {
"content_hash": "45696438842b643e3fa88eda3991138d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 41.5974025974026,
"alnum_prop": 0.6749921948173587,
"repo_name": "dbbhattacharya/kitsune",
"id": "8bab10d15c457f1c8e4de527bf68502d626e8a02",
"size": "3203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kitsune/gallery/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy as sp
import logging
class KernelStandardizer(object):
'''
A KernelStandardizer is a class such as :class:`.DiagKtoN` and :class:`.Identity` to be used by the :meth:`.KernelData.standardize` to standardize Kernel data.
It always works in-place *and* returns the :class:`.KernelData` on which it works.
:Example:
Read and standardize Kernel data.
>>> from pysnptools.kernelstandardizer import DiagKtoN
>>> from pysnptools.kernelreader import KernelNpz
>>> kerneldata1 = KernelNpz('../examples/toydata.kernel.npz').read()
>>> print(np.diag(kerneldata1.val).sum())
5000000.0
>>> kerneldata1 = kerneldata1.standardize(DiagKtoN())
>>> print(np.diag(kerneldata1.val).sum())
500.0
Can also return a constant kernel standardizer that be applied to other :class:`.KernelData`.
>>> kernel_whole = KernelNpz('../examples/toydata.kernel.npz')
>>> train_idx, test_idx = range(10,kernel_whole.iid_count), range(0,10) #test on the first 10, train on the rest
>>> kernel_train, trained_standardizer = DiagKtoN().standardize(kernel_whole[train_idx].read(),return_trained=True)
>>> print('{0:.6f}'.format(np.diag(kernel_train.val).sum()))
490.000000
>>> print('{0:.6f}'.format(trained_standardizer.factor))
0.000100
>>> kernel_whole_test = kernel_whole[:,test_idx].read().standardize(trained_standardizer)
>>> print('{0:.6f}'.format(kernel_whole_test.val[0,0]))
0.992217
Details of Methods & Properties:
'''
def standardize(self, kerneldata, return_trained=False, force_python_only=False):
'''
Applies standardization, in place, to :class:`.KernelData`. For convenience also returns the :class:`KernelData`.
        :param kerneldata: kernel values to standardize
        :type kerneldata: :class:`.KernelData`
:param return_trained: If true, returns a second value containing a constant :class:`.KernelStandardizer` trained on this data.
:type return_trained: boolean
        :param force_python_only: optional -- If False (default), may use outside library code. If True, requests that the
             standardization be done without outside library code.
:type force_python_only: bool
:rtype: :class:`.KernelData`, (optional) constant :class:`.KernelStandardizer`
'''
raise NotImplementedError("subclass {0} needs to implement method '.standardize'".format(self.__class__.__name__))
class Identity(KernelStandardizer):
'''
A :class:`.KernelStandardizer` that does nothing to kernel data.
See :class:`.KernelStandardizer` for more information about standardization.
>>> from pysnptools.kernelstandardizer import Identity as KS_Identity
>>> from pysnptools.kernelreader import KernelNpz
>>> kerneldata1 = KernelNpz('../examples/toydata.kernel.npz').read()
>>> print(np.diag(kerneldata1.val).sum())
5000000.0
>>> kerneldata1 = kerneldata1.standardize(KS_Identity())
>>> print(np.diag(kerneldata1.val).sum())
5000000.0
'''
def __init__(self):
super(Identity, self).__init__()
def standardize(self, kerneldata, return_trained=False, force_python_only=False):
if return_trained:
return kerneldata, self
else:
return kerneldata
def __repr__(self):
return "{0}()".format(self.__class__.__name__)
from pysnptools.standardizer import DiagKtoN #as SN_DiagKtoN
from pysnptools.standardizer import DiagKtoNTrained #as SN_DiagKtoNTrained
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
import doctest
doctest.testmod()
| {
"content_hash": "15af9aad59d75d124652501aef113700",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 163,
"avg_line_length": 37.96875,
"alnum_prop": 0.6757201646090535,
"repo_name": "MicrosoftGenomics/PySnpTools",
"id": "faf65a08e4ceb2ce3d6da2d96f92b5e83d503725",
"size": "3681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysnptools/kernelstandardizer/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3659"
},
{
"name": "C++",
"bytes": "29517"
},
{
"name": "Python",
"bytes": "436030"
}
],
"symlink_target": ""
} |
"""Test conformance to the specs."""
from .compat import unittest, is_python2
import semantic_version
# shortcut
Version = semantic_version.Version
class FormatTests(unittest.TestCase):
"""Tests proper version validation."""
def test_major_minor_patch(self):
### SPEC:
# A normal version number MUST take the form X.Y.Z
with self.assertRaises(ValueError):
Version('1')
with self.assertRaises(ValueError):
Version('1.1')
# Doesn't raise
Version('1.2.3')
with self.assertRaises(ValueError):
Version('1.2.3.4')
### SPEC:
# Where X, Y, and Z are non-negative integers,
with self.assertRaises(ValueError):
Version('1.2.A')
with self.assertRaises(ValueError):
Version('1.-2.3')
# Valid
v = Version('1.2.3')
self.assertEqual(1, v.major)
self.assertEqual(2, v.minor)
self.assertEqual(3, v.patch)
### Spec:
# And MUST NOT contain leading zeroes
with self.assertRaises(ValueError):
Version('1.2.01')
with self.assertRaises(ValueError):
Version('1.02.1')
with self.assertRaises(ValueError):
Version('01.2.1')
# Valid
v = Version('0.0.0')
self.assertEqual(0, v.major)
self.assertEqual(0, v.minor)
self.assertEqual(0, v.patch)
def test_prerelease(self):
### SPEC:
# A pre-release version MAY be denoted by appending a hyphen and a
# series of dot separated identifiers immediately following the patch
# version.
with self.assertRaises(ValueError):
Version('1.2.3 -23')
# Valid
v = Version('1.2.3-23')
self.assertEqual(('23',), v.prerelease)
### SPEC:
# Identifiers MUST comprise only ASCII alphanumerics and hyphen.
# Identifiers MUST NOT be empty
with self.assertRaises(ValueError):
Version('1.2.3-a,')
with self.assertRaises(ValueError):
Version('1.2.3-..')
### SPEC:
# Numeric identifiers MUST NOT include leading zeroes.
with self.assertRaises(ValueError):
Version('1.2.3-a0.01')
with self.assertRaises(ValueError):
Version('1.2.3-00')
# Valid
v = Version('1.2.3-0a.0.000zz')
self.assertEqual(('0a', '0', '000zz'), v.prerelease)
def test_build(self):
### SPEC:
# Build metadata MAY be denoted by appending a plus sign and a series of
# dot separated identifiers immediately following the patch or
# pre-release version
v = Version('1.2.3')
self.assertEqual((), v.build)
with self.assertRaises(ValueError):
Version('1.2.3 +4')
### SPEC:
# Identifiers MUST comprise only ASCII alphanumerics and hyphen.
# Identifiers MUST NOT be empty
with self.assertRaises(ValueError):
Version('1.2.3+a,')
with self.assertRaises(ValueError):
Version('1.2.3+..')
# Leading zeroes allowed
v = Version('1.2.3+0.0a.01')
self.assertEqual(('0', '0a', '01'), v.build)
def test_precedence(self):
### SPEC:
# Precedence is determined by the first difference when comparing from
# left to right as follows: Major, minor, and patch versions are always
# compared numerically.
# Example: 1.0.0 < 2.0.0 < 2.1.0 < 2.1.1
self.assertLess(Version('1.0.0'), Version('2.0.0'))
self.assertLess(Version('2.0.0'), Version('2.1.0'))
self.assertLess(Version('2.1.0'), Version('2.1.1'))
### SPEC:
# When major, minor, and patch are equal, a pre-release version has
# lower precedence than a normal version.
# Example: 1.0.0-alpha < 1.0.0
self.assertLess(Version('1.0.0-alpha'), Version('1.0.0'))
### SPEC:
# Precedence for two pre-release versions with the same major, minor,
# and patch version MUST be determined by comparing each dot separated
# identifier from left to right until a difference is found as follows:
# identifiers consisting of only digits are compared numerically
self.assertLess(Version('1.0.0-1'), Version('1.0.0-2'))
# and identifiers with letters or hyphens are compared lexically in
# ASCII sort order.
self.assertLess(Version('1.0.0-aa'), Version('1.0.0-ab'))
# Numeric identifiers always have lower precedence than
# non-numeric identifiers.
self.assertLess(Version('1.0.0-9'), Version('1.0.0-a'))
# A larger set of pre-release fields has a higher precedence than a
# smaller set, if all of the preceding identifiers are equal.
self.assertLess(Version('1.0.0-a.b.c'), Version('1.0.0-a.b.c.0'))
# Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0.
self.assertLess(Version('1.0.0-alpha'), Version('1.0.0-alpha.1'))
self.assertLess(Version('1.0.0-alpha.1'), Version('1.0.0-alpha.beta'))
self.assertLess(Version('1.0.0-alpha.beta'), Version('1.0.0-beta'))
self.assertLess(Version('1.0.0-beta'), Version('1.0.0-beta.2'))
self.assertLess(Version('1.0.0-beta.2'), Version('1.0.0-beta.11'))
self.assertLess(Version('1.0.0-beta.11'), Version('1.0.0-rc.1'))
self.assertLess(Version('1.0.0-rc.1'), Version('1.0.0'))
| {
"content_hash": "e5fe086b5295394f18cbc84bcb041e98",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 131,
"avg_line_length": 36.86754966887417,
"alnum_prop": 0.5884677564217712,
"repo_name": "pombredanne/python-semanticversion",
"id": "43c9d6a5dcb255be85fda8724f4adbbc1d991ff3",
"size": "5726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_spec.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1215"
},
{
"name": "Python",
"bytes": "75949"
}
],
"symlink_target": ""
} |
"""Download and unzip the target file to the destination."""
from __future__ import print_function
import os
import sys
import tempfile
import zipfile
import requests
def main():
if len(sys.argv) != 3:
print("Usage: python download_and_unzip.py [zipfile-url] [destination]")
sys.exit(1)
download_url = sys.argv[1]
destination = sys.argv[2]
with tempfile.TemporaryFile() as tmp_file:
r = requests.get(download_url)
if r.status_code != requests.codes.ok:
print("Download %s failed with [%d] \"%s\"" %
                  (download_url, r.status_code, r.text))
sys.exit(1)
else:
tmp_file.write(r.content)
print("Successfully downloaded from %s", download_url)
with zipfile.ZipFile(tmp_file, 'r') as target_zip_file:
target_zip_file.extractall(destination)
print("Successfully unzip to %s" % destination)
if __name__ == "__main__":
main()
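# Illustrative invocation (added for clarity; the URL and destination below
# are hypothetical):
#   python download_and_unzip.py https://example.com/archive.zip /tmp/out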
| {
"content_hash": "717ff650d6cf120efd3babfcd906f74c",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 27.97142857142857,
"alnum_prop": 0.6036772216547498,
"repo_name": "jtattermusch/grpc",
"id": "440572691fda77cb6a3f31b8a308610ff945a1d2",
"size": "1559",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tools/run_tests/python_utils/download_and_unzip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "5444"
},
{
"name": "Batchfile",
"bytes": "37697"
},
{
"name": "C",
"bytes": "1336485"
},
{
"name": "C#",
"bytes": "113402"
},
{
"name": "C++",
"bytes": "17334639"
},
{
"name": "CMake",
"bytes": "29311"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "Cython",
"bytes": "258846"
},
{
"name": "Dockerfile",
"bytes": "181146"
},
{
"name": "Go",
"bytes": "34794"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "14329"
},
{
"name": "JavaScript",
"bytes": "5572"
},
{
"name": "Objective-C",
"bytes": "724877"
},
{
"name": "Objective-C++",
"bytes": "79586"
},
{
"name": "PHP",
"bytes": "487721"
},
{
"name": "PowerShell",
"bytes": "5008"
},
{
"name": "Python",
"bytes": "3816194"
},
{
"name": "Ruby",
"bytes": "649180"
},
{
"name": "Shell",
"bytes": "771712"
},
{
"name": "Starlark",
"bytes": "859331"
},
{
"name": "Swift",
"bytes": "7487"
},
{
"name": "XSLT",
"bytes": "9846"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import json
from datetime import datetime
import boto3
from airflow import DAG
from airflow.decorators import task
from airflow.models.baseoperator import chain
from airflow.providers.amazon.aws.operators.s3 import (
S3CreateBucketOperator,
S3CreateObjectOperator,
S3DeleteBucketOperator,
)
from airflow.providers.amazon.aws.operators.sagemaker import (
SageMakerDeleteModelOperator,
SageMakerEndpointConfigOperator,
SageMakerEndpointOperator,
SageMakerModelOperator,
SageMakerTrainingOperator,
)
from airflow.providers.amazon.aws.sensors.sagemaker import SageMakerEndpointSensor
from airflow.utils.trigger_rule import TriggerRule
from tests.system.providers.amazon.aws.utils import ENV_ID_KEY, SystemTestContextBuilder, purge_logs
DAG_ID = "example_sagemaker_endpoint"
# Externally fetched variables:
ROLE_ARN_KEY = "ROLE_ARN"
sys_test_context_task = SystemTestContextBuilder().add_variable(ROLE_ARN_KEY).build()
# The URI of a Docker image for handling KNN model training.
# To find the URI of a free Amazon-provided image that can be used, substitute your
# desired region in the following link and find the URI under "Registry Path".
# https://docs.aws.amazon.com/sagemaker/latest/dg/ecr-us-east-1.html#knn-us-east-1.title
# This URI should be in the format of {12-digits}.dkr.ecr.{region}.amazonaws.com/knn
KNN_IMAGES_BY_REGION = {
"us-east-1": "382416733822.dkr.ecr.us-east-1.amazonaws.com/knn:1",
"us-west-2": "174872318107.dkr.ecr.us-west-2.amazonaws.com/knn:1",
}
# For an example of how to obtain the following train and test data, please see
# https://github.com/apache/airflow/blob/main/airflow/providers/amazon/aws/example_dags/example_sagemaker.py
TRAIN_DATA = "0,4.9,2.5,4.5,1.7\n1,7.0,3.2,4.7,1.4\n0,7.3,2.9,6.3,1.8\n2,5.1,3.5,1.4,0.2\n"
SAMPLE_TEST_DATA = "6.4,3.2,4.5,1.5"
@task
def call_endpoint(endpoint_name):
response = (
boto3.Session()
.client("sagemaker-runtime")
.invoke_endpoint(
EndpointName=endpoint_name,
ContentType="text/csv",
Body=SAMPLE_TEST_DATA,
)
)
return json.loads(response["Body"].read().decode())["predictions"]
@task(trigger_rule=TriggerRule.ALL_DONE)
def delete_endpoint_config(endpoint_config_job_name):
boto3.client("sagemaker").delete_endpoint_config(EndpointConfigName=endpoint_config_job_name)
@task(trigger_rule=TriggerRule.ALL_DONE)
def delete_endpoint(endpoint_name):
boto3.client("sagemaker").delete_endpoint(EndpointName=endpoint_name)
@task(trigger_rule=TriggerRule.ALL_DONE)
def delete_logs(env_id, endpoint_name):
generated_logs = [
# Format: ('log group name', 'log stream prefix')
("/aws/sagemaker/TrainingJobs", env_id),
(f"/aws/sagemaker/Endpoints/{endpoint_name}", env_id),
]
purge_logs(generated_logs)
@task
def set_up(env_id, role_arn, ti=None):
bucket_name = f"{env_id}-sagemaker"
input_data_s3_key = f"{env_id}/input-data"
training_output_s3_key = f"{env_id}/results"
endpoint_config_job_name = f"{env_id}-endpoint-config"
endpoint_name = f"{env_id}-endpoint"
model_name = f"{env_id}-KNN-model"
training_job_name = f"{env_id}-train"
region = boto3.session.Session().region_name
try:
knn_image_uri = KNN_IMAGES_BY_REGION[region]
except KeyError:
raise KeyError(
f"Region name {region} does not have a known KNN "
f"Image URI. Please add the region and URI following "
f"the directions at the top of the system testfile "
)
training_config = {
"TrainingJobName": training_job_name,
"RoleArn": role_arn,
"AlgorithmSpecification": {
"TrainingImage": knn_image_uri,
"TrainingInputMode": "File",
},
"HyperParameters": {
"predictor_type": "classifier",
"feature_dim": "4",
"k": "3",
"sample_size": str(TRAIN_DATA.count("\n") - 1),
},
"InputDataConfig": [
{
"ChannelName": "train",
"CompressionType": "None",
"ContentType": "text/csv",
"DataSource": {
"S3DataSource": {
"S3DataDistributionType": "FullyReplicated",
"S3DataType": "S3Prefix",
"S3Uri": f"s3://{bucket_name}/{input_data_s3_key}/train.csv",
}
},
}
],
"OutputDataConfig": {"S3OutputPath": f"s3://{bucket_name}/{training_output_s3_key}/"},
"ResourceConfig": {
"InstanceCount": 1,
"InstanceType": "ml.m5.large",
"VolumeSizeInGB": 1,
},
"StoppingCondition": {"MaxRuntimeInSeconds": 6 * 60},
}
model_config = {
"ModelName": model_name,
"ExecutionRoleArn": role_arn,
"PrimaryContainer": {
"Mode": "SingleModel",
"Image": knn_image_uri,
"ModelDataUrl": f"s3://{bucket_name}/{training_output_s3_key}/{training_job_name}/output/model.tar.gz", # noqa: E501
},
}
endpoint_config_config = {
"EndpointConfigName": endpoint_config_job_name,
"ProductionVariants": [
{
"VariantName": f"{env_id}-demo",
"ModelName": model_name,
"InstanceType": "ml.t2.medium",
"InitialInstanceCount": 1,
},
],
}
deploy_endpoint_config = {
"EndpointName": endpoint_name,
"EndpointConfigName": endpoint_config_job_name,
}
ti.xcom_push(key="bucket_name", value=bucket_name)
ti.xcom_push(key="input_data_s3_key", value=input_data_s3_key)
ti.xcom_push(key="model_name", value=model_name)
ti.xcom_push(key="endpoint_name", value=endpoint_name)
ti.xcom_push(key="endpoint_config_job_name", value=endpoint_config_job_name)
ti.xcom_push(key="training_config", value=training_config)
ti.xcom_push(key="model_config", value=model_config)
ti.xcom_push(key="endpoint_config_config", value=endpoint_config_config)
ti.xcom_push(key="deploy_endpoint_config", value=deploy_endpoint_config)
with DAG(
dag_id=DAG_ID,
schedule="@once",
start_date=datetime(2021, 1, 1),
tags=["example"],
catchup=False,
) as dag:
test_context = sys_test_context_task()
test_setup = set_up(
env_id=test_context[ENV_ID_KEY],
role_arn=test_context[ROLE_ARN_KEY],
)
create_bucket = S3CreateBucketOperator(
task_id="create_bucket",
bucket_name=test_setup["bucket_name"],
)
upload_data = S3CreateObjectOperator(
task_id="upload_data",
s3_bucket=test_setup["bucket_name"],
s3_key=f'{test_setup["input_data_s3_key"]}/train.csv',
data=TRAIN_DATA,
)
train_model = SageMakerTrainingOperator(
task_id="train_model",
config=test_setup["training_config"],
)
create_model = SageMakerModelOperator(
task_id="create_model",
config=test_setup["model_config"],
)
# [START howto_operator_sagemaker_endpoint_config]
configure_endpoint = SageMakerEndpointConfigOperator(
task_id="configure_endpoint",
config=test_setup["endpoint_config_config"],
)
# [END howto_operator_sagemaker_endpoint_config]
# [START howto_operator_sagemaker_endpoint]
deploy_endpoint = SageMakerEndpointOperator(
task_id="deploy_endpoint",
config=test_setup["deploy_endpoint_config"],
)
# [END howto_operator_sagemaker_endpoint]
# SageMakerEndpointOperator waits by default, setting as False to test the Sensor below.
deploy_endpoint.wait_for_completion = False
# [START howto_sensor_sagemaker_endpoint]
await_endpoint = SageMakerEndpointSensor(
task_id="await_endpoint",
endpoint_name=test_setup["endpoint_name"],
)
# [END howto_sensor_sagemaker_endpoint]
delete_model = SageMakerDeleteModelOperator(
task_id="delete_model",
trigger_rule=TriggerRule.ALL_DONE,
config={"ModelName": test_setup["model_name"]},
)
delete_bucket = S3DeleteBucketOperator(
task_id="delete_bucket",
trigger_rule=TriggerRule.ALL_DONE,
bucket_name=test_setup["bucket_name"],
force_delete=True,
)
chain(
# TEST SETUP
test_context,
test_setup,
create_bucket,
upload_data,
# TEST BODY
train_model,
create_model,
configure_endpoint,
deploy_endpoint,
await_endpoint,
call_endpoint(test_setup["endpoint_name"]),
# TEST TEARDOWN
delete_endpoint_config(test_setup["endpoint_config_job_name"]),
delete_endpoint(test_setup["endpoint_name"]),
delete_model,
delete_bucket,
delete_logs(test_context[ENV_ID_KEY], test_setup["endpoint_name"]),
)
from tests.system.utils.watcher import watcher
# This test needs watcher in order to properly mark success/failure
# when "tearDown" task with trigger rule is part of the DAG
list(dag.tasks) >> watcher()
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| {
"content_hash": "bca95a123b317d6fc3bf3edad735435f",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 129,
"avg_line_length": 33.16197183098591,
"alnum_prop": 0.6345296241240178,
"repo_name": "apache/airflow",
"id": "1e2d43b83d9a767e38e27e4b90638cce84ffbfad",
"size": "10203",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tests/system/providers/amazon/aws/example_sagemaker_endpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
"""
Chebfun module
==============
Vendorized version from:
https://github.com/pychebfun/pychebfun/blob/master/pychebfun
The rationale for not including this library as a strict dependency is that
it has not been released.
.. moduleauthor :: Chris Swierczewski <cswiercz@gmail.com>
.. moduleauthor :: Olivier Verdier <olivier.verdier@gmail.com>
.. moduleauthor :: Gregory Potter <ghpotter@gmail.com>
The copyright notice (BSD-3 clause) is as follows:
Copyright 2017 Olivier Verdier
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division
import operator
from functools import wraps
import numpy as np
import numpy.polynomial as poly
from numpy.polynomial.chebyshev import cheb2poly, Chebyshev
from numpy.polynomial.polynomial import Polynomial
import sys
emach = sys.float_info.epsilon # machine epsilon
global sp_fftpack_ifft
sp_fftpack_ifft = None
def fftpack_ifft(*args, **kwargs):
global sp_fftpack_ifft
if sp_fftpack_ifft is None:
from scipy.fftpack import ifft as sp_fftpack_ifft
return sp_fftpack_ifft(*args, **kwargs)
global sp_fftpack_fft
sp_fftpack_fft = None
def fftpack_fft(*args, **kwargs):
global sp_fftpack_fft
if sp_fftpack_fft is None:
from scipy.fftpack import fft as sp_fftpack_fft
return sp_fftpack_fft(*args, **kwargs)
global sp_eigvals
sp_eigvals = None
def eigvals(*args, **kwargs):
global sp_eigvals
if sp_eigvals is None:
from scipy.linalg import eigvals as sp_eigvals
return sp_eigvals(*args, **kwargs)
global sp_toeplitz
sp_toeplitz = None
def toeplitz(*args, **kwargs):
global sp_toeplitz
if sp_toeplitz is None:
from scipy.linalg import toeplitz as sp_toeplitz
return sp_toeplitz(*args, **kwargs)
def build_pychebfun(f, domain, N=15):
fvec = lambda xs: [f(xi) for xi in xs]
return chebfun(f=fvec, domain=domain, N=N)
def build_solve_pychebfun(f, goal, domain, N=15, N_max=100, find_roots=2):
cache = {}
def cached_fun(x):
# Almost half the points are cached!
if x in cache:
return cache[x]
val = f(x)
cache[x] = val
return val
fun = build_pychebfun(cached_fun, domain, N=N)
roots = (fun - goal).roots()
while (len(roots) < find_roots and len(fun._values) < N_max):
N *= 2
fun = build_pychebfun(cached_fun, domain, N=N)
roots = (fun - goal).roots()
roots = [i for i in roots if domain[0] < i < domain[1]]
return roots, fun
def chebfun_to_poly(coeffs_or_fun, domain=None, text=False):
if isinstance(coeffs_or_fun, Chebfun):
coeffs = coeffs_or_fun.coefficients()
domain = coeffs_or_fun._domain
elif hasattr(coeffs_or_fun, '__class__') and coeffs_or_fun.__class__.__name__ == 'ChebyshevExpansion':
coeffs = coeffs_or_fun.coef()
domain = coeffs_or_fun.xmin(), coeffs_or_fun.xmax()
else:
coeffs = coeffs_or_fun
low, high = domain
# Reverse the coefficients, and use cheb2poly to make it in the polynomial domain
poly_coeffs = cheb2poly(coeffs)[::-1].tolist()
if not text:
return poly_coeffs
s = 'coeffs = %s\n' %poly_coeffs
delta = high - low
delta_sum = high + low
# Generate the expression
s += 'horner(coeffs, %.18g*(x - %.18g))' %(2.0/delta, 0.5*delta_sum)
# return the string
return s
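# Small illustrative sketch (not part of the upstream module): T_1(x) = x, so
# the Chebyshev coefficient vector [0, 1] on the domain (-1, 1) maps to the
# plain polynomial x, returned highest power first for use with horner():
#
#   chebfun_to_poly([0.0, 1.0], domain=(-1.0, 1.0))  ->  [1.0, 0.0]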
def cheb_to_poly(coeffs_or_fun, domain=None):
"""Just call horner on the outputs!"""
from fluids.numerics import horner as horner_poly
if isinstance(coeffs_or_fun, Chebfun):
coeffs = coeffs_or_fun.coefficients()
domain = coeffs_or_fun._domain
elif hasattr(coeffs_or_fun, '__class__') and coeffs_or_fun.__class__.__name__ == 'ChebyshevExpansion':
coeffs = coeffs_or_fun.coef()
domain = coeffs_or_fun.xmin(), coeffs_or_fun.xmax()
else:
coeffs = coeffs_or_fun
low, high = domain
coeffs = cheb2poly(coeffs)[::-1].tolist() # Convert to polynomial basis
# Mix in limits to make it a normal polynomial
my_poly = Polynomial([-0.5*(high + low)*2.0/(high - low), 2.0/(high - low)])
poly_coeffs = horner_poly(coeffs, my_poly).coef[::-1].tolist()
return poly_coeffs
def cheb_range_simplifier(low, high, text=False):
    '''Return the (constant, factor) pair such that factor*(x + constant)
    maps [low, high] onto [-1, 1] for use with chebval; with text=True,
    return the equivalent expression as a string.
>>> low, high = 0.0023046250851646434, 4.7088985707840125
>>> cheb_range_simplifier(low, high, text=True)
'chebval(0.42493574399544564724*(x + -2.3556015979345885647), coeffs)'
'''
constant = 0.5*(-low-high)
factor = 2.0/(high-low)
if text:
return 'chebval(%.20g*(x + %.20g), coeffs)' %(factor, constant)
return constant, factor
def cast_scalar(method):
"""Cast scalars to constant interpolating objects."""
@wraps(method)
def new_method(self, other):
if np.isscalar(other):
other = type(self)([other],self.domain())
return method(self, other)
return new_method
class Polyfun(object):
"""Construct a Lagrange interpolating polynomial over arbitrary points.
    Polyfun objects consist, in essence, of two components: 1) an interpolant
    on [-1, 1], and 2) a domain attribute [a, b]. These two pieces of information
are used to define and subsequently keep track of operations upon Chebyshev
interpolants defined on an arbitrary real interval [a,b].
"""
# ----------------------------------------------------------------
# Initialisation methods
# ----------------------------------------------------------------
class NoConvergence(Exception):
"""Raised when dichotomy does not converge."""
class DomainMismatch(Exception):
"""Raised when there is an interval mismatch."""
@classmethod
def from_data(self, data, domain=None):
"""Initialise from interpolation values."""
return self(data,domain)
@classmethod
def from_fun(self, other):
"""Initialise from another instance."""
return self(other.values(),other.domain())
@classmethod
def from_coeff(self, chebcoeff, domain=None, prune=True, vscale=1.):
"""
Initialise from provided coefficients
prune: Whether to prune the negligible coefficients
vscale: the scale to use when pruning
"""
coeffs = np.asarray(chebcoeff)
if prune:
N = self._cutoff(coeffs, vscale)
pruned_coeffs = coeffs[:N]
else:
pruned_coeffs = coeffs
values = self.polyval(pruned_coeffs)
return self(values, domain, vscale)
@classmethod
def dichotomy(self, f, kmin=2, kmax=12, raise_no_convergence=True,):
"""Compute the coefficients for a function f by dichotomy.
kmin, kmax: log2 of number of interpolation points to try
raise_no_convergence: whether to raise an exception if the dichotomy does not converge
"""
for k in range(kmin, kmax):
N = pow(2, k)
sampled = self.sample_function(f, N)
coeffs = self.polyfit(sampled)
# 3) Check for negligible coefficients
            # If within bound: the trailing coefficients are negligible, so break
bnd = self._threshold(np.max(np.abs(coeffs)))
last = abs(coeffs[-2:])
if np.all(last <= bnd):
break
else:
if raise_no_convergence:
raise self.NoConvergence(last, bnd)
return coeffs
@classmethod
def from_function(self, f, domain=None, N=None):
"""Initialise from a function to sample.
N: optional parameter which indicates the range of the dichotomy
"""
# rescale f to the unit domain
domain = self.get_default_domain(domain)
a,b = domain[0], domain[-1]
map_ui_ab = lambda t: 0.5*(b-a)*t + 0.5*(a+b)
args = {'f': lambda t: f(map_ui_ab(t))}
if N is not None: # N is provided
nextpow2 = int(np.log2(N))+1
args['kmin'] = nextpow2
args['kmax'] = nextpow2+1
args['raise_no_convergence'] = False
else:
args['raise_no_convergence'] = True
# Find out the right number of coefficients to keep
coeffs = self.dichotomy(**args)
return self.from_coeff(coeffs, domain)
@classmethod
def _threshold(self, vscale):
"""Compute the threshold at which coefficients are trimmed."""
bnd = 128*emach*vscale
return bnd
@classmethod
def _cutoff(self, coeffs, vscale):
"""Compute cutoff index after which the coefficients are deemed
negligible."""
bnd = self._threshold(vscale)
inds = np.nonzero(abs(coeffs) >= bnd)
if len(inds[0]):
N = inds[0][-1]
else:
N = 0
return N+1
def __init__(self, values=0., domain=None, vscale=None):
"""Init an object from values at interpolation points.
values: Interpolation values
vscale: The actual vscale; computed automatically if not given
"""
avalues = np.asarray(values,)
avalues1 = np.atleast_1d(avalues)
N = len(avalues1)
points = self.interpolation_points(N)
self._values = avalues1
if vscale is not None:
self._vscale = vscale
else:
self._vscale = np.max(np.abs(self._values))
self.p = self.interpolator(points, avalues1)
domain = self.get_default_domain(domain)
self._domain = np.array(domain)
a,b = domain[0], domain[-1]
# maps from [-1,1] <-> [a,b]
self._ab_to_ui = lambda x: (2.0*x-a-b)/(b-a)
self._ui_to_ab = lambda t: 0.5*(b-a)*t + 0.5*(a+b)
def same_domain(self, fun2):
"""Returns True if the domains of two objects are the same."""
return np.allclose(self.domain(), fun2.domain(), rtol=1e-14, atol=1e-14)
# ----------------------------------------------------------------
# String representations
# ----------------------------------------------------------------
def __repr__(self):
"""Display method."""
a, b = self.domain()
vals = self.values()
return (
'%s \n '
' domain length endpoint values\n '
' [%5.1f, %5.1f] %5d %5.2f %5.2f\n '
'vscale = %1.2e') % (
str(type(self)).split('.')[-1].split('>')[0][:-1],
a,b,self.size(),vals[-1],vals[0],self._vscale,)
def __str__(self):
return "<{0}({1})>".format(
str(type(self)).split('.')[-1].split('>')[0][:-1],self.size(),)
# ----------------------------------------------------------------
# Basic Operator Overloads
# ----------------------------------------------------------------
def __call__(self, x):
return self.p(self._ab_to_ui(x))
def __getitem__(self, s):
"""Components s of the fun."""
return self.from_data(self.values().T[s].T)
def __bool__(self):
"""Test for difference from zero (up to tolerance)"""
return not np.allclose(self.values(), 0)
__nonzero__ = __bool__
def __eq__(self, other):
return not(self - other)
def __ne__(self, other):
return not (self == other)
@cast_scalar
def __add__(self, other):
"""Addition."""
if not self.same_domain(other):
raise self.DomainMismatch(self.domain(),other.domain())
ps = [self, other]
# length difference
diff = other.size() - self.size()
# determine which of self/other is the smaller/bigger
big = diff > 0
small = not big
# pad the coefficients of the small one with zeros
small_coeffs = ps[small].coefficients()
big_coeffs = ps[big].coefficients()
padded = np.zeros_like(big_coeffs)
padded[:len(small_coeffs)] = small_coeffs
# add the values and create a new object with them
chebsum = big_coeffs + padded
new_vscale = np.max([self._vscale, other._vscale])
return self.from_coeff(
chebsum, domain=self.domain(), vscale=new_vscale
)
__radd__ = __add__
@cast_scalar
def __sub__(self, other):
"""Subtraction."""
return self + (-other)
def __rsub__(self, other):
return -(self - other)
def __rmul__(self, other):
return self.__mul__(other)
def __rtruediv__(self, other):
return self.__rdiv__(other)
def __neg__(self):
"""Negation."""
return self.from_data(-self.values(),domain=self.domain())
def __abs__(self):
return self.from_function(lambda x: abs(self(x)),domain=self.domain())
# ----------------------------------------------------------------
# Attributes
# ----------------------------------------------------------------
def size(self):
return self.p.n
def coefficients(self):
return self.polyfit(self.values())
def values(self):
return self._values
def domain(self):
return self._domain
# ----------------------------------------------------------------
# Integration and differentiation
# ----------------------------------------------------------------
def integrate(self):
raise NotImplementedError()
def differentiate(self):
raise NotImplementedError()
def dot(self, other):
r"""Return the Hilbert scalar product :math:`\int f.g`."""
prod = self * other
return prod.sum()
def norm(self):
"""
Return: square root of scalar product with itself.
"""
norm = np.sqrt(self.dot(self))
return norm
# ----------------------------------------------------------------
# Miscellaneous operations
# ----------------------------------------------------------------
def restrict(self,subinterval):
"""Return a Polyfun that matches self on subinterval."""
if (subinterval[0] < self._domain[0]) or (subinterval[1] > self._domain[1]):
raise ValueError("Can only restrict to subinterval")
return self.from_function(self, subinterval)
# ----------------------------------------------------------------
# Class method aliases
# ----------------------------------------------------------------
diff = differentiate
cumsum = integrate
class Chebfun(Polyfun):
"""Eventually set this up so that a Chebfun is a collection of Chebfuns.
    This will enable piecewise smooth representations à la Matlab Chebfun v2.0.
"""
# ----------------------------------------------------------------
# Standard construction class methods.
# ----------------------------------------------------------------
@classmethod
def get_default_domain(self, domain=None):
if domain is None:
return [-1., 1.]
else:
return domain
@classmethod
def identity(self, domain=[-1., 1.]):
"""The identity function x -> x."""
return self.from_data([domain[1],domain[0]], domain)
@classmethod
def basis(self, n):
"""Chebyshev basis functions T_n."""
if n == 0:
return self(np.array([1.]))
vals = np.ones(n+1)
vals[1::2] = -1
return self(vals)
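    # Sanity-check sketch (an illustration, not upstream code): the alternating
    # +/-1 values above are T_n evaluated at the Chebyshev points, so the
    # resulting interpolant reproduces T_n, e.g. up to floating-point error
    #
    #     Chebfun.basis(2)(0.5)  ->  2*0.5**2 - 1 = -0.5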
# ----------------------------------------------------------------
# Integration and differentiation
# ----------------------------------------------------------------
def sum(self):
"""Evaluate the integral over the given interval using Clenshaw-Curtis
quadrature."""
ak = self.coefficients()
ak2 = ak[::2]
n = len(ak2)
Tints = 2/(1-(2*np.arange(n))**2)
val = np.sum((Tints*ak2.T).T, axis=0)
a_, b_ = self.domain()
return 0.5*(b_-a_)*val
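    # Minimal usage sketch (assumes the module-level chebfun() constructor
    # defined below; value is approximate):
    #
    #     >>> f = chebfun(lambda x: x**2)   # default domain [-1, 1]
    #     >>> f.sum()                       # integral of x**2 over [-1, 1]
    #     0.666...                          # i.e. ~2/3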
def integrate(self):
"""Return the object representing the primitive of self over the domain.
The output starts at zero on the left-hand side of the domain.
"""
coeffs = self.coefficients()
a,b = self.domain()
int_coeffs = 0.5*(b-a)*poly.chebyshev.chebint(coeffs)
antiderivative = self.from_coeff(int_coeffs, domain=self.domain())
return antiderivative - antiderivative(a)
def differentiate(self, n=1):
"""n-th derivative, default 1."""
ak = self.coefficients()
a_, b_ = self.domain()
for _ in range(n):
ak = self.differentiator(ak)
return self.from_coeff((2./(b_-a_))**n*ak, domain=self.domain())
# ----------------------------------------------------------------
# Roots
# ----------------------------------------------------------------
def roots(self):
"""Utilises Boyd's O(n^2) recursive subdivision algorithm.
The chebfun
is recursively subsampled until it is successfully represented to
machine precision by a sequence of piecewise interpolants of degree
100 or less. A colleague matrix eigenvalue solve is then applied to
each of these pieces and the results are concatenated.
See:
J. P. Boyd, Computing zeros on a real interval through Chebyshev
expansion and polynomial rootfinding, SIAM J. Numer. Anal., 40
(2002), pp. 1666–1682.
"""
if self.size() == 1:
return np.array([])
elif self.size() <= 100:
ak = self.coefficients()
v = np.zeros_like(ak[:-1])
v[1] = 0.5
C1 = toeplitz(v)
C2 = np.zeros_like(C1)
C1[0,1] = 1.
C2[-1,:] = ak[:-1]
C = C1 - .5/ak[-1] * C2
eigenvalues = eigvals(C)
roots = [eig.real for eig in eigenvalues
if np.allclose(eig.imag,0,atol=1e-10)
and np.abs(eig.real) <=1]
scaled_roots = self._ui_to_ab(np.array(roots))
return scaled_roots
else:
try:
# divide at a close-to-zero split-point
split_point = self._ui_to_ab(0.0123456789)
return np.concatenate(
(self.restrict([self._domain[0],split_point]).roots(),
self.restrict([split_point,self._domain[1]]).roots()))
            except Exception:
# Seems to have many fake roots for high degree fits
coeffs = self.coefficients()
domain = self._domain
possibilities = Chebyshev(coeffs, domain).roots()
return np.array([float(i.real) for i in possibilities if i.imag == 0.0])
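    # Illustrative sketch (assumes numpy as np and the module-level chebfun()
    # constructor; values approximate, ordering may vary):
    #
    #     >>> np.sort(chebfun(np.sin, domain=[-5, 5]).roots()).round(4)
    #     array([-3.1416,  0.    ,  3.1416])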
# ----------------------------------------------------------------
# Interpolation and evaluation (go from values to coefficients)
# ----------------------------------------------------------------
@classmethod
def interpolation_points(self, N):
"""N Chebyshev points in [-1, 1], boundaries included."""
if N == 1:
return np.array([0.])
return np.cos(np.arange(N)*np.pi/(N-1))
@classmethod
def sample_function(self, f, N):
"""Sample a function on N+1 Chebyshev points."""
x = self.interpolation_points(N+1)
return f(x)
@classmethod
def polyfit(self, sampled):
"""Compute Chebyshev coefficients for values located on Chebyshev
points.
sampled: array; first dimension is number of Chebyshev points
"""
asampled = np.asarray(sampled)
if len(asampled) == 1:
return asampled
evened = even_data(asampled)
coeffs = dct(evened)
return coeffs
@classmethod
def polyval(self, chebcoeff):
"""Compute the interpolation values at Chebyshev points.
chebcoeff: Chebyshev coefficients
"""
N = len(chebcoeff)
if N == 1:
return chebcoeff
data = even_data(chebcoeff)/2
data[0] *= 2
data[N-1] *= 2
fftdata = 2*(N-1)*fftpack_ifft(data, axis=0)
complex_values = fftdata[:N]
# convert to real if input was real
if np.isrealobj(chebcoeff):
values = np.real(complex_values)
else:
values = complex_values
return values
@classmethod
def interpolator(self, x, values):
"""Returns a polynomial with vector coefficients which interpolates the
values at the Chebyshev points x."""
# hacking the barycentric interpolator by computing the weights in advance
from scipy.interpolate import BarycentricInterpolator as Bary
p = Bary([0.])
N = len(values)
weights = np.ones(N)
weights[0] = .5
weights[1::2] = -1
weights[-1] *= .5
p.wi = weights
p.xi = x
p.set_yi(values)
return p
# ----------------------------------------------------------------
# Helper for differentiation.
# ----------------------------------------------------------------
@classmethod
def differentiator(self, A):
"""Differentiate a set of Chebyshev polynomial expansion coefficients
Originally from http://www.scientificpython.net/pyblog/chebyshev-
differentiation.
+ (lots of) bug fixing + pythonisation
"""
m = len(A)
SA = (A.T* 2*np.arange(m)).T
DA = np.zeros_like(A)
if m == 1: # constant
return np.zeros_like(A[0:1])
if m == 2: # linear
return A[1:2,]
DA[m-3:m-1,] = SA[m-2:m,]
for j in range(m//2 - 1):
k = m-3-2*j
DA[k] = SA[k+1] + DA[k+2]
DA[k-1] = SA[k] + DA[k+1]
DA[0] = (SA[1] + DA[2])*0.5
return DA
# ----------------------------------------------------------------
# General utilities
# ----------------------------------------------------------------
def even_data(data):
"""
Construct Extended Data Vector (equivalent to creating an
even extension of the original function)
Return: array of length 2(N-1)
For instance, [0,1,2,3,4] --> [0,1,2,3,4,3,2,1]
"""
return np.concatenate([data, data[-2:0:-1]],)
def dct(data):
"""Compute DCT using FFT."""
N = len(data)//2
fftdata = fftpack_fft(data, axis=0)[:N+1]
fftdata /= N
fftdata[0] /= 2.
fftdata[-1] /= 2.
if np.isrealobj(data):
data = np.real(fftdata)
else:
data = fftdata
return data
# ----------------------------------------------------------------
# Add overloaded operators
# ----------------------------------------------------------------
def _add_operator(cls, op):
def method(self, other):
if not self.same_domain(other):
raise self.DomainMismatch(self.domain(), other.domain())
return self.from_function(
lambda x: op(self(x).T, other(x).T).T, domain=self.domain(), )
cast_method = cast_scalar(method)
name = '__'+op.__name__+'__'
cast_method.__name__ = name
cast_method.__doc__ = "operator {}".format(name)
setattr(cls, name, cast_method)
def rdiv(a, b):
return b/a
for _op in [operator.mul, operator.truediv, operator.pow, rdiv]:
_add_operator(Polyfun, _op)
# ----------------------------------------------------------------
# Add numpy ufunc delegates
# ----------------------------------------------------------------
def _add_delegate(ufunc, nonlinear=True):
def method(self):
return self.from_function(lambda x: ufunc(self(x)), domain=self.domain())
name = ufunc.__name__
method.__name__ = name
method.__doc__ = "delegate for numpy's ufunc {}".format(name)
setattr(Polyfun, name, method)
# Following list generated from:
# https://github.com/numpy/numpy/blob/master/numpy/core/code_generators/generate_umath.py
for func in [np.arccos, np.arccosh, np.arcsin, np.arcsinh, np.arctan, np.arctanh, np.cos, np.sin, np.tan, np.cosh, np.sinh, np.tanh, np.exp, np.exp2, np.expm1, np.log, np.log2, np.log1p, np.sqrt, np.ceil, np.trunc, np.fabs, np.floor, ]:
_add_delegate(func)
# ----------------------------------------------------------------
# General Aliases
# ----------------------------------------------------------------
## chebpts = interpolation_points
# ----------------------------------------------------------------
# Constructor inspired by the Matlab version
# ----------------------------------------------------------------
def chebfun(f=None, domain=[-1,1], N=None, chebcoeff=None,):
"""Create a Chebyshev polynomial approximation of the function $f$ on the
interval :math:`[-1, 1]`.
:param callable f: Python, Numpy, or Sage function
:param int N: (default = None) specify number of interpolating points
:param np.array chebcoeff: (default = np.array(0)) specify the coefficients
"""
# Chebyshev coefficients
if chebcoeff is not None:
return Chebfun.from_coeff(chebcoeff, domain)
# another instance
if isinstance(f, Polyfun):
return Chebfun.from_fun(f)
# callable
if hasattr(f, '__call__'):
return Chebfun.from_function(f, domain, N)
# from here on, assume that f is None, or iterable
if np.isscalar(f):
f = [f]
try:
iter(f) # interpolation values provided
except TypeError:
pass
else:
return Chebfun(f, domain)
raise TypeError('Impossible to initialise the object from an object of type {}'.format(type(f))) | {
"content_hash": "1ac15c1660585efb66d4f2ded0136493",
"timestamp": "",
"source": "github",
"line_count": 780,
"max_line_length": 755,
"avg_line_length": 34.46153846153846,
"alnum_prop": 0.5493675595238096,
"repo_name": "CalebBell/fluids",
"id": "ec47a00fa2a1905fc76b71ed3195d84484d5d850",
"size": "26906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fluids/optional/pychebfun.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "173993"
},
{
"name": "Makefile",
"bytes": "813"
},
{
"name": "Python",
"bytes": "2969085"
}
],
"symlink_target": ""
} |
import sqlite3 as lite
import configparser
config = configparser.ConfigParser()
config.read('deaddrop.ini')
database = config['Options']['Database']
con = lite.connect(database)
cur = con.cursor()
with con:
cur.executescript('''
DROP TABLE IF EXISTS files;
CREATE TABLE files(id INTEGER PRIMARY KEY, offset INT, size INT);
INSERT INTO files(offset, size) VALUES (0,1);
''')
print('Database Reset')
| {
"content_hash": "fb5323bbc0d72b21954a537682556666",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 67,
"avg_line_length": 22.833333333333332,
"alnum_prop": 0.7250608272506083,
"repo_name": "freesideatlanta/deaddrop",
"id": "accff3e54d3c88f8998c2f23a02435f8b6d75b3e",
"size": "470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deaddropnoblock/dbreset.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4012"
},
{
"name": "Shell",
"bytes": "4774"
}
],
"symlink_target": ""
} |
from os import path
from collections import namedtuple
import click
import graphviz
from functional import seq
ENGINES = ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchword', 'osage']
Edge = namedtuple('Edge', 'left right label')
def split_edge(edge):
edge_label = None
if ':' in edge:
edge, edge_label = edge.split(':')
if '-' in edge:
left, right = edge.split('-')
if right == '':
right = None
else:
left, right = edge
return Edge(left, right, edge_label)
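# Illustrative examples of the edge mini-syntax parsed above (a sketch, not
# part of the upstream CLI):
#
#   split_edge('a-b:5')  ->  Edge(left='a', right='b', label='5')
#   split_edge('ab')     ->  Edge(left='a', right='b', label=None)
#   split_edge('a-')     ->  Edge(left='a', right=None, label=None)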
@click.command()
@click.option('--engine', '-e', default='dot', type=click.Choice(ENGINES),
help="Choose layout engine to use")
@click.option('--undirected/--directed', '-u/-d', default=True,
help="Specify undirected or directed edges")
@click.option('--format', default='png', type=str, help='Image format')
@click.option('--name', '-n', default=None, type=str, help='Name of graph in image')
@click.option('--dot', is_flag=True, help='Preserve the source dot file')
@click.option('--no-vertex-labels', is_flag=True, help="Don't label vertex labels")
@click.argument('file', type=click.Path(writable=True))
@click.argument('edges', nargs=-1, required=True)
def main(engine, undirected, format, name, dot, file, edges, no_vertex_labels):
if undirected:
graph = graphviz.Graph(engine=engine, format=format)
else:
graph = graphviz.Digraph(engine=engine, format=format)
if name:
graph.body.append(r'label = "{0}"'.format(name))
edges = seq(edges).map(split_edge)
if no_vertex_labels:
edges.map(lambda e: (e.left, e.right)).flatten().distinct()\
.filter_not(lambda n: n is None).for_each(lambda n: graph.node(n, label=''))
else:
edges.map(lambda e: (e.left, e.right)).flatten().distinct() \
.filter_not(lambda n: n is None).for_each(lambda n: graph.node(n))
edges.filter(lambda e: e.right is not None) \
.for_each(lambda e: graph.edge(e.left, e.right, label=e.label))
filepath, filename = path.split(file)
filepath = filepath if filepath != '' else None
graph.render(filename=filename, directory=filepath, cleanup=not dot)
if __name__ == '__main__':
main()
| {
"content_hash": "7b561d5e6b7a204dae6dcd2d5f5075e0",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 88,
"avg_line_length": 37.91525423728814,
"alnum_prop": 0.6338846669646848,
"repo_name": "EntilZha/pygraph",
"id": "7709eda5367ddfaa36385f41751cf91eaf80c7c5",
"size": "2237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygraph/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3282"
}
],
"symlink_target": ""
} |
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v11.enums",
marshal="google.ads.googleads.v11",
manifest={"OptimizationGoalTypeEnum",},
)
class OptimizationGoalTypeEnum(proto.Message):
r"""Container for enum describing the type of optimization goal.
"""
class OptimizationGoalType(proto.Enum):
r"""The type of optimization goal"""
UNSPECIFIED = 0
UNKNOWN = 1
CALL_CLICKS = 2
DRIVING_DIRECTIONS = 3
APP_PRE_REGISTRATION = 4
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "11b99c0e22952abb648c42270843c09c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 68,
"avg_line_length": 24.541666666666668,
"alnum_prop": 0.6519524617996605,
"repo_name": "googleads/google-ads-python",
"id": "07e95baf813cd0295995bf678cb22026ca3637cc",
"size": "1189",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v11/enums/types/optimization_goal_type.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from targets_builder import TARGETSBuilder
import os
import fnmatch
import sys
from util import ColorString
# tests to export as libraries for inclusion in other projects
_EXPORTED_TEST_LIBS = ["env_basic_test"]
# Parse src.mk files as a Dictionary of
# VAR_NAME => list of files
def parse_src_mk(repo_path):
src_mk = repo_path + "/src.mk"
src_files = {}
for line in open(src_mk):
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
if '=' in line:
current_src = line.split('=')[0].strip()
src_files[current_src] = []
elif '.cc' in line:
src_path = line.split('.cc')[0].strip() + '.cc'
src_files[current_src].append(src_path)
return src_files
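# For illustration (hypothetical src.mk contents), a block such as
#
#   LIB_SOURCES = \
#     db/db_impl.cc \
#     db/db_iter.cc
#
# parses to {"LIB_SOURCES": ["db/db_impl.cc", "db/db_iter.cc"]}.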
# get all .cc / .c files
def get_cc_files(repo_path):
cc_files = []
for root, dirnames, filenames in os.walk(repo_path): # noqa: B007 T25377293 Grandfathered in
root = root[(len(repo_path) + 1):]
if "java" in root:
# Skip java
continue
for filename in fnmatch.filter(filenames, '*.cc'):
cc_files.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.c'):
cc_files.append(os.path.join(root, filename))
return cc_files
# Get tests from Makefile
def get_tests(repo_path):
Makefile = repo_path + "/Makefile"
# Dictionary TEST_NAME => IS_PARALLEL
tests = {}
found_tests = False
for line in open(Makefile):
line = line.strip()
if line.startswith("TESTS ="):
found_tests = True
elif found_tests:
if line.endswith("\\"):
# remove the trailing \
line = line[:-1]
line = line.strip()
tests[line] = False
else:
# we consumed all the tests
break
found_parallel_tests = False
for line in open(Makefile):
line = line.strip()
if line.startswith("PARALLEL_TEST ="):
found_parallel_tests = True
elif found_parallel_tests:
if line.endswith("\\"):
# remove the trailing \
line = line[:-1]
line = line.strip()
tests[line] = True
else:
# we consumed all the parallel tests
break
return tests
# Prepare TARGETS file for buck
def generate_targets(repo_path):
print(ColorString.info("Generating TARGETS"))
# parsed src.mk file
src_mk = parse_src_mk(repo_path)
# get all .cc files
cc_files = get_cc_files(repo_path)
# get tests from Makefile
tests = get_tests(repo_path)
if src_mk is None or cc_files is None or tests is None:
return False
TARGETS = TARGETSBuilder("%s/TARGETS" % repo_path)
# rocksdb_lib
TARGETS.add_library(
"rocksdb_lib",
src_mk["LIB_SOURCES"] +
src_mk["TOOL_LIB_SOURCES"])
# rocksdb_test_lib
TARGETS.add_library(
"rocksdb_test_lib",
src_mk.get("MOCK_LIB_SOURCES", []) +
src_mk.get("TEST_LIB_SOURCES", []) +
src_mk.get("EXP_LIB_SOURCES", []) +
src_mk.get("ANALYZER_LIB_SOURCES", []),
[":rocksdb_lib"])
# rocksdb_tools_lib
TARGETS.add_library(
"rocksdb_tools_lib",
src_mk.get("BENCH_LIB_SOURCES", []) +
src_mk.get("ANALYZER_LIB_SOURCES", []) +
["util/testutil.cc"],
[":rocksdb_lib"])
# test for every test we found in the Makefile
for test in sorted(tests):
match_src = [src for src in cc_files if ("/%s.c" % test) in src]
if len(match_src) == 0:
print(ColorString.warning("Cannot find .cc file for %s" % test))
continue
elif len(match_src) > 1:
print(ColorString.warning("Found more than one .cc for %s" % test))
print(match_src)
continue
assert(len(match_src) == 1)
is_parallel = tests[test]
TARGETS.register_test(test, match_src[0], is_parallel)
if test in _EXPORTED_TEST_LIBS:
test_library = "%s_lib" % test
TARGETS.add_library(test_library, match_src, [":rocksdb_test_lib"])
TARGETS.flush_tests()
print(ColorString.info("Generated TARGETS Summary:"))
print(ColorString.info("- %d libs" % TARGETS.total_lib))
    print(ColorString.info("- %d binaries" % TARGETS.total_bin))
print(ColorString.info("- %d tests" % TARGETS.total_test))
return True
def get_rocksdb_path():
# rocksdb = {script_dir}/..
script_dir = os.path.dirname(sys.argv[0])
script_dir = os.path.abspath(script_dir)
rocksdb_path = os.path.abspath(
os.path.join(script_dir, "../"))
return rocksdb_path
def exit_with_error(msg):
print(ColorString.error(msg))
sys.exit(1)
def main():
# Generate TARGETS file for buck
ok = generate_targets(get_rocksdb_path())
if not ok:
exit_with_error("Failed to generate TARGETS files")
if __name__ == "__main__":
main()
| {
"content_hash": "f114840028b1a563a18b4b303ad695ee",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 97,
"avg_line_length": 30.56140350877193,
"alnum_prop": 0.5746268656716418,
"repo_name": "fceller/arangodb",
"id": "a5d71b65d4e7ee2a35a959f41e8f98be81ad093c",
"size": "5298",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "3rdParty/rocksdb/6.2/buckifier/buckify_rocksdb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "AppleScript",
"bytes": "1429"
},
{
"name": "Assembly",
"bytes": "142084"
},
{
"name": "Batchfile",
"bytes": "9073"
},
{
"name": "C",
"bytes": "1938354"
},
{
"name": "C#",
"bytes": "55625"
},
{
"name": "C++",
"bytes": "79379178"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "109718"
},
{
"name": "CSS",
"bytes": "1341035"
},
{
"name": "CoffeeScript",
"bytes": "94"
},
{
"name": "DIGITAL Command Language",
"bytes": "27303"
},
{
"name": "Emacs Lisp",
"bytes": "15477"
},
{
"name": "Go",
"bytes": "1018005"
},
{
"name": "Groff",
"bytes": "263567"
},
{
"name": "HTML",
"bytes": "459886"
},
{
"name": "JavaScript",
"bytes": "55446690"
},
{
"name": "LLVM",
"bytes": "39361"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "178253"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "26909"
},
{
"name": "Objective-C",
"bytes": "4430"
},
{
"name": "Objective-C++",
"bytes": "1857"
},
{
"name": "Pascal",
"bytes": "145262"
},
{
"name": "Perl",
"bytes": "227308"
},
{
"name": "Protocol Buffer",
"bytes": "5837"
},
{
"name": "Python",
"bytes": "3563935"
},
{
"name": "Ruby",
"bytes": "1000962"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scheme",
"bytes": "19885"
},
{
"name": "Shell",
"bytes": "488846"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "Yacc",
"bytes": "36950"
}
],
"symlink_target": ""
} |
"""Support for a ScreenLogic heating device."""
import logging
from screenlogicpy.const import DATA as SL_DATA, EQUIPMENT, HEAT_MODE
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_PRESET_MODE,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.restore_state import RestoreEntity
from . import ScreenlogicEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SUPPORTED_FEATURES = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
SUPPORTED_MODES = [HVAC_MODE_OFF, HVAC_MODE_HEAT]
SUPPORTED_PRESETS = [
HEAT_MODE.SOLAR,
HEAT_MODE.SOLAR_PREFERRED,
HEAT_MODE.HEATER,
]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up entry."""
entities = []
coordinator = hass.data[DOMAIN][config_entry.entry_id]["coordinator"]
for body in coordinator.data[SL_DATA.KEY_BODIES]:
entities.append(ScreenLogicClimate(coordinator, body))
async_add_entities(entities)
class ScreenLogicClimate(ScreenlogicEntity, ClimateEntity, RestoreEntity):
"""Represents a ScreenLogic climate entity."""
def __init__(self, coordinator, body):
"""Initialize a ScreenLogic climate entity."""
super().__init__(coordinator, body)
self._configured_heat_modes = []
# Is solar listed as available equipment?
if self.coordinator.data["config"]["equipment_flags"] & EQUIPMENT.FLAG_SOLAR:
self._configured_heat_modes.extend(
[HEAT_MODE.SOLAR, HEAT_MODE.SOLAR_PREFERRED]
)
self._configured_heat_modes.append(HEAT_MODE.HEATER)
self._last_preset = None
@property
def name(self) -> str:
"""Name of the heater."""
ent_name = self.body["heat_status"]["name"]
return f"{self.gateway_name} {ent_name}"
@property
def min_temp(self) -> float:
"""Minimum allowed temperature."""
return self.body["min_set_point"]["value"]
@property
def max_temp(self) -> float:
"""Maximum allowed temperature."""
return self.body["max_set_point"]["value"]
@property
def current_temperature(self) -> float:
"""Return water temperature."""
return self.body["last_temperature"]["value"]
@property
def target_temperature(self) -> float:
"""Target temperature."""
return self.body["heat_set_point"]["value"]
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
if self.config_data["is_celsius"]["value"] == 1:
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def hvac_mode(self) -> str:
"""Return the current hvac mode."""
if self.body["heat_mode"]["value"] > 0:
return HVAC_MODE_HEAT
return HVAC_MODE_OFF
@property
def hvac_modes(self):
"""Return th supported hvac modes."""
return SUPPORTED_MODES
@property
def hvac_action(self) -> str:
"""Return the current action of the heater."""
if self.body["heat_status"]["value"] > 0:
return CURRENT_HVAC_HEAT
if self.hvac_mode == HVAC_MODE_HEAT:
return CURRENT_HVAC_IDLE
return CURRENT_HVAC_OFF
@property
def preset_mode(self) -> str:
"""Return current/last preset mode."""
if self.hvac_mode == HVAC_MODE_OFF:
return HEAT_MODE.NAME_FOR_NUM[self._last_preset]
return HEAT_MODE.NAME_FOR_NUM[self.body["heat_mode"]["value"]]
@property
def preset_modes(self):
"""All available presets."""
return [
HEAT_MODE.NAME_FOR_NUM[mode_num] for mode_num in self._configured_heat_modes
]
@property
def supported_features(self):
"""Supported features of the heater."""
return SUPPORTED_FEATURES
async def async_set_temperature(self, **kwargs) -> None:
"""Change the setpoint of the heater."""
if (temperature := kwargs.get(ATTR_TEMPERATURE)) is None:
raise ValueError(f"Expected attribute {ATTR_TEMPERATURE}")
async with self.coordinator.api_lock:
success = await self.hass.async_add_executor_job(
self.gateway.set_heat_temp, int(self._data_key), int(temperature)
)
if success:
await self.coordinator.async_request_refresh()
else:
raise HomeAssistantError(
f"Failed to set_temperature {temperature} on body {self.body['body_type']['value']}"
)
async def async_set_hvac_mode(self, hvac_mode) -> None:
"""Set the operation mode."""
if hvac_mode == HVAC_MODE_OFF:
mode = HEAT_MODE.OFF
else:
mode = HEAT_MODE.NUM_FOR_NAME[self.preset_mode]
async with self.coordinator.api_lock:
success = await self.hass.async_add_executor_job(
self.gateway.set_heat_mode, int(self._data_key), int(mode)
)
if success:
await self.coordinator.async_request_refresh()
else:
raise HomeAssistantError(
f"Failed to set_hvac_mode {mode} on body {self.body['body_type']['value']}"
)
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set the preset mode."""
_LOGGER.debug("Setting last_preset to %s", HEAT_MODE.NUM_FOR_NAME[preset_mode])
self._last_preset = mode = HEAT_MODE.NUM_FOR_NAME[preset_mode]
if self.hvac_mode == HVAC_MODE_OFF:
return
async with self.coordinator.api_lock:
success = await self.hass.async_add_executor_job(
self.gateway.set_heat_mode, int(self._data_key), int(mode)
)
if success:
await self.coordinator.async_request_refresh()
else:
raise HomeAssistantError(
f"Failed to set_preset_mode {mode} on body {self.body['body_type']['value']}"
)
async def async_added_to_hass(self):
"""Run when entity is about to be added."""
await super().async_added_to_hass()
_LOGGER.debug("Startup last preset is %s", self._last_preset)
if self._last_preset is not None:
return
prev_state = await self.async_get_last_state()
if (
prev_state is not None
and prev_state.attributes.get(ATTR_PRESET_MODE) is not None
):
_LOGGER.debug(
"Startup setting last_preset to %s from prev_state",
HEAT_MODE.NUM_FOR_NAME[prev_state.attributes.get(ATTR_PRESET_MODE)],
)
self._last_preset = HEAT_MODE.NUM_FOR_NAME[
prev_state.attributes.get(ATTR_PRESET_MODE)
]
else:
_LOGGER.debug(
"Startup setting last_preset to default (%s)",
self._configured_heat_modes[0],
)
self._last_preset = self._configured_heat_modes[0]
@property
def body(self):
"""Shortcut to access body data."""
return self.coordinator.data[SL_DATA.KEY_BODIES][self._data_key]
| {
"content_hash": "6292b2a528d41b263696da7d996d14c5",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 100,
"avg_line_length": 33.93636363636364,
"alnum_prop": 0.6082239485668364,
"repo_name": "kennedyshead/home-assistant",
"id": "b83d2fe03cabafeb5c2c5bc3963f0bee10e6239a",
"size": "7466",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/screenlogic/climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
"""
================================
Equivalent Potential Temperature
================================
Use functions from `metpy.calc` as well as pint's unit support to perform calculations.
The code below uses example data from our test suite to calculate the equivalent potential
temperature over the provided sounding data and plots the values up to 300-hPa.
"""
import matplotlib.pyplot as plt
import pandas as pd
from metpy.calc import equivalent_potential_temperature
from metpy.cbook import get_test_data
from metpy.units import units
###########################################
# Upper air data can be obtained using the siphon package, but for this example we will use
# some of MetPy's sample data.
# Set column names
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
# Read in test data using col_names
df = pd.read_fwf(get_test_data('jan20_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
###########################################
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed'),
               how='all').reset_index(drop=True)
###########################################
# Isolate pressure, temperature, dewpoint, and height and add units
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
h = df['height'].values * units.meter
###########################################
# Calculate the equivalent potential temperature for the whole sounding
theta_e = equivalent_potential_temperature(p, T, Td)
###########################################
# Define a layer with pressure greater than 300 hPa
layer = p > 300 * units.hPa
###########################################
# Use the layer defined above and plot the equivalent potential temperature
plt.figure(figsize=(8, 8))
plt.plot(theta_e[layer], h[layer])
plt.title('Equivalent Potential Temperature (Kelvin)')
plt.show()
| {
"content_hash": "17d91fb03d060f9dadb88c2416ce6d36",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 91,
"avg_line_length": 37.870370370370374,
"alnum_prop": 0.6122249388753056,
"repo_name": "Unidata/MetPy",
"id": "403cb2e8cfd098257c48db542a770d6989bccb54",
"size": "2183",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/calculations/Equivalent_Potential_Temperature.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "551"
},
{
"name": "Makefile",
"bytes": "59"
},
{
"name": "Python",
"bytes": "1841514"
},
{
"name": "Ruby",
"bytes": "137"
}
],
"symlink_target": ""
} |
from office365.sharepoint.client_context import ClientContext
from office365.sharepoint.tenant.administration.sharing_capabilities import SharingCapabilities
from tests import test_user_credentials
target_site_url = "https://mediadev8.sharepoint.com/sites/team"
admin_client = ClientContext(target_site_url).with_credentials(test_user_credentials)
site_props = admin_client.tenant.get_site_properties_by_url(target_site_url, True).execute_query()
if site_props.properties.get("SharingCapability") != SharingCapabilities.ExternalUserSharingOnly:
print("Changing external sharing on site: {0}...".format(target_site_url))
site_props.set_property('SharingCapability', SharingCapabilities.ExternalUserSharingOnly).update().execute_query()
print("Updated.")
| {
"content_hash": "347f4bfc8a9fc82faa02a24ddf076c50",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 118,
"avg_line_length": 63.916666666666664,
"alnum_prop": 0.8070404172099087,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "eac5251be762fd051ef77f8cb45136440bea48bc",
"size": "767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/sharepoint/tenant/change_external_sharing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='USER',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('userid', models.CharField(max_length=200)),
('userpassword', models.CharField(max_length=200)),
('add', models.CharField(max_length=200)),
('phone', models.CharField(max_length=200)),
],
),
]
| {
"content_hash": "92bb408e87d903ae2138012d331d4c25",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 114,
"avg_line_length": 29.04,
"alnum_prop": 0.5564738292011019,
"repo_name": "meliora000/eb_django_app",
"id": "aa7db91b325fa5a39de0fb4724d24a59e219621c",
"size": "796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_eb/users/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20776"
},
{
"name": "HTML",
"bytes": "14581"
},
{
"name": "JavaScript",
"bytes": "17810"
},
{
"name": "Python",
"bytes": "38184"
}
],
"symlink_target": ""
} |
from setuptools import find_packages, setup
setup(
name='ngx-task',
version='0.2',
description='Testimonial for candidates to show up their code-foo',
author='Dmitry Shulyak',
author_email='dmitri.shulyak@gmail.com',
url='https://github.com/shudmi/ngx-task',
classifiers=[
'License :: Apache License 2.0',
'Programming Language :: Python',
'Programming Language :: Python 3',
'Programming Language :: Python 3.4',
],
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=[],
tests_require=[
"pytest==3.0.7",
],
entry_points="""
[console_scripts]
ngx_generate=ngx_task.cli:generate_data
ngx_process=ngx_task.cli:process_data
"""
)
| {
"content_hash": "f21c2993296f222ceff0c4372ede8b43",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 29.53846153846154,
"alnum_prop": 0.61328125,
"repo_name": "shudmi/ngx-task",
"id": "146c1666119fadc456f5d10b33d1c9d62344c5a7",
"size": "768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5012"
}
],
"symlink_target": ""
} |
"""
Routines for filling missing data.
"""
from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Any,
cast,
)
import numpy as np
from pandas._libs import (
algos,
lib,
)
from pandas._typing import (
ArrayLike,
Axis,
F,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
is_numeric_v_string_like,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
na_value_for_dtype,
)
if TYPE_CHECKING:
from pandas import Index
def check_value_size(value, mask: np.ndarray, length: int):
"""
Validate the size of the values passed to ExtensionArray.fillna.
"""
if is_array_like(value):
if len(value) != length:
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {length}"
)
value = value[mask]
return value
def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]:
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
Parameters
----------
arr : ArrayLike
values_to_mask: list, tuple, or scalar
Returns
-------
np.ndarray[bool]
"""
# When called from Block.replace/replace_list, values_to_mask is a scalar
# known to be holdable by arr.
# When called from Series._single_replace, values_to_mask is tuple or list
dtype, values_to_mask = infer_dtype_from(values_to_mask)
# error: Argument "dtype" to "array" has incompatible type "Union[dtype[Any],
# ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
# Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any],
# _DTypeDict, Tuple[Any, Any]]]"
values_to_mask = np.array(values_to_mask, dtype=dtype) # type: ignore[arg-type]
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
# GH 21977
mask = np.zeros(arr.shape, dtype=bool)
for x in nonna:
if is_numeric_v_string_like(arr, x):
# GH#29553 prevent numpy deprecation warnings
pass
else:
mask |= arr == x
if na_mask.any():
mask |= isna(arr)
return mask
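# A minimal sketch of the behaviour (assumes numpy as np; not a doctest):
#
#   mask_missing(np.array([1.0, 2.0, np.nan]), [2, np.nan])
#   ->  array([False,  True,  True])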
def clean_fill_method(method, allow_nearest: bool = False):
# asfreq is compat for resampling
if method in [None, "asfreq"]:
return None
if isinstance(method, str):
method = method.lower()
if method == "ffill":
method = "pad"
elif method == "bfill":
method = "backfill"
valid_methods = ["pad", "backfill"]
expecting = "pad (ffill) or backfill (bfill)"
if allow_nearest:
valid_methods.append("nearest")
expecting = "pad (ffill), backfill (bfill) or nearest"
if method not in valid_methods:
raise ValueError(f"Invalid fill method. Expecting {expecting}. Got {method}")
return method
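# For example (a sketch of the normalisation above):
#
#   clean_fill_method("ffill")   ->  "pad"
#   clean_fill_method("bfill")   ->  "backfill"
#   clean_fill_method("asfreq")  ->  None
#   clean_fill_method("nearest") ->  ValueError unless allow_nearest=True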
# interpolation methods that dispatch to np.interp
NP_METHODS = ["linear", "time", "index", "values"]
# interpolation methods that dispatch to _interpolate_scipy_wrapper
SP_METHODS = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"krogh",
"spline",
"polynomial",
"from_derivatives",
"piecewise_polynomial",
"pchip",
"akima",
"cubicspline",
]
def clean_interp_method(method: str, index: Index, **kwargs) -> str:
order = kwargs.get("order")
if method in ("spline", "polynomial") and order is None:
raise ValueError("You must specify the order of the spline or polynomial.")
valid = NP_METHODS + SP_METHODS
if method not in valid:
raise ValueError(f"method must be one of {valid}. Got '{method}' instead.")
if method in ("krogh", "piecewise_polynomial", "pchip"):
if not index.is_monotonic:
raise ValueError(
f"{method} interpolation requires that the index be monotonic."
)
return method
def find_valid_index(values, *, how: str) -> int | None:
"""
Retrieves the index of the first valid value.
Parameters
----------
values : ndarray or ExtensionArray
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
int or None
"""
assert how in ["first", "last"]
if len(values) == 0: # early stop
return None
is_valid = ~isna(values)
if values.ndim == 2:
is_valid = is_valid.any(1) # reduce axis 1
if how == "first":
idxpos = is_valid[::].argmax()
elif how == "last":
idxpos = len(values) - 1 - is_valid[::-1].argmax()
chk_notna = is_valid[idxpos]
if not chk_notna:
return None
return idxpos
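# Quick illustration (assumes numpy as np; a sketch, not a doctest):
#
#   find_valid_index(np.array([np.nan, 1.0, 2.0, np.nan]), how="first")  ->  1
#   find_valid_index(np.array([np.nan, 1.0, 2.0, np.nan]), how="last")   ->  2
#   find_valid_index(np.array([np.nan, np.nan]), how="first")            ->  None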
def interpolate_array_2d(
data: np.ndarray,
method: str = "pad",
axis: int = 0,
index: Index | None = None,
limit: int | None = None,
limit_direction: str = "forward",
limit_area: str | None = None,
fill_value: Any | None = None,
coerce: bool = False,
downcast: str | None = None,
**kwargs,
):
"""
Wrapper to dispatch to either interpolate_2d or _interpolate_2d_with_fill.
"""
try:
m = clean_fill_method(method)
except ValueError:
m = None
if m is not None:
if fill_value is not None:
# similar to validate_fillna_kwargs
raise ValueError("Cannot pass both fill_value and method")
interp_values = interpolate_2d(
data,
method=m,
axis=axis,
limit=limit,
limit_area=limit_area,
)
else:
assert index is not None # for mypy
interp_values = _interpolate_2d_with_fill(
data=data,
index=index,
axis=axis,
method=method,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
**kwargs,
)
return interp_values
def _interpolate_2d_with_fill(
data: np.ndarray, # floating dtype
index: Index,
axis: int,
method: str = "linear",
limit: int | None = None,
limit_direction: str = "forward",
limit_area: str | None = None,
fill_value: Any | None = None,
**kwargs,
) -> np.ndarray:
"""
Column-wise application of _interpolate_1d.
Notes
-----
    The signature differs from _interpolate_1d because it only
includes what is needed for Block.interpolate.
"""
# validate the interp method
clean_interp_method(method, index, **kwargs)
if is_valid_na_for_dtype(fill_value, data.dtype):
fill_value = na_value_for_dtype(data.dtype, compat=False)
if method == "time":
if not needs_i8_conversion(index.dtype):
raise ValueError(
"time-weighted interpolation only works "
"on Series or DataFrames with a "
"DatetimeIndex"
)
method = "values"
valid_limit_directions = ["forward", "backward", "both"]
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
raise ValueError(
"Invalid limit_direction: expecting one of "
f"{valid_limit_directions}, got '{limit_direction}'."
)
if limit_area is not None:
valid_limit_areas = ["inside", "outside"]
limit_area = limit_area.lower()
if limit_area not in valid_limit_areas:
raise ValueError(
f"Invalid limit_area: expecting one of {valid_limit_areas}, got "
f"{limit_area}."
)
# default limit is unlimited GH #16282
limit = algos.validate_limit(nobs=None, limit=limit)
indices = _index_to_interp_indices(index, method)
def func(yvalues: np.ndarray) -> np.ndarray:
# process 1-d slices in the axis direction, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to _interpolate_1d
return _interpolate_1d(
indices=indices,
yvalues=yvalues,
method=method,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
bounds_error=False,
**kwargs,
)
# interp each column independently
return np.apply_along_axis(func, axis, data)
def _index_to_interp_indices(index: Index, method: str) -> np.ndarray:
"""
Convert Index to ndarray of indices to pass to NumPy/SciPy.
"""
xarr = index._values
if needs_i8_conversion(xarr.dtype):
# GH#1646 for dt64tz
xarr = xarr.view("i8")
if method == "linear":
inds = xarr
inds = cast(np.ndarray, inds)
else:
inds = np.asarray(xarr)
if method in ("values", "index"):
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
return inds
def _interpolate_1d(
indices: np.ndarray,
yvalues: np.ndarray,
method: str | None = "linear",
limit: int | None = None,
limit_direction: str = "forward",
limit_area: str | None = None,
fill_value: Any | None = None,
bounds_error: bool = False,
order: int | None = None,
**kwargs,
):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
indices and yvalues will each be 1-d arrays of the same length.
    bounds_error is currently hardcoded to False since the non-scipy
    interpolators don't take it as an argument.
"""
invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
result = np.empty(indices.shape, dtype=np.float64)
result.fill(np.nan)
return result
if valid.all():
return yvalues
# These are sets of index pointers to invalid values... i.e. {0, 1, etc...
all_nans = set(np.flatnonzero(invalid))
first_valid_index = find_valid_index(yvalues, how="first")
if first_valid_index is None: # no nan found in start
first_valid_index = 0
start_nans = set(range(first_valid_index))
last_valid_index = find_valid_index(yvalues, how="last")
if last_valid_index is None: # no nan found in end
last_valid_index = len(yvalues)
end_nans = set(range(1 + last_valid_index, len(valid)))
# Like the sets above, preserve_nans contains indices of invalid values,
# but in this case, it is the final set of indices that need to be
# preserved as NaN after the interpolation.
# For example if limit_direction='forward' then preserve_nans will
# contain indices of NaNs at the beginning of the series, and NaNs that
    # are more than 'limit' away from the prior non-NaN.
# set preserve_nans based on direction using _interp_limit
preserve_nans: list | set
if limit_direction == "forward":
preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0))
elif limit_direction == "backward":
preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit))
else:
# both directions... just use _interp_limit
preserve_nans = set(_interp_limit(invalid, limit, limit))
# if limit_area is set, add either mid or outside indices
# to preserve_nans GH #16284
if limit_area == "inside":
# preserve NaNs on the outside
preserve_nans |= start_nans | end_nans
elif limit_area == "outside":
# preserve NaNs on the inside
mid_nans = all_nans - start_nans - end_nans
preserve_nans |= mid_nans
# sort preserve_nans and convert to list
preserve_nans = sorted(preserve_nans)
result = yvalues.copy()
if method in NP_METHODS:
# np.interp requires sorted X values, #21037
indexer = np.argsort(indices[valid])
result[invalid] = np.interp(
indices[invalid], indices[valid][indexer], yvalues[valid][indexer]
)
else:
result[invalid] = _interpolate_scipy_wrapper(
indices[valid],
yvalues[valid],
indices[invalid],
method=method,
fill_value=fill_value,
bounds_error=bounds_error,
order=order,
**kwargs,
)
result[preserve_nans] = np.nan
return result
def _interpolate_scipy_wrapper(
x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs
):
"""
Passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
the list in _clean_interp_method.
"""
extra = f"{method} interpolation requires SciPy."
import_optional_dependency("scipy", extra=extra)
from scipy import interpolate
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
"barycentric": interpolate.barycentric_interpolate,
"krogh": interpolate.krogh_interpolate,
"from_derivatives": _from_derivatives,
"piecewise_polynomial": _from_derivatives,
}
if getattr(x, "_is_all_dates", False):
# GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x._values.astype("i8"), new_x.astype("i8")
if method == "pchip":
alt_methods["pchip"] = interpolate.pchip_interpolate
elif method == "akima":
alt_methods["akima"] = _akima_interpolate
elif method == "cubicspline":
alt_methods["cubicspline"] = _cubicspline_interpolate
interp1d_methods = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"polynomial",
]
if method in interp1d_methods:
if method == "polynomial":
method = order
terp = interpolate.interp1d(
x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error
)
new_y = terp(new_x)
elif method == "spline":
# GH #10633, #24014
if isna(order) or (order <= 0):
raise ValueError(
f"order needs to be specified and greater than 0; got order: {order}"
)
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x, **kwargs)
return new_y
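# Minimal sketch of the interp1d branch above, assuming SciPy is installed:
# the pandas "method" string is passed through as interp1d's "kind".
def _interp1d_sketch():
    import numpy as np
    from scipy import interpolate

    x = np.array([0.0, 1.0, 2.0, 3.0])
    y = np.array([0.0, 1.0, 4.0, 9.0])
    terp = interpolate.interp1d(
        x, y, kind="quadratic", fill_value=np.nan, bounds_error=False
    )
    return terp(np.array([0.5, 1.5, 2.5]))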
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives.
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array-like
sorted 1D array of x-coordinates
yi : array-like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order: None or int or array-like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first and last
intervals, or to return NaNs. Default: False.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array-like
The result, of length R or length M or M by R.
"""
from scipy import interpolate
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate)
return m(x)
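# Minimal sketch of BPoly.from_derivatives as used above: when only function
# values are known, each yi[i] is a length-1 list (the reshape(-1, 1) case).
def _from_derivatives_sketch():
    from scipy import interpolate

    xi = [0.0, 1.0, 2.0]
    yi = [[0.0], [1.0], [4.0]]  # function values only, no derivatives
    bp = interpolate.BPoly.from_derivatives(xi, yi)
    return bp([0.5, 1.5])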
def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array-like
A sorted list of x-coordinates, of length N.
yi : array-like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array-like
Of length M.
der : int, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
scipy.interpolate.Akima1DInterpolator
Returns
-------
y : scalar or array-like
The result, of length R or length M or M by R.
"""
from scipy import interpolate
P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
return P(x, nu=der)
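# Minimal sketch of the Akima path, assuming SciPy is installed; nu=0 returns
# interpolated values, nu=1 would return first derivatives instead.
def _akima_sketch():
    import numpy as np
    from scipy import interpolate

    xi = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
    yi = np.array([0.0, 1.0, 0.0, 1.0, 0.0])
    P = interpolate.Akima1DInterpolator(xi, yi)
    return P(np.array([0.5, 2.5]), nu=0)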
def _cubicspline_interpolate(xi, yi, x, axis=0, bc_type="not-a-knot", extrapolate=None):
"""
Convenience function for cubic spline data interpolator.
See `scipy.interpolate.CubicSpline` for details.
Parameters
----------
xi : array-like, shape (n,)
1-d array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
yi : array-like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along ``axis``
(see below) must match the length of ``x``. Values must be finite.
x : scalar or array-like, shape (m,)
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
* 'periodic': The interpolated function is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
* 'clamped': The first derivative at each curve end is zero. Assuming
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
* 'natural': The second derivative at each curve end is zero. Assuming
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
tuple `(order, deriv_values)` allowing arbitrary derivatives to be
specified at curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array-like containing derivative values, shape must
be the same as `y`, excluding ``axis`` dimension. For example, if
`y` is 1D, then `deriv_value` must be a scalar. If `y` is 3D with
the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), ``extrapolate`` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
See Also
--------
scipy.interpolate.CubicHermiteSpline
Returns
-------
y : scalar or array-like
The result, of shape (m,)
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
from scipy import interpolate
P = interpolate.CubicSpline(
xi, yi, axis=axis, bc_type=bc_type, extrapolate=extrapolate
)
return P(x)
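# Minimal sketch of the CubicSpline path, assuming SciPy is installed; the
# 'natural' boundary condition forces zero second derivatives at both ends.
def _cubicspline_sketch():
    import numpy as np
    from scipy import interpolate

    xi = np.array([0.0, 1.0, 2.0, 3.0])
    yi = np.array([0.0, 1.0, 8.0, 27.0])
    P = interpolate.CubicSpline(xi, yi, bc_type="natural")
    return P(np.array([0.5, 1.5, 2.5]))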
def _interpolate_with_limit_area(
values: ArrayLike, method: str, limit: int | None, limit_area: str | None
) -> ArrayLike:
"""
Apply interpolation and limit_area logic to values along a to-be-specified axis.
Parameters
----------
values: array-like
Input array.
method: str
Interpolation method. Could be "bfill" or "pad"
limit: int, optional
Index limit on interpolation.
limit_area: str
Limit area for interpolation. Can be "inside" or "outside"
Returns
-------
values: array-like
Interpolated array.
"""
invalid = isna(values)
if not invalid.all():
first = find_valid_index(values, how="first")
if first is None:
first = 0
last = find_valid_index(values, how="last")
if last is None:
last = len(values)
values = interpolate_2d(
values,
method=method,
limit=limit,
)
if limit_area == "inside":
invalid[first : last + 1] = False
elif limit_area == "outside":
invalid[:first] = invalid[last + 1 :] = False
values[invalid] = np.nan
return values
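# Minimal sketch of the limit_area logic above: with method="pad" and
# limit_area="inside", only NaNs strictly between the first and last valid
# values are filled; leading and trailing NaNs are preserved.
def _limit_area_sketch():
    import numpy as np

    values = np.array([np.nan, 1.0, np.nan, 3.0, np.nan])
    return _interpolate_with_limit_area(
        values, method="pad", limit=None, limit_area="inside"
    )  # -> [nan, 1.0, 1.0, 3.0, nan]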
def interpolate_2d(
values,
method: str = "pad",
axis: Axis = 0,
limit: int | None = None,
limit_area: str | None = None,
):
"""
Perform an actual interpolation of values. Values will be made 2-d if
needed; fills in place and returns the result.
Parameters
----------
values: array-like
Input array.
method: str, default "pad"
Interpolation method. Could be "bfill" or "pad"
axis: 0 or 1
Interpolation axis
limit: int, optional
Index limit on interpolation.
limit_area: str, optional
Limit area for interpolation. Can be "inside" or "outside"
Returns
-------
values: array-like
Interpolated array.
"""
if limit_area is not None:
return np.apply_along_axis(
partial(
_interpolate_with_limit_area,
method=method,
limit=limit,
limit_area=limit_area,
),
axis,
values,
)
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0")
values = values.reshape(tuple((1,) + values.shape))
method = clean_fill_method(method)
tvalues = transf(values)
if method == "pad":
result, _ = _pad_2d(tvalues, limit=limit)
else:
result, _ = _backfill_2d(tvalues, limit=limit)
result = transf(result)
# reshape back
if ndim == 1:
result = result[0]
return result
def _fillna_prep(values, mask: np.ndarray | None = None) -> np.ndarray:
# boilerplate for _pad_1d, _backfill_1d, _pad_2d, _backfill_2d
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
return mask
def _datetimelike_compat(func: F) -> F:
"""
Wrapper to handle datetime64 and timedelta64 dtypes.
"""
@wraps(func)
def new_func(values, limit=None, mask=None):
if needs_i8_conversion(values.dtype):
if mask is None:
# This needs to occur before casting to int64
mask = isna(values)
result, mask = func(values.view("i8"), limit=limit, mask=mask)
return result.view(values.dtype), mask
return func(values, limit=limit, mask=mask)
return cast(F, new_func)
@_datetimelike_compat
def _pad_1d(
values: np.ndarray,
limit: int | None = None,
mask: np.ndarray | None = None,
) -> tuple[np.ndarray, np.ndarray]:
mask = _fillna_prep(values, mask)
algos.pad_inplace(values, mask, limit=limit)
return values, mask
@_datetimelike_compat
def _backfill_1d(
values: np.ndarray,
limit: int | None = None,
mask: np.ndarray | None = None,
) -> tuple[np.ndarray, np.ndarray]:
mask = _fillna_prep(values, mask)
algos.backfill_inplace(values, mask, limit=limit)
return values, mask
@_datetimelike_compat
def _pad_2d(values, limit=None, mask=None):
mask = _fillna_prep(values, mask)
if np.all(values.shape):
algos.pad_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values, mask
@_datetimelike_compat
def _backfill_2d(values, limit=None, mask=None):
mask = _fillna_prep(values, mask)
if np.all(values.shape):
algos.backfill_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values, mask
_fill_methods = {"pad": _pad_1d, "backfill": _backfill_1d}
def get_fill_func(method, ndim: int = 1):
method = clean_fill_method(method)
if ndim == 1:
return _fill_methods[method]
return {"pad": _pad_2d, "backfill": _backfill_2d}[method]
def clean_reindex_fill_method(method):
return clean_fill_method(method, allow_nearest=True)
def _interp_limit(invalid: np.ndarray, fw_limit, bw_limit):
"""
Get indexers of values that won't be filled
because they exceed the limits.
Parameters
----------
invalid : np.ndarray[bool]
fw_limit : int or None
forward limit to index
bw_limit : int or None
backward limit to index
Returns
-------
set of indexers
Notes
-----
This is equivalent to the more readable, but slower
.. code-block:: python
def _interp_limit(invalid, fw_limit, bw_limit):
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
yield x
"""
# handle forward first; the backward direction is the same except
# 1. operate on the reversed array
# 2. subtract the returned indices from N - 1
N = len(invalid)
f_idx = set()
b_idx = set()
def inner(invalid, limit):
limit = min(limit, N)
windowed = _rolling_window(invalid, limit + 1).all(1)
idx = set(np.where(windowed)[0] + limit) | set(
np.where((~invalid[: limit + 1]).cumsum() == 0)[0]
)
return idx
if fw_limit is not None:
if fw_limit == 0:
f_idx = set(np.where(invalid)[0])
else:
f_idx = inner(invalid, fw_limit)
if bw_limit is not None:
if bw_limit == 0:
# then we don't even need to care about backwards
# just use forwards
return f_idx
else:
b_idx_inv = list(inner(invalid[::-1], bw_limit))
b_idx = set(N - 1 - np.asarray(b_idx_inv))
if fw_limit == 0:
return b_idx
return f_idx & b_idx
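# Minimal sketch of _interp_limit: with a forward limit of 1 and bw_limit=0,
# the leading NaN (index 0, no prior valid value) and the second consecutive
# NaN (index 3) exceed the limit; the lone trailing NaN does not.
def _interp_limit_sketch():
    import numpy as np

    invalid = np.array([True, False, True, True, False, True])
    return sorted(_interp_limit(invalid, 1, 0))  # -> [0, 3]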
def _rolling_window(a: np.ndarray, window: int):
"""
[True, True, False, True, False], 2 ->
[
[True, True],
[True, False],
[False, True],
[True, False],
]
"""
# https://stackoverflow.com/a/6811241
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
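# Minimal sketch matching the docstring above: each row of the strided view is
# one length-2 window over the input, built without copying the data.
def _rolling_window_sketch():
    import numpy as np

    a = np.array([True, True, False, True, False])
    return _rolling_window(a, 2)
    # -> [[ True,  True],
    #     [ True, False],
    #     [False,  True],
    #     [ True, False]]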
| {
"content_hash": "7a30d535fd266932621a41b211475f04",
"timestamp": "",
"source": "github",
"line_count": 972,
"max_line_length": 88,
"avg_line_length": 29.616255144032923,
"alnum_prop": 0.6006530725674784,
"repo_name": "rs2/pandas",
"id": "9e85cbec0f29920c9422c397147429f2f7b2efd4",
"size": "28787",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/core/missing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C",
"bytes": "360253"
},
{
"name": "CSS",
"bytes": "1438"
},
{
"name": "Cython",
"bytes": "1081551"
},
{
"name": "Dockerfile",
"bytes": "1690"
},
{
"name": "HTML",
"bytes": "456275"
},
{
"name": "Makefile",
"bytes": "507"
},
{
"name": "Python",
"bytes": "17393243"
},
{
"name": "Shell",
"bytes": "10872"
},
{
"name": "Smarty",
"bytes": "7820"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
import pandas as pd
from six.moves import cPickle as pickle
colnames = ['heading', 'link', 'topic', 'source', 'content']
fileName = '/home/prakarsh_upmanyu23/filtered_art/20minutes-fr-Spider-data_26136-34848_politique.csv'
#fileName = "/home/prakarsh_upmanyu23/filtered_art/concatenated_27269.csv"
df1=pd.read_csv(fileName, names=colnames)
contentList = df1.content.tolist()
headList = df1.heading.tolist()
#pickleFile = '/home/sarthak/PycharmProjects/silicon-beachNLP/news-in-short/processedData/article_and_heading_data.pickle'
#pickleFile = '/home/melvin/Documents/USC/news-in-short/DataExtractor/art/lesechos-fr-spider-data_6445-8935_politique.pickle'
pickleFile = '/home/prakarsh_upmanyu23/latribune-fr-Spider-data_0-5848_politique.pickle'
pickleFile = '/home/prakarsh_upmanyu23/output_files/concatenated.pickle'
try:
f = open(pickleFile, 'wb')
save = {
'content' : contentList,
'heading' : headList,
}
pickle.dump(save,f,pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save pickle file : ' ,e )
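# Minimal read-back sketch, assuming the dump above succeeded; load_articles is
# an illustrative helper name. The saved object is a dict with 'content' and
# 'heading' lists.
def load_articles(path=pickleFile):
    with open(path, 'rb') as handle:
        save = pickle.load(handle)
    return save['content'], save['heading']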
| {
"content_hash": "baf939edb47d1b3aa0d2431e11a44d37",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 125,
"avg_line_length": 38.357142857142854,
"alnum_prop": 0.7392923649906891,
"repo_name": "silicon-beach/news-in-short",
"id": "60c593ac84f7914bfc93b6b94f3147ba32fd1ac5",
"size": "1074",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "summarizer/data_reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73383"
}
],
"symlink_target": ""
} |
'''
This tool is intended to monitor BillGates botnet control commands.
This module can communicate with "Gates" servers.
"Gates" module is usually called cupsdd or sfewfesfs.
'''
from __future__ import print_function
import socket
import time
import re
import struct
from pprint import pprint
def myprint(*args, **kwargs):
print(time.strftime("%c"), *args, **kwargs)
def hexdump(src, length=16):
FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
lines = []
for c in xrange(0, len(src), length):
chars = src[c:c+length]
hex = ' '.join(["%02x" % ord(x) for x in chars])
printable = ''.join(["%s" % ((ord(x) <= 127 and FILTER[ord(x)]) or '.') for x in chars])
lines.append("%04x %-*s %s\n" % (c, length*3, hex, printable))
return ''.join(lines)
def get_ip_addresses(ipaddrs):
ips = re.findall("(?:[0-9]{1,3}\.){3}[0-9]{1,3}\x00.." ,ipaddrs)
ip_list = list()
for ip in ips:
addr = ip[:-3]
port = struct.unpack('H', ip[-2:])[0]
ip_list.append((addr, port))
return ip_list
def decode_command(data):
command = data[0]
if command == "\x01":
# DDoS!
ip_address_count = struct.unpack("B", data[0x47])[0]
ips = get_ip_addresses(data[0x4B:])
myprint("Got DDoS command!", ip_address_count, "Hosts:")
pprint(ips, indent=3)
elif command == "\x02":
# Stop DDoS
myprint("STOP DDoS")
elif command == "\x04":
# PING
pass
else:
myprint("UNKNOWN COMMAND!")
print(hexdump(data))
save.write(hexdump(data))
hello = open('hello.bin', 'rb').read()
ping = open('ping.bin', 'rb').read()
save = open('unknown-commands.bin', 'w+b')
def gates():
s = socket.create_connection(('116.10.189.246', 36008))
myprint("Connected")
s.sendall(hello)
myprint("Sent hello")
data = s.recv(1024)
myprint("Received server hello")
while True:
s.sendall(ping)
data = s.recv(4096)
time.sleep(0.1)
decode_command(data)
if __name__ == "__main__":
while True:
try:
gates()
except socket.error:
myprint("Connection lost. Reconnecting...")
time.sleep(5) | {
"content_hash": "62a085a8041532f46962e1944d20ab62",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 96,
"avg_line_length": 27.695121951219512,
"alnum_prop": 0.567591369440775,
"repo_name": "ValdikSS/billgates-botnet-tracker",
"id": "050a449d6b619ec80e8d733a16e939a4b43f506b",
"size": "2294",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gates/gates.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5115"
}
],
"symlink_target": ""
} |
from test.support import run_unittest
from test.test_math import parse_testfile, test_file
import unittest
import os, sys
import cmath, math
from cmath import phase, polar, rect, pi
INF = float('inf')
NAN = float('nan')
complex_zeros = [complex(x, y) for x in [0.0, -0.0] for y in [0.0, -0.0]]
complex_infinities = [complex(x, y) for x, y in [
(INF, 0.0), # 1st quadrant
(INF, 2.3),
(INF, INF),
(2.3, INF),
(0.0, INF),
(-0.0, INF), # 2nd quadrant
(-2.3, INF),
(-INF, INF),
(-INF, 2.3),
(-INF, 0.0),
(-INF, -0.0), # 3rd quadrant
(-INF, -2.3),
(-INF, -INF),
(-2.3, -INF),
(-0.0, -INF),
(0.0, -INF), # 4th quadrant
(2.3, -INF),
(INF, -INF),
(INF, -2.3),
(INF, -0.0)
]]
complex_nans = [complex(x, y) for x, y in [
(NAN, -INF),
(NAN, -2.3),
(NAN, -0.0),
(NAN, 0.0),
(NAN, 2.3),
(NAN, INF),
(-INF, NAN),
(-2.3, NAN),
(-0.0, NAN),
(0.0, NAN),
(2.3, NAN),
(INF, NAN)
]]
def almostEqualF(a, b, rel_err=2e-15, abs_err = 5e-323):
"""Determine whether floating-point values a and b are equal to within
a (small) rounding error. The default values for rel_err and
abs_err are chosen to be suitable for platforms where a float is
represented by an IEEE 754 double. They allow an error of between
9 and 19 ulps."""
# special values testing
if math.isnan(a):
return math.isnan(b)
if math.isinf(a):
return a == b
# if both a and b are zero, check whether they have the same sign
# (in theory there are examples where it would be legitimate for a
# and b to have opposite signs; in practice these hardly ever
# occur).
if not a and not b:
return math.copysign(1., a) == math.copysign(1., b)
# if a-b overflows, or b is infinite, return False. Again, in
# theory there are examples where a is within a few ulps of the
# max representable float, and then b could legitimately be
# infinite. In practice these examples are rare.
try:
absolute_error = abs(b-a)
except OverflowError:
return False
else:
return absolute_error <= max(abs_err, rel_err * abs(a))
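# Minimal sketch of the tolerance above: an error of a couple of ulps around
# 1.0 is accepted (relative error 2e-15), while an error of 1e-3 is not.
def _almostEqualF_sketch():
    assert almostEqualF(1.0, 1.0 + 5e-16)
    assert not almostEqualF(1.0, 1.001)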
class CMathTests(unittest.TestCase):
# list of all functions in cmath
test_functions = [getattr(cmath, fname) for fname in [
'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh',
'cos', 'cosh', 'exp', 'log', 'log10', 'sin', 'sinh',
'sqrt', 'tan', 'tanh']]
# test first and second arguments independently for 2-argument log
test_functions.append(lambda x : cmath.log(x, 1729. + 0j))
test_functions.append(lambda x : cmath.log(14.-27j, x))
def setUp(self):
self.test_values = open(test_file)
def tearDown(self):
self.test_values.close()
def rAssertAlmostEqual(self, a, b, rel_err = 2e-15, abs_err = 5e-323):
"""Check that two floating-point numbers are almost equal."""
# special values testing
if math.isnan(a):
if math.isnan(b):
return
self.fail("%s should be nan" % repr(b))
if math.isinf(a):
if a == b:
return
self.fail("finite result where infinity excpected: "
"expected %s, got %s" % (repr(a), repr(b)))
if not a and not b:
if math.atan2(a, -1.) != math.atan2(b, -1.):
self.fail("zero has wrong sign: expected %s, got %s" %
(repr(a), repr(b)))
# test passes if either the absolute error or the relative
# error is sufficiently small. The defaults amount to an
# error of between 9 ulps and 19 ulps on an IEEE-754 compliant
# machine.
try:
absolute_error = abs(b-a)
except OverflowError:
pass
else:
if absolute_error <= max(abs_err, rel_err * abs(a)):
return
self.fail("%s and %s are not sufficiently close" % (repr(a), repr(b)))
def test_constants(self):
e_expected = 2.71828182845904523536
pi_expected = 3.14159265358979323846
self.assertAlmostEqual(cmath.pi, pi_expected)
self.assertAlmostEqual(cmath.e, e_expected)
def test_user_object(self):
# Test automatic calling of __complex__ and __float__ by cmath
# functions
# some random values to use as test values; we avoid values
# for which any of the functions in cmath is undefined
# (i.e. 0., 1., -1., 1j, -1j) or would cause overflow
cx_arg = 4.419414439 + 1.497100113j
flt_arg = -6.131677725
# a variety of non-complex numbers, used to check that
# non-complex return values from __complex__ give an error
non_complexes = ["not complex", 1, 5, 2., None,
object(), NotImplemented]
# Now we introduce a variety of classes whose instances might
# end up being passed to the cmath functions
# usual case: new-style class implementing __complex__
class MyComplex(object):
def __init__(self, value):
self.value = value
def __complex__(self):
return self.value
# old-style class implementing __complex__
class MyComplexOS:
def __init__(self, value):
self.value = value
def __complex__(self):
return self.value
# classes for which __complex__ raises an exception
class SomeException(Exception):
pass
class MyComplexException(object):
def __complex__(self):
raise SomeException
class MyComplexExceptionOS:
def __complex__(self):
raise SomeException
# some classes not providing __float__ or __complex__
class NeitherComplexNorFloat(object):
pass
class NeitherComplexNorFloatOS:
pass
class MyInt(object):
def __int__(self): return 2
def __long__(self): return 2
def __index__(self): return 2
class MyIntOS:
def __int__(self): return 2
def __long__(self): return 2
def __index__(self): return 2
# other possible combinations of __float__ and __complex__
# that should work
class FloatAndComplex(object):
def __float__(self):
return flt_arg
def __complex__(self):
return cx_arg
class FloatAndComplexOS:
def __float__(self):
return flt_arg
def __complex__(self):
return cx_arg
class JustFloat(object):
def __float__(self):
return flt_arg
class JustFloatOS:
def __float__(self):
return flt_arg
for f in self.test_functions:
# usual usage
self.assertEqual(f(MyComplex(cx_arg)), f(cx_arg))
self.assertEqual(f(MyComplexOS(cx_arg)), f(cx_arg))
# other combinations of __float__ and __complex__
self.assertEqual(f(FloatAndComplex()), f(cx_arg))
self.assertEqual(f(FloatAndComplexOS()), f(cx_arg))
self.assertEqual(f(JustFloat()), f(flt_arg))
self.assertEqual(f(JustFloatOS()), f(flt_arg))
# TypeError should be raised for classes not providing
# either __complex__ or __float__, even if they provide
# __int__, __long__ or __index__. An old-style class
# currently raises AttributeError instead of a TypeError;
# this could be considered a bug.
self.assertRaises(TypeError, f, NeitherComplexNorFloat())
self.assertRaises(TypeError, f, MyInt())
self.assertRaises(Exception, f, NeitherComplexNorFloatOS())
self.assertRaises(Exception, f, MyIntOS())
# non-complex return value from __complex__ -> TypeError
for bad_complex in non_complexes:
self.assertRaises(TypeError, f, MyComplex(bad_complex))
self.assertRaises(TypeError, f, MyComplexOS(bad_complex))
# exceptions in __complex__ should be propagated correctly
self.assertRaises(SomeException, f, MyComplexException())
self.assertRaises(SomeException, f, MyComplexExceptionOS())
def test_input_type(self):
# ints and longs should be acceptable inputs to all cmath
# functions, by virtue of providing a __float__ method
for f in self.test_functions:
for arg in [2, 2.]:
self.assertEqual(f(arg), f(arg.__float__()))
# but strings should give a TypeError
for f in self.test_functions:
for arg in ["a", "long_string", "0", "1j", ""]:
self.assertRaises(TypeError, f, arg)
def test_cmath_matches_math(self):
# check that corresponding cmath and math functions are equal
# for floats in the appropriate range
# test_values in (0, 1)
test_values = [0.01, 0.1, 0.2, 0.5, 0.9, 0.99]
# test_values for functions defined on [-1., 1.]
unit_interval = test_values + [-x for x in test_values] + \
[0., 1., -1.]
# test_values for log, log10, sqrt
positive = test_values + [1.] + [1./x for x in test_values]
nonnegative = [0.] + positive
# test_values for functions defined on the whole real line
real_line = [0.] + positive + [-x for x in positive]
test_functions = {
'acos' : unit_interval,
'asin' : unit_interval,
'atan' : real_line,
'cos' : real_line,
'cosh' : real_line,
'exp' : real_line,
'log' : positive,
'log10' : positive,
'sin' : real_line,
'sinh' : real_line,
'sqrt' : nonnegative,
'tan' : real_line,
'tanh' : real_line}
for fn, values in test_functions.items():
float_fn = getattr(math, fn)
complex_fn = getattr(cmath, fn)
for v in values:
z = complex_fn(v)
self.rAssertAlmostEqual(float_fn(v), z.real)
self.assertEqual(0., z.imag)
# test two-argument version of log with various bases
for base in [0.5, 2., 10.]:
for v in positive:
z = cmath.log(v, base)
self.rAssertAlmostEqual(math.log(v, base), z.real)
self.assertEqual(0., z.imag)
def test_specific_values(self):
if not float.__getformat__("double").startswith("IEEE"):
return
def rect_complex(z):
"""Wrapped version of rect that accepts a complex number instead of
two float arguments."""
return cmath.rect(z.real, z.imag)
def polar_complex(z):
"""Wrapped version of polar that returns a complex number instead of
two floats."""
return complex(*polar(z))
for id, fn, ar, ai, er, ei, flags in parse_testfile(test_file):
arg = complex(ar, ai)
expected = complex(er, ei)
if fn == 'rect':
function = rect_complex
elif fn == 'polar':
function = polar_complex
else:
function = getattr(cmath, fn)
if 'divide-by-zero' in flags or 'invalid' in flags:
try:
actual = function(arg)
except ValueError:
continue
else:
test_str = "%s: %s(complex(%r, %r))" % (id, fn, ar, ai)
self.fail('ValueError not raised in test %s' % test_str)
if 'overflow' in flags:
try:
actual = function(arg)
except OverflowError:
continue
else:
test_str = "%s: %s(complex(%r, %r))" % (id, fn, ar, ai)
self.fail('OverflowError not raised in test %s' % test_str)
actual = function(arg)
if 'ignore-real-sign' in flags:
actual = complex(abs(actual.real), actual.imag)
expected = complex(abs(expected.real), expected.imag)
if 'ignore-imag-sign' in flags:
actual = complex(actual.real, abs(actual.imag))
expected = complex(expected.real, abs(expected.imag))
# for the real part of the log function, we allow an
# absolute error of up to 2e-15.
if fn in ('log', 'log10'):
real_abs_err = 2e-15
else:
real_abs_err = 5e-323
if not (almostEqualF(expected.real, actual.real,
abs_err = real_abs_err) and
almostEqualF(expected.imag, actual.imag)):
error_message = (
"%s: %s(complex(%r, %r))\n" % (id, fn, ar, ai) +
"Expected: complex(%r, %r)\n" %
(expected.real, expected.imag) +
"Received: complex(%r, %r)\n" %
(actual.real, actual.imag) +
"Received value insufficiently close to expected value.")
self.fail(error_message)
def assertCISEqual(self, a, b):
eps = 1E-7
if abs(a[0] - b[0]) > eps or abs(a[1] - b[1]) > eps:
self.fail((a ,b))
def test_polar(self):
self.assertCISEqual(polar(0), (0., 0.))
self.assertCISEqual(polar(1.), (1., 0.))
self.assertCISEqual(polar(-1.), (1., pi))
self.assertCISEqual(polar(1j), (1., pi/2))
self.assertCISEqual(polar(-1j), (1., -pi/2))
def test_phase(self):
self.assertAlmostEqual(phase(0), 0.)
self.assertAlmostEqual(phase(1.), 0.)
self.assertAlmostEqual(phase(-1.), pi)
self.assertAlmostEqual(phase(-1.+1E-300j), pi)
self.assertAlmostEqual(phase(-1.-1E-300j), -pi)
self.assertAlmostEqual(phase(1j), pi/2)
self.assertAlmostEqual(phase(-1j), -pi/2)
# zeros
self.assertEqual(phase(complex(0.0, 0.0)), 0.0)
self.assertEqual(phase(complex(0.0, -0.0)), -0.0)
self.assertEqual(phase(complex(-0.0, 0.0)), pi)
self.assertEqual(phase(complex(-0.0, -0.0)), -pi)
# infinities
self.assertAlmostEqual(phase(complex(-INF, -0.0)), -pi)
self.assertAlmostEqual(phase(complex(-INF, -2.3)), -pi)
self.assertAlmostEqual(phase(complex(-INF, -INF)), -0.75*pi)
self.assertAlmostEqual(phase(complex(-2.3, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(-0.0, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(0.0, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(2.3, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(INF, -INF)), -pi/4)
self.assertEqual(phase(complex(INF, -2.3)), -0.0)
self.assertEqual(phase(complex(INF, -0.0)), -0.0)
self.assertEqual(phase(complex(INF, 0.0)), 0.0)
self.assertEqual(phase(complex(INF, 2.3)), 0.0)
self.assertAlmostEqual(phase(complex(INF, INF)), pi/4)
self.assertAlmostEqual(phase(complex(2.3, INF)), pi/2)
self.assertAlmostEqual(phase(complex(0.0, INF)), pi/2)
self.assertAlmostEqual(phase(complex(-0.0, INF)), pi/2)
self.assertAlmostEqual(phase(complex(-2.3, INF)), pi/2)
self.assertAlmostEqual(phase(complex(-INF, INF)), 0.75*pi)
self.assertAlmostEqual(phase(complex(-INF, 2.3)), pi)
self.assertAlmostEqual(phase(complex(-INF, 0.0)), pi)
# real or imaginary part NaN
for z in complex_nans:
self.assert_(math.isnan(phase(z)))
def test_abs(self):
# zeros
for z in complex_zeros:
self.assertEqual(abs(z), 0.0)
# infinities
for z in complex_infinities:
self.assertEqual(abs(z), INF)
# real or imaginary part NaN
self.assertEqual(abs(complex(NAN, -INF)), INF)
self.assert_(math.isnan(abs(complex(NAN, -2.3))))
self.assert_(math.isnan(abs(complex(NAN, -0.0))))
self.assert_(math.isnan(abs(complex(NAN, 0.0))))
self.assert_(math.isnan(abs(complex(NAN, 2.3))))
self.assertEqual(abs(complex(NAN, INF)), INF)
self.assertEqual(abs(complex(-INF, NAN)), INF)
self.assert_(math.isnan(abs(complex(-2.3, NAN))))
self.assert_(math.isnan(abs(complex(-0.0, NAN))))
self.assert_(math.isnan(abs(complex(0.0, NAN))))
self.assert_(math.isnan(abs(complex(2.3, NAN))))
self.assertEqual(abs(complex(INF, NAN)), INF)
self.assert_(math.isnan(abs(complex(NAN, NAN))))
# result overflows
if float.__getformat__("double").startswith("IEEE"):
self.assertRaises(OverflowError, abs, complex(1.4e308, 1.4e308))
def assertCEqual(self, a, b):
eps = 1E-7
if abs(a.real - b[0]) > eps or abs(a.imag - b[1]) > eps:
self.fail((a ,b))
def test_rect(self):
self.assertCEqual(rect(0, 0), (0, 0))
self.assertCEqual(rect(1, 0), (1., 0))
self.assertCEqual(rect(1, -pi), (-1., 0))
self.assertCEqual(rect(1, pi/2), (0, 1.))
self.assertCEqual(rect(1, -pi/2), (0, -1.))
def test_isnan(self):
self.failIf(cmath.isnan(1))
self.failIf(cmath.isnan(1j))
self.failIf(cmath.isnan(INF))
self.assert_(cmath.isnan(NAN))
self.assert_(cmath.isnan(complex(NAN, 0)))
self.assert_(cmath.isnan(complex(0, NAN)))
self.assert_(cmath.isnan(complex(NAN, NAN)))
self.assert_(cmath.isnan(complex(NAN, INF)))
self.assert_(cmath.isnan(complex(INF, NAN)))
def test_isinf(self):
self.failIf(cmath.isinf(1))
self.failIf(cmath.isinf(1j))
self.failIf(cmath.isinf(NAN))
self.assert_(cmath.isinf(INF))
self.assert_(cmath.isinf(complex(INF, 0)))
self.assert_(cmath.isinf(complex(0, INF)))
self.assert_(cmath.isinf(complex(INF, INF)))
self.assert_(cmath.isinf(complex(NAN, INF)))
self.assert_(cmath.isinf(complex(INF, NAN)))
def test_main():
run_unittest(CMathTests)
if __name__ == "__main__":
test_main()
| {
"content_hash": "82f9439d2d37a74c229983b3837544cf",
"timestamp": "",
"source": "github",
"line_count": 486,
"max_line_length": 80,
"avg_line_length": 38.306584362139915,
"alnum_prop": 0.5442874791856905,
"repo_name": "MalloyPower/parsing-python",
"id": "3c34fecd90492b58d764e65bb7c3db23f4ae3e76",
"size": "18617",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.0/Lib/test/test_cmath.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
import logging
import sys
import time
from eventlet import event
from eventlet import greenthread
from cinder.openstack.common._i18n import _LE, _LW
LOG = logging.getLogger(__name__)
# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
# with time.time() called in the standard logging module
# during unittests.
_ts = lambda: time.time()
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCallBase.
The poll-function passed to LoopingCallBase can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCallBase.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCallBase.wait() should return."""
self.retvalue = retvalue
class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
self.done = None
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = _ts()
self.f(*self.args, **self.kw)
end = _ts()
if not self._running:
break
delay = end - start - interval
if delay > 0:
LOG.warn(_LW('task %(func_name)r run outlasted '
'interval by %(delay).2f sec'),
{'func_name': self.f, 'delay': delay})
greenthread.sleep(-delay if delay < 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_LE('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug('Dynamic looping call %(func_name)r sleeping '
'for %(idle).02f seconds',
{'func_name': self.f, 'idle': idle})
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_LE('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
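# Minimal usage sketch (illustrative names only): the wrapped function raises
# LoopingCallDone to stop the loop and hand a value back to wait().
def _looping_call_sketch():
    state = {'count': 0}

    def _tick():
        state['count'] += 1
        if state['count'] >= 3:
            raise LoopingCallDone(retvalue=state['count'])

    timer = FixedIntervalLoopingCall(_tick)
    timer.start(interval=0.01)
    return timer.wait()  # returns 3 once _tick has run three times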
| {
"content_hash": "9de268ba756bfadfea5a9539bbd8e6a8",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 79,
"avg_line_length": 30.7,
"alnum_prop": 0.5357053370082686,
"repo_name": "yanheven/cinder",
"id": "efe2e97deeba3d5c94440811ee40cf6c0e8f9cb2",
"size": "4761",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "cinder/openstack/common/loopingcall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10655225"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
import argparse
import datetime
import os
import re
import sys
from collections import defaultdict
################################################################################
# Argument Parser
################################################################################
DESCRIPTION = """Python script to help create/update the arm64 lstFile
"""
PARSER = argparse.ArgumentParser(description=DESCRIPTION)
PARSER.add_argument("--test", dest="testing", action="store_true", default=False)
PARSER.add_argument("-lst_file", dest="old_list_file", nargs='?', default=None)
PARSER.add_argument("-pri0_test_dir", dest="pri0_test_dir", nargs='?', default=None)
PARSER.add_argument("-pri1_test_dir", dest="pri1_test_dir", nargs='?', default=None)
PARSER.add_argument("-commit_hash", dest="commit_hash", nargs='?', default=None)
PARSER.add_argument("-failures_csv", dest="failures_csv", nargs='?', default=None)
PARSER.add_argument("--unset_new", dest="unset_new", action="store_true", default=False)
ARGS = PARSER.parse_args(sys.argv[1:])
################################################################################
# Helper Functions
################################################################################
def create_list_file(file_name, metadata):
""" Create a lstFile given a set of metadata input
Args:
file_name (str): Location to write the lstFile
metadata ({ str: { str: str } }): Dictionary mapping test name to
                                        : a tuple; the first element is
                                        : a dictionary of key/value attributes,
                                        : the second is the test index.
"""
current_time = datetime.datetime.now()
current_time_str = current_time.strftime("%d-%b-%Y %H:%M:%S%z")
metadata = [metadata[item] for item in metadata]
metadata = sorted(metadata, key=lambda item: item[1])
new_metadata = [item for item in metadata if item[1] == -1]
old_metadata = [item for item in metadata if item[1] != -1]
with open(file_name, "w") as file_handle:
file_handle.write("## This list file has been produced automatically. Any changes\n")
file_handle.write("## are subject to being overwritten when reproducing this file.\n")
file_handle.write("## \n")
file_handle.write("## Last Updated: %s\n" % current_time_str)
file_handle.write("## Commit: %s\n" % ARGS.commit_hash)
file_handle.write("## \n")
order = ["RelativePath", "WorkingDir", "Expected",
"MaxAllowedDurationSeconds", "Categories", "HostStyle"]
def write_metadata(data, count=None):
for item in data:
test_name = item[0]["RelativePath"]
if item[1] != -1:
count = item[1]
item = item[0]
# Get the test name.
title = "[%s_%d]" % (test_name.split("\\")[-1], count)
count += 1
file_handle.write("%s\n" % title)
attribute_str = ""
for key in order:
attribute_str += "%s=%s\n" % (key, item[key])
file_handle.write(attribute_str + "\n")
write_metadata(old_metadata)
old_number = 0
try:
old_number = old_metadata[-1][1] + 1
except:
# New lstFile
pass
write_metadata(new_metadata, old_number + 1)
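# Sketch of one block produced by create_list_file above (hypothetical test
# name and values; the key order comes from the "order" list):
#
#   [bar.cmd_0]
#   RelativePath=JIT\foo\bar.cmd
#   WorkingDir=JIT\foo
#   Expected=0
#   MaxAllowedDurationSeconds=600
#   Categories=EXPECTED_PASS
#   HostStyle=0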
def create_metadata(tests):
""" Given a set of tests create the metadata around them
Args:
tests ({str : int}): List of tests for which to determine metadata
: int represents the priority
Returns:
test_metadata ({ str: { str: str } }): Dictionary mapping test name to
: a dictionary of key/value
: attributes.
"""
test_metadata = defaultdict(lambda: None)
failures_csv = ARGS.failures_csv
failure_information = defaultdict(lambda: None)
if failures_csv is not None:
lines = []
assert(os.path.isfile(failures_csv))
with open(failures_csv, "r") as file_handle:
lines = file_handle.readlines()
try:
for line in lines:
split = line.split(",")
relative_path = split[0].replace("/", "\\")
category = split[1]
failure_information[relative_path] = category.strip()
except:
raise Exception("Error. CSV format expects: relativepath,category")
for test in tests:
test_name = test
priority = tests[test]
working_directory = os.path.dirname(test_name).replace("/", "\\")
        # Make sure the tests use the Windows \ separator.
relative_path = test_name.replace("/", "\\")
max_duration = "600"
if priority == 0:
categories = "EXPECTED_PASS"
else:
categories = "EXPECTED_PASS;Pri%d" % priority
expected = "0"
host_style = "0"
metadata = defaultdict(lambda: None)
metadata["RelativePath"] = relative_path
metadata["WorkingDir"] = working_directory
metadata["MaxAllowedDurationSeconds"] = max_duration
metadata["HostStyle"] = host_style
metadata["Expected"] = expected
metadata["Categories"] = categories
if failure_information[relative_path] is not None:
metadata["Categories"] = failure_information[relative_path]
test_metadata[relative_path] = metadata
return test_metadata
def get_all_tests(base_dir):
""" Find all of the tests in the enlistment
Args:
base_dir (str): Directory to start traversing from
Returns:
test_list ([str]): List of the tests. Note this is defined to be every
: cmd file under the base_dir.
Note:
To find the tests correctly you must build the tests correctly and
pass that directory. This method will NOT check to make sure that
this has been done correctly.
This is a recursive method.
"""
def get_all_tests_helper(working_dir):
""" Helper function to recursively get all tests.
"""
assert os.path.isdir(working_dir)
items = os.listdir(working_dir)
items = [os.path.join(working_dir, item) for item in items]
dirs = [item for item in items if os.path.isdir(item)]
tests = [item for item in items if ".cmd" in item]
for item in dirs:
tests += get_all_tests_helper(item)
return tests
# Recursively get all of the tests in the directory.
tests = get_all_tests_helper(base_dir)
# Find the correct base directory for the tests.
common_prefix = os.path.commonprefix(tests)
if common_prefix is not None:
tests = [test.replace(common_prefix, "") for test in tests]
return tests
def log(message):
""" Log a debug message. This is to be used when the --test option is passed
"""
if ARGS.testing is True:
print message
def parse_lst_file(lst_file):
"""Parse a lstFile given.
Args:
lst_file(str): location of the lstFile
Returns:
test_metadata (defaultdict(lambda: None)): Key is test name.
"""
assert os.path.isfile(lst_file)
contents = None
with open(lst_file) as file_handle:
contents = file_handle.read()
split = re.split("\[(.*?)\]", contents)
unique_name = None
test_metadata = defaultdict(lambda: None)
for item in split:
if len(item) == 0 or item[0] == "#":
continue
if unique_name is None:
unique_name = item
else:
index = int(unique_name.split("_")[-1])
metadata = defaultdict(lambda: None)
attributes = item.split("\n")
for attribute in attributes:
# Skip the removed new lines.
if len(attribute) == 0:
continue
pair = attribute.split("=")
key = pair[0].strip()
value = pair[1].strip()
metadata[key] = value
# Relative path is unique, while the test name alone is not.
unique_name = metadata["RelativePath"]
test_metadata[unique_name] = (metadata, index)
unique_name = None
return test_metadata
################################################################################
# Main
################################################################################
def main(args):
""" Main method
Args:
        args ([str]): the arguments passed to the program.
"""
# Assign all of the passed variables.
pri0_test_dir = args.pri0_test_dir
pri1_test_dir = args.pri1_test_dir
old_list_file = args.old_list_file
commit_hash = args.commit_hash
unset_new = args.unset_new
if commit_hash is None:
print "Error please provide a commit hash."
sys.exit(1)
if pri0_test_dir is None or not os.path.isdir(pri0_test_dir):
print "Error the Pri0 test directory passed is not a valid directory."
sys.exit(1)
if pri1_test_dir is None or not os.path.isdir(pri1_test_dir):
print "Error the Pri1 test directory passed is not a valid directory."
sys.exit(1)
pri0_tests = get_all_tests(pri0_test_dir)
print "Found %d tests in the pri0 test directory." % (len(pri0_tests))
pri1_tests = get_all_tests(pri1_test_dir)
print "Found %d tests in the pri1 test directory." % (len(pri1_tests))
print
priority_marked_tests = defaultdict(lambda: None)
for test in pri1_tests:
priority_marked_tests[test] = 1
for test in pri0_tests:
priority_marked_tests[test] = 0
old_test_metadata = None
# If we are updating an old lstFile. Get all of the tests from that
# lstFile and their metadata.
if old_list_file is not None:
old_test_metadata = parse_lst_file(old_list_file)
print "Found %d tests in the old lstFile." % (len(old_test_metadata))
print
test_metadata = create_metadata(priority_marked_tests)
# Make sure the tuples are set up correctly.
for item in test_metadata:
test_metadata[item] = (test_metadata[item], -1)
if old_test_metadata is not None:
# If the new information has been changed, we will need to update
# the lstFile.
new_test_count = 0
update_count = 0
for test_name in test_metadata:
new_metadata = test_metadata[test_name]
old_metadata = old_test_metadata[test_name]
attributes = None
if old_test_metadata[test_name] is None:
new_test_count += 1
new_metadata[0]["Categories"] += ";NEW"
old_test_metadata[test_name] = (new_metadata[0], -1)
else:
index = old_metadata[1]
old_metadata = old_metadata[0]
attributes = set(old_metadata.keys() + new_metadata[0].keys())
# Make sure we go through all attributes of both sets.
# If an attribute exists in one set but not the other it will
# be None. If the new metadata has a new attribute, write this
# into the old metadata. If the old metadata has an attribute
# that does not exist in the new set. Do not remove it.
overwritten = False
for attribute in attributes:
if attribute == "MaxAllowedDurationSeconds":
continue
if attribute == "Categories":
new_split = new_metadata[0]["Categories"].split(";")
old_split = old_metadata["Categories"].split(";")
if unset_new:
if "NEW" in old_split:
old_split.remove("NEW")
# If an old test is marked as a failing test. Make
# sure that we carry that information along.
if "EXPECTED_PASS" in new_split and "EXPECTED_FAIL" in old_split:
new_split.remove("EXPECTED_PASS")
# If it used to be marked as pass but it is now failing. Make sure
# we remove the old category.
elif "EXPECTED_FAIL" in new_split and "EXPECTED_PASS" in old_split:
old_split.remove("EXPECTED_PASS")
joined_categories = set(old_split + new_split)
if (old_split != new_split):
overwritten = True
ordered_categories = []
for item in old_split:
if item in joined_categories:
ordered_categories.append(item)
joined_categories.remove(item)
ordered_categories = [item for item in ordered_categories if item != ""]
old_metadata[attribute] = ";".join(ordered_categories)
old_metadata[attribute] = old_metadata[attribute] + ";" + ";".join(joined_categories) if len(joined_categories) > 0 else old_metadata[attribute]
old_test_metadata[test_name] = (old_metadata, index)
elif new_metadata[0][attribute] != old_metadata[attribute]:
# If the old information is not the same as the new
# information, keep the new information. overwrite the old
# metadata.
if new_metadata[0][attribute] is not None:
overwritten = True
old_metadata[attribute] = new_metadata[0][attribute]
old_test_metadata[test_name] = (old_metadata, index)
if overwritten:
update_count += 1
tests_removed = 0
tests_to_remove = []
for old_test_name in old_test_metadata:
# Remove all old unreferenced tests
if old_test_name not in test_metadata:
tests_to_remove.append(old_test_name)
tests_removed += 1
for test_name in tests_to_remove:
old_test_metadata.pop(test_name)
print "Added %d tests." % new_test_count
print "Removed %d tests." % tests_removed
print "Finished join. %d tests updated." % update_count
test_metadata = old_test_metadata
# Overwrite the old file if provided, else use the generic name Tests.lst
lst_file = "Tests.lst" if old_list_file is None else old_list_file
# Write out the new lstFile
create_list_file(lst_file, test_metadata)
################################################################################
################################################################################
if __name__ == "__main__":
main(ARGS)
| {
"content_hash": "e4a69c0cb4a87593344c6338f8e35f91",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 168,
"avg_line_length": 35.3587962962963,
"alnum_prop": 0.5359083469721767,
"repo_name": "mmitche/coreclr",
"id": "4db689a98260e09807ae5b21383316dbf43d2faf",
"size": "15747",
"binary": false,
"copies": "63",
"ref": "refs/heads/master",
"path": "tests/scripts/lst_creator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "956111"
},
{
"name": "Awk",
"bytes": "6916"
},
{
"name": "Batchfile",
"bytes": "168475"
},
{
"name": "C",
"bytes": "5588467"
},
{
"name": "C#",
"bytes": "150477201"
},
{
"name": "C++",
"bytes": "65887021"
},
{
"name": "CMake",
"bytes": "715184"
},
{
"name": "Groovy",
"bytes": "225924"
},
{
"name": "M4",
"bytes": "15214"
},
{
"name": "Makefile",
"bytes": "46117"
},
{
"name": "Objective-C",
"bytes": "16829"
},
{
"name": "Perl",
"bytes": "23640"
},
{
"name": "PowerShell",
"bytes": "54894"
},
{
"name": "Python",
"bytes": "548588"
},
{
"name": "Roff",
"bytes": "656420"
},
{
"name": "Scala",
"bytes": "4102"
},
{
"name": "Shell",
"bytes": "490554"
},
{
"name": "Smalltalk",
"bytes": "635930"
},
{
"name": "SuperCollider",
"bytes": "650"
},
{
"name": "TeX",
"bytes": "126781"
},
{
"name": "XSLT",
"bytes": "1016"
},
{
"name": "Yacc",
"bytes": "157492"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_sayama_edosun_q2_needed.iff"
result.attribute_template_id = -1
result.stfName("loot_nboo_n","sayama_edosun_q2_needed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "ebd48a09f6379c197ef8b3f62ab5f458",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 90,
"avg_line_length": 25.846153846153847,
"alnum_prop": 0.7053571428571429,
"repo_name": "obi-two/Rebelion",
"id": "ee53df6e68baf6643b2dee433777fe973190f9dd",
"size": "481",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/mission/quest_item/shared_sayama_edosun_q2_needed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |